@@ -7,6 +7,7 @@ use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, LockGuard, Lrc, Or
 use rustc_data_structures::unlikely;
 use rustc_errors::Diagnostic;
 use rustc_index::vec::{Idx, IndexVec};
+use rustc_serialize::{Encodable, Encoder};

 use parking_lot::{Condvar, Mutex};
 use smallvec::{smallvec, SmallVec};
@@ -21,7 +22,7 @@ use std::sync::atomic::Ordering::Relaxed;
 use super::debug::EdgeFilter;
 use super::prev::PreviousDepGraph;
 use super::query::DepGraphQuery;
-use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex};
+use super::serialized::SerializedDepNodeIndex;
 use super::{DepContext, DepKind, DepNode, WorkProductId};

 #[derive(Clone)]
@@ -148,7 +149,7 @@ impl<K: DepKind> DepGraph<K> {
         let mut edge_list_indices = Vec::with_capacity(node_count);
         let mut edge_list_data = Vec::with_capacity(edge_count);

-        // See `serialize` for notes on the approach used here.
+        // See `DepGraph`'s `Encodable` implementation for notes on the approach used here.

         edge_list_data.extend(data.unshared_edges.iter().map(|i| i.index()));

@@ -551,19 +552,6 @@ impl<K: DepKind> DepGraph<K> {
         self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
     }

-    pub fn edge_deduplication_data(&self) -> Option<(u64, u64)> {
-        if cfg!(debug_assertions) {
-            let current_dep_graph = &self.data.as_ref().unwrap().current;
-
-            Some((
-                current_dep_graph.total_read_count.load(Relaxed),
-                current_dep_graph.total_duplicate_read_count.load(Relaxed),
-            ))
-        } else {
-            None
-        }
-    }
-
     fn edge_count(&self, node_data: &LockGuard<'_, DepNodeData<K>>) -> usize {
         let data = self.data.as_ref().unwrap();
         let previous = &data.previous;
@@ -579,84 +567,6 @@ impl<K: DepKind> DepGraph<K> {
         edge_count
     }

-    pub fn serialize(&self) -> SerializedDepGraph<K> {
-        type SDNI = SerializedDepNodeIndex;
-
-        let data = self.data.as_ref().unwrap();
-        let previous = &data.previous;
-
-        // Note locking order: `prev_index_to_index`, then `data`.
-        let prev_index_to_index = data.current.prev_index_to_index.lock();
-        let data = data.current.data.lock();
-        let node_count = data.hybrid_indices.len();
-        let edge_count = self.edge_count(&data);
-
-        let mut nodes = IndexVec::with_capacity(node_count);
-        let mut fingerprints = IndexVec::with_capacity(node_count);
-        let mut edge_list_indices = IndexVec::with_capacity(node_count);
-        let mut edge_list_data = Vec::with_capacity(edge_count);
-
-        // `rustc_middle::ty::query::OnDiskCache` expects nodes to be in
-        // `DepNodeIndex` order. The edges in `edge_list_data`, on the other
-        // hand, don't need to be in a particular order, as long as each node
-        // can reference its edges as a contiguous range within it. This is why
-        // we're able to copy `unshared_edges` directly into `edge_list_data`.
-        // It meets the above requirements, and each non-dark-green node already
-        // knows the range of edges to reference within it, which they'll push
-        // onto `edge_list_indices`. Dark green nodes, however, don't have their
-        // edges in `unshared_edges`, so need to add them to `edge_list_data`.
-
-        edge_list_data.extend(data.unshared_edges.iter().map(|i| SDNI::new(i.index())));
-
-        for &hybrid_index in data.hybrid_indices.iter() {
-            match hybrid_index.into() {
-                HybridIndex::New(i) => {
-                    let new = &data.new;
-                    nodes.push(new.nodes[i]);
-                    fingerprints.push(new.fingerprints[i]);
-                    let edges = &new.edges[i];
-                    edge_list_indices.push((edges.start.as_u32(), edges.end.as_u32()));
-                }
-                HybridIndex::Red(i) => {
-                    let red = &data.red;
-                    nodes.push(previous.index_to_node(red.node_indices[i]));
-                    fingerprints.push(red.fingerprints[i]);
-                    let edges = &red.edges[i];
-                    edge_list_indices.push((edges.start.as_u32(), edges.end.as_u32()));
-                }
-                HybridIndex::LightGreen(i) => {
-                    let lg = &data.light_green;
-                    nodes.push(previous.index_to_node(lg.node_indices[i]));
-                    fingerprints.push(previous.fingerprint_by_index(lg.node_indices[i]));
-                    let edges = &lg.edges[i];
-                    edge_list_indices.push((edges.start.as_u32(), edges.end.as_u32()));
-                }
-                HybridIndex::DarkGreen(prev_index) => {
-                    nodes.push(previous.index_to_node(prev_index));
-                    fingerprints.push(previous.fingerprint_by_index(prev_index));
-
-                    let edges_iter = previous
-                        .edge_targets_from(prev_index)
-                        .iter()
-                        .map(|&dst| prev_index_to_index[dst].as_ref().unwrap());
-
-                    let start = edge_list_data.len() as u32;
-                    edge_list_data.extend(edges_iter.map(|i| SDNI::new(i.index())));
-                    let end = edge_list_data.len() as u32;
-                    edge_list_indices.push((start, end));
-                }
-            }
-        }
-
-        debug_assert_eq!(nodes.len(), node_count);
-        debug_assert_eq!(fingerprints.len(), node_count);
-        debug_assert_eq!(edge_list_indices.len(), node_count);
-        debug_assert_eq!(edge_list_data.len(), edge_count);
-        debug_assert!(edge_list_data.len() <= u32::MAX as usize);
-
-        SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data }
-    }
-
     pub fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> {
         if let Some(ref data) = self.data {
             if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
@@ -997,12 +907,251 @@ impl<K: DepKind> DepGraph<K> {
         }
     }

+    pub fn print_incremental_info(&self) {
+        #[derive(Clone)]
+        struct Stat<Kind: DepKind> {
+            kind: Kind,
+            node_counter: u64,
+            edge_counter: u64,
+        }
+
+        let data = self.data.as_ref().unwrap();
+        let prev = &data.previous;
+        let current = &data.current;
+        let data = current.data.lock();
+
+        let mut stats: FxHashMap<_, Stat<K>> = FxHashMap::with_hasher(Default::default());
+
+        for &hybrid_index in data.hybrid_indices.iter() {
+            let (kind, edge_count) = match hybrid_index.into() {
+                HybridIndex::New(new_index) => {
+                    let kind = data.new.nodes[new_index].kind;
+                    let edge_range = &data.new.edges[new_index];
+                    (kind, edge_range.end.as_usize() - edge_range.start.as_usize())
+                }
+                HybridIndex::Red(red_index) => {
+                    let kind = prev.index_to_node(data.red.node_indices[red_index]).kind;
+                    let edge_range = &data.red.edges[red_index];
+                    (kind, edge_range.end.as_usize() - edge_range.start.as_usize())
+                }
+                HybridIndex::LightGreen(lg_index) => {
+                    let kind = prev.index_to_node(data.light_green.node_indices[lg_index]).kind;
+                    let edge_range = &data.light_green.edges[lg_index];
+                    (kind, edge_range.end.as_usize() - edge_range.start.as_usize())
+                }
+                HybridIndex::DarkGreen(prev_index) => {
+                    let kind = prev.index_to_node(prev_index).kind;
+                    let edge_count = prev.edge_targets_from(prev_index).len();
+                    (kind, edge_count)
+                }
+            };
+
+            let stat = stats.entry(kind).or_insert(Stat { kind, node_counter: 0, edge_counter: 0 });
+            stat.node_counter += 1;
+            stat.edge_counter += edge_count as u64;
+        }
+
+        let total_node_count = data.hybrid_indices.len();
+        let total_edge_count = self.edge_count(&data);
+
+        // Drop the lock guard.
+        std::mem::drop(data);
+
+        let mut stats: Vec<_> = stats.values().cloned().collect();
+        stats.sort_by_key(|s| -(s.node_counter as i64));
+
+        const SEPARATOR: &str = "[incremental] --------------------------------\
+                                 ----------------------------------------------\
+                                 ------------";
+
+        println!("[incremental]");
+        println!("[incremental] DepGraph Statistics");
+        println!("{}", SEPARATOR);
+        println!("[incremental]");
+        println!("[incremental] Total Node Count: {}", total_node_count);
+        println!("[incremental] Total Edge Count: {}", total_edge_count);
+
+        if cfg!(debug_assertions) {
+            let total_edge_reads = current.total_read_count.load(Relaxed);
+            let total_duplicate_edge_reads = current.total_duplicate_read_count.load(Relaxed);
+
+            println!("[incremental] Total Edge Reads: {}", total_edge_reads);
+            println!("[incremental] Total Duplicate Edge Reads: {}", total_duplicate_edge_reads);
+        }
+
+        println!("[incremental]");
+
+        println!(
+            "[incremental] {:<36}| {:<17}| {:<12}| {:<17}|",
+            "Node Kind", "Node Frequency", "Node Count", "Avg. Edge Count"
+        );
+
+        println!(
+            "[incremental] -------------------------------------\
+                          |------------------\
+                          |-------------\
+                          |------------------|"
+        );
+
+        for stat in stats {
+            let node_kind_ratio = (100.0 * (stat.node_counter as f64)) / (total_node_count as f64);
+            let node_kind_avg_edges = (stat.edge_counter as f64) / (stat.node_counter as f64);
+
+            println!(
+                "[incremental] {:<36}|{:>16.1}% |{:>12} |{:>17.1} |",
+                format!("{:?}", stat.kind),
+                node_kind_ratio,
+                stat.node_counter,
+                node_kind_avg_edges,
+            );
+        }
+
+        println!("{}", SEPARATOR);
+        println!("[incremental]");
+    }
+
     fn next_virtual_depnode_index(&self) -> DepNodeIndex {
         let index = self.virtual_dep_node_index.fetch_add(1, Relaxed);
         DepNodeIndex::from_u32(index)
     }
 }

+impl<E: Encoder, K: DepKind + Encodable<E>> Encodable<E> for DepGraph<K> {
+    fn encode(&self, e: &mut E) -> Result<(), E::Error> {
+        // We used to serialize the dep graph by creating and serializing a `SerializedDepGraph`
+        // using data copied from the `DepGraph`. But copying created a large memory spike, so we
+        // now serialize directly from the `DepGraph` as if it's a `SerializedDepGraph`. Because we
+        // deserialize that data into a `SerializedDepGraph` in the next compilation session, we
+        // need `DepGraph`'s `Encodable` and `SerializedDepGraph`'s `Decodable` implementations to
+        // be in sync. If you update this encoding, be sure to update the decoding, and vice-versa.
+
+        let data = self.data.as_ref().unwrap();
+        let prev = &data.previous;
+
+        // Note locking order: `prev_index_to_index`, then `data`.
+        let prev_index_to_index = data.current.prev_index_to_index.lock();
+        let data = data.current.data.lock();
+        let new = &data.new;
+        let red = &data.red;
+        let lg = &data.light_green;
+
+        let node_count = data.hybrid_indices.len();
+        let edge_count = self.edge_count(&data);
+
+        // `rustc_middle::ty::query::OnDiskCache` expects nodes to be encoded in `DepNodeIndex`
+        // order. The edges in `edge_list_data` don't need to be in a particular order, as long as
+        // each node references its edges as a contiguous range within it. Therefore, we can encode
+        // `edge_list_data` directly from `unshared_edges`. It meets the above requirements, as
+        // each non-dark-green node already knows the range of edges to reference within it, which
+        // they'll encode in `edge_list_indices`. Dark green nodes, however, don't have their edges
+        // in `unshared_edges`, so need to add them to `edge_list_data`.
+
+        use HybridIndex::*;
+
+        // Encoded values (nodes, etc.) are explicitly typed below to avoid inadvertently
+        // serializing data in the wrong format (i.e. one incompatible with `SerializedDepGraph`).
+        e.emit_struct("SerializedDepGraph", 4, |e| {
+            e.emit_struct_field("nodes", 0, |e| {
+                // `SerializedDepGraph` expects this to be encoded as a sequence of `DepNode`s.
+                e.emit_seq(node_count, |e| {
+                    for (seq_index, &hybrid_index) in data.hybrid_indices.iter().enumerate() {
+                        let node: DepNode<K> = match hybrid_index.into() {
+                            New(i) => new.nodes[i],
+                            Red(i) => prev.index_to_node(red.node_indices[i]),
+                            LightGreen(i) => prev.index_to_node(lg.node_indices[i]),
+                            DarkGreen(prev_index) => prev.index_to_node(prev_index),
+                        };
+
+                        e.emit_seq_elt(seq_index, |e| node.encode(e))?;
+                    }
+
+                    Ok(())
+                })
+            })?;
+
+            e.emit_struct_field("fingerprints", 1, |e| {
+                // `SerializedDepGraph` expects this to be encoded as a sequence of `Fingerprint`s.
+                e.emit_seq(node_count, |e| {
+                    for (seq_index, &hybrid_index) in data.hybrid_indices.iter().enumerate() {
+                        let fingerprint: Fingerprint = match hybrid_index.into() {
+                            New(i) => new.fingerprints[i],
+                            Red(i) => red.fingerprints[i],
+                            LightGreen(i) => prev.fingerprint_by_index(lg.node_indices[i]),
+                            DarkGreen(prev_index) => prev.fingerprint_by_index(prev_index),
+                        };
+
+                        e.emit_seq_elt(seq_index, |e| fingerprint.encode(e))?;
+                    }
+
+                    Ok(())
+                })
+            })?;
+
+            e.emit_struct_field("edge_list_indices", 2, |e| {
+                // `SerializedDepGraph` expects this to be encoded as a sequence of `(u32, u32)`s.
+                e.emit_seq(node_count, |e| {
+                    // Dark green node edges start after the unshared (all other nodes') edges.
+                    let mut dark_green_edge_index = data.unshared_edges.len();
+
+                    for (seq_index, &hybrid_index) in data.hybrid_indices.iter().enumerate() {
+                        let edge_indices: (u32, u32) = match hybrid_index.into() {
+                            New(i) => (new.edges[i].start.as_u32(), new.edges[i].end.as_u32()),
+                            Red(i) => (red.edges[i].start.as_u32(), red.edges[i].end.as_u32()),
+                            LightGreen(i) => (lg.edges[i].start.as_u32(), lg.edges[i].end.as_u32()),
+                            DarkGreen(prev_index) => {
+                                let edge_count = prev.edge_targets_from(prev_index).len();
+                                let start = dark_green_edge_index as u32;
+                                dark_green_edge_index += edge_count;
+                                let end = dark_green_edge_index as u32;
+                                (start, end)
+                            }
+                        };
+
+                        e.emit_seq_elt(seq_index, |e| edge_indices.encode(e))?;
+                    }
+
+                    assert_eq!(dark_green_edge_index, edge_count);
+
+                    Ok(())
+                })
+            })?;
+
+            e.emit_struct_field("edge_list_data", 3, |e| {
+                // `SerializedDepGraph` expects this to be encoded as a sequence of
+                // `SerializedDepNodeIndex`.
+                e.emit_seq(edge_count, |e| {
+                    for (seq_index, &edge) in data.unshared_edges.iter().enumerate() {
+                        let serialized_edge = SerializedDepNodeIndex::new(edge.index());
+                        e.emit_seq_elt(seq_index, |e| serialized_edge.encode(e))?;
+                    }
+
+                    let mut seq_index = data.unshared_edges.len();
+
+                    for &hybrid_index in data.hybrid_indices.iter() {
+                        if let DarkGreen(prev_index) = hybrid_index.into() {
+                            for &edge in prev.edge_targets_from(prev_index) {
+                                // Dark green node edges are stored in the previous graph
+                                // and must be converted to edges in the current graph,
+                                // and then serialized as `SerializedDepNodeIndex`.
+                                let serialized_edge = SerializedDepNodeIndex::new(
+                                    prev_index_to_index[edge].as_ref().unwrap().index(),
+                                );
+
+                                e.emit_seq_elt(seq_index, |e| serialized_edge.encode(e))?;
+                                seq_index += 1;
+                            }
+                        }
+                    }
+
+                    assert_eq!(seq_index, edge_count);
+
+                    Ok(())
+                })
+            })
+        })
+    }
+}
+
 /// A "work product" is an intermediate result that we save into the
 /// incremental directory for later re-use. The primary example is
 /// the object files that we save for each partition at code
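The heart of the change above is that the dep graph is now streamed out in `SerializedDepGraph`'s on-disk layout without ever materializing a `SerializedDepGraph`, which is what previously caused the memory spike. The sketch below illustrates that encode-in-the-target-shape pattern with hypothetical stand-in types (a toy graph and a plain `Vec<u32>` buffer instead of rustc's real `Encoder`); it is not rustc code, only a self-contained illustration of the idea that the encoder and decoder must agree on the layout even though only the decoder works with the flat type.

// Hypothetical stand-ins, not rustc types: encode a live structure directly in
// the serialized type's layout, so no intermediate copy is needed when encoding.

// The shape the decoder reconstructs in the next session.
#[derive(Debug, PartialEq)]
struct SerializedGraph {
    nodes: Vec<u32>,
    edge_list_data: Vec<u32>,
}

// A richer in-memory form, with nodes split across pools (loosely analogous to
// the new/red/green groups indexed by `HybridIndex` above).
struct LiveGraph {
    new_nodes: Vec<u32>,
    reused_nodes: Vec<u32>,
    edges: Vec<u32>,
}

impl LiveGraph {
    // Stream the data out in `SerializedGraph`'s layout without building one.
    fn encode(&self, out: &mut Vec<u32>) {
        out.push((self.new_nodes.len() + self.reused_nodes.len()) as u32);
        out.extend(self.new_nodes.iter().chain(self.reused_nodes.iter()));
        out.push(self.edges.len() as u32);
        out.extend(self.edges.iter());
    }
}

// The decoding side only ever sees the serialized layout, so it builds the flat
// type directly, much as `SerializedDepGraph`'s `Decodable` impl must.
fn decode(mut data: &[u32]) -> SerializedGraph {
    let node_count = data[0] as usize;
    let nodes = data[1..1 + node_count].to_vec();
    data = &data[1 + node_count..];
    let edge_count = data[0] as usize;
    let edge_list_data = data[1..1 + edge_count].to_vec();
    SerializedGraph { nodes, edge_list_data }
}

fn main() {
    let live = LiveGraph { new_nodes: vec![10, 11], reused_nodes: vec![12], edges: vec![0, 1, 2] };

    let mut buf = Vec::new();
    live.encode(&mut buf);

    // The decoder gets exactly the layout it expects, even though the encoder
    // never materialized a `SerializedGraph`.
    let decoded = decode(&buf);
    assert_eq!(decoded, SerializedGraph { nodes: vec![10, 11, 12], edge_list_data: vec![0, 1, 2] });
}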