@@ -65,19 +65,49 @@ pub const single_threaded = builtin.single_threaded or !want_multi_threaded;
 pub const TrackedInst = extern struct {
     file: FileIndex,
     inst: Zir.Inst.Index,
-    comptime {
-        // The fields should be tightly packed. See also serialiation logic in `Compilation.saveState`.
-        assert(@sizeOf(@This()) == @sizeOf(FileIndex) + @sizeOf(Zir.Inst.Index));
-    }
+
+    pub const MaybeLost = extern struct {
+        file: FileIndex,
+        inst: ZirIndex,
+        pub const ZirIndex = enum(u32) {
+            /// Tracking failed for this ZIR instruction. Uses of it should fail.
+            lost = std.math.maxInt(u32),
+            _,
+            pub fn unwrap(inst: ZirIndex) ?Zir.Inst.Index {
+                return switch (inst) {
+                    .lost => null,
+                    _ => @enumFromInt(@intFromEnum(inst)),
+                };
+            }
+            pub fn wrap(inst: Zir.Inst.Index) ZirIndex {
+                return @enumFromInt(@intFromEnum(inst));
+            }
+        };
+        comptime {
+            // The fields should be tightly packed. See also serialization logic in `Compilation.saveState`.
+            assert(@sizeOf(@This()) == @sizeOf(FileIndex) + @sizeOf(ZirIndex));
+        }
+    };
+
     pub const Index = enum(u32) {
         _,
-        pub fn resolveFull(tracked_inst_index: TrackedInst.Index, ip: *const InternPool) TrackedInst {
+        pub fn resolveFull(tracked_inst_index: TrackedInst.Index, ip: *const InternPool) ?TrackedInst {
             const tracked_inst_unwrapped = tracked_inst_index.unwrap(ip);
             const tracked_insts = ip.getLocalShared(tracked_inst_unwrapped.tid).tracked_insts.acquire();
-            return tracked_insts.view().items(.@"0")[tracked_inst_unwrapped.index];
+            const maybe_lost = tracked_insts.view().items(.@"0")[tracked_inst_unwrapped.index];
+            return .{
+                .file = maybe_lost.file,
+                .inst = maybe_lost.inst.unwrap() orelse return null,
+            };
         }
-        pub fn resolve(i: TrackedInst.Index, ip: *const InternPool) Zir.Inst.Index {
-            return i.resolveFull(ip).inst;
+        pub fn resolveFile(tracked_inst_index: TrackedInst.Index, ip: *const InternPool) FileIndex {
+            const tracked_inst_unwrapped = tracked_inst_index.unwrap(ip);
+            const tracked_insts = ip.getLocalShared(tracked_inst_unwrapped.tid).tracked_insts.acquire();
+            const maybe_lost = tracked_insts.view().items(.@"0")[tracked_inst_unwrapped.index];
+            return maybe_lost.file;
+        }
+        pub fn resolve(i: TrackedInst.Index, ip: *const InternPool) ?Zir.Inst.Index {
+            return (i.resolveFull(ip) orelse return null).inst;
         }
 
         pub fn toOptional(i: TrackedInst.Index) Optional {
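The `ZirIndex` introduced above is a sentinel-packed optional: rather than widening the row with a separate flag, one `u32` value (`maxInt(u32)`) is reserved to mean "tracking was lost", which keeps `MaybeLost` tightly packed for the serialization mentioned in the `comptime` check. A minimal standalone sketch of the pattern, using a stand-in `Inst.Index` since the real `Zir.Inst.Index` isn't shown in this diff:

```zig
const std = @import("std");

// Stand-in for `Zir.Inst.Index`; any non-exhaustive u32 index enum works.
const Inst = struct {
    pub const Index = enum(u32) { _ };
};

const ZirIndex = enum(u32) {
    /// Reserved sentinel: tracking failed for this instruction.
    lost = std.math.maxInt(u32),
    _,
    pub fn unwrap(inst: ZirIndex) ?Inst.Index {
        return switch (inst) {
            .lost => null,
            _ => @enumFromInt(@intFromEnum(inst)),
        };
    }
    pub fn wrap(inst: Inst.Index) ZirIndex {
        return @enumFromInt(@intFromEnum(inst));
    }
};

test "wrap/unwrap round-trips; the sentinel unwraps to null" {
    const idx: Inst.Index = @enumFromInt(42);
    try std.testing.expectEqual(idx, ZirIndex.wrap(idx).unwrap().?);
    try std.testing.expectEqual(@as(?Inst.Index, null), ZirIndex.lost.unwrap());
}
```

The pattern leans on one implicit invariant: a valid instruction index never equals `maxInt(u32)`, otherwise `wrap` would silently produce `.lost`.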
@@ -120,7 +150,11 @@ pub fn trackZir(
     tid: Zcu.PerThread.Id,
     key: TrackedInst,
 ) Allocator.Error!TrackedInst.Index {
-    const full_hash = Hash.hash(0, std.mem.asBytes(&key));
+    const maybe_lost_key: TrackedInst.MaybeLost = .{
+        .file = key.file,
+        .inst = TrackedInst.MaybeLost.ZirIndex.wrap(key.inst),
+    };
+    const full_hash = Hash.hash(0, std.mem.asBytes(&maybe_lost_key));
     const hash: u32 = @truncate(full_hash >> 32);
     const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))];
     var map = shard.shared.tracked_inst_map.acquire();
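Note why `trackZir` now builds `maybe_lost_key` before hashing: the shard map stores `TrackedInst.MaybeLost` rows and hashes their raw bytes, so the probe key must be converted to that exact stored layout or lookups would never match. A small sketch of the property, assuming `Hash` is a byte-oriented hasher such as `std.hash.Wyhash` (an assumption; the diff doesn't show its definition):

```zig
const std = @import("std");

// Two-u32 extern struct: no padding, so hashing its bytes is deterministic.
const MaybeLost = extern struct {
    file: u32, // stand-in for FileIndex
    inst: u32, // stand-in for MaybeLost.ZirIndex
};

test "equal keys hash equally over their bytes" {
    const a: MaybeLost = .{ .file = 1, .inst = 7 };
    const b: MaybeLost = .{ .file = 1, .inst = 7 };
    try std.testing.expectEqual(
        std.hash.Wyhash.hash(0, std.mem.asBytes(&a)),
        std.hash.Wyhash.hash(0, std.mem.asBytes(&b)),
    );
}
```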
@@ -132,12 +166,11 @@ pub fn trackZir(
         const entry = &map.entries[map_index];
         const index = entry.acquire().unwrap() orelse break;
         if (entry.hash != hash) continue;
-        if (std.meta.eql(index.resolveFull(ip), key)) return index;
+        if (std.meta.eql(index.resolveFull(ip) orelse continue, key)) return index;
     }
     shard.mutate.tracked_inst_map.mutex.lock();
     defer shard.mutate.tracked_inst_map.mutex.unlock();
     if (map.entries != shard.shared.tracked_inst_map.entries) {
-        shard.mutate.tracked_inst_map.len += 1;
         map = shard.shared.tracked_inst_map;
         map_mask = map.header().mask();
         map_index = hash;
@@ -147,7 +180,7 @@ pub fn trackZir(
         const entry = &map.entries[map_index];
         const index = entry.acquire().unwrap() orelse break;
         if (entry.hash != hash) continue;
-        if (std.meta.eql(index.resolveFull(ip), key)) return index;
+        if (std.meta.eql(index.resolveFull(ip) orelse continue, key)) return index;
     }
     defer shard.mutate.tracked_inst_map.len += 1;
     const local = ip.getLocal(tid);
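In both probe loops, `resolveFull(ip) orelse continue` is the behavioral change: a slot whose instruction was lost can no longer match the key, so the probe treats it like a hash collision and keeps walking. A reduced sketch of that control flow, with a linear scan standing in for the shard probe:

```zig
const std = @import("std");

// Entries that fail to resolve (null, like a lost `TrackedInst`) are
// skipped with `orelse continue` instead of being compared or asserted on.
fn find(entries: []const ?u32, key: u32) ?usize {
    for (entries, 0..) |entry, i| {
        const resolved = entry orelse continue; // lost entry: keep probing
        if (resolved == key) return i;
    }
    return null;
}

test "lost entries are skipped rather than matched" {
    const entries = [_]?u32{ null, 7, null, 42 };
    try std.testing.expectEqual(@as(?usize, 3), find(&entries, 42));
    try std.testing.expectEqual(@as(?usize, null), find(&entries, 5));
}
```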
@@ -161,7 +194,7 @@ pub fn trackZir(
         .tid = tid,
         .index = list.mutate.len,
     }).wrap(ip);
-    list.appendAssumeCapacity(.{key});
+    list.appendAssumeCapacity(.{maybe_lost_key});
     entry.release(index.toOptional());
     return index;
 }
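`appendAssumeCapacity(.{maybe_lost_key})` appends a one-column row: per a later hunk, `Local.TrackedInsts` becomes `List(struct { TrackedInst.MaybeLost })`, whose single tuple column is read back as `.items(.@"0")`. A `std.MultiArrayList` analogue of the same shape, with a named field standing in for the tuple column:

```zig
const std = @import("std");

test "single-column struct-of-arrays rows" {
    const gpa = std.testing.allocator;
    // One field per row; InternPool's own `List` names this lone column `@"0"`.
    var rows: std.MultiArrayList(struct { value: u32 }) = .{};
    defer rows.deinit(gpa);
    try rows.append(gpa, .{ .value = 42 });
    try std.testing.expectEqual(@as(u32, 42), rows.items(.value)[0]);
}
```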
@@ -205,12 +238,91 @@ pub fn trackZir(
         .tid = tid,
         .index = list.mutate.len,
     }).wrap(ip);
-    list.appendAssumeCapacity(.{key});
+    list.appendAssumeCapacity(.{maybe_lost_key});
     map.entries[map_index] = .{ .value = index.toOptional(), .hash = hash };
     shard.shared.tracked_inst_map.release(new_map);
     return index;
 }
 
+pub fn rehashTrackedInsts(
+    ip: *InternPool,
+    gpa: Allocator,
+    /// TODO: maybe don't take this? it doesn't actually matter, only one thread is running at this point
+    tid: Zcu.PerThread.Id,
+) Allocator.Error!void {
+    // TODO: this function doesn't handle OOM well. What should it do?
+    // Indeed, what should anyone do when they run out of memory?
+
+    // We don't lock anything, as this function assumes that no other thread is
+    // accessing `tracked_insts`. This is necessary because we're going to be
+    // iterating the `TrackedInst`s in each `Local`, so we have to know that
+    // none will be added as we work.
+
+    // Figure out how big each shard needs to be and store it in its mutate `len`.
+    for (ip.shards) |*shard| shard.mutate.tracked_inst_map.len = 0;
+    for (ip.locals) |*local| {
+        // `getMutableTrackedInsts` is okay only because no other thread is currently active.
+        // We need the `mutate` for the len.
+        for (local.getMutableTrackedInsts(gpa).viewAllowEmpty().items(.@"0")) |tracked_inst| {
+            if (tracked_inst.inst == .lost) continue; // we can ignore this one!
+            const full_hash = Hash.hash(0, std.mem.asBytes(&tracked_inst));
+            const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))];
+            shard.mutate.tracked_inst_map.len += 1;
+        }
+    }
+
+    const Map = Shard.Map(TrackedInst.Index.Optional);
+
+    const arena_state = &ip.getLocal(tid).mutate.arena;
+
+    // We know how big each shard must be, so ensure we have the capacity we need.
+    for (ip.shards) |*shard| {
+        const want_capacity = std.math.ceilPowerOfTwo(u32, shard.mutate.tracked_inst_map.len * 5 / 3) catch unreachable;
+        const have_capacity = shard.shared.tracked_inst_map.header().capacity; // no acquire because we hold the mutex
+        if (have_capacity >= want_capacity) {
+            @memset(shard.shared.tracked_inst_map.entries[0..have_capacity], .{ .value = .none, .hash = undefined });
+            continue;
+        }
+        var arena = arena_state.promote(gpa);
+        defer arena_state.* = arena.state;
+        const new_map_buf = try arena.allocator().alignedAlloc(
+            u8,
+            Map.alignment,
+            Map.entries_offset + want_capacity * @sizeOf(Map.Entry),
+        );
+        const new_map: Map = .{ .entries = @ptrCast(new_map_buf[Map.entries_offset..].ptr) };
+        new_map.header().* = .{ .capacity = want_capacity };
+        @memset(new_map.entries[0..want_capacity], .{ .value = .none, .hash = undefined });
+        shard.shared.tracked_inst_map.release(new_map);
+    }
+
+    // Now, actually insert the items.
+    for (ip.locals, 0..) |*local, local_tid| {
+        // `getMutableTrackedInsts` is okay only because no other thread is currently active.
+        // We need the `mutate` for the len.
+        for (local.getMutableTrackedInsts(gpa).viewAllowEmpty().items(.@"0"), 0..) |tracked_inst, local_inst_index| {
+            if (tracked_inst.inst == .lost) continue; // we can ignore this one!
+            const full_hash = Hash.hash(0, std.mem.asBytes(&tracked_inst));
+            const hash: u32 = @truncate(full_hash >> 32);
+            const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))];
+            const map = shard.shared.tracked_inst_map; // no acquire because we hold the mutex
+            const map_mask = map.header().mask();
+            var map_index = hash;
+            const entry = while (true) : (map_index += 1) {
+                map_index &= map_mask;
+                const entry = &map.entries[map_index];
+                if (entry.acquire() == .none) break entry;
+            };
+            const index = TrackedInst.Index.Unwrapped.wrap(.{
+                .tid = @enumFromInt(local_tid),
+                .index = @intCast(local_inst_index),
+            }, ip);
+            entry.hash = hash;
+            entry.release(index.toOptional());
+        }
+    }
+}
+
 /// Analysis Unit. Represents a single entity which undergoes semantic analysis.
 /// This is either a `Cau` or a runtime function.
 /// The LSB is used as a tag bit.
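The sizing line in `rehashTrackedInsts` is worth unpacking: `ceilPowerOfTwo(u32, len * 5 / 3)` rounds a roughly-60% load-factor target up to a power of two, which is what the `mask()`-based probing requires. A worked check of the arithmetic:

```zig
const std = @import("std");

test "shard capacity keeps the load factor at or below 3/5" {
    const len: u32 = 100;
    const want_capacity = std.math.ceilPowerOfTwo(u32, len * 5 / 3) catch unreachable;
    // 100 * 5 / 3 == 166 (integer division), rounded up to the next power of two:
    try std.testing.expectEqual(@as(u32, 256), want_capacity);
    // len <= capacity * 3/5, and power-of-two capacity makes
    // `map_index &= map_mask` a valid wrap-around probe.
    try std.testing.expect(len * 5 <= want_capacity * 3);
}
```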
@@ -728,7 +840,7 @@ const Local = struct {
         else => @compileError("unsupported host"),
     };
     const Strings = List(struct { u8 });
-    const TrackedInsts = List(struct { TrackedInst });
+    const TrackedInsts = List(struct { TrackedInst.MaybeLost });
     const Maps = List(struct { FieldMap });
     const Caus = List(struct { Cau });
     const Navs = List(Nav.Repr);
@@ -959,6 +1071,14 @@ const Local = struct {
             mutable.list.release(new_list);
         }
 
+        pub fn viewAllowEmpty(mutable: Mutable) View {
+            const capacity = mutable.list.header().capacity;
+            return .{
+                .bytes = mutable.list.bytes,
+                .len = mutable.mutate.len,
+                .capacity = capacity,
+            };
+        }
         pub fn view(mutable: Mutable) View {
            const capacity = mutable.list.header().capacity;
            assert(capacity > 0); // optimizes `MultiArrayList.Slice.items`
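`viewAllowEmpty` exists because `view` (just below it) asserts `capacity > 0` as a hot-path optimization, while the rehash loop must also visit a `Local` whose `tracked_insts` list was never allocated. A simplified sketch of why the two accessors differ, using a hypothetical `FakeList` rather than the real `Local.List`:

```zig
const std = @import("std");

const FakeList = struct {
    len: u32 = 0,
    capacity: u32 = 0,

    fn view(list: FakeList) FakeList {
        // Hot path: callers are guaranteed a non-empty allocation.
        std.debug.assert(list.capacity > 0);
        return list;
    }

    fn viewAllowEmpty(list: FakeList) FakeList {
        // Rehash path: a never-used list is fine; it simply yields zero items.
        return list;
    }
};

test "viewAllowEmpty tolerates a list that never allocated" {
    const empty: FakeList = .{};
    try std.testing.expectEqual(@as(u32, 0), empty.viewAllowEmpty().len);
}
```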
@@ -996,7 +1116,6 @@ const Local = struct {
         fn header(list: ListSelf) *Header {
             return @ptrFromInt(@intFromPtr(list.bytes) - bytes_offset);
         }
-
         pub fn view(list: ListSelf) View {
             const capacity = list.header().capacity;
             assert(capacity > 0); // optimizes `MultiArrayList.Slice.items`
@@ -11000,7 +11119,6 @@ pub fn getOrPutTrailingString(
     shard.mutate.string_map.mutex.lock();
     defer shard.mutate.string_map.mutex.unlock();
     if (map.entries != shard.shared.string_map.entries) {
-        shard.mutate.string_map.len += 1;
         map = shard.shared.string_map;
         map_mask = map.header().mask();
         map_index = hash;
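This removal (and the matching one in `trackZir` above) appears to fix a double-count: when a probe restarts because another thread published a resized map, the restart path must not bump `len`; the entry is counted exactly once, by the `defer` on the path that actually inserts. A minimal sketch of the corrected accounting, with hypothetical names:

```zig
const std = @import("std");

const MapState = struct {
    len: u32 = 0,

    fn insertNew(state: *MapState) void {
        defer state.len += 1; // the single point where the count grows
        // ... write the new entry here ...
    }

    fn retryAfterResize(state: *MapState) void {
        // No `len += 1` here: restarting the probe inserts nothing by itself.
        _ = state;
    }
};

test "len grows exactly once per inserted entry" {
    var state: MapState = .{};
    state.retryAfterResize(); // e.g. the map was resized concurrently
    state.insertNew();
    try std.testing.expectEqual(@as(u32, 1), state.len);
}
```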