From 5e69348043a8575b09c8ae76ee6554655d4316e6 Mon Sep 17 00:00:00 2001
From: sin-ack
Date: Sun, 4 Jun 2023 16:42:03 +0000
Subject: [PATCH 1/5] runtime: Allocate an inline cache inside ExecutableMap

This is a very naive implementation that has the following issues:

- It over-allocates by a lot, and I mean *a lot*: it allocates two words
  per instruction, even when the instruction isn't a message send.
- The memory is currently owned by the global actor, but it is unclear
  who should really own it. When do the writes to the inline cache
  happen? Should we record actor-owned objects in a global object's
  inline cache?

There are still many questions to be answered, but this should be a
good MVP.
---
 src/runtime/Activation.zig             |  5 +--
 src/runtime/interpreter.zig            | 10 +++---
 src/runtime/objects/block.zig          | 26 +++++++++-----
 src/runtime/objects/executable_map.zig | 49 +++++++++++++++++++++++++-
 src/runtime/objects/method.zig         | 32 ++++++++++-------
 5 files changed, 93 insertions(+), 29 deletions(-)

diff --git a/src/runtime/Activation.zig b/src/runtime/Activation.zig
index 6986677..d959ab2 100644
--- a/src/runtime/Activation.zig
+++ b/src/runtime/Activation.zig
@@ -227,13 +227,14 @@
     ) !void {
         var source_range = SourceRange.initNoRef(current_executable, .{ .start = 0, .end = 1 });

+        const entrypoint_block = new_executable.value.getEntrypointBlock();
         var token = try vm.heap.getAllocation(
-            MethodObject.requiredSizeForCreatingTopLevelContext() +
+            MethodObject.requiredSizeForCreatingTopLevelContext(entrypoint_block) +
                 ActivationObject.requiredSizeForAllocation(0, 0),
         );
         defer token.deinit();

-        const toplevel_context_method = try MethodObject.createTopLevelContextForExecutable(vm, &token, new_executable, new_executable.value.getEntrypointBlock());
+        const toplevel_context_method = try MethodObject.createTopLevelContextForExecutable(vm, &token, new_executable, entrypoint_block);
         const activation_slot = try self.getNewActivationSlot(vm.allocator);
         toplevel_context_method.activateMethod(vm, &token, vm.current_actor.id, vm.lobby(), &.{}, target_location, source_range, activation_slot);
     }
diff --git a/src/runtime/interpreter.zig b/src/runtime/interpreter.zig
index 07ece61..0ecd43b 100644
--- a/src/runtime/interpreter.zig
+++ b/src/runtime/interpreter.zig
@@ -734,15 +734,15 @@ fn createMethod(
         argument_slot_count += 1;
     }

+    const block = executable.value.getBlock(block_index);
     var token = try vm.heap.getAllocation(
-        MethodMap.requiredSizeForAllocation(total_slot_count) +
+        MethodMap.requiredSizeForAllocation(block, total_slot_count) +
            MethodObject.requiredSizeForAllocation(total_assignable_slot_count),
    );
    defer token.deinit();

-    const block = executable.value.getBlock(block_index);
    var method_map = try MethodMap.create(
-        vm.getMapMap(),
+        vm,
        &token,
        argument_slot_count,
        total_slot_count,
@@ -803,13 +803,13 @@ fn createBlock(
    std.debug.assert(nonlocal_return_target_activation.get(actor.activation_stack).?.nonlocal_return_target_activation == null);

    var token = try vm.heap.getAllocation(
-        BlockMap.requiredSizeForAllocation(total_slot_count) +
+        BlockMap.requiredSizeForAllocation(block, total_slot_count) +
            BlockObject.requiredSizeForAllocation(total_assignable_slot_count),
    );
    defer token.deinit();

    var block_map = try BlockMap.create(
-        vm.getMapMap(),
+        vm,
        &token,
        argument_slot_count,
        total_slot_count,
diff --git a/src/runtime/objects/block.zig b/src/runtime/objects/block.zig
index cc9c43e..8a88a74 100644
--- a/src/runtime/objects/block.zig
+++ b/src/runtime/objects/block.zig
@@ -182,7 +182,7 @@ pub const BlockMap = extern struct {
    /// Borrows a ref for `script` from the caller. Takes ownership of
    /// `statements`.
    pub fn create(
-        map_map: Map.Ptr,
+        vm: *VirtualMachine,
        token: *Heap.AllocationToken,
        argument_slot_count: u8,
        total_slot_count: u32,
@@ -191,11 +191,11 @@ pub const BlockMap = extern struct {
        block: *bytecode.Block,
        executable: bytecode.Executable.Ref,
    ) !BlockMap.Ptr {
-        const size = BlockMap.requiredSizeForAllocation(total_slot_count);
+        const size = BlockMap.requiredSizeForSelfAllocation(total_slot_count);

        var memory_area = token.allocate(.Object, size);
        var self = @ptrCast(BlockMap.Ptr, memory_area);
-        self.init(map_map, argument_slot_count, total_slot_count, parent_activation, nonlocal_return_target_activation, block, executable);
+        self.init(vm, token, argument_slot_count, total_slot_count, parent_activation, nonlocal_return_target_activation, block, executable);

        try token.heap.markAddressAsNeedingFinalization(memory_area);
        return self;
@@ -203,7 +203,8 @@

    fn init(
        self: BlockMap.Ptr,
-        map_map: Map.Ptr,
+        vm: *VirtualMachine,
+        token: *Heap.AllocationToken,
        argument_slot_count: u8,
        total_slot_count: u32,
        parent_activation: Activation.ActivationRef,
@@ -211,7 +212,7 @@
        block: *bytecode.Block,
        executable: bytecode.Executable.Ref,
    ) void {
-        self.base_map.init(.Block, map_map, argument_slot_count, total_slot_count, block, executable);
+        self.base_map.allocateAndInit(vm, token, .Block, argument_slot_count, total_slot_count, block, executable);
        self.parent_activation = parent_activation;
        self.nonlocal_return_target_activation = nonlocal_return_target_activation;
    }
@@ -231,7 +232,7 @@ pub const BlockMap = extern struct {

    pub fn clone(self: BlockMap.Ptr, vm: *VirtualMachine, token: *Heap.AllocationToken) !BlockMap.Ptr {
        const new_map = try create(
-            vm.getMapMap(),
+            vm,
            token,
            self.getArgumentSlotCount(),
            self.base_map.slots.information.slot_count,
@@ -248,14 +249,21 @@ pub const BlockMap = extern struct {
    }

    pub fn getSizeInMemory(self: BlockMap.Ptr) usize {
-        return requiredSizeForAllocation(self.base_map.slots.information.slot_count);
+        return requiredSizeForSelfAllocation(self.base_map.slots.information.slot_count);
    }

    pub fn getSizeForCloning(self: BlockMap.Ptr) usize {
-        return self.getSizeInMemory();
+        return requiredSizeForAllocation(self.base_map.block.get(), self.base_map.slots.information.slot_count);
    }

-    pub fn requiredSizeForAllocation(slot_count: u32) usize {
+    /// Return the size required for allocating just the map itself.
+    pub fn requiredSizeForSelfAllocation(slot_count: u32) usize {
        return @sizeOf(BlockMap) + slot_count * @sizeOf(Slot);
    }
+
+    pub fn requiredSizeForAllocation(bytecode_block: *bytecode.Block, slot_count: u32) usize {
+        var required_memory = requiredSizeForSelfAllocation(slot_count);
+        required_memory += ExecutableMap.requiredSizeForAllocation(bytecode_block);
+        return required_memory;
+    }
 };
diff --git a/src/runtime/objects/executable_map.zig b/src/runtime/objects/executable_map.zig
index 4be64ec..c035f0d 100644
--- a/src/runtime/objects/executable_map.zig
+++ b/src/runtime/objects/executable_map.zig
@@ -6,13 +6,17 @@ const std = @import("std");
 const Allocator = std.mem.Allocator;

 const Map = @import("map.zig").Map;
+const Heap = @import("../Heap.zig");
 const Value = value_import.Value;
 const MapType = @import("map.zig").MapType;
 const bytecode = @import("../bytecode.zig");
 const SlotsMap = @import("slots.zig").SlotsMap;
+const ArrayMap = @import("./array.zig").ArrayMap;
+const ArrayObject = @import("./array.zig").Array;
 const value_import = @import("../value.zig");
 const PointerValue = value_import.PointerValue;
 const stage2_compat = @import("../../utility/stage2_compat.zig");
+const VirtualMachine = @import("../VirtualMachine.zig");
 const RefCountedValue = value_import.RefCountedValue;

 /// An "executable map" is one that contains a reference to executable code.
@@ -24,6 +28,11 @@ pub const ExecutableMap = extern struct {
    slots: SlotsMap align(@alignOf(u64)),
    /// The address of the bytecode block. Owned by definition_executable_ref.
    block: PointerValue(bytecode.Block) align(@alignOf(u64)),
+    /// An inline cache twice the length of `block`. The items form pairs of
+    /// [receiver map reference, method reference]. When the receiver map
+    /// reference matches what's in the cache, we directly use the method
+    /// instead of performing a lookup.
+    inline_cache: ArrayObject.Value,
    /// The executable which this map was created from.
    definition_executable_ref: RefCountedValue(bytecode.Executable) align(@alignOf(u64)),

@@ -35,13 +44,42 @@ pub const ExecutableMap = extern struct {
    };

    /// Refs `script`.
-    pub fn init(
+    pub fn allocateAndInit(
+        self: ExecutableMap.Ptr,
+        vm: *VirtualMachine,
+        token: *Heap.AllocationToken,
+        comptime map_type: MapType,
+        argument_slot_count: u8,
+        total_slot_count: u32,
+        block: *bytecode.Block,
+        executable: bytecode.Executable.Ref,
+    ) void {
+        const map_map = vm.getMapMap();
+
+        const inline_cache_size = block.getLength() * 2;
+        const inline_cache_map = ArrayMap.create(map_map, token, inline_cache_size);
+        // TODO: Use GlobalActorID!
+        const inline_cache = ArrayObject.createWithValues(token, 0, inline_cache_map, &.{}, vm.nil());
+
+        self.init(
+            map_type,
+            map_map,
+            argument_slot_count,
+            total_slot_count,
+            block,
+            ArrayObject.Value.init(inline_cache),
+            executable,
+        );
+    }
+
+    fn init(
        self: ExecutableMap.Ptr,
        comptime map_type: MapType,
        map_map: Map.Ptr,
        argument_slot_count: u8,
        total_slot_count: u32,
        block: *bytecode.Block,
+        inline_cache: ArrayObject.Value,
        executable: bytecode.Executable.Ref,
    ) void {
        std.debug.assert(argument_slot_count <= total_slot_count);
@@ -51,6 +89,7 @@ pub const ExecutableMap = extern struct {

        self.setArgumentSlotCount(argument_slot_count);
        self.block = PointerValue(bytecode.Block).init(block);
+        self.inline_cache = inline_cache;
        self.definition_executable_ref = RefCountedValue(bytecode.Executable).init(executable);
    }

@@ -68,4 +107,12 @@ pub const ExecutableMap = extern struct {
    fn setArgumentSlotCount(self: ExecutableMap.Ptr, count: u8) void {
        @ptrCast(*ExecutableInformation, &self.slots.information.extra).argument_slot_count = count;
    }
+
+    pub fn requiredSizeForAllocation(block: *bytecode.Block) usize {
+        // Since we will be allocating an array as well as its map, we need to
+        // include both of those in our required size calculation.
+        var required_size = ArrayMap.requiredSizeForAllocation();
+        required_size += ArrayObject.requiredSizeForAllocation(block.getLength() * 2);
+        return required_size;
+    }
 };
diff --git a/src/runtime/objects/method.zig b/src/runtime/objects/method.zig
index bd1fe47..311c076 100644
--- a/src/runtime/objects/method.zig
+++ b/src/runtime/objects/method.zig
@@ -103,14 +103,14 @@ pub const Method = extern struct {
    ) !Method.Ptr {
        const toplevel_context_method_map = blk: {
            const toplevel_context_name = ByteArray.createFromString(token, toplevel_context_string);
-            break :blk try MethodMap.create(vm.getMapMap(), token, 0, 0, false, toplevel_context_name, block, executable);
+            break :blk try MethodMap.create(vm, token, 0, 0, false, toplevel_context_name, block, executable);
        };

        return create(token, vm.current_actor.id, toplevel_context_method_map, &.{});
    }

-    pub fn requiredSizeForCreatingTopLevelContext() usize {
+    pub fn requiredSizeForCreatingTopLevelContext(block: *bytecode.Block) usize {
        return ByteArray.requiredSizeForAllocation(toplevel_context_string.len) +
-            MethodMap.requiredSizeForAllocation(0) +
+            MethodMap.requiredSizeForAllocation(block, 0) +
            Method.requiredSizeForAllocation(0);
    }

@@ -177,7 +177,7 @@ pub const MethodMap = extern struct {
    /// Borrows a ref for `script` from the caller. Takes ownership of
    /// `statements`.
    pub fn create(
-        map_map: Map.Ptr,
+        vm: *VirtualMachine,
        token: *Heap.AllocationToken,
        argument_slot_count: u8,
        total_slot_count: u32,
@@ -186,11 +186,11 @@ pub const MethodMap = extern struct {
        block: *bytecode.Block,
        executable: bytecode.Executable.Ref,
    ) !MethodMap.Ptr {
-        const size = MethodMap.requiredSizeForAllocation(total_slot_count);
+        const size = MethodMap.requiredSizeForSelfAllocation(total_slot_count);

        var memory_area = token.allocate(.Object, size);
        var self = @ptrCast(MethodMap.Ptr, memory_area);
-        self.init(map_map, argument_slot_count, total_slot_count, is_inline_method, method_name, block, executable);
+        self.init(vm, token, argument_slot_count, total_slot_count, is_inline_method, method_name, block, executable);

        try token.heap.markAddressAsNeedingFinalization(memory_area);
        return self;
@@ -198,7 +198,8 @@

    fn init(
        self: MethodMap.Ptr,
-        map_map: Map.Ptr,
+        vm: *VirtualMachine,
+        token: *Heap.AllocationToken,
        argument_slot_count: u8,
        total_slot_count: u32,
        is_inline_method: bool,
@@ -206,7 +207,7 @@
        block: *bytecode.Block,
        executable: bytecode.Executable.Ref,
    ) void {
-        self.base_map.init(.Method, map_map, argument_slot_count, total_slot_count, block, executable);
+        self.base_map.allocateAndInit(vm, token, .Method, argument_slot_count, total_slot_count, block, executable);
        self.setInlineMethod(is_inline_method);
        self.method_name = method_name.asValue();
    }
@@ -238,7 +239,7 @@ pub const MethodMap = extern struct {

    pub fn clone(self: MethodMap.Ptr, vm: *VirtualMachine, token: *Heap.AllocationToken) !MethodMap.Ptr {
        const new_map = try create(
-            vm.getMapMap(),
+            vm,
            token,
            self.getArgumentSlotCount(),
            self.base_map.slots.information.slot_count,
@@ -255,14 +256,21 @@ pub const MethodMap = extern struct {
    }

    pub fn getSizeInMemory(self: MethodMap.Ptr) usize {
-        return requiredSizeForAllocation(self.base_map.slots.information.slot_count);
+        return requiredSizeForSelfAllocation(self.base_map.slots.information.slot_count);
    }

    pub fn getSizeForCloning(self: MethodMap.Ptr) usize {
-        return self.getSizeInMemory();
+        return requiredSizeForAllocation(self.base_map.block.get(), self.base_map.slots.information.slot_count);
    }

-    pub fn requiredSizeForAllocation(slot_count: u32) usize {
+    /// Return the size required for allocating just the map itself.
+    pub fn requiredSizeForSelfAllocation(slot_count: u32) usize {
        return @sizeOf(MethodMap) + slot_count * @sizeOf(Slot);
    }
+
+    pub fn requiredSizeForAllocation(bytecode_block: *bytecode.Block, slot_count: u32) usize {
+        var required_memory = requiredSizeForSelfAllocation(slot_count);
+        required_memory += ExecutableMap.requiredSizeForAllocation(bytecode_block);
+        return required_memory;
+    }
 };

From 094c6c7ed37c88ab51493fd0bfa25ebaa93169dc Mon Sep 17 00:00:00 2001
From: sin-ack
Date: Wed, 7 Jun 2023 22:13:28 +0000
Subject: [PATCH 2/5] runtime: Add missing Type decl for ArrayObject

---
 src/runtime/objects/array.zig | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/runtime/objects/array.zig b/src/runtime/objects/array.zig
index e8b8277..482b196 100644
--- a/src/runtime/objects/array.zig
+++ b/src/runtime/objects/array.zig
@@ -22,6 +22,7 @@ pub const Array = extern struct {
    object: Object align(@alignOf(u64)),

    pub const Ptr = stage2_compat.HeapPtr(Array, .Mutable);
+    pub const Type = .Array;
    pub const Value = value_import.ObjectValue(Array);

    /// Create a new array with the given values and filling extra items with

From 8b57b46897800d9df309a8b59fbeb395ec60d930 Mon Sep 17 00:00:00 2001
From: sin-ack
Date: Wed, 7 Jun 2023 22:13:53 +0000
Subject: [PATCH 3/5] runtime: Allow activation dispatchers to pass arguments

---
 src/runtime/objects/activation.zig | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/src/runtime/objects/activation.zig b/src/runtime/objects/activation.zig
index 270e544..3226967 100644
--- a/src/runtime/objects/activation.zig
+++ b/src/runtime/objects/activation.zig
@@ -128,11 +128,11 @@ pub const Activation = extern struct {
    // --- Slot counts ---

    pub fn getAssignableSlotCount(self: Activation.Ptr) u8 {
-        return self.dispatch("getAssignableSlotCount");
+        return self.dispatch("getAssignableSlotCount", .{});
    }

    pub fn getArgumentSlotCount(self: Activation.Ptr) u8 {
-        return self.dispatch("getArgumentSlotCount");
+        return self.dispatch("getArgumentSlotCount", .{});
    }

    // --- Map forwarding ---
@@ -154,7 +154,7 @@ pub const Activation = extern struct {
    // --- Slots and slot values ---

    pub fn getSlots(self: Activation.Ptr) Slot.Slice {
-        return self.dispatch("getSlots");
+        return self.dispatch("getSlots", .{});
    }

    /// Return a slice of `GenericValue`s for the assignable slots that are after the
@@ -265,10 +265,10 @@ pub const Activation = extern struct {
        return @typeInfo(@TypeOf(@field(MethodMap, fn_name))).Fn.return_type.?;
    }

-    fn dispatch(self: Activation.Ptr, comptime fn_name: []const u8) DispatchReturn(fn_name) {
+    fn dispatch(self: Activation.Ptr, comptime fn_name: []const u8, args: anytype) DispatchReturn(fn_name) {
        return switch (self.getActivationType()) {
-            .Method => @call(.auto, @field(MethodMap, fn_name), .{self.getMethodMap()}),
-            .Block => @call(.auto, @field(BlockMap, fn_name), .{self.getBlockMap()}),
+            .Method => @call(.auto, @field(MethodMap, fn_name), .{self.getMethodMap()} ++ args),
+            .Block => @call(.auto, @field(BlockMap, fn_name), .{self.getBlockMap()} ++ args),
        };
    }
 };

From c9c19ca729576c62b1b5b62e0448deb078bd450d Mon Sep 17 00:00:00 2001
From: sin-ack
Date: Wed, 7 Jun 2023 22:13:09 +0000
Subject: [PATCH 4/5] runtime: Put receiver map-method object pairs inside the inline cache

---
 src/runtime/Activation.zig             | 16 ++++++++++++++++
 src/runtime/interpreter.zig            | 16 ++++++++++++++++
 src/runtime/objects/activation.zig     |  6 ++++++
 src/runtime/objects/block.zig          |  6 ++++++
 src/runtime/objects/executable_map.zig | 13 +++++++++++++
 src/runtime/objects/method.zig         |  5 +++++
 6 files changed, 62 insertions(+)

diff --git a/src/runtime/Activation.zig b/src/runtime/Activation.zig
index d959ab2..8d7d93c 100644
--- a/src/runtime/Activation.zig
+++ b/src/runtime/Activation.zig
@@ -9,6 +9,7 @@ const Allocator = std.mem.Allocator;
 const Actor = @import("./Actor.zig");
 const value = @import("./value.zig");
 const Value = value.Value;
+const Object = @import("object.zig").Object;
 const bytecode = @import("./bytecode.zig");
 const SourceRange = @import("./SourceRange.zig");
 const IntegerValue = value.IntegerValue;
@@ -107,6 +108,21 @@ pub fn restart(self: *Self) void {
    self.pc = 0;
 }

+/// Write the given receiver-method pair into the appropriate offset of the
+/// activation object's inline cache (stored in the map).
+pub fn writeIntoInlineCache(
+    self: *Self,
+    receiver: Object.Ptr,
+    method: MethodObject.Ptr,
+) void {
+    const activation_object = self.activation_object.get();
+    // NOTE: For the time being, we are wasting memory by allocating an
+    //       equally-sized inline cache for the bytecode block of the
+    //       activation; in the future we will map each send instruction to
+    //       an offset within the cache and shrink it drastically.
+    activation_object.writeIntoInlineCacheAtOffset(self.pc, receiver, method);
+}
+
 pub fn format(
    activation: Self,
    comptime fmt: []const u8,
diff --git a/src/runtime/interpreter.zig b/src/runtime/interpreter.zig
index 0ecd43b..05df3cc 100644
--- a/src/runtime/interpreter.zig
+++ b/src/runtime/interpreter.zig
@@ -422,6 +422,20 @@ fn performPrimitiveSend(
    );
 }

+/// If the receiver is an object, write the receiver-method pair into the
+/// current activation's inline cache.
+fn writeIntoInlineCache(
+    actor: *Actor,
+    receiver: Value,
+    method: MethodObject.Ptr,
+) void {
+    if (!receiver.isObjectReference())
+        return;
+
+    const current_activation = actor.activation_stack.getCurrent();
+    current_activation.writeIntoInlineCache(receiver.asObject(), method);
+}
+
 /// Sends a message to the given receiver, returning the result as a normal
 /// completion if it can be immediately resolved; if the message send must
 /// create a new activation, pushes the activation onto the stack and returns
@@ -474,6 +488,8 @@ pub fn sendMessage(
        .Regular => |lookup_result| {
            if (lookup_result.isObjectReference()) {
                if (lookup_result.asObject().asType(.Method)) |method| {
+                    writeIntoInlineCache(actor, receiver, method);
+
                    const argument_count = method.getArgumentSlotCount();
                    const argument_slice = actor.argument_stack.lastNItems(argument_count);

diff --git a/src/runtime/objects/activation.zig b/src/runtime/objects/activation.zig
index 3226967..7a5ed68 100644
--- a/src/runtime/objects/activation.zig
+++ b/src/runtime/objects/activation.zig
@@ -9,6 +9,7 @@ const Map = map_import.Map;
 const Slot = @import("../slot.zig").Slot;
 const Heap = @import("../Heap.zig");
 const slots = @import("slots.zig");
+const Object = @import("../object.zig").Object;
 const MapType = map_import.MapType;
 const bytecode = @import("../bytecode.zig");
 const BlockMap = @import("block.zig").BlockMap;
@@ -16,6 +17,7 @@ const MethodMap = @import("method.zig").MethodMap;
 const map_import = @import("map.zig");
 const SlotsObject = slots.Slots;
 const GenericValue = value_import.Value;
+const MethodObject = @import("method.zig").Method;
 const value_import = @import("../value.zig");
 const object_lookup = @import("../object_lookup.zig");
 const stage2_compat = @import("../../utility/stage2_compat.zig");
@@ -151,6 +153,10 @@ pub const Activation = extern struct {
        };
    }

+    pub fn writeIntoInlineCacheAtOffset(self: Activation.Ptr, offset: usize, object: Object.Ptr, method: MethodObject.Ptr) void {
+        self.dispatch("writeIntoInlineCacheAtOffset", .{ offset, object, method });
+    }
+
    // --- Slots and slot values ---

    pub fn getSlots(self: Activation.Ptr) Slot.Slice {
diff --git a/src/runtime/objects/block.zig b/src/runtime/objects/block.zig
index 8a88a74..a3aff14 100644
--- a/src/runtime/objects/block.zig
+++ b/src/runtime/objects/block.zig
@@ -11,10 +11,12 @@ const Heap = @import("../Heap.zig");
 const debug = @import("../../debug.zig");
 const slots = @import("slots.zig");
 const Value = value_import.Value;
+const Object = @import("../object.zig").Object;
 const bytecode = @import("../bytecode.zig");
 const Activation = @import("../Activation.zig");
 const SlotsObject = slots.Slots;
 const SourceRange = @import("../SourceRange.zig");
+const MethodObject = @import("method.zig").Method;
 const value_import = @import("../value.zig");
 const ExecutableMap = @import("executable_map.zig").ExecutableMap;
 const stage2_compat = @import("../../utility/stage2_compat.zig");
@@ -266,4 +268,8 @@ pub const BlockMap = extern struct {
        required_memory += ExecutableMap.requiredSizeForAllocation(bytecode_block);
        return required_memory;
    }
+
+    pub fn writeIntoInlineCacheAtOffset(self: BlockMap.Ptr, offset: usize, object: Object.Ptr, method: MethodObject.Ptr) void {
+        self.base_map.writeIntoInlineCacheAtOffset(offset, object, method);
+    }
 };
diff --git a/src/runtime/objects/executable_map.zig b/src/runtime/objects/executable_map.zig
index c035f0d..acc3fc8 100644
--- a/src/runtime/objects/executable_map.zig
+++ b/src/runtime/objects/executable_map.zig
@@ -8,11 +8,13 @@ const Allocator = std.mem.Allocator;
 const Map = @import("map.zig").Map;
 const Heap = @import("../Heap.zig");
 const Value = value_import.Value;
+const Object = @import("../object.zig").Object;
 const MapType = @import("map.zig").MapType;
 const bytecode = @import("../bytecode.zig");
 const SlotsMap = @import("slots.zig").SlotsMap;
 const ArrayMap = @import("./array.zig").ArrayMap;
 const ArrayObject = @import("./array.zig").Array;
+const MethodObject = @import("./method.zig").Method;
 const value_import = @import("../value.zig");
 const PointerValue = value_import.PointerValue;
 const stage2_compat = @import("../../utility/stage2_compat.zig");
@@ -115,4 +117,15 @@ pub const ExecutableMap = extern struct {
        required_size += ArrayObject.requiredSizeForAllocation(block.getLength() * 2);
        return required_size;
    }
+
+    // --- Inline cache operations ---
+
+    pub fn writeIntoInlineCacheAtOffset(self: ExecutableMap.Ptr, offset: usize, object: Object.Ptr, method: MethodObject.Ptr) void {
+        const inline_cache = self.inline_cache.get();
+        std.debug.assert(offset < inline_cache.getSize() / 2);
+
+        const inline_cache_array = self.inline_cache.get().getValues();
+        inline_cache_array[offset * 2] = object.map;
+        inline_cache_array[(offset * 2) + 1] = method.asValue();
+    }
 };
diff --git a/src/runtime/objects/method.zig b/src/runtime/objects/method.zig
index 311c076..3bf3129 100644
--- a/src/runtime/objects/method.zig
+++ b/src/runtime/objects/method.zig
@@ -9,6 +9,7 @@ const Map = @import("map.zig").Map;
 const Heap = @import("../Heap.zig");
 const Slot = @import("../slot.zig").Slot;
 const slots = @import("slots.zig");
+const Object = @import("../object.zig").Object;
 const bytecode = @import("../bytecode.zig");
 const ByteArray = @import("../ByteArray.zig");
 const Activation = @import("../Activation.zig");
@@ -273,4 +274,8 @@ pub const MethodMap = extern struct {
        required_memory += ExecutableMap.requiredSizeForAllocation(bytecode_block);
        return required_memory;
    }
+
+    pub fn writeIntoInlineCacheAtOffset(self: MethodMap.Ptr, offset: usize, object: Object.Ptr, method: Method.Ptr) void {
+        self.base_map.writeIntoInlineCacheAtOffset(offset, object, method);
+    }
 };

From 10bfbf8786652f8d5419e35fe05a6448782f4b9b Mon Sep 17 00:00:00 2001
From: sin-ack
Date: Wed, 7 Jun 2023 23:41:29 +0000
Subject: [PATCH 5/5] wip

---
 src/runtime/Activation.zig             | 19 ++++++-
 src/runtime/Heap.zig                   | 76 +++++++++++++++++++++++++-
 src/runtime/interpreter.zig            | 58 +++++++++++++++++++-
 src/runtime/objects/activation.zig     |  8 ++-
 src/runtime/objects/block.zig          |  8 ++-
 src/runtime/objects/executable_map.zig | 34 +++++++++++-
 src/runtime/objects/method.zig         |  8 ++-
 7 files changed, 196 insertions(+), 15 deletions(-)

diff --git a/src/runtime/Activation.zig b/src/runtime/Activation.zig
index 8d7d93c..c7c11bd 100644
--- a/src/runtime/Activation.zig
+++ b/src/runtime/Activation.zig
@@ -108,10 +108,27 @@ pub fn restart(self: *Self) void {
    self.pc = 0;
 }

+/// Try to see if the current slot for the inline cache is filled and matches
+/// the receiver's map; if so, return the matching method object. Invalidate the
+/// inline cache entry and return null otherwise.
+pub fn getOrInvalidateMethodFromInlineCacheForReceiver(
+    self: *Self,
+    vm: *VirtualMachine,
+    receiver: Object.Ptr,
+) ?MethodObject.Ptr {
+    const activation_object = self.activation_object.get();
+    // NOTE: For the time being, we are wasting memory by allocating an
+    //       equally-sized inline cache for the bytecode block of the
+    //       activation; in the future we will map each send instruction to
+    //       an offset within the cache and shrink it drastically.
+    return activation_object.getOrInvalidateMethodFromInlineCacheAtOffsetForReceiver(vm, self.pc, receiver);
+}
+
 /// Write the given receiver-method pair into the appropriate offset of the
 /// activation object's inline cache (stored in the map).
 pub fn writeIntoInlineCache(
    self: *Self,
+    vm: *VirtualMachine,
    receiver: Object.Ptr,
    method: MethodObject.Ptr,
 ) void {
@@ -120,7 +137,7 @@ pub fn writeIntoInlineCache(
    //       equally-sized inline cache for the bytecode block of the
    //       activation; in the future we will map each send instruction to
    //       an offset within the cache and shrink it drastically.
-    activation_object.writeIntoInlineCacheAtOffset(self.pc, receiver, method);
+    activation_object.writeIntoInlineCacheAtOffset(vm, self.pc, receiver, method);
 }

 pub fn format(
diff --git a/src/runtime/Heap.zig b/src/runtime/Heap.zig
index b1e0a98..a1127a5 100644
--- a/src/runtime/Heap.zig
+++ b/src/runtime/Heap.zig
@@ -403,7 +403,7 @@ const Space = struct {
    /// order to evacuate all the objects to a higher generation.
    tenure_target: ?*Space = null,
    /// The name of this space.
-    name: [*:0]const u8,
+    name: []const u8,

    /// A link node for a newer generation space to scan in order to update
    /// references from the newer space to the older one.
@@ -412,7 +412,7 @@ const Space = struct {
        previous: ?*const NewerGenerationLink,
    };

-    pub fn lazyInit(heap: *Self, comptime name: [*:0]const u8, size: usize) Space {
+    pub fn lazyInit(heap: *Self, comptime name: []const u8, size: usize) Space {
        return Space{
            .heap = heap,
            .name = name,
@@ -422,7 +422,7 @@ const Space = struct {
        };
    }

-    pub fn init(heap: *Self, allocator: Allocator, comptime name: [*:0]const u8, size: usize) !Space {
+    pub fn init(heap: *Self, allocator: Allocator, comptime name: []const u8, size: usize) !Space {
        var self = lazyInit(heap, name, size);
        try self.allocateMemory(allocator);
        return self;
@@ -1038,3 +1038,73 @@ test "link an object to another and perform scavenge" {
    var referenced_object_value = new_referenced_object.getMap().getSlotByName("actual").?.value;
    try std.testing.expectEqual(@as(u64, 0xDEADBEEF), referenced_object_value.asUnsignedInteger());
 }
+
+fn HeapAddress(comptime T: type) type {
+    return struct {
+        heap: *const Self,
+        address: T,
+
+        fn spaceNameIfAddressWithin(self: @This(), space: *const Space) ?[]const u8 {
+            const memory_start = @ptrToInt(space.memory.ptr);
+            const memory_end = @ptrToInt(space.memory.ptr + space.memory.len);
+            const address = @ptrToInt(self.address);
+
+            if (address >= memory_start and address < memory_end)
+                return @as([]const u8, space.name);
+            return null;
+        }
+
+        fn spaceName(self: @This()) []const u8 {
+            if (self.spaceNameIfAddressWithin(&self.heap.eden)) |name|
+                return name;
+            if (self.spaceNameIfAddressWithin(&self.heap.from_space)) |name|
+                return name;
+            if (self.spaceNameIfAddressWithin(&self.heap.to_space)) |name|
+                return name;
+            if (self.spaceNameIfAddressWithin(&self.heap.old_space)) |name|
+                return name;
+            @panic("!!! This address isn't within the heap!");
+        }
+
+        fn spaceOffsetIfAddressWithin(self: @This(), space: *const Space) ?usize {
+            const memory_start = @ptrToInt(space.memory.ptr);
+            const memory_end = @ptrToInt(space.memory.ptr + space.memory.len);
+            const address = @ptrToInt(self.address);
+
+            if (address >= memory_start and address < memory_end)
+                return address - memory_start;
+            return null;
+        }
+
+        fn spaceOffset(self: @This()) usize {
+            if (self.spaceOffsetIfAddressWithin(&self.heap.eden)) |offset|
+                return offset;
+            if (self.spaceOffsetIfAddressWithin(&self.heap.from_space)) |offset|
+                return offset;
+            if (self.spaceOffsetIfAddressWithin(&self.heap.to_space)) |offset|
+                return offset;
+            if (self.spaceOffsetIfAddressWithin(&self.heap.old_space)) |offset|
+                return offset;
+            @panic("!!! This address isn't within the heap!");
+        }
+
+        pub fn format(
+            self: @This(),
+            comptime fmt: []const u8,
+            options: std.fmt.FormatOptions,
+            writer: anytype,
+        ) !void {
+            _ = fmt;
+
+            try writer.writeByte('[');
+            try writer.writeAll(self.spaceName());
+            try writer.writeByte('+');
+            try std.fmt.formatInt(self.spaceOffset(), 10, .lower, options, writer);
+            try writer.writeByte(']');
+        }
+    };
+}
+
+pub fn asHeapAddress(self: *const Self, value: anytype) HeapAddress(@TypeOf(value)) {
+    return .{ .heap = self, .address = value };
+}
diff --git a/src/runtime/interpreter.zig b/src/runtime/interpreter.zig
index 05df3cc..8e3d8e1 100644
--- a/src/runtime/interpreter.zig
+++ b/src/runtime/interpreter.zig
@@ -422,18 +422,54 @@ fn performPrimitiveSend(
    );
 }

+/// Try to see if the current slot for the inline cache is filled and matches
+/// the receiver's map; if so, return the matching method object. Invalidate the
+/// inline cache entry and return null otherwise.
+fn getOrInvalidateMethodFromInlineCacheForReceiver(
+    vm: *VirtualMachine,
+    actor: *Actor,
+    receiver: Value,
+) ?MethodObject.Ptr {
+    if (!receiver.isObjectReference()) {
+        // std.debug.print("MISS because not object ref\n", .{});
+        return null;
+    }
+    var real_receiver = receiver;
+    if (receiver.asObject().asType(.Activation)) |activation| {
+        real_receiver = activation.findActivationReceiver();
+    }
+    if (!real_receiver.isObjectReference()) {
+        // std.debug.print("MISS because not object ref\n", .{});
+        return null;
+    }
+
+    const current_activation = actor.activation_stack.getCurrent();
+    return current_activation.getOrInvalidateMethodFromInlineCacheForReceiver(vm, real_receiver.asObject());
+}
+
 /// If the receiver is an object, write the receiver-method pair into the
 /// current activation's inline cache.
 fn writeIntoInlineCache(
+    vm: *VirtualMachine,
    actor: *Actor,
    receiver: Value,
    method: MethodObject.Ptr,
 ) void {
-    if (!receiver.isObjectReference())
+    if (!receiver.isObjectReference()) {
+        // std.debug.print("NOWR because not object ref\n", .{});
+        return;
+    }
+    var real_receiver = receiver;
+    if (receiver.asObject().asType(.Activation)) |activation| {
+        real_receiver = activation.findActivationReceiver();
+    }
+    if (!real_receiver.isObjectReference()) {
+        // std.debug.print("NOWR because not object ref\n", .{});
        return;
+    }

    const current_activation = actor.activation_stack.getCurrent();
-    current_activation.writeIntoInlineCache(receiver.asObject(), method);
+    current_activation.writeIntoInlineCache(vm, real_receiver.asObject(), method);
 }

 /// Sends a message to the given receiver, returning the result as a normal
@@ -484,11 +520,27 @@ pub fn sendMessage(

    actor.ensureCanRead(receiver, source_range);

+    if (getOrInvalidateMethodFromInlineCacheForReceiver(vm, actor, receiver)) |method| {
+        const argument_count = method.getArgumentSlotCount();
+        const argument_slice = actor.argument_stack.lastNItems(argument_count);
+
+        // Advance the instruction for the activation that will be returned to.
+        _ = actor.activation_stack.getCurrent().advanceInstruction();
+
+        try executeMethod(vm, actor, receiver, method, argument_slice, target_location, source_range);
+
+        actor.argument_stack.popNItems(argument_count);
+        // Bump the argument stack height of the (now current) activation since
+        // we've now popped this activation's items off it.
+        actor.activation_stack.getCurrent().stack_snapshot.bumpArgumentHeight(actor);
+        return null;
+    }
+
    return switch (receiver.lookup(vm, message_name)) {
        .Regular => |lookup_result| {
            if (lookup_result.isObjectReference()) {
                if (lookup_result.asObject().asType(.Method)) |method| {
-                    writeIntoInlineCache(actor, receiver, method);
+                    writeIntoInlineCache(vm, actor, receiver, method);

                    const argument_count = method.getArgumentSlotCount();
                    const argument_slice = actor.argument_stack.lastNItems(argument_count);
diff --git a/src/runtime/objects/activation.zig b/src/runtime/objects/activation.zig
index 7a5ed68..4ba6316 100644
--- a/src/runtime/objects/activation.zig
+++ b/src/runtime/objects/activation.zig
@@ -153,8 +153,12 @@ pub const Activation = extern struct {
        };
    }

-    pub fn writeIntoInlineCacheAtOffset(self: Activation.Ptr, offset: usize, object: Object.Ptr, method: MethodObject.Ptr) void {
-        self.dispatch("writeIntoInlineCacheAtOffset", .{ offset, object, method });
+    pub fn getOrInvalidateMethodFromInlineCacheAtOffsetForReceiver(self: Activation.Ptr, vm: *VirtualMachine, offset: usize, receiver: Object.Ptr) ?MethodObject.Ptr {
+        return self.dispatch("getOrInvalidateMethodFromInlineCacheAtOffsetForReceiver", .{ vm, offset, receiver });
+    }
+
+    pub fn writeIntoInlineCacheAtOffset(self: Activation.Ptr, vm: *VirtualMachine, offset: usize, receiver: Object.Ptr, method: MethodObject.Ptr) void {
+        self.dispatch("writeIntoInlineCacheAtOffset", .{ vm, offset, receiver, method });
    }

    // --- Slots and slot values ---
diff --git a/src/runtime/objects/block.zig b/src/runtime/objects/block.zig
index a3aff14..c4e9a99 100644
--- a/src/runtime/objects/block.zig
+++ b/src/runtime/objects/block.zig
@@ -269,7 +269,11 @@ pub const BlockMap = extern struct {
        return required_memory;
    }

-    pub fn writeIntoInlineCacheAtOffset(self: BlockMap.Ptr, offset: usize, object: Object.Ptr, method: MethodObject.Ptr) void {
-        self.base_map.writeIntoInlineCacheAtOffset(offset, object, method);
+    pub fn getOrInvalidateMethodFromInlineCacheAtOffsetForReceiver(self: BlockMap.Ptr, vm: *VirtualMachine, offset: usize, receiver: Object.Ptr) ?MethodObject.Ptr {
+        return self.base_map.getOrInvalidateMethodFromInlineCacheAtOffsetForReceiver(vm, offset, receiver);
+    }
+
+    pub fn writeIntoInlineCacheAtOffset(self: BlockMap.Ptr, vm: *VirtualMachine, offset: usize, object: Object.Ptr, method: MethodObject.Ptr) void {
+        self.base_map.writeIntoInlineCacheAtOffset(vm, offset, object, method);
    }
 };
diff --git a/src/runtime/objects/executable_map.zig b/src/runtime/objects/executable_map.zig
index acc3fc8..1ee22ab 100644
--- a/src/runtime/objects/executable_map.zig
+++ b/src/runtime/objects/executable_map.zig
@@ -19,6 +19,7 @@ const value_import = @import("../value.zig");
 const PointerValue = value_import.PointerValue;
 const stage2_compat = @import("../../utility/stage2_compat.zig");
 const VirtualMachine = @import("../VirtualMachine.zig");
+const value_inspector = @import("../value_inspector.zig");
 const RefCountedValue = value_import.RefCountedValue;

 /// An "executable map" is one that contains a reference to executable code.
@@ -120,12 +121,41 @@ pub const ExecutableMap = extern struct {

    // --- Inline cache operations ---

-    pub fn writeIntoInlineCacheAtOffset(self: ExecutableMap.Ptr, offset: usize, object: Object.Ptr, method: MethodObject.Ptr) void {
+    pub fn getOrInvalidateMethodFromInlineCacheAtOffsetForReceiver(
+        self: ExecutableMap.Ptr,
+        vm: *VirtualMachine,
+        offset: usize,
+        receiver: Object.Ptr,
+    ) ?MethodObject.Ptr {
+        const inline_cache = self.inline_cache.get();
+        std.debug.assert(offset < inline_cache.getSize() / 2);
+
+        const inline_cache_array = self.inline_cache.get().getValues();
+        if (inline_cache_array[offset * 2].data != receiver.map.data) {
+            // std.debug.print("MISS expected={} got={}\n", .{ vm.heap.asHeapAddress(receiver.map.asObject()), vm.heap.asHeapAddress(inline_cache_array[offset * 2].asObject()) });
+            inline_cache_array[offset * 2] = vm.nil();
+            inline_cache_array[(offset * 2) + 1] = vm.nil();
+            return null;
+        }
+
+        const method = inline_cache_array[(offset * 2) + 1].asObject().mustBeType(.Method);
+        // std.debug.print("HIT receiver.map={} method={} (\"{s}\")\n", .{ vm.heap.asHeapAddress(receiver.map.asObject()), vm.heap.asHeapAddress(method), method.getMap().method_name.asByteArray().getValues() });
+        // std.debug.print("    ", .{});
+        // value_inspector.inspectValue(.Inline, vm, receiver.asValue()) catch unreachable;
+        // std.debug.print("\n", .{});
+        return method;
+    }
+
+    pub fn writeIntoInlineCacheAtOffset(self: ExecutableMap.Ptr, vm: *VirtualMachine, offset: usize, receiver: Object.Ptr, method: MethodObject.Ptr) void {
+        if (vm.getMapMap().asValue().data == receiver.map.data)
+            return;
+
        const inline_cache = self.inline_cache.get();
        std.debug.assert(offset < inline_cache.getSize() / 2);

        const inline_cache_array = self.inline_cache.get().getValues();
-        inline_cache_array[offset * 2] = object.map;
+        inline_cache_array[offset * 2] = receiver.map;
        inline_cache_array[(offset * 2) + 1] = method.asValue();
+        // std.debug.print("WRITE receiver.map={} method={} (\"{s}\")\n", .{ vm.heap.asHeapAddress(receiver.map.asObject()), vm.heap.asHeapAddress(method), method.getMap().method_name.asByteArray().getValues() });
    }
 };
diff --git a/src/runtime/objects/method.zig b/src/runtime/objects/method.zig
index 3bf3129..b69f007 100644
--- a/src/runtime/objects/method.zig
+++ b/src/runtime/objects/method.zig
@@ -275,7 +275,11 @@ pub const MethodMap = extern struct {
        return required_memory;
    }

-    pub fn writeIntoInlineCacheAtOffset(self: MethodMap.Ptr, offset: usize, object: Object.Ptr, method: Method.Ptr) void {
-        self.base_map.writeIntoInlineCacheAtOffset(offset, object, method);
+    pub fn getOrInvalidateMethodFromInlineCacheAtOffsetForReceiver(self: MethodMap.Ptr, vm: *VirtualMachine, offset: usize, receiver: Object.Ptr) ?Method.Ptr {
+        return self.base_map.getOrInvalidateMethodFromInlineCacheAtOffsetForReceiver(vm, offset, receiver);
+    }
+
+    pub fn writeIntoInlineCacheAtOffset(self: MethodMap.Ptr, vm: *VirtualMachine, offset: usize, object: Object.Ptr, method: Method.Ptr) void {
+        self.base_map.writeIntoInlineCacheAtOffset(vm, offset, object, method);
    }
 };
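
Taken together, the series implements a per-instruction monomorphic inline cache: entry i holds a [receiver map reference, method reference] pair, a send at pc i hits when the receiver's map matches the cached map, and any mismatch clears the entry before falling back to a full lookup. Below is a minimal, self-contained Zig sketch of that discipline; Map, Method, Entry and InlineCache are illustrative stand-ins invented here, not the runtime's types (the real cache stores tagged Value words in a heap-allocated array and invalidates entries with vm.nil()):

const std = @import("std");

// Illustrative stand-ins; the runtime stores tagged Value words
// (receiver map reference, method reference), not raw pointers.
const Map = struct { id: u32 };
const Method = struct { name: []const u8 };

const Entry = struct { map: ?*const Map = null, method: ?*const Method = null };

// One entry per bytecode instruction, mirroring the over-allocation the
// first commit message complains about: non-send instructions get a
// (permanently empty) entry too.
const InlineCache = struct {
    entries: []Entry,

    // Mirrors getOrInvalidateMethodFromInlineCacheAtOffsetForReceiver: on a
    // map match, return the cached method; otherwise clear the entry (the
    // cache is monomorphic) and report a miss.
    fn lookup(self: *InlineCache, offset: usize, receiver_map: *const Map) ?*const Method {
        const entry = &self.entries[offset];
        const cached_map = entry.map orelse return null;
        if (cached_map != receiver_map) {
            entry.* = .{}; // invalidate on miss
            return null;
        }
        return entry.method;
    }

    // Mirrors writeIntoInlineCacheAtOffset (minus the map-map guard).
    fn write(self: *InlineCache, offset: usize, receiver_map: *const Map, method: *const Method) void {
        self.entries[offset] = .{ .map = receiver_map, .method = method };
    }
};

test "monomorphic inline cache hit, miss and invalidation" {
    var map_a = Map{ .id = 1 };
    var map_b = Map{ .id = 2 };
    const print_method = Method{ .name = "printLine" };

    var storage = [_]Entry{.{}} ** 4; // a pretend 4-instruction block
    var cache = InlineCache{ .entries = &storage };

    // First send at pc 2: miss, so the interpreter falls back to a full
    // lookup and then caches the result.
    try std.testing.expect(cache.lookup(2, &map_a) == null);
    cache.write(2, &map_a, &print_method);

    // Same receiver shape at the same pc: hit.
    try std.testing.expect(cache.lookup(2, &map_a).? == &print_method);

    // Different receiver map: miss, and the entry is invalidated, so even
    // the original receiver misses until the next write.
    try std.testing.expect(cache.lookup(2, &map_b) == null);
    try std.testing.expect(cache.lookup(2, &map_a) == null);
}

Note that the real write path in PATCH 5 additionally refuses to cache when the receiver's map is the map-map (maps used as receivers would otherwise thrash entries), and both paths first unwrap activation objects to the underlying receiver via findActivationReceiver().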
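
The size accounting introduced in PATCH 1 is easy to misread: requiredSizeForSelfAllocation() covers only the map struct plus its slots, while requiredSizeForAllocation() additionally reserves room for the inline cache's array map and array object (two words per instruction in the bytecode block). A worked sketch of that split, with all byte sizes hypothetical placeholders rather than the runtime's real @sizeOf values:

const std = @import("std");

// Hypothetical sizes, purely to make the accounting concrete; the real
// values come from @sizeOf(MethodMap), @sizeOf(Slot) and the runtime's
// array map/object layouts.
const method_map_size: usize = 48;
const slot_size: usize = 16;
const array_map_size: usize = 24;
const array_header_size: usize = 16;
const word_size: usize = 8;

/// Just the map itself: header plus its slots.
fn requiredSizeForSelfAllocation(slot_count: usize) usize {
    return method_map_size + slot_count * slot_size;
}

/// The map plus its inline cache: an array map and an array object
/// holding two words per bytecode instruction.
fn requiredSizeForAllocation(block_length: usize, slot_count: usize) usize {
    const inline_cache_size = array_map_size +
        (array_header_size + block_length * 2 * word_size);
    return requiredSizeForSelfAllocation(slot_count) + inline_cache_size;
}

test "a 10-instruction method with 3 slots" {
    // 48 + 3*16 = 96 bytes for the map itself, plus
    // 24 + (16 + 10*2*8) = 200 bytes for the cache: 296 bytes total.
    try std.testing.expectEqual(@as(usize, 296), requiredSizeForAllocation(10, 3));
}

This is why getSizeInMemory() switches to the self-only size (the cache array is a separate heap object with its own lifetime) while getSizeForCloning() uses the combined size: a clone re-runs allocateAndInit() and therefore needs room for a fresh cache as well.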