38 changes: 36 additions & 2 deletions src/runtime/Activation.zig
@@ -9,6 +9,7 @@ const Allocator = std.mem.Allocator;
const Actor = @import("./Actor.zig");
const value = @import("./value.zig");
const Value = value.Value;
const Object = @import("object.zig").Object;
const bytecode = @import("./bytecode.zig");
const SourceRange = @import("./SourceRange.zig");
const IntegerValue = value.IntegerValue;
@@ -107,6 +108,38 @@ pub fn restart(self: *Self) void {
self.pc = 0;
}

/// Check whether the inline cache entry for the current instruction is filled
/// and matches the receiver's map; if so, return the cached method object.
/// Otherwise, invalidate the inline cache entry and return null.
pub fn getOrInvalidateMethodFromInlineCacheForReceiver(
self: *Self,
vm: *VirtualMachine,
receiver: Object.Ptr,
) ?MethodObject.Ptr {
const activation_object = self.activation_object.get();
// NOTE: For the time being, we are wasting memory by allocating an
// equally-sized inline cache for the bytecode block of the
// activation; in the future we will map each send instruction to
// an offset within the cache and shrink it drastically.
return activation_object.getOrInvalidateMethodFromInlineCacheAtOffsetForReceiver(vm, self.pc, receiver);
}

/// Write the given receiver-method pair into the appropriate offset of the
/// activation object's inline cache (stored in the map).
pub fn writeIntoInlineCache(
self: *Self,
vm: *VirtualMachine,
receiver: Object.Ptr,
method: MethodObject.Ptr,
) void {
const activation_object = self.activation_object.get();
// NOTE: For the time being, we are wasting memory by allocating an
// equally-sized inline cache for the bytecode block of the
// activation; in the future we will map each send instruction to
// an offset within the cache and shrink it drastically.
activation_object.writeIntoInlineCacheAtOffset(vm, self.pc, receiver, method);
}

pub fn format(
activation: Self,
comptime fmt: []const u8,
@@ -227,13 +260,14 @@ pub const ActivationStack = struct {
) !void {
var source_range = SourceRange.initNoRef(current_executable, .{ .start = 0, .end = 1 });

const entrypoint_block = new_executable.value.getEntrypointBlock();
var token = try vm.heap.getAllocation(
MethodObject.requiredSizeForCreatingTopLevelContext() +
MethodObject.requiredSizeForCreatingTopLevelContext(entrypoint_block) +
ActivationObject.requiredSizeForAllocation(0, 0),
);
defer token.deinit();

const toplevel_context_method = try MethodObject.createTopLevelContextForExecutable(vm, &token, new_executable, new_executable.value.getEntrypointBlock());
const toplevel_context_method = try MethodObject.createTopLevelContextForExecutable(vm, &token, new_executable, entrypoint_block);
const activation_slot = try self.getNewActivationSlot(vm.allocator);
toplevel_context_method.activateMethod(vm, &token, vm.current_actor.id, vm.lobby(), &.{}, target_location, source_range, activation_slot);
}
76 changes: 73 additions & 3 deletions src/runtime/Heap.zig
@@ -403,7 +403,7 @@ const Space = struct {
/// order to evacuate all the objects to a higher generation.
tenure_target: ?*Space = null,
/// The name of this space.
name: [*:0]const u8,
name: []const u8,

/// A link node for a newer generation space to scan in order to update
/// references from the newer space to the older one.
@@ -412,7 +412,7 @@
previous: ?*const NewerGenerationLink,
};

pub fn lazyInit(heap: *Self, comptime name: [*:0]const u8, size: usize) Space {
pub fn lazyInit(heap: *Self, comptime name: []const u8, size: usize) Space {
return Space{
.heap = heap,
.name = name,
@@ -422,7 +422,7 @@
};
}

pub fn init(heap: *Self, allocator: Allocator, comptime name: [*:0]const u8, size: usize) !Space {
pub fn init(heap: *Self, allocator: Allocator, comptime name: []const u8, size: usize) !Space {
var self = lazyInit(heap, name, size);
try self.allocateMemory(allocator);
return self;
@@ -1038,3 +1038,73 @@ test "link an object to another and perform scavenge" {
var referenced_object_value = new_referenced_object.getMap().getSlotByName("actual").?.value;
try std.testing.expectEqual(@as(u64, 0xDEADBEEF), referenced_object_value.asUnsignedInteger());
}

fn HeapAddress(comptime T: type) type {
return struct {
heap: *const Self,
address: T,

fn spaceNameIfAddressWithin(self: @This(), space: *const Space) ?[]const u8 {
const memory_start = @ptrToInt(space.memory.ptr);
const memory_end = @ptrToInt(space.memory.ptr + space.memory.len);
const address = @ptrToInt(self.address);

if (address >= memory_start and address < memory_end)
return @as([]const u8, space.name);
return null;
}

fn spaceName(self: @This()) []const u8 {
if (self.spaceNameIfAddressWithin(&self.heap.eden)) |name|
return name;
if (self.spaceNameIfAddressWithin(&self.heap.from_space)) |name|
return name;
if (self.spaceNameIfAddressWithin(&self.heap.to_space)) |name|
return name;
if (self.spaceNameIfAddressWithin(&self.heap.old_space)) |name|
return name;
@panic("!!! This address isn't within the heap!");
}

fn spaceOffsetIfAddressWithin(self: @This(), space: *const Space) ?usize {
const memory_start = @ptrToInt(space.memory.ptr);
const memory_end = @ptrToInt(space.memory.ptr + space.memory.len);
const address = @ptrToInt(self.address);

if (address >= memory_start and address < memory_end)
return address - memory_start;
return null;
}

fn spaceOffset(self: @This()) usize {
if (self.spaceOffsetIfAddressWithin(&self.heap.eden)) |offset|
return offset;
if (self.spaceOffsetIfAddressWithin(&self.heap.from_space)) |offset|
return offset;
if (self.spaceOffsetIfAddressWithin(&self.heap.to_space)) |offset|
return offset;
if (self.spaceOffsetIfAddressWithin(&self.heap.old_space)) |offset|
return offset;
@panic("!!! This address isn't within the heap!");
}

pub fn format(
self: @This(),
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
_ = fmt;

try writer.writeByte('[');
try writer.writeAll(self.spaceName());
try writer.writeByte('+');
try std.fmt.formatInt(self.spaceOffset(), 10, .lower, options, writer);
try writer.writeByte(']');
}
};
}

pub fn asHeapAddress(self: *const Self, value: anytype) HeapAddress(@TypeOf(value)) {
return .{ .heap = self, .address = value };
}
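
The HeapAddress wrapper above is a debugging formatter: wrap a heap pointer with `heap.asHeapAddress(ptr)` and print it with `{}` to get a space name plus decimal offset instead of a raw address. A self-contained analogue of the same format() shape, with a test pinning down the expected rendering (the types here are stand-ins, not the heap's):

const std = @import("std");

const RegionAddress = struct {
    name: []const u8,
    offset: usize,

    pub fn format(
        self: @This(),
        comptime fmt: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        _ = fmt;
        // Same "[space+offset]" shape as HeapAddress.format above.
        try writer.writeByte('[');
        try writer.writeAll(self.name);
        try writer.writeByte('+');
        try std.fmt.formatInt(self.offset, 10, .lower, options, writer);
        try writer.writeByte(']');
    }
};

test "renders as [name+offset]" {
    var buffer: [32]u8 = undefined;
    const rendered = try std.fmt.bufPrint(&buffer, "{}", .{RegionAddress{ .name = "eden", .offset = 1234 }});
    try std.testing.expectEqualStrings("[eden+1234]", rendered);
}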
78 changes: 73 additions & 5 deletions src/runtime/interpreter.zig
@@ -422,6 +422,56 @@ fn performPrimitiveSend(
);
}

/// Check whether the inline cache entry for the current instruction is filled
/// and matches the receiver's map; if so, return the cached method object.
/// Otherwise, invalidate the inline cache entry and return null.
fn getOrInvalidateMethodFromInlineCacheForReceiver(
vm: *VirtualMachine,
actor: *Actor,
receiver: Value,
) ?MethodObject.Ptr {
if (!receiver.isObjectReference()) {
// std.debug.print("MISS because not object ref\n", .{});
return null;
}
var real_receiver = receiver;
if (receiver.asObject().asType(.Activation)) |activation| {
real_receiver = activation.findActivationReceiver();
}
if (!real_receiver.isObjectReference()) {
// std.debug.print("MISS because not object ref\n", .{});
return null;
}

const current_activation = actor.activation_stack.getCurrent();
return current_activation.getOrInvalidateMethodFromInlineCacheForReceiver(vm, real_receiver.asObject());
}

/// If the receiver is an object, write the receiver-method pair into the
/// current activation's inline cache.
fn writeIntoInlineCache(
vm: *VirtualMachine,
actor: *Actor,
receiver: Value,
method: MethodObject.Ptr,
) void {
if (!receiver.isObjectReference()) {
// std.debug.print("NOWR because not object ref\n", .{});
return;
}
var real_receiver = receiver;
if (receiver.asObject().asType(.Activation)) |activation| {
real_receiver = activation.findActivationReceiver();
}
if (!real_receiver.isObjectReference()) {
// std.debug.print("NOWR because not object ref\n", .{});
return;
}

const current_activation = actor.activation_stack.getCurrent();
current_activation.writeIntoInlineCache(vm, real_receiver.asObject(), method);
}

/// Sends a message to the given receiver, returning the result as a normal
/// completion if it can be immediately resolved; if the message send must
/// create a new activation, pushes the activation onto the stack and returns
@@ -470,10 +520,28 @@ pub fn sendMessage(

actor.ensureCanRead(receiver, source_range);

if (getOrInvalidateMethodFromInlineCacheForReceiver(vm, actor, receiver)) |method| {
const argument_count = method.getArgumentSlotCount();
const argument_slice = actor.argument_stack.lastNItems(argument_count);

// Advance the instruction for the activation that will be returned to.
_ = actor.activation_stack.getCurrent().advanceInstruction();

try executeMethod(vm, actor, receiver, method, argument_slice, target_location, source_range);

actor.argument_stack.popNItems(argument_count);
// Bump the argument stack height of the (now current) activation since
// we've now popped this activation's items off it.
actor.activation_stack.getCurrent().stack_snapshot.bumpArgumentHeight(actor);
return null;
}

return switch (receiver.lookup(vm, message_name)) {
.Regular => |lookup_result| {
if (lookup_result.isObjectReference()) {
if (lookup_result.asObject().asType(.Method)) |method| {
writeIntoInlineCache(vm, actor, receiver, method);

const argument_count = method.getArgumentSlotCount();
const argument_slice = actor.argument_stack.lastNItems(argument_count);

@@ -734,15 +802,15 @@ fn createMethod(
argument_slot_count += 1;
}

const block = executable.value.getBlock(block_index);
var token = try vm.heap.getAllocation(
MethodMap.requiredSizeForAllocation(total_slot_count) +
MethodMap.requiredSizeForAllocation(block, total_slot_count) +
MethodObject.requiredSizeForAllocation(total_assignable_slot_count),
);
defer token.deinit();

const block = executable.value.getBlock(block_index);
var method_map = try MethodMap.create(
vm.getMapMap(),
vm,
&token,
argument_slot_count,
total_slot_count,
@@ -803,13 +871,13 @@ fn createBlock(
std.debug.assert(nonlocal_return_target_activation.get(actor.activation_stack).?.nonlocal_return_target_activation == null);

var token = try vm.heap.getAllocation(
BlockMap.requiredSizeForAllocation(total_slot_count) +
BlockMap.requiredSizeForAllocation(block, total_slot_count) +
BlockObject.requiredSizeForAllocation(total_assignable_slot_count),
);
defer token.deinit();

var block_map = try BlockMap.create(
vm.getMapMap(),
vm,
&token,
argument_slot_count,
total_slot_count,
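
For intuition about the sendMessage fast path added above (consult the cache before the lookup, seed it after a successful Method lookup), here is a self-contained toy with the same monomorphic hit/miss behaviour. Every name is a placeholder, and the slow-path result is simply passed in as a parameter:

const std = @import("std");

const Map = struct { id: u8 };
const Method = struct { id: u8 = 0 };
const Receiver = struct { map: *const Map };
const Entry = struct { map: ?*const Map = null, method: ?*const Method = null };

// Fast path first; on a miss, pretend the caller performed the full lookup
// (`looked_up`) and reseed the entry so the next send with this map hits.
fn sendThroughCache(entry: *Entry, receiver: Receiver, looked_up: *const Method) *const Method {
    if (entry.map) |cached_map| {
        if (cached_map == receiver.map) {
            if (entry.method) |cached_method| return cached_method;
        }
    }
    entry.* = .{ .map = receiver.map, .method = looked_up };
    return looked_up;
}

test "same receiver shape hits, a new shape reseeds" {
    const map_a = Map{ .id = 1 };
    const map_b = Map{ .id = 2 };
    const method = Method{};
    var entry = Entry{};

    _ = sendThroughCache(&entry, .{ .map = &map_a }, &method);
    try std.testing.expect(entry.map.? == &map_a);

    _ = sendThroughCache(&entry, .{ .map = &map_b }, &method);
    try std.testing.expect(entry.map.? == &map_b);
}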
22 changes: 16 additions & 6 deletions src/runtime/objects/activation.zig
@@ -9,13 +9,15 @@ const Map = map_import.Map;
const Slot = @import("../slot.zig").Slot;
const Heap = @import("../Heap.zig");
const slots = @import("slots.zig");
const Object = @import("../object.zig").Object;
const MapType = map_import.MapType;
const bytecode = @import("../bytecode.zig");
const BlockMap = @import("block.zig").BlockMap;
const MethodMap = @import("method.zig").MethodMap;
const map_import = @import("map.zig");
const SlotsObject = slots.Slots;
const GenericValue = value_import.Value;
const MethodObject = @import("method.zig").Method;
const value_import = @import("../value.zig");
const object_lookup = @import("../object_lookup.zig");
const stage2_compat = @import("../../utility/stage2_compat.zig");
@@ -128,11 +130,11 @@ pub const Activation = extern struct {
// --- Slot counts ---

pub fn getAssignableSlotCount(self: Activation.Ptr) u8 {
return self.dispatch("getAssignableSlotCount");
return self.dispatch("getAssignableSlotCount", .{});
}

pub fn getArgumentSlotCount(self: Activation.Ptr) u8 {
return self.dispatch("getArgumentSlotCount");
return self.dispatch("getArgumentSlotCount", .{});
}

// --- Map forwarding ---
@@ -151,10 +153,18 @@
};
}

pub fn getOrInvalidateMethodFromInlineCacheAtOffsetForReceiver(self: Activation.Ptr, vm: *VirtualMachine, offset: usize, receiver: Object.Ptr) ?MethodObject.Ptr {
return self.dispatch("getOrInvalidateMethodFromInlineCacheAtOffsetForReceiver", .{ vm, offset, receiver });
}

pub fn writeIntoInlineCacheAtOffset(self: Activation.Ptr, vm: *VirtualMachine, offset: usize, receiver: Object.Ptr, method: MethodObject.Ptr) void {
self.dispatch("writeIntoInlineCacheAtOffset", .{ vm, offset, receiver, method });
}

// --- Slots and slot values ---

pub fn getSlots(self: Activation.Ptr) Slot.Slice {
return self.dispatch("getSlots");
return self.dispatch("getSlots", .{});
}

/// Return a slice of `GenericValue`s for the assignable slots that are after the
@@ -265,10 +275,10 @@ pub const Activation = extern struct {
return @typeInfo(@TypeOf(@field(MethodMap, fn_name))).Fn.return_type.?;
}

fn dispatch(self: Activation.Ptr, comptime fn_name: []const u8) DispatchReturn(fn_name) {
fn dispatch(self: Activation.Ptr, comptime fn_name: []const u8, args: anytype) DispatchReturn(fn_name) {
return switch (self.getActivationType()) {
.Method => @call(.auto, @field(MethodMap, fn_name), .{self.getMethodMap()}),
.Block => @call(.auto, @field(BlockMap, fn_name), .{self.getBlockMap()}),
.Method => @call(.auto, @field(MethodMap, fn_name), .{self.getMethodMap()} ++ args),
.Block => @call(.auto, @field(BlockMap, fn_name), .{self.getBlockMap()} ++ args),
};
}
};
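
The dispatch change above switches from zero-argument forwarding to a comptime function name plus an argument tuple, spliced in with `++`. A minimal self-contained illustration of that pattern (toy types, same DispatchReturn trick):

const std = @import("std");

const Impl = struct {
    fn add(self: *const Impl, a: i64, b: i64) i64 {
        _ = self;
        return a + b;
    }
};

fn DispatchReturn(comptime fn_name: []const u8) type {
    return @typeInfo(@TypeOf(@field(Impl, fn_name))).Fn.return_type.?;
}

// The receiver goes in a one-element tuple and the caller's arguments are
// concatenated behind it, the same shape as `.{self.getMethodMap()} ++ args`.
fn dispatch(target: *const Impl, comptime fn_name: []const u8, args: anytype) DispatchReturn(fn_name) {
    return @call(.auto, @field(Impl, fn_name), .{target} ++ args);
}

test "argument tuple is forwarded through @call" {
    const impl = Impl{};
    try std.testing.expectEqual(@as(i64, 5), dispatch(&impl, "add", .{ 2, 3 }));
}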
1 change: 1 addition & 0 deletions src/runtime/objects/array.zig
@@ -22,6 +22,7 @@ pub const Array = extern struct {
object: Object align(@alignOf(u64)),

pub const Ptr = stage2_compat.HeapPtr(Array, .Mutable);
pub const Type = .Array;
pub const Value = value_import.ObjectValue(Array);

/// Create a new array with the given values and filling extra items with