Are you linking libc? In this case, {#syntax#}std.heap.c_allocator{#endsyntax#} is likely
@@ -8452,10 +8452,10 @@ pub fn main() !void {
var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit();
- var arena = std.heap.ArenaAllocator.init(&direct_allocator.allocator);
+ var arena = std.heap.ArenaAllocator.init(direct_allocator.allocator());
defer arena.deinit();
- const allocator = &arena.allocator;
+ const allocator = arena.allocator();
const ptr = try allocator.create(i32);
std.debug.warn("ptr={*}\n", ptr);
@@ -8613,7 +8613,7 @@ test "string literal to constant slice" {
For example, the function's documentation may say "caller owns the returned memory", in which case
the code that calls the function must have a plan for when to free that memory. Probably in this situation,
- the function will accept an {#syntax#}*Allocator{#endsyntax#} parameter.
+ the function will accept an {#syntax#}Allocator{#endsyntax#} parameter.
Sometimes the lifetime of a pointer may be more complicated. For example, when using
diff --git a/example/guess_number/main.zig b/example/guess_number/main.zig
index b4eb1c292a4f..eaab8d4a9997 100644
--- a/example/guess_number/main.zig
+++ b/example/guess_number/main.zig
@@ -6,7 +6,7 @@ const os = std.os;
pub fn main() !void {
var stdout_file = try io.getStdOut();
- const stdout = &stdout_file.outStream().stream;
+ const stdout = stdout_file.outStreamAdapter().outStream();
try stdout.print("Welcome to the Guess Number Game in Zig.\n");
@@ -18,7 +18,7 @@ pub fn main() !void {
const seed = std.mem.readIntNative(u64, &seed_bytes);
var prng = std.rand.DefaultPrng.init(seed);
- const answer = prng.random.range(u8, 0, 100) + 1;
+ const answer = prng.random().range(u8, 0, 100) + 1;
while (true) {
try stdout.print("\nGuess a number between 1 and 100: ");
diff --git a/libc/process_headers.zig b/libc/process_headers.zig
index e6b174c8f9f7..db77a3d43dd2 100644
--- a/libc/process_headers.zig
+++ b/libc/process_headers.zig
@@ -708,8 +708,8 @@ const PathTable = std.AutoHashMap([]const u8, *TargetToHash);
pub fn main() !void {
var direct_allocator = std.heap.DirectAllocator.init();
- var arena = std.heap.ArenaAllocator.init(&direct_allocator.allocator);
- const allocator = &arena.allocator;
+ var arena = std.heap.ArenaAllocator.init(direct_allocator.allocator());
+ const allocator = arena.allocator();
const args = try std.os.argsAlloc(allocator);
var search_paths = std.ArrayList([]const u8).init(allocator);
var opt_out_dir: ?[]const u8 = null;
diff --git a/src-self-hosted/arg.zig b/src-self-hosted/arg.zig
index 7bbd233a7528..b0cecb145abf 100644
--- a/src-self-hosted/arg.zig
+++ b/src-self-hosted/arg.zig
@@ -31,7 +31,7 @@ fn argInAllowedSet(maybe_set: ?[]const []const u8, arg: []const u8) bool {
}
// Modifies the current argument index during iteration
-fn readFlagArguments(allocator: *Allocator, args: []const []const u8, required: usize, allowed_set: ?[]const []const u8, index: *usize) !FlagArg {
+fn readFlagArguments(allocator: Allocator, args: []const []const u8, required: usize, allowed_set: ?[]const []const u8, index: *usize) !FlagArg {
switch (required) {
0 => return FlagArg{ .None = undefined }, // TODO: Required to force non-tag but value?
1 => {
@@ -80,7 +80,7 @@ pub const Args = struct {
flags: HashMapFlags,
positionals: ArrayList([]const u8),
- pub fn parse(allocator: *Allocator, comptime spec: []const Flag, args: []const []const u8) !Args {
+ pub fn parse(allocator: Allocator, comptime spec: []const Flag, args: []const []const u8) !Args {
var parsed = Args{
.flags = HashMapFlags.init(allocator),
.positionals = ArrayList([]const u8).init(allocator),
diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig
index 90f5309faf92..b92ef4beaca3 100644
--- a/src-self-hosted/codegen.zig
+++ b/src-self-hosted/codegen.zig
@@ -46,7 +46,7 @@ pub async fn renderToLlvm(comp: *Compilation, fn_val: *Value.Fn, code: *ir.Code)
// Don't use ZIG_VERSION_STRING here. LLVM misparses it when it includes
// the git revision.
const producer = try std.Buffer.allocPrint(
- &code.arena.allocator,
+ code.arena.allocator(),
"zig {}.{}.{}",
u32(c.ZIG_VERSION_MAJOR),
u32(c.ZIG_VERSION_MINOR),
@@ -80,7 +80,7 @@ pub async fn renderToLlvm(comp: *Compilation, fn_val: *Value.Fn, code: *ir.Code)
.dibuilder = dibuilder,
.context = context,
.lock = event.Lock.init(comp.loop),
- .arena = &code.arena.allocator,
+ .arena = code.arena.allocator(),
};
try renderToLlvmModule(&ofile, fn_val, code);
@@ -142,9 +142,9 @@ pub const ObjectFile = struct {
dibuilder: *llvm.DIBuilder,
context: *llvm.Context,
lock: event.Lock,
- arena: *std.mem.Allocator,
+ arena: std.mem.Allocator,
- fn gpa(self: *ObjectFile) *std.mem.Allocator {
+ fn gpa(self: *ObjectFile) std.mem.Allocator {
return self.comp.gpa();
}
};
diff --git a/src-self-hosted/compilation.zig b/src-self-hosted/compilation.zig
index 7a30bbad9860..d0a01e34be11 100644
--- a/src-self-hosted/compilation.zig
+++ b/src-self-hosted/compilation.zig
@@ -101,7 +101,7 @@ pub const ZigCompiler = struct {
}
/// Must be called only once, ever. Sets global state.
- pub fn setLlvmArgv(allocator: *Allocator, llvm_argv: []const []const u8) !void {
+ pub fn setLlvmArgv(allocator: Allocator, llvm_argv: []const []const u8) !void {
if (llvm_argv.len != 0) {
var c_compatible_args = try std.cstr.NullTerminated2DArray.fromSlices(allocator, [][]const []const u8{
[][]const u8{"zig (LLVM option parsing)"},
@@ -348,7 +348,7 @@ pub const Compilation = struct {
zig_lib_dir: []const u8,
) !*Compilation {
var optional_comp: ?*Compilation = null;
- const handle = try async createAsync(
+ const handle = try async<&zig_compiler.loop.allocator> createAsync(
&optional_comp,
zig_compiler,
name,
@@ -1178,13 +1178,13 @@ pub const Compilation = struct {
}
/// General Purpose Allocator. Must free when done.
- fn gpa(self: Compilation) *mem.Allocator {
+ fn gpa(self: Compilation) mem.Allocator {
return self.loop.allocator;
}
/// Arena Allocator. Automatically freed when the Compilation is destroyed.
- fn arena(self: *Compilation) *mem.Allocator {
- return &self.arena_allocator.allocator;
+ fn arena(self: *Compilation) mem.Allocator {
+ return self.arena_allocator.allocator();
}
/// If the temporary directory for this compilation has not been created, it creates it.
@@ -1235,7 +1235,7 @@ pub const Compilation = struct {
const held = await (async self.zig_compiler.prng.acquire() catch unreachable);
defer held.release();
- held.value.random.bytes(rand_bytes[0..]);
+ held.value.random().bytes(rand_bytes[0..]);
}
var result: [12]u8 = undefined;
@@ -1383,7 +1383,7 @@ async fn addFnToLinkSet(comp: *Compilation, fn_val: *Value.Fn) void {
held.value.append(fn_val.link_set_node);
}
-fn getZigDir(allocator: *mem.Allocator) ![]u8 {
+fn getZigDir(allocator: mem.Allocator) ![]u8 {
return os.getAppDataDir(allocator, "zig");
}
@@ -1469,9 +1469,9 @@ async fn generateDeclFnProto(comp: *Compilation, fn_decl: *Decl.Fn) !void {
}
// TODO these are hacks which should probably be solved by the language
-fn getAwaitResult(allocator: *Allocator, handle: var) @typeInfo(@typeOf(handle)).Promise.child.? {
+fn getAwaitResult(allocator: Allocator, handle: var) @typeInfo(@typeOf(handle)).Promise.child.? {
var result: ?@typeInfo(@typeOf(handle)).Promise.child.? = null;
- cancel (async getAwaitResultAsync(handle, &result) catch unreachable);
+ cancel (async<&allocator> getAwaitResultAsync(handle, &result) catch unreachable);
return result.?;
}
diff --git a/src-self-hosted/errmsg.zig b/src-self-hosted/errmsg.zig
index fc49fad410b6..f9cc5ea73a5c 100644
--- a/src-self-hosted/errmsg.zig
+++ b/src-self-hosted/errmsg.zig
@@ -46,7 +46,7 @@ pub const Msg = struct {
const PathAndTree = struct {
span: Span,
tree: *ast.Tree,
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
};
const ScopeAndComp = struct {
@@ -56,7 +56,7 @@ pub const Msg = struct {
};
const Cli = struct {
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
};
pub fn destroy(self: *Msg) void {
@@ -80,7 +80,7 @@ pub const Msg = struct {
}
}
- fn getAllocator(self: *const Msg) *mem.Allocator {
+ fn getAllocator(self: *const Msg) mem.Allocator {
switch (self.data) {
Data.Cli => |cli| return cli.allocator,
Data.PathAndTree => |path_and_tree| {
@@ -163,7 +163,7 @@ pub const Msg = struct {
const realpath_copy = try mem.dupe(comp.gpa(), u8, tree_scope.root().realpath);
errdefer comp.gpa().free(realpath_copy);
- var out_stream = &std.io.BufferOutStream.init(&text_buf).stream;
+ var out_stream = std.io.BufferOutStream.init(&text_buf).outStream();
try parse_error.render(&tree_scope.tree.tokens, out_stream);
const msg = try comp.gpa().create(Msg);
@@ -191,7 +191,7 @@ pub const Msg = struct {
/// Caller owns returned Msg and must free with `allocator`
/// allocator will additionally be used for printing messages later.
pub fn createFromParseError(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
parse_error: *const ast.Error,
tree: *ast.Tree,
realpath: []const u8,
@@ -203,7 +203,7 @@ pub const Msg = struct {
const realpath_copy = try mem.dupe(allocator, u8, realpath);
errdefer allocator.free(realpath_copy);
- var out_stream = &std.io.BufferOutStream.init(&text_buf).stream;
+ var out_stream = std.io.BufferOutStream.init(&text_buf).outStream();
try parse_error.render(&tree.tokens, out_stream);
const msg = try allocator.create(Msg);
@@ -282,7 +282,7 @@ pub const Msg = struct {
Color.On => true,
Color.Off => false,
};
- var stream = &file.outStream().stream;
+ var stream = file.outStreamAdapter().outStream();
return msg.printToStream(stream, color_on);
}
};
diff --git a/src-self-hosted/introspect.zig b/src-self-hosted/introspect.zig
index 8f859a82cea5..900783aad151 100644
--- a/src-self-hosted/introspect.zig
+++ b/src-self-hosted/introspect.zig
@@ -7,7 +7,7 @@ const os = std.os;
const warn = std.debug.warn;
/// Caller must free result
-pub fn testZigInstallPrefix(allocator: *mem.Allocator, test_path: []const u8) ![]u8 {
+pub fn testZigInstallPrefix(allocator: mem.Allocator, test_path: []const u8) ![]u8 {
const test_zig_dir = try os.path.join(allocator, [][]const u8{ test_path, "lib", "zig" });
errdefer allocator.free(test_zig_dir);
@@ -21,7 +21,7 @@ pub fn testZigInstallPrefix(allocator: *mem.Allocator, test_path: []const u8) ![
}
/// Caller must free result
-pub fn findZigLibDir(allocator: *mem.Allocator) ![]u8 {
+pub fn findZigLibDir(allocator: mem.Allocator) ![]u8 {
const self_exe_path = try os.selfExeDirPathAlloc(allocator);
defer allocator.free(self_exe_path);
@@ -42,7 +42,7 @@ pub fn findZigLibDir(allocator: *mem.Allocator) ![]u8 {
return error.FileNotFound;
}
-pub fn resolveZigLibDir(allocator: *mem.Allocator) ![]u8 {
+pub fn resolveZigLibDir(allocator: mem.Allocator) ![]u8 {
return findZigLibDir(allocator) catch |err| {
warn(
\\Unable to find zig lib directory: {}.
@@ -55,6 +55,6 @@ pub fn resolveZigLibDir(allocator: *mem.Allocator) ![]u8 {
}
/// Caller must free result
-pub fn resolveZigCacheDir(allocator: *mem.Allocator) ![]u8 {
+pub fn resolveZigCacheDir(allocator: mem.Allocator) ![]u8 {
return std.mem.dupe(allocator, u8, "zig-cache");
}
diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig
index db89af7a42d9..26a091cf9224 100644
--- a/src-self-hosted/ir.zig
+++ b/src-self-hosted/ir.zig
@@ -964,7 +964,7 @@ pub const Code = struct {
tree_scope: *Scope.AstTree,
/// allocator is comp.gpa()
- pub fn destroy(self: *Code, allocator: *Allocator) void {
+ pub fn destroy(self: *Code, allocator: Allocator) void {
self.arena.deinit();
allocator.destroy(self);
}
@@ -1028,7 +1028,7 @@ pub const Builder = struct {
.return_type = null,
.tree_scope = tree_scope,
};
- code.basic_block_list = std.ArrayList(*BasicBlock).init(&code.arena.allocator);
+ code.basic_block_list = std.ArrayList(*BasicBlock).init(code.arena.allocator());
errdefer code.destroy(comp.gpa());
return Builder{
@@ -1762,8 +1762,8 @@ pub const Builder = struct {
}
}
- fn arena(self: *Builder) *Allocator {
- return &self.code.arena.allocator;
+ fn arena(self: *Builder) Allocator {
+ return self.code.arena.allocator();
}
fn buildExtra(
diff --git a/src-self-hosted/libc_installation.zig b/src-self-hosted/libc_installation.zig
index 6a530da1f02f..7ebba6e802a4 100644
--- a/src-self-hosted/libc_installation.zig
+++ b/src-self-hosted/libc_installation.zig
@@ -28,9 +28,9 @@ pub const LibCInstallation = struct {
pub fn parse(
self: *LibCInstallation,
- allocator: *std.mem.Allocator,
+ allocator: std.mem.Allocator,
libc_file: []const u8,
- stderr: *std.io.OutStream(std.os.File.WriteError),
+ stderr: std.io.OutStream(std.os.File.WriteError),
) !void {
self.initEmpty();
@@ -100,7 +100,7 @@ pub const LibCInstallation = struct {
}
}
- pub fn render(self: *const LibCInstallation, out: *std.io.OutStream(std.os.File.WriteError)) !void {
+ pub fn render(self: *const LibCInstallation, out: std.io.OutStream(std.os.File.WriteError)) !void {
@setEvalBranchQuota(4000);
try out.print(
\\# The directory that contains `stdlib.h`.
@@ -251,7 +251,7 @@ pub const LibCInstallation = struct {
for (searches) |search| {
result_buf.shrink(0);
- const stream = &std.io.BufferOutStream.init(&result_buf).stream;
+ const stream = std.io.BufferOutStream.init(&result_buf).outStream();
try stream.print("{}\\Include\\{}\\ucrt", search.path, search.version);
const stdlib_path = try std.os.path.join(
@@ -278,7 +278,7 @@ pub const LibCInstallation = struct {
for (searches) |search| {
result_buf.shrink(0);
- const stream = &std.io.BufferOutStream.init(&result_buf).stream;
+ const stream = std.io.BufferOutStream.init(&result_buf).outStream();
try stream.print("{}\\Lib\\{}\\ucrt\\", search.path, search.version);
switch (builtin.arch) {
builtin.Arch.i386 => try stream.write("x86"),
@@ -356,7 +356,7 @@ pub const LibCInstallation = struct {
for (searches) |search| {
result_buf.shrink(0);
- const stream = &std.io.BufferOutStream.init(&result_buf).stream;
+ const stream = std.io.BufferOutStream.init(&result_buf).outStream();
try stream.print("{}\\Lib\\{}\\um\\", search.path, search.version);
switch (builtin.arch) {
builtin.Arch.i386 => try stream.write("x86\\"),
diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig
index 5689ee7925a6..95f90176d186 100644
--- a/src-self-hosted/link.zig
+++ b/src-self-hosted/link.zig
@@ -33,13 +33,13 @@ pub async fn link(comp: *Compilation) !void {
.out_file_path = undefined,
};
defer ctx.arena.deinit();
- ctx.args = std.ArrayList([*]const u8).init(&ctx.arena.allocator);
- ctx.link_msg = std.Buffer.initNull(&ctx.arena.allocator);
+ ctx.args = std.ArrayList([*]const u8).init(ctx.arena.allocator());
+ ctx.link_msg = std.Buffer.initNull(ctx.arena.allocator());
if (comp.link_out_file) |out_file| {
- ctx.out_file_path = try std.Buffer.init(&ctx.arena.allocator, out_file);
+ ctx.out_file_path = try std.Buffer.init(ctx.arena.allocator(), out_file);
} else {
- ctx.out_file_path = try std.Buffer.init(&ctx.arena.allocator, comp.name.toSliceConst());
+ ctx.out_file_path = try std.Buffer.init(ctx.arena.allocator(), comp.name.toSliceConst());
switch (comp.kind) {
Compilation.Kind.Exe => {
try ctx.out_file_path.append(comp.target.exeFileExt());
@@ -214,10 +214,10 @@ fn constructLinkerArgsElf(ctx: *Context) !void {
if (ctx.comp.haveLibC()) {
try ctx.args.append(c"-L");
- try ctx.args.append((try std.cstr.addNullByte(&ctx.arena.allocator, ctx.libc.lib_dir.?)).ptr);
+ try ctx.args.append((try std.cstr.addNullByte(ctx.arena.allocator(), ctx.libc.lib_dir.?)).ptr);
try ctx.args.append(c"-L");
- try ctx.args.append((try std.cstr.addNullByte(&ctx.arena.allocator, ctx.libc.static_lib_dir.?)).ptr);
+ try ctx.args.append((try std.cstr.addNullByte(ctx.arena.allocator(), ctx.libc.static_lib_dir.?)).ptr);
if (!ctx.comp.is_static) {
const dl = blk: {
@@ -226,7 +226,7 @@ fn constructLinkerArgsElf(ctx: *Context) !void {
return error.LibCMissingDynamicLinker;
};
try ctx.args.append(c"-dynamic-linker");
- try ctx.args.append((try std.cstr.addNullByte(&ctx.arena.allocator, dl)).ptr);
+ try ctx.args.append((try std.cstr.addNullByte(ctx.arena.allocator(), dl)).ptr);
}
}
@@ -237,7 +237,7 @@ fn constructLinkerArgsElf(ctx: *Context) !void {
// .o files
for (ctx.comp.link_objects) |link_object| {
- const link_obj_with_null = try std.cstr.addNullByte(&ctx.arena.allocator, link_object);
+ const link_obj_with_null = try std.cstr.addNullByte(ctx.arena.allocator(), link_object);
try ctx.args.append(link_obj_with_null.ptr);
}
try addFnObjects(ctx);
@@ -311,8 +311,8 @@ fn constructLinkerArgsElf(ctx: *Context) !void {
}
fn addPathJoin(ctx: *Context, dirname: []const u8, basename: []const u8) !void {
- const full_path = try std.os.path.join(&ctx.arena.allocator, [][]const u8{ dirname, basename });
- const full_path_with_null = try std.cstr.addNullByte(&ctx.arena.allocator, full_path);
+ const full_path = try std.os.path.join(ctx.arena.allocator(), [][]const u8{ dirname, basename });
+ const full_path_with_null = try std.cstr.addNullByte(ctx.arena.allocator(), full_path);
try ctx.args.append(full_path_with_null.ptr);
}
@@ -338,13 +338,13 @@ fn constructLinkerArgsCoff(ctx: *Context) !void {
const is_library = ctx.comp.kind == Compilation.Kind.Lib;
- const out_arg = try std.fmt.allocPrint(&ctx.arena.allocator, "-OUT:{}\x00", ctx.out_file_path.toSliceConst());
+ const out_arg = try std.fmt.allocPrint(ctx.arena.allocator(), "-OUT:{}\x00", ctx.out_file_path.toSliceConst());
try ctx.args.append(out_arg.ptr);
if (ctx.comp.haveLibC()) {
- try ctx.args.append((try std.fmt.allocPrint(&ctx.arena.allocator, "-LIBPATH:{}\x00", ctx.libc.msvc_lib_dir.?)).ptr);
- try ctx.args.append((try std.fmt.allocPrint(&ctx.arena.allocator, "-LIBPATH:{}\x00", ctx.libc.kernel32_lib_dir.?)).ptr);
- try ctx.args.append((try std.fmt.allocPrint(&ctx.arena.allocator, "-LIBPATH:{}\x00", ctx.libc.lib_dir.?)).ptr);
+ try ctx.args.append((try std.fmt.allocPrint(ctx.arena.allocator(), "-LIBPATH:{}\x00", ctx.libc.msvc_lib_dir.?)).ptr);
+ try ctx.args.append((try std.fmt.allocPrint(ctx.arena.allocator(), "-LIBPATH:{}\x00", ctx.libc.kernel32_lib_dir.?)).ptr);
+ try ctx.args.append((try std.fmt.allocPrint(ctx.arena.allocator(), "-LIBPATH:{}\x00", ctx.libc.lib_dir.?)).ptr);
}
if (ctx.link_in_crt) {
@@ -352,17 +352,17 @@ fn constructLinkerArgsCoff(ctx: *Context) !void {
const d_str = if (ctx.comp.build_mode == builtin.Mode.Debug) "d" else "";
if (ctx.comp.is_static) {
- const cmt_lib_name = try std.fmt.allocPrint(&ctx.arena.allocator, "libcmt{}.lib\x00", d_str);
+ const cmt_lib_name = try std.fmt.allocPrint(ctx.arena.allocator(), "libcmt{}.lib\x00", d_str);
try ctx.args.append(cmt_lib_name.ptr);
} else {
- const msvcrt_lib_name = try std.fmt.allocPrint(&ctx.arena.allocator, "msvcrt{}.lib\x00", d_str);
+ const msvcrt_lib_name = try std.fmt.allocPrint(ctx.arena.allocator(), "msvcrt{}.lib\x00", d_str);
try ctx.args.append(msvcrt_lib_name.ptr);
}
- const vcruntime_lib_name = try std.fmt.allocPrint(&ctx.arena.allocator, "{}vcruntime{}.lib\x00", lib_str, d_str);
+ const vcruntime_lib_name = try std.fmt.allocPrint(ctx.arena.allocator(), "{}vcruntime{}.lib\x00", lib_str, d_str);
try ctx.args.append(vcruntime_lib_name.ptr);
- const crt_lib_name = try std.fmt.allocPrint(&ctx.arena.allocator, "{}ucrt{}.lib\x00", lib_str, d_str);
+ const crt_lib_name = try std.fmt.allocPrint(ctx.arena.allocator(), "{}ucrt{}.lib\x00", lib_str, d_str);
try ctx.args.append(crt_lib_name.ptr);
// Visual C++ 2015 Conformance Changes
@@ -394,7 +394,7 @@ fn constructLinkerArgsCoff(ctx: *Context) !void {
//}
for (ctx.comp.link_objects) |link_object| {
- const link_obj_with_null = try std.cstr.addNullByte(&ctx.arena.allocator, link_object);
+ const link_obj_with_null = try std.cstr.addNullByte(ctx.arena.allocator(), link_object);
try ctx.args.append(link_obj_with_null.ptr);
}
try addFnObjects(ctx);
@@ -505,7 +505,7 @@ fn constructLinkerArgsMachO(ctx: *Context) !void {
try ctx.args.append(c"-arch");
const darwin_arch_str = try std.cstr.addNullByte(
- &ctx.arena.allocator,
+ ctx.arena.allocator(),
ctx.comp.target.getDarwinArchString(),
);
try ctx.args.append(darwin_arch_str.ptr);
@@ -516,7 +516,7 @@ fn constructLinkerArgsMachO(ctx: *Context) !void {
DarwinPlatform.Kind.IPhoneOS => try ctx.args.append(c"-iphoneos_version_min"),
DarwinPlatform.Kind.IPhoneOSSimulator => try ctx.args.append(c"-ios_simulator_version_min"),
}
- const ver_str = try std.fmt.allocPrint(&ctx.arena.allocator, "{}.{}.{}\x00", platform.major, platform.minor, platform.micro);
+ const ver_str = try std.fmt.allocPrint(ctx.arena.allocator(), "{}.{}.{}\x00", platform.major, platform.minor, platform.micro);
try ctx.args.append(ver_str.ptr);
if (ctx.comp.kind == Compilation.Kind.Exe) {
@@ -571,7 +571,7 @@ fn constructLinkerArgsMachO(ctx: *Context) !void {
//}
for (ctx.comp.link_objects) |link_object| {
- const link_obj_with_null = try std.cstr.addNullByte(&ctx.arena.allocator, link_object);
+ const link_obj_with_null = try std.cstr.addNullByte(ctx.arena.allocator(), link_object);
try ctx.args.append(link_obj_with_null.ptr);
}
try addFnObjects(ctx);
@@ -592,10 +592,10 @@ fn constructLinkerArgsMachO(ctx: *Context) !void {
try ctx.args.append(c"-lSystem");
} else {
if (mem.indexOfScalar(u8, lib.name, '/') == null) {
- const arg = try std.fmt.allocPrint(&ctx.arena.allocator, "-l{}\x00", lib.name);
+ const arg = try std.fmt.allocPrint(ctx.arena.allocator(), "-l{}\x00", lib.name);
try ctx.args.append(arg.ptr);
} else {
- const arg = try std.cstr.addNullByte(&ctx.arena.allocator, lib.name);
+ const arg = try std.cstr.addNullByte(ctx.arena.allocator(), lib.name);
try ctx.args.append(arg.ptr);
}
}
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index cbbf73f3f567..a7798d46863d 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -21,8 +21,8 @@ const errmsg = @import("errmsg.zig");
const LibCInstallation = @import("libc_installation.zig").LibCInstallation;
var stderr_file: os.File = undefined;
-var stderr: *io.OutStream(os.File.WriteError) = undefined;
-var stdout: *io.OutStream(os.File.WriteError) = undefined;
+var stderr: io.OutStream(os.File.WriteError) = undefined;
+var stdout: io.OutStream(os.File.WriteError) = undefined;
pub const max_src_size = 2 * 1024 * 1024 * 1024; // 2 GiB
@@ -45,7 +45,7 @@ const usage =
const Command = struct {
name: []const u8,
- exec: fn (*Allocator, []const []const u8) anyerror!void,
+ exec: fn (Allocator, []const []const u8) anyerror!void,
};
pub fn main() !void {
@@ -55,12 +55,12 @@ pub fn main() !void {
const allocator = std.heap.c_allocator;
var stdout_file = try std.io.getStdOut();
- var stdout_out_stream = stdout_file.outStream();
- stdout = &stdout_out_stream.stream;
+ var stdout_out_stream = stdout_file.outStreamAdapter();
+ stdout = stdout_out_stream.outStream();
stderr_file = try std.io.getStdErr();
- var stderr_out_stream = stderr_file.outStream();
- stderr = &stderr_out_stream.stream;
+ var stderr_out_stream = stderr_file.outStreamAdapter();
+ stderr = stderr_out_stream.outStream();
const args = try os.argsAlloc(allocator);
// TODO I'm getting unreachable code here, which shouldn't happen
@@ -250,7 +250,7 @@ const args_build_generic = []Flag{
Flag.Arg1("--ver-patch"),
};
-fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Compilation.Kind) !void {
+fn buildOutputType(allocator: Allocator, args: []const []const u8, out_type: Compilation.Kind) !void {
var flags = try Args.parse(allocator, args_build_generic, args);
defer flags.deinit();
@@ -464,7 +464,7 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Co
comp.link_objects = link_objects;
comp.start();
- const process_build_events_handle = try async processBuildEvents(comp, color);
+ const process_build_events_handle = try async<&loop.allocator> processBuildEvents(comp, color);
defer cancel process_build_events_handle;
loop.run();
}
@@ -494,15 +494,15 @@ async fn processBuildEvents(comp: *Compilation, color: errmsg.Color) void {
}
}
-fn cmdBuildExe(allocator: *Allocator, args: []const []const u8) !void {
+fn cmdBuildExe(allocator: Allocator, args: []const []const u8) !void {
return buildOutputType(allocator, args, Compilation.Kind.Exe);
}
-fn cmdBuildLib(allocator: *Allocator, args: []const []const u8) !void {
+fn cmdBuildLib(allocator: Allocator, args: []const []const u8) !void {
return buildOutputType(allocator, args, Compilation.Kind.Lib);
}
-fn cmdBuildObj(allocator: *Allocator, args: []const []const u8) !void {
+fn cmdBuildObj(allocator: Allocator, args: []const []const u8) !void {
return buildOutputType(allocator, args, Compilation.Kind.Obj);
}
@@ -543,7 +543,7 @@ const Fmt = struct {
const SeenMap = std.HashMap([]const u8, void, mem.hash_slice_u8, mem.eql_slice_u8);
};
-fn parseLibcPaths(allocator: *Allocator, libc: *LibCInstallation, libc_paths_file: []const u8) void {
+fn parseLibcPaths(allocator: Allocator, libc: *LibCInstallation, libc_paths_file: []const u8) void {
libc.parse(allocator, libc_paths_file, stderr) catch |err| {
stderr.print(
"Unable to parse libc path file '{}': {}.\n" ++
@@ -555,7 +555,7 @@ fn parseLibcPaths(allocator: *Allocator, libc: *LibCInstallation, libc_paths_fil
};
}
-fn cmdLibC(allocator: *Allocator, args: []const []const u8) !void {
+fn cmdLibC(allocator: Allocator, args: []const []const u8) !void {
switch (args.len) {
0 => {},
1 => {
@@ -576,7 +576,7 @@ fn cmdLibC(allocator: *Allocator, args: []const []const u8) !void {
var zig_compiler = try ZigCompiler.init(&loop);
defer zig_compiler.deinit();
- const handle = try async findLibCAsync(&zig_compiler);
+ const handle = try async<&loop.allocator> findLibCAsync(&zig_compiler);
defer cancel handle;
loop.run();
@@ -590,7 +590,7 @@ async fn findLibCAsync(zig_compiler: *ZigCompiler) void {
libc.render(stdout) catch os.exit(1);
}
-fn cmdFmt(allocator: *Allocator, args: []const []const u8) !void {
+fn cmdFmt(allocator: Allocator, args: []const []const u8) !void {
var flags = try Args.parse(allocator, args_fmt_spec, args);
defer flags.deinit();
@@ -620,9 +620,9 @@ fn cmdFmt(allocator: *Allocator, args: []const []const u8) !void {
}
var stdin_file = try io.getStdIn();
- var stdin = stdin_file.inStream();
+ var stdin = stdin_file.inStreamAdapter();
- const source_code = try stdin.stream.readAllAlloc(allocator, max_src_size);
+ const source_code = try stdin.inStream().readAllAlloc(allocator, max_src_size);
defer allocator.free(source_code);
const tree = std.zig.parse(allocator, source_code) catch |err| {
@@ -661,7 +661,7 @@ fn cmdFmt(allocator: *Allocator, args: []const []const u8) !void {
defer loop.deinit();
var result: FmtError!void = undefined;
- const main_handle = try async asyncFmtMainChecked(
+ const main_handle = try async<&allocator> asyncFmtMainChecked(
&result,
&loop,
&flags,
@@ -798,7 +798,7 @@ async fn fmtPath(fmt: *Fmt, file_path_ref: []const u8, check_mode: bool) FmtErro
const baf = try io.BufferedAtomicFile.create(fmt.loop.allocator, file_path);
defer baf.destroy();
- const anything_changed = try std.zig.render(fmt.loop.allocator, baf.stream(), tree);
+ const anything_changed = try std.zig.render(fmt.loop.allocator, baf.outStream(), tree);
if (anything_changed) {
try stderr.print("{}\n", file_path);
try baf.finish();
@@ -808,7 +808,7 @@ async fn fmtPath(fmt: *Fmt, file_path_ref: []const u8, check_mode: bool) FmtErro
// cmd:targets /////////////////////////////////////////////////////////////////////////////////////
-fn cmdTargets(allocator: *Allocator, args: []const []const u8) !void {
+fn cmdTargets(allocator: Allocator, args: []const []const u8) !void {
try stdout.write("Architectures:\n");
{
comptime var i: usize = 0;
@@ -848,13 +848,13 @@ fn cmdTargets(allocator: *Allocator, args: []const []const u8) !void {
}
}
-fn cmdVersion(allocator: *Allocator, args: []const []const u8) !void {
+fn cmdVersion(allocator: Allocator, args: []const []const u8) !void {
try stdout.print("{}\n", std.cstr.toSliceConst(c.ZIG_VERSION_STRING));
}
const args_test_spec = []Flag{Flag.Bool("--help")};
-fn cmdHelp(allocator: *Allocator, args: []const []const u8) !void {
+fn cmdHelp(allocator: Allocator, args: []const []const u8) !void {
try stdout.write(usage);
}
@@ -875,7 +875,7 @@ pub const info_zen =
\\
;
-fn cmdZen(allocator: *Allocator, args: []const []const u8) !void {
+fn cmdZen(allocator: Allocator, args: []const []const u8) !void {
try stdout.write(info_zen);
}
@@ -888,7 +888,7 @@ const usage_internal =
\\
;
-fn cmdInternal(allocator: *Allocator, args: []const []const u8) !void {
+fn cmdInternal(allocator: Allocator, args: []const []const u8) !void {
if (args.len == 0) {
try stderr.write(usage_internal);
os.exit(1);
@@ -910,7 +910,7 @@ fn cmdInternal(allocator: *Allocator, args: []const []const u8) !void {
try stderr.write(usage_internal);
}
-fn cmdInternalBuildInfo(allocator: *Allocator, args: []const []const u8) !void {
+fn cmdInternalBuildInfo(allocator: Allocator, args: []const []const u8) !void {
try stdout.print(
\\ZIG_CMAKE_BINARY_DIR {}
\\ZIG_CXX_COMPILER {}
@@ -939,7 +939,7 @@ const CliPkg = struct {
children: ArrayList(*CliPkg),
parent: ?*CliPkg,
- pub fn init(allocator: *mem.Allocator, name: []const u8, path: []const u8, parent: ?*CliPkg) !*CliPkg {
+ pub fn init(allocator: mem.Allocator, name: []const u8, path: []const u8, parent: ?*CliPkg) !*CliPkg {
var pkg = try allocator.create(CliPkg);
pkg.* = CliPkg{
.name = name,
diff --git a/src-self-hosted/package.zig b/src-self-hosted/package.zig
index 0d31731b551e..6c55775d6708 100644
--- a/src-self-hosted/package.zig
+++ b/src-self-hosted/package.zig
@@ -14,7 +14,7 @@ pub const Package = struct {
/// makes internal copies of root_src_dir and root_src_path
/// allocator should be an arena allocator because Package never frees anything
- pub fn create(allocator: *mem.Allocator, root_src_dir: []const u8, root_src_path: []const u8) !*Package {
+ pub fn create(allocator: mem.Allocator, root_src_dir: []const u8, root_src_path: []const u8) !*Package {
const ptr = try allocator.create(Package);
ptr.* = Package{
.root_src_dir = try Buffer.init(allocator, root_src_dir),
diff --git a/src-self-hosted/stage1.zig b/src-self-hosted/stage1.zig
index 7a44e8f3a03b..e70892c5d8dd 100644
--- a/src-self-hosted/stage1.zig
+++ b/src-self-hosted/stage1.zig
@@ -105,7 +105,7 @@ export fn stage2_free_clang_errors(errors_ptr: [*]translate_c.ClangErrMsg, error
}
export fn stage2_render_ast(tree: *ast.Tree, output_file: *FILE) Error {
- const c_out_stream = &std.io.COutStream.init(output_file).stream;
+ const c_out_stream = std.io.COutStream.init(output_file).outStream();
_ = std.zig.render(std.heap.c_allocator, c_out_stream, tree) catch |e| switch (e) {
error.SystemResources => return Error.SystemResources,
error.OperationAborted => return Error.OperationAborted,
@@ -144,12 +144,12 @@ fn fmtMain(argc: c_int, argv: [*]const [*]const u8) !void {
}
var stdout_file = try std.io.getStdOut();
- var stdout_out_stream = stdout_file.outStream();
- stdout = &stdout_out_stream.stream;
+ var stdout_out_stream = stdout_file.outStreamAdapter();
+ stdout = stdout_out_stream.outStream();
stderr_file = try std.io.getStdErr();
- var stderr_out_stream = stderr_file.outStream();
- stderr = &stderr_out_stream.stream;
+ var stderr_out_stream = stderr_file.outStreamAdapter();
+ stderr = stderr_out_stream.outStream();
const args = args_list.toSliceConst();
var flags = try Args.parse(allocator, self_hosted_main.args_fmt_spec, args[2..]);
@@ -181,9 +181,9 @@ fn fmtMain(argc: c_int, argv: [*]const [*]const u8) !void {
}
var stdin_file = try io.getStdIn();
- var stdin = stdin_file.inStream();
+ var stdin = stdin_file.inStreamAdapter();
- const source_code = try stdin.stream.readAllAlloc(allocator, self_hosted_main.max_src_size);
+ const source_code = try stdin.inStream().readAllAlloc(allocator, self_hosted_main.max_src_size);
defer allocator.free(source_code);
const tree = std.zig.parse(allocator, source_code) catch |err| {
@@ -307,7 +307,7 @@ fn fmtPath(fmt: *Fmt, file_path_ref: []const u8, check_mode: bool) FmtError!void
const baf = try io.BufferedAtomicFile.create(fmt.allocator, file_path);
defer baf.destroy();
- const anything_changed = try std.zig.render(fmt.allocator, baf.stream(), tree);
+ const anything_changed = try std.zig.render(fmt.allocator, baf.outStream(), tree);
if (anything_changed) {
try stderr.print("{}\n", file_path);
try baf.finish();
@@ -319,13 +319,13 @@ const Fmt = struct {
seen: SeenMap,
any_error: bool,
color: errmsg.Color,
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
const SeenMap = std.HashMap([]const u8, void, mem.hash_slice_u8, mem.eql_slice_u8);
};
fn printErrMsgToFile(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
parse_error: *const ast.Error,
tree: *ast.Tree,
path: []const u8,
@@ -349,11 +349,11 @@ fn printErrMsgToFile(
const end_loc = tree.tokenLocationPtr(first_token.end, last_token);
var text_buf = try std.Buffer.initSize(allocator, 0);
- var out_stream = &std.io.BufferOutStream.init(&text_buf).stream;
+ var out_stream = std.io.BufferOutStream.init(&text_buf).outStream();
try parse_error.render(&tree.tokens, out_stream);
const text = text_buf.toOwnedSlice();
- const stream = &file.outStream().stream;
+ const stream = file.outStreamAdapter().outStream();
if (!color_on) {
try stream.print(
"{}:{}:{}: error: {}\n",
@@ -392,5 +392,5 @@ const Flag = arg.Flag;
const errmsg = @import("errmsg.zig");
var stderr_file: os.File = undefined;
-var stderr: *io.OutStream(os.File.WriteError) = undefined;
-var stdout: *io.OutStream(os.File.WriteError) = undefined;
+var stderr: io.OutStream(os.File.WriteError) = undefined;
+var stdout: io.OutStream(os.File.WriteError) = undefined;
diff --git a/src-self-hosted/target.zig b/src-self-hosted/target.zig
index 15f09e8da915..8845f118ebc7 100644
--- a/src-self-hosted/target.zig
+++ b/src-self-hosted/target.zig
@@ -112,7 +112,7 @@ pub const Target = union(enum) {
llvm.InitializeAllAsmParsers();
}
- pub fn getTriple(self: Target, allocator: *std.mem.Allocator) !std.Buffer {
+ pub fn getTriple(self: Target, allocator: std.mem.Allocator) !std.Buffer {
var result = try std.Buffer.initSize(allocator, 0);
errdefer result.deinit();
@@ -128,7 +128,7 @@ pub const Target = union(enum) {
// using the target triple `wasm32-unknown-unknown-unknown`.
const env_name = if (self.isWasm()) "wasm" else @tagName(self.getAbi());
- var out = &std.io.BufferOutStream.init(&result).stream;
+ var out = std.io.BufferOutStream.init(&result).outStream();
try out.print("{}-unknown-{}-{}", @tagName(self.getArch()), @tagName(self.getOs()), env_name);
return result;
diff --git a/src-self-hosted/translate_c.zig b/src-self-hosted/translate_c.zig
index 9f42f198b20c..860fc211421c 100644
--- a/src-self-hosted/translate_c.zig
+++ b/src-self-hosted/translate_c.zig
@@ -108,8 +108,8 @@ const Context = struct {
global_scope: *Scope.Root,
mode: Mode,
- fn a(c: *Context) *std.mem.Allocator {
- return &c.tree.arena_allocator.allocator;
+ fn a(c: *Context) std.mem.Allocator {
+ return c.tree.arena_allocator.allocator();
}
/// Convert a null-terminated C string to a slice allocated in the arena
@@ -130,7 +130,7 @@ const Context = struct {
};
pub fn translate(
- backing_allocator: *std.mem.Allocator,
+ backing_allocator: std.mem.Allocator,
args_begin: [*]?[*]const u8,
args_end: [*]?[*]const u8,
mode: Mode,
@@ -151,7 +151,7 @@ pub fn translate(
var tree_arena = std.heap.ArenaAllocator.init(backing_allocator);
errdefer tree_arena.deinit();
- var arena = &tree_arena.allocator;
+ var arena = tree_arena.allocator();
const root_node = try arena.create(ast.Node.Root);
root_node.* = ast.Node.Root{
@@ -171,7 +171,7 @@ pub fn translate(
.errors = ast.Tree.ErrorList.init(arena),
};
tree.arena_allocator = tree_arena;
- arena = &tree.arena_allocator.allocator;
+ arena = tree.arena_allocator.allocator();
var source_buffer = try std.Buffer.initSize(arena, 0);
diff --git a/src-self-hosted/type.zig b/src-self-hosted/type.zig
index 5a8dc47ef9ad..512eb4f87e00 100644
--- a/src-self-hosted/type.zig
+++ b/src-self-hosted/type.zig
@@ -50,7 +50,7 @@ pub const Type = struct {
pub fn getLlvmType(
base: *Type,
- allocator: *Allocator,
+ allocator: Allocator,
llvm_context: *llvm.Context,
) (error{OutOfMemory}!*llvm.Type) {
switch (base.id) {
@@ -218,7 +218,7 @@ pub const Type = struct {
comp.gpa().destroy(self);
}
- pub fn getLlvmType(self: *Struct, allocator: *Allocator, llvm_context: *llvm.Context) *llvm.Type {
+ pub fn getLlvmType(self: *Struct, allocator: Allocator, llvm_context: *llvm.Context) *llvm.Type {
@panic("TODO");
}
};
@@ -425,14 +425,14 @@ pub const Type = struct {
var name_buf = try std.Buffer.initSize(comp.gpa(), 0);
defer name_buf.deinit();
- const name_stream = &std.io.BufferOutStream.init(&name_buf).stream;
+ const name_stream = std.io.BufferOutStream.init(&name_buf).outStream();
switch (key.data) {
Kind.Generic => |generic| {
self.non_key = NonKey{ .Generic = {} };
switch (generic.cc) {
CallingConvention.Async => |async_allocator_type| {
- try name_stream.print("async<{}> ", async_allocator_type.name);
+ try name_stream.print("async<&{}> ", async_allocator_type.name);
},
else => {
const cc_str = ccFnTypeStr(generic.cc);
@@ -496,7 +496,7 @@ pub const Type = struct {
comp.gpa().destroy(self);
}
- pub fn getLlvmType(self: *Fn, allocator: *Allocator, llvm_context: *llvm.Context) !*llvm.Type {
+ pub fn getLlvmType(self: *Fn, allocator: Allocator, llvm_context: *llvm.Context) !*llvm.Type {
const normal = &self.key.data.Normal;
const llvm_return_type = switch (normal.return_type.id) {
Type.Id.Void => llvm.VoidTypeInContext(llvm_context) orelse return error.OutOfMemory,
@@ -559,7 +559,7 @@ pub const Type = struct {
comp.gpa().destroy(self);
}
- pub fn getLlvmType(self: *Bool, allocator: *Allocator, llvm_context: *llvm.Context) *llvm.Type {
+ pub fn getLlvmType(self: *Bool, allocator: Allocator, llvm_context: *llvm.Context) *llvm.Type {
@panic("TODO");
}
};
@@ -658,7 +658,7 @@ pub const Type = struct {
comp.gpa().destroy(self);
}
- pub fn getLlvmType(self: *Int, allocator: *Allocator, llvm_context: *llvm.Context) !*llvm.Type {
+ pub fn getLlvmType(self: *Int, allocator: Allocator, llvm_context: *llvm.Context) !*llvm.Type {
return llvm.IntTypeInContext(llvm_context, self.key.bit_count) orelse return error.OutOfMemory;
}
};
@@ -670,7 +670,7 @@ pub const Type = struct {
comp.gpa().destroy(self);
}
- pub fn getLlvmType(self: *Float, allocator: *Allocator, llvm_context: *llvm.Context) *llvm.Type {
+ pub fn getLlvmType(self: *Float, allocator: Allocator, llvm_context: *llvm.Context) *llvm.Type {
@panic("TODO");
}
};
@@ -836,7 +836,7 @@ pub const Type = struct {
return self;
}
- pub fn getLlvmType(self: *Pointer, allocator: *Allocator, llvm_context: *llvm.Context) !*llvm.Type {
+ pub fn getLlvmType(self: *Pointer, allocator: Allocator, llvm_context: *llvm.Context) !*llvm.Type {
const elem_llvm_type = try self.key.child_type.getLlvmType(allocator, llvm_context);
return llvm.PointerType(elem_llvm_type, 0) orelse return error.OutOfMemory;
}
@@ -904,7 +904,7 @@ pub const Type = struct {
return self;
}
- pub fn getLlvmType(self: *Array, allocator: *Allocator, llvm_context: *llvm.Context) !*llvm.Type {
+ pub fn getLlvmType(self: *Array, allocator: Allocator, llvm_context: *llvm.Context) !*llvm.Type {
const elem_llvm_type = try self.key.elem_type.getLlvmType(allocator, llvm_context);
return llvm.ArrayType(elem_llvm_type, @intCast(c_uint, self.key.len)) orelse return error.OutOfMemory;
}
@@ -917,7 +917,7 @@ pub const Type = struct {
comp.gpa().destroy(self);
}
- pub fn getLlvmType(self: *Vector, allocator: *Allocator, llvm_context: *llvm.Context) *llvm.Type {
+ pub fn getLlvmType(self: *Vector, allocator: Allocator, llvm_context: *llvm.Context) *llvm.Type {
@panic("TODO");
}
};
@@ -981,7 +981,7 @@ pub const Type = struct {
comp.gpa().destroy(self);
}
- pub fn getLlvmType(self: *Optional, allocator: *Allocator, llvm_context: *llvm.Context) *llvm.Type {
+ pub fn getLlvmType(self: *Optional, allocator: Allocator, llvm_context: *llvm.Context) *llvm.Type {
@panic("TODO");
}
};
@@ -993,7 +993,7 @@ pub const Type = struct {
comp.gpa().destroy(self);
}
- pub fn getLlvmType(self: *ErrorUnion, allocator: *Allocator, llvm_context: *llvm.Context) *llvm.Type {
+ pub fn getLlvmType(self: *ErrorUnion, allocator: Allocator, llvm_context: *llvm.Context) *llvm.Type {
@panic("TODO");
}
};
@@ -1005,7 +1005,7 @@ pub const Type = struct {
comp.gpa().destroy(self);
}
- pub fn getLlvmType(self: *ErrorSet, allocator: *Allocator, llvm_context: *llvm.Context) *llvm.Type {
+ pub fn getLlvmType(self: *ErrorSet, allocator: Allocator, llvm_context: *llvm.Context) *llvm.Type {
@panic("TODO");
}
};
@@ -1017,7 +1017,7 @@ pub const Type = struct {
comp.gpa().destroy(self);
}
- pub fn getLlvmType(self: *Enum, allocator: *Allocator, llvm_context: *llvm.Context) *llvm.Type {
+ pub fn getLlvmType(self: *Enum, allocator: Allocator, llvm_context: *llvm.Context) *llvm.Type {
@panic("TODO");
}
};
@@ -1029,7 +1029,7 @@ pub const Type = struct {
comp.gpa().destroy(self);
}
- pub fn getLlvmType(self: *Union, allocator: *Allocator, llvm_context: *llvm.Context) *llvm.Type {
+ pub fn getLlvmType(self: *Union, allocator: Allocator, llvm_context: *llvm.Context) *llvm.Type {
@panic("TODO");
}
};
@@ -1041,7 +1041,7 @@ pub const Type = struct {
comp.gpa().destroy(self);
}
- pub fn getLlvmType(self: *BoundFn, allocator: *Allocator, llvm_context: *llvm.Context) *llvm.Type {
+ pub fn getLlvmType(self: *BoundFn, allocator: Allocator, llvm_context: *llvm.Context) *llvm.Type {
@panic("TODO");
}
};
@@ -1061,7 +1061,7 @@ pub const Type = struct {
comp.gpa().destroy(self);
}
- pub fn getLlvmType(self: *Opaque, allocator: *Allocator, llvm_context: *llvm.Context) *llvm.Type {
+ pub fn getLlvmType(self: *Opaque, allocator: Allocator, llvm_context: *llvm.Context) *llvm.Type {
@panic("TODO");
}
};
@@ -1073,7 +1073,7 @@ pub const Type = struct {
comp.gpa().destroy(self);
}
- pub fn getLlvmType(self: *Promise, allocator: *Allocator, llvm_context: *llvm.Context) *llvm.Type {
+ pub fn getLlvmType(self: *Promise, allocator: Allocator, llvm_context: *llvm.Context) *llvm.Type {
@panic("TODO");
}
};
@@ -1085,9 +1085,9 @@ fn hashAny(x: var, comptime seed: u64) u32 {
comptime var rng = comptime std.rand.DefaultPrng.init(seed);
const unsigned_x = @bitCast(@IntType(false, info.bits), x);
if (info.bits <= 32) {
- return u32(unsigned_x) *% comptime rng.random.scalar(u32);
+ return u32(unsigned_x) *% comptime rng.random().scalar(u32);
} else {
- return @truncate(u32, unsigned_x *% comptime rng.random.scalar(@typeOf(unsigned_x)));
+ return @truncate(u32, unsigned_x *% comptime rng.random().scalar(@typeOf(unsigned_x)));
}
},
builtin.TypeId.Pointer => |info| {
@@ -1101,7 +1101,7 @@ fn hashAny(x: var, comptime seed: u64) u32 {
builtin.TypeId.Enum => return hashAny(@enumToInt(x), seed),
builtin.TypeId.Bool => {
comptime var rng = comptime std.rand.DefaultPrng.init(seed);
- const vals = comptime [2]u32{ rng.random.scalar(u32), rng.random.scalar(u32) };
+ const vals = comptime [2]u32{ rng.random().scalar(u32), rng.random().scalar(u32) };
return vals[@boolToInt(x)];
},
builtin.TypeId.Optional => {
diff --git a/src/analyze.cpp b/src/analyze.cpp
index d6396832c40a..201d02bcfc66 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -879,7 +879,7 @@ ZigType *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) {
buf_resize(&fn_type->name, 0);
if (fn_type->data.fn.fn_type_id.cc == CallingConventionAsync) {
assert(fn_type_id->async_allocator_type != nullptr);
- buf_appendf(&fn_type->name, "async<%s> ", buf_ptr(&fn_type_id->async_allocator_type->name));
+ buf_appendf(&fn_type->name, "async<&%s> ", buf_ptr(&fn_type_id->async_allocator_type->name));
} else {
const char *cc_str = calling_convention_fn_type_str(fn_type->data.fn.fn_type_id.cc);
buf_appendf(&fn_type->name, "%s", cc_str);
diff --git a/std/array_list.zig b/std/array_list.zig
index ca7d5f911ed8..d4cbe391cce8 100644
--- a/std/array_list.zig
+++ b/std/array_list.zig
@@ -18,10 +18,10 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
/// you uninitialized memory.
items: []align(A) T,
len: usize,
- allocator: *Allocator,
+ allocator: Allocator,
/// Deinitialize with `deinit` or use `toOwnedSlice`.
- pub fn init(allocator: *Allocator) Self {
+ pub fn init(allocator: Allocator) Self {
return Self{
.items = []align(A) T{},
.len = 0,
@@ -69,7 +69,7 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
/// ArrayList takes ownership of the passed in slice. The slice must have been
/// allocated with `allocator`.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
- pub fn fromOwnedSlice(allocator: *Allocator, slice: []align(A) T) Self {
+ pub fn fromOwnedSlice(allocator: Allocator, slice: []align(A) T) Self {
return Self{
.items = slice,
.len = slice.len,
@@ -221,7 +221,7 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
test "std.ArrayList.init" {
var bytes: [1024]u8 = undefined;
- const allocator = &std.heap.FixedBufferAllocator.init(bytes[0..]).allocator;
+ const allocator = std.heap.FixedBufferAllocator.init(bytes[0..]).allocator();
var list = ArrayList(i32).init(allocator);
defer list.deinit();
@@ -232,7 +232,7 @@ test "std.ArrayList.init" {
test "std.ArrayList.basic" {
var bytes: [1024]u8 = undefined;
- const allocator = &std.heap.FixedBufferAllocator.init(bytes[0..]).allocator;
+ const allocator = std.heap.FixedBufferAllocator.init(bytes[0..]).allocator();
var list = ArrayList(i32).init(allocator);
defer list.deinit();
diff --git a/std/atomic/queue.zig b/std/atomic/queue.zig
index 431a96e64b2e..f23f75e3cb48 100644
--- a/std/atomic/queue.zig
+++ b/std/atomic/queue.zig
@@ -105,15 +105,15 @@ pub fn Queue(comptime T: type) type {
pub fn dump(self: *Self) void {
var stderr_file = std.io.getStdErr() catch return;
- const stderr = &stderr_file.outStream().stream;
+ const stderr = stderr_file.outStreamAdapter().outStream();
const Error = @typeInfo(@typeOf(stderr)).Pointer.child.Error;
self.dumpToStream(Error, stderr) catch return;
}
- pub fn dumpToStream(self: *Self, comptime Error: type, stream: *std.io.OutStream(Error)) Error!void {
+ pub fn dumpToStream(self: *Self, comptime Error: type, stream: std.io.OutStream(Error)) Error!void {
const S = struct {
- fn dumpRecursive(s: *std.io.OutStream(Error), optional_node: ?*Node, indent: usize) Error!void {
+ fn dumpRecursive(s: std.io.OutStream(Error), optional_node: ?*Node, indent: usize) Error!void {
try s.writeByteNTimes(' ', indent);
if (optional_node) |node| {
try s.print("0x{x}={}\n", @ptrToInt(node), node.data);
@@ -135,7 +135,7 @@ pub fn Queue(comptime T: type) type {
}
const Context = struct {
- allocator: *std.mem.Allocator,
+ allocator: std.mem.Allocator,
queue: *Queue(i32),
put_sum: isize,
get_sum: isize,
@@ -155,11 +155,11 @@ test "std.atomic.Queue" {
var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit();
- var plenty_of_memory = try direct_allocator.allocator.alloc(u8, 300 * 1024);
- defer direct_allocator.allocator.free(plenty_of_memory);
+ var plenty_of_memory = try direct_allocator.allocator().alloc(u8, 300 * 1024);
+ defer direct_allocator.allocator().free(plenty_of_memory);
var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
- var a = &fixed_buffer_allocator.allocator;
+ var a = fixed_buffer_allocator.allocator();
var queue = Queue(i32).init();
var context = Context{
@@ -221,7 +221,7 @@ fn startPuts(ctx: *Context) u8 {
var r = std.rand.DefaultPrng.init(0xdeadbeef);
while (put_count != 0) : (put_count -= 1) {
std.os.time.sleep(1); // let the os scheduler be our fuzz
- const x = @bitCast(i32, r.random.scalar(u32));
+ const x = @bitCast(i32, r.random().scalar(u32));
const node = ctx.allocator.create(Queue(i32).Node) catch unreachable;
node.* = Queue(i32).Node{
.prev = undefined,
@@ -311,7 +311,7 @@ test "std.atomic.Queue dump" {
// Test empty stream
sos.reset();
- try queue.dumpToStream(SliceOutStream.Error, &sos.stream);
+ try queue.dumpToStream(SliceOutStream.Error, sos.outStream());
expect(mem.eql(u8, buffer[0..sos.pos],
\\head: (null)
\\tail: (null)
@@ -327,7 +327,7 @@ test "std.atomic.Queue dump" {
queue.put(&node_0);
sos.reset();
- try queue.dumpToStream(SliceOutStream.Error, &sos.stream);
+ try queue.dumpToStream(SliceOutStream.Error, sos.outStream());
var expected = try std.fmt.bufPrint(expected_buffer[0..],
\\head: 0x{x}=1
@@ -347,7 +347,7 @@ test "std.atomic.Queue dump" {
queue.put(&node_1);
sos.reset();
- try queue.dumpToStream(SliceOutStream.Error, &sos.stream);
+ try queue.dumpToStream(SliceOutStream.Error, sos.outStream());
expected = try std.fmt.bufPrint(expected_buffer[0..],
\\head: 0x{x}=1
diff --git a/std/atomic/stack.zig b/std/atomic/stack.zig
index 8ae6c997aad6..eff66efebae6 100644
--- a/std/atomic/stack.zig
+++ b/std/atomic/stack.zig
@@ -70,7 +70,7 @@ pub fn Stack(comptime T: type) type {
const std = @import("../std.zig");
const Context = struct {
- allocator: *std.mem.Allocator,
+ allocator: std.mem.Allocator,
stack: *Stack(i32),
put_sum: isize,
get_sum: isize,
@@ -89,11 +89,11 @@ test "std.atomic.stack" {
var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit();
- var plenty_of_memory = try direct_allocator.allocator.alloc(u8, 300 * 1024);
- defer direct_allocator.allocator.free(plenty_of_memory);
+ var plenty_of_memory = try direct_allocator.allocator().alloc(u8, 300 * 1024);
+ defer direct_allocator.allocator().free(plenty_of_memory);
var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
- var a = &fixed_buffer_allocator.allocator;
+ var a = fixed_buffer_allocator.allocator();
var stack = Stack(i32).init();
var context = Context{
@@ -155,7 +155,7 @@ fn startPuts(ctx: *Context) u8 {
var r = std.rand.DefaultPrng.init(0xdeadbeef);
while (put_count != 0) : (put_count -= 1) {
std.os.time.sleep(1); // let the os scheduler be our fuzz
- const x = @bitCast(i32, r.random.scalar(u32));
+ const x = @bitCast(i32, r.random().scalar(u32));
const node = ctx.allocator.create(Stack(i32).Node) catch unreachable;
node.* = Stack(i32).Node{
.next = undefined,
diff --git a/std/buf_map.zig b/std/buf_map.zig
index be0666d972fa..b2a2531f2e11 100644
--- a/std/buf_map.zig
+++ b/std/buf_map.zig
@@ -11,7 +11,7 @@ pub const BufMap = struct {
const BufMapHashMap = HashMap([]const u8, []const u8, mem.hash_slice_u8, mem.eql_slice_u8);
- pub fn init(allocator: *Allocator) BufMap {
+ pub fn init(allocator: Allocator) BufMap {
var self = BufMap{ .hash_map = BufMapHashMap.init(allocator) };
return self;
}
@@ -86,7 +86,7 @@ test "BufMap" {
var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit();
- var bufmap = BufMap.init(&direct_allocator.allocator);
+ var bufmap = BufMap.init(direct_allocator.allocator());
defer bufmap.deinit();
try bufmap.set("x", "1");
diff --git a/std/buf_set.zig b/std/buf_set.zig
index 807b9db35dda..2f1d70b8a1ea 100644
--- a/std/buf_set.zig
+++ b/std/buf_set.zig
@@ -9,7 +9,7 @@ pub const BufSet = struct {
const BufSetHashMap = HashMap([]const u8, void, mem.hash_slice_u8, mem.eql_slice_u8);
- pub fn init(a: *Allocator) BufSet {
+ pub fn init(a: Allocator) BufSet {
var self = BufSet{ .hash_map = BufSetHashMap.init(a) };
return self;
}
@@ -49,7 +49,7 @@ pub const BufSet = struct {
return self.hash_map.iterator();
}
- pub fn allocator(self: *const BufSet) *Allocator {
+ pub fn allocator(self: *const BufSet) Allocator {
return self.hash_map.allocator;
}
@@ -68,7 +68,7 @@ test "BufSet" {
var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit();
- var bufset = BufSet.init(&direct_allocator.allocator);
+ var bufset = BufSet.init(direct_allocator.allocator());
defer bufset.deinit();
try bufset.put("x");
diff --git a/std/buffer.zig b/std/buffer.zig
index 32228af558be..a0efaca5492a 100644
--- a/std/buffer.zig
+++ b/std/buffer.zig
@@ -11,14 +11,14 @@ pub const Buffer = struct {
list: ArrayList(u8),
/// Must deinitialize with deinit.
- pub fn init(allocator: *Allocator, m: []const u8) !Buffer {
+ pub fn init(allocator: Allocator, m: []const u8) !Buffer {
var self = try initSize(allocator, m.len);
mem.copy(u8, self.list.items, m);
return self;
}
/// Must deinitialize with deinit.
- pub fn initSize(allocator: *Allocator, size: usize) !Buffer {
+ pub fn initSize(allocator: Allocator, size: usize) !Buffer {
var self = initNull(allocator);
try self.resize(size);
return self;
@@ -28,7 +28,7 @@ pub const Buffer = struct {
/// None of the other operations are valid until you do one of these:
/// * ::replaceContents
/// * ::resize
- pub fn initNull(allocator: *Allocator) Buffer {
+ pub fn initNull(allocator: Allocator) Buffer {
return Buffer{ .list = ArrayList(u8).init(allocator) };
}
@@ -40,7 +40,7 @@ pub const Buffer = struct {
/// Buffer takes ownership of the passed in slice. The slice must have been
/// allocated with `allocator`.
/// Must deinitialize with deinit.
- pub fn fromOwnedSlice(allocator: *Allocator, slice: []u8) !Buffer {
+ pub fn fromOwnedSlice(allocator: Allocator, slice: []u8) !Buffer {
var self = Buffer{ .list = ArrayList(u8).fromOwnedSlice(allocator, slice) };
try self.list.append(0);
return self;
@@ -55,7 +55,7 @@ pub const Buffer = struct {
return result;
}
- pub fn allocPrint(allocator: *Allocator, comptime format: []const u8, args: ...) !Buffer {
+ pub fn allocPrint(allocator: Allocator, comptime format: []const u8, args: ...) !Buffer {
const countSize = struct {
fn countSize(size: *usize, bytes: []const u8) (error{}!void) {
size.* += bytes.len;
diff --git a/std/build.zig b/std/build.zig
index ff64c6cb93de..040e7135a6e2 100644
--- a/std/build.zig
+++ b/std/build.zig
@@ -22,7 +22,7 @@ pub const Builder = struct {
install_tls: TopLevelStep,
have_uninstall_step: bool,
have_install_step: bool,
- allocator: *Allocator,
+ allocator: Allocator,
native_system_lib_paths: ArrayList([]const u8),
native_system_include_dirs: ArrayList([]const u8),
native_system_rpaths: ArrayList([]const u8),
@@ -93,7 +93,7 @@ pub const Builder = struct {
description: []const u8,
};
- pub fn init(allocator: *Allocator, zig_exe: []const u8, build_root: []const u8, cache_root: []const u8) Builder {
+ pub fn init(allocator: Allocator, zig_exe: []const u8, build_root: []const u8, cache_root: []const u8) Builder {
const env_map = allocator.create(BufMap) catch unreachable;
env_map.* = os.getEnvMap(allocator) catch unreachable;
var self = Builder{
@@ -760,8 +760,8 @@ pub const Builder = struct {
var stdout = std.Buffer.initNull(self.allocator);
defer std.Buffer.deinit(&stdout);
- var stdout_file_in_stream = child.stdout.?.inStream();
- try stdout_file_in_stream.stream.readAllBuffer(&stdout, max_output_size);
+ var stdout_file_in_stream = child.stdout.?.inStreamAdapter();
+ try stdout_file_in_stream.inStream().readAllBuffer(&stdout, max_output_size);
const term = child.wait() catch |err| std.debug.panic("unable to spawn {}: {}", argv[0], err);
switch (term) {
@@ -799,7 +799,7 @@ const CrossTarget = struct {
os: builtin.Os,
abi: builtin.Abi,
- pub fn zigTriple(cross_target: CrossTarget, allocator: *Allocator) []u8 {
+ pub fn zigTriple(cross_target: CrossTarget, allocator: Allocator) []u8 {
return std.fmt.allocPrint(
allocator,
"{}{}-{}-{}",
@@ -810,7 +810,7 @@ const CrossTarget = struct {
) catch unreachable;
}
- pub fn linuxTriple(cross_target: CrossTarget, allocator: *Allocator) []u8 {
+ pub fn linuxTriple(cross_target: CrossTarget, allocator: Allocator) []u8 {
return std.fmt.allocPrint(
allocator,
"{}-{}-{}",
@@ -1268,7 +1268,7 @@ pub const LibExeObjStep = struct {
}
pub fn addBuildOption(self: *LibExeObjStep, comptime T: type, name: []const u8, value: T) void {
- const out = &std.io.BufferOutStream.init(&self.build_options_contents).stream;
+ const out = std.io.BufferOutStream.init(&self.build_options_contents).outStream();
out.print("pub const {} = {};\n", name, value) catch unreachable;
}
@@ -1858,7 +1858,7 @@ pub const Step = struct {
loop_flag: bool,
done_flag: bool,
- pub fn init(name: []const u8, allocator: *Allocator, makeFn: fn (*Step) anyerror!void) Step {
+ pub fn init(name: []const u8, allocator: Allocator, makeFn: fn (*Step) anyerror!void) Step {
return Step{
.name = name,
.makeFn = makeFn,
@@ -1867,7 +1867,7 @@ pub const Step = struct {
.done_flag = false,
};
}
- pub fn initNoOp(name: []const u8, allocator: *Allocator) Step {
+ pub fn initNoOp(name: []const u8, allocator: Allocator) Step {
return init(name, allocator, makeNoOp);
}
@@ -1885,7 +1885,7 @@ pub const Step = struct {
fn makeNoOp(self: *Step) anyerror!void {}
};
-fn doAtomicSymLinks(allocator: *Allocator, output_path: []const u8, filename_major_only: []const u8, filename_name_only: []const u8) !void {
+fn doAtomicSymLinks(allocator: Allocator, output_path: []const u8, filename_major_only: []const u8, filename_name_only: []const u8) !void {
const out_dir = os.path.dirname(output_path) orelse ".";
const out_basename = os.path.basename(output_path);
// sym link for libfoo.so.1 to libfoo.so.1.2.3
diff --git a/std/coff.zig b/std/coff.zig
index c31d2d8233b9..04cb0872b048 100644
--- a/std/coff.zig
+++ b/std/coff.zig
@@ -29,7 +29,7 @@ pub const CoffError = error{
pub const Coff = struct {
in_file: os.File,
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
coff_header: CoffHeader,
pe_header: OptionalHeader,
@@ -41,8 +41,8 @@ pub const Coff = struct {
pub fn loadHeader(self: *Coff) !void {
const pe_pointer_offset = 0x3C;
- var file_stream = self.in_file.inStream();
- const in = &file_stream.stream;
+ var file_stream = self.in_file.inStreamAdapter();
+ const in = file_stream.inStream();
var magic: [2]u8 = undefined;
try in.readNoEof(magic[0..]);
@@ -77,8 +77,8 @@ pub const Coff = struct {
try self.loadOptionalHeader(&file_stream);
}
- fn loadOptionalHeader(self: *Coff, file_stream: *os.File.InStream) !void {
- const in = &file_stream.stream;
+ fn loadOptionalHeader(self: *Coff, file_stream: *os.File.InStreamAdapter) !void {
+ const in = file_stream.inStream();
self.pe_header.magic = try in.readIntLittle(u16);
// For now we're only interested in finding the reference to the .pdb,
// so we'll skip most of this header, which size is different in 32
@@ -115,8 +115,8 @@ pub const Coff = struct {
const file_offset = debug_dir.virtual_address - header.virtual_address + header.pointer_to_raw_data;
try self.in_file.seekTo(file_offset + debug_dir.size);
- var file_stream = self.in_file.inStream();
- const in = &file_stream.stream;
+ var file_stream = self.in_file.inStreamAdapter();
+ const in = file_stream.inStream();
var cv_signature: [4]u8 = undefined; // CodeView signature
try in.readNoEof(cv_signature[0..]);
@@ -146,8 +146,8 @@ pub const Coff = struct {
self.sections = ArrayList(Section).init(self.allocator);
- var file_stream = self.in_file.inStream();
- const in = &file_stream.stream;
+ var file_stream = self.in_file.inStreamAdapter();
+ const in = file_stream.inStream();
var name: [8]u8 = undefined;
diff --git a/std/crypto/throughput_test.zig b/std/crypto/throughput_test.zig
index 73a2a8612469..7e815cfa7966 100644
--- a/std/crypto/throughput_test.zig
+++ b/std/crypto/throughput_test.zig
@@ -29,7 +29,7 @@ pub fn benchmarkHash(comptime Hash: var, comptime bytes: comptime_int) !u64 {
var h = Hash.init();
var block: [Hash.digest_length]u8 = undefined;
- prng.random.bytes(block[0..]);
+ prng.random().bytes(block[0..]);
var offset: usize = 0;
var timer = try Timer.start();
@@ -56,10 +56,10 @@ pub fn benchmarkMac(comptime Mac: var, comptime bytes: comptime_int) !u64 {
std.debug.assert(32 >= Mac.mac_length and 32 >= Mac.minimum_key_length);
var in: [1 * MiB]u8 = undefined;
- prng.random.bytes(in[0..]);
+ prng.random().bytes(in[0..]);
var key: [32]u8 = undefined;
- prng.random.bytes(key[0..]);
+ prng.random().bytes(key[0..]);
var offset: usize = 0;
var timer = try Timer.start();
@@ -81,10 +81,10 @@ pub fn benchmarkKeyExchange(comptime DhKeyExchange: var, comptime exchange_count
std.debug.assert(DhKeyExchange.minimum_key_length >= DhKeyExchange.secret_length);
var in: [DhKeyExchange.minimum_key_length]u8 = undefined;
- prng.random.bytes(in[0..]);
+ prng.random().bytes(in[0..]);
var out: [DhKeyExchange.minimum_key_length]u8 = undefined;
- prng.random.bytes(out[0..]);
+ prng.random().bytes(out[0..]);
var offset: usize = 0;
var timer = try Timer.start();
@@ -130,12 +130,12 @@ fn printPad(stdout: var, s: []const u8) !void {
pub fn main() !void {
var stdout_file = try std.io.getStdOut();
- var stdout_out_stream = stdout_file.outStream();
- const stdout = &stdout_out_stream.stream;
+ var stdout_out_stream = stdout_file.outStreamAdapter();
+ const stdout = stdout_out_stream.outStream();
var buffer: [1024]u8 = undefined;
var fixed = std.heap.FixedBufferAllocator.init(buffer[0..]);
- const args = try std.os.argsAlloc(&fixed.allocator);
+ const args = try std.os.argsAlloc(fixed.allocator());
var filter: ?[]u8 = "";
diff --git a/std/cstr.zig b/std/cstr.zig
index 49d63737325c..0ac022bd7967 100644
--- a/std/cstr.zig
+++ b/std/cstr.zig
@@ -48,7 +48,7 @@ fn testCStrFnsImpl() void {
/// Returns a mutable slice with 1 more byte of length which is a null byte.
/// Caller owns the returned memory.
-pub fn addNullByte(allocator: *mem.Allocator, slice: []const u8) ![]u8 {
+pub fn addNullByte(allocator: mem.Allocator, slice: []const u8) ![]u8 {
const result = try allocator.alloc(u8, slice.len + 1);
mem.copy(u8, result, slice);
result[slice.len] = 0;
@@ -56,13 +56,13 @@ pub fn addNullByte(allocator: *mem.Allocator, slice: []const u8) ![]u8 {
}
pub const NullTerminated2DArray = struct {
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
byte_count: usize,
ptr: ?[*]?[*]u8,
/// Takes N lists of strings, concatenates the lists together, and adds a null terminator
/// Caller must deinit result
- pub fn fromSlices(allocator: *mem.Allocator, slices: []const []const []const u8) !NullTerminated2DArray {
+ pub fn fromSlices(allocator: mem.Allocator, slices: []const []const []const u8) !NullTerminated2DArray {
var new_len: usize = 1; // 1 for the list null
var byte_count: usize = 0;
for (slices) |slice| {
diff --git a/std/debug.zig b/std/debug.zig
index 45abfda88ec0..4638e6346fe8 100644
--- a/std/debug.zig
+++ b/std/debug.zig
@@ -16,7 +16,7 @@ const maxInt = std.math.maxInt;
const leb = @import("debug/leb128.zig");
pub const FailingAllocator = @import("debug/failing_allocator.zig").FailingAllocator;
-pub const failing_allocator = &FailingAllocator.init(global_allocator, 0).allocator;
+pub const failing_allocator = FailingAllocator.init(global_allocator, 0).allocator();
pub const runtime_safety = switch (builtin.mode) {
builtin.Mode.Debug, builtin.Mode.ReleaseSafe => true,
@@ -37,9 +37,9 @@ const Module = struct {
/// Tries to write to stderr, unbuffered, and ignores any error returned.
/// Does not append a newline.
var stderr_file: os.File = undefined;
-var stderr_file_out_stream: os.File.OutStream = undefined;
+var stderr_file_out_stream: os.File.OutStreamAdapter = undefined;
-var stderr_stream: ?*io.OutStream(os.File.WriteError) = null;
+var stderr_stream: ?io.OutStream(os.File.WriteError) = null;
var stderr_mutex = std.Mutex.init();
pub fn warn(comptime fmt: []const u8, args: ...) void {
const held = stderr_mutex.acquire();
@@ -48,13 +48,13 @@ pub fn warn(comptime fmt: []const u8, args: ...) void {
stderr.print(fmt, args) catch return;
}
-pub fn getStderrStream() !*io.OutStream(os.File.WriteError) {
+pub fn getStderrStream() !io.OutStream(os.File.WriteError) {
if (stderr_stream) |st| {
return st;
} else {
stderr_file = try io.getStdErr();
- stderr_file_out_stream = stderr_file.outStream();
- const st = &stderr_file_out_stream.stream;
+ stderr_file_out_stream = stderr_file.outStreamAdapter();
+ const st = stderr_file_out_stream.outStream();
stderr_stream = st;
return st;
}
@@ -74,7 +74,7 @@ pub fn getSelfDebugInfo() !*DebugInfo {
fn wantTtyColor() bool {
var bytes: [128]u8 = undefined;
- const allocator = &std.heap.FixedBufferAllocator.init(bytes[0..]).allocator;
+ const allocator = std.heap.FixedBufferAllocator.init(bytes[0..]).allocator();
return if (std.os.getEnvVarOwned(allocator, "ZIG_DEBUG_COLOR")) |_| true else |_| stderr_file.isTty();
}
@@ -211,7 +211,7 @@ const RESET = "\x1b[0m";
pub fn writeStackTrace(
stack_trace: builtin.StackTrace,
out_stream: var,
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
debug_info: *DebugInfo,
tty_color: bool,
) !void {
@@ -582,15 +582,15 @@ fn populateModule(di: *DebugInfo, mod: *Module) !void {
const modi = di.pdb.getStreamById(mod.mod_info.ModuleSymStream) orelse return error.MissingDebugInfo;
- const signature = try modi.stream.readIntLittle(u32);
+ const signature = try modi.inStream().readIntLittle(u32);
if (signature != 4)
return error.InvalidDebugInfo;
mod.symbols = try allocator.alloc(u8, mod.mod_info.SymByteSize - 4);
- try modi.stream.readNoEof(mod.symbols);
+ try modi.inStream().readNoEof(mod.symbols);
mod.subsect_info = try allocator.alloc(u8, mod.mod_info.C13ByteSize);
- try modi.stream.readNoEof(mod.subsect_info);
+ try modi.inStream().readNoEof(mod.subsect_info);
var sect_offset: usize = 0;
var skip_len: usize = undefined;
@@ -774,7 +774,7 @@ pub const OpenSelfDebugInfoError = error{
UnsupportedOperatingSystem,
};
-pub fn openSelfDebugInfo(allocator: *mem.Allocator) !DebugInfo {
+pub fn openSelfDebugInfo(allocator: mem.Allocator) !DebugInfo {
switch (builtin.os) {
builtin.Os.linux, builtin.Os.freebsd, builtin.Os.netbsd => return openSelfDebugInfoLinux(allocator),
builtin.Os.macosx, builtin.Os.ios => return openSelfDebugInfoMacOs(allocator),
@@ -783,7 +783,7 @@ pub fn openSelfDebugInfo(allocator: *mem.Allocator) !DebugInfo {
}
}
-fn openSelfDebugInfoWindows(allocator: *mem.Allocator) !DebugInfo {
+fn openSelfDebugInfoWindows(allocator: mem.Allocator) !DebugInfo {
const self_file = try os.openSelfExe();
defer self_file.close();
@@ -816,19 +816,19 @@ fn openSelfDebugInfoWindows(allocator: *mem.Allocator) !DebugInfo {
try di.pdb.openFile(di.coff, path);
var pdb_stream = di.pdb.getStream(pdb.StreamType.Pdb) orelse return error.InvalidDebugInfo;
- const version = try pdb_stream.stream.readIntLittle(u32);
- const signature = try pdb_stream.stream.readIntLittle(u32);
- const age = try pdb_stream.stream.readIntLittle(u32);
+ const version = try pdb_stream.inStream().readIntLittle(u32);
+ const signature = try pdb_stream.inStream().readIntLittle(u32);
+ const age = try pdb_stream.inStream().readIntLittle(u32);
var guid: [16]u8 = undefined;
- try pdb_stream.stream.readNoEof(guid[0..]);
+ try pdb_stream.inStream().readNoEof(guid[0..]);
if (!mem.eql(u8, di.coff.guid, guid) or di.coff.age != age)
return error.InvalidDebugInfo;
// We validated the executable and pdb match.
const string_table_index = str_tab_index: {
- const name_bytes_len = try pdb_stream.stream.readIntLittle(u32);
+ const name_bytes_len = try pdb_stream.inStream().readIntLittle(u32);
const name_bytes = try allocator.alloc(u8, name_bytes_len);
- try pdb_stream.stream.readNoEof(name_bytes);
+ try pdb_stream.inStream().readNoEof(name_bytes);
const HashTableHeader = packed struct {
Size: u32,
@@ -838,17 +838,17 @@ fn openSelfDebugInfoWindows(allocator: *mem.Allocator) !DebugInfo {
return cap * 2 / 3 + 1;
}
};
- const hash_tbl_hdr = try pdb_stream.stream.readStruct(HashTableHeader);
+ const hash_tbl_hdr = try pdb_stream.inStream().readStruct(HashTableHeader);
if (hash_tbl_hdr.Capacity == 0)
return error.InvalidDebugInfo;
if (hash_tbl_hdr.Size > HashTableHeader.maxLoad(hash_tbl_hdr.Capacity))
return error.InvalidDebugInfo;
- const present = try readSparseBitVector(&pdb_stream.stream, allocator);
+ const present = try readSparseBitVector(pdb_stream.inStream(), allocator);
if (present.len != hash_tbl_hdr.Size)
return error.InvalidDebugInfo;
- const deleted = try readSparseBitVector(&pdb_stream.stream, allocator);
+ const deleted = try readSparseBitVector(pdb_stream.inStream(), allocator);
const Bucket = struct {
first: u32,
@@ -856,8 +856,8 @@ fn openSelfDebugInfoWindows(allocator: *mem.Allocator) !DebugInfo {
};
const bucket_list = try allocator.alloc(Bucket, present.len);
for (present) |_| {
- const name_offset = try pdb_stream.stream.readIntLittle(u32);
- const name_index = try pdb_stream.stream.readIntLittle(u32);
+ const name_offset = try pdb_stream.inStream().readIntLittle(u32);
+ const name_index = try pdb_stream.inStream().readIntLittle(u32);
const name = mem.toSlice(u8, name_bytes.ptr + name_offset);
if (mem.eql(u8, name, "/names")) {
break :str_tab_index name_index;
@@ -872,7 +872,7 @@ fn openSelfDebugInfoWindows(allocator: *mem.Allocator) !DebugInfo {
const dbi = di.pdb.dbi;
// Dbi Header
- const dbi_stream_header = try dbi.stream.readStruct(pdb.DbiStreamHeader);
+ const dbi_stream_header = try dbi.inStream().readStruct(pdb.DbiStreamHeader);
const mod_info_size = dbi_stream_header.ModInfoSize;
const section_contrib_size = dbi_stream_header.SectionContributionSize;
@@ -881,7 +881,7 @@ fn openSelfDebugInfoWindows(allocator: *mem.Allocator) !DebugInfo {
// Module Info Substream
var mod_info_offset: usize = 0;
while (mod_info_offset != mod_info_size) {
- const mod_info = try dbi.stream.readStruct(pdb.ModInfo);
+ const mod_info = try dbi.inStream().readStruct(pdb.ModInfo);
var this_record_len: usize = @sizeOf(pdb.ModInfo);
const module_name = try dbi.readNullTermString(allocator);
@@ -919,14 +919,14 @@ fn openSelfDebugInfoWindows(allocator: *mem.Allocator) !DebugInfo {
var sect_contribs = ArrayList(pdb.SectionContribEntry).init(allocator);
var sect_cont_offset: usize = 0;
if (section_contrib_size != 0) {
- const ver = @intToEnum(pdb.SectionContrSubstreamVersion, try dbi.stream.readIntLittle(u32));
+ const ver = @intToEnum(pdb.SectionContrSubstreamVersion, try dbi.inStream().readIntLittle(u32));
if (ver != pdb.SectionContrSubstreamVersion.Ver60)
return error.InvalidDebugInfo;
sect_cont_offset += @sizeOf(u32);
}
while (sect_cont_offset != section_contrib_size) {
const entry = try sect_contribs.addOne();
- entry.* = try dbi.stream.readStruct(pdb.SectionContribEntry);
+ entry.* = try dbi.inStream().readStruct(pdb.SectionContribEntry);
sect_cont_offset += @sizeOf(pdb.SectionContribEntry);
if (sect_cont_offset > section_contrib_size)
@@ -938,7 +938,7 @@ fn openSelfDebugInfoWindows(allocator: *mem.Allocator) !DebugInfo {
return di;
}
-fn readSparseBitVector(stream: var, allocator: *mem.Allocator) ![]usize {
+fn readSparseBitVector(stream: var, allocator: mem.Allocator) ![]usize {
const num_words = try stream.readIntLittle(u32);
var word_i: usize = 0;
var list = ArrayList(usize).init(allocator);
@@ -967,7 +967,7 @@ fn findDwarfSectionFromElf(elf_file: *elf.Elf, name: []const u8) !?DwarfInfo.Sec
/// the DwarfInfo fields before calling. These fields can be left undefined:
/// * abbrev_table_list
/// * compile_unit_list
-pub fn openDwarfDebugInfo(di: *DwarfInfo, allocator: *mem.Allocator) !void {
+pub fn openDwarfDebugInfo(di: *DwarfInfo, allocator: mem.Allocator) !void {
di.abbrev_table_list = ArrayList(AbbrevTableHeader).init(allocator);
di.compile_unit_list = ArrayList(CompileUnit).init(allocator);
di.func_list = ArrayList(Func).init(allocator);
@@ -976,9 +976,9 @@ pub fn openDwarfDebugInfo(di: *DwarfInfo, allocator: *mem.Allocator) !void {
}
pub fn openElfDebugInfo(
- allocator: *mem.Allocator,
- elf_seekable_stream: *DwarfSeekableStream,
- elf_in_stream: *DwarfInStream,
+ allocator: mem.Allocator,
+ elf_seekable_stream: DwarfSeekableStream,
+ elf_in_stream: DwarfInStream,
) !DwarfInfo {
var efile: elf.Elf = undefined;
try efile.openStream(allocator, elf_seekable_stream, elf_in_stream);
@@ -1001,7 +1001,7 @@ pub fn openElfDebugInfo(
return di;
}
-fn openSelfDebugInfoLinux(allocator: *mem.Allocator) !DwarfInfo {
+fn openSelfDebugInfoLinux(allocator: mem.Allocator) !DwarfInfo {
const S = struct {
var self_exe_file: os.File = undefined;
var self_exe_mmap_seekable: io.SliceSeekableInStream = undefined;
@@ -1028,13 +1028,13 @@ fn openSelfDebugInfoLinux(allocator: *mem.Allocator) !DwarfInfo {
return openElfDebugInfo(
allocator,
// TODO https://github.com/ziglang/zig/issues/764
- @ptrCast(*DwarfSeekableStream, &S.self_exe_mmap_seekable.seekable_stream),
+ @bitCast(DwarfSeekableStream, S.self_exe_mmap_seekable.seekableStream()),
// TODO https://github.com/ziglang/zig/issues/764
- @ptrCast(*DwarfInStream, &S.self_exe_mmap_seekable.stream),
+ @bitCast(DwarfInStream, S.self_exe_mmap_seekable.inStream()),
);
}
-fn openSelfDebugInfoMacOs(allocator: *mem.Allocator) !DebugInfo {
+fn openSelfDebugInfoMacOs(allocator: mem.Allocator) !DebugInfo {
const hdr = &std.c._mh_execute_header;
assert(hdr.magic == std.macho.MH_MAGIC_64);
@@ -1168,8 +1168,8 @@ pub const DwarfSeekableStream = io.SeekableStream(anyerror, anyerror);
pub const DwarfInStream = io.InStream(anyerror);
pub const DwarfInfo = struct {
- dwarf_seekable_stream: *DwarfSeekableStream,
- dwarf_in_stream: *DwarfInStream,
+ dwarf_seekable_stream: DwarfSeekableStream,
+ dwarf_in_stream: DwarfInStream,
endian: builtin.Endian,
debug_info: Section,
debug_abbrev: Section,
@@ -1185,7 +1185,7 @@ pub const DwarfInfo = struct {
size: u64,
};
- pub fn allocator(self: DwarfInfo) *mem.Allocator {
+ pub fn allocator(self: DwarfInfo) mem.Allocator {
return self.abbrev_table_list.allocator;
}
@@ -1207,7 +1207,7 @@ pub const DebugInfo = switch (builtin.os) {
std.hash_map.getTrivialEqlFn(*macho.nlist_64),
);
- pub fn allocator(self: DebugInfo) *mem.Allocator {
+ pub fn allocator(self: DebugInfo) mem.Allocator {
return self.ofiles.allocator;
}
},
@@ -1347,7 +1347,7 @@ pub const LineInfo = struct {
line: u64,
column: u64,
file_name: []const u8,
- allocator: ?*mem.Allocator,
+ allocator: ?mem.Allocator,
fn deinit(self: LineInfo) void {
const allocator = self.allocator orelse return;
@@ -1432,7 +1432,7 @@ const LineNumberProgram = struct {
}
};
-fn readStringRaw(allocator: *mem.Allocator, in_stream: var) ![]u8 {
+fn readStringRaw(allocator: mem.Allocator, in_stream: var) ![]u8 {
var buf = ArrayList(u8).init(allocator);
while (true) {
const byte = try in_stream.readByte();
@@ -1448,24 +1448,24 @@ fn getString(di: *DwarfInfo, offset: u64) ![]u8 {
return di.readString();
}
-fn readAllocBytes(allocator: *mem.Allocator, in_stream: var, size: usize) ![]u8 {
+fn readAllocBytes(allocator: mem.Allocator, in_stream: var, size: usize) ![]u8 {
const buf = try allocator.alloc(u8, size);
errdefer allocator.free(buf);
if ((try in_stream.read(buf)) < size) return error.EndOfFile;
return buf;
}
-fn parseFormValueBlockLen(allocator: *mem.Allocator, in_stream: var, size: usize) !FormValue {
+fn parseFormValueBlockLen(allocator: mem.Allocator, in_stream: var, size: usize) !FormValue {
const buf = try readAllocBytes(allocator, in_stream, size);
return FormValue{ .Block = buf };
}
-fn parseFormValueBlock(allocator: *mem.Allocator, in_stream: var, size: usize) !FormValue {
+fn parseFormValueBlock(allocator: mem.Allocator, in_stream: var, size: usize) !FormValue {
const block_len = try in_stream.readVarInt(usize, builtin.Endian.Little, size);
return parseFormValueBlockLen(allocator, in_stream, block_len);
}
-fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: var, signed: bool, comptime size: i32) !FormValue {
+fn parseFormValueConstant(allocator: mem.Allocator, in_stream: var, signed: bool, comptime size: i32) !FormValue {
return FormValue{
.Const = Constant{
.signed = signed,
@@ -1489,7 +1489,7 @@ fn parseFormValueTargetAddrSize(in_stream: var) !u64 {
return if (@sizeOf(usize) == 4) u64(try in_stream.readIntLittle(u32)) else if (@sizeOf(usize) == 8) try in_stream.readIntLittle(u64) else unreachable;
}
-fn parseFormValueRef(allocator: *mem.Allocator, in_stream: var, size: i32) !FormValue {
+fn parseFormValueRef(allocator: mem.Allocator, in_stream: var, size: i32) !FormValue {
return FormValue{
.Ref = switch (size) {
1 => try in_stream.readIntLittle(u8),
@@ -1502,7 +1502,7 @@ fn parseFormValueRef(allocator: *mem.Allocator, in_stream: var, size: i32) !Form
};
}
-fn parseFormValue(allocator: *mem.Allocator, in_stream: var, form_id: u64, is_64: bool) anyerror!FormValue {
+fn parseFormValue(allocator: mem.Allocator, in_stream: var, form_id: u64, is_64: bool) anyerror!FormValue {
return switch (form_id) {
DW.FORM_addr => FormValue{ .Address = try parseFormValueTargetAddrSize(in_stream) },
DW.FORM_block1 => parseFormValueBlock(allocator, in_stream, 1),
@@ -2254,7 +2254,7 @@ fn readStringMem(ptr: *[*]const u8) []const u8 {
return result;
}
-fn readInitialLength(comptime E: type, in_stream: *io.InStream(E), is_64: *bool) !u64 {
+fn readInitialLength(comptime E: type, in_stream: io.InStream(E), is_64: *bool) !u64 {
const first_32_bits = try in_stream.readIntLittle(u32);
is_64.* = (first_32_bits == 0xffffffff);
if (is_64.*) {
@@ -2266,19 +2266,19 @@ fn readInitialLength(comptime E: type, in_stream: *io.InStream(E), is_64: *bool)
}
/// This should only be used in temporary test programs.
-pub const global_allocator = &global_fixed_allocator.allocator;
+pub const global_allocator = global_fixed_allocator.allocator();
var global_fixed_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(global_allocator_mem[0..]);
var global_allocator_mem: [100 * 1024]u8 = undefined;
/// TODO multithreaded awareness
-var debug_info_allocator: ?*mem.Allocator = null;
+var debug_info_allocator: ?mem.Allocator = null;
var debug_info_direct_allocator: std.heap.DirectAllocator = undefined;
var debug_info_arena_allocator: std.heap.ArenaAllocator = undefined;
-fn getDebugInfoAllocator() *mem.Allocator {
+fn getDebugInfoAllocator() mem.Allocator {
if (debug_info_allocator) |a| return a;
debug_info_direct_allocator = std.heap.DirectAllocator.init();
- debug_info_arena_allocator = std.heap.ArenaAllocator.init(&debug_info_direct_allocator.allocator);
- debug_info_allocator = &debug_info_arena_allocator.allocator;
- return &debug_info_arena_allocator.allocator;
+ debug_info_arena_allocator = std.heap.ArenaAllocator.init(debug_info_direct_allocator.allocator());
+ debug_info_allocator = debug_info_arena_allocator.allocator();
+ return debug_info_arena_allocator.allocator();
}
diff --git a/std/debug/failing_allocator.zig b/std/debug/failing_allocator.zig
index 5776d23194cb..8b428bd6e5f5 100644
--- a/std/debug/failing_allocator.zig
+++ b/std/debug/failing_allocator.zig
@@ -4,38 +4,34 @@ const mem = std.mem;
/// Allocator that fails after N allocations, useful for making sure out of
/// memory conditions are handled correctly.
pub const FailingAllocator = struct {
- allocator: mem.Allocator,
index: usize,
fail_index: usize,
- internal_allocator: *mem.Allocator,
+ internal_allocator: mem.Allocator,
allocated_bytes: usize,
freed_bytes: usize,
allocations: usize,
deallocations: usize,
- pub fn init(allocator: *mem.Allocator, fail_index: usize) FailingAllocator {
+ pub fn init(internal_allocator: mem.Allocator, fail_index: usize) FailingAllocator {
return FailingAllocator{
- .internal_allocator = allocator,
+ .internal_allocator = internal_allocator,
.fail_index = fail_index,
.index = 0,
.allocated_bytes = 0,
.freed_bytes = 0,
.allocations = 0,
.deallocations = 0,
- .allocator = mem.Allocator{
- .reallocFn = realloc,
- .shrinkFn = shrink,
- },
};
}
- fn realloc(allocator: *mem.Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
- const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
+ fn realloc(a: *const mem.Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) mem.Allocator.Error![]u8 {
+ const self = a.iface.implCast(FailingAllocator);
+
if (self.index == self.fail_index) {
return error.OutOfMemory;
}
const result = try self.internal_allocator.reallocFn(
- self.internal_allocator,
+ &self.internal_allocator,
old_mem,
old_align,
new_size,
@@ -54,12 +50,21 @@ pub const FailingAllocator = struct {
return result;
}
- fn shrink(allocator: *mem.Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
- const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
- const r = self.internal_allocator.shrinkFn(self.internal_allocator, old_mem, old_align, new_size, new_align);
+ fn shrink(a: *const mem.Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
+ const self = a.iface.implCast(FailingAllocator);
+
+ const r = self.internal_allocator.shrinkFn(&self.internal_allocator, old_mem, old_align, new_size, new_align);
self.freed_bytes += old_mem.len - r.len;
if (new_size == 0)
self.deallocations += 1;
return r;
}
+
+ pub fn allocator(self: *FailingAllocator) mem.Allocator {
+ return mem.Allocator{
+ .iface = mem.Allocator.Iface.init(self),
+ .reallocFn = realloc,
+ .shrinkFn = shrink,
+ };
+ }
};
diff --git a/std/debug/leb128.zig b/std/debug/leb128.zig
index cb59c5b0d214..b110698a9b24 100644
--- a/std/debug/leb128.zig
+++ b/std/debug/leb128.zig
@@ -122,17 +122,17 @@ pub fn readILEB128Mem(comptime T: type, ptr: *[*]const u8) !T {
fn test_read_stream_ileb128(comptime T: type, encoded: []const u8) !T {
var in_stream = std.io.SliceInStream.init(encoded);
- return try readILEB128(T, &in_stream.stream);
+ return try readILEB128(T, in_stream.inStream());
}
fn test_read_stream_uleb128(comptime T: type, encoded: []const u8) !T {
var in_stream = std.io.SliceInStream.init(encoded);
- return try readULEB128(T, &in_stream.stream);
+ return try readULEB128(T, in_stream.inStream());
}
fn test_read_ileb128(comptime T: type, encoded: []const u8) !T {
var in_stream = std.io.SliceInStream.init(encoded);
- const v1 = readILEB128(T, &in_stream.stream);
+ const v1 = readILEB128(T, in_stream.inStream());
var in_ptr = encoded.ptr;
const v2 = readILEB128Mem(T, &in_ptr);
testing.expectEqual(v1, v2);
@@ -141,7 +141,7 @@ fn test_read_ileb128(comptime T: type, encoded: []const u8) !T {
fn test_read_uleb128(comptime T: type, encoded: []const u8) !T {
var in_stream = std.io.SliceInStream.init(encoded);
- const v1 = readULEB128(T, &in_stream.stream);
+ const v1 = readULEB128(T, in_stream.inStream());
var in_ptr = encoded.ptr;
const v2 = readULEB128Mem(T, &in_ptr);
testing.expectEqual(v1, v2);
@@ -153,7 +153,7 @@ fn test_read_ileb128_seq(comptime T: type, comptime N: usize, encoded: []const u
var in_ptr = encoded.ptr;
var i: usize = 0;
while (i < N) : (i += 1) {
- const v1 = readILEB128(T, &in_stream.stream);
+ const v1 = readILEB128(T, in_stream.inStream());
const v2 = readILEB128Mem(T, &in_ptr);
testing.expectEqual(v1, v2);
}
@@ -164,7 +164,7 @@ fn test_read_uleb128_seq(comptime T: type, comptime N: usize, encoded: []const u
var in_ptr = encoded.ptr;
var i: usize = 0;
while (i < N) : (i += 1) {
- const v1 = readULEB128(T, &in_stream.stream);
+ const v1 = readULEB128(T, in_stream.inStream());
const v2 = readULEB128Mem(T, &in_ptr);
testing.expectEqual(v1, v2);
}
diff --git a/std/dynamic_library.zig b/std/dynamic_library.zig
index 57122fd69c60..b4de217953a1 100644
--- a/std/dynamic_library.zig
+++ b/std/dynamic_library.zig
@@ -109,7 +109,7 @@ pub const LinuxDynLib = struct {
map_size: usize,
/// Trusts the file
- pub fn open(allocator: *mem.Allocator, path: []const u8) !DynLib {
+ pub fn open(allocator: mem.Allocator, path: []const u8) !DynLib {
const fd = try std.os.posixOpen(path, 0, linux.O_RDONLY | linux.O_CLOEXEC);
errdefer std.os.close(fd);
@@ -252,10 +252,10 @@ fn checkver(def_arg: *elf.Verdef, vsym_arg: i32, vername: []const u8, strings: [
}
pub const WindowsDynLib = struct {
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
dll: windows.HMODULE,
- pub fn open(allocator: *mem.Allocator, path: []const u8) !WindowsDynLib {
+ pub fn open(allocator: mem.Allocator, path: []const u8) !WindowsDynLib {
const wpath = try win_util.sliceToPrefixedFileW(path);
return WindowsDynLib{
diff --git a/std/elf.zig b/std/elf.zig
index 39617d3cf460..bc4773bee026 100644
--- a/std/elf.zig
+++ b/std/elf.zig
@@ -353,8 +353,8 @@ pub const SectionHeader = struct {
};
pub const Elf = struct {
- seekable_stream: *io.SeekableStream(anyerror, anyerror),
- in_stream: *io.InStream(anyerror),
+ seekable_stream: io.SeekableStream(anyerror, anyerror),
+ in_stream: io.InStream(anyerror),
auto_close_stream: bool,
is_64: bool,
endian: builtin.Endian,
@@ -366,24 +366,24 @@ pub const Elf = struct {
string_section_index: usize,
string_section: *SectionHeader,
section_headers: []SectionHeader,
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
prealloc_file: os.File,
/// Call close when done.
- pub fn openPath(elf: *Elf, allocator: *mem.Allocator, path: []const u8) !void {
+ pub fn openPath(elf: *Elf, allocator: mem.Allocator, path: []const u8) !void {
@compileError("TODO implement");
}
/// Call close when done.
- pub fn openFile(elf: *Elf, allocator: *mem.Allocator, file: os.File) !void {
+ pub fn openFile(elf: *Elf, allocator: mem.Allocator, file: os.File) !void {
@compileError("TODO implement");
}
pub fn openStream(
elf: *Elf,
- allocator: *mem.Allocator,
- seekable_stream: *io.SeekableStream(anyerror, anyerror),
- in: *io.InStream(anyerror),
+ allocator: mem.Allocator,
+ seekable_stream: io.SeekableStream(anyerror, anyerror),
+ in: io.InStream(anyerror),
) !void {
elf.auto_close_stream = false;
elf.allocator = allocator;
diff --git a/std/event/channel.zig b/std/event/channel.zig
index be5af4977402..68dda91b0afd 100644
--- a/std/event/channel.zig
+++ b/std/event/channel.zig
@@ -327,7 +327,7 @@ test "std.event.Channel" {
var da = std.heap.DirectAllocator.init();
defer da.deinit();
- const allocator = &da.allocator;
+ const allocator = da.allocator();
var loop: Loop = undefined;
// TODO make a multi threaded test
@@ -337,10 +337,10 @@ test "std.event.Channel" {
const channel = try Channel(i32).create(&loop, 0);
defer channel.destroy();
- const handle = try async testChannelGetter(&loop, channel);
+ const handle = try async<&allocator> testChannelGetter(&loop, channel);
defer cancel handle;
- const putter = try async testChannelPutter(channel);
+ const putter = try async<&allocator> testChannelPutter(channel);
defer cancel putter;
loop.run();
diff --git a/std/event/fs.zig b/std/event/fs.zig
index 9756d15a76f8..3625e769a9fa 100644
--- a/std/event/fs.zig
+++ b/std/event/fs.zig
@@ -791,7 +791,7 @@ pub fn Watch(comptime V: type) type {
errdefer os.close(inotify_fd);
var result: *Self = undefined;
- _ = try async linuxEventPutter(inotify_fd, channel, &result);
+ _ = try async<&loop.allocator> linuxEventPutter(inotify_fd, channel, &result);
return result;
},
@@ -1317,7 +1317,7 @@ const test_tmp_dir = "std_event_fs_test";
// var da = std.heap.DirectAllocator.init();
// defer da.deinit();
//
-// const allocator = &da.allocator;
+// const allocator = da.allocator();
//
// // TODO move this into event loop too
// try os.makePath(allocator, test_tmp_dir);
@@ -1328,7 +1328,7 @@ const test_tmp_dir = "std_event_fs_test";
// defer loop.deinit();
//
// var result: anyerror!void = error.ResultNeverWritten;
-// const handle = try async testFsWatchCantFail(&loop, &result);
+// const handle = try async<&allocator> testFsWatchCantFail(&loop, &result);
// defer cancel handle;
//
// loop.run();
@@ -1389,7 +1389,6 @@ async fn testFsWatch(loop: *Loop) !void {
pub const OutStream = struct {
fd: os.FileHandle,
- stream: Stream,
loop: *Loop,
offset: usize,
@@ -1401,21 +1400,26 @@ pub const OutStream = struct {
.fd = fd,
.loop = loop,
.offset = offset,
- .stream = Stream{ .writeFn = writeFn },
};
}
- async<*mem.Allocator> fn writeFn(out_stream: *Stream, bytes: []const u8) Error!void {
- const self = @fieldParentPtr(OutStream, "stream", out_stream);
+ async<*mem.Allocator> fn writeFn(out_stream: Stream, bytes: []const u8) Error!void {
+ const self = out_stream.iface.implCast(OutStream);
const offset = self.offset;
self.offset += bytes.len;
return await (async pwritev(self.loop, self.fd, [][]const u8{bytes}, offset) catch unreachable);
}
+
+ pub fn outStream(self: *OutStream) Stream {
+ return Stream{
+ .iface = Stream.Iface.init(self),
+ .writeFn = writeFn,
+ };
+ }
};
pub const InStream = struct {
fd: os.FileHandle,
- stream: Stream,
loop: *Loop,
offset: usize,
@@ -1431,10 +1435,17 @@ pub const InStream = struct {
};
}
- async<*mem.Allocator> fn readFn(in_stream: *Stream, bytes: []u8) Error!usize {
- const self = @fieldParentPtr(InStream, "stream", in_stream);
+ async<*mem.Allocator> fn readFn(in_stream: Stream, bytes: []u8) Error!usize {
+ const self = in_stream.iface.implCast(InStream);
const amt = try await (async preadv(self.loop, self.fd, [][]u8{bytes}, self.offset) catch unreachable);
self.offset += amt;
return amt;
}
+
+ pub fn inStream(self: *InStream) Stream {
+ return Stream{
+ .iface = Stream.Iface.init(self),
+ .readFn = readFn,
+ };
+ }
};
diff --git a/std/event/future.zig b/std/event/future.zig
index e288e1ba5a2c..a4cc4783ccaf 100644
--- a/std/event/future.zig
+++ b/std/event/future.zig
@@ -91,13 +91,13 @@ test "std.event.Future" {
var da = std.heap.DirectAllocator.init();
defer da.deinit();
- const allocator = &da.allocator;
+ const allocator = da.allocator();
var loop: Loop = undefined;
try loop.initMultiThreaded(allocator);
defer loop.deinit();
- const handle = try async testFuture(&loop);
+ const handle = try async<&allocator> testFuture(&loop);
defer cancel handle;
loop.run();
diff --git a/std/event/group.zig b/std/event/group.zig
index 455d1bd60cf9..617182c112b0 100644
--- a/std/event/group.zig
+++ b/std/event/group.zig
@@ -78,7 +78,7 @@ pub fn Group(comptime ReturnType: type) type {
}
};
var node: *Stack.Node = undefined;
- const handle = try async S.asyncFunc(&node, args);
+ const handle = try async<&self.lock.loop.allocator> S.asyncFunc(&node, args);
node.* = Stack.Node{
.next = undefined,
.data = handle,
@@ -128,13 +128,13 @@ test "std.event.Group" {
var da = std.heap.DirectAllocator.init();
defer da.deinit();
- const allocator = &da.allocator;
+ const allocator = da.allocator();
var loop: Loop = undefined;
try loop.initMultiThreaded(allocator);
defer loop.deinit();
- const handle = try async testGroup(&loop);
+ const handle = try async<&allocator> testGroup(&loop);
defer cancel handle;
loop.run();
diff --git a/std/event/lock.zig b/std/event/lock.zig
index 031b2adf1984..6a6f84262079 100644
--- a/std/event/lock.zig
+++ b/std/event/lock.zig
@@ -129,7 +129,7 @@ test "std.event.Lock" {
var da = std.heap.DirectAllocator.init();
defer da.deinit();
- const allocator = &da.allocator;
+ const allocator = da.allocator();
var loop: Loop = undefined;
try loop.initMultiThreaded(allocator);
@@ -138,7 +138,7 @@ test "std.event.Lock" {
var lock = Lock.init(&loop);
defer lock.deinit();
- const handle = try async testLock(&loop, &lock);
+ const handle = try async<&allocator> testLock(&loop, &lock);
defer cancel handle;
loop.run();
diff --git a/std/event/loop.zig b/std/event/loop.zig
index 76b1f6455bca..0ee6f041ef46 100644
--- a/std/event/loop.zig
+++ b/std/event/loop.zig
@@ -12,7 +12,7 @@ const windows = os.windows;
const maxInt = std.math.maxInt;
pub const Loop = struct {
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
next_tick_queue: std.atomic.Queue(promise),
os_data: OsData,
final_resume_node: ResumeNode,
@@ -88,7 +88,7 @@ pub const Loop = struct {
/// After initialization, call run().
/// TODO copy elision / named return values so that the threads referencing *Loop
/// have the correct pointer value.
- pub fn initSingleThreaded(self: *Loop, allocator: *mem.Allocator) !void {
+ pub fn initSingleThreaded(self: *Loop, allocator: mem.Allocator) !void {
return self.initInternal(allocator, 1);
}
@@ -97,7 +97,7 @@ pub const Loop = struct {
/// After initialization, call run().
/// TODO copy elision / named return values so that the threads referencing *Loop
/// have the correct pointer value.
- pub fn initMultiThreaded(self: *Loop, allocator: *mem.Allocator) !void {
+ pub fn initMultiThreaded(self: *Loop, allocator: mem.Allocator) !void {
if (builtin.single_threaded) @compileError("initMultiThreaded unavailable when building in single-threaded mode");
const core_count = try os.cpuCount(allocator);
return self.initInternal(allocator, core_count);
@@ -105,7 +105,7 @@ pub const Loop = struct {
/// Thread count is the total thread count. The thread pool size will be
/// max(thread_count - 1, 0)
- fn initInternal(self: *Loop, allocator: *mem.Allocator, thread_count: usize) !void {
+ fn initInternal(self: *Loop, allocator: mem.Allocator, thread_count: usize) !void {
self.* = Loop{
.pending_event_count = 1,
.allocator = allocator,
@@ -594,7 +594,7 @@ pub const Loop = struct {
}
};
var handle: promise->@typeOf(func).ReturnType = undefined;
- return async S.asyncFunc(self, &handle, args);
+ return async<&self.allocator> S.asyncFunc(self, &handle, args);
}
/// Awaiting a yield lets the event loop run, starting any unstarted async operations.
@@ -869,7 +869,7 @@ test "std.event.Loop - basic" {
var da = std.heap.DirectAllocator.init();
defer da.deinit();
- const allocator = &da.allocator;
+ const allocator = da.allocator();
var loop: Loop = undefined;
try loop.initMultiThreaded(allocator);
@@ -885,7 +885,7 @@ test "std.event.Loop - call" {
var da = std.heap.DirectAllocator.init();
defer da.deinit();
- const allocator = &da.allocator;
+ const allocator = da.allocator();
var loop: Loop = undefined;
try loop.initMultiThreaded(allocator);
diff --git a/std/event/net.zig b/std/event/net.zig
index 687c1199205c..990c805eecd8 100644
--- a/std/event/net.zig
+++ b/std/event/net.zig
@@ -52,7 +52,7 @@ pub const Server = struct {
try os.posixListen(sockfd, posix.SOMAXCONN);
self.listen_address = std.net.Address.initPosix(try os.posixGetSockName(sockfd));
- self.accept_coro = try async Server.handler(self);
+ self.accept_coro = try async<&self.loop.allocator> Server.handler(self);
errdefer cancel self.accept_coro.?;
self.listen_resume_node.handle = self.accept_coro.?;
@@ -82,7 +82,7 @@ pub const Server = struct {
continue;
}
var socket = os.File.openHandle(accepted_fd);
- _ = async self.handleRequestFn(self, &accepted_addr, socket) catch |err| switch (err) {
+ _ = async<&self.loop.allocator> self.handleRequestFn(self, &accepted_addr, socket) catch |err| switch (err) {
error.OutOfMemory => {
socket.close();
continue;
@@ -298,7 +298,7 @@ test "listen on a port, send bytes, receive bytes" {
const addr = _addr.*; // TODO https://github.com/ziglang/zig/issues/1592
var socket = _socket; // TODO https://github.com/ziglang/zig/issues/1592
- const stream = &socket.outStream().stream;
+ const stream = socket.outStreamAdapter().outStream();
try stream.print("hello from server\n");
}
};
@@ -312,7 +312,7 @@ test "listen on a port, send bytes, receive bytes" {
defer server.tcp_server.deinit();
try server.tcp_server.listen(&addr, MyServer.handler);
- const p = try async doAsyncTest(&loop, &server.tcp_server.listen_address, &server.tcp_server);
+ const p = try async<&std.debug.global_allocator> doAsyncTest(&loop, &server.tcp_server.listen_address, &server.tcp_server);
defer cancel p;
loop.run();
}
@@ -332,7 +332,6 @@ async fn doAsyncTest(loop: *Loop, address: *const std.net.Address, server: *Serv
pub const OutStream = struct {
fd: os.FileHandle,
- stream: Stream,
loop: *Loop,
pub const Error = WriteError;
@@ -342,19 +341,24 @@ pub const OutStream = struct {
return OutStream{
.fd = fd,
.loop = loop,
- .stream = Stream{ .writeFn = writeFn },
};
}
- async<*mem.Allocator> fn writeFn(out_stream: *Stream, bytes: []const u8) Error!void {
- const self = @fieldParentPtr(OutStream, "stream", out_stream);
+ async<*mem.Allocator> fn writeFn(out_stream: Stream, bytes: []const u8) Error!void {
+ const self = out_stream.iface.implCast(OutStream);
return await (async write(self.loop, self.fd, bytes) catch unreachable);
}
+
+ pub fn outStream(self: *OutStream) Stream {
+        return Stream{
+ .iface = Stream.Iface.init(self),
+ .writeFn = writeFn,
+ };
+ }
};
pub const InStream = struct {
fd: os.FileHandle,
- stream: Stream,
loop: *Loop,
pub const Error = ReadError;
@@ -364,12 +368,18 @@ pub const InStream = struct {
return InStream{
.fd = fd,
.loop = loop,
- .stream = Stream{ .readFn = readFn },
};
}
- async<*mem.Allocator> fn readFn(in_stream: *Stream, bytes: []u8) Error!usize {
- const self = @fieldParentPtr(InStream, "stream", in_stream);
+ async<*mem.Allocator> fn readFn(in_stream: Stream, bytes: []u8) Error!usize {
+ const self = in_stream.iface.implCast(InStream);
return await (async read(self.loop, self.fd, bytes) catch unreachable);
}
+
+ pub fn inStream(self: *InStream) Stream {
+ return Stream {
+ .iface = Stream.Iface.init(self),
+ .readFn = readFn,
+ };
+ }
};
diff --git a/std/event/rwlock.zig b/std/event/rwlock.zig
index 76b364fedc14..3b822be66024 100644
--- a/std/event/rwlock.zig
+++ b/std/event/rwlock.zig
@@ -218,7 +218,7 @@ test "std.event.RwLock" {
var da = std.heap.DirectAllocator.init();
defer da.deinit();
- const allocator = &da.allocator;
+ const allocator = da.allocator();
var loop: Loop = undefined;
try loop.initMultiThreaded(allocator);
@@ -227,7 +227,7 @@ test "std.event.RwLock" {
var lock = RwLock.init(&loop);
defer lock.deinit();
- const handle = try async testLock(&loop, &lock);
+ const handle = try async<&allocator> testLock(&loop, &lock);
defer cancel handle;
loop.run();
diff --git a/std/fmt.zig b/std/fmt.zig
index 27a8abba6e31..d876a059d0b0 100644
--- a/std/fmt.zig
+++ b/std/fmt.zig
@@ -914,7 +914,7 @@ pub fn bufPrint(buf: []u8, comptime fmt: []const u8, args: ...) ![]u8 {
pub const AllocPrintError = error{OutOfMemory};
-pub fn allocPrint(allocator: *mem.Allocator, comptime fmt: []const u8, args: ...) AllocPrintError![]u8 {
+pub fn allocPrint(allocator: mem.Allocator, comptime fmt: []const u8, args: ...) AllocPrintError![]u8 {
var size: usize = 0;
format(&size, error{}, countSize, fmt, args) catch |err| switch (err) {};
const buf = try allocator.alloc(u8, size);
diff --git a/std/hash_map.zig b/std/hash_map.zig
index 8cbff7be9d70..f8a61c0f3e4d 100644
--- a/std/hash_map.zig
+++ b/std/hash_map.zig
@@ -19,7 +19,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
entries: []Entry,
size: usize,
max_distance_from_start_index: usize,
- allocator: *Allocator,
+ allocator: Allocator,
// this is used to detect bugs where a hashtable is edited while an iterator is running.
modification_count: debug_u32,
@@ -75,7 +75,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
}
};
- pub fn init(allocator: *Allocator) Self {
+ pub fn init(allocator: Allocator) Self {
return Self{
.entries = []Entry{},
.allocator = allocator,
@@ -378,7 +378,7 @@ test "basic hash map usage" {
var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit();
- var map = AutoHashMap(i32, i32).init(&direct_allocator.allocator);
+ var map = AutoHashMap(i32, i32).init(direct_allocator.allocator());
defer map.deinit();
testing.expect((try map.put(1, 11)) == null);
@@ -421,7 +421,7 @@ test "iterator hash map" {
var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit();
- var reset_map = AutoHashMap(i32, i32).init(&direct_allocator.allocator);
+ var reset_map = AutoHashMap(i32, i32).init(direct_allocator.allocator());
defer reset_map.deinit();
testing.expect((try reset_map.put(1, 11)) == null);
@@ -468,7 +468,7 @@ test "ensure capacity" {
var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit();
- var map = AutoHashMap(i32, i32).init(&direct_allocator.allocator);
+ var map = AutoHashMap(i32, i32).init(direct_allocator.allocator());
defer map.deinit();
try map.ensureCapacity(20);
@@ -502,7 +502,7 @@ pub fn getAutoHashFn(comptime K: type) (fn (K) u32) {
return struct {
fn hash(key: K) u32 {
comptime var rng = comptime std.rand.DefaultPrng.init(0);
- return autoHash(key, &rng.random, u32);
+ return autoHash(key, comptime rng.random(), u32);
}
}.hash;
}
@@ -516,7 +516,7 @@ pub fn getAutoEqlFn(comptime K: type) (fn (K, K) bool) {
}
// TODO improve these hash functions
-pub fn autoHash(key: var, comptime rng: *std.rand.Random, comptime HashInt: type) HashInt {
+pub fn autoHash(key: var, comptime rng: std.rand.Random, comptime HashInt: type) HashInt {
switch (@typeInfo(@typeOf(key))) {
builtin.TypeId.NoReturn,
builtin.TypeId.Opaque,
diff --git a/std/heap.zig b/std/heap.zig
index 3bbb35e65da5..03e4a2c6a6ff 100644
--- a/std/heap.zig
+++ b/std/heap.zig
@@ -11,20 +11,20 @@ const maxInt = std.math.maxInt;
const Allocator = mem.Allocator;
-pub const c_allocator = &c_allocator_state;
-var c_allocator_state = Allocator{
+pub const c_allocator = comptime Allocator{
+ .iface = Allocator.Iface.none(),
.reallocFn = cRealloc,
.shrinkFn = cShrink,
};
-fn cRealloc(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
+fn cRealloc(a: *const Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
assert(new_align <= @alignOf(c_longdouble));
const old_ptr = if (old_mem.len == 0) null else @ptrCast(*c_void, old_mem.ptr);
const buf = c.realloc(old_ptr, new_size) orelse return error.OutOfMemory;
return @ptrCast([*]u8, buf)[0..new_size];
}
-fn cShrink(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
+fn cShrink(a: *const Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
const old_ptr = @ptrCast(*c_void, old_mem.ptr);
const buf = c.realloc(old_ptr, new_size) orelse return old_mem[0..new_size];
return @ptrCast([*]u8, buf)[0..new_size];
@@ -33,21 +33,13 @@ fn cShrink(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new
/// This allocator makes a syscall directly for every allocation and free.
/// Thread-safe and lock-free.
pub const DirectAllocator = struct {
- allocator: Allocator,
-
pub fn init() DirectAllocator {
- return DirectAllocator{
- .allocator = Allocator{
- .reallocFn = realloc,
- .shrinkFn = shrink,
- },
- };
+ return DirectAllocator{};
}
pub fn deinit(self: *DirectAllocator) void {}
- fn alloc(allocator: *Allocator, n: usize, alignment: u29) error{OutOfMemory}![]u8 {
- const self = @fieldParentPtr(DirectAllocator, "allocator", allocator);
+ fn alloc(a: *const Allocator, n: usize, alignment: u29) Allocator.Error![]u8 {
if (n == 0)
return (([*]u8)(undefined))[0..0];
@@ -134,7 +126,7 @@ pub const DirectAllocator = struct {
}
}
- fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
+ fn shrink(a: *const Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
switch (builtin.os) {
Os.linux, Os.macosx, Os.ios, Os.freebsd, Os.netbsd => {
const base_addr = @ptrToInt(old_mem.ptr);
@@ -177,13 +169,13 @@ pub const DirectAllocator = struct {
}
}
- fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
+ fn realloc(a: *const Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
switch (builtin.os) {
Os.linux, Os.macosx, Os.ios, Os.freebsd, Os.netbsd => {
if (new_size <= old_mem.len and new_align <= old_align) {
- return shrink(allocator, old_mem, old_align, new_size, new_align);
+ return shrink(a, old_mem, old_align, new_size, new_align);
}
- const result = try alloc(allocator, new_size, new_align);
+ const result = try alloc(a, new_size, new_align);
if (old_mem.len != 0) {
@memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
_ = os.posix.munmap(@ptrToInt(old_mem.ptr), old_mem.len);
@@ -192,11 +184,11 @@ pub const DirectAllocator = struct {
},
.windows => {
if (old_mem.len == 0) {
- return alloc(allocator, new_size, new_align);
+ return alloc(a, new_size, new_align);
}
if (new_size <= old_mem.len and new_align <= old_align) {
- return shrink(allocator, old_mem, old_align, new_size, new_align);
+ return shrink(a, old_mem, old_align, new_size, new_align);
}
const w = os.windows;
@@ -206,7 +198,7 @@ pub const DirectAllocator = struct {
// Current allocation doesn't satisfy the new alignment.
// For now we'll do a new one no matter what, but maybe
// there is something smarter to do instead.
- const result = try alloc(allocator, new_size, new_align);
+ const result = try alloc(a, new_size, new_align);
assert(old_mem.len != 0);
@memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
if (w.VirtualFree(old_mem.ptr, 0, w.MEM_RELEASE) == 0) unreachable;
@@ -234,7 +226,7 @@ pub const DirectAllocator = struct {
) orelse {
// Committing new pages at the end of the existing allocation
// failed, we need to try a new one.
- const new_alloc_mem = try alloc(allocator, new_size, new_align);
+ const new_alloc_mem = try alloc(a, new_size, new_align);
@memcpy(new_alloc_mem.ptr, old_mem.ptr, old_mem.len);
if (w.VirtualFree(old_mem.ptr, 0, w.MEM_RELEASE) == 0) unreachable;
@@ -247,21 +239,24 @@ pub const DirectAllocator = struct {
else => @compileError("Unsupported OS"),
}
}
+
+ pub fn allocator(self: *DirectAllocator) Allocator {
+ return Allocator{
+ .iface = Allocator.Iface.none(),
+ .reallocFn = realloc,
+ .shrinkFn = shrink,
+ };
+ }
};
pub const HeapAllocator = switch (builtin.os) {
.windows => struct {
- allocator: Allocator,
heap_handle: ?HeapHandle,
const HeapHandle = os.windows.HANDLE;
pub fn init() HeapAllocator {
return HeapAllocator{
- .allocator = Allocator{
- .reallocFn = realloc,
- .shrinkFn = shrink,
- },
.heap_handle = null,
};
}
@@ -272,8 +267,7 @@ pub const HeapAllocator = switch (builtin.os) {
}
}
- fn alloc(allocator: *Allocator, n: usize, alignment: u29) error{OutOfMemory}![]u8 {
- const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
+ fn alloc(self: *HeapAllocator, n: usize, alignment: u29) error{OutOfMemory}![]u8 {
if (n == 0)
return (([*]u8)(undefined))[0..0];
@@ -294,8 +288,8 @@ pub const HeapAllocator = switch (builtin.os) {
return @intToPtr([*]u8, adjusted_addr)[0..n];
}
- fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
- return realloc(allocator, old_mem, old_align, new_size, new_align) catch {
+ fn shrink(a: *const Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
+ return realloc(a, old_mem, old_align, new_size, new_align) catch {
const old_adjusted_addr = @ptrToInt(old_mem.ptr);
const old_record_addr = old_adjusted_addr + old_mem.len;
const root_addr = @intToPtr(*align(1) usize, old_record_addr).*;
@@ -306,10 +300,11 @@ pub const HeapAllocator = switch (builtin.os) {
};
}
- fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
- if (old_mem.len == 0) return alloc(allocator, new_size, new_align);
+ fn realloc(a: *const Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
+ const self = a.iface.implCast(HeapAllocator);
+
+ if (old_mem.len == 0) return self.alloc(new_size, new_align);
- const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
const old_adjusted_addr = @ptrToInt(old_mem.ptr);
const old_record_addr = old_adjusted_addr + old_mem.len;
const root_addr = @intToPtr(*align(1) usize, old_record_addr).*;
@@ -344,6 +339,14 @@ pub const HeapAllocator = switch (builtin.os) {
@intToPtr(*align(1) usize, new_record_addr).* = new_root_addr;
return @intToPtr([*]u8, new_adjusted_addr)[0..new_size];
}
+
+ pub fn allocator(self: *HeapAllocator) Allocator {
+ return Allocator{
+ .iface = Allocator.Iface.init(self),
+ .reallocFn = realloc,
+ .shrinkFn = shrink,
+ };
+ }
},
else => @compileError("Unsupported OS"),
};
@@ -351,20 +354,14 @@ pub const HeapAllocator = switch (builtin.os) {
/// This allocator takes an existing allocator, wraps it, and provides an interface
/// where you can allocate without freeing, and then free it all together.
pub const ArenaAllocator = struct {
- pub allocator: Allocator,
-
- child_allocator: *Allocator,
+ child_allocator: Allocator,
buffer_list: std.LinkedList([]u8),
end_index: usize,
const BufNode = std.LinkedList([]u8).Node;
- pub fn init(child_allocator: *Allocator) ArenaAllocator {
+ pub fn init(child_allocator: Allocator) ArenaAllocator {
return ArenaAllocator{
- .allocator = Allocator{
- .reallocFn = realloc,
- .shrinkFn = shrink,
- },
.child_allocator = child_allocator,
.buffer_list = std.LinkedList([]u8).init(),
.end_index = 0,
@@ -402,8 +399,8 @@ pub const ArenaAllocator = struct {
return buf_node;
}
- fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
- const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);
+ fn alloc(a: *const Allocator, n: usize, alignment: u29) ![]u8 {
+ const self = a.iface.implCast(ArenaAllocator);
var cur_node = if (self.buffer_list.last) |last_node| last_node else try self.createNode(0, n + alignment);
while (true) {
@@ -422,40 +419,42 @@ pub const ArenaAllocator = struct {
}
}
- fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
+ fn realloc(a: *const Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
if (new_size <= old_mem.len and new_align <= new_size) {
// We can't do anything with the memory, so tell the client to keep it.
return error.OutOfMemory;
} else {
- const result = try alloc(allocator, new_size, new_align);
+ const result = try alloc(a, new_size, new_align);
@memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
return result;
}
}
- fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
+ fn shrink(a: *const Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
return old_mem[0..new_size];
}
+
+ pub fn allocator(self: *ArenaAllocator) Allocator {
+ return Allocator{
+ .iface = Allocator.Iface.init(self),
+ .reallocFn = realloc,
+ .shrinkFn = shrink,
+ };
+ }
};
pub const FixedBufferAllocator = struct {
- allocator: Allocator,
end_index: usize,
buffer: []u8,
pub fn init(buffer: []u8) FixedBufferAllocator {
return FixedBufferAllocator{
- .allocator = Allocator{
- .reallocFn = realloc,
- .shrinkFn = shrink,
- },
.buffer = buffer,
.end_index = 0,
};
}
- fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
- const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
+ fn alloc(self: *FixedBufferAllocator, n: usize, alignment: u29) ![]u8 {
const addr = @ptrToInt(self.buffer.ptr) + self.end_index;
const adjusted_addr = mem.alignForward(addr, alignment);
const adjusted_index = self.end_index + (adjusted_addr - addr);
@@ -469,8 +468,9 @@ pub const FixedBufferAllocator = struct {
return result;
}
- fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
- const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
+ fn realloc(a: *const Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) Allocator.Error![]u8 {
+ const self = a.iface.implCast(FixedBufferAllocator);
+
assert(old_mem.len <= self.end_index);
if (old_mem.ptr == self.buffer.ptr + self.end_index - old_mem.len and
mem.alignForward(@ptrToInt(old_mem.ptr), new_align) == @ptrToInt(old_mem.ptr))
@@ -485,15 +485,23 @@ pub const FixedBufferAllocator = struct {
// We can't do anything with the memory, so tell the client to keep it.
return error.OutOfMemory;
} else {
- const result = try alloc(allocator, new_size, new_align);
+ const result = try self.alloc(new_size, new_align);
@memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
return result;
}
}
- fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
+ fn shrink(a: *const Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
return old_mem[0..new_size];
}
+
+ pub fn allocator(self: *FixedBufferAllocator) Allocator {
+ return Allocator{
+ .iface = Allocator.Iface.init(self),
+ .reallocFn = realloc,
+ .shrinkFn = shrink,
+ };
+ }
};
// FIXME: Exposed LLVM intrinsics is a bug
@@ -501,19 +509,14 @@ pub const FixedBufferAllocator = struct {
extern fn @"llvm.wasm.memory.size.i32"(u32) u32;
extern fn @"llvm.wasm.memory.grow.i32"(u32, u32) i32;
-pub const wasm_allocator = &wasm_allocator_state.allocator;
+pub const wasm_allocator = wasm_allocator_state.allocator();
var wasm_allocator_state = WasmAllocator{
- .allocator = Allocator{
- .reallocFn = WasmAllocator.realloc,
- .shrinkFn = WasmAllocator.shrink,
- },
.start_ptr = undefined,
.num_pages = 0,
.end_index = 0,
};
const WasmAllocator = struct {
- allocator: Allocator,
start_ptr: [*]u8,
num_pages: usize,
end_index: usize,
@@ -524,9 +527,7 @@ const WasmAllocator = struct {
}
}
- fn alloc(allocator: *Allocator, size: usize, alignment: u29) ![]u8 {
- const self = @fieldParentPtr(WasmAllocator, "allocator", allocator);
-
+ fn alloc(self: *WasmAllocator, size: usize, alignment: u29) ![]u8 {
const addr = @ptrToInt(self.start_ptr) + self.end_index;
const adjusted_addr = mem.alignForward(addr, alignment);
const adjusted_index = self.end_index + (adjusted_addr - addr);
@@ -555,25 +556,24 @@ const WasmAllocator = struct {
}
// Check if memory is the last "item" and is aligned correctly
- fn is_last_item(allocator: *Allocator, memory: []u8, alignment: u29) bool {
- const self = @fieldParentPtr(WasmAllocator, "allocator", allocator);
+ fn is_last_item(self: *WasmAllocator, memory: []u8, alignment: u29) bool {
return memory.ptr == self.start_ptr + self.end_index - memory.len and mem.alignForward(@ptrToInt(memory.ptr), alignment) == @ptrToInt(memory.ptr);
}
- fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
- const self = @fieldParentPtr(WasmAllocator, "allocator", allocator);
+ fn realloc(a: *const Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) Allocator.Error![]u8 {
+ const self = a.iface.implCast(WasmAllocator);
// Initialize start_ptr at the first realloc
if (self.num_pages == 0) {
self.start_ptr = @intToPtr([*]u8, @intCast(usize, @"llvm.wasm.memory.size.i32"(0)) * os.page_size);
}
- if (is_last_item(allocator, old_mem, new_align)) {
+ if (self.is_last_item(old_mem, new_align)) {
const start_index = self.end_index - old_mem.len;
const new_end_index = start_index + new_size;
if (new_end_index > self.num_pages * os.page_size) {
- _ = try alloc(allocator, new_end_index - self.end_index, new_align);
+ _ = try self.alloc(new_end_index - self.end_index, new_align);
}
const result = self.start_ptr[start_index..new_end_index];
@@ -582,15 +582,23 @@ const WasmAllocator = struct {
} else if (new_size <= old_mem.len and new_align <= old_align) {
return error.OutOfMemory;
} else {
- const result = try alloc(allocator, new_size, new_align);
+ const result = try self.alloc(new_size, new_align);
mem.copy(u8, result, old_mem);
return result;
}
}
- fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
+ fn shrink(a: *const Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
return old_mem[0..new_size];
}
+
+ pub fn allocator(self: *WasmAllocator) Allocator {
+ return Allocator{
+ .iface = Allocator.Iface.init(self),
+ .reallocFn = realloc,
+ .shrinkFn = shrink,
+ };
+ }
};
pub const ThreadSafeFixedBufferAllocator = blk: {
@@ -599,23 +607,17 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
} else {
// lock free
break :blk struct {
- allocator: Allocator,
end_index: usize,
buffer: []u8,
pub fn init(buffer: []u8) ThreadSafeFixedBufferAllocator {
return ThreadSafeFixedBufferAllocator{
- .allocator = Allocator{
- .reallocFn = realloc,
- .shrinkFn = shrink,
- },
.buffer = buffer,
.end_index = 0,
};
}
- fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
- const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator);
+ fn alloc(self: *ThreadSafeFixedBufferAllocator, n: usize, alignment: u29) ![]u8 {
var end_index = @atomicLoad(usize, &self.end_index, builtin.AtomicOrder.SeqCst);
while (true) {
const addr = @ptrToInt(self.buffer.ptr) + end_index;
@@ -629,33 +631,39 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
}
}
- fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
+ fn realloc(a: *const Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) Allocator.Error![]u8 {
+ const self = a.iface.implCast(ThreadSafeFixedBufferAllocator);
+
if (new_size <= old_mem.len and new_align <= old_align) {
// We can't do anything useful with the memory, tell the client to keep it.
return error.OutOfMemory;
} else {
- const result = try alloc(allocator, new_size, new_align);
+ const result = try self.alloc(new_size, new_align);
@memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
return result;
}
}
- fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
+ fn shrink(a: *const Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
return old_mem[0..new_size];
}
+
+ pub fn allocator(self: *ThreadSafeFixedBufferAllocator) Allocator {
+ return Allocator{
+ .iface = Allocator.Iface.init(self),
+ .reallocFn = realloc,
+ .shrinkFn = shrink,
+ };
+ }
};
}
};
-pub fn stackFallback(comptime size: usize, fallback_allocator: *Allocator) StackFallbackAllocator(size) {
+pub fn stackFallback(comptime size: usize, fallback_allocator: Allocator) StackFallbackAllocator(size) {
return StackFallbackAllocator(size){
.buffer = undefined,
.fallback_allocator = fallback_allocator,
.fixed_buffer_allocator = undefined,
- .allocator = Allocator{
- .reallocFn = StackFallbackAllocator(size).realloc,
- .shrinkFn = StackFallbackAllocator(size).shrink,
- },
};
}
@@ -664,29 +672,29 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
const Self = @This();
buffer: [size]u8,
- allocator: Allocator,
- fallback_allocator: *Allocator,
+ fallback_allocator: Allocator,
fixed_buffer_allocator: FixedBufferAllocator,
- pub fn get(self: *Self) *Allocator {
+ pub fn get(self: *Self) Allocator {
self.fixed_buffer_allocator = FixedBufferAllocator.init(self.buffer[0..]);
- return &self.allocator;
+ return self.allocator();
}
- fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
- const self = @fieldParentPtr(Self, "allocator", allocator);
+ fn realloc(a: *const Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) Allocator.Error![]u8 {
+ const self = a.iface.implCast(Self);
+
const in_buffer = @ptrToInt(old_mem.ptr) >= @ptrToInt(&self.buffer) and
@ptrToInt(old_mem.ptr) < @ptrToInt(&self.buffer) + self.buffer.len;
if (in_buffer) {
return FixedBufferAllocator.realloc(
- &self.fixed_buffer_allocator.allocator,
+ &self.fixed_buffer_allocator.allocator(),
old_mem,
old_align,
new_size,
new_align,
) catch {
const result = try self.fallback_allocator.reallocFn(
- self.fallback_allocator,
+ &self.fallback_allocator,
([*]u8)(undefined)[0..0],
undefined,
new_size,
@@ -697,7 +705,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
};
}
return self.fallback_allocator.reallocFn(
- self.fallback_allocator,
+ &self.fallback_allocator,
old_mem,
old_align,
new_size,
@@ -705,13 +713,13 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
);
}
- fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
- const self = @fieldParentPtr(Self, "allocator", allocator);
+ fn shrink(a: *const Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
+ const self = a.iface.implCast(Self);
const in_buffer = @ptrToInt(old_mem.ptr) >= @ptrToInt(&self.buffer) and
@ptrToInt(old_mem.ptr) < @ptrToInt(&self.buffer) + self.buffer.len;
if (in_buffer) {
return FixedBufferAllocator.shrink(
- &self.fixed_buffer_allocator.allocator,
+ &self.fixed_buffer_allocator.allocator(),
old_mem,
old_align,
new_size,
@@ -719,13 +727,21 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
);
}
return self.fallback_allocator.shrinkFn(
- self.fallback_allocator,
+ &self.fallback_allocator,
old_mem,
old_align,
new_size,
new_align,
);
}
+
+ pub fn allocator(self: *Self) Allocator {
+ return Allocator{
+ .iface = Allocator.Iface.init(self),
+ .reallocFn = realloc,
+ .shrinkFn = shrink,
+ };
+ }
};
}
@@ -741,7 +757,7 @@ test "DirectAllocator" {
var direct_allocator = DirectAllocator.init();
defer direct_allocator.deinit();
- const allocator = &direct_allocator.allocator;
+ const allocator = direct_allocator.allocator();
try testAllocator(allocator);
try testAllocatorAligned(allocator, 16);
try testAllocatorLargeAlignment(allocator);
@@ -764,7 +780,7 @@ test "HeapAllocator" {
var heap_allocator = HeapAllocator.init();
defer heap_allocator.deinit();
- const allocator = &heap_allocator.allocator;
+ const allocator = heap_allocator.allocator();
try testAllocator(allocator);
try testAllocatorAligned(allocator, 16);
try testAllocatorLargeAlignment(allocator);
@@ -776,23 +792,23 @@ test "ArenaAllocator" {
var direct_allocator = DirectAllocator.init();
defer direct_allocator.deinit();
- var arena_allocator = ArenaAllocator.init(&direct_allocator.allocator);
+ var arena_allocator = ArenaAllocator.init(direct_allocator.allocator());
defer arena_allocator.deinit();
- try testAllocator(&arena_allocator.allocator);
- try testAllocatorAligned(&arena_allocator.allocator, 16);
- try testAllocatorLargeAlignment(&arena_allocator.allocator);
- try testAllocatorAlignedShrink(&arena_allocator.allocator);
+ try testAllocator(arena_allocator.allocator());
+ try testAllocatorAligned(arena_allocator.allocator(), 16);
+ try testAllocatorLargeAlignment(arena_allocator.allocator());
+ try testAllocatorAlignedShrink(arena_allocator.allocator());
}
var test_fixed_buffer_allocator_memory: [80000 * @sizeOf(u64)]u8 = undefined;
test "FixedBufferAllocator" {
var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
- try testAllocator(&fixed_buffer_allocator.allocator);
- try testAllocatorAligned(&fixed_buffer_allocator.allocator, 16);
- try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator);
- try testAllocatorAlignedShrink(&fixed_buffer_allocator.allocator);
+ try testAllocator(fixed_buffer_allocator.allocator());
+ try testAllocatorAligned(fixed_buffer_allocator.allocator(), 16);
+ try testAllocatorLargeAlignment(fixed_buffer_allocator.allocator());
+ try testAllocatorAlignedShrink(fixed_buffer_allocator.allocator());
}
test "FixedBufferAllocator Reuse memory on realloc" {
@@ -801,22 +817,22 @@ test "FixedBufferAllocator Reuse memory on realloc" {
{
var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
- var slice0 = try fixed_buffer_allocator.allocator.alloc(u8, 5);
+ var slice0 = try fixed_buffer_allocator.allocator().alloc(u8, 5);
testing.expect(slice0.len == 5);
- var slice1 = try fixed_buffer_allocator.allocator.realloc(slice0, 10);
+ var slice1 = try fixed_buffer_allocator.allocator().realloc(slice0, 10);
testing.expect(slice1.ptr == slice0.ptr);
testing.expect(slice1.len == 10);
- testing.expectError(error.OutOfMemory, fixed_buffer_allocator.allocator.realloc(slice1, 11));
+ testing.expectError(error.OutOfMemory, fixed_buffer_allocator.allocator().realloc(slice1, 11));
}
// check that we don't re-use the memory if it's not the most recent block
{
var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
- var slice0 = try fixed_buffer_allocator.allocator.alloc(u8, 2);
+ var slice0 = try fixed_buffer_allocator.allocator().alloc(u8, 2);
slice0[0] = 1;
slice0[1] = 2;
- var slice1 = try fixed_buffer_allocator.allocator.alloc(u8, 2);
- var slice2 = try fixed_buffer_allocator.allocator.realloc(slice0, 4);
+ var slice1 = try fixed_buffer_allocator.allocator().alloc(u8, 2);
+ var slice2 = try fixed_buffer_allocator.allocator().realloc(slice0, 4);
testing.expect(slice0.ptr != slice2.ptr);
testing.expect(slice1.ptr != slice2.ptr);
testing.expect(slice2[0] == 1);
@@ -827,13 +843,13 @@ test "FixedBufferAllocator Reuse memory on realloc" {
test "ThreadSafeFixedBufferAllocator" {
var fixed_buffer_allocator = ThreadSafeFixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
- try testAllocator(&fixed_buffer_allocator.allocator);
- try testAllocatorAligned(&fixed_buffer_allocator.allocator, 16);
- try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator);
- try testAllocatorAlignedShrink(&fixed_buffer_allocator.allocator);
+ try testAllocator(fixed_buffer_allocator.allocator());
+ try testAllocatorAligned(fixed_buffer_allocator.allocator(), 16);
+ try testAllocatorLargeAlignment(fixed_buffer_allocator.allocator());
+ try testAllocatorAlignedShrink(fixed_buffer_allocator.allocator());
}
-fn testAllocator(allocator: *mem.Allocator) !void {
+fn testAllocator(allocator: Allocator) !void {
var slice = try allocator.alloc(*i32, 100);
testing.expect(slice.len == 100);
for (slice) |*item, i| {
@@ -861,7 +877,7 @@ fn testAllocator(allocator: *mem.Allocator) !void {
allocator.free(slice);
}
-fn testAllocatorAligned(allocator: *mem.Allocator, comptime alignment: u29) !void {
+fn testAllocatorAligned(allocator: Allocator, comptime alignment: u29) !void {
// initial
var slice = try allocator.alignedAlloc(u8, alignment, 10);
testing.expect(slice.len == 10);
@@ -885,7 +901,7 @@ fn testAllocatorAligned(allocator: *mem.Allocator, comptime alignment: u29) !voi
testing.expect(slice.len == 0);
}
-fn testAllocatorLargeAlignment(allocator: *mem.Allocator) mem.Allocator.Error!void {
+fn testAllocatorLargeAlignment(allocator: Allocator) Allocator.Error!void {
//Maybe a platform's page_size is actually the same as or
// very near usize?
if (os.page_size << 2 > maxInt(usize)) return;
@@ -914,9 +930,9 @@ fn testAllocatorLargeAlignment(allocator: *mem.Allocator) mem.Allocator.Error!vo
allocator.free(slice);
}
-fn testAllocatorAlignedShrink(allocator: *mem.Allocator) mem.Allocator.Error!void {
+fn testAllocatorAlignedShrink(allocator: Allocator) Allocator.Error!void {
var debug_buffer: [1000]u8 = undefined;
- const debug_allocator = &FixedBufferAllocator.init(&debug_buffer).allocator;
+ const debug_allocator = FixedBufferAllocator.init(&debug_buffer).allocator();
const alloc_size = os.page_size * 2 + 50;
var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
diff --git a/std/interface.zig b/std/interface.zig
new file mode 100644
index 000000000000..702f0be8d350
--- /dev/null
+++ b/std/interface.zig
@@ -0,0 +1,25 @@
+pub fn Interface() type {
+ const Impl = @OpaqueType();
+
+ return struct {
+ const Self = @This();
+
+ impl: ?*Impl,
+
+ pub fn none() Self {
+ return Self{ .impl = null };
+ }
+
+ pub fn init(ptr: var) Self {
+ const T = @typeOf(ptr);
+ if (@alignOf(T) == 0) @compileError("0-Bit implementations can't be casted (and casting is unnecessary anyway, use .none)");
+ return Self{ .impl = @ptrCast(*Impl, ptr) };
+ }
+
+ pub fn implCast(self: *const Self, comptime T: type) *T {
+ if (@alignOf(T) == 0) @compileError("0-Bit implementations can't be casted (and casting is unnecessary anyway)");
+ const aligned = @alignCast(@alignOf(T), self.impl.?);
+ return @ptrCast(*T, aligned);
+ }
+ };
+}
diff --git a/std/io.zig b/std/io.zig
index 5e1aaf5da3a1..43eea33bb3f9 100644
--- a/std/io.zig
+++ b/std/io.zig
@@ -43,16 +43,19 @@ pub fn InStream(comptime ReadError: type) type {
return struct {
const Self = @This();
pub const Error = ReadError;
+ pub const Iface = std.Interface();
+
+ iface: Iface,
/// Return the number of bytes read. If the number read is smaller than buf.len, it
/// means the stream reached the end. Reaching the end of a stream is not an error
/// condition.
- readFn: fn (self: *Self, buffer: []u8) Error!usize,
+ readFn: fn (self: Self, buffer: []u8) Error!usize,
/// Replaces `buffer` contents by reading from the stream until it is finished.
/// If `buffer.len()` would exceed `max_size`, `error.StreamTooLong` is returned and
/// the contents read from the stream are lost.
- pub fn readAllBuffer(self: *Self, buffer: *Buffer, max_size: usize) !void {
+ pub fn readAllBuffer(self: Self, buffer: *Buffer, max_size: usize) !void {
try buffer.resize(0);
var actual_buf_len: usize = 0;
@@ -76,7 +79,7 @@ pub fn InStream(comptime ReadError: type) type {
/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
/// Caller owns returned memory.
/// If this function returns an error, the contents from the stream read so far are lost.
- pub fn readAllAlloc(self: *Self, allocator: *mem.Allocator, max_size: usize) ![]u8 {
+ pub fn readAllAlloc(self: Self, allocator: mem.Allocator, max_size: usize) ![]u8 {
var buf = Buffer.initNull(allocator);
defer buf.deinit();
@@ -88,7 +91,7 @@ pub fn InStream(comptime ReadError: type) type {
/// Does not include the delimiter in the result.
/// If `buffer.len()` would exceed `max_size`, `error.StreamTooLong` is returned and the contents
/// read from the stream so far are lost.
- pub fn readUntilDelimiterBuffer(self: *Self, buffer: *Buffer, delimiter: u8, max_size: usize) !void {
+ pub fn readUntilDelimiterBuffer(self: Self, buffer: *Buffer, delimiter: u8, max_size: usize) !void {
try buffer.resize(0);
while (true) {
@@ -110,7 +113,7 @@ pub fn InStream(comptime ReadError: type) type {
/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
/// Caller owns returned memory.
/// If this function returns an error, the contents from the stream read so far are lost.
- pub fn readUntilDelimiterAlloc(self: *Self, allocator: *mem.Allocator, delimiter: u8, max_size: usize) ![]u8 {
+ pub fn readUntilDelimiterAlloc(self: Self, allocator: mem.Allocator, delimiter: u8, max_size: usize) ![]u8 {
var buf = Buffer.initNull(allocator);
defer buf.deinit();
@@ -121,14 +124,14 @@ pub fn InStream(comptime ReadError: type) type {
/// Returns the number of bytes read. It may be less than buffer.len.
/// If the number of bytes read is 0, it means end of stream.
/// End of stream is not an error condition.
- pub fn read(self: *Self, buffer: []u8) Error!usize {
+ pub fn read(self: Self, buffer: []u8) Error!usize {
return self.readFn(self, buffer);
}
/// Returns the number of bytes read. If the number read is smaller than buf.len, it
/// means the stream reached the end. Reaching the end of a stream is not an error
/// condition.
- pub fn readFull(self: *Self, buffer: []u8) Error!usize {
+ pub fn readFull(self: Self, buffer: []u8) Error!usize {
var index: usize = 0;
while (index != buffer.len) {
const amt = try self.read(buffer[index..]);
@@ -139,56 +142,56 @@ pub fn InStream(comptime ReadError: type) type {
}
/// Same as `readFull` but end of stream returns `error.EndOfStream`.
- pub fn readNoEof(self: *Self, buf: []u8) !void {
+ pub fn readNoEof(self: Self, buf: []u8) !void {
const amt_read = try self.read(buf);
if (amt_read < buf.len) return error.EndOfStream;
}
/// Reads 1 byte from the stream or returns `error.EndOfStream`.
- pub fn readByte(self: *Self) !u8 {
+ pub fn readByte(self: Self) !u8 {
var result: [1]u8 = undefined;
try self.readNoEof(result[0..]);
return result[0];
}
/// Same as `readByte` except the returned byte is signed.
- pub fn readByteSigned(self: *Self) !i8 {
+ pub fn readByteSigned(self: Self) !i8 {
return @bitCast(i8, try self.readByte());
}
/// Reads a native-endian integer
- pub fn readIntNative(self: *Self, comptime T: type) !T {
+ pub fn readIntNative(self: Self, comptime T: type) !T {
var bytes: [@sizeOf(T)]u8 = undefined;
try self.readNoEof(bytes[0..]);
return mem.readIntNative(T, &bytes);
}
/// Reads a foreign-endian integer
- pub fn readIntForeign(self: *Self, comptime T: type) !T {
+ pub fn readIntForeign(self: Self, comptime T: type) !T {
var bytes: [@sizeOf(T)]u8 = undefined;
try self.readNoEof(bytes[0..]);
return mem.readIntForeign(T, &bytes);
}
- pub fn readIntLittle(self: *Self, comptime T: type) !T {
+ pub fn readIntLittle(self: Self, comptime T: type) !T {
var bytes: [@sizeOf(T)]u8 = undefined;
try self.readNoEof(bytes[0..]);
return mem.readIntLittle(T, &bytes);
}
- pub fn readIntBig(self: *Self, comptime T: type) !T {
+ pub fn readIntBig(self: Self, comptime T: type) !T {
var bytes: [@sizeOf(T)]u8 = undefined;
try self.readNoEof(bytes[0..]);
return mem.readIntBig(T, &bytes);
}
- pub fn readInt(self: *Self, comptime T: type, endian: builtin.Endian) !T {
+ pub fn readInt(self: Self, comptime T: type, endian: builtin.Endian) !T {
var bytes: [@sizeOf(T)]u8 = undefined;
try self.readNoEof(bytes[0..]);
return mem.readInt(T, &bytes, endian);
}
- pub fn readVarInt(self: *Self, comptime ReturnType: type, endian: builtin.Endian, size: usize) !ReturnType {
+ pub fn readVarInt(self: Self, comptime ReturnType: type, endian: builtin.Endian, size: usize) !ReturnType {
assert(size <= @sizeOf(ReturnType));
var bytes_buf: [@sizeOf(ReturnType)]u8 = undefined;
const bytes = bytes_buf[0..size];
@@ -196,14 +199,14 @@ pub fn InStream(comptime ReadError: type) type {
return mem.readVarInt(ReturnType, bytes, endian);
}
- pub fn skipBytes(self: *Self, num_bytes: u64) !void {
+ pub fn skipBytes(self: Self, num_bytes: u64) !void {
var i: u64 = 0;
while (i < num_bytes) : (i += 1) {
_ = try self.readByte();
}
}
- pub fn readStruct(self: *Self, comptime T: type) !T {
+ pub fn readStruct(self: Self, comptime T: type) !T {
// Only extern and packed structs have defined in-memory layout.
comptime assert(@typeInfo(T).Struct.layout != builtin.TypeInfo.ContainerLayout.Auto);
var res: [1]T = undefined;
@@ -217,23 +220,26 @@ pub fn OutStream(comptime WriteError: type) type {
return struct {
const Self = @This();
pub const Error = WriteError;
+ pub const Iface = std.Interface();
+
+ iface: Iface,
- writeFn: fn (self: *Self, bytes: []const u8) Error!void,
+ writeFn: fn (self: Self, bytes: []const u8) Error!void,
- pub fn print(self: *Self, comptime format: []const u8, args: ...) Error!void {
+ pub fn print(self: Self, comptime format: []const u8, args: ...) Error!void {
return std.fmt.format(self, Error, self.writeFn, format, args);
}
- pub fn write(self: *Self, bytes: []const u8) Error!void {
+ pub fn write(self: Self, bytes: []const u8) Error!void {
return self.writeFn(self, bytes);
}
- pub fn writeByte(self: *Self, byte: u8) Error!void {
+ pub fn writeByte(self: Self, byte: u8) Error!void {
const slice = (*const [1]u8)(&byte)[0..];
return self.writeFn(self, slice);
}
- pub fn writeByteNTimes(self: *Self, byte: u8, n: usize) Error!void {
+ pub fn writeByteNTimes(self: Self, byte: u8, n: usize) Error!void {
const slice = (*const [1]u8)(&byte)[0..];
var i: usize = 0;
while (i < n) : (i += 1) {
@@ -242,32 +248,32 @@ pub fn OutStream(comptime WriteError: type) type {
}
/// Write a native-endian integer.
- pub fn writeIntNative(self: *Self, comptime T: type, value: T) Error!void {
+ pub fn writeIntNative(self: Self, comptime T: type, value: T) Error!void {
var bytes: [@sizeOf(T)]u8 = undefined;
mem.writeIntNative(T, &bytes, value);
return self.writeFn(self, bytes);
}
/// Write a foreign-endian integer.
- pub fn writeIntForeign(self: *Self, comptime T: type, value: T) Error!void {
+ pub fn writeIntForeign(self: Self, comptime T: type, value: T) Error!void {
var bytes: [@sizeOf(T)]u8 = undefined;
mem.writeIntForeign(T, &bytes, value);
return self.writeFn(self, bytes);
}
- pub fn writeIntLittle(self: *Self, comptime T: type, value: T) Error!void {
+ pub fn writeIntLittle(self: Self, comptime T: type, value: T) Error!void {
var bytes: [@sizeOf(T)]u8 = undefined;
mem.writeIntLittle(T, &bytes, value);
return self.writeFn(self, bytes);
}
- pub fn writeIntBig(self: *Self, comptime T: type, value: T) Error!void {
+ pub fn writeIntBig(self: Self, comptime T: type, value: T) Error!void {
var bytes: [@sizeOf(T)]u8 = undefined;
mem.writeIntBig(T, &bytes, value);
return self.writeFn(self, bytes);
}
- pub fn writeInt(self: *Self, comptime T: type, value: T, endian: builtin.Endian) Error!void {
+ pub fn writeInt(self: Self, comptime T: type, value: T, endian: builtin.Endian) Error!void {
var bytes: [@sizeOf(T)]u8 = undefined;
mem.writeInt(T, &bytes, value, endian);
return self.writeFn(self, bytes);
@@ -282,12 +288,12 @@ pub fn writeFile(path: []const u8, data: []const u8) !void {
}
/// On success, caller owns returned buffer.
-pub fn readFileAlloc(allocator: *mem.Allocator, path: []const u8) ![]u8 {
+pub fn readFileAlloc(allocator: mem.Allocator, path: []const u8) ![]u8 {
return readFileAllocAligned(allocator, path, @alignOf(u8));
}
/// On success, caller owns returned buffer.
-pub fn readFileAllocAligned(allocator: *mem.Allocator, path: []const u8, comptime A: u29) ![]align(A) u8 {
+pub fn readFileAllocAligned(allocator: mem.Allocator, path: []const u8, comptime A: u29) ![]align(A) u8 {
var file = try File.openRead(path);
defer file.close();
@@ -295,8 +301,8 @@ pub fn readFileAllocAligned(allocator: *mem.Allocator, path: []const u8, comptim
const buf = try allocator.alignedAlloc(u8, A, size);
errdefer allocator.free(buf);
- var adapter = file.inStream();
- try adapter.stream.readNoEof(buf[0..size]);
+ var adapter = file.inStreamAdapter();
+ try adapter.inStream().readNoEof(buf[0..size]);
return buf;
}
@@ -309,15 +315,13 @@ pub fn BufferedInStreamCustom(comptime buffer_size: usize, comptime Error: type)
const Self = @This();
const Stream = InStream(Error);
- pub stream: Stream,
-
- unbuffered_in_stream: *Stream,
+ unbuffered_in_stream: Stream,
buffer: [buffer_size]u8,
start_index: usize,
end_index: usize,
- pub fn init(unbuffered_in_stream: *Stream) Self {
+ pub fn init(unbuffered_in_stream: Stream) Self {
return Self{
.unbuffered_in_stream = unbuffered_in_stream,
.buffer = undefined,
@@ -328,13 +332,11 @@ pub fn BufferedInStreamCustom(comptime buffer_size: usize, comptime Error: type)
// and 0, the code would think we already hit EOF.
.start_index = buffer_size,
.end_index = buffer_size,
-
- .stream = Stream{ .readFn = readFn },
};
}
- fn readFn(in_stream: *Stream, dest: []u8) !usize {
- const self = @fieldParentPtr(Self, "stream", in_stream);
+ fn readFn(in_stream: Stream, dest: []u8) !usize {
+ const self = in_stream.iface.implCast(Self);
var dest_index: usize = 0;
while (true) {
@@ -370,6 +372,13 @@ pub fn BufferedInStreamCustom(comptime buffer_size: usize, comptime Error: type)
dest_index += copy_amount;
}
}
+
+ pub fn inStream(self: *Self) Stream {
+ return Stream{
+ .iface = Stream.Iface.init(self),
+ .readFn = readFn,
+ };
+ }
};
}
@@ -378,20 +387,18 @@ test "io.BufferedInStream" {
const Error = error{NoError};
const Stream = InStream(Error);
- stream: Stream,
str: []const u8,
curr: usize,
fn init(str: []const u8) @This() {
return @This(){
- .stream = Stream{ .readFn = readFn },
.str = str,
.curr = 0,
};
}
- fn readFn(in_stream: *Stream, dest: []u8) Error!usize {
- const self = @fieldParentPtr(@This(), "stream", in_stream);
+ fn readFn(in_stream: Stream, dest: []u8) Error!usize {
+ const self = in_stream.iface.implCast(@This());
if (self.str.len <= self.curr or dest.len == 0)
return 0;
@@ -399,15 +406,22 @@ test "io.BufferedInStream" {
self.curr += 1;
return 1;
}
+
+ pub fn inStream(self: *@This()) Stream {
+ return Stream{
+ .iface = Stream.Iface.init(self),
+ .readFn = readFn,
+ };
+ }
};
var buf: [100]u8 = undefined;
- const allocator = &std.heap.FixedBufferAllocator.init(buf[0..]).allocator;
+ const allocator = std.heap.FixedBufferAllocator.init(buf[0..]).allocator();
const str = "This is a test";
var one_byte_stream = OneByteReadInStream.init(str);
- var buf_in_stream = BufferedInStream(OneByteReadInStream.Error).init(&one_byte_stream.stream);
- const stream = &buf_in_stream.stream;
+ var buf_in_stream = BufferedInStream(OneByteReadInStream.Error).init(one_byte_stream.inStream());
+ const stream = buf_in_stream.inStream();
const res = try stream.readAllAlloc(allocator, str.len + 1);
testing.expectEqualSlices(u8, str, res);
@@ -421,8 +435,7 @@ pub fn PeekStream(comptime buffer_size: usize, comptime InStreamError: type) typ
pub const Error = InStreamError;
pub const Stream = InStream(Error);
- pub stream: Stream,
- base: *Stream,
+ base: Stream,
// Right now the look-ahead space is statically allocated, but a version with dynamic allocation
// is not too difficult to derive from this.
@@ -430,13 +443,12 @@ pub fn PeekStream(comptime buffer_size: usize, comptime InStreamError: type) typ
index: usize,
at_end: bool,
- pub fn init(base: *Stream) Self {
+ pub fn init(base: Stream) Self {
return Self{
.base = base,
.buffer = undefined,
.index = 0,
.at_end = false,
- .stream = Stream{ .readFn = readFn },
};
}
@@ -453,8 +465,8 @@ pub fn PeekStream(comptime buffer_size: usize, comptime InStreamError: type) typ
}
}
- fn readFn(in_stream: *Stream, dest: []u8) Error!usize {
- const self = @fieldParentPtr(Self, "stream", in_stream);
+ fn readFn(in_stream: Stream, dest: []u8) Error!usize {
+ const self = in_stream.iface.implCast(Self);
// copy over anything putBack()'d
var pos: usize = 0;
@@ -476,6 +488,13 @@ pub fn PeekStream(comptime buffer_size: usize, comptime InStreamError: type) typ
self.at_end = (read < left);
return pos + read;
}
+
+ pub fn inStream(self: *Self) Stream {
+ return Stream{
+ .iface = Stream.Iface.init(self),
+ .readFn = readFn,
+ };
+ }
};
}
@@ -484,8 +503,6 @@ pub const SliceInStream = struct {
pub const Error = error{};
pub const Stream = InStream(Error);
- pub stream: Stream,
-
pos: usize,
slice: []const u8,
@@ -493,12 +510,11 @@ pub const SliceInStream = struct {
return Self{
.slice = slice,
.pos = 0,
- .stream = Stream{ .readFn = readFn },
};
}
- fn readFn(in_stream: *Stream, dest: []u8) Error!usize {
- const self = @fieldParentPtr(Self, "stream", in_stream);
+ fn readFn(in_stream: Stream, dest: []u8) Error!usize {
+ const self = in_stream.iface.implCast(Self);
const size = math.min(dest.len, self.slice.len - self.pos);
const end = self.pos + size;
@@ -507,6 +523,13 @@ pub const SliceInStream = struct {
return size;
}
+
+ pub fn inStream(self: *Self) Stream {
+ return Stream{
+ .iface = Stream.Iface.init(self),
+ .readFn = readFn,
+ };
+ }
};
/// Creates a stream which allows for reading bit fields from another stream
@@ -514,22 +537,20 @@ pub fn BitInStream(endian: builtin.Endian, comptime Error: type) type {
return struct {
const Self = @This();
- in_stream: *Stream,
+ in_stream: Stream,
bit_buffer: u7,
bit_count: u3,
- stream: Stream,
pub const Stream = InStream(Error);
const u8_bit_count = comptime meta.bitCount(u8);
const u7_bit_count = comptime meta.bitCount(u7);
const u4_bit_count = comptime meta.bitCount(u4);
- pub fn init(in_stream: *Stream) Self {
+ pub fn init(in_stream: Stream) Self {
return Self{
.in_stream = in_stream,
.bit_buffer = 0,
.bit_count = 0,
- .stream = Stream{ .readFn = read },
};
}
@@ -636,8 +657,8 @@ pub fn BitInStream(endian: builtin.Endian, comptime Error: type) type {
self.bit_count = 0;
}
- pub fn read(self_stream: *Stream, buffer: []u8) Error!usize {
- var self = @fieldParentPtr(Self, "stream", self_stream);
+ pub fn read(self_stream: Stream, buffer: []u8) Error!usize {
+ var self = self_stream.iface.implCast(Self);
var out_bits: usize = undefined;
var out_bits_total = usize(0);
@@ -653,6 +674,13 @@ pub fn BitInStream(endian: builtin.Endian, comptime Error: type) type {
return self.in_stream.read(buffer);
}
+
+ pub fn inStream(self: *Self) Stream {
+ return Stream{
+ .iface = Stream.Iface.init(self),
+ .readFn = read,
+ };
+ }
};
}
@@ -662,8 +690,6 @@ pub const SliceOutStream = struct {
pub const Error = error{OutOfSpace};
pub const Stream = OutStream(Error);
- pub stream: Stream,
-
pub pos: usize,
slice: []u8,
@@ -671,7 +697,6 @@ pub const SliceOutStream = struct {
return SliceOutStream{
.slice = slice,
.pos = 0,
- .stream = Stream{ .writeFn = writeFn },
};
}
@@ -683,8 +708,8 @@ pub const SliceOutStream = struct {
self.pos = 0;
}
- fn writeFn(out_stream: *Stream, bytes: []const u8) Error!void {
- const self = @fieldParentPtr(SliceOutStream, "stream", out_stream);
+ fn writeFn(out_stream: Stream, bytes: []const u8) Error!void {
+ const self = out_stream.iface.implCast(SliceOutStream);
assert(self.pos <= self.slice.len);
@@ -700,39 +725,49 @@ pub const SliceOutStream = struct {
return Error.OutOfSpace;
}
}
+
+ pub fn outStream(self: *SliceOutStream) Stream {
+ return Stream{
+ .iface = Stream.Iface.init(self),
+ .writeFn = writeFn,
+ };
+ }
};
test "io.SliceOutStream" {
var buf: [255]u8 = undefined;
var slice_stream = SliceOutStream.init(buf[0..]);
- const stream = &slice_stream.stream;
+ const stream = slice_stream.outStream();
try stream.print("{}{}!", "Hello", "World");
testing.expectEqualSlices(u8, "HelloWorld!", slice_stream.getWritten());
}
var null_out_stream_state = NullOutStream.init();
-pub const null_out_stream = &null_out_stream_state.stream;
+pub const null_out_stream = null_out_stream_state.outStream();
/// An OutStream that doesn't write to anything.
pub const NullOutStream = struct {
pub const Error = error{};
pub const Stream = OutStream(Error);
- pub stream: Stream,
-
pub fn init() NullOutStream {
- return NullOutStream{
- .stream = Stream{ .writeFn = writeFn },
- };
+ return NullOutStream{};
}
- fn writeFn(out_stream: *Stream, bytes: []const u8) Error!void {}
+ fn writeFn(out_stream: Stream, bytes: []const u8) Error!void {}
+
+ pub fn outStream(self: *NullOutStream) Stream {
+ return Stream{
+ .iface = Stream.Iface.none(),
+ .writeFn = writeFn,
+ };
+ }
};
test "io.NullOutStream" {
var null_stream = NullOutStream.init();
- const stream = &null_stream.stream;
+ const stream = null_stream.outStream();
stream.write("yay" ** 10000) catch unreachable;
}
@@ -743,30 +778,35 @@ pub fn CountingOutStream(comptime OutStreamError: type) type {
pub const Stream = OutStream(Error);
pub const Error = OutStreamError;
- pub stream: Stream,
pub bytes_written: u64,
- child_stream: *Stream,
+ child_stream: Stream,
- pub fn init(child_stream: *Stream) Self {
+ pub fn init(child_stream: Stream) Self {
return Self{
- .stream = Stream{ .writeFn = writeFn },
.bytes_written = 0,
.child_stream = child_stream,
};
}
- fn writeFn(out_stream: *Stream, bytes: []const u8) OutStreamError!void {
- const self = @fieldParentPtr(Self, "stream", out_stream);
+ fn writeFn(out_stream: Stream, bytes: []const u8) OutStreamError!void {
+ const self = out_stream.iface.implCast(Self);
try self.child_stream.write(bytes);
self.bytes_written += bytes.len;
}
+
+ pub fn outStream(self: *Self) Stream {
+ return Stream{
+ .iface = Stream.Iface.init(self),
+ .writeFn = writeFn,
+ };
+ }
};
}
test "io.CountingOutStream" {
var null_stream = NullOutStream.init();
- var counting_stream = CountingOutStream(NullOutStream.Error).init(&null_stream.stream);
- const stream = &counting_stream.stream;
+ var counting_stream = CountingOutStream(NullOutStream.Error).init(null_stream.outStream());
+ const stream = counting_stream.outStream();
const bytes = "yay" ** 10000;
stream.write(bytes) catch unreachable;
@@ -783,19 +823,16 @@ pub fn BufferedOutStreamCustom(comptime buffer_size: usize, comptime OutStreamEr
pub const Stream = OutStream(Error);
pub const Error = OutStreamError;
- pub stream: Stream,
-
- unbuffered_out_stream: *Stream,
+ unbuffered_out_stream: Stream,
buffer: [buffer_size]u8,
index: usize,
- pub fn init(unbuffered_out_stream: *Stream) Self {
+ pub fn init(unbuffered_out_stream: Stream) Self {
return Self{
.unbuffered_out_stream = unbuffered_out_stream,
.buffer = undefined,
.index = 0,
- .stream = Stream{ .writeFn = writeFn },
};
}
@@ -804,8 +841,8 @@ pub fn BufferedOutStreamCustom(comptime buffer_size: usize, comptime OutStreamEr
self.index = 0;
}
- fn writeFn(out_stream: *Stream, bytes: []const u8) !void {
- const self = @fieldParentPtr(Self, "stream", out_stream);
+ fn writeFn(out_stream: Stream, bytes: []const u8) !void {
+ const self = out_stream.iface.implCast(Self);
if (bytes.len >= self.buffer.len) {
try self.flush();
@@ -825,13 +862,19 @@ pub fn BufferedOutStreamCustom(comptime buffer_size: usize, comptime OutStreamEr
src_index += copy_amt;
}
}
+
+ pub fn outStream(self: *Self) Stream {
+ return Stream{
+ .iface = Stream.Iface.init(self),
+ .writeFn = writeFn,
+ };
+ }
};
}
/// Implementation of OutStream trait for Buffer
pub const BufferOutStream = struct {
buffer: *Buffer,
- stream: Stream,
pub const Error = error{OutOfMemory};
pub const Stream = OutStream(Error);
@@ -839,14 +882,20 @@ pub const BufferOutStream = struct {
pub fn init(buffer: *Buffer) BufferOutStream {
return BufferOutStream{
.buffer = buffer,
- .stream = Stream{ .writeFn = writeFn },
};
}
- fn writeFn(out_stream: *Stream, bytes: []const u8) !void {
- const self = @fieldParentPtr(BufferOutStream, "stream", out_stream);
+ fn writeFn(out_stream: Stream, bytes: []const u8) !void {
+ const self = out_stream.iface.implCast(BufferOutStream);
return self.buffer.append(bytes);
}
+
+ pub fn outStream(self: *BufferOutStream) Stream {
+ return Stream{
+ .iface = Stream.Iface.init(self),
+ .writeFn = writeFn,
+ };
+ }
};
/// Creates a stream which allows for writing bit fields to another stream
@@ -854,21 +903,19 @@ pub fn BitOutStream(endian: builtin.Endian, comptime Error: type) type {
return struct {
const Self = @This();
- out_stream: *Stream,
+ out_stream: Stream,
bit_buffer: u8,
bit_count: u4,
- stream: Stream,
pub const Stream = OutStream(Error);
const u8_bit_count = comptime meta.bitCount(u8);
const u4_bit_count = comptime meta.bitCount(u4);
- pub fn init(out_stream: *Stream) Self {
+ pub fn init(out_stream: Stream) Self {
return Self{
.out_stream = out_stream,
.bit_buffer = 0,
.bit_count = 0,
- .stream = Stream{ .writeFn = write },
};
}
@@ -963,8 +1010,8 @@ pub fn BitOutStream(endian: builtin.Endian, comptime Error: type) type {
self.bit_count = 0;
}
- pub fn write(self_stream: *Stream, buffer: []const u8) Error!void {
- var self = @fieldParentPtr(Self, "stream", self_stream);
+ pub fn write(self_stream: Stream, buffer: []const u8) Error!void {
+ var self = self_stream.iface.implCast(Self);
//@NOTE: I'm not sure this is a good idea, maybe flushBits should be forced
if (self.bit_count > 0) {
@@ -975,16 +1022,23 @@ pub fn BitOutStream(endian: builtin.Endian, comptime Error: type) type {
return self.out_stream.write(buffer);
}
+
+ pub fn outStream(self: *Self) Stream {
+ return Stream{
+ .iface = Stream.Iface.init(self),
+ .writeFn = write,
+ };
+ }
};
}
pub const BufferedAtomicFile = struct {
atomic_file: os.AtomicFile,
- file_stream: os.File.OutStream,
+ file_stream: os.File.OutStreamAdapter,
buffered_stream: BufferedOutStream(os.File.WriteError),
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
- pub fn create(allocator: *mem.Allocator, dest_path: []const u8) !*BufferedAtomicFile {
+ pub fn create(allocator: mem.Allocator, dest_path: []const u8) !*BufferedAtomicFile {
// TODO with well defined copy elision we don't need this allocation
var self = try allocator.create(BufferedAtomicFile);
self.* = BufferedAtomicFile{
@@ -998,8 +1052,8 @@ pub const BufferedAtomicFile = struct {
self.atomic_file = try os.AtomicFile.init(dest_path, os.File.default_mode);
errdefer self.atomic_file.deinit();
- self.file_stream = self.atomic_file.file.outStream();
- self.buffered_stream = BufferedOutStream(os.File.WriteError).init(&self.file_stream.stream);
+ self.file_stream = self.atomic_file.file.outStreamAdapter();
+ self.buffered_stream = BufferedOutStream(os.File.WriteError).init(self.file_stream.outStream());
return self;
}
@@ -1014,15 +1068,15 @@ pub const BufferedAtomicFile = struct {
try self.atomic_file.finish();
}
- pub fn stream(self: *BufferedAtomicFile) *OutStream(os.File.WriteError) {
- return &self.buffered_stream.stream;
+ pub fn outStream(self: *BufferedAtomicFile) OutStream(os.File.WriteError) {
+ return self.buffered_stream.outStream();
}
};
pub fn readLine(buf: *std.Buffer) ![]u8 {
var stdin = try getStdIn();
- var stdin_stream = stdin.inStream();
- return readLineFrom(&stdin_stream.stream, buf);
+ var stdin_stream = stdin.inStreamAdapter();
+ return readLineFrom(stdin_stream.inStream(), buf);
}
/// Reads all characters until the next newline into buf, and returns
@@ -1045,7 +1099,7 @@ pub fn readLineFrom(stream: var, buf: *std.Buffer) ![]u8 {
test "io.readLineFrom" {
var bytes: [128]u8 = undefined;
- const allocator = &std.heap.FixedBufferAllocator.init(bytes[0..]).allocator;
+ const allocator = std.heap.FixedBufferAllocator.init(bytes[0..]).allocator();
var buf = try std.Buffer.initSize(allocator, 0);
var mem_stream = SliceInStream.init(
@@ -1053,7 +1107,7 @@ test "io.readLineFrom" {
\\Line 22
\\Line 333
);
- const stream = &mem_stream.stream;
+ const stream = mem_stream.inStream();
testing.expectEqualSlices(u8, "Line 1", try readLineFrom(stream, &buf));
testing.expectEqualSlices(u8, "Line 22", try readLineFrom(stream, &buf));
@@ -1063,8 +1117,8 @@ test "io.readLineFrom" {
pub fn readLineSlice(slice: []u8) ![]u8 {
var stdin = try getStdIn();
- var stdin_stream = stdin.inStream();
- return readLineSliceFrom(&stdin_stream.stream, slice);
+ var stdin_stream = stdin.inStreamAdapter();
+ return readLineSliceFrom(stdin_stream.inStream(), slice);
}
/// Reads all characters until the next newline into slice, and returns
@@ -1084,7 +1138,7 @@ test "io.readLineSliceFrom" {
\\Line 22
\\Line 333
);
- const stream = &mem_stream.stream;
+ const stream = mem_stream.inStream();
testing.expectEqualSlices(u8, "Line 1", try readLineSliceFrom(stream, buf[0..]));
testing.expectError(error.OutOfMemory, readLineSliceFrom(stream, buf[0..]));
@@ -1111,11 +1165,11 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing,
return struct {
const Self = @This();
- in_stream: if (packing == .Bit) BitInStream(endian, Stream.Error) else *Stream,
+ in_stream: if (packing == .Bit) BitInStream(endian, Stream.Error) else Stream,
pub const Stream = InStream(Error);
- pub fn init(in_stream: *Stream) Self {
+ pub fn init(in_stream: Stream) Self {
return Self{
.in_stream = switch (packing) {
.Bit => BitInStream(endian, Stream.Error).init(in_stream),
@@ -1322,11 +1376,11 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co
return struct {
const Self = @This();
- out_stream: if (packing == .Bit) BitOutStream(endian, Stream.Error) else *Stream,
+ out_stream: if (packing == .Bit) BitOutStream(endian, Stream.Error) else Stream,
pub const Stream = OutStream(Error);
- pub fn init(out_stream: *Stream) Self {
+ pub fn init(out_stream: Stream) Self {
return Self{
.out_stream = switch (packing) {
.Bit => BitOutStream(endian, Stream.Error).init(out_stream),
diff --git a/std/io/c_out_stream.zig b/std/io/c_out_stream.zig
index c66b342f1e6d..9d3284e9a1d8 100644
--- a/std/io/c_out_stream.zig
+++ b/std/io/c_out_stream.zig
@@ -10,18 +10,16 @@ pub const COutStream = struct {
pub const Error = std.os.File.WriteError;
pub const Stream = OutStream(Error);
- stream: Stream,
c_file: *std.c.FILE,
pub fn init(c_file: *std.c.FILE) COutStream {
return COutStream{
.c_file = c_file,
- .stream = Stream{ .writeFn = writeFn },
};
}
- fn writeFn(out_stream: *Stream, bytes: []const u8) Error!void {
- const self = @fieldParentPtr(COutStream, "stream", out_stream);
+ fn writeFn(out_stream: Stream, bytes: []const u8) Error!void {
+ const self = out_stream.iface.implCast(COutStream);
const amt_written = std.c.fwrite(bytes.ptr, 1, bytes.len, self.c_file);
if (amt_written == bytes.len) return;
// TODO errno on windows. should we have a posix layer for windows?
@@ -45,4 +43,11 @@ pub const COutStream = struct {
else => return std.os.unexpectedErrorPosix(@intCast(usize, errno)),
}
}
+
+ pub fn outStream(self: *COutStream) Stream {
+ return Stream{
+ .iface = Stream.Iface.init(self),
+ .writeFn = writeFn,
+ };
+ }
};
diff --git a/std/io/seekable_stream.zig b/std/io/seekable_stream.zig
index 392e13530258..864db3c689a5 100644
--- a/std/io/seekable_stream.zig
+++ b/std/io/seekable_stream.zig
@@ -1,31 +1,35 @@
const std = @import("../std.zig");
const InStream = std.io.InStream;
+const assert = std.debug.assert;
pub fn SeekableStream(comptime SeekErrorType: type, comptime GetSeekPosErrorType: type) type {
return struct {
const Self = @This();
pub const SeekError = SeekErrorType;
pub const GetSeekPosError = GetSeekPosErrorType;
+ pub const Iface = std.Interface();
- seekToFn: fn (self: *Self, pos: u64) SeekError!void,
- seekForwardFn: fn (self: *Self, pos: i64) SeekError!void,
+ iface: Iface,
- getPosFn: fn (self: *Self) GetSeekPosError!u64,
- getEndPosFn: fn (self: *Self) GetSeekPosError!u64,
+ seekToFn: fn (self: Self, pos: u64) SeekError!void,
+ seekForwardFn: fn (self: Self, pos: i64) SeekError!void,
- pub fn seekTo(self: *Self, pos: u64) SeekError!void {
+ getPosFn: fn (self: Self) GetSeekPosError!u64,
+ getEndPosFn: fn (self: Self) GetSeekPosError!u64,
+
+ pub fn seekTo(self: Self, pos: u64) SeekError!void {
return self.seekToFn(self, pos);
}
- pub fn seekForward(self: *Self, amt: i64) SeekError!void {
+ pub fn seekForward(self: Self, amt: i64) SeekError!void {
return self.seekForwardFn(self, amt);
}
- pub fn getEndPos(self: *Self) GetSeekPosError!u64 {
+ pub fn getEndPos(self: Self) GetSeekPosError!u64 {
return self.getEndPosFn(self);
}
- pub fn getPos(self: *Self) GetSeekPosError!u64 {
+ pub fn getPos(self: Self) GetSeekPosError!u64 {
return self.getPosFn(self);
}
};
@@ -39,9 +43,6 @@ pub const SliceSeekableInStream = struct {
pub const Stream = InStream(Error);
pub const SeekableInStream = SeekableStream(SeekError, GetSeekPosError);
- pub stream: Stream,
- pub seekable_stream: SeekableInStream,
-
pos: usize,
slice: []const u8,
@@ -49,18 +50,11 @@ pub const SliceSeekableInStream = struct {
return Self{
.slice = slice,
.pos = 0,
- .stream = Stream{ .readFn = readFn },
- .seekable_stream = SeekableInStream{
- .seekToFn = seekToFn,
- .seekForwardFn = seekForwardFn,
- .getEndPosFn = getEndPosFn,
- .getPosFn = getPosFn,
- },
};
}
- fn readFn(in_stream: *Stream, dest: []u8) Error!usize {
- const self = @fieldParentPtr(Self, "stream", in_stream);
+ fn readFn(in_stream: Stream, dest: []u8) Error!usize {
+ const self = in_stream.iface.implCast(SliceSeekableInStream);
const size = std.math.min(dest.len, self.slice.len - self.pos);
const end = self.pos + size;
@@ -70,15 +64,15 @@ pub const SliceSeekableInStream = struct {
return size;
}
- fn seekToFn(in_stream: *SeekableInStream, pos: u64) SeekError!void {
- const self = @fieldParentPtr(Self, "seekable_stream", in_stream);
+ fn seekToFn(in_stream: SeekableInStream, pos: u64) SeekError!void {
+ const self = in_stream.iface.implCast(SliceSeekableInStream);
const usize_pos = @intCast(usize, pos);
if (usize_pos >= self.slice.len) return error.EndOfStream;
self.pos = usize_pos;
}
- fn seekForwardFn(in_stream: *SeekableInStream, amt: i64) SeekError!void {
- const self = @fieldParentPtr(Self, "seekable_stream", in_stream);
+ fn seekForwardFn(in_stream: SeekableInStream, amt: i64) SeekError!void {
+ const self = in_stream.iface.implCast(SliceSeekableInStream);
if (amt < 0) {
const abs_amt = @intCast(usize, -amt);
@@ -91,13 +85,30 @@ pub const SliceSeekableInStream = struct {
}
}
- fn getEndPosFn(in_stream: *SeekableInStream) GetSeekPosError!u64 {
- const self = @fieldParentPtr(Self, "seekable_stream", in_stream);
+ fn getEndPosFn(in_stream: SeekableInStream) GetSeekPosError!u64 {
+ const self = in_stream.iface.implCast(SliceSeekableInStream);
return @intCast(u64, self.slice.len);
}
- fn getPosFn(in_stream: *SeekableInStream) GetSeekPosError!u64 {
- const self = @fieldParentPtr(Self, "seekable_stream", in_stream);
+ fn getPosFn(in_stream: SeekableInStream) GetSeekPosError!u64 {
+ const self = in_stream.iface.implCast(SliceSeekableInStream);
return @intCast(u64, self.pos);
}
+
+ pub fn inStream(self: *Self) Stream {
+ return Stream{
+ .iface = Stream.Iface.init(self),
+ .readFn = readFn,
+ };
+ }
+
+ pub fn seekableStream(self: *Self) SeekableInStream {
+ return SeekableInStream{
+ .iface = SeekableInStream.Iface.init(self),
+ .seekToFn = seekToFn,
+ .seekForwardFn = seekForwardFn,
+ .getPosFn = getPosFn,
+ .getEndPosFn = getEndPosFn,
+ };
+ }
};
diff --git a/std/io/test.zig b/std/io/test.zig
index 07a3c0e8dde9..2e58824e5466 100644
--- a/std/io/test.zig
+++ b/std/io/test.zig
@@ -11,19 +11,19 @@ const builtin = @import("builtin");
test "write a file, read it, then delete it" {
var raw_bytes: [200 * 1024]u8 = undefined;
- var allocator = &std.heap.FixedBufferAllocator.init(raw_bytes[0..]).allocator;
+ var allocator = std.heap.FixedBufferAllocator.init(raw_bytes[0..]).allocator();
var data: [1024]u8 = undefined;
var prng = DefaultPrng.init(1234);
- prng.random.bytes(data[0..]);
+ prng.random().bytes(data[0..]);
const tmp_file_name = "temp_test_file.txt";
{
var file = try os.File.openWrite(tmp_file_name);
defer file.close();
- var file_out_stream = file.outStream();
- var buf_stream = io.BufferedOutStream(os.File.WriteError).init(&file_out_stream.stream);
- const st = &buf_stream.stream;
+ var file_out_stream = file.outStreamAdapter();
+ var buf_stream = io.BufferedOutStream(os.File.WriteError).init(file_out_stream.outStream());
+ const st = buf_stream.outStream();
try st.print("begin");
try st.write(data[0..]);
try st.print("end");
@@ -47,9 +47,9 @@ test "write a file, read it, then delete it" {
const expected_file_size = "begin".len + data.len + "end".len;
expect(file_size == expected_file_size);
- var file_in_stream = file.inStream();
- var buf_stream = io.BufferedInStream(os.File.ReadError).init(&file_in_stream.stream);
- const st = &buf_stream.stream;
+ var file_in_stream = file.inStreamAdapter();
+ var buf_stream = io.BufferedInStream(os.File.ReadError).init(file_in_stream.inStream());
+ const st = buf_stream.inStream();
const contents = try st.readAllAlloc(allocator, 2 * 1024);
defer allocator.free(contents);
@@ -62,10 +62,10 @@ test "write a file, read it, then delete it" {
test "BufferOutStream" {
var bytes: [100]u8 = undefined;
- var allocator = &std.heap.FixedBufferAllocator.init(bytes[0..]).allocator;
+ var allocator = std.heap.FixedBufferAllocator.init(bytes[0..]).allocator();
var buffer = try std.Buffer.initSize(allocator, 0);
- var buf_stream = &std.io.BufferOutStream.init(&buffer).stream;
+ var buf_stream = std.io.BufferOutStream.init(&buffer).outStream();
const x: i32 = 42;
const y: i32 = 1234;
@@ -80,46 +80,46 @@ test "SliceInStream" {
var dest: [4]u8 = undefined;
- var read = try ss.stream.read(dest[0..4]);
+ var read = try ss.inStream().read(dest[0..4]);
expect(read == 4);
expect(mem.eql(u8, dest[0..4], bytes[0..4]));
- read = try ss.stream.read(dest[0..4]);
+ read = try ss.inStream().read(dest[0..4]);
expect(read == 3);
expect(mem.eql(u8, dest[0..3], bytes[4..7]));
- read = try ss.stream.read(dest[0..4]);
+ read = try ss.inStream().read(dest[0..4]);
expect(read == 0);
}
test "PeekStream" {
const bytes = []const u8{ 1, 2, 3, 4, 5, 6, 7, 8 };
var ss = io.SliceInStream.init(bytes);
- var ps = io.PeekStream(2, io.SliceInStream.Error).init(&ss.stream);
+ var ps = io.PeekStream(2, io.SliceInStream.Error).init(ss.inStream());
var dest: [4]u8 = undefined;
ps.putBackByte(9);
ps.putBackByte(10);
- var read = try ps.stream.read(dest[0..4]);
+ var read = try ps.inStream().read(dest[0..4]);
expect(read == 4);
expect(dest[0] == 10);
expect(dest[1] == 9);
expect(mem.eql(u8, dest[2..4], bytes[0..2]));
- read = try ps.stream.read(dest[0..4]);
+ read = try ps.inStream().read(dest[0..4]);
expect(read == 4);
expect(mem.eql(u8, dest[0..4], bytes[2..6]));
- read = try ps.stream.read(dest[0..4]);
+ read = try ps.inStream().read(dest[0..4]);
expect(read == 2);
expect(mem.eql(u8, dest[0..2], bytes[6..8]));
ps.putBackByte(11);
ps.putBackByte(12);
- read = try ps.stream.read(dest[0..4]);
+ read = try ps.inStream().read(dest[0..4]);
expect(read == 2);
expect(dest[0] == 12);
expect(dest[1] == 11);
@@ -129,19 +129,19 @@ test "SliceOutStream" {
var buffer: [10]u8 = undefined;
var ss = io.SliceOutStream.init(buffer[0..]);
- try ss.stream.write("Hello");
+ try ss.outStream().write("Hello");
expect(mem.eql(u8, ss.getWritten(), "Hello"));
- try ss.stream.write("world");
+ try ss.outStream().write("world");
expect(mem.eql(u8, ss.getWritten(), "Helloworld"));
- expectError(error.OutOfSpace, ss.stream.write("!"));
+ expectError(error.OutOfSpace, ss.outStream().write("!"));
expect(mem.eql(u8, ss.getWritten(), "Helloworld"));
ss.reset();
expect(ss.getWritten().len == 0);
- expectError(error.OutOfSpace, ss.stream.write("Hello world!"));
+ expectError(error.OutOfSpace, ss.outStream().write("Hello world!"));
expect(mem.eql(u8, ss.getWritten(), "Hello worl"));
}
@@ -151,7 +151,7 @@ test "BitInStream" {
var mem_in_be = io.SliceInStream.init(mem_be[0..]);
const InError = io.SliceInStream.Error;
- var bit_stream_be = io.BitInStream(builtin.Endian.Big, InError).init(&mem_in_be.stream);
+ var bit_stream_be = io.BitInStream(builtin.Endian.Big, InError).init(mem_in_be.inStream());
var out_bits: usize = undefined;
@@ -185,7 +185,7 @@ test "BitInStream" {
expectError(error.EndOfStream, bit_stream_be.readBitsNoEof(u1, 1));
var mem_in_le = io.SliceInStream.init(mem_le[0..]);
- var bit_stream_le = io.BitInStream(builtin.Endian.Little, InError).init(&mem_in_le.stream);
+ var bit_stream_le = io.BitInStream(builtin.Endian.Little, InError).init(mem_in_le.inStream());
expect(1 == try bit_stream_le.readBits(u2, 1, &out_bits));
expect(out_bits == 1);
@@ -223,7 +223,7 @@ test "BitOutStream" {
var mem_out_be = io.SliceOutStream.init(mem_be[0..]);
const OutError = io.SliceOutStream.Error;
- var bit_stream_be = io.BitOutStream(builtin.Endian.Big, OutError).init(&mem_out_be.stream);
+ var bit_stream_be = io.BitOutStream(builtin.Endian.Big, OutError).init(mem_out_be.outStream());
try bit_stream_be.writeBits(u2(1), 1);
try bit_stream_be.writeBits(u5(2), 2);
@@ -247,7 +247,7 @@ test "BitOutStream" {
try bit_stream_be.writeBits(u0(0), 0);
var mem_out_le = io.SliceOutStream.init(mem_le[0..]);
- var bit_stream_le = io.BitOutStream(builtin.Endian.Little, OutError).init(&mem_out_le.stream);
+ var bit_stream_le = io.BitOutStream(builtin.Endian.Little, OutError).init(mem_out_le.outStream());
try bit_stream_le.writeBits(u2(1), 1);
try bit_stream_le.writeBits(u5(2), 2);
@@ -276,8 +276,8 @@ test "BitStreams with File Stream" {
var file = try os.File.openWrite(tmp_file_name);
defer file.close();
- var file_out = file.outStream();
- var file_out_stream = &file_out.stream;
+ var file_out = file.outStreamAdapter();
+ var file_out_stream = file_out.outStream();
const OutError = os.File.WriteError;
var bit_stream = io.BitOutStream(builtin.endian, OutError).init(file_out_stream);
@@ -293,8 +293,8 @@ test "BitStreams with File Stream" {
var file = try os.File.openRead(tmp_file_name);
defer file.close();
- var file_in = file.inStream();
- var file_in_stream = &file_in.stream;
+ var file_in = file.inStreamAdapter();
+ var file_in_stream = file_in.inStream();
const InError = os.File.ReadError;
var bit_stream = io.BitInStream(builtin.endian, InError).init(file_in_stream);
@@ -332,12 +332,12 @@ fn testIntSerializerDeserializer(comptime endian: builtin.Endian, comptime packi
var data_mem: [total_bytes]u8 = undefined;
var out = io.SliceOutStream.init(data_mem[0..]);
const OutError = io.SliceOutStream.Error;
- var out_stream = &out.stream;
+ var out_stream = out.outStream();
var serializer = io.Serializer(endian, packing, OutError).init(out_stream);
var in = io.SliceInStream.init(data_mem[0..]);
const InError = io.SliceInStream.Error;
- var in_stream = &in.stream;
+ var in_stream = in.inStream();
var deserializer = io.Deserializer(endian, packing, InError).init(in_stream);
comptime var i = 0;
@@ -372,7 +372,7 @@ fn testIntSerializerDeserializer(comptime endian: builtin.Endian, comptime packi
//deserializer is covered by SliceInStream
const NullError = io.NullOutStream.Error;
var null_out = io.NullOutStream.init();
- var null_out_stream = &null_out.stream;
+ var null_out_stream = null_out.outStream();
var null_serializer = io.Serializer(endian, packing, NullError).init(null_out_stream);
try null_serializer.serialize(data_mem[0..]);
try null_serializer.flush();
@@ -396,12 +396,12 @@ fn testIntSerializerDeserializerInfNaN(
var out = io.SliceOutStream.init(data_mem[0..]);
const OutError = io.SliceOutStream.Error;
- var out_stream = &out.stream;
+ var out_stream = out.outStream();
var serializer = io.Serializer(endian, packing, OutError).init(out_stream);
var in = io.SliceInStream.init(data_mem[0..]);
const InError = io.SliceInStream.Error;
- var in_stream = &in.stream;
+ var in_stream = in.inStream();
var deserializer = io.Deserializer(endian, packing, InError).init(in_stream);
//@TODO: isInf/isNan not currently implemented for f128.
@@ -528,12 +528,12 @@ fn testSerializerDeserializer(comptime endian: builtin.Endian, comptime packing:
var data_mem: [@sizeOf(MyStruct)]u8 = undefined;
var out = io.SliceOutStream.init(data_mem[0..]);
const OutError = io.SliceOutStream.Error;
- var out_stream = &out.stream;
+ var out_stream = out.outStream();
var serializer = io.Serializer(endian, packing, OutError).init(out_stream);
var in = io.SliceInStream.init(data_mem[0..]);
const InError = io.SliceInStream.Error;
- var in_stream = &in.stream;
+ var in_stream = in.inStream();
var deserializer = io.Deserializer(endian, packing, InError).init(in_stream);
try serializer.serialize(my_inst);
@@ -567,12 +567,12 @@ fn testBadData(comptime endian: builtin.Endian, comptime packing: io.Packing) !v
var data_mem: [4]u8 = undefined;
var out = io.SliceOutStream.init(data_mem[0..]);
const OutError = io.SliceOutStream.Error;
- var out_stream = &out.stream;
+ var out_stream = out.outStream();
var serializer = io.Serializer(endian, packing, OutError).init(out_stream);
var in = io.SliceInStream.init(data_mem[0..]);
const InError = io.SliceInStream.Error;
- var in_stream = &in.stream;
+ var in_stream = in.inStream();
var deserializer = io.Deserializer(endian, packing, InError).init(in_stream);
try serializer.serialize(u14(3));
@@ -597,6 +597,6 @@ test "c out stream" {
const out_file = std.c.fopen(filename, c"w") orelse return error.UnableToOpenTestFile;
defer std.os.deleteFileC(filename) catch {};
- const out_stream = &io.COutStream.init(out_file).stream;
+ const out_stream = io.COutStream.init(out_file).outStream();
try out_stream.print("hi: {}\n", i32(123));
}
diff --git a/std/json.zig b/std/json.zig
index 8d42d1bcf01d..fef8c9713115 100644
--- a/std/json.zig
+++ b/std/json.zig
@@ -1129,7 +1129,7 @@ pub const Value = union(enum) {
// A non-stream JSON parser which constructs a tree of Value's.
pub const Parser = struct {
- allocator: *Allocator,
+ allocator: Allocator,
state: State,
copy_strings: bool,
// Stores parent nodes and un-combined Values.
@@ -1142,7 +1142,7 @@ pub const Parser = struct {
Simple,
};
- pub fn init(allocator: *Allocator, copy_strings: bool) Parser {
+ pub fn init(allocator: Allocator, copy_strings: bool) Parser {
return Parser{
.allocator = allocator,
.state = State.Simple,
@@ -1167,7 +1167,7 @@ pub const Parser = struct {
errdefer arena.deinit();
while (try s.next()) |token| {
- try p.transition(&arena.allocator, input, s.i - 1, token);
+ try p.transition(arena.allocator(), input, s.i - 1, token);
}
debug.assert(p.stack.len == 1);
@@ -1180,7 +1180,7 @@ pub const Parser = struct {
// Even though p.allocator exists, we take an explicit allocator so that allocation state
// can be cleaned up on error correctly during a `parse` on call.
- fn transition(p: *Parser, allocator: *Allocator, input: []const u8, i: usize, token: Token) !void {
+ fn transition(p: *Parser, allocator: Allocator, input: []const u8, i: usize, token: Token) !void {
switch (p.state) {
State.ObjectKey => switch (token.id) {
Token.Id.ObjectEnd => {
@@ -1334,7 +1334,7 @@ pub const Parser = struct {
}
}
- fn parseString(p: *Parser, allocator: *Allocator, token: Token, input: []const u8, i: usize) !Value {
+ fn parseString(p: *Parser, allocator: Allocator, token: Token, input: []const u8, i: usize) !Value {
// TODO: We don't strictly have to copy values which do not contain any escape
// characters if flagged with the option.
const slice = token.slice(input, i);
diff --git a/std/linked_list.zig b/std/linked_list.zig
index c4ad525913f5..6bbf6eb9ca9d 100644
--- a/std/linked_list.zig
+++ b/std/linked_list.zig
@@ -190,7 +190,7 @@ pub fn LinkedList(comptime T: type) type {
///
/// Returns:
/// A pointer to the new node.
- pub fn allocateNode(list: *Self, allocator: *Allocator) !*Node {
+ pub fn allocateNode(list: *Self, allocator: Allocator) !*Node {
return allocator.create(Node);
}
@@ -199,7 +199,7 @@ pub fn LinkedList(comptime T: type) type {
/// Arguments:
/// node: Pointer to the node to deallocate.
/// allocator: Dynamic memory allocator.
- pub fn destroyNode(list: *Self, node: *Node, allocator: *Allocator) void {
+ pub fn destroyNode(list: *Self, node: *Node, allocator: Allocator) void {
allocator.destroy(node);
}
@@ -211,7 +211,7 @@ pub fn LinkedList(comptime T: type) type {
///
/// Returns:
/// A pointer to the new node.
- pub fn createNode(list: *Self, data: T, allocator: *Allocator) !*Node {
+ pub fn createNode(list: *Self, data: T, allocator: Allocator) !*Node {
var node = try list.allocateNode(allocator);
node.* = Node.init(data);
return node;
diff --git a/std/math/big/int.zig b/std/math/big/int.zig
index 008780b2289f..a6911bf460aa 100644
--- a/std/math/big/int.zig
+++ b/std/math/big/int.zig
@@ -32,7 +32,7 @@ pub const Int = struct {
pub const default_capacity = 4;
/// Allocator used by the Int when requesting memory.
- allocator: ?*Allocator,
+ allocator: ?Allocator,
/// Raw digits. These are:
///
@@ -49,14 +49,14 @@ pub const Int = struct {
/// Creates a new Int. default_capacity limbs will be allocated immediately.
/// Int will be zeroed.
- pub fn init(allocator: *Allocator) !Int {
+ pub fn init(allocator: Allocator) !Int {
return try Int.initCapacity(allocator, default_capacity);
}
/// Creates a new Int. Int will be set to `value`.
///
/// This is identical to an `init`, followed by a `set`.
- pub fn initSet(allocator: *Allocator, value: var) !Int {
+ pub fn initSet(allocator: Allocator, value: var) !Int {
var s = try Int.init(allocator);
try s.set(value);
return s;
@@ -64,7 +64,7 @@ pub const Int = struct {
/// Creates a new Int with a specific capacity. If capacity < default_capacity then the
/// default capacity will be used instead.
- pub fn initCapacity(allocator: *Allocator, capacity: usize) !Int {
+ pub fn initCapacity(allocator: Allocator, capacity: usize) !Int {
return Int{
.allocator = allocator,
.metadata = 1,
@@ -432,7 +432,7 @@ pub const Int = struct {
/// Converts self to a string in the requested base. Memory is allocated from the provided
/// allocator and not the one present in self.
/// TODO make this call format instead of the other way around
- pub fn toString(self: Int, allocator: *Allocator, base: u8) ![]const u8 {
+ pub fn toString(self: Int, allocator: Allocator, base: u8) ![]const u8 {
if (base < 2 or base > 16) {
return error.InvalidBase;
}
@@ -952,7 +952,7 @@ pub const Int = struct {
// Handbook of Applied Cryptography, 14.20
//
// x = qy + r where 0 <= r < y
- fn divN(allocator: *Allocator, q: *Int, r: *Int, x: *Int, y: *Int) !void {
+ fn divN(allocator: Allocator, q: *Int, r: *Int, x: *Int, y: *Int) !void {
debug.assert(y.len() >= 2);
debug.assert(x.len() >= y.len());
debug.assert(q.limbs.len >= x.len() + y.len() - 1);
@@ -1199,7 +1199,7 @@ pub const Int = struct {
var buffer: [64 * 8192]u8 = undefined;
var fixed = std.heap.FixedBufferAllocator.init(buffer[0..]);
-const al = &fixed.allocator;
+const al = fixed.allocator();
test "big.int comptime_int set" {
comptime var s = 0xefffffff00000001eeeeeeefaaaaaaab;
diff --git a/std/math/big/rational.zig b/std/math/big/rational.zig
index 58a5e3ac76fd..59b4a4bc6d20 100644
--- a/std/math/big/rational.zig
+++ b/std/math/big/rational.zig
@@ -30,7 +30,7 @@ pub const Rational = struct {
/// Create a new Rational. A small amount of memory will be allocated on initialization.
/// This will be 2 * Int.default_capacity.
- pub fn init(a: *Allocator) !Rational {
+ pub fn init(a: Allocator) !Rational {
return Rational{
.p = try Int.init(a),
.q = try Int.initSet(a, 1),
@@ -589,7 +589,7 @@ fn gcdLehmer(r: *Int, xa: Int, ya: Int) !void {
var buffer: [64 * 8192]u8 = undefined;
var fixed = std.heap.FixedBufferAllocator.init(buffer[0..]);
-var al = &fixed.allocator;
+var al = fixed.allocator();
test "big.rational gcd non-one small" {
var a = try Int.initSet(al, 17);
@@ -758,7 +758,7 @@ test "big.rational set/to Float round-trip" {
var prng = std.rand.DefaultPrng.init(0x5EED);
var i: usize = 0;
while (i < 512) : (i += 1) {
- const r = prng.random.float(f64);
+ const r = prng.random().float(f64);
try a.setFloat(f64, r);
testing.expect((try a.toFloat(f64)) == r);
}
diff --git a/std/mem.zig b/std/mem.zig
index 64fe270edac3..e0571e89c21f 100644
--- a/std/mem.zig
+++ b/std/mem.zig
@@ -8,8 +8,13 @@ const meta = std.meta;
const trait = meta.trait;
const testing = std.testing;
+//@NOTE: The Fns shouldn't need to take `*const Allocator` once coroutine rewrite (#2377) is complete.
+// just `Allocator`.
pub const Allocator = struct {
pub const Error = error{OutOfMemory};
+ pub const Iface = std.Interface();
+
+ iface: Iface,
/// Realloc is used to modify the size or alignment of an existing allocation,
/// as well as to provide the allocator with an opportunity to move an allocation
@@ -33,7 +38,7 @@ pub const Allocator = struct {
/// `return_value[old_mem.len..]` have undefined values.
/// The returned slice must have its pointer aligned at least to `new_alignment` bytes.
reallocFn: fn (
- self: *Allocator,
+ self: *const Allocator,
/// Guaranteed to be the same as what was returned from most recent call to
/// `reallocFn` or `shrinkFn`.
/// If `old_mem.len == 0` then this is a new allocation and `new_byte_count`
@@ -56,7 +61,7 @@ pub const Allocator = struct {
/// This function deallocates memory. It must succeed.
shrinkFn: fn (
- self: *Allocator,
+ self: *const Allocator,
/// Guaranteed to be the same as what was returned from most recent call to
/// `reallocFn` or `shrinkFn`.
old_mem: []u8,
@@ -72,27 +77,27 @@ pub const Allocator = struct {
/// Call `destroy` with the result.
/// Returns undefined memory.
- pub fn create(self: *Allocator, comptime T: type) Error!*T {
+ pub fn create(self: Allocator, comptime T: type) Error!*T {
if (@sizeOf(T) == 0) return &(T{});
const slice = try self.alloc(T, 1);
return &slice[0];
}
/// `ptr` should be the return value of `create`
- pub fn destroy(self: *Allocator, ptr: var) void {
+ pub fn destroy(self: Allocator, ptr: var) void {
const T = @typeOf(ptr).Child;
if (@sizeOf(T) == 0) return;
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
- const shrink_result = self.shrinkFn(self, non_const_ptr[0..@sizeOf(T)], @alignOf(T), 0, 1);
+ const shrink_result = self.shrinkFn(&self, non_const_ptr[0..@sizeOf(T)], @alignOf(T), 0, 1);
assert(shrink_result.len == 0);
}
- pub fn alloc(self: *Allocator, comptime T: type, n: usize) ![]T {
+ pub fn alloc(self: Allocator, comptime T: type, n: usize) ![]T {
return self.alignedAlloc(T, @alignOf(T), n);
}
pub fn alignedAlloc(
- self: *Allocator,
+ self: Allocator,
comptime T: type,
comptime alignment: u29,
n: usize,
@@ -102,7 +107,7 @@ pub const Allocator = struct {
}
const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
- const byte_slice = try self.reallocFn(self, ([*]u8)(undefined)[0..0], undefined, byte_count, alignment);
+ const byte_slice = try self.reallocFn(&self, ([*]u8)(undefined)[0..0], undefined, byte_count, alignment);
assert(byte_slice.len == byte_count);
@memset(byte_slice.ptr, undefined, byte_slice.len);
return @bytesToSlice(T, @alignCast(alignment, byte_slice));
@@ -118,7 +123,7 @@ pub const Allocator = struct {
/// in `std.ArrayList.shrink`.
/// If you need guaranteed success, call `shrink`.
/// If `new_n` is 0, this is the same as `free` and it always succeeds.
- pub fn realloc(self: *Allocator, old_mem: var, new_n: usize) t: {
+ pub fn realloc(self: Allocator, old_mem: var, new_n: usize) t: {
const Slice = @typeInfo(@typeOf(old_mem)).Pointer;
break :t Error![]align(Slice.alignment) Slice.child;
} {
@@ -130,7 +135,7 @@ pub const Allocator = struct {
/// a new alignment, which can be larger, smaller, or the same as the old
/// allocation.
pub fn alignedRealloc(
- self: *Allocator,
+ self: Allocator,
old_mem: var,
comptime new_alignment: u29,
new_n: usize,
@@ -147,7 +152,7 @@ pub const Allocator = struct {
const old_byte_slice = @sliceToBytes(old_mem);
const byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
- const byte_slice = try self.reallocFn(self, old_byte_slice, Slice.alignment, byte_count, new_alignment);
+ const byte_slice = try self.reallocFn(&self, old_byte_slice, Slice.alignment, byte_count, new_alignment);
assert(byte_slice.len == byte_count);
if (new_n > old_mem.len) {
@memset(byte_slice.ptr + old_byte_slice.len, undefined, byte_slice.len - old_byte_slice.len);
@@ -160,7 +165,7 @@ pub const Allocator = struct {
/// Shrink always succeeds, and `new_n` must be <= `old_mem.len`.
/// Returned slice has same alignment as old_mem.
/// Shrinking to 0 is the same as calling `free`.
- pub fn shrink(self: *Allocator, old_mem: var, new_n: usize) t: {
+ pub fn shrink(self: Allocator, old_mem: var, new_n: usize) t: {
const Slice = @typeInfo(@typeOf(old_mem)).Pointer;
break :t []align(Slice.alignment) Slice.child;
} {
@@ -172,7 +177,7 @@ pub const Allocator = struct {
/// a new alignment, which must be smaller or the same as the old
/// allocation.
pub fn alignedShrink(
- self: *Allocator,
+ self: Allocator,
old_mem: var,
comptime new_alignment: u29,
new_n: usize,
@@ -193,17 +198,17 @@ pub const Allocator = struct {
const byte_count = @sizeOf(T) * new_n;
const old_byte_slice = @sliceToBytes(old_mem);
- const byte_slice = self.shrinkFn(self, old_byte_slice, Slice.alignment, byte_count, new_alignment);
+ const byte_slice = self.shrinkFn(&self, old_byte_slice, Slice.alignment, byte_count, new_alignment);
assert(byte_slice.len == byte_count);
return @bytesToSlice(T, @alignCast(new_alignment, byte_slice));
}
- pub fn free(self: *Allocator, memory: var) void {
+ pub fn free(self: Allocator, memory: var) void {
const Slice = @typeInfo(@typeOf(memory)).Pointer;
const bytes = @sliceToBytes(memory);
if (bytes.len == 0) return;
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr));
- const shrink_result = self.shrinkFn(self, non_const_ptr[0..bytes.len], Slice.alignment, 0, 1);
+ const shrink_result = self.shrinkFn(&self, non_const_ptr[0..bytes.len], Slice.alignment, 0, 1);
assert(shrink_result.len == 0);
}
};
@@ -348,7 +353,7 @@ pub fn allEqual(comptime T: type, slice: []const T, scalar: T) bool {
}
/// Copies ::m to newly allocated memory. Caller is responsible to free it.
-pub fn dupe(allocator: *Allocator, comptime T: type, m: []const T) ![]T {
+pub fn dupe(allocator: Allocator, comptime T: type, m: []const T) ![]T {
const new_buf = try allocator.alloc(T, m.len);
copy(T, new_buf, m);
return new_buf;
@@ -957,7 +962,7 @@ pub const SplitIterator = struct {
/// Naively combines a series of slices with a separator.
/// Allocates memory for the result, which must be freed by the caller.
-pub fn join(allocator: *Allocator, separator: []const u8, slices: []const []const u8) ![]u8 {
+pub fn join(allocator: Allocator, separator: []const u8, slices: []const []const u8) ![]u8 {
if (slices.len == 0) return (([*]u8)(undefined))[0..0];
const total_len = blk: {
@@ -985,7 +990,7 @@ pub fn join(allocator: *Allocator, separator: []const u8, slices: []const []cons
test "mem.join" {
var buf: [1024]u8 = undefined;
- const a = &std.heap.FixedBufferAllocator.init(&buf).allocator;
+ const a = std.heap.FixedBufferAllocator.init(&buf).allocator();
testing.expect(eql(u8, try join(a, ",", [][]const u8{ "a", "b", "c" }), "a,b,c"));
testing.expect(eql(u8, try join(a, ",", [][]const u8{"a"}), "a"));
testing.expect(eql(u8, try join(a, ",", [][]const u8{ "a", "", "b", "", "c" }), "a,,b,,c"));
diff --git a/std/mutex.zig b/std/mutex.zig
index 2b3ac4e36658..1340cee524cb 100644
--- a/std/mutex.zig
+++ b/std/mutex.zig
@@ -133,11 +133,11 @@ test "std.Mutex" {
var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit();
- var plenty_of_memory = try direct_allocator.allocator.alloc(u8, 300 * 1024);
- defer direct_allocator.allocator.free(plenty_of_memory);
+ var plenty_of_memory = try direct_allocator.allocator().alloc(u8, 300 * 1024);
+ defer direct_allocator.allocator().free(plenty_of_memory);
var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
- var a = &fixed_buffer_allocator.allocator;
+ var a = fixed_buffer_allocator.allocator();
var mutex = Mutex.init();
defer mutex.deinit();
diff --git a/std/os.zig b/std/os.zig
index 9b452e89ae5c..5ebfd532cd23 100644
--- a/std/os.zig
+++ b/std/os.zig
@@ -168,7 +168,7 @@ fn getRandomBytesDevURandom(buf: []u8) !void {
const fd = try posixOpenC(c"/dev/urandom", posix.O_RDONLY | posix.O_CLOEXEC, 0);
defer close(fd);
- const stream = &File.openHandle(fd).inStream().stream;
+ const stream = File.openHandle(fd).inStreamAdapter().inStream();
stream.readNoEof(buf) catch |err| switch (err) {
error.EndOfStream => unreachable,
error.OperationAborted => unreachable,
@@ -575,7 +575,7 @@ pub fn posixDup2(old_fd: i32, new_fd: i32) !void {
}
}
-pub fn createNullDelimitedEnvMap(allocator: *Allocator, env_map: *const BufMap) ![]?[*]u8 {
+pub fn createNullDelimitedEnvMap(allocator: Allocator, env_map: *const BufMap) ![]?[*]u8 {
const envp_count = env_map.count();
const envp_buf = try allocator.alloc(?[*]u8, envp_count + 1);
mem.set(?[*]u8, envp_buf, null);
@@ -598,7 +598,7 @@ pub fn createNullDelimitedEnvMap(allocator: *Allocator, env_map: *const BufMap)
return envp_buf;
}
-pub fn freeNullDelimitedEnvMap(allocator: *Allocator, envp_buf: []?[*]u8) void {
+pub fn freeNullDelimitedEnvMap(allocator: Allocator, envp_buf: []?[*]u8) void {
for (envp_buf) |env| {
const env_buf = if (env) |ptr| ptr[0 .. cstr.len(ptr) + 1] else break;
allocator.free(env_buf);
@@ -611,7 +611,7 @@ pub fn freeNullDelimitedEnvMap(allocator: *Allocator, envp_buf: []?[*]u8) void {
/// pointers after the args and after the environment variables.
/// `argv[0]` is the executable path.
/// This function also uses the PATH environment variable to get the full path to the executable.
-pub fn posixExecve(argv: []const []const u8, env_map: *const BufMap, allocator: *Allocator) !void {
+pub fn posixExecve(argv: []const []const u8, env_map: *const BufMap, allocator: Allocator) !void {
const argv_buf = try allocator.alloc(?[*]u8, argv.len + 1);
mem.set(?[*]u8, argv_buf, null);
defer {
@@ -740,7 +740,7 @@ pub fn getBaseAddress() usize {
/// Caller must free result when done.
/// TODO make this go through libc when we have it
-pub fn getEnvMap(allocator: *Allocator) !BufMap {
+pub fn getEnvMap(allocator: Allocator) !BufMap {
var result = BufMap.init(allocator);
errdefer result.deinit();
@@ -850,7 +850,7 @@ pub const GetEnvVarOwnedError = error{
/// Caller must free returned memory.
/// TODO make this go through libc when we have it
-pub fn getEnvVarOwned(allocator: *mem.Allocator, key: []const u8) GetEnvVarOwnedError![]u8 {
+pub fn getEnvVarOwned(allocator: mem.Allocator, key: []const u8) GetEnvVarOwnedError![]u8 {
if (is_windows) {
const key_with_null = try std.unicode.utf8ToUtf16LeWithNull(allocator, key);
defer allocator.free(key_with_null);
@@ -897,7 +897,7 @@ test "os.getEnvVarOwned" {
}
/// Caller must free the returned memory.
-pub fn getCwdAlloc(allocator: *Allocator) ![]u8 {
+pub fn getCwdAlloc(allocator: Allocator) ![]u8 {
var buf: [MAX_PATH_BYTES]u8 = undefined;
return mem.dupe(allocator, u8, try getCwd(&buf));
}
@@ -1026,7 +1026,7 @@ pub fn symLinkPosix(existing_path: []const u8, new_path: []const u8) PosixSymLin
const b64_fs_encoder = base64.Base64Encoder.init("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_", base64.standard_pad_char);
/// TODO remove the allocator requirement from this API
-pub fn atomicSymLink(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) !void {
+pub fn atomicSymLink(allocator: Allocator, existing_path: []const u8, new_path: []const u8) !void {
if (symLink(existing_path, new_path)) {
return;
} else |err| switch (err) {
@@ -1136,7 +1136,7 @@ pub fn copyFile(source_path: []const u8, dest_path: []const u8) !void {
defer in_file.close();
const mode = try in_file.mode();
- const in_stream = &in_file.inStream().stream;
+ const in_stream = in_file.inStreamAdapter().inStream();
var atomic_file = try AtomicFile.init(dest_path, mode);
defer atomic_file.deinit();
@@ -1349,7 +1349,7 @@ pub fn makeDirPosix(dir_path: []const u8) !void {
/// Calls makeDir recursively to make an entire path. Returns success if the path
/// already exists and is a directory.
/// TODO determine if we can remove the allocator requirement from this function
-pub fn makePath(allocator: *Allocator, full_path: []const u8) !void {
+pub fn makePath(allocator: Allocator, full_path: []const u8) !void {
const resolved_path = try path.resolve(allocator, [][]const u8{full_path});
defer allocator.free(resolved_path);
@@ -1491,7 +1491,7 @@ const DeleteTreeError = error{
};
/// TODO determine if we can remove the allocator requirement
-pub fn deleteTree(allocator: *Allocator, full_path: []const u8) DeleteTreeError!void {
+pub fn deleteTree(allocator: Allocator, full_path: []const u8) DeleteTreeError!void {
start_over: while (true) {
var got_access_denied = false;
// First, try deleting the item as a file. This way we don't follow sym links.
@@ -1563,7 +1563,7 @@ pub fn deleteTree(allocator: *Allocator, full_path: []const u8) DeleteTreeError!
pub const Dir = struct {
handle: Handle,
- allocator: *Allocator,
+ allocator: Allocator,
pub const Handle = switch (builtin.os) {
Os.macosx, Os.ios, Os.freebsd, Os.netbsd => struct {
@@ -1629,7 +1629,7 @@ pub const Dir = struct {
};
/// TODO remove the allocator requirement from this API
- pub fn open(allocator: *Allocator, dir_path: []const u8) OpenError!Dir {
+ pub fn open(allocator: Allocator, dir_path: []const u8) OpenError!Dir {
return Dir{
.allocator = allocator,
.handle = switch (builtin.os) {
@@ -2050,7 +2050,7 @@ pub const ArgIteratorWindows = struct {
}
/// You must free the returned memory when done.
- pub fn next(self: *ArgIteratorWindows, allocator: *Allocator) ?(NextError![]u8) {
+ pub fn next(self: *ArgIteratorWindows, allocator: Allocator) ?(NextError![]u8) {
// march forward over whitespace
while (true) : (self.index += 1) {
const byte = self.cmd_line[self.index];
@@ -2103,7 +2103,7 @@ pub const ArgIteratorWindows = struct {
}
}
- fn internalNext(self: *ArgIteratorWindows, allocator: *Allocator) NextError![]u8 {
+ fn internalNext(self: *ArgIteratorWindows, allocator: Allocator) NextError![]u8 {
var buf = try Buffer.initSize(allocator, 0);
defer buf.deinit();
@@ -2192,7 +2192,7 @@ pub const ArgIterator = struct {
pub const NextError = ArgIteratorWindows.NextError;
/// You must free the returned memory when done.
- pub fn next(self: *ArgIterator, allocator: *Allocator) ?(NextError![]u8) {
+ pub fn next(self: *ArgIterator, allocator: Allocator) ?(NextError![]u8) {
if (builtin.os == Os.windows) {
return self.inner.next(allocator);
} else {
@@ -2217,7 +2217,7 @@ pub fn args() ArgIterator {
}
/// Caller must call argsFree on result.
-pub fn argsAlloc(allocator: *mem.Allocator) ![]const []u8 {
+pub fn argsAlloc(allocator: mem.Allocator) ![]const []u8 {
if (builtin.os == Os.wasi) {
var count: usize = undefined;
var buf_size: usize = undefined;
@@ -2282,7 +2282,7 @@ pub fn argsAlloc(allocator: *mem.Allocator) ![]const []u8 {
return result_slice_list;
}
-pub fn argsFree(allocator: *mem.Allocator, args_alloc: []const []u8) void {
+pub fn argsFree(allocator: mem.Allocator, args_alloc: []const []u8) void {
if (builtin.os == Os.wasi) {
const last_item = args_alloc[args_alloc.len - 1];
const last_byte_addr = @ptrToInt(last_item.ptr) + last_item.len + 1; // null terminated
@@ -2452,7 +2452,7 @@ pub fn selfExePath(out_buffer: *[MAX_PATH_BYTES]u8) ![]u8 {
/// `selfExeDirPath` except allocates the result on the heap.
/// Caller owns returned memory.
-pub fn selfExeDirPathAlloc(allocator: *Allocator) ![]u8 {
+pub fn selfExeDirPathAlloc(allocator: Allocator) ![]u8 {
var buf: [MAX_PATH_BYTES]u8 = undefined;
return mem.dupe(allocator, u8, try selfExeDirPath(&buf));
}
@@ -3166,7 +3166,7 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!*Thread
const bytes_ptr = windows.HeapAlloc(heap_handle, 0, byte_count) orelse return SpawnThreadError.OutOfMemory;
errdefer assert(windows.HeapFree(heap_handle, 0, bytes_ptr) != 0);
const bytes = @ptrCast([*]u8, bytes_ptr)[0..byte_count];
- const outer_context = std.heap.FixedBufferAllocator.init(bytes).allocator.create(WinThread.OuterContext) catch unreachable;
+ const outer_context = std.heap.FixedBufferAllocator.init(bytes).allocator().create(WinThread.OuterContext) catch unreachable;
outer_context.* = WinThread.OuterContext{
.thread = Thread{
.data = Thread.Data{
@@ -3339,7 +3339,7 @@ pub const CpuCountError = error{
Unexpected,
};
-pub fn cpuCount(fallback_allocator: *mem.Allocator) CpuCountError!usize {
+pub fn cpuCount(fallback_allocator: mem.Allocator) CpuCountError!usize {
switch (builtin.os) {
builtin.Os.macosx, builtin.Os.freebsd, builtin.Os.netbsd => {
var count: c_int = undefined;
diff --git a/std/os/child_process.zig b/std/os/child_process.zig
index 3269e39c7a15..5bb483ce072c 100644
--- a/std/os/child_process.zig
+++ b/std/os/child_process.zig
@@ -22,7 +22,7 @@ pub const ChildProcess = struct {
pub handle: if (is_windows) windows.HANDLE else void,
pub thread_handle: if (is_windows) windows.HANDLE else void,
- pub allocator: *mem.Allocator,
+ pub allocator: mem.Allocator,
pub stdin: ?os.File,
pub stdout: ?os.File,
@@ -86,7 +86,7 @@ pub const ChildProcess = struct {
/// First argument in argv is the executable.
/// On success must call deinit.
- pub fn init(argv: []const []const u8, allocator: *mem.Allocator) !*ChildProcess {
+ pub fn init(argv: []const []const u8, allocator: mem.Allocator) !*ChildProcess {
const child = try allocator.create(ChildProcess);
child.* = ChildProcess{
.allocator = allocator,
@@ -195,7 +195,7 @@ pub const ChildProcess = struct {
/// Spawns a child process, waits for it, collecting stdout and stderr, and then returns.
/// If it succeeds, the caller owns result.stdout and result.stderr memory.
- pub fn exec(allocator: *mem.Allocator, argv: []const []const u8, cwd: ?[]const u8, env_map: ?*const BufMap, max_output_size: usize) !ExecResult {
+ pub fn exec(allocator: mem.Allocator, argv: []const []const u8, cwd: ?[]const u8, env_map: ?*const BufMap, max_output_size: usize) !ExecResult {
const child = try ChildProcess.init(argv, allocator);
defer child.deinit();
@@ -212,11 +212,11 @@ pub const ChildProcess = struct {
defer Buffer.deinit(&stdout);
defer Buffer.deinit(&stderr);
- var stdout_file_in_stream = child.stdout.?.inStream();
- var stderr_file_in_stream = child.stderr.?.inStream();
+ var stdout_file_in_stream = child.stdout.?.inStreamAdapter();
+ var stderr_file_in_stream = child.stderr.?.inStreamAdapter();
- try stdout_file_in_stream.stream.readAllBuffer(&stdout, max_output_size);
- try stderr_file_in_stream.stream.readAllBuffer(&stderr, max_output_size);
+ try stdout_file_in_stream.inStream().readAllBuffer(&stdout, max_output_size);
+ try stderr_file_in_stream.inStream().readAllBuffer(&stderr, max_output_size);
return ExecResult{
.term = try child.wait(),
@@ -699,11 +699,11 @@ fn windowsCreateProcess(app_name: [*]u16, cmd_line: [*]u16, envp_ptr: ?[*]u16, c
/// Caller must dealloc.
/// Guarantees a null byte at result[result.len].
-fn windowsCreateCommandLine(allocator: *mem.Allocator, argv: []const []const u8) ![]u8 {
+fn windowsCreateCommandLine(allocator: mem.Allocator, argv: []const []const u8) ![]u8 {
var buf = try Buffer.initSize(allocator, 0);
defer buf.deinit();
- var buf_stream = &io.BufferOutStream.init(&buf).stream;
+ var buf_stream = io.BufferOutStream.init(&buf).outStream();
for (argv) |arg, arg_i| {
if (arg_i != 0) try buf.appendByte(' ');
@@ -809,11 +809,11 @@ fn forkChildErrReport(fd: i32, err: ChildProcess.SpawnError) noreturn {
const ErrInt = @IntType(false, @sizeOf(anyerror) * 8);
fn writeIntFd(fd: i32, value: ErrInt) !void {
- const stream = &os.File.openHandle(fd).outStream().stream;
+ const stream = os.File.openHandle(fd).outStreamAdapter().outStream();
stream.writeIntNative(ErrInt, value) catch return error.SystemResources;
}
fn readIntFd(fd: i32) !ErrInt {
- const stream = &os.File.openHandle(fd).inStream().stream;
+ const stream = os.File.openHandle(fd).inStreamAdapter().inStream();
return stream.readIntNative(ErrInt) catch return error.SystemResources;
}
diff --git a/std/os/file.zig b/std/os/file.zig
index d223d55a46fd..d47fcebed15a 100644
--- a/std/os/file.zig
+++ b/std/os/file.zig
@@ -428,85 +428,98 @@ pub const File = struct {
}
}
- pub fn inStream(file: File) InStream {
- return InStream{
+ pub fn inStreamAdapter(file: File) InStreamAdapter {
+ return InStreamAdapter{
.file = file,
- .stream = InStream.Stream{ .readFn = InStream.readFn },
};
}
- pub fn outStream(file: File) OutStream {
- return OutStream{
+ pub fn outStreamAdapter(file: File) OutStreamAdapter {
+ return OutStreamAdapter{
.file = file,
- .stream = OutStream.Stream{ .writeFn = OutStream.writeFn },
};
}
- pub fn seekableStream(file: File) SeekableStream {
- return SeekableStream{
+ pub fn seekableStreamAdapter(file: File) SeekableStreamAdapter {
+ return SeekableStreamAdapter{
.file = file,
- .stream = SeekableStream.Stream{
- .seekToFn = SeekableStream.seekToFn,
- .seekForwardFn = SeekableStream.seekForwardFn,
- .getPosFn = SeekableStream.getPosFn,
- .getEndPosFn = SeekableStream.getEndPosFn,
- },
};
}
/// Implementation of io.InStream trait for File
- pub const InStream = struct {
+ pub const InStreamAdapter = struct {
file: File,
- stream: Stream,
pub const Error = ReadError;
pub const Stream = io.InStream(Error);
- fn readFn(in_stream: *Stream, buffer: []u8) Error!usize {
- const self = @fieldParentPtr(InStream, "stream", in_stream);
+ fn readFn(in_stream: Stream, buffer: []u8) Error!usize {
+ const self = in_stream.iface.implCast(InStreamAdapter);
return self.file.read(buffer);
}
+
+ pub fn inStream(self: *InStreamAdapter) Stream {
+ return Stream{
+ .iface = Stream.Iface.init(self),
+ .readFn = readFn,
+ };
+ }
};
/// Implementation of io.OutStream trait for File
- pub const OutStream = struct {
+ pub const OutStreamAdapter = struct {
file: File,
- stream: Stream,
pub const Error = WriteError;
pub const Stream = io.OutStream(Error);
- fn writeFn(out_stream: *Stream, bytes: []const u8) Error!void {
- const self = @fieldParentPtr(OutStream, "stream", out_stream);
+ fn writeFn(out_stream: Stream, bytes: []const u8) Error!void {
+ const self = out_stream.iface.implCast(OutStreamAdapter);
return self.file.write(bytes);
}
+
+ pub fn outStream(self: *OutStreamAdapter) Stream {
+ return Stream{
+ .iface = Stream.Iface.init(self),
+ .writeFn = writeFn,
+ };
+ }
};
/// Implementation of io.SeekableStream trait for File
- pub const SeekableStream = struct {
+ pub const SeekableStreamAdapter = struct {
file: File,
- stream: Stream,
pub const Stream = io.SeekableStream(SeekError, GetSeekPosError);
- pub fn seekToFn(seekable_stream: *Stream, pos: u64) SeekError!void {
- const self = @fieldParentPtr(SeekableStream, "stream", seekable_stream);
+ pub fn seekToFn(seekable_stream: Stream, pos: u64) SeekError!void {
+ const self = seekable_stream.iface.implCast(SeekableStreamAdapter);
return self.file.seekTo(pos);
}
- pub fn seekForwardFn(seekable_stream: *Stream, amt: i64) SeekError!void {
- const self = @fieldParentPtr(SeekableStream, "stream", seekable_stream);
+ pub fn seekForwardFn(seekable_stream: Stream, amt: i64) SeekError!void {
+ const self = seekable_stream.iface.implCast(SeekableStreamAdapter);
return self.file.seekForward(amt);
}
- pub fn getEndPosFn(seekable_stream: *Stream) GetSeekPosError!u64 {
- const self = @fieldParentPtr(SeekableStream, "stream", seekable_stream);
+ pub fn getEndPosFn(seekable_stream: Stream) GetSeekPosError!u64 {
+ const self = seekable_stream.iface.implCast(SeekableStreamAdapter);
return self.file.getEndPos();
}
- pub fn getPosFn(seekable_stream: *Stream) GetSeekPosError!u64 {
- const self = @fieldParentPtr(SeekableStream, "stream", seekable_stream);
+ pub fn getPosFn(seekable_stream: Stream) GetSeekPosError!u64 {
+ const self = seekable_stream.iface.implCast(SeekableStreamAdapter);
return self.file.getPos();
}
+
+ pub fn seekableStream(self: *SeekableStreamAdapter) Stream {
+ return Stream{
+ .iface = Stream.Iface.init(self),
+ .seekToFn = SeekableStream.seekToFn,
+ .seekForwardFn = SeekableStream.seekForwardFn,
+ .getPosFn = SeekableStream.getPosFn,
+ .getEndPosFn = SeekableStream.getEndPosFn,
+ };
+ }
};
};
diff --git a/std/os/get_app_data_dir.zig b/std/os/get_app_data_dir.zig
index e69c03edb94f..bb1625db65f8 100644
--- a/std/os/get_app_data_dir.zig
+++ b/std/os/get_app_data_dir.zig
@@ -11,7 +11,7 @@ pub const GetAppDataDirError = error{
/// Caller owns returned memory.
/// TODO determine if we can remove the allocator requirement
-pub fn getAppDataDir(allocator: *mem.Allocator, appname: []const u8) GetAppDataDirError![]u8 {
+pub fn getAppDataDir(allocator: mem.Allocator, appname: []const u8) GetAppDataDirError![]u8 {
switch (builtin.os) {
builtin.Os.windows => {
var dir_path_ptr: [*]u16 = undefined;
@@ -62,7 +62,7 @@ fn utf16lePtrSlice(ptr: [*]const u16) []const u16 {
test "std.os.getAppDataDir" {
var buf: [512]u8 = undefined;
- const allocator = &std.heap.FixedBufferAllocator.init(buf[0..]).allocator;
+ const allocator = std.heap.FixedBufferAllocator.init(buf[0..]).allocator();
// We can't actually validate the result
_ = getAppDataDir(allocator, "zig") catch return;
diff --git a/std/os/path.zig b/std/os/path.zig
index fa8bb282eb9e..492d11388de5 100644
--- a/std/os/path.zig
+++ b/std/os/path.zig
@@ -36,7 +36,7 @@ pub fn isSep(byte: u8) bool {
/// This is different from mem.join in that the separator will not be repeated if
/// it is found at the end or beginning of a pair of consecutive paths.
-fn joinSep(allocator: *Allocator, separator: u8, paths: []const []const u8) ![]u8 {
+fn joinSep(allocator: Allocator, separator: u8, paths: []const []const u8) ![]u8 {
if (paths.len == 0) return (([*]u8)(undefined))[0..0];
const total_len = blk: {
@@ -81,26 +81,26 @@ pub const join = if (is_windows) joinWindows else joinPosix;
/// Naively combines a series of paths with the native path seperator.
/// Allocates memory for the result, which must be freed by the caller.
-pub fn joinWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
+pub fn joinWindows(allocator: Allocator, paths: []const []const u8) ![]u8 {
return joinSep(allocator, sep_windows, paths);
}
/// Naively combines a series of paths with the native path seperator.
/// Allocates memory for the result, which must be freed by the caller.
-pub fn joinPosix(allocator: *Allocator, paths: []const []const u8) ![]u8 {
+pub fn joinPosix(allocator: Allocator, paths: []const []const u8) ![]u8 {
return joinSep(allocator, sep_posix, paths);
}
fn testJoinWindows(paths: []const []const u8, expected: []const u8) void {
var buf: [1024]u8 = undefined;
- const a = &std.heap.FixedBufferAllocator.init(&buf).allocator;
+ const a = std.heap.FixedBufferAllocator.init(&buf).allocator();
const actual = joinWindows(a, paths) catch @panic("fail");
testing.expectEqualSlices(u8, expected, actual);
}
fn testJoinPosix(paths: []const []const u8, expected: []const u8) void {
var buf: [1024]u8 = undefined;
- const a = &std.heap.FixedBufferAllocator.init(&buf).allocator;
+ const a = std.heap.FixedBufferAllocator.init(&buf).allocator();
const actual = joinPosix(a, paths) catch @panic("fail");
testing.expectEqualSlices(u8, expected, actual);
}
@@ -377,7 +377,7 @@ fn asciiEqlIgnoreCase(s1: []const u8, s2: []const u8) bool {
}
/// On Windows, this calls `resolveWindows` and on POSIX it calls `resolvePosix`.
-pub fn resolve(allocator: *Allocator, paths: []const []const u8) ![]u8 {
+pub fn resolve(allocator: Allocator, paths: []const []const u8) ![]u8 {
if (is_windows) {
return resolveWindows(allocator, paths);
} else {
@@ -393,7 +393,7 @@ pub fn resolve(allocator: *Allocator, paths: []const []const u8) ![]u8 {
/// Path separators are canonicalized to '\\' and drives are canonicalized to capital letters.
/// Note: all usage of this function should be audited due to the existence of symlinks.
/// Without performing actual syscalls, resolving `..` could be incorrect.
-pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
+pub fn resolveWindows(allocator: Allocator, paths: []const []const u8) ![]u8 {
if (paths.len == 0) {
assert(is_windows); // resolveWindows called on non windows can't use getCwd
return os.getCwdAlloc(allocator);
@@ -574,7 +574,7 @@ pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
/// If all paths are relative it uses the current working directory as a starting point.
/// Note: all usage of this function should be audited due to the existence of symlinks.
/// Without performing actual syscalls, resolving `..` could be incorrect.
-pub fn resolvePosix(allocator: *Allocator, paths: []const []const u8) ![]u8 {
+pub fn resolvePosix(allocator: Allocator, paths: []const []const u8) ![]u8 {
if (paths.len == 0) {
assert(!is_windows); // resolvePosix called on windows can't use getCwd
return os.getCwdAlloc(allocator);
@@ -964,7 +964,7 @@ fn testBasenameWindows(input: []const u8, expected_output: []const u8) void {
/// resolve to the same path (after calling `resolve` on each), a zero-length
/// string is returned.
/// On Windows this canonicalizes the drive to a capital letter and paths to `\\`.
-pub fn relative(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
+pub fn relative(allocator: Allocator, from: []const u8, to: []const u8) ![]u8 {
if (is_windows) {
return relativeWindows(allocator, from, to);
} else {
@@ -972,7 +972,7 @@ pub fn relative(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
}
}
-pub fn relativeWindows(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
+pub fn relativeWindows(allocator: Allocator, from: []const u8, to: []const u8) ![]u8 {
const resolved_from = try resolveWindows(allocator, [][]const u8{from});
defer allocator.free(resolved_from);
@@ -1045,7 +1045,7 @@ pub fn relativeWindows(allocator: *Allocator, from: []const u8, to: []const u8)
return []u8{};
}
-pub fn relativePosix(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
+pub fn relativePosix(allocator: Allocator, from: []const u8, to: []const u8) ![]u8 {
const resolved_from = try resolvePosix(allocator, [][]const u8{from});
defer allocator.free(resolved_from);
@@ -1276,7 +1276,7 @@ pub fn real(out_buffer: *[os.MAX_PATH_BYTES]u8, pathname: []const u8) RealError!
}
/// `real`, except caller must free the returned memory.
-pub fn realAlloc(allocator: *Allocator, pathname: []const u8) ![]u8 {
+pub fn realAlloc(allocator: Allocator, pathname: []const u8) ![]u8 {
var buf: [os.MAX_PATH_BYTES]u8 = undefined;
return mem.dupe(allocator, u8, try real(&buf, pathname));
}
diff --git a/std/os/test.zig b/std/os/test.zig
index 0ee6fc1f26d7..ca94ccccefcf 100644
--- a/std/os/test.zig
+++ b/std/os/test.zig
@@ -94,7 +94,7 @@ test "cpu count" {
test "AtomicFile" {
var buffer: [1024]u8 = undefined;
- const allocator = &std.heap.FixedBufferAllocator.init(buffer[0..]).allocator;
+ const allocator = std.heap.FixedBufferAllocator.init(buffer[0..]).allocator();
const test_out_file = "tmp_atomic_file_test_dest.txt";
const test_content =
\\ hello!
diff --git a/std/os/windows/util.zig b/std/os/windows/util.zig
index 72c84502e369..ae37b5d01dbf 100644
--- a/std/os/windows/util.zig
+++ b/std/os/windows/util.zig
@@ -164,7 +164,7 @@ pub fn windowsOpen(
}
/// Caller must free result.
-pub fn createWindowsEnvBlock(allocator: *mem.Allocator, env_map: *const BufMap) ![]u16 {
+pub fn createWindowsEnvBlock(allocator: mem.Allocator, env_map: *const BufMap) ![]u16 {
// count bytes needed
const max_chars_needed = x: {
var max_chars_needed: usize = 4; // 4 for the final 4 null bytes
diff --git a/std/packed_int_array.zig b/std/packed_int_array.zig
index 065f1becd852..189f0a0f727e 100644
--- a/std/packed_int_array.zig
+++ b/std/packed_int_array.zig
@@ -624,7 +624,7 @@ test "PackedIntArray at end of available memory" {
};
var da = std.heap.DirectAllocator.init();
- const allocator = &da.allocator;
+ const allocator = da.allocator();
var pad = try allocator.create(Padded);
defer allocator.destroy(pad);
@@ -639,7 +639,7 @@ test "PackedIntSlice at end of available memory" {
const PackedSlice = PackedIntSlice(u11);
var da = std.heap.DirectAllocator.init();
- const allocator = &da.allocator;
+ const allocator = da.allocator();
var page = try allocator.alloc(u8, std.os.page_size);
defer allocator.free(page);
diff --git a/std/pdb.zig b/std/pdb.zig
index f3b73663e88a..b6389511f52a 100644
--- a/std/pdb.zig
+++ b/std/pdb.zig
@@ -460,7 +460,7 @@ pub const PDBStringTableHeader = packed struct {
pub const Pdb = struct {
in_file: os.File,
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
coff: *coff.Coff,
string_table: *MsfStream,
dbi: *MsfStream,
@@ -492,9 +492,9 @@ const Msf = struct {
directory: MsfStream,
streams: []MsfStream,
- fn openFile(self: *Msf, allocator: *mem.Allocator, file: os.File) !void {
- var file_stream = file.inStream();
- const in = &file_stream.stream;
+ fn openFile(self: *Msf, allocator: mem.Allocator, file: os.File) !void {
+ var file_stream = file.inStreamAdapter();
+ const in = file_stream.inStream();
const superblock = try in.readStruct(SuperBlock);
@@ -518,11 +518,11 @@ const Msf = struct {
allocator,
);
- const stream_count = try self.directory.stream.readIntLittle(u32);
+ const stream_count = try self.directory.inStream().readIntLittle(u32);
const stream_sizes = try allocator.alloc(u32, stream_count);
for (stream_sizes) |*s| {
- const size = try self.directory.stream.readIntLittle(u32);
+ const size = try self.directory.inStream().readIntLittle(u32);
s.* = blockCountFromSize(size, superblock.BlockSize);
}
@@ -593,22 +593,19 @@ const MsfStream = struct {
block_size: u32,
/// Implementation of InStream trait for Pdb.MsfStream
- stream: Stream,
-
pub const Error = @typeOf(read).ReturnType.ErrorSet;
pub const Stream = io.InStream(Error);
- fn init(block_size: u32, block_count: u32, pos: u64, file: os.File, allocator: *mem.Allocator) !MsfStream {
+ fn init(block_size: u32, block_count: u32, pos: u64, file: os.File, allocator: mem.Allocator) !MsfStream {
var stream = MsfStream{
.in_file = file,
.pos = 0,
.blocks = try allocator.alloc(u32, block_count),
.block_size = block_size,
- .stream = Stream{ .readFn = readFn },
};
- var file_stream = file.inStream();
- const in = &file_stream.stream;
+ var file_stream = file.inStreamAdapter();
+ const in = file_stream.inStream();
try file.seekTo(pos);
var i: u32 = 0;
@@ -619,11 +616,11 @@ const MsfStream = struct {
return stream;
}
- fn readNullTermString(self: *MsfStream, allocator: *mem.Allocator) ![]u8 {
+ fn readNullTermString(self: *MsfStream, allocator: mem.Allocator) ![]u8 {
var list = ArrayList(u8).init(allocator);
defer list.deinit();
while (true) {
- const byte = try self.stream.readByte();
+ const byte = try self.inStream().readByte();
if (byte == 0) {
return list.toSlice();
}
@@ -637,8 +634,8 @@ const MsfStream = struct {
var offset = self.pos % self.block_size;
try self.in_file.seekTo(block * self.block_size + offset);
- var file_stream = self.in_file.inStream();
- const in = &file_stream.stream;
+ var file_stream = self.in_file.inStreamAdapter();
+ const in = file_stream.inStream();
var size: usize = 0;
for (buffer) |*byte| {
@@ -685,8 +682,15 @@ const MsfStream = struct {
return block * self.block_size + offset;
}
- fn readFn(in_stream: *Stream, buffer: []u8) Error!usize {
- const self = @fieldParentPtr(MsfStream, "stream", in_stream);
+ fn readFn(in_stream: Stream, buffer: []u8) Error!usize {
+ const self = in_stream.iface.implCast(MsfStream);
return self.read(buffer);
}
+
+ pub fn inStream(self: *MsfStream) Stream {
+ return Stream{
+ .iface = Stream.Iface.init(self),
+ .readFn = readFn,
+ };
+ }
};
diff --git a/std/priority_queue.zig b/std/priority_queue.zig
index 3bc09ef5a012..0265e3b4ba48 100644
--- a/std/priority_queue.zig
+++ b/std/priority_queue.zig
@@ -10,10 +10,10 @@ pub fn PriorityQueue(comptime T: type) type {
items: []T,
len: usize,
- allocator: *Allocator,
+ allocator: Allocator,
compareFn: fn (a: T, b: T) bool,
- pub fn init(allocator: *Allocator, compareFn: fn (a: T, b: T) bool) Self {
+ pub fn init(allocator: Allocator, compareFn: fn (a: T, b: T) bool) Self {
return Self{
.items = []T{},
.len = 0,
@@ -119,7 +119,7 @@ pub fn PriorityQueue(comptime T: type) type {
/// PriorityQueue takes ownership of the passed in slice. The slice must have been
/// allocated with `allocator`.
/// Deinitialize with `deinit`.
- pub fn fromOwnedSlice(allocator: *Allocator, compareFn: fn (a: T, b: T) bool, items: []T) Self {
+ pub fn fromOwnedSlice(allocator: Allocator, compareFn: fn (a: T, b: T) bool, items: []T) Self {
var queue = Self{
.items = items,
.len = items.len,
diff --git a/std/rand.zig b/std/rand.zig
index 4a6563f65a86..5936ac660705 100644
--- a/std/rand.zig
+++ b/std/rand.zig
@@ -9,7 +9,7 @@
//
// var r = DefaultPrng.init(seed);
//
-// const s = r.random.int(u64);
+// const s = r.random().int(u64);
// ```
//
// TODO(tiehuis): Benchmark these against other reference implementations.
@@ -30,20 +30,23 @@ pub const DefaultPrng = Xoroshiro128;
pub const DefaultCsprng = Isaac64;
pub const Random = struct {
- fillFn: fn (r: *Random, buf: []u8) void,
+ pub const Iface = std.Interface();
+ iface: Iface,
+
+ fillFn: fn (r: Random, buf: []u8) void,
/// Read random bytes into the specified buffer until full.
- pub fn bytes(r: *Random, buf: []u8) void {
+ pub fn bytes(r: Random, buf: []u8) void {
r.fillFn(r, buf);
}
- pub fn boolean(r: *Random) bool {
+ pub fn boolean(r: Random) bool {
return r.int(u1) != 0;
}
/// Returns a random int `i` such that `0 <= i <= maxInt(T)`.
/// `i` is evenly distributed.
- pub fn int(r: *Random, comptime T: type) T {
+ pub fn int(r: Random, comptime T: type) T {
const UnsignedT = @IntType(false, T.bit_count);
const ByteAlignedT = @IntType(false, @divTrunc(T.bit_count + 7, 8) * 8);
@@ -60,7 +63,7 @@ pub const Random = struct {
/// Constant-time implementation off ::uintLessThan.
/// The results of this function may be biased.
- pub fn uintLessThanBiased(r: *Random, comptime T: type, less_than: T) T {
+ pub fn uintLessThanBiased(r: Random, comptime T: type, less_than: T) T {
comptime assert(T.is_signed == false);
comptime assert(T.bit_count <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation!
assert(0 < less_than);
@@ -79,7 +82,7 @@ pub const Random = struct {
/// However, if ::fillFn is backed by any evenly distributed pseudo random number generator,
/// this function is guaranteed to return.
/// If you need deterministic runtime bounds, use `::uintLessThanBiased`.
- pub fn uintLessThan(r: *Random, comptime T: type, less_than: T) T {
+ pub fn uintLessThan(r: Random, comptime T: type, less_than: T) T {
comptime assert(T.is_signed == false);
comptime assert(T.bit_count <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation!
assert(0 < less_than);
@@ -117,7 +120,7 @@ pub const Random = struct {
/// Constant-time implementation off ::uintAtMost.
/// The results of this function may be biased.
- pub fn uintAtMostBiased(r: *Random, comptime T: type, at_most: T) T {
+ pub fn uintAtMostBiased(r: Random, comptime T: type, at_most: T) T {
assert(T.is_signed == false);
if (at_most == maxInt(T)) {
// have the full range
@@ -129,7 +132,7 @@ pub const Random = struct {
/// Returns an evenly distributed random unsigned integer `0 <= i <= at_most`.
/// See ::uintLessThan, which this function uses in most cases,
/// for commentary on the runtime of this function.
- pub fn uintAtMost(r: *Random, comptime T: type, at_most: T) T {
+ pub fn uintAtMost(r: Random, comptime T: type, at_most: T) T {
assert(T.is_signed == false);
if (at_most == maxInt(T)) {
// have the full range
@@ -140,7 +143,7 @@ pub const Random = struct {
/// Constant-time implementation off ::intRangeLessThan.
/// The results of this function may be biased.
- pub fn intRangeLessThanBiased(r: *Random, comptime T: type, at_least: T, less_than: T) T {
+ pub fn intRangeLessThanBiased(r: Random, comptime T: type, at_least: T, less_than: T) T {
assert(at_least < less_than);
if (T.is_signed) {
// Two's complement makes this math pretty easy.
@@ -158,7 +161,7 @@ pub const Random = struct {
/// Returns an evenly distributed random integer `at_least <= i < less_than`.
/// See ::uintLessThan, which this function uses in most cases,
/// for commentary on the runtime of this function.
- pub fn intRangeLessThan(r: *Random, comptime T: type, at_least: T, less_than: T) T {
+ pub fn intRangeLessThan(r: Random, comptime T: type, at_least: T, less_than: T) T {
assert(at_least < less_than);
if (T.is_signed) {
// Two's complement makes this math pretty easy.
@@ -175,7 +178,7 @@ pub const Random = struct {
/// Constant-time implementation off ::intRangeAtMostBiased.
/// The results of this function may be biased.
- pub fn intRangeAtMostBiased(r: *Random, comptime T: type, at_least: T, at_most: T) T {
+ pub fn intRangeAtMostBiased(r: Random, comptime T: type, at_least: T, at_most: T) T {
assert(at_least <= at_most);
if (T.is_signed) {
// Two's complement makes this math pretty easy.
@@ -193,7 +196,7 @@ pub const Random = struct {
/// Returns an evenly distributed random integer `at_least <= i <= at_most`.
/// See ::uintLessThan, which this function uses in most cases,
/// for commentary on the runtime of this function.
- pub fn intRangeAtMost(r: *Random, comptime T: type, at_least: T, at_most: T) T {
+ pub fn intRangeAtMost(r: Random, comptime T: type, at_least: T, at_most: T) T {
assert(at_least <= at_most);
if (T.is_signed) {
// Two's complement makes this math pretty easy.
@@ -209,17 +212,17 @@ pub const Random = struct {
}
/// TODO: deprecated. use ::boolean or ::int instead.
- pub fn scalar(r: *Random, comptime T: type) T {
+ pub fn scalar(r: Random, comptime T: type) T {
return if (T == bool) r.boolean() else r.int(T);
}
/// TODO: deprecated. renamed to ::intRangeLessThan
- pub fn range(r: *Random, comptime T: type, start: T, end: T) T {
+ pub fn range(r: Random, comptime T: type, start: T, end: T) T {
return r.intRangeLessThan(T, start, end);
}
/// Return a floating point value evenly distributed in the range [0, 1).
- pub fn float(r: *Random, comptime T: type) T {
+ pub fn float(r: Random, comptime T: type) T {
// Generate a uniform value between [1, 2) and scale down to [0, 1).
// Note: The lowest mantissa bit is always set to 0 so we only use half the available range.
switch (T) {
@@ -240,7 +243,7 @@ pub const Random = struct {
/// Return a floating point value normally distributed with mean = 0, stddev = 1.
///
/// To use different parameters, use: floatNorm(...) * desiredStddev + desiredMean.
- pub fn floatNorm(r: *Random, comptime T: type) T {
+ pub fn floatNorm(r: Random, comptime T: type) T {
const value = ziggurat.next_f64(r, ziggurat.NormDist);
switch (T) {
f32 => return @floatCast(f32, value),
@@ -252,7 +255,7 @@ pub const Random = struct {
/// Return an exponentially distributed float with a rate parameter of 1.
///
/// To use a different rate parameter, use: floatExp(...) / desiredRate.
- pub fn floatExp(r: *Random, comptime T: type) T {
+ pub fn floatExp(r: Random, comptime T: type) T {
const value = ziggurat.next_f64(r, ziggurat.ExpDist);
switch (T) {
f32 => return @floatCast(f32, value),
@@ -262,7 +265,7 @@ pub const Random = struct {
}
/// Shuffle a slice into a random order.
- pub fn shuffle(r: *Random, comptime T: type, buf: []T) void {
+ pub fn shuffle(r: Random, comptime T: type, buf: []T) void {
if (buf.len < 2) {
return;
}
@@ -291,23 +294,28 @@ pub fn limitRangeBiased(comptime T: type, random_int: T, less_than: T) T {
const SequentialPrng = struct {
const Self = @This();
- random: Random,
next_value: u8,
pub fn init() Self {
return Self{
- .random = Random{ .fillFn = fill },
.next_value = 0,
};
}
- fn fill(r: *Random, buf: []u8) void {
- const self = @fieldParentPtr(Self, "random", r);
+ fn fill(r: Random, buf: []u8) void {
+ const self = r.iface.implCast(SequentialPrng);
for (buf) |*b| {
b.* = self.next_value;
}
self.next_value +%= 1;
}
+
+ pub fn random(self: *Self) Random {
+ return Random{
+ .iface = Random.Iface.init(self),
+ .fillFn = fill,
+ };
+ }
};
test "Random int" {
@@ -317,43 +325,43 @@ test "Random int" {
fn testRandomInt() void {
var r = SequentialPrng.init();
- expect(r.random.int(u0) == 0);
+ expect(r.random().int(u0) == 0);
r.next_value = 0;
- expect(r.random.int(u1) == 0);
- expect(r.random.int(u1) == 1);
- expect(r.random.int(u2) == 2);
- expect(r.random.int(u2) == 3);
- expect(r.random.int(u2) == 0);
+ expect(r.random().int(u1) == 0);
+ expect(r.random().int(u1) == 1);
+ expect(r.random().int(u2) == 2);
+ expect(r.random().int(u2) == 3);
+ expect(r.random().int(u2) == 0);
r.next_value = 0xff;
- expect(r.random.int(u8) == 0xff);
+ expect(r.random().int(u8) == 0xff);
r.next_value = 0x11;
- expect(r.random.int(u8) == 0x11);
+ expect(r.random().int(u8) == 0x11);
r.next_value = 0xff;
- expect(r.random.int(u32) == 0xffffffff);
+ expect(r.random().int(u32) == 0xffffffff);
r.next_value = 0x11;
- expect(r.random.int(u32) == 0x11111111);
+ expect(r.random().int(u32) == 0x11111111);
r.next_value = 0xff;
- expect(r.random.int(i32) == -1);
+ expect(r.random().int(i32) == -1);
r.next_value = 0x11;
- expect(r.random.int(i32) == 0x11111111);
+ expect(r.random().int(i32) == 0x11111111);
r.next_value = 0xff;
- expect(r.random.int(i8) == -1);
+ expect(r.random().int(i8) == -1);
r.next_value = 0x11;
- expect(r.random.int(i8) == 0x11);
+ expect(r.random().int(i8) == 0x11);
r.next_value = 0xff;
- expect(r.random.int(u33) == 0x1ffffffff);
+ expect(r.random().int(u33) == 0x1ffffffff);
r.next_value = 0xff;
- expect(r.random.int(i1) == -1);
+ expect(r.random().int(i1) == -1);
r.next_value = 0xff;
- expect(r.random.int(i2) == -1);
+ expect(r.random().int(i2) == -1);
r.next_value = 0xff;
- expect(r.random.int(i33) == -1);
+ expect(r.random().int(i33) == -1);
}
test "Random boolean" {
@@ -362,50 +370,50 @@ test "Random boolean" {
}
fn testRandomBoolean() void {
var r = SequentialPrng.init();
- expect(r.random.boolean() == false);
- expect(r.random.boolean() == true);
- expect(r.random.boolean() == false);
- expect(r.random.boolean() == true);
+ expect(r.random().boolean() == false);
+ expect(r.random().boolean() == true);
+ expect(r.random().boolean() == false);
+ expect(r.random().boolean() == true);
}
test "Random intLessThan" {
- @setEvalBranchQuota(10000);
+ @setEvalBranchQuota(100000);
testRandomIntLessThan();
comptime testRandomIntLessThan();
}
fn testRandomIntLessThan() void {
var r = SequentialPrng.init();
r.next_value = 0xff;
- expect(r.random.uintLessThan(u8, 4) == 3);
+ expect(r.random().uintLessThan(u8, 4) == 3);
expect(r.next_value == 0);
- expect(r.random.uintLessThan(u8, 4) == 0);
+ expect(r.random().uintLessThan(u8, 4) == 0);
expect(r.next_value == 1);
r.next_value = 0;
- expect(r.random.uintLessThan(u64, 32) == 0);
+ expect(r.random().uintLessThan(u64, 32) == 0);
// trigger the bias rejection code path
r.next_value = 0;
- expect(r.random.uintLessThan(u8, 3) == 0);
+ expect(r.random().uintLessThan(u8, 3) == 0);
// verify we incremented twice
expect(r.next_value == 2);
r.next_value = 0xff;
- expect(r.random.intRangeLessThan(u8, 0, 0x80) == 0x7f);
+ expect(r.random().intRangeLessThan(u8, 0, 0x80) == 0x7f);
r.next_value = 0xff;
- expect(r.random.intRangeLessThan(u8, 0x7f, 0xff) == 0xfe);
+ expect(r.random().intRangeLessThan(u8, 0x7f, 0xff) == 0xfe);
r.next_value = 0xff;
- expect(r.random.intRangeLessThan(i8, 0, 0x40) == 0x3f);
+ expect(r.random().intRangeLessThan(i8, 0, 0x40) == 0x3f);
r.next_value = 0xff;
- expect(r.random.intRangeLessThan(i8, -0x40, 0x40) == 0x3f);
+ expect(r.random().intRangeLessThan(i8, -0x40, 0x40) == 0x3f);
r.next_value = 0xff;
- expect(r.random.intRangeLessThan(i8, -0x80, 0) == -1);
+ expect(r.random().intRangeLessThan(i8, -0x80, 0) == -1);
r.next_value = 0xff;
- expect(r.random.intRangeLessThan(i3, -4, 0) == -1);
+ expect(r.random().intRangeLessThan(i3, -4, 0) == -1);
r.next_value = 0xff;
- expect(r.random.intRangeLessThan(i3, -2, 2) == 1);
+ expect(r.random().intRangeLessThan(i3, -2, 2) == 1);
}
test "Random intAtMost" {
@@ -416,34 +424,34 @@ test "Random intAtMost" {
fn testRandomIntAtMost() void {
var r = SequentialPrng.init();
r.next_value = 0xff;
- expect(r.random.uintAtMost(u8, 3) == 3);
+ expect(r.random().uintAtMost(u8, 3) == 3);
expect(r.next_value == 0);
- expect(r.random.uintAtMost(u8, 3) == 0);
+ expect(r.random().uintAtMost(u8, 3) == 0);
// trigger the bias rejection code path
r.next_value = 0;
- expect(r.random.uintAtMost(u8, 2) == 0);
+ expect(r.random().uintAtMost(u8, 2) == 0);
// verify we incremented twice
expect(r.next_value == 2);
r.next_value = 0xff;
- expect(r.random.intRangeAtMost(u8, 0, 0x7f) == 0x7f);
+ expect(r.random().intRangeAtMost(u8, 0, 0x7f) == 0x7f);
r.next_value = 0xff;
- expect(r.random.intRangeAtMost(u8, 0x7f, 0xfe) == 0xfe);
+ expect(r.random().intRangeAtMost(u8, 0x7f, 0xfe) == 0xfe);
r.next_value = 0xff;
- expect(r.random.intRangeAtMost(i8, 0, 0x3f) == 0x3f);
+ expect(r.random().intRangeAtMost(i8, 0, 0x3f) == 0x3f);
r.next_value = 0xff;
- expect(r.random.intRangeAtMost(i8, -0x40, 0x3f) == 0x3f);
+ expect(r.random().intRangeAtMost(i8, -0x40, 0x3f) == 0x3f);
r.next_value = 0xff;
- expect(r.random.intRangeAtMost(i8, -0x80, -1) == -1);
+ expect(r.random().intRangeAtMost(i8, -0x80, -1) == -1);
r.next_value = 0xff;
- expect(r.random.intRangeAtMost(i3, -4, -1) == -1);
+ expect(r.random().intRangeAtMost(i3, -4, -1) == -1);
r.next_value = 0xff;
- expect(r.random.intRangeAtMost(i3, -2, 1) == 1);
+ expect(r.random().intRangeAtMost(i3, -2, 1) == 1);
- expect(r.random.uintAtMost(u0, 0) == 0);
+ expect(r.random().uintAtMost(u0, 0) == 0);
}
test "Random Biased" {
@@ -451,30 +459,30 @@ test "Random Biased" {
// Not thoroughly checking the logic here.
// Just want to execute all the paths with different types.
- expect(r.random.uintLessThanBiased(u1, 1) == 0);
- expect(r.random.uintLessThanBiased(u32, 10) < 10);
- expect(r.random.uintLessThanBiased(u64, 20) < 20);
+ expect(r.random().uintLessThanBiased(u1, 1) == 0);
+ expect(r.random().uintLessThanBiased(u32, 10) < 10);
+ expect(r.random().uintLessThanBiased(u64, 20) < 20);
- expect(r.random.uintAtMostBiased(u0, 0) == 0);
- expect(r.random.uintAtMostBiased(u1, 0) <= 0);
- expect(r.random.uintAtMostBiased(u32, 10) <= 10);
- expect(r.random.uintAtMostBiased(u64, 20) <= 20);
+ expect(r.random().uintAtMostBiased(u0, 0) == 0);
+ expect(r.random().uintAtMostBiased(u1, 0) <= 0);
+ expect(r.random().uintAtMostBiased(u32, 10) <= 10);
+ expect(r.random().uintAtMostBiased(u64, 20) <= 20);
- expect(r.random.intRangeLessThanBiased(u1, 0, 1) == 0);
- expect(r.random.intRangeLessThanBiased(i1, -1, 0) == -1);
- expect(r.random.intRangeLessThanBiased(u32, 10, 20) >= 10);
- expect(r.random.intRangeLessThanBiased(i32, 10, 20) >= 10);
- expect(r.random.intRangeLessThanBiased(u64, 20, 40) >= 20);
- expect(r.random.intRangeLessThanBiased(i64, 20, 40) >= 20);
+ expect(r.random().intRangeLessThanBiased(u1, 0, 1) == 0);
+ expect(r.random().intRangeLessThanBiased(i1, -1, 0) == -1);
+ expect(r.random().intRangeLessThanBiased(u32, 10, 20) >= 10);
+ expect(r.random().intRangeLessThanBiased(i32, 10, 20) >= 10);
+ expect(r.random().intRangeLessThanBiased(u64, 20, 40) >= 20);
+ expect(r.random().intRangeLessThanBiased(i64, 20, 40) >= 20);
// uncomment for broken module error:
- //expect(r.random.intRangeAtMostBiased(u0, 0, 0) == 0);
- expect(r.random.intRangeAtMostBiased(u1, 0, 1) >= 0);
- expect(r.random.intRangeAtMostBiased(i1, -1, 0) >= -1);
- expect(r.random.intRangeAtMostBiased(u32, 10, 20) >= 10);
- expect(r.random.intRangeAtMostBiased(i32, 10, 20) >= 10);
- expect(r.random.intRangeAtMostBiased(u64, 20, 40) >= 20);
- expect(r.random.intRangeAtMostBiased(i64, 20, 40) >= 20);
+ //expect(r.random().intRangeAtMostBiased(u0, 0, 0) == 0);
+ expect(r.random().intRangeAtMostBiased(u1, 0, 1) >= 0);
+ expect(r.random().intRangeAtMostBiased(i1, -1, 0) >= -1);
+ expect(r.random().intRangeAtMostBiased(u32, 10, 20) >= 10);
+ expect(r.random().intRangeAtMostBiased(i32, 10, 20) >= 10);
+ expect(r.random().intRangeAtMostBiased(u64, 20, 40) >= 20);
+ expect(r.random().intRangeAtMostBiased(i64, 20, 40) >= 20);
}
// Generator to extend 64-bit seed values into longer sequences.
@@ -521,14 +529,11 @@ test "splitmix64 sequence" {
pub const Pcg = struct {
const default_multiplier = 6364136223846793005;
- random: Random,
-
s: u64,
i: u64,
pub fn init(init_s: u64) Pcg {
var pcg = Pcg{
- .random = Random{ .fillFn = fill },
.s = undefined,
.i = undefined,
};
@@ -561,8 +566,8 @@ pub const Pcg = struct {
self.s = self.s *% default_multiplier +% self.i;
}
- fn fill(r: *Random, buf: []u8) void {
- const self = @fieldParentPtr(Pcg, "random", r);
+ fn fill(r: Random, buf: []u8) void {
+ const self = r.iface.implCast(Pcg);
var i: usize = 0;
const aligned_len = buf.len - (buf.len & 7);
@@ -586,6 +591,13 @@ pub const Pcg = struct {
}
}
}
+
+ pub fn random(self: *Pcg) Random {
+ return Random{
+ .iface = Random.Iface.init(self),
+ .fillFn = fill,
+ };
+ }
};
test "pcg sequence" {
@@ -612,13 +624,10 @@ test "pcg sequence" {
//
// PRNG
pub const Xoroshiro128 = struct {
- random: Random,
-
s: [2]u64,
pub fn init(init_s: u64) Xoroshiro128 {
var x = Xoroshiro128{
- .random = Random{ .fillFn = fill },
.s = undefined,
};
@@ -671,8 +680,8 @@ pub const Xoroshiro128 = struct {
self.s[1] = gen.next();
}
- fn fill(r: *Random, buf: []u8) void {
- const self = @fieldParentPtr(Xoroshiro128, "random", r);
+ fn fill(r: Random, buf: []u8) void {
+ const self = r.iface.implCast(Xoroshiro128);
var i: usize = 0;
const aligned_len = buf.len - (buf.len & 7);
@@ -696,6 +705,13 @@ pub const Xoroshiro128 = struct {
}
}
}
+
+ pub fn random(self: *Xoroshiro128) Random {
+ return Random{
+ .iface = Random.Iface.init(self),
+ .fillFn = fill,
+ };
+ }
};
test "xoroshiro sequence" {
@@ -739,8 +755,6 @@ test "xoroshiro sequence" {
// Follows the general idea of the implementation from here with a few shortcuts.
// https://doc.rust-lang.org/rand/src/rand/prng/isaac64.rs.html
pub const Isaac64 = struct {
- random: Random,
-
r: [256]u64,
m: [256]u64,
a: u64,
@@ -750,7 +764,6 @@ pub const Isaac64 = struct {
pub fn init(init_s: u64) Isaac64 {
var isaac = Isaac64{
- .random = Random{ .fillFn = fill },
.r = undefined,
.m = undefined,
.a = undefined,
@@ -880,8 +893,8 @@ pub const Isaac64 = struct {
self.i = self.r.len; // trigger refill on first value
}
- fn fill(r: *Random, buf: []u8) void {
- const self = @fieldParentPtr(Isaac64, "random", r);
+ fn fill(r: Random, buf: []u8) void {
+ const self = r.iface.implCast(Isaac64);
var i: usize = 0;
const aligned_len = buf.len - (buf.len & 7);
@@ -905,6 +918,13 @@ pub const Isaac64 = struct {
}
}
}
+
+ pub fn random(self: *Isaac64) Random {
+ return Random{
+ .iface = Random.Iface.init(self),
+ .fillFn = fill,
+ };
+ }
};
test "isaac64 sequence" {
@@ -941,11 +961,11 @@ test "Random float" {
var i: usize = 0;
while (i < 1000) : (i += 1) {
- const val1 = prng.random.float(f32);
+ const val1 = prng.random().float(f32);
expect(val1 >= 0.0);
expect(val1 < 1.0);
- const val2 = prng.random.float(f64);
+ const val2 = prng.random().float(f64);
expect(val2 >= 0.0);
expect(val2 < 1.0);
}
@@ -959,7 +979,7 @@ test "Random shuffle" {
var i: usize = 0;
while (i < 1000) : (i += 1) {
- prng.random.shuffle(u8, seq[0..]);
+ prng.random().shuffle(u8, seq[0..]);
seen[seq[0]] = true;
expect(sumArray(seq[0..]) == 10);
}
@@ -979,17 +999,17 @@ fn sumArray(s: []const u8) u32 {
test "Random range" {
var prng = DefaultPrng.init(0);
- testRange(&prng.random, -4, 3);
- testRange(&prng.random, -4, -1);
- testRange(&prng.random, 10, 14);
- testRange(&prng.random, -0x80, 0x7f);
+ testRange(prng.random(), -4, 3);
+ testRange(prng.random(), -4, -1);
+ testRange(prng.random(), 10, 14);
+ testRange(prng.random(), -0x80, 0x7f);
}
-fn testRange(r: *Random, start: i8, end: i8) void {
+fn testRange(r: Random, start: i8, end: i8) void {
testRangeBias(r, start, end, true);
testRangeBias(r, start, end, false);
}
-fn testRangeBias(r: *Random, start: i8, end: i8, biased: bool) void {
+fn testRangeBias(r: Random, start: i8, end: i8, biased: bool) void {
const count = @intCast(usize, i32(end) - i32(start));
var values_buffer = []bool{false} ** 0x100;
const values = values_buffer[0..count];
diff --git a/std/rand/ziggurat.zig b/std/rand/ziggurat.zig
index 995248415be1..4497b8ce33dc 100644
--- a/std/rand/ziggurat.zig
+++ b/std/rand/ziggurat.zig
@@ -12,7 +12,7 @@ const std = @import("../std.zig");
const math = std.math;
const Random = std.rand.Random;
-pub fn next_f64(random: *Random, comptime tables: ZigTable) f64 {
+pub fn next_f64(random: Random, comptime tables: ZigTable) f64 {
while (true) {
// We manually construct a float from parts as we can avoid an extra random lookup here by
// using the unused exponent for the lookup table entry.
@@ -60,7 +60,7 @@ pub const ZigTable = struct {
// whether the distribution is symmetric
is_symmetric: bool,
// fallback calculation in the case we are in the 0 block
- zero_case: fn (*Random, f64) f64,
+ zero_case: fn (Random, f64) f64,
};
// zigNorInit
@@ -70,7 +70,7 @@ fn ZigTableGen(
comptime v: f64,
comptime f: fn (f64) f64,
comptime f_inv: fn (f64) f64,
- comptime zero_case: fn (*Random, f64) f64,
+ comptime zero_case: fn (Random, f64) f64,
) ZigTable {
var tables: ZigTable = undefined;
@@ -110,7 +110,7 @@ fn norm_f(x: f64) f64 {
fn norm_f_inv(y: f64) f64 {
return math.sqrt(-2.0 * math.ln(y));
}
-fn norm_zero_case(random: *Random, u: f64) f64 {
+fn norm_zero_case(random: Random, u: f64) f64 {
var x: f64 = 1;
var y: f64 = 0;
@@ -130,7 +130,7 @@ test "ziggurant normal dist sanity" {
var prng = std.rand.DefaultPrng.init(0);
var i: usize = 0;
while (i < 1000) : (i += 1) {
- _ = prng.random.floatNorm(f64);
+ _ = prng.random().floatNorm(f64);
}
}
@@ -149,7 +149,7 @@ fn exp_f(x: f64) f64 {
fn exp_f_inv(y: f64) f64 {
return -math.ln(y);
}
-fn exp_zero_case(random: *Random, _: f64) f64 {
+fn exp_zero_case(random: Random, _: f64) f64 {
return exp_r - math.ln(random.float(f64));
}
@@ -157,7 +157,7 @@ test "ziggurant exp dist sanity" {
var prng = std.rand.DefaultPrng.init(0);
var i: usize = 0;
while (i < 1000) : (i += 1) {
- _ = prng.random.floatExp(f64);
+ _ = prng.random().floatExp(f64);
}
}
diff --git a/std/segmented_list.zig b/std/segmented_list.zig
index a6603af9943d..cb2d244d4c4f 100644
--- a/std/segmented_list.zig
+++ b/std/segmented_list.zig
@@ -89,7 +89,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
prealloc_segment: [prealloc_item_count]T,
dynamic_segments: [][*]T,
- allocator: *Allocator,
+ allocator: Allocator,
len: usize,
pub const prealloc_count = prealloc_item_count;
@@ -103,7 +103,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
}
/// Deinitialize with `deinit`
- pub fn init(allocator: *Allocator) Self {
+ pub fn init(allocator: Allocator) Self {
return Self{
.allocator = allocator,
.len = 0,
@@ -336,7 +336,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
test "std.SegmentedList" {
var da = std.heap.DirectAllocator.init();
defer da.deinit();
- var a = &da.allocator;
+ var a = da.allocator();
try testSegmentedList(0, a);
try testSegmentedList(1, a);
@@ -346,7 +346,7 @@ test "std.SegmentedList" {
try testSegmentedList(16, a);
}
-fn testSegmentedList(comptime prealloc: usize, allocator: *Allocator) !void {
+fn testSegmentedList(comptime prealloc: usize, allocator: Allocator) !void {
var list = SegmentedList(i32, prealloc).init(allocator);
defer list.deinit();
diff --git a/std/sort.zig b/std/sort.zig
index 69dc148f31bc..63005e3da98b 100644
--- a/std/sort.zig
+++ b/std/sort.zig
@@ -1166,16 +1166,16 @@ test "sort fuzz testing" {
const test_case_count = 10;
var i: usize = 0;
while (i < test_case_count) : (i += 1) {
- fuzzTest(&prng.random);
+ fuzzTest(prng.random());
}
}
var fixed_buffer_mem: [100 * 1024]u8 = undefined;
-fn fuzzTest(rng: *std.rand.Random) void {
+fn fuzzTest(rng: std.rand.Random) void {
const array_size = rng.range(usize, 0, 1000);
var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
- var array = fixed_allocator.allocator.alloc(IdAndValue, array_size) catch unreachable;
+ var array = fixed_allocator.allocator().alloc(IdAndValue, array_size) catch unreachable;
// populate with random data
for (array) |*item, index| {
item.id = index;
diff --git a/std/special/build_runner.zig b/std/special/build_runner.zig
index dfc383857736..2d074d6ee258 100644
--- a/std/special/build_runner.zig
+++ b/std/special/build_runner.zig
@@ -19,10 +19,10 @@ pub fn main() !void {
var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit();
- var arena = std.heap.ArenaAllocator.init(&direct_allocator.allocator);
+ var arena = std.heap.ArenaAllocator.init(direct_allocator.allocator());
defer arena.deinit();
- const allocator = &arena.allocator;
+ const allocator = arena.allocator();
// skip my own exe name
_ = arg_it.skip();
@@ -48,17 +48,17 @@ pub fn main() !void {
var prefix: ?[]const u8 = null;
var stderr_file = io.getStdErr();
- var stderr_file_stream: os.File.OutStream = undefined;
+ var stderr_file_stream: os.File.OutStreamAdapter = undefined;
var stderr_stream = if (stderr_file) |f| x: {
- stderr_file_stream = f.outStream();
- break :x &stderr_file_stream.stream;
+ stderr_file_stream = f.outStreamAdapter();
+ break :x stderr_file_stream.outStream();
} else |err| err;
var stdout_file = io.getStdOut();
- var stdout_file_stream: os.File.OutStream = undefined;
+ var stdout_file_stream: os.File.OutStreamAdapter = undefined;
var stdout_stream = if (stdout_file) |f| x: {
- stdout_file_stream = f.outStream();
- break :x &stdout_file_stream.stream;
+ stdout_file_stream = f.outStreamAdapter();
+ break :x stdout_file_stream.outStream();
} else |err| err;
while (arg_it.next(allocator)) |err_or_arg| {
diff --git a/std/statically_initialized_mutex.zig b/std/statically_initialized_mutex.zig
index cfcaf036d392..2f2f3c492423 100644
--- a/std/statically_initialized_mutex.zig
+++ b/std/statically_initialized_mutex.zig
@@ -83,11 +83,11 @@ test "std.StaticallyInitializedMutex" {
var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit();
- var plenty_of_memory = try direct_allocator.allocator.alloc(u8, 300 * 1024);
- defer direct_allocator.allocator.free(plenty_of_memory);
+ var plenty_of_memory = try direct_allocator.allocator().alloc(u8, 300 * 1024);
+ defer direct_allocator.allocator().free(plenty_of_memory);
var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
- var a = &fixed_buffer_allocator.allocator;
+ var a = fixed_buffer_allocator.allocator();
var context = TestContext{ .data = 0 };
diff --git a/std/std.zig b/std/std.zig
index 8ec042fdb877..f467e79f1634 100644
--- a/std/std.zig
+++ b/std/std.zig
@@ -7,6 +7,7 @@ pub const Buffer = @import("buffer.zig").Buffer;
pub const BufferOutStream = @import("io.zig").BufferOutStream;
pub const DynLib = @import("dynamic_library.zig").DynLib;
pub const HashMap = @import("hash_map.zig").HashMap;
+pub const Interface = @import("interface.zig").Interface;
pub const LinkedList = @import("linked_list.zig").LinkedList;
pub const Mutex = @import("mutex.zig").Mutex;
pub const PackedIntArrayEndian = @import("packed_int_array.zig").PackedIntArrayEndian;
diff --git a/std/unicode.zig b/std/unicode.zig
index 37a73d75004b..09af24e59982 100644
--- a/std/unicode.zig
+++ b/std/unicode.zig
@@ -465,7 +465,7 @@ fn testDecode(bytes: []const u8) !u32 {
}
/// Caller must free returned memory.
-pub fn utf16leToUtf8Alloc(allocator: *mem.Allocator, utf16le: []const u16) ![]u8 {
+pub fn utf16leToUtf8Alloc(allocator: mem.Allocator, utf16le: []const u16) ![]u8 {
var result = std.ArrayList(u8).init(allocator);
// optimistically guess that it will all be ascii.
try result.ensureCapacity(utf16le.len);
@@ -544,7 +544,7 @@ test "utf16leToUtf8" {
/// TODO support codepoints bigger than 16 bits
/// TODO type for null terminated pointer
-pub fn utf8ToUtf16LeWithNull(allocator: *mem.Allocator, utf8: []const u8) ![]u16 {
+pub fn utf8ToUtf16LeWithNull(allocator: mem.Allocator, utf8: []const u8) ![]u16 {
var result = std.ArrayList(u16).init(allocator);
// optimistically guess that it will not require surrogate pairs
try result.ensureCapacity(utf8.len + 1);
diff --git a/std/zig/bench.zig b/std/zig/bench.zig
index ed6ae9a128b3..786d26deb7db 100644
--- a/std/zig/bench.zig
+++ b/std/zig/bench.zig
@@ -24,13 +24,13 @@ pub fn main() !void {
const mb_per_sec = bytes_per_sec / (1024 * 1024);
var stdout_file = try std.io.getStdOut();
- const stdout = &stdout_file.outStream().stream;
+ const stdout = stdout_file.outStreamAdapter().outStream();
try stdout.print("{.3} MiB/s, {} KiB used \n", mb_per_sec, memory_used / 1024);
}
fn testOnce() usize {
var fixed_buf_alloc = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
- var allocator = &fixed_buf_alloc.allocator;
+ var allocator = fixed_buf_alloc.allocator();
_ = std.zig.parse(allocator, source) catch @panic("parse failure");
return fixed_buf_alloc.end_index;
}
diff --git a/std/zig/parse.zig b/std/zig/parse.zig
index 12a28bf65794..999d4f151a6d 100644
--- a/std/zig/parse.zig
+++ b/std/zig/parse.zig
@@ -13,7 +13,7 @@ pub const Error = error{ParseError} || Allocator.Error;
/// Result should be freed with tree.deinit() when there are
/// no more references to any of the tokens or nodes.
-pub fn parse(allocator: *Allocator, source: []const u8) !*Tree {
+pub fn parse(allocator: Allocator, source: []const u8) !*Tree {
const tree = blk: {
// This block looks unnecessary, but is a "foot-shield" to prevent the SegmentedLists
// from being initialized with a pointer to this `arena`, which is created on
@@ -22,7 +22,7 @@ pub fn parse(allocator: *Allocator, source: []const u8) !*Tree {
// https://github.com/ziglang/zig/commit/cb4fb14b6e66bd213575f69eec9598be8394fae6
var arena = std.heap.ArenaAllocator.init(allocator);
errdefer arena.deinit();
- const tree = try arena.allocator.create(ast.Tree);
+ const tree = try arena.allocator().create(ast.Tree);
tree.* = ast.Tree{
.source = source,
.root_node = undefined,
@@ -33,7 +33,7 @@ pub fn parse(allocator: *Allocator, source: []const u8) !*Tree {
break :blk tree;
};
errdefer tree.deinit();
- const arena = &tree.arena_allocator.allocator;
+ const arena = tree.arena_allocator.allocator();
tree.tokens = ast.Tree.TokenList.init(arena);
tree.errors = ast.Tree.ErrorList.init(arena);
@@ -53,7 +53,7 @@ pub fn parse(allocator: *Allocator, source: []const u8) !*Tree {
}
/// Root <- skip ContainerMembers eof
-fn parseRoot(arena: *Allocator, it: *TokenIterator, tree: *Tree) Allocator.Error!*Node.Root {
+fn parseRoot(arena: Allocator, it: *TokenIterator, tree: *Tree) Allocator.Error!*Node.Root {
const node = try arena.create(Node.Root);
node.* = Node.Root{
.base = Node{ .id = .Root },
@@ -90,7 +90,7 @@ fn parseRoot(arena: *Allocator, it: *TokenIterator, tree: *Tree) Allocator.Error
/// / KEYWORD_pub? ContainerField COMMA ContainerMembers
/// / KEYWORD_pub? ContainerField
/// /
-fn parseContainerMembers(arena: *Allocator, it: *TokenIterator, tree: *Tree) !Node.Root.DeclList {
+fn parseContainerMembers(arena: Allocator, it: *TokenIterator, tree: *Tree) !Node.Root.DeclList {
var list = Node.Root.DeclList.init(arena);
while (true) {
@@ -163,7 +163,7 @@ fn parseContainerMembers(arena: *Allocator, it: *TokenIterator, tree: *Tree) !No
}
/// TestDecl <- KEYWORD_test STRINGLITERAL Block
-fn parseTestDecl(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseTestDecl(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const test_token = eatToken(it, .Keyword_test) orelse return null;
const name_node = try expectNode(arena, it, tree, parseStringLiteral, AstError{
.ExpectedStringLiteral = AstError.ExpectedStringLiteral{ .token = it.index },
@@ -184,7 +184,7 @@ fn parseTestDecl(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
}
/// TopLevelComptime <- KEYWORD_comptime BlockExpr
-fn parseTopLevelComptime(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseTopLevelComptime(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const tok = eatToken(it, .Keyword_comptime) orelse return null;
const block_node = try expectNode(arena, it, tree, parseBlockExpr, AstError{
.ExpectedLabelOrLBrace = AstError.ExpectedLabelOrLBrace{ .token = it.index },
@@ -204,7 +204,7 @@ fn parseTopLevelComptime(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*
/// <- (KEYWORD_export / KEYWORD_extern STRINGLITERAL? / KEYWORD_inline)? FnProto (SEMICOLON / Block)
/// / (KEYWORD_export / KEYWORD_extern STRINGLITERAL?)? KEYWORD_threadlocal? VarDecl
/// / KEYWORD_use Expr SEMICOLON
-fn parseTopLevelDecl(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseTopLevelDecl(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
var lib_name: ?*Node = null;
const extern_export_inline_token = blk: {
if (eatToken(it, .Keyword_export)) |token| break :blk token;
@@ -276,7 +276,7 @@ fn parseTopLevelDecl(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node
}
/// FnProto <- FnCC? KEYWORD_fn IDENTIFIER? LPAREN ParamDeclList RPAREN ByteAlign? LinkSection? EXCLAMATIONMARK? (KEYWORD_var / TypeExpr)
-fn parseFnProto(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseFnProto(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const cc = try parseFnCC(arena, it, tree);
const fn_token = eatToken(it, .Keyword_fn) orelse {
if (cc == null) return null else return error.ParseError;
@@ -339,7 +339,7 @@ fn parseFnProto(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
}
/// VarDecl <- (KEYWORD_const / KEYWORD_var) IDENTIFIER (COLON TypeExpr)? ByteAlign? LinkSection? (EQUAL Expr)? SEMICOLON
-fn parseVarDecl(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseVarDecl(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const mut_token = eatToken(it, .Keyword_const) orelse
eatToken(it, .Keyword_var) orelse
return null;
@@ -383,7 +383,7 @@ fn parseVarDecl(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
}
/// ContainerField <- IDENTIFIER (COLON TypeExpr)? (EQUAL Expr)?
-fn parseContainerField(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseContainerField(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const name_token = eatToken(it, .Identifier) orelse return null;
const type_expr = if (eatToken(it, .Colon)) |_|
@@ -422,7 +422,7 @@ fn parseContainerField(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*No
/// / LabeledStatement
/// / SwitchExpr
/// / AssignExpr SEMICOLON
-fn parseStatement(arena: *Allocator, it: *TokenIterator, tree: *Tree) Error!?*Node {
+fn parseStatement(arena: Allocator, it: *TokenIterator, tree: *Tree) Error!?*Node {
const comptime_token = eatToken(it, .Keyword_comptime);
const var_decl_node = try parseVarDecl(arena, it, tree);
@@ -493,7 +493,7 @@ fn parseStatement(arena: *Allocator, it: *TokenIterator, tree: *Tree) Error!?*No
/// IfStatement
/// <- IfPrefix BlockExpr ( KEYWORD_else Payload? Statement )?
/// / IfPrefix AssignExpr ( SEMICOLON / KEYWORD_else Payload? Statement )
-fn parseIfStatement(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseIfStatement(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const if_node = (try parseIfPrefix(arena, it, tree)) orelse return null;
const if_prefix = if_node.cast(Node.If).?;
@@ -552,7 +552,7 @@ fn parseIfStatement(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node
}
/// LabeledStatement <- BlockLabel? (Block / LoopStatement)
-fn parseLabeledStatement(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseLabeledStatement(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const label_token = parseBlockLabel(arena, it, tree);
if (try parseBlock(arena, it, tree)) |node| {
@@ -580,7 +580,7 @@ fn parseLabeledStatement(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*
}
/// LoopStatement <- KEYWORD_inline? (ForStatement / WhileStatement)
-fn parseLoopStatement(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseLoopStatement(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const inline_token = eatToken(it, .Keyword_inline);
if (try parseForStatement(arena, it, tree)) |node| {
@@ -599,7 +599,7 @@ fn parseLoopStatement(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Nod
/// ForStatement
/// <- ForPrefix BlockExpr ( KEYWORD_else Statement )?
/// / ForPrefix AssignExpr ( SEMICOLON / KEYWORD_else Statement )
-fn parseForStatement(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseForStatement(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const node = (try parseForPrefix(arena, it, tree)) orelse return null;
const for_prefix = node.cast(Node.For).?;
@@ -659,7 +659,7 @@ fn parseForStatement(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node
/// WhileStatement
/// <- WhilePrefix BlockExpr ( KEYWORD_else Payload? Statement )?
/// / WhilePrefix AssignExpr ( SEMICOLON / KEYWORD_else Payload? Statement )
-fn parseWhileStatement(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseWhileStatement(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const node = (try parseWhilePrefix(arena, it, tree)) orelse return null;
const while_prefix = node.cast(Node.While).?;
@@ -723,7 +723,7 @@ fn parseWhileStatement(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*No
/// BlockExprStatement
/// <- BlockExpr
/// / AssignExpr SEMICOLON
-fn parseBlockExprStatement(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseBlockExprStatement(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
if (try parseBlockExpr(arena, it, tree)) |node| return node;
if (try parseAssignExpr(arena, it, tree)) |node| {
_ = try expectToken(it, tree, .Semicolon);
@@ -733,7 +733,7 @@ fn parseBlockExprStatement(arena: *Allocator, it: *TokenIterator, tree: *Tree) !
}
/// BlockExpr <- BlockLabel? Block
-fn parseBlockExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) Error!?*Node {
+fn parseBlockExpr(arena: Allocator, it: *TokenIterator, tree: *Tree) Error!?*Node {
const label_token = parseBlockLabel(arena, it, tree);
const block_node = (try parseBlock(arena, it, tree)) orelse {
if (label_token) |label| {
@@ -747,17 +747,17 @@ fn parseBlockExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) Error!?*No
}
/// AssignExpr <- Expr (AssignOp Expr)?
-fn parseAssignExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseAssignExpr(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
return parseBinOpExpr(arena, it, tree, parseAssignOp, parseExpr, .Once);
}
/// Expr <- KEYWORD_try* BoolOrExpr
-fn parseExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) Error!?*Node {
+fn parseExpr(arena: Allocator, it: *TokenIterator, tree: *Tree) Error!?*Node {
return parsePrefixOpExpr(arena, it, tree, parseTry, parseBoolOrExpr);
}
/// BoolOrExpr <- BoolAndExpr (KEYWORD_or BoolAndExpr)*
-fn parseBoolOrExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseBoolOrExpr(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
return parseBinOpExpr(
arena,
it,
@@ -769,7 +769,7 @@ fn parseBoolOrExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
}
/// BoolAndExpr <- CompareExpr (KEYWORD_and CompareExpr)*
-fn parseBoolAndExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseBoolAndExpr(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
return parseBinOpExpr(
arena,
it,
@@ -781,32 +781,32 @@ fn parseBoolAndExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node
}
/// CompareExpr <- BitwiseExpr (CompareOp BitwiseExpr)?
-fn parseCompareExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseCompareExpr(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
return parseBinOpExpr(arena, it, tree, parseCompareOp, parseBitwiseExpr, .Once);
}
/// BitwiseExpr <- BitShiftExpr (BitwiseOp BitShiftExpr)*
-fn parseBitwiseExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseBitwiseExpr(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
return parseBinOpExpr(arena, it, tree, parseBitwiseOp, parseBitShiftExpr, .Infinitely);
}
/// BitShiftExpr <- AdditionExpr (BitShiftOp AdditionExpr)*
-fn parseBitShiftExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseBitShiftExpr(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
return parseBinOpExpr(arena, it, tree, parseBitShiftOp, parseAdditionExpr, .Infinitely);
}
/// AdditionExpr <- MultiplyExpr (AdditionOp MultiplyExpr)*
-fn parseAdditionExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseAdditionExpr(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
return parseBinOpExpr(arena, it, tree, parseAdditionOp, parseMultiplyExpr, .Infinitely);
}
/// MultiplyExpr <- PrefixExpr (MultiplyOp PrefixExpr)*
-fn parseMultiplyExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseMultiplyExpr(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
return parseBinOpExpr(arena, it, tree, parseMultiplyOp, parsePrefixExpr, .Infinitely);
}
/// PrefixExpr <- PrefixOp* PrimaryExpr
-fn parsePrefixExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parsePrefixExpr(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
return parsePrefixOpExpr(arena, it, tree, parsePrefixOp, parsePrimaryExpr);
}
@@ -822,7 +822,7 @@ fn parsePrefixExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
/// / BlockLabel? LoopExpr
/// / Block
/// / CurlySuffixExpr
-fn parsePrimaryExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parsePrimaryExpr(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
if (try parseAsmExpr(arena, it, tree)) |node| return node;
if (try parseIfExpr(arena, it, tree)) |node| return node;
@@ -926,12 +926,12 @@ fn parsePrimaryExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node
}
/// IfExpr <- IfPrefix Expr (KEYWORD_else Payload? Expr)?
-fn parseIfExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseIfExpr(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
return parseIf(arena, it, tree, parseExpr);
}
/// Block <- LBRACE Statement* RBRACE
-fn parseBlock(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseBlock(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const lbrace = eatToken(it, .LBrace) orelse return null;
var statements = Node.Block.StatementList.init(arena);
@@ -955,7 +955,7 @@ fn parseBlock(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
}
/// LoopExpr <- KEYWORD_inline? (ForExpr / WhileExpr)
-fn parseLoopExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseLoopExpr(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const inline_token = eatToken(it, .Keyword_inline);
if (try parseForExpr(arena, it, tree)) |node| {
@@ -978,7 +978,7 @@ fn parseLoopExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
}
/// ForExpr <- ForPrefix Expr (KEYWORD_else Expr)?
-fn parseForExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseForExpr(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const node = (try parseForPrefix(arena, it, tree)) orelse return null;
const for_prefix = node.cast(Node.For).?;
@@ -1007,7 +1007,7 @@ fn parseForExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
}
/// WhileExpr <- WhilePrefix Expr (KEYWORD_else Payload? Expr)?
-fn parseWhileExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseWhileExpr(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const node = (try parseWhilePrefix(arena, it, tree)) orelse return null;
const while_prefix = node.cast(Node.While).?;
@@ -1037,7 +1037,7 @@ fn parseWhileExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
}
/// CurlySuffixExpr <- TypeExpr InitList?
-fn parseCurlySuffixExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseCurlySuffixExpr(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const type_expr = (try parseTypeExpr(arena, it, tree)) orelse return null;
const init_list = (try parseInitList(arena, it, tree)) orelse return type_expr;
init_list.cast(Node.SuffixOp).?.lhs = type_expr;
@@ -1048,7 +1048,7 @@ fn parseCurlySuffixExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*N
/// <- LBRACE FieldInit (COMMA FieldInit)* COMMA? RBRACE
/// / LBRACE Expr (COMMA Expr)* COMMA? RBRACE
/// / LBRACE RBRACE
-fn parseInitList(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseInitList(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const lbrace = eatToken(it, .LBrace) orelse return null;
var init_list = Node.SuffixOp.Op.InitList.init(arena);
@@ -1085,12 +1085,12 @@ fn parseInitList(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
}
/// TypeExpr <- PrefixTypeOp* ErrorUnionExpr
-fn parseTypeExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) Error!?*Node {
+fn parseTypeExpr(arena: Allocator, it: *TokenIterator, tree: *Tree) Error!?*Node {
return parsePrefixOpExpr(arena, it, tree, parsePrefixTypeOp, parseErrorUnionExpr);
}
/// ErrorUnionExpr <- SuffixExpr (EXCLAMATIONMARK TypeExpr)?
-fn parseErrorUnionExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseErrorUnionExpr(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const suffix_expr = (try parseSuffixExpr(arena, it, tree)) orelse return null;
if (try SimpleBinOpParseFn(.Bang, Node.InfixOp.Op.ErrorUnion)(arena, it, tree)) |node| {
@@ -1109,7 +1109,7 @@ fn parseErrorUnionExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*No
/// SuffixExpr
/// <- AsyncPrefix PrimaryTypeExpr SuffixOp* FnCallArguments
/// / PrimaryTypeExpr (SuffixOp / FnCallArguments)*
-fn parseSuffixExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseSuffixExpr(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
if (try parseAsyncPrefix(arena, it, tree)) |async_node| {
// TODO: Implement hack for parsing `async fn ...` in ast_parse_suffix_expr
var res = try expectNode(arena, it, tree, parsePrimaryTypeExpr, AstError{
@@ -1208,7 +1208,7 @@ fn parseSuffixExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
/// / KEYWORD_unreachable
/// / STRINGLITERAL
/// / SwitchExpr
-fn parsePrimaryTypeExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parsePrimaryTypeExpr(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
if (try parseBuiltinCall(arena, it, tree)) |node| return node;
if (eatToken(it, .CharLiteral)) |token| {
const node = try arena.create(Node.CharLiteral);
@@ -1277,7 +1277,7 @@ fn parsePrimaryTypeExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*N
}
/// ContainerDecl <- (KEYWORD_extern / KEYWORD_packed)? ContainerDeclAuto
-fn parseContainerDecl(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseContainerDecl(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const layout_token = eatToken(it, .Keyword_extern) orelse
eatToken(it, .Keyword_packed);
@@ -1291,7 +1291,7 @@ fn parseContainerDecl(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Nod
}
/// ErrorSetDecl <- KEYWORD_error LBRACE IdentifierList RBRACE
-fn parseErrorSetDecl(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseErrorSetDecl(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const error_token = eatToken(it, .Keyword_error) orelse return null;
if (eatToken(it, .LBrace) == null) {
// Might parse as `KEYWORD_error DOT IDENTIFIER` later in PrimaryTypeExpr, so don't error
@@ -1312,7 +1312,7 @@ fn parseErrorSetDecl(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node
}
/// GroupedExpr <- LPAREN Expr RPAREN
-fn parseGroupedExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseGroupedExpr(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const lparen = eatToken(it, .LParen) orelse return null;
const expr = try expectNode(arena, it, tree, parseExpr, AstError{
.ExpectedExpr = AstError.ExpectedExpr{ .token = it.index },
@@ -1330,14 +1330,14 @@ fn parseGroupedExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node
}
/// IfTypeExpr <- IfPrefix TypeExpr (KEYWORD_else Payload? TypeExpr)?
-fn parseIfTypeExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseIfTypeExpr(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
return parseIf(arena, it, tree, parseTypeExpr);
}
/// LabeledTypeExpr
/// <- BlockLabel Block
/// / BlockLabel? LoopTypeExpr
-fn parseLabeledTypeExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseLabeledTypeExpr(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const label = parseBlockLabel(arena, it, tree);
if (label) |token| {
@@ -1367,7 +1367,7 @@ fn parseLabeledTypeExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*N
}
/// LoopTypeExpr <- KEYWORD_inline? (ForTypeExpr / WhileTypeExpr)
-fn parseLoopTypeExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseLoopTypeExpr(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const inline_token = eatToken(it, .Keyword_inline);
if (try parseForTypeExpr(arena, it, tree)) |node| {
@@ -1390,7 +1390,7 @@ fn parseLoopTypeExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node
}
/// ForTypeExpr <- ForPrefix TypeExpr (KEYWORD_else TypeExpr)?
-fn parseForTypeExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseForTypeExpr(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const node = (try parseForPrefix(arena, it, tree)) orelse return null;
const for_prefix = node.cast(Node.For).?;
@@ -1419,7 +1419,7 @@ fn parseForTypeExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node
}
/// WhileTypeExpr <- WhilePrefix TypeExpr (KEYWORD_else Payload? TypeExpr)?
-fn parseWhileTypeExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseWhileTypeExpr(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const node = (try parseWhilePrefix(arena, it, tree)) orelse return null;
const while_prefix = node.cast(Node.While).?;
@@ -1450,7 +1450,7 @@ fn parseWhileTypeExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Nod
}
/// SwitchExpr <- KEYWORD_switch LPAREN Expr RPAREN LBRACE SwitchProngList RBRACE
-fn parseSwitchExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseSwitchExpr(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const switch_token = eatToken(it, .Keyword_switch) orelse return null;
_ = try expectToken(it, tree, .LParen);
const expr_node = try expectNode(arena, it, tree, parseExpr, AstError{
@@ -1473,7 +1473,7 @@ fn parseSwitchExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
}
/// AsmExpr <- KEYWORD_asm KEYWORD_volatile? LPAREN STRINGLITERAL AsmOutput? RPAREN
-fn parseAsmExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseAsmExpr(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const asm_token = eatToken(it, .Keyword_asm) orelse return null;
const volatile_token = eatToken(it, .Keyword_volatile);
_ = try expectToken(it, tree, .LParen);
@@ -1499,7 +1499,7 @@ fn parseAsmExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
}
/// DOT IDENTIFIER
-fn parseEnumLiteral(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseEnumLiteral(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const dot = eatToken(it, .Period) orelse return null;
const name = try expectToken(it, tree, .Identifier);
const node = try arena.create(Node.EnumLiteral);
@@ -1512,14 +1512,14 @@ fn parseEnumLiteral(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node
}
/// AsmOutput <- COLON AsmOutputList AsmInput?
-fn parseAsmOutput(arena: *Allocator, it: *TokenIterator, tree: *Tree, asm_node: *Node.Asm) !void {
+fn parseAsmOutput(arena: Allocator, it: *TokenIterator, tree: *Tree, asm_node: *Node.Asm) !void {
if (eatToken(it, .Colon) == null) return;
asm_node.outputs = try parseAsmOutputList(arena, it, tree);
try parseAsmInput(arena, it, tree, asm_node);
}
/// AsmOutputItem <- LBRACKET IDENTIFIER RBRACKET STRINGLITERAL LPAREN (MINUSRARROW TypeExpr / IDENTIFIER) RPAREN
-fn parseAsmOutputItem(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node.AsmOutput {
+fn parseAsmOutputItem(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node.AsmOutput {
const lbracket = eatToken(it, .LBracket) orelse return null;
const name = try expectNode(arena, it, tree, parseIdentifier, AstError{
.ExpectedIdentifier = AstError.ExpectedIdentifier{ .token = it.index },
@@ -1558,14 +1558,14 @@ fn parseAsmOutputItem(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Nod
}
/// AsmInput <- COLON AsmInputList AsmClobbers?
-fn parseAsmInput(arena: *Allocator, it: *TokenIterator, tree: *Tree, asm_node: *Node.Asm) !void {
+fn parseAsmInput(arena: Allocator, it: *TokenIterator, tree: *Tree, asm_node: *Node.Asm) !void {
if (eatToken(it, .Colon) == null) return;
asm_node.inputs = try parseAsmInputList(arena, it, tree);
try parseAsmClobbers(arena, it, tree, asm_node);
}
/// AsmInputItem <- LBRACKET IDENTIFIER RBRACKET STRINGLITERAL LPAREN Expr RPAREN
-fn parseAsmInputItem(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node.AsmInput {
+fn parseAsmInputItem(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node.AsmInput {
const lbracket = eatToken(it, .LBracket) orelse return null;
const name = try expectNode(arena, it, tree, parseIdentifier, AstError{
.ExpectedIdentifier = AstError.ExpectedIdentifier{ .token = it.index },
@@ -1596,7 +1596,7 @@ fn parseAsmInputItem(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node
/// AsmClobbers <- COLON StringList
/// StringList <- (STRINGLITERAL COMMA)* STRINGLITERAL?
-fn parseAsmClobbers(arena: *Allocator, it: *TokenIterator, tree: *Tree, asm_node: *Node.Asm) !void {
+fn parseAsmClobbers(arena: Allocator, it: *TokenIterator, tree: *Tree, asm_node: *Node.Asm) !void {
if (eatToken(it, .Colon) == null) return;
asm_node.clobbers = try ListParseFn(
Node.Asm.ClobberList,
@@ -1605,7 +1605,7 @@ fn parseAsmClobbers(arena: *Allocator, it: *TokenIterator, tree: *Tree, asm_node
}
/// BreakLabel <- COLON IDENTIFIER
-fn parseBreakLabel(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseBreakLabel(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
_ = eatToken(it, .Colon) orelse return null;
return try expectNode(arena, it, tree, parseIdentifier, AstError{
.ExpectedIdentifier = AstError.ExpectedIdentifier{ .token = it.index },
@@ -1613,7 +1613,7 @@ fn parseBreakLabel(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
}
/// BlockLabel <- IDENTIFIER COLON
-fn parseBlockLabel(arena: *Allocator, it: *TokenIterator, tree: *Tree) ?TokenIndex {
+fn parseBlockLabel(arena: Allocator, it: *TokenIterator, tree: *Tree) ?TokenIndex {
const identifier = eatToken(it, .Identifier) orelse return null;
if (eatToken(it, .Colon) != null) return identifier;
putBackToken(it, identifier);
@@ -1621,7 +1621,7 @@ fn parseBlockLabel(arena: *Allocator, it: *TokenIterator, tree: *Tree) ?TokenInd
}
/// FieldInit <- DOT IDENTIFIER EQUAL Expr
-fn parseFieldInit(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseFieldInit(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const period_token = eatToken(it, .Period) orelse return null;
const name_token = try expectToken(it, tree, .Identifier);
const eq_token = eatToken(it, .Equal) orelse {
@@ -1645,7 +1645,7 @@ fn parseFieldInit(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
}
/// WhileContinueExpr <- COLON LPAREN AssignExpr RPAREN
-fn parseWhileContinueExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseWhileContinueExpr(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
_ = eatToken(it, .Colon) orelse return null;
_ = try expectToken(it, tree, .LParen);
const node = try expectNode(arena, it, tree, parseAssignExpr, AstError{
@@ -1656,7 +1656,7 @@ fn parseWhileContinueExpr(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?
}
/// LinkSection <- KEYWORD_linksection LPAREN Expr RPAREN
-fn parseLinkSection(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseLinkSection(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
_ = eatToken(it, .Keyword_linksection) orelse return null;
_ = try expectToken(it, tree, .LParen);
const expr_node = try expectNode(arena, it, tree, parseExpr, AstError{
@@ -1671,7 +1671,7 @@ fn parseLinkSection(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node
/// / KEYWORD_stdcallcc
/// / KEYWORD_extern
/// / KEYWORD_async (LARROW TypeExpr RARROW)?
-fn parseFnCC(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?FnCC {
+fn parseFnCC(arena: Allocator, it: *TokenIterator, tree: *Tree) !?FnCC {
if (eatToken(it, .Keyword_nakedcc)) |token| return FnCC{ .CC = token };
if (eatToken(it, .Keyword_stdcallcc)) |token| return FnCC{ .CC = token };
if (eatToken(it, .Keyword_extern)) |token| return FnCC{ .Extern = token };
@@ -1703,7 +1703,7 @@ const FnCC = union(enum) {
};
/// ParamDecl <- (KEYWORD_noalias / KEYWORD_comptime)? (IDENTIFIER COLON)? ParamType
-fn parseParamDecl(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseParamDecl(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const doc_comments = try parseDocComment(arena, it, tree);
const noalias_token = eatToken(it, .Keyword_noalias);
const comptime_token = if (noalias_token == null) eatToken(it, .Keyword_comptime) else null;
@@ -1748,7 +1748,7 @@ fn parseParamDecl(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
/// <- KEYWORD_var
/// / DOT3
/// / TypeExpr
-fn parseParamType(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?ParamType {
+fn parseParamType(arena: Allocator, it: *TokenIterator, tree: *Tree) !?ParamType {
if (try parseVarType(arena, it, tree)) |node| return ParamType{ .VarType = node };
if (eatToken(it, .Ellipsis3)) |token| return ParamType{ .VarArgs = token };
if (try parseTypeExpr(arena, it, tree)) |node| return ParamType{ .TypeExpr = node };
@@ -1763,7 +1763,7 @@ const ParamType = union(enum) {
};
/// IfPrefix <- KEYWORD_if LPAREN Expr RPAREN PtrPayload?
-fn parseIfPrefix(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseIfPrefix(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const if_token = eatToken(it, .Keyword_if) orelse return null;
_ = try expectToken(it, tree, .LParen);
const condition = try expectNode(arena, it, tree, parseExpr, AstError{
@@ -1785,7 +1785,7 @@ fn parseIfPrefix(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
}
/// WhilePrefix <- KEYWORD_while LPAREN Expr RPAREN PtrPayload? WhileContinueExpr?
-fn parseWhilePrefix(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseWhilePrefix(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const while_token = eatToken(it, .Keyword_while) orelse return null;
_ = try expectToken(it, tree, .LParen);
@@ -1813,7 +1813,7 @@ fn parseWhilePrefix(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node
}
/// ForPrefix <- KEYWORD_for LPAREN Expr RPAREN PtrIndexPayload
-fn parseForPrefix(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseForPrefix(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const for_token = eatToken(it, .Keyword_for) orelse return null;
_ = try expectToken(it, tree, .LParen);
@@ -1841,7 +1841,7 @@ fn parseForPrefix(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
}
/// Payload <- PIPE IDENTIFIER PIPE
-fn parsePayload(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parsePayload(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const lpipe = eatToken(it, .Pipe) orelse return null;
const identifier = try expectNode(arena, it, tree, parseIdentifier, AstError{
.ExpectedIdentifier = AstError.ExpectedIdentifier{ .token = it.index },
@@ -1859,7 +1859,7 @@ fn parsePayload(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
}
/// PtrPayload <- PIPE ASTERISK? IDENTIFIER PIPE
-fn parsePtrPayload(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parsePtrPayload(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const lpipe = eatToken(it, .Pipe) orelse return null;
const asterisk = eatToken(it, .Asterisk);
const identifier = try expectNode(arena, it, tree, parseIdentifier, AstError{
@@ -1879,7 +1879,7 @@ fn parsePtrPayload(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
}
/// PtrIndexPayload <- PIPE ASTERISK? IDENTIFIER (COMMA IDENTIFIER)? PIPE
-fn parsePtrIndexPayload(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parsePtrIndexPayload(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const lpipe = eatToken(it, .Pipe) orelse return null;
const asterisk = eatToken(it, .Asterisk);
const identifier = try expectNode(arena, it, tree, parseIdentifier, AstError{
@@ -1908,7 +1908,7 @@ fn parsePtrIndexPayload(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*N
}
/// SwitchProng <- SwitchCase EQUALRARROW PtrPayload? AssignExpr
-fn parseSwitchProng(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseSwitchProng(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const node = (try parseSwitchCase(arena, it, tree)) orelse return null;
const arrow = try expectToken(it, tree, .EqualAngleBracketRight);
const payload = try parsePtrPayload(arena, it, tree);
@@ -1927,7 +1927,7 @@ fn parseSwitchProng(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node
/// SwitchCase
/// <- SwitchItem (COMMA SwitchItem)* COMMA?
/// / KEYWORD_else
-fn parseSwitchCase(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseSwitchCase(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
var list = Node.SwitchCase.ItemList.init(arena);
if (try parseSwitchItem(arena, it, tree)) |first_item| {
@@ -1957,7 +1957,7 @@ fn parseSwitchCase(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
}
/// SwitchItem <- Expr (DOT3 Expr)?
-fn parseSwitchItem(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseSwitchItem(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const expr = (try parseExpr(arena, it, tree)) orelse return null;
if (eatToken(it, .Ellipsis3)) |token| {
const range_end = try expectNode(arena, it, tree, parseExpr, AstError{
@@ -1992,7 +1992,7 @@ fn parseSwitchItem(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
/// / PLUSPERCENTEQUAL
/// / MINUSPERCENTEQUAL
/// / EQUAL
-fn parseAssignOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseAssignOp(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const Op = Node.InfixOp.Op;
const token = nextToken(it);
@@ -2035,7 +2035,7 @@ fn parseAssignOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
/// / RARROW
/// / LARROWEQUAL
/// / RARROWEQUAL
-fn parseCompareOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseCompareOp(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const ops = Node.InfixOp.Op;
const token = nextToken(it);
@@ -2061,7 +2061,7 @@ fn parseCompareOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
/// / PIPE
/// / KEYWORD_orelse
/// / KEYWORD_catch Payload?
-fn parseBitwiseOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseBitwiseOp(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const ops = Node.InfixOp.Op;
const token = nextToken(it);
@@ -2083,7 +2083,7 @@ fn parseBitwiseOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
/// BitShiftOp
/// <- LARROW2
/// / RARROW2
-fn parseBitShiftOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseBitShiftOp(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const ops = Node.InfixOp.Op;
const token = nextToken(it);
@@ -2105,7 +2105,7 @@ fn parseBitShiftOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
/// / PLUS2
/// / PLUSPERCENT
/// / MINUSPERCENT
-fn parseAdditionOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseAdditionOp(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const ops = Node.InfixOp.Op;
const token = nextToken(it);
@@ -2131,7 +2131,7 @@ fn parseAdditionOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
/// / PERCENT
/// / ASTERISK2
/// / ASTERISKPERCENT
-fn parseMultiplyOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseMultiplyOp(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const ops = Node.InfixOp.Op;
const token = nextToken(it);
@@ -2159,7 +2159,7 @@ fn parseMultiplyOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
/// / AMPERSAND
/// / KEYWORD_try
/// / KEYWORD_await
-fn parsePrefixOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parsePrefixOp(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const ops = Node.PrefixOp.Op;
const token = nextToken(it);
@@ -2199,7 +2199,7 @@ fn parsePrefixOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
/// / KEYWORD_promise MINUSRARROW
/// / ArrayTypeStart (ByteAlign / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
/// / PtrTypeStart (KEYWORD_align LPAREN Expr (COLON INTEGER COLON INTEGER)? RPAREN / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
-fn parsePrefixTypeOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parsePrefixTypeOp(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
if (eatToken(it, .QuestionMark)) |token| {
const node = try arena.create(Node.PrefixOp);
node.* = Node.PrefixOp{
@@ -2355,7 +2355,7 @@ fn parsePrefixTypeOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node
/// / DOT IDENTIFIER
/// / DOTASTERISK
/// / DOTQUESTIONMARK
-fn parseSuffixOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseSuffixOp(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const Op = Node.SuffixOp.Op;
const OpAndToken = struct {
op: Node.SuffixOp.Op,
@@ -2427,7 +2427,7 @@ fn parseSuffixOp(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
}
/// AsyncPrefix <- KEYWORD_async (LARROW PrefixExpr RARROW)?
-fn parseAsyncPrefix(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseAsyncPrefix(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const async_token = eatToken(it, .Keyword_async) orelse return null;
var rangle_bracket: ?TokenIndex = null;
const expr_node = if (eatToken(it, .AngleBracketLeft)) |_| blk: {
@@ -2450,7 +2450,7 @@ fn parseAsyncPrefix(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node
/// FnCallArguments <- LPAREN ExprList RPAREN
/// ExprList <- (Expr COMMA)* Expr?
-fn parseFnCallArguments(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?AnnotatedParamList {
+fn parseFnCallArguments(arena: Allocator, it: *TokenIterator, tree: *Tree) !?AnnotatedParamList {
if (eatToken(it, .LParen) == null) return null;
const list = try ListParseFn(Node.FnProto.ParamList, parseExpr)(arena, it, tree);
const rparen = try expectToken(it, tree, .RParen);
@@ -2463,7 +2463,7 @@ const AnnotatedParamList = struct {
};
/// ArrayTypeStart <- LBRACKET Expr? RBRACKET
-fn parseArrayTypeStart(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseArrayTypeStart(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const lbracket = eatToken(it, .LBracket) orelse return null;
const expr = try parseExpr(arena, it, tree);
const rbracket = try expectToken(it, tree, .RBracket);
@@ -2495,7 +2495,7 @@ fn parseArrayTypeStart(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*No
/// / ASTERISK2
/// / PTRUNKNOWN
/// / PTRC
-fn parsePtrTypeStart(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parsePtrTypeStart(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const token = eatAnnotatedToken(it, .Asterisk) orelse
eatAnnotatedToken(it, .AsteriskAsterisk) orelse
eatAnnotatedToken(it, .BracketStarBracket) orelse
@@ -2540,7 +2540,7 @@ fn parsePtrTypeStart(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node
}
/// ContainerDeclAuto <- ContainerDeclType LBRACE ContainerMembers RBRACE
-fn parseContainerDeclAuto(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseContainerDeclAuto(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const node = (try parseContainerDeclType(arena, it, tree)) orelse return null;
const lbrace = try expectToken(it, tree, .LBrace);
const members = try parseContainerMembers(arena, it, tree);
@@ -2558,7 +2558,7 @@ fn parseContainerDeclAuto(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?
/// <- KEYWORD_struct
/// / KEYWORD_enum (LPAREN Expr RPAREN)?
/// / KEYWORD_union (LPAREN (KEYWORD_enum (LPAREN Expr RPAREN)? / Expr) RPAREN)?
-fn parseContainerDeclType(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseContainerDeclType(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const kind_token = nextToken(it);
const init_arg_expr = switch (kind_token.ptr.id) {
@@ -2615,7 +2615,7 @@ fn parseContainerDeclType(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?
}
/// ByteAlign <- KEYWORD_align LPAREN Expr RPAREN
-fn parseByteAlign(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseByteAlign(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
_ = eatToken(it, .Keyword_align) orelse return null;
_ = try expectToken(it, tree, .LParen);
const expr = try expectNode(arena, it, tree, parseExpr, AstError{
@@ -2627,39 +2627,39 @@ fn parseByteAlign(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
/// IdentifierList <- (IDENTIFIER COMMA)* IDENTIFIER?
/// Only ErrorSetDecl parses an IdentifierList
-fn parseErrorTagList(arena: *Allocator, it: *TokenIterator, tree: *Tree) !Node.ErrorSetDecl.DeclList {
+fn parseErrorTagList(arena: Allocator, it: *TokenIterator, tree: *Tree) !Node.ErrorSetDecl.DeclList {
return try ListParseFn(Node.ErrorSetDecl.DeclList, parseErrorTag)(arena, it, tree);
}
/// SwitchProngList <- (SwitchProng COMMA)* SwitchProng?
-fn parseSwitchProngList(arena: *Allocator, it: *TokenIterator, tree: *Tree) !Node.Switch.CaseList {
+fn parseSwitchProngList(arena: Allocator, it: *TokenIterator, tree: *Tree) !Node.Switch.CaseList {
return try ListParseFn(Node.Switch.CaseList, parseSwitchProng)(arena, it, tree);
}
/// AsmOutputList <- (AsmOutputItem COMMA)* AsmOutputItem?
-fn parseAsmOutputList(arena: *Allocator, it: *TokenIterator, tree: *Tree) Error!Node.Asm.OutputList {
+fn parseAsmOutputList(arena: Allocator, it: *TokenIterator, tree: *Tree) Error!Node.Asm.OutputList {
return try ListParseFn(Node.Asm.OutputList, parseAsmOutputItem)(arena, it, tree);
}
/// AsmInputList <- (AsmInputItem COMMA)* AsmInputItem?
-fn parseAsmInputList(arena: *Allocator, it: *TokenIterator, tree: *Tree) Error!Node.Asm.InputList {
+fn parseAsmInputList(arena: Allocator, it: *TokenIterator, tree: *Tree) Error!Node.Asm.InputList {
return try ListParseFn(Node.Asm.InputList, parseAsmInputItem)(arena, it, tree);
}
/// ParamDeclList <- (ParamDecl COMMA)* ParamDecl?
-fn parseParamDeclList(arena: *Allocator, it: *TokenIterator, tree: *Tree) !Node.FnProto.ParamList {
+fn parseParamDeclList(arena: Allocator, it: *TokenIterator, tree: *Tree) !Node.FnProto.ParamList {
return try ListParseFn(Node.FnProto.ParamList, parseParamDecl)(arena, it, tree);
}
fn ParseFn(comptime T: type) type {
- return fn (*Allocator, *TokenIterator, *Tree) Error!T;
+ return fn (Allocator, *TokenIterator, *Tree) Error!T;
}
-const NodeParseFn = fn (*Allocator, *TokenIterator, *Tree) Error!?*Node;
+const NodeParseFn = fn (Allocator, *TokenIterator, *Tree) Error!?*Node;
fn ListParseFn(comptime L: type, comptime nodeParseFn: var) ParseFn(L) {
return struct {
- pub fn parse(arena: *Allocator, it: *TokenIterator, tree: *Tree) !L {
+ pub fn parse(arena: Allocator, it: *TokenIterator, tree: *Tree) !L {
var list = L.init(arena);
while (try nodeParseFn(arena, it, tree)) |node| {
try list.push(node);
@@ -2672,7 +2672,7 @@ fn ListParseFn(comptime L: type, comptime nodeParseFn: var) ParseFn(L) {
fn SimpleBinOpParseFn(comptime token: Token.Id, comptime op: Node.InfixOp.Op) NodeParseFn {
return struct {
- pub fn parse(arena: *Allocator, it: *TokenIterator, tree: *Tree) Error!?*Node {
+ pub fn parse(arena: Allocator, it: *TokenIterator, tree: *Tree) Error!?*Node {
const op_token = eatToken(it, token) orelse return null;
const node = try arena.create(Node.InfixOp);
node.* = Node.InfixOp{
@@ -2689,7 +2689,7 @@ fn SimpleBinOpParseFn(comptime token: Token.Id, comptime op: Node.InfixOp.Op) No
// Helper parsers not included in the grammar
-fn parseBuiltinCall(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseBuiltinCall(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const token = eatToken(it, .Builtin) orelse return null;
const params = (try parseFnCallArguments(arena, it, tree)) orelse {
try tree.errors.push(AstError{
@@ -2707,7 +2707,7 @@ fn parseBuiltinCall(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node
return &node.base;
}
-fn parseErrorTag(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseErrorTag(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const doc_comments = try parseDocComment(arena, it, tree); // no need to rewind on failure
const token = eatToken(it, .Identifier) orelse return null;
@@ -2720,7 +2720,7 @@ fn parseErrorTag(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
return &node.base;
}
-fn parseIdentifier(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseIdentifier(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const token = eatToken(it, .Identifier) orelse return null;
const node = try arena.create(Node.Identifier);
node.* = Node.Identifier{
@@ -2730,7 +2730,7 @@ fn parseIdentifier(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
return &node.base;
}
-fn parseVarType(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseVarType(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const token = eatToken(it, .Keyword_var) orelse return null;
const node = try arena.create(Node.VarType);
node.* = Node.VarType{
@@ -2740,7 +2740,7 @@ fn parseVarType(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
return &node.base;
}
-fn createLiteral(arena: *Allocator, comptime T: type, token: TokenIndex) !*Node {
+fn createLiteral(arena: Allocator, comptime T: type, token: TokenIndex) !*Node {
const result = try arena.create(T);
result.* = T{
.base = Node{ .id = Node.typeToId(T) },
@@ -2750,7 +2750,7 @@ fn createLiteral(arena: *Allocator, comptime T: type, token: TokenIndex) !*Node
}
// string literal or multiline string literal
-fn parseStringLiteral(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseStringLiteral(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
if (eatToken(it, .StringLiteral)) |token| {
const node = try arena.create(Node.StringLiteral);
node.* = Node.StringLiteral{
@@ -2776,7 +2776,7 @@ fn parseStringLiteral(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Nod
return null;
}
-fn parseIntegerLiteral(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseIntegerLiteral(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const token = eatToken(it, .IntegerLiteral) orelse return null;
const node = try arena.create(Node.IntegerLiteral);
node.* = Node.IntegerLiteral{
@@ -2786,7 +2786,7 @@ fn parseIntegerLiteral(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*No
return &node.base;
}
-fn parseFloatLiteral(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseFloatLiteral(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const token = eatToken(it, .FloatLiteral) orelse return null;
const node = try arena.create(Node.FloatLiteral);
node.* = Node.FloatLiteral{
@@ -2796,7 +2796,7 @@ fn parseFloatLiteral(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node
return &node.base;
}
-fn parseTry(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseTry(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const token = eatToken(it, .Keyword_try) orelse return null;
const node = try arena.create(Node.PrefixOp);
node.* = Node.PrefixOp{
@@ -2808,7 +2808,7 @@ fn parseTry(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
return &node.base;
}
-fn parseUse(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
+fn parseUse(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
const token = eatToken(it, .Keyword_use) orelse return null;
const node = try arena.create(Node.Use);
node.* = Node.Use{
@@ -2823,7 +2823,7 @@ fn parseUse(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
}
/// IfPrefix Body (KEYWORD_else Payload? Body)?
-fn parseIf(arena: *Allocator, it: *TokenIterator, tree: *Tree, bodyParseFn: NodeParseFn) !?*Node {
+fn parseIf(arena: Allocator, it: *TokenIterator, tree: *Tree, bodyParseFn: NodeParseFn) !?*Node {
const node = (try parseIfPrefix(arena, it, tree)) orelse return null;
const if_prefix = node.cast(Node.If).?;
@@ -2849,7 +2849,7 @@ fn parseIf(arena: *Allocator, it: *TokenIterator, tree: *Tree, bodyParseFn: Node
}
/// Eat a multiline doc comment
-fn parseDocComment(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node.DocComment {
+fn parseDocComment(arena: Allocator, it: *TokenIterator, tree: *Tree) !?*Node.DocComment {
var lines = Node.DocComment.LineList.init(arena);
while (eatToken(it, .DocComment)) |line| {
try lines.push(line);
@@ -2866,7 +2866,7 @@ fn parseDocComment(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node.D
}
/// Eat a single-line doc comment on the same line as another node
-fn parseAppendedDocComment(arena: *Allocator, it: *TokenIterator, tree: *Tree, after_token: TokenIndex) !?*Node.DocComment {
+fn parseAppendedDocComment(arena: Allocator, it: *TokenIterator, tree: *Tree, after_token: TokenIndex) !?*Node.DocComment {
const comment_token = eatToken(it, .DocComment) orelse return null;
if (tree.tokensOnSameLine(after_token, comment_token)) {
const node = try arena.create(Node.DocComment);
@@ -2883,7 +2883,7 @@ fn parseAppendedDocComment(arena: *Allocator, it: *TokenIterator, tree: *Tree, a
/// Op* Child
fn parsePrefixOpExpr(
- arena: *Allocator,
+ arena: Allocator,
it: *TokenIterator,
tree: *Tree,
opParseFn: NodeParseFn,
@@ -2943,7 +2943,7 @@ fn parsePrefixOpExpr(
/// Child (Op Child)*
/// Child (Op Child)?
fn parseBinOpExpr(
- arena: *Allocator,
+ arena: Allocator,
it: *TokenIterator,
tree: *Tree,
opParseFn: NodeParseFn,
@@ -2975,7 +2975,7 @@ fn parseBinOpExpr(
return res;
}
-fn createInfixOp(arena: *Allocator, index: TokenIndex, op: Node.InfixOp.Op) !*Node {
+fn createInfixOp(arena: Allocator, index: TokenIndex, op: Node.InfixOp.Op) !*Node {
const node = try arena.create(Node.InfixOp);
node.* = Node.InfixOp{
.base = Node{ .id = .InfixOp },
@@ -3035,7 +3035,7 @@ const AnnotatedToken = struct {
};
fn expectNode(
- arena: *Allocator,
+ arena: Allocator,
it: *TokenIterator,
tree: *Tree,
parseFn: NodeParseFn,
diff --git a/std/zig/parse_string_literal.zig b/std/zig/parse_string_literal.zig
index acae0b64c79c..fbfc993ce09c 100644
--- a/std/zig/parse_string_literal.zig
+++ b/std/zig/parse_string_literal.zig
@@ -15,7 +15,7 @@ pub const ParseStringLiteralError = error{
/// caller owns returned memory
pub fn parseStringLiteral(
- allocator: *std.mem.Allocator,
+ allocator: std.mem.Allocator,
bytes: []const u8,
bad_index: *usize, // populated if error.InvalidCharacter is returned
) ParseStringLiteralError![]u8 {
diff --git a/std/zig/parser_test.zig b/std/zig/parser_test.zig
index 9165c9bbe84e..7c8f2c57781c 100644
--- a/std/zig/parser_test.zig
+++ b/std/zig/parser_test.zig
@@ -86,7 +86,7 @@ test "zig fmt: doc comments on param decl" {
try testCanonical(
\\pub const Allocator = struct {
\\ shrinkFn: fn (
- \\ self: *Allocator,
+ \\ self: Allocator,
\\ /// Guaranteed to be the same as what was returned from most recent call to
\\ /// `allocFn`, `reallocFn`, or `shrinkFn`.
\\ old_mem: []u8,
@@ -159,7 +159,7 @@ test "zig fmt: spaces around slice operator" {
test "zig fmt: async call in if condition" {
try testCanonical(
\\comptime {
- \\ if (async b()) {
+ \\ if (async<&a> b()) {
\\ a();
\\ }
\\}
@@ -2065,7 +2065,7 @@ test "zig fmt: coroutines" {
\\}
\\
\\test "coroutine suspend, resume, cancel" {
- \\ const p: promise = try async testAsyncSeq();
+ \\ const p: promise = try async<&std.debug.global_allocator> testAsyncSeq();
\\ resume p;
\\ cancel p;
\\}
@@ -2178,9 +2178,9 @@ const maxInt = std.math.maxInt;
var fixed_buffer_mem: [100 * 1024]u8 = undefined;
-fn testParse(source: []const u8, allocator: *mem.Allocator, anything_changed: *bool) ![]u8 {
+fn testParse(source: []const u8, allocator: mem.Allocator, anything_changed: *bool) ![]u8 {
var stderr_file = try io.getStdErr();
- var stderr = &stderr_file.outStream().stream;
+ var stderr = stderr_file.outStreamAdapter().outStream();
const tree = try std.zig.parse(allocator, source);
defer tree.deinit();
@@ -2215,7 +2215,7 @@ fn testParse(source: []const u8, allocator: *mem.Allocator, anything_changed: *b
errdefer buffer.deinit();
var buffer_out_stream = io.BufferOutStream.init(&buffer);
- anything_changed.* = try std.zig.render(allocator, &buffer_out_stream.stream, tree);
+ anything_changed.* = try std.zig.render(allocator, buffer_out_stream.outStream(), tree);
return buffer.toOwnedSlice();
}
@@ -2223,9 +2223,9 @@ fn testTransform(source: []const u8, expected_source: []const u8) !void {
const needed_alloc_count = x: {
// Try it once with unlimited memory, make sure it works
var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
- var failing_allocator = std.debug.FailingAllocator.init(&fixed_allocator.allocator, maxInt(usize));
+ var failing_allocator = std.debug.FailingAllocator.init(fixed_allocator.allocator(), maxInt(usize));
var anything_changed: bool = undefined;
- const result_source = try testParse(source, &failing_allocator.allocator, &anything_changed);
+ const result_source = try testParse(source, failing_allocator.allocator(), &anything_changed);
if (!mem.eql(u8, result_source, expected_source)) {
warn("\n====== expected this output: =========\n");
warn("{}", expected_source);
@@ -2240,16 +2240,16 @@ fn testTransform(source: []const u8, expected_source: []const u8) !void {
return error.TestFailed;
}
std.testing.expect(anything_changed == changes_expected);
- failing_allocator.allocator.free(result_source);
+ failing_allocator.allocator().free(result_source);
break :x failing_allocator.index;
};
var fail_index: usize = 0;
while (fail_index < needed_alloc_count) : (fail_index += 1) {
var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
- var failing_allocator = std.debug.FailingAllocator.init(&fixed_allocator.allocator, fail_index);
+ var failing_allocator = std.debug.FailingAllocator.init(fixed_allocator.allocator(), fail_index);
var anything_changed: bool = undefined;
- if (testParse(source, &failing_allocator.allocator, &anything_changed)) |_| {
+ if (testParse(source, failing_allocator.allocator(), &anything_changed)) |_| {
return error.NondeterministicMemoryUsage;
} else |err| switch (err) {
error.OutOfMemory => {
diff --git a/std/zig/render.zig b/std/zig/render.zig
index dc879ee49f58..cd8b31159b82 100644
--- a/std/zig/render.zig
+++ b/std/zig/render.zig
@@ -13,25 +13,22 @@ pub const Error = error{
};
/// Returns whether anything changed
-pub fn render(allocator: *mem.Allocator, stream: var, tree: *ast.Tree) (@typeOf(stream).Child.Error || Error)!bool {
- comptime assert(@typeId(@typeOf(stream)) == builtin.TypeId.Pointer);
-
+pub fn render(allocator: mem.Allocator, stream: var, tree: *ast.Tree) (@typeOf(stream).Error || Error)!bool {
var anything_changed: bool = false;
// make a passthrough stream that checks whether something changed
const MyStream = struct {
const MyStream = @This();
- const StreamError = @typeOf(stream).Child.Error;
+ const StreamError = @typeOf(stream).Error;
const Stream = std.io.OutStream(StreamError);
anything_changed_ptr: *bool,
child_stream: @typeOf(stream),
- stream: Stream,
source_index: usize,
source: []const u8,
- fn write(iface_stream: *Stream, bytes: []const u8) StreamError!void {
- const self = @fieldParentPtr(MyStream, "stream", iface_stream);
+ fn write(iface_stream: Stream, bytes: []const u8) StreamError!void {
+ const self = iface_stream.iface.implCast(MyStream);
if (!self.anything_changed_ptr.*) {
const end = self.source_index + bytes.len;
@@ -48,16 +45,22 @@ pub fn render(allocator: *mem.Allocator, stream: var, tree: *ast.Tree) (@typeOf(
try self.child_stream.write(bytes);
}
+
+ pub fn outStream(self: *MyStream) Stream {
+ return Stream{
+ .iface = Stream.Iface.init(self),
+ .writeFn = write,
+ };
+ }
};
var my_stream = MyStream{
- .stream = MyStream.Stream{ .writeFn = MyStream.write },
.child_stream = stream,
.anything_changed_ptr = &anything_changed,
.source_index = 0,
.source = tree.source,
};
- try renderRoot(allocator, &my_stream.stream, tree);
+ try renderRoot(allocator, my_stream.outStream(), tree);
if (!anything_changed and my_stream.source_index != my_stream.source.len) {
anything_changed = true;
@@ -67,10 +70,10 @@ pub fn render(allocator: *mem.Allocator, stream: var, tree: *ast.Tree) (@typeOf(
}
fn renderRoot(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
stream: var,
tree: *ast.Tree,
-) (@typeOf(stream).Child.Error || Error)!void {
+) (@typeOf(stream).Error || Error)!void {
var tok_it = tree.tokens.iterator(0);
// render all the line comments at the beginning of the file
@@ -132,7 +135,7 @@ fn renderRoot(
}
}
-fn renderExtraNewline(tree: *ast.Tree, stream: var, start_col: *usize, node: *ast.Node) @typeOf(stream).Child.Error!void {
+fn renderExtraNewline(tree: *ast.Tree, stream: var, start_col: *usize, node: *ast.Node) @typeOf(stream).Error!void {
const first_token = node.firstToken();
var prev_token = first_token;
while (tree.tokens.at(prev_token - 1).id == Token.Id.DocComment) {
@@ -146,7 +149,7 @@ fn renderExtraNewline(tree: *ast.Tree, stream: var, start_col: *usize, node: *as
}
}
-fn renderTopLevelDecl(allocator: *mem.Allocator, stream: var, tree: *ast.Tree, indent: usize, start_col: *usize, decl: *ast.Node) (@typeOf(stream).Child.Error || Error)!void {
+fn renderTopLevelDecl(allocator: mem.Allocator, stream: var, tree: *ast.Tree, indent: usize, start_col: *usize, decl: *ast.Node) (@typeOf(stream).Error || Error)!void {
switch (decl.id) {
ast.Node.Id.FnProto => {
const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", decl);
@@ -226,14 +229,14 @@ fn renderTopLevelDecl(allocator: *mem.Allocator, stream: var, tree: *ast.Tree, i
}
fn renderExpression(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
stream: var,
tree: *ast.Tree,
indent: usize,
start_col: *usize,
base: *ast.Node,
space: Space,
-) (@typeOf(stream).Child.Error || Error)!void {
+) (@typeOf(stream).Error || Error)!void {
switch (base.id) {
ast.Node.Id.Identifier => {
const identifier = @fieldParentPtr(ast.Node.Identifier, "base", base);
@@ -577,7 +580,7 @@ fn renderExpression(
while (it.next()) |field_init| {
var find_stream = FindByteOutStream.init('\n');
var dummy_col: usize = 0;
- try renderExpression(allocator, &find_stream.stream, tree, 0, &dummy_col, field_init.*, Space.None);
+ try renderExpression(allocator, find_stream.outStream(), tree, 0, &dummy_col, field_init.*, Space.None);
if (find_stream.byte_found) break :blk false;
}
break :blk true;
@@ -709,7 +712,7 @@ fn renderExpression(
// Null stream for counting the printed length of each expression
var null_stream = std.io.NullOutStream.init();
- var counting_stream = std.io.CountingOutStream(std.io.NullOutStream.Error).init(&null_stream.stream);
+ var counting_stream = std.io.CountingOutStream(std.io.NullOutStream.Error).init(null_stream.outStream());
var it = exprs.iterator(0);
var i: usize = 0;
@@ -717,7 +720,7 @@ fn renderExpression(
while (it.next()) |expr| : (i += 1) {
counting_stream.bytes_written = 0;
var dummy_col: usize = 0;
- try renderExpression(allocator, &counting_stream.stream, tree, 0, &dummy_col, expr.*, Space.None);
+ try renderExpression(allocator, counting_stream.outStream(), tree, 0, &dummy_col, expr.*, Space.None);
const width = @intCast(usize, counting_stream.bytes_written);
const col = i % row_size;
column_widths[col] = std.math.max(column_widths[col], width);
@@ -1707,13 +1710,13 @@ fn renderExpression(
}
fn renderVarDecl(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
stream: var,
tree: *ast.Tree,
indent: usize,
start_col: *usize,
var_decl: *ast.Node.VarDecl,
-) (@typeOf(stream).Child.Error || Error)!void {
+) (@typeOf(stream).Error || Error)!void {
if (var_decl.visib_token) |visib_token| {
try renderToken(tree, stream, visib_token, indent, start_col, Space.Space); // pub
}
@@ -1779,14 +1782,14 @@ fn renderVarDecl(
}
fn renderParamDecl(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
stream: var,
tree: *ast.Tree,
indent: usize,
start_col: *usize,
base: *ast.Node,
space: Space,
-) (@typeOf(stream).Child.Error || Error)!void {
+) (@typeOf(stream).Error || Error)!void {
const param_decl = @fieldParentPtr(ast.Node.ParamDecl, "base", base);
try renderDocComments(tree, stream, param_decl, indent, start_col);
@@ -1809,13 +1812,13 @@ fn renderParamDecl(
}
fn renderStatement(
- allocator: *mem.Allocator,
+ allocator: mem.Allocator,
stream: var,
tree: *ast.Tree,
indent: usize,
start_col: *usize,
base: *ast.Node,
-) (@typeOf(stream).Child.Error || Error)!void {
+) (@typeOf(stream).Error || Error)!void {
switch (base.id) {
ast.Node.Id.VarDecl => {
const var_decl = @fieldParentPtr(ast.Node.VarDecl, "base", base);
@@ -1854,7 +1857,7 @@ fn renderTokenOffset(
start_col: *usize,
space: Space,
token_skip_bytes: usize,
-) (@typeOf(stream).Child.Error || Error)!void {
+) (@typeOf(stream).Error || Error)!void {
if (space == Space.BlockStart) {
if (start_col.* < indent + indent_delta)
return renderToken(tree, stream, token_index, indent, start_col, Space.Space);
@@ -2027,7 +2030,7 @@ fn renderToken(
indent: usize,
start_col: *usize,
space: Space,
-) (@typeOf(stream).Child.Error || Error)!void {
+) (@typeOf(stream).Error || Error)!void {
return renderTokenOffset(tree, stream, token_index, indent, start_col, space, 0);
}
@@ -2037,7 +2040,7 @@ fn renderDocComments(
node: var,
indent: usize,
start_col: *usize,
-) (@typeOf(stream).Child.Error || Error)!void {
+) (@typeOf(stream).Error || Error)!void {
const comment = node.doc_comments orelse return;
var it = comment.lines.iterator(0);
const first_token = node.firstToken();
@@ -2080,20 +2083,18 @@ const FindByteOutStream = struct {
pub const Error = error{};
pub const Stream = std.io.OutStream(Error);
- pub stream: Stream,
pub byte_found: bool,
byte: u8,
pub fn init(byte: u8) Self {
return Self{
- .stream = Stream{ .writeFn = writeFn },
.byte = byte,
.byte_found = false,
};
}
- fn writeFn(out_stream: *Stream, bytes: []const u8) Error!void {
- const self = @fieldParentPtr(Self, "stream", out_stream);
+ fn writeFn(out_stream: Stream, bytes: []const u8) Error!void {
+ const self = out_stream.iface.implCast(Self);
if (self.byte_found) return;
self.byte_found = blk: {
for (bytes) |b|
@@ -2101,4 +2102,11 @@ const FindByteOutStream = struct {
break :blk false;
};
}
+
+ pub fn outStream(self: *Self) Stream {
+ return Stream{
+ .iface = Stream.Iface.init(self),
+ .writeFn = writeFn,
+ };
+ }
};
diff --git a/test/cli.zig b/test/cli.zig
index 1e7f1d0a735e..1b644d1dcf6e 100644
--- a/test/cli.zig
+++ b/test/cli.zig
@@ -3,13 +3,13 @@ const builtin = @import("builtin");
const os = std.os;
const testing = std.testing;
-var a: *std.mem.Allocator = undefined;
+var a: std.mem.Allocator = undefined;
pub fn main() !void {
var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit();
- var arena = std.heap.ArenaAllocator.init(&direct_allocator.allocator);
+ var arena = std.heap.ArenaAllocator.init(direct_allocator.allocator());
defer arena.deinit();
var arg_it = os.args();
@@ -17,7 +17,7 @@ pub fn main() !void {
// skip my own exe name
_ = arg_it.skip();
- a = &arena.allocator;
+ a = arena.allocator();
const zig_exe_rel = try (arg_it.next(a) orelse {
std.debug.warn("Expected first argument to be path to zig compiler\n");
diff --git a/test/compare_output.zig b/test/compare_output.zig
index 72f4e223aaba..e7dcdc97fedf 100644
--- a/test/compare_output.zig
+++ b/test/compare_output.zig
@@ -19,7 +19,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\
\\pub fn main() void {
\\ privateFunction();
- \\ const stdout = &(getStdOut() catch unreachable).outStream().stream;
+ \\ const stdout = (getStdOut() catch unreachable).outStreamAdapter().outStream();
\\ stdout.print("OK 2\n") catch unreachable;
\\}
\\
@@ -34,7 +34,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\// purposefully conflicting function with main.zig
\\// but it's private so it should be OK
\\fn privateFunction() void {
- \\ const stdout = &(getStdOut() catch unreachable).outStream().stream;
+ \\ const stdout = (getStdOut() catch unreachable).outStreamAdapter().outStream();
\\ stdout.print("OK 1\n") catch unreachable;
\\}
\\
@@ -60,7 +60,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
tc.addSourceFile("foo.zig",
\\use @import("std").io;
\\pub fn foo_function() void {
- \\ const stdout = &(getStdOut() catch unreachable).outStream().stream;
+ \\ const stdout = (getStdOut() catch unreachable).outStreamAdapter().outStream();
\\ stdout.print("OK\n") catch unreachable;
\\}
);
@@ -71,7 +71,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\
\\pub fn bar_function() void {
\\ if (foo_function()) {
- \\ const stdout = &(getStdOut() catch unreachable).outStream().stream;
+ \\ const stdout = (getStdOut() catch unreachable).outStreamAdapter().outStream();
\\ stdout.print("OK\n") catch unreachable;
\\ }
\\}
@@ -103,7 +103,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\pub const a_text = "OK\n";
\\
\\pub fn ok() void {
- \\ const stdout = &(io.getStdOut() catch unreachable).outStream().stream;
+ \\ const stdout = (io.getStdOut() catch unreachable).outStreamAdapter().outStream();
\\ stdout.print(b_text) catch unreachable;
\\}
);
@@ -121,7 +121,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\const io = @import("std").io;
\\
\\pub fn main() void {
- \\ const stdout = &(io.getStdOut() catch unreachable).outStream().stream;
+ \\ const stdout = (io.getStdOut() catch unreachable).outStreamAdapter().outStream();
\\ stdout.print("Hello, world!\n{d4} {x3} {c}\n", u32(12), u16(0x12), u8('a')) catch unreachable;
\\}
, "Hello, world!\n0012 012 a\n");
@@ -264,7 +264,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\ var x_local : i32 = print_ok(x);
\\}
\\fn print_ok(val: @typeOf(x)) @typeOf(foo) {
- \\ const stdout = &(io.getStdOut() catch unreachable).outStream().stream;
+ \\ const stdout = (io.getStdOut() catch unreachable).outStreamAdapter().outStream();
\\ stdout.print("OK\n") catch unreachable;
\\ return 0;
\\}
@@ -346,7 +346,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\pub fn main() void {
\\ const bar = Bar {.field2 = 13,};
\\ const foo = Foo {.field1 = bar,};
- \\ const stdout = &(io.getStdOut() catch unreachable).outStream().stream;
+ \\ const stdout = (io.getStdOut() catch unreachable).outStreamAdapter().outStream();
\\ if (!foo.method()) {
\\ stdout.print("BAD\n") catch unreachable;
\\ }
@@ -360,7 +360,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
cases.add("defer with only fallthrough",
\\const io = @import("std").io;
\\pub fn main() void {
- \\ const stdout = &(io.getStdOut() catch unreachable).outStream().stream;
+ \\ const stdout = (io.getStdOut() catch unreachable).outStreamAdapter().outStream();
\\ stdout.print("before\n") catch unreachable;
\\ defer stdout.print("defer1\n") catch unreachable;
\\ defer stdout.print("defer2\n") catch unreachable;
@@ -373,7 +373,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\const io = @import("std").io;
\\const os = @import("std").os;
\\pub fn main() void {
- \\ const stdout = &(io.getStdOut() catch unreachable).outStream().stream;
+ \\ const stdout = (io.getStdOut() catch unreachable).outStreamAdapter().outStream();
\\ stdout.print("before\n") catch unreachable;
\\ defer stdout.print("defer1\n") catch unreachable;
\\ defer stdout.print("defer2\n") catch unreachable;
@@ -390,7 +390,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\ do_test() catch return;
\\}
\\fn do_test() !void {
- \\ const stdout = &(io.getStdOut() catch unreachable).outStream().stream;
+ \\ const stdout = (io.getStdOut() catch unreachable).outStreamAdapter().outStream();
\\ stdout.print("before\n") catch unreachable;
\\ defer stdout.print("defer1\n") catch unreachable;
\\ errdefer stdout.print("deferErr\n") catch unreachable;
@@ -409,7 +409,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\ do_test() catch return;
\\}
\\fn do_test() !void {
- \\ const stdout = &(io.getStdOut() catch unreachable).outStream().stream;
+ \\ const stdout = (io.getStdOut() catch unreachable).outStreamAdapter().outStream();
\\ stdout.print("before\n") catch unreachable;
\\ defer stdout.print("defer1\n") catch unreachable;
\\ errdefer stdout.print("deferErr\n") catch unreachable;
@@ -426,7 +426,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\const io = @import("std").io;
\\
\\pub fn main() void {
- \\ const stdout = &(io.getStdOut() catch unreachable).outStream().stream;
+ \\ const stdout = (io.getStdOut() catch unreachable).outStreamAdapter().outStream();
\\ stdout.print(foo_txt) catch unreachable;
\\}
, "1234\nabcd\n");
@@ -446,8 +446,8 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\pub fn main() !void {
\\ var args_it = os.args();
\\ var stdout_file = try io.getStdOut();
- \\ var stdout_adapter = stdout_file.outStream();
- \\ const stdout = &stdout_adapter.stream;
+ \\ var stdout_adapter = stdout_file.outStreamAdapter();
+ \\ const stdout = stdout_adapter.outStream();
\\ var index: usize = 0;
\\ _ = args_it.skip();
\\ while (args_it.next(allocator)) |arg_or_err| : (index += 1) {
@@ -487,8 +487,8 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\pub fn main() !void {
\\ var args_it = os.args();
\\ var stdout_file = try io.getStdOut();
- \\ var stdout_adapter = stdout_file.outStream();
- \\ const stdout = &stdout_adapter.stream;
+ \\ var stdout_adapter = stdout_file.outStreamAdapter();
+ \\ const stdout = stdout_adapter.outStream();
\\ var index: usize = 0;
\\ _ = args_it.skip();
\\ while (args_it.next(allocator)) |arg_or_err| : (index += 1) {
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 932a550eab4e..2d6955172c4c 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -1563,8 +1563,8 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\
\\export fn entry() void {
\\ var buf: [500]u8 = undefined;
- \\ var a = &std.heap.FixedBufferAllocator.init(buf[0..]).allocator;
- \\ const p = (async foo()) catch unreachable;
+ \\ var a = std.heap.FixedBufferAllocator.init(buf[0..]).allocator();
+ \\ const p = (async<&a> foo()) catch unreachable;
\\ cancel p;
\\}
\\
@@ -1619,7 +1619,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
"returning error from void async function",
\\const std = @import("std",);
\\export fn entry() void {
- \\ const p = async amain() catch unreachable;
+ \\ const p = async<&std.debug.global_allocator> amain() catch unreachable;
\\}
\\async fn amain() void {
\\ return error.ShouldBeCompileError;
@@ -4042,9 +4042,9 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
"method call with first arg type wrong container",
\\pub const List = struct {
\\ len: usize,
- \\ allocator: *Allocator,
+ \\ allocator: Allocator,
\\
- \\ pub fn init(allocator: *Allocator) List {
+ \\ pub fn init(allocator: Allocator) List {
\\ return List {
\\ .len = 0,
\\ .allocator = allocator,
@@ -4061,11 +4061,11 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\};
\\
\\export fn foo() void {
- \\ var x = List.init(&global_allocator);
+ \\ var x = List.init(global_allocator);
\\ x.init();
\\}
,
- "tmp.zig:23:5: error: expected type '*Allocator', found '*List'",
+ "tmp.zig:23:5: error: expected type 'Allocator', found 'List'",
);
cases.add(
@@ -5122,7 +5122,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\
\\export fn entry() void {
\\ const a = MdNode.Header {
- \\ .text = MdText.init(&std.debug.global_allocator),
+ \\ .text = MdText.init(std.debug.global_allocator),
\\ .weight = HeaderWeight.H1,
\\ };
\\}
diff --git a/test/runtime_safety.zig b/test/runtime_safety.zig
index b10accd213f9..cf267838bc28 100644
--- a/test/runtime_safety.zig
+++ b/test/runtime_safety.zig
@@ -486,12 +486,12 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\pub fn main() void {
\\ const p = nonFailing();
\\ resume p;
- \\ const p2 = async printTrace(p) catch unreachable;
+ \\ const p2 = async<&std.debug.global_allocator> printTrace(p) catch unreachable;
\\ cancel p2;
\\}
\\
\\fn nonFailing() promise->anyerror!void {
- \\ return async failing() catch unreachable;
+ \\ return async<&std.debug.global_allocator> failing() catch unreachable;
\\}
\\
\\async fn failing() anyerror!void {
diff --git a/test/stage1/behavior/bugs/1851.zig b/test/stage1/behavior/bugs/1851.zig
index ff9ab419f878..b4067735e206 100644
--- a/test/stage1/behavior/bugs/1851.zig
+++ b/test/stage1/behavior/bugs/1851.zig
@@ -7,7 +7,7 @@ test "allocation and looping over 3-byte integer" {
expect(@alignOf(u24) == 4);
expect(@alignOf([1]u24) == 4);
var buffer: [100]u8 = undefined;
- const a = &std.heap.FixedBufferAllocator.init(&buffer).allocator;
+ const a = std.heap.FixedBufferAllocator.init(&buffer).allocator();
var x = a.alloc(u24, 2) catch unreachable;
expect(x.len == 2);
diff --git a/test/stage1/behavior/bugs/920.zig b/test/stage1/behavior/bugs/920.zig
index 10c002f6bad6..847b8c57bd50 100644
--- a/test/stage1/behavior/bugs/920.zig
+++ b/test/stage1/behavior/bugs/920.zig
@@ -9,10 +9,10 @@ const ZigTable = struct {
pdf: fn (f64) f64,
is_symmetric: bool,
- zero_case: fn (*Random, f64) f64,
+ zero_case: fn (Random, f64) f64,
};
-fn ZigTableGen(comptime is_symmetric: bool, comptime r: f64, comptime v: f64, comptime f: fn (f64) f64, comptime f_inv: fn (f64) f64, comptime zero_case: fn (*Random, f64) f64) ZigTable {
+fn ZigTableGen(comptime is_symmetric: bool, comptime r: f64, comptime v: f64, comptime f: fn (f64) f64, comptime f_inv: fn (f64) f64, comptime zero_case: fn (Random, f64) f64) ZigTable {
var tables: ZigTable = undefined;
tables.is_symmetric = is_symmetric;
@@ -45,7 +45,7 @@ fn norm_f(x: f64) f64 {
fn norm_f_inv(y: f64) f64 {
return math.sqrt(-2.0 * math.ln(y));
}
-fn norm_zero_case(random: *Random, u: f64) f64 {
+fn norm_zero_case(random: Random, u: f64) f64 {
return 0.0;
}
diff --git a/test/stage1/behavior/cancel.zig b/test/stage1/behavior/cancel.zig
index 7fadf7f23055..4524a42144f5 100644
--- a/test/stage1/behavior/cancel.zig
+++ b/test/stage1/behavior/cancel.zig
@@ -8,7 +8,7 @@ test "cancel forwards" {
var da = std.heap.DirectAllocator.init();
defer da.deinit();
- const p = async<&da.allocator> f1() catch unreachable;
+ const p = async<&da.allocator()> f1() catch unreachable;
cancel p;
std.testing.expect(defer_f1);
std.testing.expect(defer_f2);
@@ -45,7 +45,7 @@ test "cancel backwards" {
var da = std.heap.DirectAllocator.init();
defer da.deinit();
- const p = async<&da.allocator> b1() catch unreachable;
+ const p = async<&da.allocator()> b1() catch unreachable;
cancel p;
std.testing.expect(defer_b1);
std.testing.expect(defer_b2);
diff --git a/test/stage1/behavior/coroutine_await_struct.zig b/test/stage1/behavior/coroutine_await_struct.zig
index 29f77bf67c12..c56944588da7 100644
--- a/test/stage1/behavior/coroutine_await_struct.zig
+++ b/test/stage1/behavior/coroutine_await_struct.zig
@@ -14,7 +14,7 @@ test "coroutine await struct" {
defer da.deinit();
await_seq('a');
- const p = async<&da.allocator> await_amain() catch unreachable;
+ const p = async<&da.allocator()> await_amain() catch unreachable;
await_seq('f');
resume await_a_promise;
await_seq('i');
diff --git a/test/stage1/behavior/coroutines.zig b/test/stage1/behavior/coroutines.zig
index be977bbfcef4..f412ed67a3bd 100644
--- a/test/stage1/behavior/coroutines.zig
+++ b/test/stage1/behavior/coroutines.zig
@@ -8,7 +8,7 @@ test "create a coroutine and cancel it" {
var da = std.heap.DirectAllocator.init();
defer da.deinit();
- const p = try async<&da.allocator> simpleAsyncFn();
+ const p = try async<&da.allocator()> simpleAsyncFn();
comptime expect(@typeOf(p) == promise->void);
cancel p;
expect(x == 2);
@@ -24,7 +24,7 @@ test "coroutine suspend, resume, cancel" {
defer da.deinit();
seq('a');
- const p = try async<&da.allocator> testAsyncSeq();
+ const p = try async<&da.allocator()> testAsyncSeq();
seq('c');
resume p;
seq('f');
@@ -52,7 +52,7 @@ test "coroutine suspend with block" {
var da = std.heap.DirectAllocator.init();
defer da.deinit();
- const p = try async<&da.allocator> testSuspendBlock();
+ const p = try async<&da.allocator()> testSuspendBlock();
std.testing.expect(!result);
resume a_promise;
std.testing.expect(result);
@@ -82,7 +82,7 @@ test "coroutine await" {
defer da.deinit();
await_seq('a');
- const p = async<&da.allocator> await_amain() catch unreachable;
+ const p = async<&da.allocator()> await_amain() catch unreachable;
await_seq('f');
resume await_a_promise;
await_seq('i');
@@ -121,7 +121,7 @@ test "coroutine await early return" {
defer da.deinit();
early_seq('a');
- const p = async<&da.allocator> early_amain() catch @panic("out of memory");
+ const p = async<&da.allocator()> early_amain() catch @panic("out of memory");
early_seq('f');
expect(early_final_result == 1234);
expect(std.mem.eql(u8, early_points, "abcdef"));
@@ -148,7 +148,7 @@ fn early_seq(c: u8) void {
test "coro allocation failure" {
var failing_allocator = std.debug.FailingAllocator.init(std.debug.global_allocator, 0);
- if (async<&failing_allocator.allocator> asyncFuncThatNeverGetsRun()) {
+ if (async<&failing_allocator.allocator()> asyncFuncThatNeverGetsRun()) {
@panic("expected allocation failure");
} else |err| switch (err) {
error.OutOfMemory => {},
@@ -168,7 +168,7 @@ test "async function with dot syntax" {
};
var da = std.heap.DirectAllocator.init();
defer da.deinit();
- const p = try async<&da.allocator> S.foo();
+ const p = try async<&da.allocator()> S.foo();
cancel p;
expect(S.y == 2);
}
@@ -181,7 +181,7 @@ test "async fn pointer in a struct field" {
var foo = Foo{ .bar = simpleAsyncFn2 };
var da = std.heap.DirectAllocator.init();
defer da.deinit();
- const p = (async<&da.allocator> foo.bar(&data)) catch unreachable;
+ const p = (async<&da.allocator()> foo.bar(&data)) catch unreachable;
expect(data == 2);
cancel p;
expect(data == 4);
@@ -195,7 +195,7 @@ async<*std.mem.Allocator> fn simpleAsyncFn2(y: *i32) void {
test "async fn with inferred error set" {
var da = std.heap.DirectAllocator.init();
defer da.deinit();
- const p = (async<&da.allocator> failing()) catch unreachable;
+ const p = (async<&da.allocator()> failing()) catch unreachable;
resume p;
cancel p;
}
@@ -210,19 +210,19 @@ test "error return trace across suspend points - early return" {
resume p;
var da = std.heap.DirectAllocator.init();
defer da.deinit();
- const p2 = try async<&da.allocator> printTrace(p);
+ const p2 = try async<&da.allocator()> printTrace(p);
cancel p2;
}
test "error return trace across suspend points - async return" {
const p = nonFailing();
- const p2 = try async printTrace(p);
+ const p2 = try async<&std.debug.global_allocator> printTrace(p);
resume p;
cancel p2;
}
fn nonFailing() (promise->anyerror!void) {
- return async suspendThenFail() catch unreachable;
+ return async<&std.debug.global_allocator> suspendThenFail() catch unreachable;
}
async fn suspendThenFail() anyerror!void {
suspend;
@@ -242,9 +242,9 @@ async fn printTrace(p: promise->(anyerror!void)) void {
test "break from suspend" {
var buf: [500]u8 = undefined;
- var a = &std.heap.FixedBufferAllocator.init(buf[0..]).allocator;
+ var a = std.heap.FixedBufferAllocator.init(buf[0..]).allocator();
var my_result: i32 = 1;
- const p = try async testBreakFromSuspend(&my_result);
+ const p = try async<&a> testBreakFromSuspend(&my_result);
cancel p;
std.testing.expect(my_result == 2);
}
diff --git a/test/standalone/brace_expansion/main.zig b/test/standalone/brace_expansion/main.zig
index f5bcd59ecffd..28a9ffa0146d 100644
--- a/test/standalone/brace_expansion/main.zig
+++ b/test/standalone/brace_expansion/main.zig
@@ -16,7 +16,7 @@ const Token = union(enum) {
Eof,
};
-var global_allocator: *mem.Allocator = undefined;
+var global_allocator: mem.Allocator = undefined;
fn tokenize(input: []const u8) !ArrayList(Token) {
const State = enum {
@@ -185,16 +185,16 @@ pub fn main() !void {
var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit();
- var arena = std.heap.ArenaAllocator.init(&direct_allocator.allocator);
+ var arena = std.heap.ArenaAllocator.init(direct_allocator.allocator());
defer arena.deinit();
- global_allocator = &arena.allocator;
+ global_allocator = arena.allocator();
var stdin_buf = try Buffer.initSize(global_allocator, 0);
defer stdin_buf.deinit();
- var stdin_adapter = stdin_file.inStream();
- try stdin_adapter.stream.readAllBuffer(&stdin_buf, maxInt(usize));
+ var stdin_adapter = stdin_file.inStreamAdapter();
+ try stdin_adapter.inStream().readAllBuffer(&stdin_buf, maxInt(usize));
var result_buf = try Buffer.initSize(global_allocator, 0);
defer result_buf.deinit();
diff --git a/test/tests.zig b/test/tests.zig
index a17938fc3c11..6ce5b12ab2c6 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -314,11 +314,11 @@ pub const CompareOutputContext = struct {
var stdout = Buffer.initNull(b.allocator);
var stderr = Buffer.initNull(b.allocator);
- var stdout_file_in_stream = child.stdout.?.inStream();
- var stderr_file_in_stream = child.stderr.?.inStream();
+ var stdout_file_in_stream = child.stdout.?.inStreamAdapter();
+ var stderr_file_in_stream = child.stderr.?.inStreamAdapter();
- stdout_file_in_stream.stream.readAllBuffer(&stdout, max_stdout_size) catch unreachable;
- stderr_file_in_stream.stream.readAllBuffer(&stderr, max_stdout_size) catch unreachable;
+ stdout_file_in_stream.inStream().readAllBuffer(&stdout, max_stdout_size) catch unreachable;
+ stderr_file_in_stream.inStream().readAllBuffer(&stderr, max_stdout_size) catch unreachable;
const term = child.wait() catch |err| {
debug.panic("Unable to spawn {}: {}\n", full_exe_path, @errorName(err));
@@ -682,11 +682,11 @@ pub const CompileErrorContext = struct {
var stdout_buf = Buffer.initNull(b.allocator);
var stderr_buf = Buffer.initNull(b.allocator);
- var stdout_file_in_stream = child.stdout.?.inStream();
- var stderr_file_in_stream = child.stderr.?.inStream();
+ var stdout_file_in_stream = child.stdout.?.inStreamAdapter();
+ var stderr_file_in_stream = child.stderr.?.inStreamAdapter();
- stdout_file_in_stream.stream.readAllBuffer(&stdout_buf, max_stdout_size) catch unreachable;
- stderr_file_in_stream.stream.readAllBuffer(&stderr_buf, max_stdout_size) catch unreachable;
+ stdout_file_in_stream.inStream().readAllBuffer(&stdout_buf, max_stdout_size) catch unreachable;
+ stderr_file_in_stream.inStream().readAllBuffer(&stderr_buf, max_stdout_size) catch unreachable;
const term = child.wait() catch |err| {
debug.panic("Unable to spawn {}: {}\n", zig_args.items[0], @errorName(err));
@@ -989,11 +989,11 @@ pub const TranslateCContext = struct {
var stdout_buf = Buffer.initNull(b.allocator);
var stderr_buf = Buffer.initNull(b.allocator);
- var stdout_file_in_stream = child.stdout.?.inStream();
- var stderr_file_in_stream = child.stderr.?.inStream();
+ var stdout_file_in_stream = child.stdout.?.inStreamAdapter();
+ var stderr_file_in_stream = child.stderr.?.inStreamAdapter();
- stdout_file_in_stream.stream.readAllBuffer(&stdout_buf, max_stdout_size) catch unreachable;
- stderr_file_in_stream.stream.readAllBuffer(&stderr_buf, max_stdout_size) catch unreachable;
+ stdout_file_in_stream.inStream().readAllBuffer(&stdout_buf, max_stdout_size) catch unreachable;
+ stderr_file_in_stream.inStream().readAllBuffer(&stderr_buf, max_stdout_size) catch unreachable;
const term = child.wait() catch |err| {
debug.panic("Unable to spawn {}: {}\n", zig_args.toSliceConst()[0], @errorName(err));