diff --git a/build.zig b/build.zig
index a4e3dbcdfac3..109a799ac9fc 100644
--- a/build.zig
+++ b/build.zig
@@ -10,7 +10,7 @@ const ArrayList = std.ArrayList;
const Buffer = std.Buffer;
const io = std.io;
-pub fn build(b: &Builder) !void {
+pub fn build(b: *Builder) !void {
const mode = b.standardReleaseOptions();
var docgen_exe = b.addExecutable("docgen", "doc/docgen.zig");
@@ -132,7 +132,7 @@ pub fn build(b: &Builder) !void {
test_step.dependOn(tests.addGenHTests(b, test_filter));
}
-fn dependOnLib(lib_exe_obj: &std.build.LibExeObjStep, dep: &const LibraryDep) void {
+fn dependOnLib(lib_exe_obj: *std.build.LibExeObjStep, dep: *const LibraryDep) void {
for (dep.libdirs.toSliceConst()) |lib_dir| {
lib_exe_obj.addLibPath(lib_dir);
}
@@ -147,7 +147,7 @@ fn dependOnLib(lib_exe_obj: &std.build.LibExeObjStep, dep: &const LibraryDep) vo
}
}
-fn addCppLib(b: &Builder, lib_exe_obj: &std.build.LibExeObjStep, cmake_binary_dir: []const u8, lib_name: []const u8) void {
+fn addCppLib(b: *Builder, lib_exe_obj: *std.build.LibExeObjStep, cmake_binary_dir: []const u8, lib_name: []const u8) void {
const lib_prefix = if (lib_exe_obj.target.isWindows()) "" else "lib";
lib_exe_obj.addObjectFile(os.path.join(b.allocator, cmake_binary_dir, "zig_cpp", b.fmt("{}{}{}", lib_prefix, lib_name, lib_exe_obj.target.libFileExt())) catch unreachable);
}
@@ -159,7 +159,7 @@ const LibraryDep = struct {
includes: ArrayList([]const u8),
};
-fn findLLVM(b: &Builder, llvm_config_exe: []const u8) !LibraryDep {
+fn findLLVM(b: *Builder, llvm_config_exe: []const u8) !LibraryDep {
const libs_output = try b.exec([][]const u8{
llvm_config_exe,
"--libs",
@@ -217,7 +217,7 @@ fn findLLVM(b: &Builder, llvm_config_exe: []const u8) !LibraryDep {
return result;
}
-pub fn installStdLib(b: &Builder, stdlib_files: []const u8) void {
+pub fn installStdLib(b: *Builder, stdlib_files: []const u8) void {
var it = mem.split(stdlib_files, ";");
while (it.next()) |stdlib_file| {
const src_path = os.path.join(b.allocator, "std", stdlib_file) catch unreachable;
@@ -226,7 +226,7 @@ pub fn installStdLib(b: &Builder, stdlib_files: []const u8) void {
}
}
-pub fn installCHeaders(b: &Builder, c_header_files: []const u8) void {
+pub fn installCHeaders(b: *Builder, c_header_files: []const u8) void {
var it = mem.split(c_header_files, ";");
while (it.next()) |c_header_file| {
const src_path = os.path.join(b.allocator, "c_headers", c_header_file) catch unreachable;
@@ -235,7 +235,7 @@ pub fn installCHeaders(b: &Builder, c_header_files: []const u8) void {
}
}
-fn nextValue(index: &usize, build_info: []const u8) []const u8 {
+fn nextValue(index: *usize, build_info: []const u8) []const u8 {
const start = index.*;
while (true) : (index.* += 1) {
switch (build_info[index.*]) {
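Every signature change in this patch follows the same mechanical pattern: pointer-typed parameters written &T become *T, while taking an address at the call site still uses &, and dereferencing still uses ptr.*. A minimal editorial sketch of the new syntax (not part of the patch; the helper name increment is hypothetical):

const assert = @import("std").debug.assert;

// Pointer parameters are now written *T; taking an address still uses &.
fn increment(counter: *usize) void {
    counter.* += 1; // dereference syntax is unchanged
}

test "pointer parameter with * syntax" {
    var n: usize = 0;
    increment(&n);
    assert(n == 1);
}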
diff --git a/doc/docgen.zig b/doc/docgen.zig
index 7dc444f127df..fed4bb8ebab0 100644
--- a/doc/docgen.zig
+++ b/doc/docgen.zig
@@ -104,7 +104,7 @@ const Tokenizer = struct {
};
}
- fn next(self: &Tokenizer) Token {
+ fn next(self: *Tokenizer) Token {
var result = Token{
.id = Token.Id.Eof,
.start = self.index,
@@ -196,7 +196,7 @@ const Tokenizer = struct {
line_end: usize,
};
- fn getTokenLocation(self: &Tokenizer, token: &const Token) Location {
+ fn getTokenLocation(self: *Tokenizer, token: *const Token) Location {
var loc = Location{
.line = 0,
.column = 0,
@@ -221,7 +221,7 @@ const Tokenizer = struct {
}
};
-fn parseError(tokenizer: &Tokenizer, token: &const Token, comptime fmt: []const u8, args: ...) error {
+fn parseError(tokenizer: *Tokenizer, token: *const Token, comptime fmt: []const u8, args: ...) error {
const loc = tokenizer.getTokenLocation(token);
warn("{}:{}:{}: error: " ++ fmt ++ "\n", tokenizer.source_file_name, loc.line + 1, loc.column + 1, args);
if (loc.line_start <= loc.line_end) {
@@ -244,13 +244,13 @@ fn parseError(tokenizer: &Tokenizer, token: &const Token, comptime fmt: []const
return error.ParseError;
}
-fn assertToken(tokenizer: &Tokenizer, token: &const Token, id: Token.Id) !void {
+fn assertToken(tokenizer: *Tokenizer, token: *const Token, id: Token.Id) !void {
if (token.id != id) {
return parseError(tokenizer, token, "expected {}, found {}", @tagName(id), @tagName(token.id));
}
}
-fn eatToken(tokenizer: &Tokenizer, id: Token.Id) !Token {
+fn eatToken(tokenizer: *Tokenizer, id: Token.Id) !Token {
const token = tokenizer.next();
try assertToken(tokenizer, token, id);
return token;
@@ -317,7 +317,7 @@ const Action = enum {
Close,
};
-fn genToc(allocator: &mem.Allocator, tokenizer: &Tokenizer) !Toc {
+fn genToc(allocator: *mem.Allocator, tokenizer: *Tokenizer) !Toc {
var urls = std.HashMap([]const u8, Token, mem.hash_slice_u8, mem.eql_slice_u8).init(allocator);
errdefer urls.deinit();
@@ -546,7 +546,7 @@ fn genToc(allocator: &mem.Allocator, tokenizer: &Tokenizer) !Toc {
};
}
-fn urlize(allocator: &mem.Allocator, input: []const u8) ![]u8 {
+fn urlize(allocator: *mem.Allocator, input: []const u8) ![]u8 {
var buf = try std.Buffer.initSize(allocator, 0);
defer buf.deinit();
@@ -566,7 +566,7 @@ fn urlize(allocator: &mem.Allocator, input: []const u8) ![]u8 {
return buf.toOwnedSlice();
}
-fn escapeHtml(allocator: &mem.Allocator, input: []const u8) ![]u8 {
+fn escapeHtml(allocator: *mem.Allocator, input: []const u8) ![]u8 {
var buf = try std.Buffer.initSize(allocator, 0);
defer buf.deinit();
@@ -608,7 +608,7 @@ test "term color" {
assert(mem.eql(u8, result, "AgreenB"));
}
-fn termColor(allocator: &mem.Allocator, input: []const u8) ![]u8 {
+fn termColor(allocator: *mem.Allocator, input: []const u8) ![]u8 {
var buf = try std.Buffer.initSize(allocator, 0);
defer buf.deinit();
@@ -688,7 +688,7 @@ fn termColor(allocator: &mem.Allocator, input: []const u8) ![]u8 {
return buf.toOwnedSlice();
}
-fn genHtml(allocator: &mem.Allocator, tokenizer: &Tokenizer, toc: &Toc, out: var, zig_exe: []const u8) !void {
+fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: var, zig_exe: []const u8) !void {
var code_progress_index: usize = 0;
for (toc.nodes) |node| {
switch (node) {
@@ -1036,7 +1036,7 @@ fn genHtml(allocator: &mem.Allocator, tokenizer: &Tokenizer, toc: &Toc, out: var
}
}
-fn exec(allocator: &mem.Allocator, args: []const []const u8) !os.ChildProcess.ExecResult {
+fn exec(allocator: *mem.Allocator, args: []const []const u8) !os.ChildProcess.ExecResult {
const result = try os.ChildProcess.exec(allocator, args, null, null, max_doc_file_size);
switch (result.term) {
os.ChildProcess.Term.Exited => |exit_code| {
diff --git a/doc/langref.html.in b/doc/langref.html.in
index d63c38d0fee6..3bd1124e00a6 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -458,7 +458,7 @@ test "string literals" {
// A C string literal is a null terminated pointer.
const null_terminated_bytes = c"hello";
- assert(@typeOf(null_terminated_bytes) == &const u8);
+ assert(@typeOf(null_terminated_bytes) == *const u8);
assert(null_terminated_bytes[5] == 0);
}
{#code_end#}
@@ -547,7 +547,7 @@ const c_string_literal =
;
{#code_end#}
- In this example the variable c_string_literal has type &const char and
+ In this example the variable c_string_literal has type *const char and
has a terminating null byte.
{#see_also|@embedFile#}
@@ -1403,12 +1403,12 @@ test "address of syntax" {
assert(x_ptr.* == 1234);
// When you get the address of a const variable, you get a const pointer.
- assert(@typeOf(x_ptr) == &const i32);
+ assert(@typeOf(x_ptr) == *const i32);
// If you want to mutate the value, you'd need an address of a mutable variable:
var y: i32 = 5678;
const y_ptr = &y;
- assert(@typeOf(y_ptr) == &i32);
+ assert(@typeOf(y_ptr) == *i32);
y_ptr.* += 1;
assert(y_ptr.* == 5679);
}
@@ -1455,7 +1455,7 @@ comptime {
test "@ptrToInt and @intToPtr" {
// To convert an integer address into a pointer, use @intToPtr:
- const ptr = @intToPtr(&i32, 0xdeadbeef);
+ const ptr = @intToPtr(*i32, 0xdeadbeef);
// To convert a pointer to an integer, use @ptrToInt:
const addr = @ptrToInt(ptr);
@@ -1467,7 +1467,7 @@ test "@ptrToInt and @intToPtr" {
comptime {
// Zig is able to do this at compile-time, as long as
// ptr is never dereferenced.
- const ptr = @intToPtr(&i32, 0xdeadbeef);
+ const ptr = @intToPtr(*i32, 0xdeadbeef);
const addr = @ptrToInt(ptr);
assert(@typeOf(addr) == usize);
assert(addr == 0xdeadbeef);
@@ -1477,17 +1477,17 @@ test "volatile" {
// In Zig, loads and stores are assumed to not have side effects.
// If a given load or store should have side effects, such as
// Memory Mapped Input/Output (MMIO), use `volatile`:
- const mmio_ptr = @intToPtr(&volatile u8, 0x12345678);
+ const mmio_ptr = @intToPtr(*volatile u8, 0x12345678);
// Now loads and stores with mmio_ptr are guaranteed to all happen
// and in the same order as in source code.
- assert(@typeOf(mmio_ptr) == &volatile u8);
+ assert(@typeOf(mmio_ptr) == *volatile u8);
}
test "nullable pointers" {
// Pointers cannot be null. If you want a null pointer, use the nullable
// prefix `?` to make the pointer type nullable.
- var ptr: ?&i32 = null;
+ var ptr: ?*i32 = null;
var x: i32 = 1;
ptr = &x;
@@ -1496,7 +1496,7 @@ test "nullable pointers" {
// Nullable pointers are the same size as normal pointers, because pointer
// value 0 is used as the null value.
- assert(@sizeOf(?&i32) == @sizeOf(&i32));
+ assert(@sizeOf(?*i32) == @sizeOf(*i32));
}
test "pointer casting" {
@@ -1504,7 +1504,7 @@ test "pointer casting" {
// operation that Zig cannot protect you against. Use @ptrCast only when other
// conversions are not possible.
const bytes align(@alignOf(u32)) = []u8{0x12, 0x12, 0x12, 0x12};
- const u32_ptr = @ptrCast(&const u32, &bytes[0]);
+ const u32_ptr = @ptrCast(*const u32, &bytes[0]);
assert(u32_ptr.* == 0x12121212);
// Even this example is contrived - there are better ways to do the above than
@@ -1518,7 +1518,7 @@ test "pointer casting" {
test "pointer child type" {
// pointer types have a `child` field which tells you the type they point to.
- assert((&u32).Child == u32);
+ assert((*u32).Child == u32);
}
{#code_end#}
{#header_open|Alignment#}
@@ -1543,15 +1543,15 @@ const builtin = @import("builtin");
test "variable alignment" {
var x: i32 = 1234;
const align_of_i32 = @alignOf(@typeOf(x));
- assert(@typeOf(&x) == &i32);
- assert(&i32 == &align(align_of_i32) i32);
+ assert(@typeOf(&x) == *i32);
+ assert(*i32 == *align(align_of_i32) i32);
if (builtin.arch == builtin.Arch.x86_64) {
- assert((&i32).alignment == 4);
+ assert((*i32).alignment == 4);
}
}
{#code_end#}
- In the same way that a &i32 can be implicitly cast to a
- &const i32, a pointer with a larger alignment can be implicitly
+ In the same way that a *i32 can be implicitly cast to a
+ *const i32, a pointer with a larger alignment can be implicitly
cast to a pointer with a smaller alignment, but not vice versa.
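A short editorial sketch of the two coercions just described, written in the new * syntax this patch introduces (the global variable name is hypothetical):

const assert = @import("std").debug.assert;

var global: u32 align(8) = 1234;

test "implicit pointer coercions" {
    var x: i32 = 1;
    const p: *i32 = &x;
    // A *i32 coerces to *const i32; the reverse direction is a compile error.
    const q: *const i32 = p;
    assert(q.* == 1);

    // A pointer with larger alignment coerces to one with smaller alignment.
    const r: *align(4) u32 = &global;
    assert(r.* == 1234);
}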
@@ -1565,7 +1565,7 @@ var foo: u8 align(4) = 100;
test "global variable alignment" {
assert(@typeOf(&foo).alignment == 4);
- assert(@typeOf(&foo) == &align(4) u8);
+ assert(@typeOf(&foo) == *align(4) u8);
const slice = (&foo)[0..1];
assert(@typeOf(slice) == []align(4) u8);
}
@@ -1610,7 +1610,7 @@ fn foo(bytes: []u8) u32 {
u8 can alias any memory.
As an example, this code produces undefined behavior:
- @ptrCast(&u32, f32(12.34)).*
+ @ptrCast(*u32, f32(12.34)).*
Instead, use {#link|@bitCast#}:
@bitCast(u32, f32(12.34))
As an added benefit, the @bitcast version works at compile-time.
@@ -1736,7 +1736,7 @@ const Vec3 = struct {
};
}
- pub fn dot(self: &const Vec3, other: &const Vec3) f32 {
+ pub fn dot(self: *const Vec3, other: *const Vec3) f32 {
return self.x * other.x + self.y * other.y + self.z * other.z;
}
};
@@ -1768,7 +1768,7 @@ test "struct namespaced variable" {
// struct field order is determined by the compiler for optimal performance.
// however, you can still calculate a struct base pointer given a field pointer:
-fn setYBasedOnX(x: &f32, y: f32) void {
+fn setYBasedOnX(x: *f32, y: f32) void {
const point = @fieldParentPtr(Point, "x", x);
point.y = y;
}
@@ -1786,13 +1786,13 @@ test "field parent pointer" {
fn LinkedList(comptime T: type) type {
return struct {
pub const Node = struct {
- prev: ?&Node,
- next: ?&Node,
+ prev: ?*Node,
+ next: ?*Node,
data: T,
};
- first: ?&Node,
- last: ?&Node,
+ first: ?*Node,
+ last: ?*Node,
len: usize,
};
}
@@ -2039,7 +2039,7 @@ const Variant = union(enum) {
Int: i32,
Bool: bool,
- fn truthy(self: &const Variant) bool {
+ fn truthy(self: *const Variant) bool {
return switch (self.*) {
Variant.Int => |x_int| x_int != 0,
Variant.Bool => |x_bool| x_bool,
@@ -2786,7 +2786,7 @@ test "pass aggregate type by value to function" {
}
{#code_end#}
- Instead, one must use &const. Zig allows implicitly casting something
+ Instead, one must use *const. Zig allows implicitly casting something
to a const pointer to it:
{#code_begin|test#}
@@ -2794,7 +2794,7 @@ const Foo = struct {
x: i32,
};
-fn bar(foo: &const Foo) void {}
+fn bar(foo: *const Foo) void {}
test "implicitly cast to const pointer" {
bar(Foo {.x = 12,});
@@ -3208,16 +3208,16 @@ struct Foo *do_a_thing(void) {
Zig code
{#code_begin|syntax#}
// malloc prototype included for reference
-extern fn malloc(size: size_t) ?&u8;
+extern fn malloc(size: size_t) ?*u8;
-fn doAThing() ?&Foo {
+fn doAThing() ?*Foo {
const ptr = malloc(1234) ?? return null;
// ...
}
{#code_end#}
Here, Zig is at least as convenient, if not more, than C. And, the type of "ptr"
- is &u8 not ?&u8. The ?? operator
+ is *u8 not ?*u8. The ?? operator
unwrapped the nullable type and therefore ptr is guaranteed to be non-null everywhere
it is used in the function.
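An editorial sketch of that unwrap-or-default behavior, using a nullable return value instead of the malloc example (the helper firstByte is hypothetical; ?? is the unwrap operator at this point in Zig's history):

const assert = @import("std").debug.assert;

fn firstByte(bytes: []const u8) ?u8 {
    if (bytes.len == 0) return null;
    return bytes[0];
}

test "?? unwraps a nullable value" {
    // When the left side is non-null, ?? yields the unwrapped value ...
    assert((firstByte("hi") ?? 0) == 'h');
    // ... otherwise the right-hand expression is used instead.
    assert((firstByte("") ?? 0) == 0);
}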
@@ -3237,7 +3237,7 @@ fn doAThing() ?&Foo {
In Zig you can accomplish the same thing:
{#code_begin|syntax#}
-fn doAThing(nullable_foo: ?&Foo) void {
+fn doAThing(nullable_foo: ?*Foo) void {
// do some stuff
if (nullable_foo) |foo| {
@@ -3713,7 +3713,7 @@ fn List(comptime T: type) type {
{#code_begin|syntax#}
const Node = struct {
- next: &Node,
+ next: *Node,
name: []u8,
};
{#code_end#}
@@ -3745,7 +3745,7 @@ pub fn main() void {
{#code_begin|syntax#}
/// Calls print and then flushes the buffer.
-pub fn printf(self: &OutStream, comptime format: []const u8, args: ...) error!void {
+pub fn printf(self: *OutStream, comptime format: []const u8, args: ...) error!void {
const State = enum {
Start,
OpenBrace,
@@ -3817,7 +3817,7 @@ pub fn printf(self: &OutStream, comptime format: []const u8, args: ...) error!vo
and emits a function that actually looks like this:
{#code_begin|syntax#}
-pub fn printf(self: &OutStream, arg0: i32, arg1: []const u8) !void {
+pub fn printf(self: *OutStream, arg0: i32, arg1: []const u8) !void {
try self.write("here is a string: '");
try self.printValue(arg0);
try self.write("' here is a number: ");
@@ -3831,7 +3831,7 @@ pub fn printf(self: &OutStream, arg0: i32, arg1: []const u8) !void {
on the type:
{#code_begin|syntax#}
-pub fn printValue(self: &OutStream, value: var) !void {
+pub fn printValue(self: *OutStream, value: var) !void {
const T = @typeOf(value);
if (@isInteger(T)) {
return self.printInt(T, value);
@@ -3911,7 +3911,7 @@ pub fn main() void {
at compile time.
{#header_open|@addWithOverflow#}
- @addWithOverflow(comptime T: type, a: T, b: T, result: &T) -> bool
+ @addWithOverflow(comptime T: type, a: T, b: T, result: *T) bool
Performs result.* = a + b. If overflow or underflow occurs,
stores the overflowed bits in result and returns true.
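A usage sketch for the signature above (editorial example, not part of the patch; the operand values are chosen only to show both outcomes):

const assert = @import("std").debug.assert;

test "@addWithOverflow reports wrapping" {
    var result: u8 = undefined;
    // 250 + 10 does not fit in a u8: returns true, result holds the wrapped bits.
    assert(@addWithOverflow(u8, 250, 10, &result));
    // 250 + 5 fits: returns false and result holds the sum.
    assert(!@addWithOverflow(u8, 250, 5, &result));
    assert(result == 255);
}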
@@ -3919,7 +3919,7 @@ pub fn main() void {
{#header_close#}
{#header_open|@ArgType#}
- @ArgType(comptime T: type, comptime n: usize) -> type
+ @ArgType(comptime T: type, comptime n: usize) type
This builtin function takes a function type and returns the type of the parameter at index n.
@@ -3931,7 +3931,7 @@ pub fn main() void {
{#header_close#}
{#header_open|@atomicLoad#}
- @atomicLoad(comptime T: type, ptr: &const T, comptime ordering: builtin.AtomicOrder) -> T
+ @atomicLoad(comptime T: type, ptr: *const T, comptime ordering: builtin.AtomicOrder) T
This builtin function atomically dereferences a pointer and returns the value.
@@ -3950,7 +3950,7 @@ pub fn main() void {
{#header_close#}
{#header_open|@atomicRmw#}
- @atomicRmw(comptime T: type, ptr: &T, comptime op: builtin.AtomicRmwOp, operand: T, comptime ordering: builtin.AtomicOrder) -> T
+ @atomicRmw(comptime T: type, ptr: *T, comptime op: builtin.AtomicRmwOp, operand: T, comptime ordering: builtin.AtomicOrder) T
This builtin function atomically modifies memory and then returns the previous value.
@@ -3969,7 +3969,7 @@ pub fn main() void {
{#header_close#}
{#header_open|@bitCast#}
- @bitCast(comptime DestType: type, value: var) -> DestType
+ @bitCast(comptime DestType: type, value: var) DestType
Converts a value of one type to another type.
@@ -4002,9 +4002,9 @@ pub fn main() void {
{#header_close#}
{#header_open|@alignCast#}
- @alignCast(comptime alignment: u29, ptr: var) -> var
+ @alignCast(comptime alignment: u29, ptr: var) var
- ptr can be &T, fn(), ?&T,
+ ptr can be *T, fn(), ?*T,
?fn(), or []T. It returns the same type as ptr
except with the alignment adjusted to the new value.
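An editorial sketch of @alignCast with the documented signature, re-asserting an alignment that was lost through an implicit cast (the names byte and takesAligned are hypothetical):

const assert = @import("std").debug.assert;

var byte: u8 align(4) = 42;

fn takesAligned(ptr: *align(4) u8) u8 {
    return ptr.*;
}

test "@alignCast restores a known alignment" {
    // &byte is *align(4) u8, and it coerces down to a plain *u8 ...
    const plain: *u8 = &byte;
    // ... @alignCast(4, plain) asserts the alignment back (checked in safe builds).
    assert(takesAligned(@alignCast(4, plain)) == 42);
}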
@@ -4013,7 +4013,7 @@ pub fn main() void {
{#header_close#}
{#header_open|@alignOf#}
- @alignOf(comptime T: type) -> (number literal)
+ @alignOf(comptime T: type) (number literal)
This function returns the number of bytes that this type should be aligned to
for the current target to match the C ABI. When the child type of a pointer has
@@ -4021,7 +4021,7 @@ pub fn main() void {
const assert = @import("std").debug.assert;
comptime {
- assert(&u32 == &align(@alignOf(u32)) u32);
+ assert(*u32 == *align(@alignOf(u32)) u32);
}
The result is a target-specific compile time constant. It is guaranteed to be
@@ -4049,7 +4049,7 @@ comptime {
{#see_also|Import from C Header File|@cInclude|@cImport|@cUndef|void#}
{#header_close#}
{#header_open|@cImport#}
- @cImport(expression) -> (namespace)
+ @cImport(expression) (namespace)
This function parses C code and imports the functions, types, variables, and
compatible macro definitions into the result namespace.
@@ -4095,13 +4095,13 @@ comptime {
{#see_also|Import from C Header File|@cImport|@cDefine|@cInclude#}
{#header_close#}
{#header_open|@canImplicitCast#}
- @canImplicitCast(comptime T: type, value) -> bool
+ @canImplicitCast(comptime T: type, value) bool
Returns whether a value can be implicitly casted to a given type.
{#header_close#}
{#header_open|@clz#}
- @clz(x: T) -> U
+ @clz(x: T) U
This function counts the number of leading zeroes in x which is an integer type T.
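A small editorial example of the counting behavior (not part of the patch):

const assert = @import("std").debug.assert;

test "@clz counts leading zero bits" {
    const x: u8 = 0b00010000;
    // Three high bits are zero before the first set bit.
    assert(@clz(x) == 3);
    const zero: u8 = 0;
    // With no bits set, every bit of the u8 counts as a leading zero.
    assert(@clz(zero) == 8);
}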
@@ -4116,13 +4116,13 @@ comptime {
{#header_close#}
{#header_open|@cmpxchgStrong#}
- @cmpxchgStrong(comptime T: type, ptr: &T, expected_value: T, new_value: T, success_order: AtomicOrder, fail_order: AtomicOrder) -> ?T
+ @cmpxchgStrong(comptime T: type, ptr: *T, expected_value: T, new_value: T, success_order: AtomicOrder, fail_order: AtomicOrder) ?T
This function performs a strong atomic compare exchange operation. It's the equivalent of this code,
except atomic:
{#code_begin|syntax#}
-fn cmpxchgStrongButNotAtomic(comptime T: type, ptr: &T, expected_value: T, new_value: T) ?T {
+fn cmpxchgStrongButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_value: T) ?T {
const old_value = ptr.*;
if (old_value == expected_value) {
ptr.* = new_value;
@@ -4143,13 +4143,13 @@ fn cmpxchgStrongButNotAtomic(comptime T: type, ptr: &T, expected_value: T, new_v
{#see_also|Compile Variables|cmpxchgWeak#}
{#header_close#}
{#header_open|@cmpxchgWeak#}
- @cmpxchgWeak(comptime T: type, ptr: &T, expected_value: T, new_value: T, success_order: AtomicOrder, fail_order: AtomicOrder) -> ?T
+ @cmpxchgWeak(comptime T: type, ptr: *T, expected_value: T, new_value: T, success_order: AtomicOrder, fail_order: AtomicOrder) ?T
This function performs a weak atomic compare exchange operation. It's the equivalent of this code,
except atomic:
{#code_begin|syntax#}
-fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: &T, expected_value: T, new_value: T) ?T {
+fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_value: T) ?T {
const old_value = ptr.*;
if (old_value == expected_value and usuallyTrueButSometimesFalse()) {
ptr.* = new_value;
@@ -4237,7 +4237,7 @@ test "main" {
{#code_end#}
{#header_close#}
{#header_open|@ctz#}
- @ctz(x: T) -> U
+ @ctz(x: T) U
This function counts the number of trailing zeroes in x which is an integer type T.
@@ -4251,7 +4251,7 @@ test "main" {
{#header_close#}
{#header_open|@divExact#}
- @divExact(numerator: T, denominator: T) -> T
+ @divExact(numerator: T, denominator: T) T
Exact division. Caller guarantees denominator != 0 and
@divTrunc(numerator, denominator) * denominator == numerator.
@@ -4264,7 +4264,7 @@ test "main" {
{#see_also|@divTrunc|@divFloor#}
{#header_close#}
{#header_open|@divFloor#}
- @divFloor(numerator: T, denominator: T) -> T
+ @divFloor(numerator: T, denominator: T) T
Floored division. Rounds toward negative infinity. For unsigned integers it is
the same as numerator / denominator. Caller guarantees denominator != 0 and
@@ -4278,7 +4278,7 @@ test "main" {
{#see_also|@divTrunc|@divExact#}
{#header_close#}
{#header_open|@divTrunc#}
- @divTrunc(numerator: T, denominator: T) -> T
+ @divTrunc(numerator: T, denominator: T) T
Truncated division. Rounds toward zero. For unsigned integers it is
the same as numerator / denominator. Caller guarantees denominator != 0 and
@@ -4292,7 +4292,7 @@ test "main" {
{#see_also|@divFloor|@divExact#}
{#header_close#}
{#header_open|@embedFile#}
- @embedFile(comptime path: []const u8) -> [X]u8
+ @embedFile(comptime path: []const u8) [X]u8
This function returns a compile time constant fixed-size array with length
equal to the byte count of the file given by path. The contents of the array
@@ -4304,19 +4304,19 @@ test "main" {
{#see_also|@import#}
{#header_close#}
{#header_open|@export#}
- @export(comptime name: []const u8, target: var, linkage: builtin.GlobalLinkage) -> []const u8
+ @export(comptime name: []const u8, target: var, linkage: builtin.GlobalLinkage) []const u8
Creates a symbol in the output object file.
{#header_close#}
{#header_open|@tagName#}
- @tagName(value: var) -> []const u8
+ @tagName(value: var) []const u8
Converts an enum value or union value to a slice of bytes representing the name.
{#header_close#}
{#header_open|@TagType#}
- @TagType(T: type) -> type
+ @TagType(T: type) type
For an enum, returns the integer type that is used to store the enumeration value.
@@ -4325,7 +4325,7 @@ test "main" {
{#header_close#}
{#header_open|@errorName#}
- @errorName(err: error) -> []u8
+ @errorName(err: error) []u8
This function returns the string representation of an error. If an error
declaration is:
@@ -4341,7 +4341,7 @@ test "main" {
{#header_close#}
{#header_open|@errorReturnTrace#}
- @errorReturnTrace() -> ?&builtin.StackTrace
+ @errorReturnTrace() ?*builtin.StackTrace
If the binary is built with error return tracing, and this function is invoked in a
function that calls a function with an error or error union return type, returns a
@@ -4360,7 +4360,7 @@ test "main" {
{#header_close#}
{#header_open|@fieldParentPtr#}
@fieldParentPtr(comptime ParentType: type, comptime field_name: []const u8,
- field_ptr: &T) -> &ParentType
+ field_ptr: *T) *ParentType
Given a pointer to a field, returns the base pointer of a struct.
@@ -4380,7 +4380,7 @@ test "main" {
{#header_close#}
{#header_open|@import#}
- @import(comptime path: []u8) -> (namespace)
+ @import(comptime path: []u8) (namespace)
This function finds a zig file corresponding to path and imports all the
public top level declarations into the resulting namespace.
@@ -4400,7 +4400,7 @@ test "main" {
{#see_also|Compile Variables|@embedFile#}
{#header_close#}
{#header_open|@inlineCall#}
- @inlineCall(function: X, args: ...) -> Y
+ @inlineCall(function: X, args: ...) Y
This calls a function, in the same way that invoking an expression with parentheses does:
@@ -4420,19 +4420,19 @@ fn add(a: i32, b: i32) i32 { return a + b; }
{#see_also|@noInlineCall#}
{#header_close#}
{#header_open|@intToPtr#}
- @intToPtr(comptime DestType: type, int: usize) -> DestType
+ @intToPtr(comptime DestType: type, int: usize) DestType
Converts an integer to a pointer. To convert the other way, use {#link|@ptrToInt#}.
{#header_close#}
{#header_open|@IntType#}
- @IntType(comptime is_signed: bool, comptime bit_count: u8) -> type
+ @IntType(comptime is_signed: bool, comptime bit_count: u8) type
This function returns an integer type with the given signness and bit count.
{#header_close#}
{#header_open|@maxValue#}
- @maxValue(comptime T: type) -> (number literal)
+ @maxValue(comptime T: type) (number literal)
This function returns the maximum value of the integer type T.
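An editorial sketch combining @IntType with @maxValue and @minValue as documented in this file (not part of the patch):

const assert = @import("std").debug.assert;

test "integer type introspection" {
    // @IntType builds an integer type from signedness and bit count ...
    const T = @IntType(false, 8);
    assert(T == u8);
    // ... and @maxValue / @minValue give the numeric range of that type.
    assert(@maxValue(T) == 255);
    assert(@minValue(T) == 0);
}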
@@ -4441,7 +4441,7 @@ fn add(a: i32, b: i32) i32 { return a + b; }
{#header_close#}
{#header_open|@memberCount#}
- @memberCount(comptime T: type) -> (number literal)
+ @memberCount(comptime T: type) (number literal)
This function returns the number of members in a struct, enum, or union type.
@@ -4453,7 +4453,7 @@ fn add(a: i32, b: i32) i32 { return a + b; }
{#header_close#}
{#header_open|@memberName#}
- @memberName(comptime T: type, comptime index: usize) -> [N]u8
+ @memberName(comptime T: type, comptime index: usize) [N]u8
Returns the field name of a struct, union, or enum.
The result is a compile time constant.
@@ -4463,15 +4463,15 @@ fn add(a: i32, b: i32) i32 { return a + b; }
{#header_close#}
{#header_open|@field#}
- @field(lhs: var, comptime field_name: []const u8) -> (field)
+ @field(lhs: var, comptime field_name: []const u8) (field)
Performs field access equivalent to lhs.->field_name-<.
{#header_close#}
{#header_open|@memberType#}
- @memberType(comptime T: type, comptime index: usize) -> type
+ @memberType(comptime T: type, comptime index: usize) type
Returns the field type of a struct or union.
{#header_close#}
{#header_open|@memcpy#}
- @memcpy(noalias dest: &u8, noalias source: &const u8, byte_count: usize)
+ @memcpy(noalias dest: *u8, noalias source: *const u8, byte_count: usize)
This function copies bytes from one region of memory to another. dest and
source are both pointers and must not overlap.
@@ -4489,7 +4489,7 @@ fn add(a: i32, b: i32) i32 { return a + b; }
mem.copy(u8, dest[0...byte_count], source[0...byte_count]);
{#header_close#}
{#header_open|@memset#}
- @memset(dest: &u8, c: u8, byte_count: usize)
+ @memset(dest: *u8, c: u8, byte_count: usize)
This function sets a region of memory to c. dest is a pointer.
@@ -4506,7 +4506,7 @@ mem.copy(u8, dest[0...byte_count], source[0...byte_count]);
mem.set(u8, dest, c);
{#header_close#}
{#header_open|@minValue#}
- @minValue(comptime T: type) -> (number literal)
+ @minValue(comptime T: type) (number literal)
This function returns the minimum value of the integer type T.
@@ -4515,7 +4515,7 @@ mem.set(u8, dest, c);
{#header_close#}
{#header_open|@mod#}
- @mod(numerator: T, denominator: T) -> T
+ @mod(numerator: T, denominator: T) T
Modulus division. For unsigned integers this is the same as
numerator % denominator. Caller guarantees denominator > 0.
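The difference from @rem (documented further down in this file) only shows up with a negative numerator; an editorial sketch:

const assert = @import("std").debug.assert;

test "@mod versus @rem with a negative numerator" {
    const a: i32 = -5;
    // @mod rounds the quotient toward negative infinity, so the result is non-negative here.
    assert(@mod(a, 3) == 1);
    // @rem truncates toward zero, keeping the numerator's sign.
    assert(@rem(a, 3) == -2);
}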
@@ -4528,7 +4528,7 @@ mem.set(u8, dest, c);
{#see_also|@rem#}
{#header_close#}
{#header_open|@mulWithOverflow#}
- @mulWithOverflow(comptime T: type, a: T, b: T, result: &T) -> bool
+ @mulWithOverflow(comptime T: type, a: T, b: T, result: *T) bool
Performs result.* = a * b. If overflow or underflow occurs,
stores the overflowed bits in result and returns true.
@@ -4536,7 +4536,7 @@ mem.set(u8, dest, c);
{#header_close#}
{#header_open|@newStackCall#}
- @newStackCall(new_stack: []u8, function: var, args: ...) -> var
+ @newStackCall(new_stack: []u8, function: var, args: ...) var
This calls a function, in the same way that invoking an expression with parentheses does. However,
instead of using the same stack as the caller, the function uses the stack provided in the new_stack
@@ -4572,7 +4572,7 @@ fn targetFunction(x: i32) usize {
{#code_end#}
{#header_close#}
{#header_open|@noInlineCall#}
- @noInlineCall(function: var, args: ...) -> var
+ @noInlineCall(function: var, args: ...) var
This calls a function, in the same way that invoking an expression with parentheses does:
@@ -4594,13 +4594,13 @@ fn add(a: i32, b: i32) i32 {
{#see_also|@inlineCall#}
{#header_close#}
{#header_open|@offsetOf#}
- @offsetOf(comptime T: type, comptime field_name: [] const u8) -> (number literal)
+ @offsetOf(comptime T: type, comptime field_name: [] const u8) (number literal)
This function returns the byte offset of a field relative to its containing struct.
{#header_close#}
{#header_open|@OpaqueType#}
- @OpaqueType() -> type
+ @OpaqueType() type
Creates a new type with an unknown size and alignment.
@@ -4608,12 +4608,12 @@ fn add(a: i32, b: i32) i32 {
This is typically used for type safety when interacting with C code that does not expose struct details.
Example:
- {#code_begin|test_err|expected type '&Derp', found '&Wat'#}
+ {#code_begin|test_err|expected type '*Derp', found '*Wat'#}
const Derp = @OpaqueType();
const Wat = @OpaqueType();
-extern fn bar(d: &Derp) void;
-export fn foo(w: &Wat) void {
+extern fn bar(d: *Derp) void;
+export fn foo(w: *Wat) void {
bar(w);
}
@@ -4623,7 +4623,7 @@ test "call foo" {
{#code_end#}
{#header_close#}
{#header_open|@panic#}
- @panic(message: []const u8) -> noreturn
+ @panic(message: []const u8) noreturn
Invokes the panic handler function. By default the panic handler function
calls the public panic function exposed in the root source file, or
@@ -4639,19 +4639,19 @@ test "call foo" {
{#see_also|Root Source File#}
{#header_close#}
{#header_open|@ptrCast#}
- @ptrCast(comptime DestType: type, value: var) -> DestType
+ @ptrCast(comptime DestType: type, value: var) DestType
Converts a pointer of one type to a pointer of another type.
{#header_close#}
{#header_open|@ptrToInt#}
- @ptrToInt(value: var) -> usize
+ @ptrToInt(value: var) usize
Converts value to a usize which is the address of the pointer. value
can be one of these types:
- &T
- ?&T
+ *T
+ ?*T
fn()
?fn()
@@ -4659,7 +4659,7 @@ test "call foo" {
{#header_close#}
{#header_open|@rem#}
- @rem(numerator: T, denominator: T) -> T
+ @rem(numerator: T, denominator: T) T
Remainder division. For unsigned integers this is the same as
numerator % denominator. Caller guarantees denominator > 0.
@@ -4776,13 +4776,13 @@ pub const FloatMode = enum {
{#see_also|Compile Variables#}
{#header_close#}
{#header_open|@setGlobalSection#}
- @setGlobalSection(global_variable_name, comptime section_name: []const u8) -> bool
+ @setGlobalSection(global_variable_name, comptime section_name: []const u8) bool
Puts the global variable in the specified section.
{#header_close#}
{#header_open|@shlExact#}
- @shlExact(value: T, shift_amt: Log2T) -> T
+ @shlExact(value: T, shift_amt: Log2T) T
Performs the left shift operation (<<). Caller guarantees
that the shift will not shift any 1 bits out.
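An editorial sketch of that guarantee: the shifted-out bits must all be zero, otherwise the operation is illegal behavior (caught in safe build modes):

const assert = @import("std").debug.assert;

test "@shlExact shifts without losing set bits" {
    const x: u8 = 0b00010110;
    // Shifting by 2 keeps every set bit inside the u8, so the guarantee holds.
    assert(@shlExact(x, 2) == 0b01011000);
    // Shifting 0b10000000 left by 1 would drop a set bit; that would be illegal here.
}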
@@ -4794,7 +4794,7 @@ pub const FloatMode = enum {
{#see_also|@shrExact|@shlWithOverflow#}
{#header_close#}
{#header_open|@shlWithOverflow#}
- @shlWithOverflow(comptime T: type, a: T, shift_amt: Log2T, result: &T) -> bool
+ @shlWithOverflow(comptime T: type, a: T, shift_amt: Log2T, result: *T) bool
Performs result.* = a << b. If overflow or underflow occurs,
stores the overflowed bits in result and returns true.
@@ -4807,7 +4807,7 @@ pub const FloatMode = enum {
{#see_also|@shlExact|@shrExact#}
{#header_close#}
{#header_open|@shrExact#}
- @shrExact(value: T, shift_amt: Log2T) -> T
+ @shrExact(value: T, shift_amt: Log2T) T
Performs the right shift operation (>>). Caller guarantees
that the shift will not shift any 1 bits out.
@@ -4819,7 +4819,7 @@ pub const FloatMode = enum {
{#see_also|@shlExact|@shlWithOverflow#}
{#header_close#}
{#header_open|@sizeOf#}
- @sizeOf(comptime T: type) -> (number literal)
+ @sizeOf(comptime T: type) (number literal)
This function returns the number of bytes it takes to store T in memory.
@@ -4828,7 +4828,7 @@ pub const FloatMode = enum {
{#header_close#}
{#header_open|@sqrt#}
- @sqrt(comptime T: type, value: T) -> T
+ @sqrt(comptime T: type, value: T) T
Performs the square root of a floating point number. Uses a dedicated hardware instruction
when available. Currently only supports f32 and f64 at runtime. f128 at runtime is TODO.
@@ -4838,7 +4838,7 @@ pub const FloatMode = enum {
{#header_close#}
{#header_open|@subWithOverflow#}
- @subWithOverflow(comptime T: type, a: T, b: T, result: &T) -> bool
+ @subWithOverflow(comptime T: type, a: T, b: T, result: *T) bool
Performs result.* = a - b. If overflow or underflow occurs,
stores the overflowed bits in result and returns true.
@@ -4846,7 +4846,7 @@ pub const FloatMode = enum {
{#header_close#}
{#header_open|@truncate#}
- @truncate(comptime T: type, integer) -> T
+ @truncate(comptime T: type, integer) T
This function truncates bits from an integer type, resulting in a smaller
integer type.
@@ -4870,7 +4870,7 @@ const b: u8 = @truncate(u8, a);
{#header_close#}
{#header_open|@typeId#}
- @typeId(comptime T: type) -> @import("builtin").TypeId
+ @typeId(comptime T: type) @import("builtin").TypeId
Returns which kind of type something is. Possible values:
@@ -4904,7 +4904,7 @@ pub const TypeId = enum {
{#code_end#}
{#header_close#}
{#header_open|@typeInfo#}
- @typeInfo(comptime T: type) -> @import("builtin").TypeInfo
+ @typeInfo(comptime T: type) @import("builtin").TypeInfo
Returns information on the type. Returns a value of the following union:
@@ -5080,14 +5080,14 @@ pub const TypeInfo = union(TypeId) {
{#code_end#}
{#header_close#}
{#header_open|@typeName#}
- @typeName(T: type) -> []u8
+ @typeName(T: type) []u8
This function returns the string representation of a type.
{#header_close#}
{#header_open|@typeOf#}
- @typeOf(expression) -> type
+ @typeOf(expression) type
This function returns a compile-time constant, which is the type of the
expression passed as an argument. The expression is evaluated.
@@ -5937,7 +5937,7 @@ pub const __zig_test_fn_slice = {}; // overwritten later
{#header_open|C String Literals#}
{#code_begin|exe#}
{#link_libc#}
-extern fn puts(&const u8) void;
+extern fn puts(*const u8) void;
pub fn main() void {
puts(c"this has a null terminator");
@@ -5996,8 +5996,8 @@ const c = @cImport({
{#code_begin|syntax#}
const base64 = @import("std").base64;
-export fn decode_base_64(dest_ptr: &u8, dest_len: usize,
- source_ptr: &const u8, source_len: usize) usize
+export fn decode_base_64(dest_ptr: *u8, dest_len: usize,
+ source_ptr: *const u8, source_len: usize) usize
{
const src = source_ptr[0..source_len];
const dest = dest_ptr[0..dest_len];
@@ -6028,7 +6028,7 @@ int main(int argc, char **argv) {
{#code_begin|syntax#}
const Builder = @import("std").build.Builder;
-pub fn build(b: &Builder) void {
+pub fn build(b: *Builder) void {
const obj = b.addObject("base64", "base64.zig");
const exe = b.addCExecutable("test");
diff --git a/example/cat/main.zig b/example/cat/main.zig
index de0d323bed91..1b34cb22eb2e 100644
--- a/example/cat/main.zig
+++ b/example/cat/main.zig
@@ -41,7 +41,7 @@ fn usage(exe: []const u8) !void {
return error.Invalid;
}
-fn cat_file(stdout: &os.File, file: &os.File) !void {
+fn cat_file(stdout: *os.File, file: *os.File) !void {
var buf: [1024 * 4]u8 = undefined;
while (true) {
diff --git a/example/hello_world/hello_libc.zig b/example/hello_world/hello_libc.zig
index 1df8f04ce493..f64beda40f40 100644
--- a/example/hello_world/hello_libc.zig
+++ b/example/hello_world/hello_libc.zig
@@ -7,7 +7,7 @@ const c = @cImport({
const msg = c"Hello, world!\n";
-export fn main(argc: c_int, argv: &&u8) c_int {
+export fn main(argc: c_int, argv: **u8) c_int {
if (c.printf(msg) != c_int(c.strlen(msg))) return -1;
return 0;
diff --git a/example/mix_o_files/base64.zig b/example/mix_o_files/base64.zig
index e682a9705548..35b090825b53 100644
--- a/example/mix_o_files/base64.zig
+++ b/example/mix_o_files/base64.zig
@@ -1,6 +1,6 @@
const base64 = @import("std").base64;
-export fn decode_base_64(dest_ptr: &u8, dest_len: usize, source_ptr: &const u8, source_len: usize) usize {
+export fn decode_base_64(dest_ptr: *u8, dest_len: usize, source_ptr: *const u8, source_len: usize) usize {
const src = source_ptr[0..source_len];
const dest = dest_ptr[0..dest_len];
const base64_decoder = base64.standard_decoder_unsafe;
diff --git a/example/mix_o_files/build.zig b/example/mix_o_files/build.zig
index e5d2e6a44661..a4e7fbbf8f8b 100644
--- a/example/mix_o_files/build.zig
+++ b/example/mix_o_files/build.zig
@@ -1,6 +1,6 @@
const Builder = @import("std").build.Builder;
-pub fn build(b: &Builder) void {
+pub fn build(b: *Builder) void {
const obj = b.addObject("base64", "base64.zig");
const exe = b.addCExecutable("test");
diff --git a/example/shared_library/build.zig b/example/shared_library/build.zig
index 30c714c6c6c7..05648cf9ebc1 100644
--- a/example/shared_library/build.zig
+++ b/example/shared_library/build.zig
@@ -1,6 +1,6 @@
const Builder = @import("std").build.Builder;
-pub fn build(b: &Builder) void {
+pub fn build(b: *Builder) void {
const lib = b.addSharedLibrary("mathtest", "mathtest.zig", b.version(1, 0, 0));
const exe = b.addCExecutable("test");
diff --git a/src-self-hosted/arg.zig b/src-self-hosted/arg.zig
index fa2166e3a53c..df2c04ef1f9b 100644
--- a/src-self-hosted/arg.zig
+++ b/src-self-hosted/arg.zig
@@ -30,7 +30,7 @@ fn argInAllowedSet(maybe_set: ?[]const []const u8, arg: []const u8) bool {
}
// Modifies the current argument index during iteration
-fn readFlagArguments(allocator: &Allocator, args: []const []const u8, required: usize, allowed_set: ?[]const []const u8, index: &usize) !FlagArg {
+fn readFlagArguments(allocator: *Allocator, args: []const []const u8, required: usize, allowed_set: ?[]const []const u8, index: *usize) !FlagArg {
switch (required) {
0 => return FlagArg{ .None = undefined }, // TODO: Required to force non-tag but value?
1 => {
@@ -79,7 +79,7 @@ pub const Args = struct {
flags: HashMapFlags,
positionals: ArrayList([]const u8),
- pub fn parse(allocator: &Allocator, comptime spec: []const Flag, args: []const []const u8) !Args {
+ pub fn parse(allocator: *Allocator, comptime spec: []const Flag, args: []const []const u8) !Args {
var parsed = Args{
.flags = HashMapFlags.init(allocator),
.positionals = ArrayList([]const u8).init(allocator),
@@ -143,18 +143,18 @@ pub const Args = struct {
return parsed;
}
- pub fn deinit(self: &Args) void {
+ pub fn deinit(self: *Args) void {
self.flags.deinit();
self.positionals.deinit();
}
// e.g. --help
- pub fn present(self: &Args, name: []const u8) bool {
+ pub fn present(self: *Args, name: []const u8) bool {
return self.flags.contains(name);
}
// e.g. --name value
- pub fn single(self: &Args, name: []const u8) ?[]const u8 {
+ pub fn single(self: *Args, name: []const u8) ?[]const u8 {
if (self.flags.get(name)) |entry| {
switch (entry.value) {
FlagArg.Single => |inner| {
@@ -168,7 +168,7 @@ pub const Args = struct {
}
// e.g. --names value1 value2 value3
- pub fn many(self: &Args, name: []const u8) ?[]const []const u8 {
+ pub fn many(self: *Args, name: []const u8) ?[]const []const u8 {
if (self.flags.get(name)) |entry| {
switch (entry.value) {
FlagArg.Many => |inner| {
diff --git a/src-self-hosted/errmsg.zig b/src-self-hosted/errmsg.zig
index 9905b8e3a63e..32d2450aacf9 100644
--- a/src-self-hosted/errmsg.zig
+++ b/src-self-hosted/errmsg.zig
@@ -16,18 +16,18 @@ pub const Msg = struct {
text: []u8,
first_token: TokenIndex,
last_token: TokenIndex,
- tree: &ast.Tree,
+ tree: *ast.Tree,
};
/// `path` must outlive the returned Msg
/// `tree` must outlive the returned Msg
/// Caller owns returned Msg and must free with `allocator`
pub fn createFromParseError(
- allocator: &mem.Allocator,
- parse_error: &const ast.Error,
- tree: &ast.Tree,
+ allocator: *mem.Allocator,
+ parse_error: *const ast.Error,
+ tree: *ast.Tree,
path: []const u8,
-) !&Msg {
+) !*Msg {
const loc_token = parse_error.loc();
var text_buf = try std.Buffer.initSize(allocator, 0);
defer text_buf.deinit();
@@ -47,7 +47,7 @@ pub fn createFromParseError(
return msg;
}
-pub fn printToStream(stream: var, msg: &const Msg, color_on: bool) !void {
+pub fn printToStream(stream: var, msg: *const Msg, color_on: bool) !void {
const first_token = msg.tree.tokens.at(msg.first_token);
const last_token = msg.tree.tokens.at(msg.last_token);
const start_loc = msg.tree.tokenLocationPtr(0, first_token);
@@ -76,7 +76,7 @@ pub fn printToStream(stream: var, msg: &const Msg, color_on: bool) !void {
try stream.write("\n");
}
-pub fn printToFile(file: &os.File, msg: &const Msg, color: Color) !void {
+pub fn printToFile(file: *os.File, msg: *const Msg, color: Color) !void {
const color_on = switch (color) {
Color.Auto => file.isTty(),
Color.On => true,
diff --git a/src-self-hosted/introspect.zig b/src-self-hosted/introspect.zig
index adab00286bdb..56b56c0c785b 100644
--- a/src-self-hosted/introspect.zig
+++ b/src-self-hosted/introspect.zig
@@ -7,7 +7,7 @@ const os = std.os;
const warn = std.debug.warn;
/// Caller must free result
-pub fn testZigInstallPrefix(allocator: &mem.Allocator, test_path: []const u8) ![]u8 {
+pub fn testZigInstallPrefix(allocator: *mem.Allocator, test_path: []const u8) ![]u8 {
const test_zig_dir = try os.path.join(allocator, test_path, "lib", "zig");
errdefer allocator.free(test_zig_dir);
@@ -21,7 +21,7 @@ pub fn testZigInstallPrefix(allocator: &mem.Allocator, test_path: []const u8) ![
}
/// Caller must free result
-pub fn findZigLibDir(allocator: &mem.Allocator) ![]u8 {
+pub fn findZigLibDir(allocator: *mem.Allocator) ![]u8 {
const self_exe_path = try os.selfExeDirPath(allocator);
defer allocator.free(self_exe_path);
@@ -42,7 +42,7 @@ pub fn findZigLibDir(allocator: &mem.Allocator) ![]u8 {
return error.FileNotFound;
}
-pub fn resolveZigLibDir(allocator: &mem.Allocator) ![]u8 {
+pub fn resolveZigLibDir(allocator: *mem.Allocator) ![]u8 {
return findZigLibDir(allocator) catch |err| {
warn(
\\Unable to find zig lib directory: {}.
diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig
index c4550b5179ac..3334d9511b23 100644
--- a/src-self-hosted/ir.zig
+++ b/src-self-hosted/ir.zig
@@ -2,7 +2,7 @@ const Scope = @import("scope.zig").Scope;
pub const Instruction = struct {
id: Id,
- scope: &Scope,
+ scope: *Scope,
pub const Id = enum {
Br,
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index 71838503b7d9..80b1c3889afa 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -18,8 +18,8 @@ const Target = @import("target.zig").Target;
const errmsg = @import("errmsg.zig");
var stderr_file: os.File = undefined;
-var stderr: &io.OutStream(io.FileOutStream.Error) = undefined;
-var stdout: &io.OutStream(io.FileOutStream.Error) = undefined;
+var stderr: *io.OutStream(io.FileOutStream.Error) = undefined;
+var stdout: *io.OutStream(io.FileOutStream.Error) = undefined;
const usage =
\\usage: zig [command] [options]
@@ -43,7 +43,7 @@ const usage =
const Command = struct {
name: []const u8,
- exec: fn (&Allocator, []const []const u8) error!void,
+ exec: fn (*Allocator, []const []const u8) error!void,
};
pub fn main() !void {
@@ -191,7 +191,7 @@ const missing_build_file =
\\
;
-fn cmdBuild(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdBuild(allocator: *Allocator, args: []const []const u8) !void {
var flags = try Args.parse(allocator, args_build_spec, args);
defer flags.deinit();
@@ -426,7 +426,7 @@ const args_build_generic = []Flag{
Flag.Arg1("--ver-patch"),
};
-fn buildOutputType(allocator: &Allocator, args: []const []const u8, out_type: Module.Kind) !void {
+fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Module.Kind) !void {
var flags = try Args.parse(allocator, args_build_generic, args);
defer flags.deinit();
@@ -661,19 +661,19 @@ fn buildOutputType(allocator: &Allocator, args: []const []const u8, out_type: Mo
try stderr.print("building {}: {}\n", @tagName(out_type), in_file);
}
-fn cmdBuildExe(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdBuildExe(allocator: *Allocator, args: []const []const u8) !void {
try buildOutputType(allocator, args, Module.Kind.Exe);
}
// cmd:build-lib ///////////////////////////////////////////////////////////////////////////////////
-fn cmdBuildLib(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdBuildLib(allocator: *Allocator, args: []const []const u8) !void {
try buildOutputType(allocator, args, Module.Kind.Lib);
}
// cmd:build-obj ///////////////////////////////////////////////////////////////////////////////////
-fn cmdBuildObj(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdBuildObj(allocator: *Allocator, args: []const []const u8) !void {
try buildOutputType(allocator, args, Module.Kind.Obj);
}
@@ -700,7 +700,7 @@ const args_fmt_spec = []Flag{
}),
};
-fn cmdFmt(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdFmt(allocator: *Allocator, args: []const []const u8) !void {
var flags = try Args.parse(allocator, args_fmt_spec, args);
defer flags.deinit();
@@ -768,7 +768,7 @@ fn cmdFmt(allocator: &Allocator, args: []const []const u8) !void {
// cmd:targets /////////////////////////////////////////////////////////////////////////////////////
-fn cmdTargets(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdTargets(allocator: *Allocator, args: []const []const u8) !void {
try stdout.write("Architectures:\n");
{
comptime var i: usize = 0;
@@ -810,7 +810,7 @@ fn cmdTargets(allocator: &Allocator, args: []const []const u8) !void {
// cmd:version /////////////////////////////////////////////////////////////////////////////////////
-fn cmdVersion(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdVersion(allocator: *Allocator, args: []const []const u8) !void {
try stdout.print("{}\n", std.cstr.toSliceConst(c.ZIG_VERSION_STRING));
}
@@ -827,7 +827,7 @@ const usage_test =
const args_test_spec = []Flag{Flag.Bool("--help")};
-fn cmdTest(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdTest(allocator: *Allocator, args: []const []const u8) !void {
var flags = try Args.parse(allocator, args_build_spec, args);
defer flags.deinit();
@@ -862,7 +862,7 @@ const usage_run =
const args_run_spec = []Flag{Flag.Bool("--help")};
-fn cmdRun(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdRun(allocator: *Allocator, args: []const []const u8) !void {
var compile_args = args;
var runtime_args: []const []const u8 = []const []const u8{};
@@ -912,7 +912,7 @@ const args_translate_c_spec = []Flag{
Flag.Arg1("--output"),
};
-fn cmdTranslateC(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdTranslateC(allocator: *Allocator, args: []const []const u8) !void {
var flags = try Args.parse(allocator, args_translate_c_spec, args);
defer flags.deinit();
@@ -958,7 +958,7 @@ fn cmdTranslateC(allocator: &Allocator, args: []const []const u8) !void {
// cmd:help ////////////////////////////////////////////////////////////////////////////////////////
-fn cmdHelp(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdHelp(allocator: *Allocator, args: []const []const u8) !void {
try stderr.write(usage);
}
@@ -981,7 +981,7 @@ const info_zen =
\\
;
-fn cmdZen(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdZen(allocator: *Allocator, args: []const []const u8) !void {
try stdout.write(info_zen);
}
@@ -996,7 +996,7 @@ const usage_internal =
\\
;
-fn cmdInternal(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdInternal(allocator: *Allocator, args: []const []const u8) !void {
if (args.len == 0) {
try stderr.write(usage_internal);
os.exit(1);
@@ -1018,7 +1018,7 @@ fn cmdInternal(allocator: &Allocator, args: []const []const u8) !void {
try stderr.write(usage_internal);
}
-fn cmdInternalBuildInfo(allocator: &Allocator, args: []const []const u8) !void {
+fn cmdInternalBuildInfo(allocator: *Allocator, args: []const []const u8) !void {
try stdout.print(
\\ZIG_CMAKE_BINARY_DIR {}
\\ZIG_CXX_COMPILER {}
diff --git a/src-self-hosted/module.zig b/src-self-hosted/module.zig
index 61834eab66ce..a7ddf3f9e938 100644
--- a/src-self-hosted/module.zig
+++ b/src-self-hosted/module.zig
@@ -13,7 +13,7 @@ const ArrayList = std.ArrayList;
const errmsg = @import("errmsg.zig");
pub const Module = struct {
- allocator: &mem.Allocator,
+ allocator: *mem.Allocator,
name: Buffer,
root_src_path: ?[]const u8,
module: llvm.ModuleRef,
@@ -53,8 +53,8 @@ pub const Module = struct {
windows_subsystem_windows: bool,
windows_subsystem_console: bool,
- link_libs_list: ArrayList(&LinkLib),
- libc_link_lib: ?&LinkLib,
+ link_libs_list: ArrayList(*LinkLib),
+ libc_link_lib: ?*LinkLib,
err_color: errmsg.Color,
@@ -106,19 +106,19 @@ pub const Module = struct {
pub const CliPkg = struct {
name: []const u8,
path: []const u8,
- children: ArrayList(&CliPkg),
- parent: ?&CliPkg,
+ children: ArrayList(*CliPkg),
+ parent: ?*CliPkg,
- pub fn init(allocator: &mem.Allocator, name: []const u8, path: []const u8, parent: ?&CliPkg) !&CliPkg {
+ pub fn init(allocator: *mem.Allocator, name: []const u8, path: []const u8, parent: ?*CliPkg) !*CliPkg {
var pkg = try allocator.create(CliPkg);
pkg.name = name;
pkg.path = path;
- pkg.children = ArrayList(&CliPkg).init(allocator);
+ pkg.children = ArrayList(*CliPkg).init(allocator);
pkg.parent = parent;
return pkg;
}
- pub fn deinit(self: &CliPkg) void {
+ pub fn deinit(self: *CliPkg) void {
for (self.children.toSliceConst()) |child| {
child.deinit();
}
@@ -126,7 +126,7 @@ pub const Module = struct {
}
};
- pub fn create(allocator: &mem.Allocator, name: []const u8, root_src_path: ?[]const u8, target: &const Target, kind: Kind, build_mode: builtin.Mode, zig_lib_dir: []const u8, cache_dir: []const u8) !&Module {
+ pub fn create(allocator: *mem.Allocator, name: []const u8, root_src_path: ?[]const u8, target: *const Target, kind: Kind, build_mode: builtin.Mode, zig_lib_dir: []const u8, cache_dir: []const u8) !*Module {
var name_buffer = try Buffer.init(allocator, name);
errdefer name_buffer.deinit();
@@ -188,7 +188,7 @@ pub const Module = struct {
.link_objects = [][]const u8{},
.windows_subsystem_windows = false,
.windows_subsystem_console = false,
- .link_libs_list = ArrayList(&LinkLib).init(allocator),
+ .link_libs_list = ArrayList(*LinkLib).init(allocator),
.libc_link_lib = null,
.err_color = errmsg.Color.Auto,
.darwin_frameworks = [][]const u8{},
@@ -200,11 +200,11 @@ pub const Module = struct {
return module_ptr;
}
- fn dump(self: &Module) void {
+ fn dump(self: *Module) void {
c.LLVMDumpModule(self.module);
}
- pub fn destroy(self: &Module) void {
+ pub fn destroy(self: *Module) void {
c.LLVMDisposeBuilder(self.builder);
c.LLVMDisposeModule(self.module);
c.LLVMContextDispose(self.context);
@@ -213,7 +213,7 @@ pub const Module = struct {
self.allocator.destroy(self);
}
- pub fn build(self: &Module) !void {
+ pub fn build(self: *Module) !void {
if (self.llvm_argv.len != 0) {
var c_compatible_args = try std.cstr.NullTerminated2DArray.fromSlices(self.allocator, [][]const []const u8{
[][]const u8{"zig (LLVM option parsing)"},
@@ -259,12 +259,12 @@ pub const Module = struct {
self.dump();
}
- pub fn link(self: &Module, out_file: ?[]const u8) !void {
+ pub fn link(self: *Module, out_file: ?[]const u8) !void {
warn("TODO link");
return error.Todo;
}
- pub fn addLinkLib(self: &Module, name: []const u8, provided_explicitly: bool) !&LinkLib {
+ pub fn addLinkLib(self: *Module, name: []const u8, provided_explicitly: bool) !*LinkLib {
const is_libc = mem.eql(u8, name, "c");
if (is_libc) {
diff --git a/src-self-hosted/scope.zig b/src-self-hosted/scope.zig
index 05e586daaeb8..b73dcb4ed315 100644
--- a/src-self-hosted/scope.zig
+++ b/src-self-hosted/scope.zig
@@ -1,6 +1,6 @@
pub const Scope = struct {
id: Id,
- parent: &Scope,
+ parent: *Scope,
pub const Id = enum {
Decls,
diff --git a/src-self-hosted/target.zig b/src-self-hosted/target.zig
index 7983a3ddec7b..724d99ea2354 100644
--- a/src-self-hosted/target.zig
+++ b/src-self-hosted/target.zig
@@ -11,7 +11,7 @@ pub const Target = union(enum) {
Native,
Cross: CrossTarget,
- pub fn oFileExt(self: &const Target) []const u8 {
+ pub fn oFileExt(self: *const Target) []const u8 {
const environ = switch (self.*) {
Target.Native => builtin.environ,
Target.Cross => |t| t.environ,
@@ -22,28 +22,28 @@ pub const Target = union(enum) {
};
}
- pub fn exeFileExt(self: &const Target) []const u8 {
+ pub fn exeFileExt(self: *const Target) []const u8 {
return switch (self.getOs()) {
builtin.Os.windows => ".exe",
else => "",
};
}
- pub fn getOs(self: &const Target) builtin.Os {
+ pub fn getOs(self: *const Target) builtin.Os {
return switch (self.*) {
Target.Native => builtin.os,
Target.Cross => |t| t.os,
};
}
- pub fn isDarwin(self: &const Target) bool {
+ pub fn isDarwin(self: *const Target) bool {
return switch (self.getOs()) {
builtin.Os.ios, builtin.Os.macosx => true,
else => false,
};
}
- pub fn isWindows(self: &const Target) bool {
+ pub fn isWindows(self: *const Target) bool {
return switch (self.getOs()) {
builtin.Os.windows => true,
else => false,
diff --git a/src/all_types.hpp b/src/all_types.hpp
index 9c156fb58bfe..d5906cae95ad 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -374,7 +374,7 @@ enum NodeType {
NodeTypeCharLiteral,
NodeTypeSymbol,
NodeTypePrefixOpExpr,
- NodeTypeAddrOfExpr,
+ NodeTypePointerType,
NodeTypeFnCallExpr,
NodeTypeArrayAccessExpr,
NodeTypeSliceExpr,
@@ -616,6 +616,7 @@ enum PrefixOp {
PrefixOpNegationWrap,
PrefixOpMaybe,
PrefixOpUnwrapMaybe,
+ PrefixOpAddrOf,
};
struct AstNodePrefixOpExpr {
@@ -623,7 +624,7 @@ struct AstNodePrefixOpExpr {
AstNode *primary_expr;
};
-struct AstNodeAddrOfExpr {
+struct AstNodePointerType {
AstNode *align_expr;
BigInt *bit_offset_start;
BigInt *bit_offset_end;
@@ -899,7 +900,7 @@ struct AstNode {
AstNodeBinOpExpr bin_op_expr;
AstNodeCatchExpr unwrap_err_expr;
AstNodePrefixOpExpr prefix_op_expr;
- AstNodeAddrOfExpr addr_of_expr;
+ AstNodePointerType pointer_type;
AstNodeFnCallExpr fn_call_expr;
AstNodeArrayAccessExpr array_access_expr;
AstNodeSliceExpr slice_expr;
@@ -2053,7 +2054,7 @@ enum IrInstructionId {
IrInstructionIdTypeInfo,
IrInstructionIdTypeId,
IrInstructionIdSetEvalBranchQuota,
- IrInstructionIdPtrTypeOf,
+ IrInstructionIdPtrType,
IrInstructionIdAlignCast,
IrInstructionIdOpaqueType,
IrInstructionIdSetAlignStack,
@@ -2274,8 +2275,6 @@ struct IrInstructionVarPtr {
IrInstruction base;
VariableTableEntry *var;
- bool is_const;
- bool is_volatile;
};
struct IrInstructionCall {
@@ -2412,6 +2411,17 @@ struct IrInstructionArrayType {
IrInstruction *child_type;
};
+struct IrInstructionPtrType {
+ IrInstruction base;
+
+ IrInstruction *align_value;
+ IrInstruction *child_type;
+ uint32_t bit_offset_start;
+ uint32_t bit_offset_end;
+ bool is_const;
+ bool is_volatile;
+};
+
struct IrInstructionPromiseType {
IrInstruction base;
@@ -2891,17 +2901,6 @@ struct IrInstructionSetEvalBranchQuota {
IrInstruction *new_quota;
};
-struct IrInstructionPtrTypeOf {
- IrInstruction base;
-
- IrInstruction *align_value;
- IrInstruction *child_type;
- uint32_t bit_offset_start;
- uint32_t bit_offset_end;
- bool is_const;
- bool is_volatile;
-};
-
struct IrInstructionAlignCast {
IrInstruction base;
diff --git a/src/analyze.cpp b/src/analyze.cpp
index b00e18a9a183..a5011035c5c7 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -418,12 +418,12 @@ TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type
const char *volatile_str = is_volatile ? "volatile " : "";
buf_resize(&entry->name, 0);
if (unaligned_bit_count == 0 && byte_alignment == abi_alignment) {
- buf_appendf(&entry->name, "&%s%s%s", const_str, volatile_str, buf_ptr(&child_type->name));
+ buf_appendf(&entry->name, "*%s%s%s", const_str, volatile_str, buf_ptr(&child_type->name));
} else if (unaligned_bit_count == 0) {
- buf_appendf(&entry->name, "&align(%" PRIu32 ") %s%s%s", byte_alignment,
+ buf_appendf(&entry->name, "*align(%" PRIu32 ") %s%s%s", byte_alignment,
const_str, volatile_str, buf_ptr(&child_type->name));
} else {
- buf_appendf(&entry->name, "&align(%" PRIu32 ":%" PRIu32 ":%" PRIu32 ") %s%s%s", byte_alignment,
+ buf_appendf(&entry->name, "*align(%" PRIu32 ":%" PRIu32 ":%" PRIu32 ") %s%s%s", byte_alignment,
bit_offset, bit_offset + unaligned_bit_count, const_str, volatile_str, buf_ptr(&child_type->name));
}
@@ -3270,7 +3270,7 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) {
case NodeTypeThisLiteral:
case NodeTypeSymbol:
case NodeTypePrefixOpExpr:
- case NodeTypeAddrOfExpr:
+ case NodeTypePointerType:
case NodeTypeIfBoolExpr:
case NodeTypeWhileExpr:
case NodeTypeForExpr:
diff --git a/src/ast_render.cpp b/src/ast_render.cpp
index 5a1e81b36daf..f356f406b080 100644
--- a/src/ast_render.cpp
+++ b/src/ast_render.cpp
@@ -68,6 +68,7 @@ static const char *prefix_op_str(PrefixOp prefix_op) {
case PrefixOpBinNot: return "~";
case PrefixOpMaybe: return "?";
case PrefixOpUnwrapMaybe: return "??";
+ case PrefixOpAddrOf: return "&";
}
zig_unreachable();
}
@@ -185,8 +186,6 @@ static const char *node_type_str(NodeType node_type) {
return "Symbol";
case NodeTypePrefixOpExpr:
return "PrefixOpExpr";
- case NodeTypeAddrOfExpr:
- return "AddrOfExpr";
case NodeTypeUse:
return "Use";
case NodeTypeBoolLiteral:
@@ -251,6 +250,8 @@ static const char *node_type_str(NodeType node_type) {
return "Suspend";
case NodeTypePromiseType:
return "PromiseType";
+ case NodeTypePointerType:
+ return "PointerType";
}
zig_unreachable();
}
@@ -616,41 +617,41 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
fprintf(ar->f, "%s", prefix_op_str(op));
AstNode *child_node = node->data.prefix_op_expr.primary_expr;
- bool new_grouped = child_node->type == NodeTypePrefixOpExpr || child_node->type == NodeTypeAddrOfExpr;
+ bool new_grouped = child_node->type == NodeTypePrefixOpExpr || child_node->type == NodeTypePointerType;
render_node_extra(ar, child_node, new_grouped);
if (!grouped) fprintf(ar->f, ")");
break;
}
- case NodeTypeAddrOfExpr:
+ case NodeTypePointerType:
{
if (!grouped) fprintf(ar->f, "(");
- fprintf(ar->f, "&");
- if (node->data.addr_of_expr.align_expr != nullptr) {
+ fprintf(ar->f, "*");
+ if (node->data.pointer_type.align_expr != nullptr) {
fprintf(ar->f, "align(");
- render_node_grouped(ar, node->data.addr_of_expr.align_expr);
- if (node->data.addr_of_expr.bit_offset_start != nullptr) {
- assert(node->data.addr_of_expr.bit_offset_end != nullptr);
+ render_node_grouped(ar, node->data.pointer_type.align_expr);
+ if (node->data.pointer_type.bit_offset_start != nullptr) {
+ assert(node->data.pointer_type.bit_offset_end != nullptr);
Buf offset_start_buf = BUF_INIT;
buf_resize(&offset_start_buf, 0);
- bigint_append_buf(&offset_start_buf, node->data.addr_of_expr.bit_offset_start, 10);
+ bigint_append_buf(&offset_start_buf, node->data.pointer_type.bit_offset_start, 10);
Buf offset_end_buf = BUF_INIT;
buf_resize(&offset_end_buf, 0);
- bigint_append_buf(&offset_end_buf, node->data.addr_of_expr.bit_offset_end, 10);
+ bigint_append_buf(&offset_end_buf, node->data.pointer_type.bit_offset_end, 10);
fprintf(ar->f, ":%s:%s ", buf_ptr(&offset_start_buf), buf_ptr(&offset_end_buf));
}
fprintf(ar->f, ") ");
}
- if (node->data.addr_of_expr.is_const) {
+ if (node->data.pointer_type.is_const) {
fprintf(ar->f, "const ");
}
- if (node->data.addr_of_expr.is_volatile) {
+ if (node->data.pointer_type.is_volatile) {
fprintf(ar->f, "volatile ");
}
- render_node_ungrouped(ar, node->data.addr_of_expr.op_expr);
+ render_node_ungrouped(ar, node->data.pointer_type.op_expr);
if (!grouped) fprintf(ar->f, ")");
break;
}
@@ -669,7 +670,7 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
fprintf(ar->f, " ");
}
AstNode *fn_ref_node = node->data.fn_call_expr.fn_ref_expr;
- bool grouped = (fn_ref_node->type != NodeTypePrefixOpExpr && fn_ref_node->type != NodeTypeAddrOfExpr);
+ bool grouped = (fn_ref_node->type != NodeTypePrefixOpExpr && fn_ref_node->type != NodeTypePointerType);
render_node_extra(ar, fn_ref_node, grouped);
fprintf(ar->f, "(");
for (size_t i = 0; i < node->data.fn_call_expr.params.length; i += 1) {
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 69542b3e675e..d07d4277298d 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -4600,7 +4600,7 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
case IrInstructionIdTypeInfo:
case IrInstructionIdTypeId:
case IrInstructionIdSetEvalBranchQuota:
- case IrInstructionIdPtrTypeOf:
+ case IrInstructionIdPtrType:
case IrInstructionIdOpaqueType:
case IrInstructionIdSetAlignStack:
case IrInstructionIdArgType:
diff --git a/src/ir.cpp b/src/ir.cpp
index 6e944a89767a..d996b4a2befe 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -41,10 +41,6 @@ struct IrAnalyze {
static const LVal LVAL_NONE = { false, false, false };
static const LVal LVAL_PTR = { true, false, false };
-static LVal make_lval_addr(bool is_const, bool is_volatile) {
- return { true, is_const, is_volatile };
-}
-
enum ConstCastResultId {
ConstCastResultIdOk,
ConstCastResultIdErrSet,
@@ -108,8 +104,7 @@ static IrInstruction *ir_get_deref(IrAnalyze *ira, IrInstruction *source_instruc
static ErrorMsg *exec_add_error_node(CodeGen *codegen, IrExecutable *exec, AstNode *source_node, Buf *msg);
static IrInstruction *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_name,
IrInstruction *source_instr, IrInstruction *container_ptr, TypeTableEntry *container_type);
-static IrInstruction *ir_get_var_ptr(IrAnalyze *ira, IrInstruction *instruction,
- VariableTableEntry *var, bool is_const_ptr, bool is_volatile_ptr);
+static IrInstruction *ir_get_var_ptr(IrAnalyze *ira, IrInstruction *instruction, VariableTableEntry *var);
static TypeTableEntry *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstruction *op);
static IrInstruction *ir_lval_wrap(IrBuilder *irb, Scope *scope, IrInstruction *value, LVal lval);
@@ -629,8 +624,8 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionSetEvalBranchQuo
return IrInstructionIdSetEvalBranchQuota;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionPtrTypeOf *) {
- return IrInstructionIdPtrTypeOf;
+static constexpr IrInstructionId ir_instruction_id(IrInstructionPtrType *) {
+ return IrInstructionIdPtrType;
}
static constexpr IrInstructionId ir_instruction_id(IrInstructionAlignCast *) {
@@ -1004,13 +999,9 @@ static IrInstruction *ir_build_bin_op_from(IrBuilder *irb, IrInstruction *old_in
return new_instruction;
}
-static IrInstruction *ir_build_var_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node,
- VariableTableEntry *var, bool is_const, bool is_volatile)
-{
+static IrInstruction *ir_build_var_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node, VariableTableEntry *var) {
IrInstructionVarPtr *instruction = ir_build_instruction(irb, scope, source_node);
instruction->var = var;
- instruction->is_const = is_const;
- instruction->is_volatile = is_volatile;
ir_ref_var(var);
@@ -1196,11 +1187,11 @@ static IrInstruction *ir_build_br_from(IrBuilder *irb, IrInstruction *old_instru
return new_instruction;
}
-static IrInstruction *ir_build_ptr_type_of(IrBuilder *irb, Scope *scope, AstNode *source_node,
+static IrInstruction *ir_build_ptr_type(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *child_type, bool is_const, bool is_volatile, IrInstruction *align_value,
uint32_t bit_offset_start, uint32_t bit_offset_end)
{
- IrInstructionPtrTypeOf *ptr_type_of_instruction = ir_build_instruction(irb, scope, source_node);
+ IrInstructionPtrType *ptr_type_of_instruction = ir_build_instruction(irb, scope, source_node);
ptr_type_of_instruction->align_value = align_value;
ptr_type_of_instruction->child_type = child_type;
ptr_type_of_instruction->is_const = is_const;
@@ -3519,8 +3510,7 @@ static IrInstruction *ir_gen_symbol(IrBuilder *irb, Scope *scope, AstNode *node,
VariableTableEntry *var = find_variable(irb->codegen, scope, variable_name);
if (var) {
- IrInstruction *var_ptr = ir_build_var_ptr(irb, scope, node, var,
- !lval.is_ptr || lval.is_const, lval.is_ptr && lval.is_volatile);
+ IrInstruction *var_ptr = ir_build_var_ptr(irb, scope, node, var);
if (lval.is_ptr)
return var_ptr;
else
@@ -4609,14 +4599,8 @@ static IrInstruction *ir_gen_if_bool_expr(IrBuilder *irb, Scope *scope, AstNode
}
static IrInstruction *ir_gen_prefix_op_id_lval(IrBuilder *irb, Scope *scope, AstNode *node, IrUnOp op_id, LVal lval) {
- AstNode *expr_node;
- if (node->type == NodeTypePrefixOpExpr) {
- expr_node = node->data.prefix_op_expr.primary_expr;
- } else if (node->type == NodeTypePtrDeref) {
- expr_node = node->data.ptr_deref_expr.target;
- } else {
- zig_unreachable();
- }
+ assert(node->type == NodeTypePrefixOpExpr);
+ AstNode *expr_node = node->data.prefix_op_expr.primary_expr;
IrInstruction *value = ir_gen_node_extra(irb, expr_node, scope, lval);
if (value == irb->codegen->invalid_instruction)
@@ -4640,16 +4624,12 @@ static IrInstruction *ir_lval_wrap(IrBuilder *irb, Scope *scope, IrInstruction *
return ir_build_ref(irb, scope, value->source_node, value, lval.is_const, lval.is_volatile);
}
-static IrInstruction *ir_gen_address_of(IrBuilder *irb, Scope *scope, AstNode *node) {
- assert(node->type == NodeTypeAddrOfExpr);
- bool is_const = node->data.addr_of_expr.is_const;
- bool is_volatile = node->data.addr_of_expr.is_volatile;
- AstNode *expr_node = node->data.addr_of_expr.op_expr;
- AstNode *align_expr = node->data.addr_of_expr.align_expr;
-
- if (align_expr == nullptr && !is_const && !is_volatile) {
- return ir_gen_node_extra(irb, expr_node, scope, make_lval_addr(is_const, is_volatile));
- }
+static IrInstruction *ir_gen_pointer_type(IrBuilder *irb, Scope *scope, AstNode *node) {
+ assert(node->type == NodeTypePointerType);
+ bool is_const = node->data.pointer_type.is_const;
+ bool is_volatile = node->data.pointer_type.is_volatile;
+ AstNode *expr_node = node->data.pointer_type.op_expr;
+ AstNode *align_expr = node->data.pointer_type.align_expr;
IrInstruction *align_value;
if (align_expr != nullptr) {
@@ -4665,27 +4645,27 @@ static IrInstruction *ir_gen_address_of(IrBuilder *irb, Scope *scope, AstNode *n
return child_type;
uint32_t bit_offset_start = 0;
- if (node->data.addr_of_expr.bit_offset_start != nullptr) {
- if (!bigint_fits_in_bits(node->data.addr_of_expr.bit_offset_start, 32, false)) {
+ if (node->data.pointer_type.bit_offset_start != nullptr) {
+ if (!bigint_fits_in_bits(node->data.pointer_type.bit_offset_start, 32, false)) {
Buf *val_buf = buf_alloc();
- bigint_append_buf(val_buf, node->data.addr_of_expr.bit_offset_start, 10);
+ bigint_append_buf(val_buf, node->data.pointer_type.bit_offset_start, 10);
exec_add_error_node(irb->codegen, irb->exec, node,
buf_sprintf("value %s too large for u32 bit offset", buf_ptr(val_buf)));
return irb->codegen->invalid_instruction;
}
- bit_offset_start = bigint_as_unsigned(node->data.addr_of_expr.bit_offset_start);
+ bit_offset_start = bigint_as_unsigned(node->data.pointer_type.bit_offset_start);
}
uint32_t bit_offset_end = 0;
- if (node->data.addr_of_expr.bit_offset_end != nullptr) {
- if (!bigint_fits_in_bits(node->data.addr_of_expr.bit_offset_end, 32, false)) {
+ if (node->data.pointer_type.bit_offset_end != nullptr) {
+ if (!bigint_fits_in_bits(node->data.pointer_type.bit_offset_end, 32, false)) {
Buf *val_buf = buf_alloc();
- bigint_append_buf(val_buf, node->data.addr_of_expr.bit_offset_end, 10);
+ bigint_append_buf(val_buf, node->data.pointer_type.bit_offset_end, 10);
exec_add_error_node(irb->codegen, irb->exec, node,
buf_sprintf("value %s too large for u32 bit offset", buf_ptr(val_buf)));
return irb->codegen->invalid_instruction;
}
- bit_offset_end = bigint_as_unsigned(node->data.addr_of_expr.bit_offset_end);
+ bit_offset_end = bigint_as_unsigned(node->data.pointer_type.bit_offset_end);
}
if ((bit_offset_start != 0 || bit_offset_end != 0) && bit_offset_start >= bit_offset_end) {
@@ -4694,7 +4674,7 @@ static IrInstruction *ir_gen_address_of(IrBuilder *irb, Scope *scope, AstNode *n
return irb->codegen->invalid_instruction;
}
- return ir_build_ptr_type_of(irb, scope, node, child_type, is_const, is_volatile,
+ return ir_build_ptr_type(irb, scope, node, child_type, is_const, is_volatile,
align_value, bit_offset_start, bit_offset_end);
}
@@ -4761,6 +4741,10 @@ static IrInstruction *ir_gen_prefix_op_expr(IrBuilder *irb, Scope *scope, AstNod
return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpMaybe), lval);
case PrefixOpUnwrapMaybe:
return ir_gen_maybe_assert_ok(irb, scope, node, lval);
+ case PrefixOpAddrOf: {
+ AstNode *expr_node = node->data.prefix_op_expr.primary_expr;
+ return ir_lval_wrap(irb, scope, ir_gen_node_extra(irb, expr_node, scope, LVAL_PTR), lval);
+ }
}
zig_unreachable();
}
@@ -5150,7 +5134,7 @@ static IrInstruction *ir_gen_for_expr(IrBuilder *irb, Scope *parent_scope, AstNo
IrInstruction *undefined_value = ir_build_const_undefined(irb, child_scope, elem_node);
ir_build_var_decl(irb, child_scope, elem_node, elem_var, elem_var_type, nullptr, undefined_value);
- IrInstruction *elem_var_ptr = ir_build_var_ptr(irb, child_scope, node, elem_var, false, false);
+ IrInstruction *elem_var_ptr = ir_build_var_ptr(irb, child_scope, node, elem_var);
AstNode *index_var_source_node;
VariableTableEntry *index_var;
@@ -5168,7 +5152,7 @@ static IrInstruction *ir_gen_for_expr(IrBuilder *irb, Scope *parent_scope, AstNo
IrInstruction *zero = ir_build_const_usize(irb, child_scope, node, 0);
IrInstruction *one = ir_build_const_usize(irb, child_scope, node, 1);
ir_build_var_decl(irb, child_scope, index_var_source_node, index_var, usize, nullptr, zero);
- IrInstruction *index_ptr = ir_build_var_ptr(irb, child_scope, node, index_var, false, false);
+ IrInstruction *index_ptr = ir_build_var_ptr(irb, child_scope, node, index_var);
IrBasicBlock *cond_block = ir_create_basic_block(irb, child_scope, "ForCond");
@@ -6397,7 +6381,7 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *parent_scope, Ast
IrInstruction *promise_result_type = ir_build_promise_result_type(irb, parent_scope, node, target_promise_type);
ir_build_await_bookkeeping(irb, parent_scope, node, promise_result_type);
ir_build_var_decl(irb, parent_scope, node, result_var, promise_result_type, nullptr, undefined_value);
- IrInstruction *my_result_var_ptr = ir_build_var_ptr(irb, parent_scope, node, result_var, false, false);
+ IrInstruction *my_result_var_ptr = ir_build_var_ptr(irb, parent_scope, node, result_var);
ir_build_store_ptr(irb, parent_scope, node, result_ptr_field_ptr, my_result_var_ptr);
IrInstruction *save_token = ir_build_coro_save(irb, parent_scope, node, irb->exec->coro_handle);
IrInstruction *promise_type_val = ir_build_const_type(irb, parent_scope, node,
@@ -6568,8 +6552,6 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop
return ir_lval_wrap(irb, scope, ir_gen_if_bool_expr(irb, scope, node), lval);
case NodeTypePrefixOpExpr:
return ir_gen_prefix_op_expr(irb, scope, node, lval);
- case NodeTypeAddrOfExpr:
- return ir_lval_wrap(irb, scope, ir_gen_address_of(irb, scope, node), lval);
case NodeTypeContainerInitExpr:
return ir_lval_wrap(irb, scope, ir_gen_container_init_expr(irb, scope, node), lval);
case NodeTypeVariableDeclaration:
@@ -6592,14 +6574,23 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop
return ir_build_load_ptr(irb, scope, node, ptr_instruction);
}
- case NodeTypePtrDeref:
- return ir_gen_prefix_op_id_lval(irb, scope, node, IrUnOpDereference, lval);
+ case NodeTypePtrDeref: {
+ assert(node->type == NodeTypePtrDeref);
+ AstNode *expr_node = node->data.ptr_deref_expr.target;
+ IrInstruction *value = ir_gen_node_extra(irb, expr_node, scope, lval);
+ if (value == irb->codegen->invalid_instruction)
+ return value;
+
+ return ir_build_un_op(irb, scope, node, IrUnOpDereference, value);
+ }
case NodeTypeThisLiteral:
return ir_lval_wrap(irb, scope, ir_gen_this_literal(irb, scope, node), lval);
case NodeTypeBoolLiteral:
return ir_lval_wrap(irb, scope, ir_gen_bool_literal(irb, scope, node), lval);
case NodeTypeArrayType:
return ir_lval_wrap(irb, scope, ir_gen_array_type(irb, scope, node), lval);
+ case NodeTypePointerType:
+ return ir_lval_wrap(irb, scope, ir_gen_pointer_type(irb, scope, node), lval);
case NodeTypePromiseType:
return ir_lval_wrap(irb, scope, ir_gen_promise_type(irb, scope, node), lval);
case NodeTypeStringLiteral:
@@ -6711,15 +6702,14 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
IrInstruction *coro_frame_type_value = ir_build_const_type(irb, coro_scope, node, coro_frame_type);
// TODO mark this var decl as "no safety" e.g. disable initializing the undef value to 0xaa
ir_build_var_decl(irb, coro_scope, node, promise_var, coro_frame_type_value, nullptr, undef);
- coro_promise_ptr = ir_build_var_ptr(irb, coro_scope, node, promise_var, false, false);
+ coro_promise_ptr = ir_build_var_ptr(irb, coro_scope, node, promise_var);
VariableTableEntry *await_handle_var = ir_create_var(irb, node, coro_scope, nullptr, false, false, true, const_bool_false);
IrInstruction *null_value = ir_build_const_null(irb, coro_scope, node);
IrInstruction *await_handle_type_val = ir_build_const_type(irb, coro_scope, node,
get_maybe_type(irb->codegen, irb->codegen->builtin_types.entry_promise));
ir_build_var_decl(irb, coro_scope, node, await_handle_var, await_handle_type_val, nullptr, null_value);
- irb->exec->await_handle_var_ptr = ir_build_var_ptr(irb, coro_scope, node,
- await_handle_var, false, false);
+ irb->exec->await_handle_var_ptr = ir_build_var_ptr(irb, coro_scope, node, await_handle_var);
u8_ptr_type = ir_build_const_type(irb, coro_scope, node,
get_pointer_to_type(irb->codegen, irb->codegen->builtin_types.entry_u8, false));
@@ -6859,7 +6849,7 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
IrInstruction *coro_mem_ptr_maybe = ir_build_coro_free(irb, scope, node, coro_id, irb->exec->coro_handle);
IrInstruction *coro_mem_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type, coro_mem_ptr_maybe);
IrInstruction *coro_mem_ptr_ref = ir_build_ref(irb, scope, node, coro_mem_ptr, true, false);
- IrInstruction *coro_size_ptr = ir_build_var_ptr(irb, scope, node, coro_size_var, true, false);
+ IrInstruction *coro_size_ptr = ir_build_var_ptr(irb, scope, node, coro_size_var);
IrInstruction *coro_size = ir_build_load_ptr(irb, scope, node, coro_size_ptr);
IrInstruction *mem_slice = ir_build_slice(irb, scope, node, coro_mem_ptr_ref, zero, coro_size, false);
size_t arg_count = 2;
@@ -8961,34 +8951,15 @@ static IrInstruction *ir_get_const_ptr(IrAnalyze *ira, IrInstruction *instructio
ConstExprValue *pointee, TypeTableEntry *pointee_type,
ConstPtrMut ptr_mut, bool ptr_is_const, bool ptr_is_volatile, uint32_t ptr_align)
{
- if (pointee_type->id == TypeTableEntryIdMetaType) {
- TypeTableEntry *type_entry = pointee->data.x_type;
- if (type_entry->id == TypeTableEntryIdUnreachable) {
- ir_add_error(ira, instruction, buf_sprintf("pointer to noreturn not allowed"));
- return ira->codegen->invalid_instruction;
- }
-
- IrInstruction *const_instr = ir_get_const(ira, instruction);
- ConstExprValue *const_val = &const_instr->value;
- const_val->type = pointee_type;
- type_ensure_zero_bits_known(ira->codegen, type_entry);
- if (type_is_invalid(type_entry)) {
- return ira->codegen->invalid_instruction;
- }
- const_val->data.x_type = get_pointer_to_type_extra(ira->codegen, type_entry,
- ptr_is_const, ptr_is_volatile, get_abi_alignment(ira->codegen, type_entry), 0, 0);
- return const_instr;
- } else {
- TypeTableEntry *ptr_type = get_pointer_to_type_extra(ira->codegen, pointee_type,
- ptr_is_const, ptr_is_volatile, ptr_align, 0, 0);
- IrInstruction *const_instr = ir_get_const(ira, instruction);
- ConstExprValue *const_val = &const_instr->value;
- const_val->type = ptr_type;
- const_val->data.x_ptr.special = ConstPtrSpecialRef;
- const_val->data.x_ptr.mut = ptr_mut;
- const_val->data.x_ptr.data.ref.pointee = pointee;
- return const_instr;
- }
+ TypeTableEntry *ptr_type = get_pointer_to_type_extra(ira->codegen, pointee_type,
+ ptr_is_const, ptr_is_volatile, ptr_align, 0, 0);
+ IrInstruction *const_instr = ir_get_const(ira, instruction);
+ ConstExprValue *const_val = &const_instr->value;
+ const_val->type = ptr_type;
+ const_val->data.x_ptr.special = ConstPtrSpecialRef;
+ const_val->data.x_ptr.mut = ptr_mut;
+ const_val->data.x_ptr.data.ref.pointee = pointee;
+ return const_instr;
}
static TypeTableEntry *ir_analyze_const_ptr(IrAnalyze *ira, IrInstruction *instruction,
@@ -9316,9 +9287,8 @@ static IrInstruction *ir_get_ref(IrAnalyze *ira, IrInstruction *source_instructi
ConstExprValue *val = ir_resolve_const(ira, value, UndefOk);
if (!val)
return ira->codegen->invalid_instruction;
- bool final_is_const = (value->value.type->id == TypeTableEntryIdMetaType) ? is_const : true;
return ir_get_const_ptr(ira, source_instruction, val, value->value.type,
- ConstPtrMutComptimeConst, final_is_const, is_volatile,
+ ConstPtrMutComptimeConst, is_const, is_volatile,
get_abi_alignment(ira->codegen, value->value.type));
}
@@ -9463,6 +9433,8 @@ static IrInstruction *ir_analyze_enum_to_union(IrAnalyze *ira, IrInstruction *so
TypeUnionField *union_field = find_union_field_by_tag(wanted_type, &val->data.x_enum_tag);
assert(union_field != nullptr);
type_ensure_zero_bits_known(ira->codegen, union_field->type_entry);
+ if (type_is_invalid(union_field->type_entry))
+ return ira->codegen->invalid_instruction;
if (!union_field->type_entry->zero_bits) {
AstNode *field_node = wanted_type->data.unionation.decl_node->data.container_decl.fields.at(
union_field->enum_field->decl_index);
@@ -10045,6 +10017,9 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
if (actual_type->id == TypeTableEntryIdNumLitFloat ||
actual_type->id == TypeTableEntryIdNumLitInt)
{
+ ensure_complete_type(ira->codegen, wanted_type);
+ if (type_is_invalid(wanted_type))
+ return ira->codegen->invalid_instruction;
if (wanted_type->id == TypeTableEntryIdEnum) {
IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.enumeration.tag_int_type, value);
if (type_is_invalid(cast1->value.type))
@@ -10247,21 +10222,6 @@ static IrInstruction *ir_get_deref(IrAnalyze *ira, IrInstruction *source_instruc
source_instruction->source_node, ptr);
load_ptr_instruction->value.type = child_type;
return load_ptr_instruction;
- } else if (type_entry->id == TypeTableEntryIdMetaType) {
- ConstExprValue *ptr_val = ir_resolve_const(ira, ptr, UndefBad);
- if (!ptr_val)
- return ira->codegen->invalid_instruction;
-
- TypeTableEntry *ptr_type = ptr_val->data.x_type;
- if (ptr_type->id == TypeTableEntryIdPointer) {
- TypeTableEntry *child_type = ptr_type->data.pointer.child_type;
- return ir_create_const_type(&ira->new_irb, source_instruction->scope,
- source_instruction->source_node, child_type);
- } else {
- ir_add_error(ira, source_instruction,
- buf_sprintf("attempt to dereference non pointer type '%s'", buf_ptr(&ptr_type->name)));
- return ira->codegen->invalid_instruction;
- }
} else {
ir_add_error_node(ira, source_instruction->source_node,
buf_sprintf("attempt to dereference non pointer type '%s'",
@@ -11968,7 +11928,7 @@ IrInstruction *ir_get_implicit_allocator(IrAnalyze *ira, IrInstruction *source_i
{
VariableTableEntry *coro_allocator_var = ira->old_irb.exec->coro_allocator_var;
assert(coro_allocator_var != nullptr);
- IrInstruction *var_ptr_inst = ir_get_var_ptr(ira, source_instr, coro_allocator_var, true, false);
+ IrInstruction *var_ptr_inst = ir_get_var_ptr(ira, source_instr, coro_allocator_var);
IrInstruction *result = ir_get_deref(ira, source_instr, var_ptr_inst);
assert(result->value.type != nullptr);
return result;
@@ -12149,7 +12109,7 @@ static VariableTableEntry *get_fn_var_by_index(FnTableEntry *fn_entry, size_t in
}
static IrInstruction *ir_get_var_ptr(IrAnalyze *ira, IrInstruction *instruction,
- VariableTableEntry *var, bool is_const_ptr, bool is_volatile_ptr)
+ VariableTableEntry *var)
{
if (var->mem_slot_index != SIZE_MAX && var->owner_exec->analysis == nullptr) {
assert(ira->codegen->errors.length != 0);
@@ -12175,8 +12135,8 @@ static IrInstruction *ir_get_var_ptr(IrAnalyze *ira, IrInstruction *instruction,
}
}
- bool is_const = (var->value->type->id == TypeTableEntryIdMetaType) ? is_const_ptr : var->src_is_const;
- bool is_volatile = (var->value->type->id == TypeTableEntryIdMetaType) ? is_volatile_ptr : false;
+ bool is_const = var->src_is_const;
+ bool is_volatile = false;
if (mem_slot != nullptr) {
switch (mem_slot->special) {
case ConstValSpecialRuntime:
@@ -12202,7 +12162,7 @@ static IrInstruction *ir_get_var_ptr(IrAnalyze *ira, IrInstruction *instruction,
no_mem_slot:
IrInstruction *var_ptr_instruction = ir_build_var_ptr(&ira->new_irb,
- instruction->scope, instruction->source_node, var, is_const, is_volatile);
+ instruction->scope, instruction->source_node, var);
var_ptr_instruction->value.type = get_pointer_to_type_extra(ira->codegen, var->value->type,
var->src_is_const, is_volatile, var->align_bytes, 0, 0);
type_ensure_zero_bits_known(ira->codegen, var->value->type);
@@ -12488,7 +12448,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal
buf_sprintf("compiler bug: var args can't handle void. https://github.com/ziglang/zig/issues/557"));
return ira->codegen->builtin_types.entry_invalid;
}
- IrInstruction *arg_var_ptr_inst = ir_get_var_ptr(ira, arg, arg_var, true, false);
+ IrInstruction *arg_var_ptr_inst = ir_get_var_ptr(ira, arg, arg_var);
if (type_is_invalid(arg_var_ptr_inst->value.type))
return ira->codegen->builtin_types.entry_invalid;
@@ -12811,6 +12771,10 @@ static TypeTableEntry *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op
TypeTableEntry *type_entry = ir_resolve_type(ira, value);
if (type_is_invalid(type_entry))
return ira->codegen->builtin_types.entry_invalid;
+ ensure_complete_type(ira->codegen, type_entry);
+ if (type_is_invalid(type_entry))
+ return ira->codegen->builtin_types.entry_invalid;
+
switch (type_entry->id) {
case TypeTableEntryIdInvalid:
zig_unreachable();
@@ -13122,17 +13086,16 @@ static TypeTableEntry *ir_analyze_instruction_phi(IrAnalyze *ira, IrInstructionP
}
static TypeTableEntry *ir_analyze_var_ptr(IrAnalyze *ira, IrInstruction *instruction,
- VariableTableEntry *var, bool is_const_ptr, bool is_volatile_ptr)
+ VariableTableEntry *var)
{
- IrInstruction *result = ir_get_var_ptr(ira, instruction, var, is_const_ptr, is_volatile_ptr);
+ IrInstruction *result = ir_get_var_ptr(ira, instruction, var);
ir_link_new_instruction(result, instruction);
return result->value.type;
}
static TypeTableEntry *ir_analyze_instruction_var_ptr(IrAnalyze *ira, IrInstructionVarPtr *var_ptr_instruction) {
VariableTableEntry *var = var_ptr_instruction->var;
- return ir_analyze_var_ptr(ira, &var_ptr_instruction->base, var, var_ptr_instruction->is_const,
- var_ptr_instruction->is_volatile);
+ return ir_analyze_var_ptr(ira, &var_ptr_instruction->base, var);
}
static TypeTableEntry *adjust_ptr_align(CodeGen *g, TypeTableEntry *ptr_type, uint32_t new_align) {
@@ -13154,11 +13117,6 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc
return ira->codegen->builtin_types.entry_invalid;
TypeTableEntry *ptr_type = array_ptr->value.type;
- if (ptr_type->id == TypeTableEntryIdMetaType) {
- ir_add_error(ira, &elem_ptr_instruction->base,
- buf_sprintf("array access of non-array type '%s'", buf_ptr(&ptr_type->name)));
- return ira->codegen->builtin_types.entry_invalid;
- }
assert(ptr_type->id == TypeTableEntryIdPointer);
TypeTableEntry *array_type = ptr_type->data.pointer.child_type;
@@ -13220,8 +13178,7 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc
bool is_const = true;
bool is_volatile = false;
if (var) {
- return ir_analyze_var_ptr(ira, &elem_ptr_instruction->base, var,
- is_const, is_volatile);
+ return ir_analyze_var_ptr(ira, &elem_ptr_instruction->base, var);
} else {
return ir_analyze_const_ptr(ira, &elem_ptr_instruction->base, &ira->codegen->const_void_val,
ira->codegen->builtin_types.entry_void, ConstPtrMutComptimeConst, is_const, is_volatile);
@@ -13239,6 +13196,9 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc
bool safety_check_on = elem_ptr_instruction->safety_check_on;
ensure_complete_type(ira->codegen, return_type->data.pointer.child_type);
+ if (type_is_invalid(return_type->data.pointer.child_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
uint64_t elem_size = type_size(ira->codegen, return_type->data.pointer.child_type);
uint64_t abi_align = get_abi_alignment(ira->codegen, return_type->data.pointer.child_type);
uint64_t ptr_align = return_type->data.pointer.alignment;
@@ -13605,7 +13565,7 @@ static TypeTableEntry *ir_analyze_decl_ref(IrAnalyze *ira, IrInstruction *source
add_link_lib_symbol(ira, tld_var->extern_lib_name, &var->name, source_instruction->source_node);
}
- return ir_analyze_var_ptr(ira, source_instruction, var, false, false);
+ return ir_analyze_var_ptr(ira, source_instruction, var);
}
case TldIdFn:
{
@@ -13654,14 +13614,8 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru
if (type_is_invalid(container_ptr->value.type))
return ira->codegen->builtin_types.entry_invalid;
- TypeTableEntry *container_type;
- if (container_ptr->value.type->id == TypeTableEntryIdPointer) {
- container_type = container_ptr->value.type->data.pointer.child_type;
- } else if (container_ptr->value.type->id == TypeTableEntryIdMetaType) {
- container_type = container_ptr->value.type;
- } else {
- zig_unreachable();
- }
+ TypeTableEntry *container_type = container_ptr->value.type->data.pointer.child_type;
+ assert(container_ptr->value.type->id == TypeTableEntryIdPointer);
Buf *field_name = field_ptr_instruction->field_name_buffer;
if (!field_name) {
@@ -13734,17 +13688,9 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru
if (!container_ptr_val)
return ira->codegen->builtin_types.entry_invalid;
- TypeTableEntry *child_type;
- if (container_ptr->value.type->id == TypeTableEntryIdMetaType) {
- TypeTableEntry *ptr_type = container_ptr_val->data.x_type;
- assert(ptr_type->id == TypeTableEntryIdPointer);
- child_type = ptr_type->data.pointer.child_type;
- } else if (container_ptr->value.type->id == TypeTableEntryIdPointer) {
- ConstExprValue *child_val = const_ptr_pointee(ira->codegen, container_ptr_val);
- child_type = child_val->data.x_type;
- } else {
- zig_unreachable();
- }
+ assert(container_ptr->value.type->id == TypeTableEntryIdPointer);
+ ConstExprValue *child_val = const_ptr_pointee(ira->codegen, container_ptr_val);
+ TypeTableEntry *child_type = child_val->data.x_type;
if (type_is_invalid(child_type)) {
return ira->codegen->builtin_types.entry_invalid;
@@ -13762,7 +13708,7 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru
}
if (child_type->id == TypeTableEntryIdEnum) {
ensure_complete_type(ira->codegen, child_type);
- if (child_type->data.enumeration.is_invalid)
+ if (type_is_invalid(child_type))
return ira->codegen->builtin_types.entry_invalid;
TypeEnumField *field = find_enum_type_field(child_type, field_name);
@@ -14635,27 +14581,27 @@ static TypeTableEntry *ir_analyze_instruction_unwrap_maybe(IrAnalyze *ira,
return ira->codegen->builtin_types.entry_invalid;
TypeTableEntry *ptr_type = value->value.type;
- if (ptr_type->id == TypeTableEntryIdMetaType) {
+ assert(ptr_type->id == TypeTableEntryIdPointer);
+
+ TypeTableEntry *type_entry = ptr_type->data.pointer.child_type;
+ if (type_is_invalid(type_entry)) {
+ return ira->codegen->builtin_types.entry_invalid;
+ } else if (type_entry->id == TypeTableEntryIdMetaType) {
// surprise! actually this is just ??T not an unwrap maybe instruction
- TypeTableEntry *ptr_type_ptr = ir_resolve_type(ira, value);
- assert(ptr_type_ptr->id == TypeTableEntryIdPointer);
- TypeTableEntry *child_type = ptr_type_ptr->data.pointer.child_type;
+ ConstExprValue *ptr_val = const_ptr_pointee(ira->codegen, &value->value);
+ assert(ptr_val->type->id == TypeTableEntryIdMetaType);
+ TypeTableEntry *child_type = ptr_val->data.x_type;
+
type_ensure_zero_bits_known(ira->codegen, child_type);
TypeTableEntry *layer1 = get_maybe_type(ira->codegen, child_type);
TypeTableEntry *layer2 = get_maybe_type(ira->codegen, layer1);
- TypeTableEntry *result_type = get_pointer_to_type(ira->codegen, layer2, true);
IrInstruction *const_instr = ir_build_const_type(&ira->new_irb, unwrap_maybe_instruction->base.scope,
- unwrap_maybe_instruction->base.source_node, result_type);
- ir_link_new_instruction(const_instr, &unwrap_maybe_instruction->base);
- return const_instr->value.type;
- }
-
- assert(ptr_type->id == TypeTableEntryIdPointer);
-
- TypeTableEntry *type_entry = ptr_type->data.pointer.child_type;
- if (type_is_invalid(type_entry)) {
- return ira->codegen->builtin_types.entry_invalid;
+ unwrap_maybe_instruction->base.source_node, layer2);
+ IrInstruction *result_instr = ir_get_ref(ira, &unwrap_maybe_instruction->base, const_instr,
+ ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile);
+ ir_link_new_instruction(result_instr, &unwrap_maybe_instruction->base);
+ return result_instr->value.type;
} else if (type_entry->id != TypeTableEntryIdMaybe) {
ir_add_error_node(ira, unwrap_maybe_instruction->value->source_node,
buf_sprintf("expected nullable type, found '%s'", buf_ptr(&type_entry->name)));
@@ -15181,6 +15127,8 @@ static TypeTableEntry *ir_analyze_container_init_fields_union(IrAnalyze *ira, Ir
assert(container_type->id == TypeTableEntryIdUnion);
ensure_complete_type(ira->codegen, container_type);
+ if (type_is_invalid(container_type))
+ return ira->codegen->builtin_types.entry_invalid;
if (instr_field_count != 1) {
ir_add_error(ira, instruction,
@@ -15248,6 +15196,8 @@ static TypeTableEntry *ir_analyze_container_init_fields(IrAnalyze *ira, IrInstru
}
ensure_complete_type(ira->codegen, container_type);
+ if (type_is_invalid(container_type))
+ return ira->codegen->builtin_types.entry_invalid;
size_t actual_field_count = container_type->data.structure.src_field_count;
@@ -15753,6 +15703,8 @@ static TypeTableEntry *ir_analyze_instruction_offset_of(IrAnalyze *ira,
return ira->codegen->builtin_types.entry_invalid;
ensure_complete_type(ira->codegen, container_type);
+ if (type_is_invalid(container_type))
+ return ira->codegen->builtin_types.entry_invalid;
IrInstruction *field_name_value = instruction->field_name->other;
Buf *field_name = ir_resolve_str(ira, field_name_value);
@@ -15806,6 +15758,9 @@ static TypeTableEntry *ir_type_info_get_type(IrAnalyze *ira, const char *type_na
assert(type_info_var->type->id == TypeTableEntryIdMetaType);
ensure_complete_type(ira->codegen, type_info_var->data.x_type);
+ if (type_is_invalid(type_info_var->data.x_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
type_info_type = type_info_var->data.x_type;
assert(type_info_type->id == TypeTableEntryIdUnion);
}
@@ -15831,26 +15786,37 @@ static TypeTableEntry *ir_type_info_get_type(IrAnalyze *ira, const char *type_na
VariableTableEntry *var = tld->var;
ensure_complete_type(ira->codegen, var->value->type);
+ if (type_is_invalid(var->value->type))
+ return ira->codegen->builtin_types.entry_invalid;
assert(var->value->type->id == TypeTableEntryIdMetaType);
return var->value->data.x_type;
}
-static void ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, ScopeDecls *decls_scope)
+static bool ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, ScopeDecls *decls_scope)
{
TypeTableEntry *type_info_definition_type = ir_type_info_get_type(ira, "Definition");
ensure_complete_type(ira->codegen, type_info_definition_type);
+ if (type_is_invalid(type_info_definition_type))
+ return false;
+
ensure_field_index(type_info_definition_type, "name", 0);
ensure_field_index(type_info_definition_type, "is_pub", 1);
ensure_field_index(type_info_definition_type, "data", 2);
TypeTableEntry *type_info_definition_data_type = ir_type_info_get_type(ira, "Data", type_info_definition_type);
ensure_complete_type(ira->codegen, type_info_definition_data_type);
+ if (type_is_invalid(type_info_definition_data_type))
+ return false;
TypeTableEntry *type_info_fn_def_type = ir_type_info_get_type(ira, "FnDef", type_info_definition_data_type);
ensure_complete_type(ira->codegen, type_info_fn_def_type);
+ if (type_is_invalid(type_info_fn_def_type))
+ return false;
TypeTableEntry *type_info_fn_def_inline_type = ir_type_info_get_type(ira, "Inline", type_info_fn_def_type);
ensure_complete_type(ira->codegen, type_info_fn_def_inline_type);
+ if (type_is_invalid(type_info_fn_def_inline_type))
+ return false;
// Loop through our definitions once to figure out how many definitions we will generate info for.
auto decl_it = decls_scope->decl_table.entry_iterator();
@@ -15865,7 +15831,7 @@ static void ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, Scop
resolve_top_level_decl(ira->codegen, curr_entry->value, false, curr_entry->value->source_node);
if (curr_entry->value->resolution != TldResolutionOk)
{
- return;
+ return false;
}
}
@@ -15930,6 +15896,9 @@ static void ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, Scop
{
VariableTableEntry *var = ((TldVar *)curr_entry->value)->var;
ensure_complete_type(ira->codegen, var->value->type);
+ if (type_is_invalid(var->value->type))
+ return false;
+
if (var->value->type->id == TypeTableEntryIdMetaType)
{
// We have a variable of type 'type', so it's actually a type definition.
@@ -16057,6 +16026,9 @@ static void ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, Scop
{
TypeTableEntry *type_entry = ((TldContainer *)curr_entry->value)->type_entry;
ensure_complete_type(ira->codegen, type_entry);
+ if (type_is_invalid(type_entry))
+ return false;
+
// This is a type.
bigint_init_unsigned(&inner_fields[2].data.x_union.tag, 0);
@@ -16077,6 +16049,7 @@ static void ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, Scop
}
assert(definition_index == definition_count);
+ return true;
}
static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *type_entry)
@@ -16085,6 +16058,8 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
assert(!type_is_invalid(type_entry));
ensure_complete_type(ira->codegen, type_entry);
+ if (type_is_invalid(type_entry))
+ return nullptr;
const auto make_enum_field_val = [ira](ConstExprValue *enum_field_val, TypeEnumField *enum_field,
TypeTableEntry *type_info_enum_field_type) {
@@ -16312,7 +16287,8 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
}
// defs: []TypeInfo.Definition
ensure_field_index(result->type, "defs", 3);
- ir_make_type_info_defs(ira, &fields[3], type_entry->data.enumeration.decls_scope);
+ if (!ir_make_type_info_defs(ira, &fields[3], type_entry->data.enumeration.decls_scope))
+ return nullptr;
break;
}
@@ -16467,7 +16443,8 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
}
// defs: []TypeInfo.Definition
ensure_field_index(result->type, "defs", 3);
- ir_make_type_info_defs(ira, &fields[3], type_entry->data.unionation.decls_scope);
+ if (!ir_make_type_info_defs(ira, &fields[3], type_entry->data.unionation.decls_scope))
+ return nullptr;
break;
}
@@ -16478,6 +16455,8 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
buf_init_from_str(&ptr_field_name, "ptr");
TypeTableEntry *ptr_type = type_entry->data.structure.fields_by_name.get(&ptr_field_name)->type_entry;
ensure_complete_type(ira->codegen, ptr_type);
+ if (type_is_invalid(ptr_type))
+ return nullptr;
buf_deinit(&ptr_field_name);
result = create_ptr_like_type_info("Slice", ptr_type);
@@ -16548,7 +16527,8 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
}
// defs: []TypeInfo.Definition
ensure_field_index(result->type, "defs", 2);
- ir_make_type_info_defs(ira, &fields[2], type_entry->data.structure.decls_scope);
+ if (!ir_make_type_info_defs(ira, &fields[2], type_entry->data.structure.decls_scope))
+ return nullptr;
break;
}
@@ -17339,8 +17319,11 @@ static TypeTableEntry *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstructio
if (array_type->data.array.len == 0 && byte_alignment == 0) {
byte_alignment = get_abi_alignment(ira->codegen, array_type->data.array.child_type);
}
+ bool is_comptime_const = ptr_ptr->value.special == ConstValSpecialStatic &&
+ ptr_ptr->value.data.x_ptr.mut == ConstPtrMutComptimeConst;
TypeTableEntry *slice_ptr_type = get_pointer_to_type_extra(ira->codegen, array_type->data.array.child_type,
- ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile,
+ ptr_type->data.pointer.is_const || is_comptime_const,
+ ptr_type->data.pointer.is_volatile,
byte_alignment, 0, 0);
return_type = get_slice_type(ira->codegen, slice_ptr_type);
} else if (array_type->id == TypeTableEntryIdPointer) {
@@ -17527,6 +17510,10 @@ static TypeTableEntry *ir_analyze_instruction_member_count(IrAnalyze *ira, IrIns
return ira->codegen->builtin_types.entry_invalid;
TypeTableEntry *container_type = ir_resolve_type(ira, container);
+ ensure_complete_type(ira->codegen, container_type);
+ if (type_is_invalid(container_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
uint64_t result;
if (type_is_invalid(container_type)) {
return ira->codegen->builtin_types.entry_invalid;
@@ -17561,6 +17548,11 @@ static TypeTableEntry *ir_analyze_instruction_member_type(IrAnalyze *ira, IrInst
if (type_is_invalid(container_type))
return ira->codegen->builtin_types.entry_invalid;
+ ensure_complete_type(ira->codegen, container_type);
+ if (type_is_invalid(container_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+
uint64_t member_index;
IrInstruction *index_value = instruction->member_index->other;
if (!ir_resolve_usize(ira, index_value, &member_index))
@@ -17603,6 +17595,10 @@ static TypeTableEntry *ir_analyze_instruction_member_name(IrAnalyze *ira, IrInst
if (type_is_invalid(container_type))
return ira->codegen->builtin_types.entry_invalid;
+ ensure_complete_type(ira->codegen, container_type);
+ if (type_is_invalid(container_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
uint64_t member_index;
IrInstruction *index_value = instruction->member_index->other;
if (!ir_resolve_usize(ira, index_value, &member_index))
@@ -17914,15 +17910,6 @@ static TypeTableEntry *ir_analyze_instruction_unwrap_err_payload(IrAnalyze *ira,
return ira->codegen->builtin_types.entry_invalid;
TypeTableEntry *ptr_type = value->value.type;
- // Because we don't have Pointer Reform yet, we can't have a pointer to a 'type'.
- // Therefor, we have to check for type 'type' here, so we can output a correct error
- // without asserting the assert below.
- if (ptr_type->id == TypeTableEntryIdMetaType) {
- ir_add_error(ira, value,
- buf_sprintf("expected error union type, found '%s'", buf_ptr(&ptr_type->name)));
- return ira->codegen->builtin_types.entry_invalid;
- }
-
// This will be a pointer type because unwrap err payload IR instruction operates on a pointer to a thing.
assert(ptr_type->id == TypeTableEntryIdPointer);
@@ -18553,7 +18540,12 @@ static TypeTableEntry *ir_analyze_instruction_bit_cast(IrAnalyze *ira, IrInstruc
return ira->codegen->builtin_types.entry_invalid;
ensure_complete_type(ira->codegen, dest_type);
+ if (type_is_invalid(dest_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
ensure_complete_type(ira->codegen, src_type);
+ if (type_is_invalid(src_type))
+ return ira->codegen->builtin_types.entry_invalid;
if (get_codegen_ptr_type(src_type) != nullptr) {
ir_add_error(ira, value,
@@ -18699,8 +18691,7 @@ static TypeTableEntry *ir_analyze_instruction_decl_ref(IrAnalyze *ira,
TldVar *tld_var = (TldVar *)tld;
VariableTableEntry *var = tld_var->var;
- IrInstruction *var_ptr = ir_get_var_ptr(ira, &instruction->base, var,
- !lval.is_ptr || lval.is_const, lval.is_ptr && lval.is_volatile);
+ IrInstruction *var_ptr = ir_get_var_ptr(ira, &instruction->base, var);
if (type_is_invalid(var_ptr->value.type))
return ira->codegen->builtin_types.entry_invalid;
@@ -18778,16 +18769,24 @@ static TypeTableEntry *ir_analyze_instruction_ptr_to_int(IrAnalyze *ira, IrInstr
return usize;
}
-static TypeTableEntry *ir_analyze_instruction_ptr_type_of(IrAnalyze *ira, IrInstructionPtrTypeOf *instruction) {
+static TypeTableEntry *ir_analyze_instruction_ptr_type(IrAnalyze *ira, IrInstructionPtrType *instruction) {
TypeTableEntry *child_type = ir_resolve_type(ira, instruction->child_type->other);
if (type_is_invalid(child_type))
return ira->codegen->builtin_types.entry_invalid;
+ if (child_type->id == TypeTableEntryIdUnreachable) {
+ ir_add_error(ira, &instruction->base, buf_sprintf("pointer to noreturn not allowed"));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
uint32_t align_bytes;
if (instruction->align_value != nullptr) {
if (!ir_resolve_align(ira, instruction->align_value->other, &align_bytes))
return ira->codegen->builtin_types.entry_invalid;
} else {
+ type_ensure_zero_bits_known(ira->codegen, child_type);
+ if (type_is_invalid(child_type))
+ return ira->codegen->builtin_types.entry_invalid;
align_bytes = get_abi_alignment(ira->codegen, child_type);
}
@@ -19606,8 +19605,8 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi
return ir_analyze_instruction_type_id(ira, (IrInstructionTypeId *)instruction);
case IrInstructionIdSetEvalBranchQuota:
return ir_analyze_instruction_set_eval_branch_quota(ira, (IrInstructionSetEvalBranchQuota *)instruction);
- case IrInstructionIdPtrTypeOf:
- return ir_analyze_instruction_ptr_type_of(ira, (IrInstructionPtrTypeOf *)instruction);
+ case IrInstructionIdPtrType:
+ return ir_analyze_instruction_ptr_type(ira, (IrInstructionPtrType *)instruction);
case IrInstructionIdAlignCast:
return ir_analyze_instruction_align_cast(ira, (IrInstructionAlignCast *)instruction);
case IrInstructionIdOpaqueType:
@@ -19783,7 +19782,7 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdCheckStatementIsVoid:
case IrInstructionIdPanic:
case IrInstructionIdSetEvalBranchQuota:
- case IrInstructionIdPtrTypeOf:
+ case IrInstructionIdPtrType:
case IrInstructionIdSetAlignStack:
case IrInstructionIdExport:
case IrInstructionIdCancel:
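
Note: two behaviors of the reworked ir_analyze_instruction_ptr_type above, shown as a hypothetical sketch (the error case is kept in a comment because it is rejected at compile time by design; the alignment assertion assumes a typical target where @alignOf(u32) divides the variable's address):

    const assert = @import("std").debug.assert;

    test "pointer type alignment defaults to the child type's ABI alignment (sketch)" {
        var x: u32 = 0;
        const p: *u32 = &x; // no align(...) clause, so the pointer type uses @alignOf(u32)
        assert(@ptrToInt(p) % @alignOf(u32) == 0);
    }

    // Rejected during analysis (sketch):
    // const Bad = *noreturn; // error: pointer to noreturn not allowed
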
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index 9678120f1dcd..3c177a8bbf57 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -921,7 +921,7 @@ static void ir_print_can_implicit_cast(IrPrint *irp, IrInstructionCanImplicitCas
fprintf(irp->f, ")");
}
-static void ir_print_ptr_type_of(IrPrint *irp, IrInstructionPtrTypeOf *instruction) {
+static void ir_print_ptr_type(IrPrint *irp, IrInstructionPtrType *instruction) {
fprintf(irp->f, "&");
if (instruction->align_value != nullptr) {
fprintf(irp->f, "align(");
@@ -1527,8 +1527,8 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdCanImplicitCast:
ir_print_can_implicit_cast(irp, (IrInstructionCanImplicitCast *)instruction);
break;
- case IrInstructionIdPtrTypeOf:
- ir_print_ptr_type_of(irp, (IrInstructionPtrTypeOf *)instruction);
+ case IrInstructionIdPtrType:
+ ir_print_ptr_type(irp, (IrInstructionPtrType *)instruction);
break;
case IrInstructionIdDeclRef:
ir_print_decl_ref(irp, (IrInstructionDeclRef *)instruction);
diff --git a/src/parser.cpp b/src/parser.cpp
index 4763d3b987a4..ef390a3a2e30 100644
--- a/src/parser.cpp
+++ b/src/parser.cpp
@@ -1167,20 +1167,19 @@ static PrefixOp tok_to_prefix_op(Token *token) {
case TokenIdTilde: return PrefixOpBinNot;
case TokenIdMaybe: return PrefixOpMaybe;
case TokenIdDoubleQuestion: return PrefixOpUnwrapMaybe;
+ case TokenIdAmpersand: return PrefixOpAddrOf;
default: return PrefixOpInvalid;
}
}
-static AstNode *ast_parse_addr_of(ParseContext *pc, size_t *token_index) {
- Token *ampersand_tok = ast_eat_token(pc, token_index, TokenIdAmpersand);
-
- AstNode *node = ast_create_node(pc, NodeTypeAddrOfExpr, ampersand_tok);
+static AstNode *ast_parse_pointer_type(ParseContext *pc, size_t *token_index, Token *star_tok) {
+ AstNode *node = ast_create_node(pc, NodeTypePointerType, star_tok);
Token *token = &pc->tokens->at(*token_index);
if (token->id == TokenIdKeywordAlign) {
*token_index += 1;
ast_eat_token(pc, token_index, TokenIdLParen);
- node->data.addr_of_expr.align_expr = ast_parse_expression(pc, token_index, true);
+ node->data.pointer_type.align_expr = ast_parse_expression(pc, token_index, true);
token = &pc->tokens->at(*token_index);
if (token->id == TokenIdColon) {
@@ -1189,24 +1188,24 @@ static AstNode *ast_parse_addr_of(ParseContext *pc, size_t *token_index) {
ast_eat_token(pc, token_index, TokenIdColon);
Token *bit_offset_end_tok = ast_eat_token(pc, token_index, TokenIdIntLiteral);
- node->data.addr_of_expr.bit_offset_start = token_bigint(bit_offset_start_tok);
- node->data.addr_of_expr.bit_offset_end = token_bigint(bit_offset_end_tok);
+ node->data.pointer_type.bit_offset_start = token_bigint(bit_offset_start_tok);
+ node->data.pointer_type.bit_offset_end = token_bigint(bit_offset_end_tok);
}
ast_eat_token(pc, token_index, TokenIdRParen);
token = &pc->tokens->at(*token_index);
}
if (token->id == TokenIdKeywordConst) {
*token_index += 1;
- node->data.addr_of_expr.is_const = true;
+ node->data.pointer_type.is_const = true;
token = &pc->tokens->at(*token_index);
}
if (token->id == TokenIdKeywordVolatile) {
*token_index += 1;
- node->data.addr_of_expr.is_volatile = true;
+ node->data.pointer_type.is_volatile = true;
}
- node->data.addr_of_expr.op_expr = ast_parse_prefix_op_expr(pc, token_index, true);
+ node->data.pointer_type.op_expr = ast_parse_prefix_op_expr(pc, token_index, true);
return node;
}
@@ -1216,8 +1215,17 @@ PrefixOp = "!" | "-" | "~" | ("*" option("align" "(" Expression option(":" Integ
*/
static AstNode *ast_parse_prefix_op_expr(ParseContext *pc, size_t *token_index, bool mandatory) {
Token *token = &pc->tokens->at(*token_index);
- if (token->id == TokenIdAmpersand) {
- return ast_parse_addr_of(pc, token_index);
+ if (token->id == TokenIdStar) {
+ *token_index += 1;
+ return ast_parse_pointer_type(pc, token_index, token);
+ }
+ if (token->id == TokenIdStarStar) {
+ *token_index += 1;
+ AstNode *child_node = ast_parse_pointer_type(pc, token_index, token);
+ child_node->column += 1;
+ AstNode *parent_node = ast_create_node(pc, NodeTypePointerType, token);
+ parent_node->data.pointer_type.op_expr = child_node;
+ return parent_node;
}
if (token->id == TokenIdKeywordTry) {
return ast_parse_try_expr(pc, token_index);
@@ -1234,13 +1242,12 @@ static AstNode *ast_parse_prefix_op_expr(ParseContext *pc, size_t *token_index,
AstNode *node = ast_create_node(pc, NodeTypePrefixOpExpr, token);
- AstNode *parent_node = node;
AstNode *prefix_op_expr = ast_parse_error_set_expr(pc, token_index, true);
node->data.prefix_op_expr.primary_expr = prefix_op_expr;
node->data.prefix_op_expr.prefix_op = prefix_op;
- return parent_node;
+ return node;
}
@@ -3121,9 +3128,9 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont
case NodeTypeErrorType:
// none
break;
- case NodeTypeAddrOfExpr:
- visit_field(&node->data.addr_of_expr.align_expr, visit, context);
- visit_field(&node->data.addr_of_expr.op_expr, visit, context);
+ case NodeTypePointerType:
+ visit_field(&node->data.pointer_type.align_expr, visit, context);
+ visit_field(&node->data.pointer_type.op_expr, visit, context);
break;
case NodeTypeErrorSetDecl:
visit_node_list(&node->data.err_set_decl.decls, visit, context);
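
Note: a sketch of the surface syntax the reworked prefix-expression parser accepts, including the `**` token being split into two nested pointer-type nodes (hypothetical snippet, not from the test suite):

    const assert = @import("std").debug.assert;

    test "prefix pointer syntax (sketch)" {
        var x: u8 = 42;
        var p: *u8 = &x;                 // '*' begins a pointer type; '&' is plain address-of
        const pp: **u8 = &p;             // '**' is one token, parsed as two PointerType nodes
        const q: *align(1) const u8 = p; // optional align(...) / const / volatile qualifiers
        assert(pp.*.* == 42);
        assert(q.* == 42);
    }
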
diff --git a/src/translate_c.cpp b/src/translate_c.cpp
index 50ff073008bd..db541d34f3e0 100644
--- a/src/translate_c.cpp
+++ b/src/translate_c.cpp
@@ -276,11 +276,18 @@ static AstNode *maybe_suppress_result(Context *c, ResultUsed result_used, AstNod
node);
}
-static AstNode *trans_create_node_addr_of(Context *c, bool is_const, bool is_volatile, AstNode *child_node) {
- AstNode *node = trans_create_node(c, NodeTypeAddrOfExpr);
- node->data.addr_of_expr.is_const = is_const;
- node->data.addr_of_expr.is_volatile = is_volatile;
- node->data.addr_of_expr.op_expr = child_node;
+static AstNode *trans_create_node_ptr_type(Context *c, bool is_const, bool is_volatile, AstNode *child_node) {
+ AstNode *node = trans_create_node(c, NodeTypePointerType);
+ node->data.pointer_type.is_const = is_const;
+ node->data.pointer_type.is_volatile = is_volatile;
+ node->data.pointer_type.op_expr = child_node;
+ return node;
+}
+
+static AstNode *trans_create_node_addr_of(Context *c, AstNode *child_node) {
+ AstNode *node = trans_create_node(c, NodeTypePrefixOpExpr);
+ node->data.prefix_op_expr.prefix_op = PrefixOpAddrOf;
+ node->data.prefix_op_expr.primary_expr = child_node;
return node;
}
@@ -848,7 +855,7 @@ static AstNode *trans_type(Context *c, const Type *ty, const SourceLocation &sou
return trans_create_node_prefix_op(c, PrefixOpMaybe, child_node);
}
- AstNode *pointer_node = trans_create_node_addr_of(c, child_qt.isConstQualified(),
+ AstNode *pointer_node = trans_create_node_ptr_type(c, child_qt.isConstQualified(),
child_qt.isVolatileQualified(), child_node);
return trans_create_node_prefix_op(c, PrefixOpMaybe, pointer_node);
}
@@ -1033,7 +1040,7 @@ static AstNode *trans_type(Context *c, const Type *ty, const SourceLocation &sou
emit_warning(c, source_loc, "unresolved array element type");
return nullptr;
}
- AstNode *pointer_node = trans_create_node_addr_of(c, child_qt.isConstQualified(),
+ AstNode *pointer_node = trans_create_node_ptr_type(c, child_qt.isConstQualified(),
child_qt.isVolatileQualified(), child_type_node);
return pointer_node;
}
@@ -1402,7 +1409,7 @@ static AstNode *trans_create_compound_assign_shift(Context *c, ResultUsed result
// const _ref = &lhs;
AstNode *lhs = trans_expr(c, ResultUsedYes, &child_scope->base, stmt->getLHS(), TransLValue);
if (lhs == nullptr) return nullptr;
- AstNode *addr_of_lhs = trans_create_node_addr_of(c, false, false, lhs);
+ AstNode *addr_of_lhs = trans_create_node_addr_of(c, lhs);
// TODO: avoid name collisions with generated variable names
Buf* tmp_var_name = buf_create_from_str("_ref");
AstNode *tmp_var_decl = trans_create_node_var_decl_local(c, true, tmp_var_name, nullptr, addr_of_lhs);
@@ -1476,7 +1483,7 @@ static AstNode *trans_create_compound_assign(Context *c, ResultUsed result_used,
// const _ref = &lhs;
AstNode *lhs = trans_expr(c, ResultUsedYes, &child_scope->base, stmt->getLHS(), TransLValue);
if (lhs == nullptr) return nullptr;
- AstNode *addr_of_lhs = trans_create_node_addr_of(c, false, false, lhs);
+ AstNode *addr_of_lhs = trans_create_node_addr_of(c, lhs);
// TODO: avoid name collisions with generated variable names
Buf* tmp_var_name = buf_create_from_str("_ref");
AstNode *tmp_var_decl = trans_create_node_var_decl_local(c, true, tmp_var_name, nullptr, addr_of_lhs);
@@ -1813,7 +1820,7 @@ static AstNode *trans_create_post_crement(Context *c, ResultUsed result_used, Tr
// const _ref = &expr;
AstNode *expr = trans_expr(c, ResultUsedYes, &child_scope->base, op_expr, TransLValue);
if (expr == nullptr) return nullptr;
- AstNode *addr_of_expr = trans_create_node_addr_of(c, false, false, expr);
+ AstNode *addr_of_expr = trans_create_node_addr_of(c, expr);
// TODO: avoid name collisions with generated variable names
Buf* ref_var_name = buf_create_from_str("_ref");
AstNode *ref_var_decl = trans_create_node_var_decl_local(c, true, ref_var_name, nullptr, addr_of_expr);
@@ -1868,7 +1875,7 @@ static AstNode *trans_create_pre_crement(Context *c, ResultUsed result_used, Tra
// const _ref = &expr;
AstNode *expr = trans_expr(c, ResultUsedYes, &child_scope->base, op_expr, TransLValue);
if (expr == nullptr) return nullptr;
- AstNode *addr_of_expr = trans_create_node_addr_of(c, false, false, expr);
+ AstNode *addr_of_expr = trans_create_node_addr_of(c, expr);
// TODO: avoid name collisions with generated variable names
Buf* ref_var_name = buf_create_from_str("_ref");
AstNode *ref_var_decl = trans_create_node_var_decl_local(c, true, ref_var_name, nullptr, addr_of_expr);
@@ -1917,7 +1924,7 @@ static AstNode *trans_unary_operator(Context *c, ResultUsed result_used, TransSc
AstNode *value_node = trans_expr(c, result_used, scope, stmt->getSubExpr(), TransLValue);
if (value_node == nullptr)
return value_node;
- return trans_create_node_addr_of(c, false, false, value_node);
+ return trans_create_node_addr_of(c, value_node);
}
case UO_Deref:
{
@@ -4441,7 +4448,7 @@ static AstNode *parse_ctok_suffix_op_expr(Context *c, CTokenize *ctok, size_t *t
} else if (first_tok->id == CTokIdAsterisk) {
*tok_i += 1;
- node = trans_create_node_addr_of(c, false, false, node);
+ node = trans_create_node_ptr_type(c, false, false, node);
} else {
return node;
}
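
Note: the translate_c split above distinguishes C pointer types (now NodeTypePointerType, rendered with `*` and wrapped in `?` since C pointers may be null) from taking an address (now PrefixOpAddrOf). Roughly, and only as an assumption about the output shape rather than verbatim translate-c output:

    // C input:
    //     int x = 1;
    //     int *p = &x;
    //
    // Approximate Zig shape after this change:
    var x: c_int = 1;
    var p: ?*c_int = &x; // the pointer *type* is spelled with '*', nullable because C pointers can be null;
                         // '&x' is the plain address-of prefix operator (PrefixOpAddrOf)
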
diff --git a/std/array_list.zig b/std/array_list.zig
index b315194c3369..07a1db645126 100644
--- a/std/array_list.zig
+++ b/std/array_list.zig
@@ -17,10 +17,10 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
/// you uninitialized memory.
items: []align(A) T,
len: usize,
- allocator: &Allocator,
+ allocator: *Allocator,
/// Deinitialize with `deinit` or use `toOwnedSlice`.
- pub fn init(allocator: &Allocator) Self {
+ pub fn init(allocator: *Allocator) Self {
return Self{
.items = []align(A) T{},
.len = 0,
@@ -28,30 +28,30 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
};
}
- pub fn deinit(l: &const Self) void {
+ pub fn deinit(l: *const Self) void {
l.allocator.free(l.items);
}
- pub fn toSlice(l: &const Self) []align(A) T {
+ pub fn toSlice(l: *const Self) []align(A) T {
return l.items[0..l.len];
}
- pub fn toSliceConst(l: &const Self) []align(A) const T {
+ pub fn toSliceConst(l: *const Self) []align(A) const T {
return l.items[0..l.len];
}
- pub fn at(l: &const Self, n: usize) T {
+ pub fn at(l: *const Self, n: usize) T {
return l.toSliceConst()[n];
}
- pub fn count(self: &const Self) usize {
+ pub fn count(self: *const Self) usize {
return self.len;
}
/// ArrayList takes ownership of the passed in slice. The slice must have been
/// allocated with `allocator`.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
- pub fn fromOwnedSlice(allocator: &Allocator, slice: []align(A) T) Self {
+ pub fn fromOwnedSlice(allocator: *Allocator, slice: []align(A) T) Self {
return Self{
.items = slice,
.len = slice.len,
@@ -60,14 +60,14 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
}
/// The caller owns the returned memory. ArrayList becomes empty.
- pub fn toOwnedSlice(self: &Self) []align(A) T {
+ pub fn toOwnedSlice(self: *Self) []align(A) T {
const allocator = self.allocator;
const result = allocator.alignedShrink(T, A, self.items, self.len);
self.* = init(allocator);
return result;
}
- pub fn insert(l: &Self, n: usize, item: &const T) !void {
+ pub fn insert(l: *Self, n: usize, item: *const T) !void {
try l.ensureCapacity(l.len + 1);
l.len += 1;
@@ -75,7 +75,7 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
l.items[n] = item.*;
}
- pub fn insertSlice(l: &Self, n: usize, items: []align(A) const T) !void {
+ pub fn insertSlice(l: *Self, n: usize, items: []align(A) const T) !void {
try l.ensureCapacity(l.len + items.len);
l.len += items.len;
@@ -83,28 +83,28 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
mem.copy(T, l.items[n .. n + items.len], items);
}
- pub fn append(l: &Self, item: &const T) !void {
+ pub fn append(l: *Self, item: *const T) !void {
const new_item_ptr = try l.addOne();
new_item_ptr.* = item.*;
}
- pub fn appendSlice(l: &Self, items: []align(A) const T) !void {
+ pub fn appendSlice(l: *Self, items: []align(A) const T) !void {
try l.ensureCapacity(l.len + items.len);
mem.copy(T, l.items[l.len..], items);
l.len += items.len;
}
- pub fn resize(l: &Self, new_len: usize) !void {
+ pub fn resize(l: *Self, new_len: usize) !void {
try l.ensureCapacity(new_len);
l.len = new_len;
}
- pub fn shrink(l: &Self, new_len: usize) void {
+ pub fn shrink(l: *Self, new_len: usize) void {
assert(new_len <= l.len);
l.len = new_len;
}
- pub fn ensureCapacity(l: &Self, new_capacity: usize) !void {
+ pub fn ensureCapacity(l: *Self, new_capacity: usize) !void {
var better_capacity = l.items.len;
if (better_capacity >= new_capacity) return;
while (true) {
@@ -114,7 +114,7 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
l.items = try l.allocator.alignedRealloc(T, A, l.items, better_capacity);
}
- pub fn addOne(l: &Self) !&T {
+ pub fn addOne(l: *Self) !*T {
const new_length = l.len + 1;
try l.ensureCapacity(new_length);
const result = &l.items[l.len];
@@ -122,34 +122,34 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
return result;
}
- pub fn pop(self: &Self) T {
+ pub fn pop(self: *Self) T {
self.len -= 1;
return self.items[self.len];
}
- pub fn popOrNull(self: &Self) ?T {
+ pub fn popOrNull(self: *Self) ?T {
if (self.len == 0) return null;
return self.pop();
}
pub const Iterator = struct {
- list: &const Self,
+ list: *const Self,
// how many items have we returned
count: usize,
- pub fn next(it: &Iterator) ?T {
+ pub fn next(it: *Iterator) ?T {
if (it.count >= it.list.len) return null;
const val = it.list.at(it.count);
it.count += 1;
return val;
}
- pub fn reset(it: &Iterator) void {
+ pub fn reset(it: *Iterator) void {
it.count = 0;
}
};
- pub fn iterator(self: &const Self) Iterator {
+ pub fn iterator(self: *const Self) Iterator {
return Iterator{
.list = self,
.count = 0,
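
Since ArrayList now stores and accepts *Allocator, call sites keep the same shape; only the pointer spelling changes. A minimal usage sketch, assuming the std.ArrayList re-export and std.debug.global_allocator as used by the library's own tests of this era:

    const std = @import("std");
    const ArrayList = std.ArrayList;

    test "ArrayList basic usage with *Allocator" {
        var list = ArrayList(i32).init(std.debug.global_allocator); // init(allocator: *Allocator)
        defer list.deinit();
        try list.append(42);
        try list.append(43);
        std.debug.assert(list.count() == 2);
        std.debug.assert(list.at(0) == 42);
    }
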
diff --git a/std/atomic/queue.zig b/std/atomic/queue.zig
index 35180da8d1ff..142c95817373 100644
--- a/std/atomic/queue.zig
+++ b/std/atomic/queue.zig
@@ -5,36 +5,36 @@ const AtomicRmwOp = builtin.AtomicRmwOp;
/// Many reader, many writer, non-allocating, thread-safe, lock-free
pub fn Queue(comptime T: type) type {
return struct {
- head: &Node,
- tail: &Node,
+ head: *Node,
+ tail: *Node,
root: Node,
pub const Self = this;
pub const Node = struct {
- next: ?&Node,
+ next: ?*Node,
data: T,
};
// TODO: well defined copy elision: https://github.com/ziglang/zig/issues/287
- pub fn init(self: &Self) void {
+ pub fn init(self: *Self) void {
self.root.next = null;
self.head = &self.root;
self.tail = &self.root;
}
- pub fn put(self: &Self, node: &Node) void {
+ pub fn put(self: *Self, node: *Node) void {
node.next = null;
- const tail = @atomicRmw(&Node, &self.tail, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst);
- _ = @atomicRmw(?&Node, &tail.next, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst);
+ const tail = @atomicRmw(*Node, &self.tail, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst);
+ _ = @atomicRmw(?*Node, &tail.next, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst);
}
- pub fn get(self: &Self) ?&Node {
- var head = @atomicLoad(&Node, &self.head, AtomicOrder.SeqCst);
+ pub fn get(self: *Self) ?*Node {
+ var head = @atomicLoad(*Node, &self.head, AtomicOrder.SeqCst);
while (true) {
const node = head.next ?? return null;
- head = @cmpxchgWeak(&Node, &self.head, head, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? return node;
+ head = @cmpxchgWeak(*Node, &self.head, head, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? return node;
}
}
};
@@ -42,8 +42,8 @@ pub fn Queue(comptime T: type) type {
const std = @import("std");
const Context = struct {
- allocator: &std.mem.Allocator,
- queue: &Queue(i32),
+ allocator: *std.mem.Allocator,
+ queue: *Queue(i32),
put_sum: isize,
get_sum: isize,
get_count: usize,
@@ -79,11 +79,11 @@ test "std.atomic.queue" {
.get_count = 0,
};
- var putters: [put_thread_count]&std.os.Thread = undefined;
+ var putters: [put_thread_count]*std.os.Thread = undefined;
for (putters) |*t| {
t.* = try std.os.spawnThread(&context, startPuts);
}
- var getters: [put_thread_count]&std.os.Thread = undefined;
+ var getters: [put_thread_count]*std.os.Thread = undefined;
for (getters) |*t| {
t.* = try std.os.spawnThread(&context, startGets);
}
@@ -98,7 +98,7 @@ test "std.atomic.queue" {
std.debug.assert(context.get_count == puts_per_thread * put_thread_count);
}
-fn startPuts(ctx: &Context) u8 {
+fn startPuts(ctx: *Context) u8 {
var put_count: usize = puts_per_thread;
var r = std.rand.DefaultPrng.init(0xdeadbeef);
while (put_count != 0) : (put_count -= 1) {
@@ -112,7 +112,7 @@ fn startPuts(ctx: &Context) u8 {
return 0;
}
-fn startGets(ctx: &Context) u8 {
+fn startGets(ctx: *Context) u8 {
while (true) {
while (ctx.queue.get()) |node| {
std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
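
The intrusive Queue API is unchanged apart from the pointer spelling: callers still supply their own nodes and pass them by pointer. A minimal single-threaded sketch, assuming the type is exposed as std.atomic.Queue (node values are illustrative):

    const std = @import("std");
    const Queue = std.atomic.Queue; // assumed re-export of std/atomic/queue.zig

    test "Queue put/get with *Node" {
        var queue: Queue(i32) = undefined;
        queue.init(); // in-place init, pending copy elision (issue #287)

        var node = Queue(i32).Node{ .next = null, .data = 1 };
        queue.put(&node);

        const popped = queue.get() ?? unreachable; // ?? is the orelse operator of this era
        std.debug.assert(popped.data == 1);
        std.debug.assert(queue.get() == null);
    }
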
diff --git a/std/atomic/stack.zig b/std/atomic/stack.zig
index 400a1a3c4ff7..15611188d2a1 100644
--- a/std/atomic/stack.zig
+++ b/std/atomic/stack.zig
@@ -4,12 +4,12 @@ const AtomicOrder = builtin.AtomicOrder;
/// Many reader, many writer, non-allocating, thread-safe, lock-free
pub fn Stack(comptime T: type) type {
return struct {
- root: ?&Node,
+ root: ?*Node,
pub const Self = this;
pub const Node = struct {
- next: ?&Node,
+ next: ?*Node,
data: T,
};
@@ -19,36 +19,36 @@ pub fn Stack(comptime T: type) type {
/// push operation, but only if you are the first item in the stack. if you did not succeed in
/// being the first item in the stack, returns the other item that was there.
- pub fn pushFirst(self: &Self, node: &Node) ?&Node {
+ pub fn pushFirst(self: *Self, node: *Node) ?*Node {
node.next = null;
- return @cmpxchgStrong(?&Node, &self.root, null, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst);
+ return @cmpxchgStrong(?*Node, &self.root, null, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst);
}
- pub fn push(self: &Self, node: &Node) void {
- var root = @atomicLoad(?&Node, &self.root, AtomicOrder.SeqCst);
+ pub fn push(self: *Self, node: *Node) void {
+ var root = @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst);
while (true) {
node.next = root;
- root = @cmpxchgWeak(?&Node, &self.root, root, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? break;
+ root = @cmpxchgWeak(?*Node, &self.root, root, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? break;
}
}
- pub fn pop(self: &Self) ?&Node {
- var root = @atomicLoad(?&Node, &self.root, AtomicOrder.SeqCst);
+ pub fn pop(self: *Self) ?*Node {
+ var root = @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst);
while (true) {
- root = @cmpxchgWeak(?&Node, &self.root, root, (root ?? return null).next, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? return root;
+ root = @cmpxchgWeak(?*Node, &self.root, root, (root ?? return null).next, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? return root;
}
}
- pub fn isEmpty(self: &Self) bool {
- return @atomicLoad(?&Node, &self.root, AtomicOrder.SeqCst) == null;
+ pub fn isEmpty(self: *Self) bool {
+ return @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst) == null;
}
};
}
const std = @import("std");
const Context = struct {
- allocator: &std.mem.Allocator,
- stack: &Stack(i32),
+ allocator: *std.mem.Allocator,
+ stack: *Stack(i32),
put_sum: isize,
get_sum: isize,
get_count: usize,
@@ -82,11 +82,11 @@ test "std.atomic.stack" {
.get_count = 0,
};
- var putters: [put_thread_count]&std.os.Thread = undefined;
+ var putters: [put_thread_count]*std.os.Thread = undefined;
for (putters) |*t| {
t.* = try std.os.spawnThread(&context, startPuts);
}
- var getters: [put_thread_count]&std.os.Thread = undefined;
+ var getters: [put_thread_count]*std.os.Thread = undefined;
for (getters) |*t| {
t.* = try std.os.spawnThread(&context, startGets);
}
@@ -101,7 +101,7 @@ test "std.atomic.stack" {
std.debug.assert(context.get_count == puts_per_thread * put_thread_count);
}
-fn startPuts(ctx: &Context) u8 {
+fn startPuts(ctx: *Context) u8 {
var put_count: usize = puts_per_thread;
var r = std.rand.DefaultPrng.init(0xdeadbeef);
while (put_count != 0) : (put_count -= 1) {
@@ -115,7 +115,7 @@ fn startPuts(ctx: &Context) u8 {
return 0;
}
-fn startGets(ctx: &Context) u8 {
+fn startGets(ctx: *Context) u8 {
while (true) {
while (ctx.stack.pop()) |node| {
std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
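
The doc comment on pushFirst above is the subtle part of this API: it only succeeds for the very first node, and otherwise returns whatever node already sits at the root. A minimal sketch, assuming the std.atomic.Stack re-export and that root is the struct's only field, so it can be initialized directly:

    const std = @import("std");
    const Stack = std.atomic.Stack; // assumed re-export of std/atomic/stack.zig

    test "Stack pushFirst semantics" {
        var stack = Stack(i32){ .root = null };
        var a = Stack(i32).Node{ .next = null, .data = 1 };
        var b = Stack(i32).Node{ .next = null, .data = 2 };

        std.debug.assert(stack.pushFirst(&a) == null); // we were first, so the push happened
        std.debug.assert(stack.pushFirst(&b) != null); // a is already there; b was not pushed

        stack.push(&b); // the ordinary push always succeeds
        std.debug.assert((stack.pop() ?? unreachable).data == 2);
        std.debug.assert(!stack.isEmpty()); // a is still on the stack
    }
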
diff --git a/std/base64.zig b/std/base64.zig
index 204628a40521..d27bcbd2012a 100644
--- a/std/base64.zig
+++ b/std/base64.zig
@@ -32,7 +32,7 @@ pub const Base64Encoder = struct {
}
/// dest.len must be what you get from ::calcSize.
- pub fn encode(encoder: &const Base64Encoder, dest: []u8, source: []const u8) void {
+ pub fn encode(encoder: *const Base64Encoder, dest: []u8, source: []const u8) void {
assert(dest.len == Base64Encoder.calcSize(source.len));
var i: usize = 0;
@@ -107,7 +107,7 @@ pub const Base64Decoder = struct {
}
/// If the encoded buffer is detected to be invalid, returns error.InvalidPadding.
- pub fn calcSize(decoder: &const Base64Decoder, source: []const u8) !usize {
+ pub fn calcSize(decoder: *const Base64Decoder, source: []const u8) !usize {
if (source.len % 4 != 0) return error.InvalidPadding;
return calcDecodedSizeExactUnsafe(source, decoder.pad_char);
}
@@ -115,7 +115,7 @@ pub const Base64Decoder = struct {
/// dest.len must be what you get from ::calcSize.
/// invalid characters result in error.InvalidCharacter.
/// invalid padding results in error.InvalidPadding.
- pub fn decode(decoder: &const Base64Decoder, dest: []u8, source: []const u8) !void {
+ pub fn decode(decoder: *const Base64Decoder, dest: []u8, source: []const u8) !void {
assert(dest.len == (decoder.calcSize(source) catch unreachable));
assert(source.len % 4 == 0);
@@ -181,7 +181,7 @@ pub const Base64DecoderWithIgnore = struct {
/// Invalid padding results in error.InvalidPadding.
/// Decoding more data than can fit in dest results in error.OutputTooSmall. See also ::calcSizeUpperBound.
/// Returns the number of bytes written to dest.
- pub fn decode(decoder_with_ignore: &const Base64DecoderWithIgnore, dest: []u8, source: []const u8) !usize {
+ pub fn decode(decoder_with_ignore: *const Base64DecoderWithIgnore, dest: []u8, source: []const u8) !usize {
const decoder = &decoder_with_ignore.decoder;
var src_cursor: usize = 0;
@@ -290,13 +290,13 @@ pub const Base64DecoderUnsafe = struct {
}
/// The source buffer must be valid.
- pub fn calcSize(decoder: &const Base64DecoderUnsafe, source: []const u8) usize {
+ pub fn calcSize(decoder: *const Base64DecoderUnsafe, source: []const u8) usize {
return calcDecodedSizeExactUnsafe(source, decoder.pad_char);
}
/// dest.len must be what you get from ::calcDecodedSizeExactUnsafe.
/// invalid characters or padding will result in undefined values.
- pub fn decode(decoder: &const Base64DecoderUnsafe, dest: []u8, source: []const u8) void {
+ pub fn decode(decoder: *const Base64DecoderUnsafe, dest: []u8, source: []const u8) void {
assert(dest.len == decoder.calcSize(source));
var src_index: usize = 0;
diff --git a/std/buf_map.zig b/std/buf_map.zig
index 930fc36a7815..22d821ae7b64 100644
--- a/std/buf_map.zig
+++ b/std/buf_map.zig
@@ -11,12 +11,12 @@ pub const BufMap = struct {
const BufMapHashMap = HashMap([]const u8, []const u8, mem.hash_slice_u8, mem.eql_slice_u8);
- pub fn init(allocator: &Allocator) BufMap {
+ pub fn init(allocator: *Allocator) BufMap {
var self = BufMap{ .hash_map = BufMapHashMap.init(allocator) };
return self;
}
- pub fn deinit(self: &const BufMap) void {
+ pub fn deinit(self: *const BufMap) void {
var it = self.hash_map.iterator();
while (true) {
const entry = it.next() ?? break;
@@ -27,7 +27,7 @@ pub const BufMap = struct {
self.hash_map.deinit();
}
- pub fn set(self: &BufMap, key: []const u8, value: []const u8) !void {
+ pub fn set(self: *BufMap, key: []const u8, value: []const u8) !void {
self.delete(key);
const key_copy = try self.copy(key);
errdefer self.free(key_copy);
@@ -36,30 +36,30 @@ pub const BufMap = struct {
_ = try self.hash_map.put(key_copy, value_copy);
}
- pub fn get(self: &const BufMap, key: []const u8) ?[]const u8 {
+ pub fn get(self: *const BufMap, key: []const u8) ?[]const u8 {
const entry = self.hash_map.get(key) ?? return null;
return entry.value;
}
- pub fn delete(self: &BufMap, key: []const u8) void {
+ pub fn delete(self: *BufMap, key: []const u8) void {
const entry = self.hash_map.remove(key) ?? return;
self.free(entry.key);
self.free(entry.value);
}
- pub fn count(self: &const BufMap) usize {
+ pub fn count(self: *const BufMap) usize {
return self.hash_map.count();
}
- pub fn iterator(self: &const BufMap) BufMapHashMap.Iterator {
+ pub fn iterator(self: *const BufMap) BufMapHashMap.Iterator {
return self.hash_map.iterator();
}
- fn free(self: &const BufMap, value: []const u8) void {
+ fn free(self: *const BufMap, value: []const u8) void {
self.hash_map.allocator.free(value);
}
- fn copy(self: &const BufMap, value: []const u8) ![]const u8 {
+ fn copy(self: *const BufMap, value: []const u8) ![]const u8 {
return mem.dupe(self.hash_map.allocator, u8, value);
}
};
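
BufMap owns copies of everything it stores: set duplicates both key and value, and delete/deinit free them, so callers may pass transient slices. A minimal sketch, assuming the std.BufMap re-export and std.debug.global_allocator:

    const std = @import("std");
    const BufMap = std.BufMap;

    test "BufMap set/get/delete" {
        var map = BufMap.init(std.debug.global_allocator);
        defer map.deinit();

        try map.set("ZIG", "fast"); // both slices are copied into the map
        std.debug.assert(std.mem.eql(u8, map.get("ZIG") ?? unreachable, "fast"));
        std.debug.assert(map.count() == 1);

        map.delete("ZIG"); // frees the owned copies
        std.debug.assert(map.get("ZIG") == null);
    }
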
diff --git a/std/buf_set.zig b/std/buf_set.zig
index c5a80e16fb5b..03a050ed8bff 100644
--- a/std/buf_set.zig
+++ b/std/buf_set.zig
@@ -9,12 +9,12 @@ pub const BufSet = struct {
const BufSetHashMap = HashMap([]const u8, void, mem.hash_slice_u8, mem.eql_slice_u8);
- pub fn init(a: &Allocator) BufSet {
+ pub fn init(a: *Allocator) BufSet {
var self = BufSet{ .hash_map = BufSetHashMap.init(a) };
return self;
}
- pub fn deinit(self: &const BufSet) void {
+ pub fn deinit(self: *const BufSet) void {
var it = self.hash_map.iterator();
while (true) {
const entry = it.next() ?? break;
@@ -24,7 +24,7 @@ pub const BufSet = struct {
self.hash_map.deinit();
}
- pub fn put(self: &BufSet, key: []const u8) !void {
+ pub fn put(self: *BufSet, key: []const u8) !void {
if (self.hash_map.get(key) == null) {
const key_copy = try self.copy(key);
errdefer self.free(key_copy);
@@ -32,28 +32,28 @@ pub const BufSet = struct {
}
}
- pub fn delete(self: &BufSet, key: []const u8) void {
+ pub fn delete(self: *BufSet, key: []const u8) void {
const entry = self.hash_map.remove(key) ?? return;
self.free(entry.key);
}
- pub fn count(self: &const BufSet) usize {
+ pub fn count(self: *const BufSet) usize {
return self.hash_map.count();
}
- pub fn iterator(self: &const BufSet) BufSetHashMap.Iterator {
+ pub fn iterator(self: *const BufSet) BufSetHashMap.Iterator {
return self.hash_map.iterator();
}
- pub fn allocator(self: &const BufSet) &Allocator {
+ pub fn allocator(self: *const BufSet) *Allocator {
return self.hash_map.allocator;
}
- fn free(self: &const BufSet, value: []const u8) void {
+ fn free(self: *const BufSet, value: []const u8) void {
self.hash_map.allocator.free(value);
}
- fn copy(self: &const BufSet, value: []const u8) ![]const u8 {
+ fn copy(self: *const BufSet, value: []const u8) ![]const u8 {
const result = try self.hash_map.allocator.alloc(u8, value.len);
mem.copy(u8, result, value);
return result;
diff --git a/std/buffer.zig b/std/buffer.zig
index 90d63719e36e..305746e183e9 100644
--- a/std/buffer.zig
+++ b/std/buffer.zig
@@ -12,14 +12,14 @@ pub const Buffer = struct {
list: ArrayList(u8),
/// Must deinitialize with deinit.
- pub fn init(allocator: &Allocator, m: []const u8) !Buffer {
+ pub fn init(allocator: *Allocator, m: []const u8) !Buffer {
var self = try initSize(allocator, m.len);
mem.copy(u8, self.list.items, m);
return self;
}
/// Must deinitialize with deinit.
- pub fn initSize(allocator: &Allocator, size: usize) !Buffer {
+ pub fn initSize(allocator: *Allocator, size: usize) !Buffer {
var self = initNull(allocator);
try self.resize(size);
return self;
@@ -30,19 +30,19 @@ pub const Buffer = struct {
/// * ::replaceContents
/// * ::replaceContentsBuffer
/// * ::resize
- pub fn initNull(allocator: &Allocator) Buffer {
+ pub fn initNull(allocator: *Allocator) Buffer {
return Buffer{ .list = ArrayList(u8).init(allocator) };
}
/// Must deinitialize with deinit.
- pub fn initFromBuffer(buffer: &const Buffer) !Buffer {
+ pub fn initFromBuffer(buffer: *const Buffer) !Buffer {
return Buffer.init(buffer.list.allocator, buffer.toSliceConst());
}
/// Buffer takes ownership of the passed in slice. The slice must have been
/// allocated with `allocator`.
/// Must deinitialize with deinit.
- pub fn fromOwnedSlice(allocator: &Allocator, slice: []u8) Buffer {
+ pub fn fromOwnedSlice(allocator: *Allocator, slice: []u8) Buffer {
var self = Buffer{ .list = ArrayList(u8).fromOwnedSlice(allocator, slice) };
self.list.append(0);
return self;
@@ -50,79 +50,79 @@ pub const Buffer = struct {
/// The caller owns the returned memory. The Buffer becomes null and
/// is safe to `deinit`.
- pub fn toOwnedSlice(self: &Buffer) []u8 {
+ pub fn toOwnedSlice(self: *Buffer) []u8 {
const allocator = self.list.allocator;
const result = allocator.shrink(u8, self.list.items, self.len());
self.* = initNull(allocator);
return result;
}
- pub fn deinit(self: &Buffer) void {
+ pub fn deinit(self: *Buffer) void {
self.list.deinit();
}
- pub fn toSlice(self: &const Buffer) []u8 {
+ pub fn toSlice(self: *const Buffer) []u8 {
return self.list.toSlice()[0..self.len()];
}
- pub fn toSliceConst(self: &const Buffer) []const u8 {
+ pub fn toSliceConst(self: *const Buffer) []const u8 {
return self.list.toSliceConst()[0..self.len()];
}
- pub fn shrink(self: &Buffer, new_len: usize) void {
+ pub fn shrink(self: *Buffer, new_len: usize) void {
assert(new_len <= self.len());
self.list.shrink(new_len + 1);
self.list.items[self.len()] = 0;
}
- pub fn resize(self: &Buffer, new_len: usize) !void {
+ pub fn resize(self: *Buffer, new_len: usize) !void {
try self.list.resize(new_len + 1);
self.list.items[self.len()] = 0;
}
- pub fn isNull(self: &const Buffer) bool {
+ pub fn isNull(self: *const Buffer) bool {
return self.list.len == 0;
}
- pub fn len(self: &const Buffer) usize {
+ pub fn len(self: *const Buffer) usize {
return self.list.len - 1;
}
- pub fn append(self: &Buffer, m: []const u8) !void {
+ pub fn append(self: *Buffer, m: []const u8) !void {
const old_len = self.len();
try self.resize(old_len + m.len);
mem.copy(u8, self.list.toSlice()[old_len..], m);
}
- pub fn appendByte(self: &Buffer, byte: u8) !void {
+ pub fn appendByte(self: *Buffer, byte: u8) !void {
const old_len = self.len();
try self.resize(old_len + 1);
self.list.toSlice()[old_len] = byte;
}
- pub fn eql(self: &const Buffer, m: []const u8) bool {
+ pub fn eql(self: *const Buffer, m: []const u8) bool {
return mem.eql(u8, self.toSliceConst(), m);
}
- pub fn startsWith(self: &const Buffer, m: []const u8) bool {
+ pub fn startsWith(self: *const Buffer, m: []const u8) bool {
if (self.len() < m.len) return false;
return mem.eql(u8, self.list.items[0..m.len], m);
}
- pub fn endsWith(self: &const Buffer, m: []const u8) bool {
+ pub fn endsWith(self: *const Buffer, m: []const u8) bool {
const l = self.len();
if (l < m.len) return false;
const start = l - m.len;
return mem.eql(u8, self.list.items[start..l], m);
}
- pub fn replaceContents(self: &const Buffer, m: []const u8) !void {
+ pub fn replaceContents(self: *const Buffer, m: []const u8) !void {
try self.resize(m.len);
mem.copy(u8, self.list.toSlice(), m);
}
/// For passing to C functions.
- pub fn ptr(self: &const Buffer) &u8 {
+ pub fn ptr(self: *const Buffer) *u8 {
return self.list.items.ptr;
}
};
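
Buffer keeps a hidden trailing zero byte (len returns list.len - 1, and ptr exists for passing to C), so it behaves like a growable, C-compatible string. A minimal sketch, assuming std.debug.global_allocator:

    const std = @import("std");
    const Buffer = std.Buffer;

    test "Buffer append and compare" {
        var buf = try Buffer.init(std.debug.global_allocator, "hello");
        defer buf.deinit();

        try buf.append(", world");
        try buf.appendByte('!');

        std.debug.assert(buf.eql("hello, world!"));
        std.debug.assert(buf.startsWith("hello"));
        std.debug.assert(buf.endsWith("!"));
        std.debug.assert(buf.len() == 13);
    }
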
diff --git a/std/build.zig b/std/build.zig
index 9a6e17f7287a..fed02e081586 100644
--- a/std/build.zig
+++ b/std/build.zig
@@ -20,7 +20,7 @@ pub const Builder = struct {
install_tls: TopLevelStep,
have_uninstall_step: bool,
have_install_step: bool,
- allocator: &Allocator,
+ allocator: *Allocator,
lib_paths: ArrayList([]const u8),
include_paths: ArrayList([]const u8),
rpaths: ArrayList([]const u8),
@@ -36,9 +36,9 @@ pub const Builder = struct {
verbose_cimport: bool,
invalid_user_input: bool,
zig_exe: []const u8,
- default_step: &Step,
+ default_step: *Step,
env_map: BufMap,
- top_level_steps: ArrayList(&TopLevelStep),
+ top_level_steps: ArrayList(*TopLevelStep),
prefix: []const u8,
search_prefixes: ArrayList([]const u8),
lib_dir: []const u8,
@@ -82,7 +82,7 @@ pub const Builder = struct {
description: []const u8,
};
- pub fn init(allocator: &Allocator, zig_exe: []const u8, build_root: []const u8, cache_root: []const u8) Builder {
+ pub fn init(allocator: *Allocator, zig_exe: []const u8, build_root: []const u8, cache_root: []const u8) Builder {
var self = Builder{
.zig_exe = zig_exe,
.build_root = build_root,
@@ -102,7 +102,7 @@ pub const Builder = struct {
.user_input_options = UserInputOptionsMap.init(allocator),
.available_options_map = AvailableOptionsMap.init(allocator),
.available_options_list = ArrayList(AvailableOption).init(allocator),
- .top_level_steps = ArrayList(&TopLevelStep).init(allocator),
+ .top_level_steps = ArrayList(*TopLevelStep).init(allocator),
.default_step = undefined,
.env_map = os.getEnvMap(allocator) catch unreachable,
.prefix = undefined,
@@ -127,7 +127,7 @@ pub const Builder = struct {
return self;
}
- pub fn deinit(self: &Builder) void {
+ pub fn deinit(self: *Builder) void {
self.lib_paths.deinit();
self.include_paths.deinit();
self.rpaths.deinit();
@@ -135,81 +135,81 @@ pub const Builder = struct {
self.top_level_steps.deinit();
}
- pub fn setInstallPrefix(self: &Builder, maybe_prefix: ?[]const u8) void {
+ pub fn setInstallPrefix(self: *Builder, maybe_prefix: ?[]const u8) void {
self.prefix = maybe_prefix ?? "/usr/local"; // TODO better default
self.lib_dir = os.path.join(self.allocator, self.prefix, "lib") catch unreachable;
self.exe_dir = os.path.join(self.allocator, self.prefix, "bin") catch unreachable;
}
- pub fn addExecutable(self: &Builder, name: []const u8, root_src: ?[]const u8) &LibExeObjStep {
+ pub fn addExecutable(self: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep {
return LibExeObjStep.createExecutable(self, name, root_src);
}
- pub fn addObject(self: &Builder, name: []const u8, root_src: []const u8) &LibExeObjStep {
+ pub fn addObject(self: *Builder, name: []const u8, root_src: []const u8) *LibExeObjStep {
return LibExeObjStep.createObject(self, name, root_src);
}
- pub fn addSharedLibrary(self: &Builder, name: []const u8, root_src: ?[]const u8, ver: &const Version) &LibExeObjStep {
+ pub fn addSharedLibrary(self: *Builder, name: []const u8, root_src: ?[]const u8, ver: *const Version) *LibExeObjStep {
return LibExeObjStep.createSharedLibrary(self, name, root_src, ver);
}
- pub fn addStaticLibrary(self: &Builder, name: []const u8, root_src: ?[]const u8) &LibExeObjStep {
+ pub fn addStaticLibrary(self: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep {
return LibExeObjStep.createStaticLibrary(self, name, root_src);
}
- pub fn addTest(self: &Builder, root_src: []const u8) &TestStep {
+ pub fn addTest(self: *Builder, root_src: []const u8) *TestStep {
const test_step = self.allocator.create(TestStep) catch unreachable;
test_step.* = TestStep.init(self, root_src);
return test_step;
}
- pub fn addAssemble(self: &Builder, name: []const u8, src: []const u8) &LibExeObjStep {
+ pub fn addAssemble(self: *Builder, name: []const u8, src: []const u8) *LibExeObjStep {
const obj_step = LibExeObjStep.createObject(self, name, null);
obj_step.addAssemblyFile(src);
return obj_step;
}
- pub fn addCStaticLibrary(self: &Builder, name: []const u8) &LibExeObjStep {
+ pub fn addCStaticLibrary(self: *Builder, name: []const u8) *LibExeObjStep {
return LibExeObjStep.createCStaticLibrary(self, name);
}
- pub fn addCSharedLibrary(self: &Builder, name: []const u8, ver: &const Version) &LibExeObjStep {
+ pub fn addCSharedLibrary(self: *Builder, name: []const u8, ver: *const Version) *LibExeObjStep {
return LibExeObjStep.createCSharedLibrary(self, name, ver);
}
- pub fn addCExecutable(self: &Builder, name: []const u8) &LibExeObjStep {
+ pub fn addCExecutable(self: *Builder, name: []const u8) *LibExeObjStep {
return LibExeObjStep.createCExecutable(self, name);
}
- pub fn addCObject(self: &Builder, name: []const u8, src: []const u8) &LibExeObjStep {
+ pub fn addCObject(self: *Builder, name: []const u8, src: []const u8) *LibExeObjStep {
return LibExeObjStep.createCObject(self, name, src);
}
/// ::argv is copied.
- pub fn addCommand(self: &Builder, cwd: ?[]const u8, env_map: &const BufMap, argv: []const []const u8) &CommandStep {
+ pub fn addCommand(self: *Builder, cwd: ?[]const u8, env_map: *const BufMap, argv: []const []const u8) *CommandStep {
return CommandStep.create(self, cwd, env_map, argv);
}
- pub fn addWriteFile(self: &Builder, file_path: []const u8, data: []const u8) &WriteFileStep {
+ pub fn addWriteFile(self: *Builder, file_path: []const u8, data: []const u8) *WriteFileStep {
const write_file_step = self.allocator.create(WriteFileStep) catch unreachable;
write_file_step.* = WriteFileStep.init(self, file_path, data);
return write_file_step;
}
- pub fn addLog(self: &Builder, comptime format: []const u8, args: ...) &LogStep {
+ pub fn addLog(self: *Builder, comptime format: []const u8, args: ...) *LogStep {
const data = self.fmt(format, args);
const log_step = self.allocator.create(LogStep) catch unreachable;
log_step.* = LogStep.init(self, data);
return log_step;
}
- pub fn addRemoveDirTree(self: &Builder, dir_path: []const u8) &RemoveDirStep {
+ pub fn addRemoveDirTree(self: *Builder, dir_path: []const u8) *RemoveDirStep {
const remove_dir_step = self.allocator.create(RemoveDirStep) catch unreachable;
remove_dir_step.* = RemoveDirStep.init(self, dir_path);
return remove_dir_step;
}
- pub fn version(self: &const Builder, major: u32, minor: u32, patch: u32) Version {
+ pub fn version(self: *const Builder, major: u32, minor: u32, patch: u32) Version {
return Version{
.major = major,
.minor = minor,
@@ -217,20 +217,20 @@ pub const Builder = struct {
};
}
- pub fn addCIncludePath(self: &Builder, path: []const u8) void {
+ pub fn addCIncludePath(self: *Builder, path: []const u8) void {
self.include_paths.append(path) catch unreachable;
}
- pub fn addRPath(self: &Builder, path: []const u8) void {
+ pub fn addRPath(self: *Builder, path: []const u8) void {
self.rpaths.append(path) catch unreachable;
}
- pub fn addLibPath(self: &Builder, path: []const u8) void {
+ pub fn addLibPath(self: *Builder, path: []const u8) void {
self.lib_paths.append(path) catch unreachable;
}
- pub fn make(self: &Builder, step_names: []const []const u8) !void {
- var wanted_steps = ArrayList(&Step).init(self.allocator);
+ pub fn make(self: *Builder, step_names: []const []const u8) !void {
+ var wanted_steps = ArrayList(*Step).init(self.allocator);
defer wanted_steps.deinit();
if (step_names.len == 0) {
@@ -247,7 +247,7 @@ pub const Builder = struct {
}
}
- pub fn getInstallStep(self: &Builder) &Step {
+ pub fn getInstallStep(self: *Builder) *Step {
if (self.have_install_step) return &self.install_tls.step;
self.top_level_steps.append(&self.install_tls) catch unreachable;
@@ -255,7 +255,7 @@ pub const Builder = struct {
return &self.install_tls.step;
}
- pub fn getUninstallStep(self: &Builder) &Step {
+ pub fn getUninstallStep(self: *Builder) *Step {
if (self.have_uninstall_step) return &self.uninstall_tls.step;
self.top_level_steps.append(&self.uninstall_tls) catch unreachable;
@@ -263,7 +263,7 @@ pub const Builder = struct {
return &self.uninstall_tls.step;
}
- fn makeUninstall(uninstall_step: &Step) error!void {
+ fn makeUninstall(uninstall_step: *Step) error!void {
const uninstall_tls = @fieldParentPtr(TopLevelStep, "step", uninstall_step);
const self = @fieldParentPtr(Builder, "uninstall_tls", uninstall_tls);
@@ -277,7 +277,7 @@ pub const Builder = struct {
// TODO remove empty directories
}
- fn makeOneStep(self: &Builder, s: &Step) error!void {
+ fn makeOneStep(self: *Builder, s: *Step) error!void {
if (s.loop_flag) {
warn("Dependency loop detected:\n {}\n", s.name);
return error.DependencyLoopDetected;
@@ -298,7 +298,7 @@ pub const Builder = struct {
try s.make();
}
- fn getTopLevelStepByName(self: &Builder, name: []const u8) !&Step {
+ fn getTopLevelStepByName(self: *Builder, name: []const u8) !*Step {
for (self.top_level_steps.toSliceConst()) |top_level_step| {
if (mem.eql(u8, top_level_step.step.name, name)) {
return &top_level_step.step;
@@ -308,7 +308,7 @@ pub const Builder = struct {
return error.InvalidStepName;
}
- fn processNixOSEnvVars(self: &Builder) void {
+ fn processNixOSEnvVars(self: *Builder) void {
if (os.getEnvVarOwned(self.allocator, "NIX_CFLAGS_COMPILE")) |nix_cflags_compile| {
var it = mem.split(nix_cflags_compile, " ");
while (true) {
@@ -350,7 +350,7 @@ pub const Builder = struct {
}
}
- pub fn option(self: &Builder, comptime T: type, name: []const u8, description: []const u8) ?T {
+ pub fn option(self: *Builder, comptime T: type, name: []const u8, description: []const u8) ?T {
const type_id = comptime typeToEnum(T);
const available_option = AvailableOption{
.name = name,
@@ -403,7 +403,7 @@ pub const Builder = struct {
}
}
- pub fn step(self: &Builder, name: []const u8, description: []const u8) &Step {
+ pub fn step(self: *Builder, name: []const u8, description: []const u8) *Step {
const step_info = self.allocator.create(TopLevelStep) catch unreachable;
step_info.* = TopLevelStep{
.step = Step.initNoOp(name, self.allocator),
@@ -413,7 +413,7 @@ pub const Builder = struct {
return &step_info.step;
}
- pub fn standardReleaseOptions(self: &Builder) builtin.Mode {
+ pub fn standardReleaseOptions(self: *Builder) builtin.Mode {
if (self.release_mode) |mode| return mode;
const release_safe = self.option(bool, "release-safe", "optimizations on and safety on") ?? false;
@@ -429,7 +429,7 @@ pub const Builder = struct {
return mode;
}
- pub fn addUserInputOption(self: &Builder, name: []const u8, value: []const u8) bool {
+ pub fn addUserInputOption(self: *Builder, name: []const u8, value: []const u8) bool {
if (self.user_input_options.put(name, UserInputOption{
.name = name,
.value = UserValue{ .Scalar = value },
@@ -466,7 +466,7 @@ pub const Builder = struct {
return false;
}
- pub fn addUserInputFlag(self: &Builder, name: []const u8) bool {
+ pub fn addUserInputFlag(self: *Builder, name: []const u8) bool {
if (self.user_input_options.put(name, UserInputOption{
.name = name,
.value = UserValue{ .Flag = {} },
@@ -500,7 +500,7 @@ pub const Builder = struct {
};
}
- fn markInvalidUserInput(self: &Builder) void {
+ fn markInvalidUserInput(self: *Builder) void {
self.invalid_user_input = true;
}
@@ -514,7 +514,7 @@ pub const Builder = struct {
};
}
- pub fn validateUserInputDidItFail(self: &Builder) bool {
+ pub fn validateUserInputDidItFail(self: *Builder) bool {
// make sure all args are used
var it = self.user_input_options.iterator();
while (true) {
@@ -528,7 +528,7 @@ pub const Builder = struct {
return self.invalid_user_input;
}
- fn spawnChild(self: &Builder, argv: []const []const u8) !void {
+ fn spawnChild(self: *Builder, argv: []const []const u8) !void {
return self.spawnChildEnvMap(null, &self.env_map, argv);
}
@@ -540,7 +540,7 @@ pub const Builder = struct {
warn("\n");
}
- fn spawnChildEnvMap(self: &Builder, cwd: ?[]const u8, env_map: &const BufMap, argv: []const []const u8) !void {
+ fn spawnChildEnvMap(self: *Builder, cwd: ?[]const u8, env_map: *const BufMap, argv: []const []const u8) !void {
if (self.verbose) {
printCmd(cwd, argv);
}
@@ -573,28 +573,28 @@ pub const Builder = struct {
}
}
- pub fn makePath(self: &Builder, path: []const u8) !void {
+ pub fn makePath(self: *Builder, path: []const u8) !void {
os.makePath(self.allocator, self.pathFromRoot(path)) catch |err| {
warn("Unable to create path {}: {}\n", path, @errorName(err));
return err;
};
}
- pub fn installArtifact(self: &Builder, artifact: &LibExeObjStep) void {
+ pub fn installArtifact(self: *Builder, artifact: *LibExeObjStep) void {
self.getInstallStep().dependOn(&self.addInstallArtifact(artifact).step);
}
- pub fn addInstallArtifact(self: &Builder, artifact: &LibExeObjStep) &InstallArtifactStep {
+ pub fn addInstallArtifact(self: *Builder, artifact: *LibExeObjStep) *InstallArtifactStep {
return InstallArtifactStep.create(self, artifact);
}
///::dest_rel_path is relative to prefix path or it can be an absolute path
- pub fn installFile(self: &Builder, src_path: []const u8, dest_rel_path: []const u8) void {
+ pub fn installFile(self: *Builder, src_path: []const u8, dest_rel_path: []const u8) void {
self.getInstallStep().dependOn(&self.addInstallFile(src_path, dest_rel_path).step);
}
///::dest_rel_path is relative to prefix path or it can be an absolute path
- pub fn addInstallFile(self: &Builder, src_path: []const u8, dest_rel_path: []const u8) &InstallFileStep {
+ pub fn addInstallFile(self: *Builder, src_path: []const u8, dest_rel_path: []const u8) *InstallFileStep {
const full_dest_path = os.path.resolve(self.allocator, self.prefix, dest_rel_path) catch unreachable;
self.pushInstalledFile(full_dest_path);
@@ -603,16 +603,16 @@ pub const Builder = struct {
return install_step;
}
- pub fn pushInstalledFile(self: &Builder, full_path: []const u8) void {
+ pub fn pushInstalledFile(self: *Builder, full_path: []const u8) void {
_ = self.getUninstallStep();
self.installed_files.append(full_path) catch unreachable;
}
- fn copyFile(self: &Builder, source_path: []const u8, dest_path: []const u8) !void {
+ fn copyFile(self: *Builder, source_path: []const u8, dest_path: []const u8) !void {
return self.copyFileMode(source_path, dest_path, os.default_file_mode);
}
- fn copyFileMode(self: &Builder, source_path: []const u8, dest_path: []const u8, mode: os.FileMode) !void {
+ fn copyFileMode(self: *Builder, source_path: []const u8, dest_path: []const u8, mode: os.FileMode) !void {
if (self.verbose) {
warn("cp {} {}\n", source_path, dest_path);
}
@@ -629,15 +629,15 @@ pub const Builder = struct {
};
}
- fn pathFromRoot(self: &Builder, rel_path: []const u8) []u8 {
+ fn pathFromRoot(self: *Builder, rel_path: []const u8) []u8 {
return os.path.resolve(self.allocator, self.build_root, rel_path) catch unreachable;
}
- pub fn fmt(self: &Builder, comptime format: []const u8, args: ...) []u8 {
+ pub fn fmt(self: *Builder, comptime format: []const u8, args: ...) []u8 {
return fmt_lib.allocPrint(self.allocator, format, args) catch unreachable;
}
- fn getCCExe(self: &Builder) []const u8 {
+ fn getCCExe(self: *Builder) []const u8 {
if (builtin.environ == builtin.Environ.msvc) {
return "cl.exe";
} else {
@@ -645,7 +645,7 @@ pub const Builder = struct {
}
}
- pub fn findProgram(self: &Builder, names: []const []const u8, paths: []const []const u8) ![]const u8 {
+ pub fn findProgram(self: *Builder, names: []const []const u8, paths: []const []const u8) ![]const u8 {
// TODO report error for ambiguous situations
const exe_extension = (Target{ .Native = {} }).exeFileExt();
for (self.search_prefixes.toSliceConst()) |search_prefix| {
@@ -693,7 +693,7 @@ pub const Builder = struct {
return error.FileNotFound;
}
- pub fn exec(self: &Builder, argv: []const []const u8) ![]u8 {
+ pub fn exec(self: *Builder, argv: []const []const u8) ![]u8 {
const max_output_size = 100 * 1024;
const result = try os.ChildProcess.exec(self.allocator, argv, null, null, max_output_size);
switch (result.term) {
@@ -715,7 +715,7 @@ pub const Builder = struct {
}
}
- pub fn addSearchPrefix(self: &Builder, search_prefix: []const u8) void {
+ pub fn addSearchPrefix(self: *Builder, search_prefix: []const u8) void {
self.search_prefixes.append(search_prefix) catch unreachable;
}
};
@@ -736,7 +736,7 @@ pub const Target = union(enum) {
Native: void,
Cross: CrossTarget,
- pub fn oFileExt(self: &const Target) []const u8 {
+ pub fn oFileExt(self: *const Target) []const u8 {
const environ = switch (self.*) {
Target.Native => builtin.environ,
Target.Cross => |t| t.environ,
@@ -747,49 +747,49 @@ pub const Target = union(enum) {
};
}
- pub fn exeFileExt(self: &const Target) []const u8 {
+ pub fn exeFileExt(self: *const Target) []const u8 {
return switch (self.getOs()) {
builtin.Os.windows => ".exe",
else => "",
};
}
- pub fn libFileExt(self: &const Target) []const u8 {
+ pub fn libFileExt(self: *const Target) []const u8 {
return switch (self.getOs()) {
builtin.Os.windows => ".lib",
else => ".a",
};
}
- pub fn getOs(self: &const Target) builtin.Os {
+ pub fn getOs(self: *const Target) builtin.Os {
return switch (self.*) {
Target.Native => builtin.os,
Target.Cross => |t| t.os,
};
}
- pub fn isDarwin(self: &const Target) bool {
+ pub fn isDarwin(self: *const Target) bool {
return switch (self.getOs()) {
builtin.Os.ios, builtin.Os.macosx => true,
else => false,
};
}
- pub fn isWindows(self: &const Target) bool {
+ pub fn isWindows(self: *const Target) bool {
return switch (self.getOs()) {
builtin.Os.windows => true,
else => false,
};
}
- pub fn wantSharedLibSymLinks(self: &const Target) bool {
+ pub fn wantSharedLibSymLinks(self: *const Target) bool {
return !self.isWindows();
}
};
pub const LibExeObjStep = struct {
step: Step,
- builder: &Builder,
+ builder: *Builder,
name: []const u8,
target: Target,
link_libs: BufSet,
@@ -836,56 +836,56 @@ pub const LibExeObjStep = struct {
Obj,
};
- pub fn createSharedLibrary(builder: &Builder, name: []const u8, root_src: ?[]const u8, ver: &const Version) &LibExeObjStep {
+ pub fn createSharedLibrary(builder: *Builder, name: []const u8, root_src: ?[]const u8, ver: *const Version) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initExtraArgs(builder, name, root_src, Kind.Lib, false, ver);
return self;
}
- pub fn createCSharedLibrary(builder: &Builder, name: []const u8, version: &const Version) &LibExeObjStep {
+ pub fn createCSharedLibrary(builder: *Builder, name: []const u8, version: *const Version) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initC(builder, name, Kind.Lib, version, false);
return self;
}
- pub fn createStaticLibrary(builder: &Builder, name: []const u8, root_src: ?[]const u8) &LibExeObjStep {
+ pub fn createStaticLibrary(builder: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initExtraArgs(builder, name, root_src, Kind.Lib, true, builder.version(0, 0, 0));
return self;
}
- pub fn createCStaticLibrary(builder: &Builder, name: []const u8) &LibExeObjStep {
+ pub fn createCStaticLibrary(builder: *Builder, name: []const u8) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initC(builder, name, Kind.Lib, builder.version(0, 0, 0), true);
return self;
}
- pub fn createObject(builder: &Builder, name: []const u8, root_src: []const u8) &LibExeObjStep {
+ pub fn createObject(builder: *Builder, name: []const u8, root_src: []const u8) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initExtraArgs(builder, name, root_src, Kind.Obj, false, builder.version(0, 0, 0));
return self;
}
- pub fn createCObject(builder: &Builder, name: []const u8, src: []const u8) &LibExeObjStep {
+ pub fn createCObject(builder: *Builder, name: []const u8, src: []const u8) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initC(builder, name, Kind.Obj, builder.version(0, 0, 0), false);
self.object_src = src;
return self;
}
- pub fn createExecutable(builder: &Builder, name: []const u8, root_src: ?[]const u8) &LibExeObjStep {
+ pub fn createExecutable(builder: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initExtraArgs(builder, name, root_src, Kind.Exe, false, builder.version(0, 0, 0));
return self;
}
- pub fn createCExecutable(builder: &Builder, name: []const u8) &LibExeObjStep {
+ pub fn createCExecutable(builder: *Builder, name: []const u8) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initC(builder, name, Kind.Exe, builder.version(0, 0, 0), false);
return self;
}
- fn initExtraArgs(builder: &Builder, name: []const u8, root_src: ?[]const u8, kind: Kind, static: bool, ver: &const Version) LibExeObjStep {
+ fn initExtraArgs(builder: *Builder, name: []const u8, root_src: ?[]const u8, kind: Kind, static: bool, ver: *const Version) LibExeObjStep {
var self = LibExeObjStep{
.strip = false,
.builder = builder,
@@ -924,7 +924,7 @@ pub const LibExeObjStep = struct {
return self;
}
- fn initC(builder: &Builder, name: []const u8, kind: Kind, version: &const Version, static: bool) LibExeObjStep {
+ fn initC(builder: *Builder, name: []const u8, kind: Kind, version: *const Version, static: bool) LibExeObjStep {
var self = LibExeObjStep{
.builder = builder,
.name = name,
@@ -964,7 +964,7 @@ pub const LibExeObjStep = struct {
return self;
}
- fn computeOutFileNames(self: &LibExeObjStep) void {
+ fn computeOutFileNames(self: *LibExeObjStep) void {
switch (self.kind) {
Kind.Obj => {
self.out_filename = self.builder.fmt("{}{}", self.name, self.target.oFileExt());
@@ -996,7 +996,7 @@ pub const LibExeObjStep = struct {
}
}
- pub fn setTarget(self: &LibExeObjStep, target_arch: builtin.Arch, target_os: builtin.Os, target_environ: builtin.Environ) void {
+ pub fn setTarget(self: *LibExeObjStep, target_arch: builtin.Arch, target_os: builtin.Os, target_environ: builtin.Environ) void {
self.target = Target{
.Cross = CrossTarget{
.arch = target_arch,
@@ -1008,16 +1008,16 @@ pub const LibExeObjStep = struct {
}
// TODO respect this in the C args
- pub fn setLinkerScriptPath(self: &LibExeObjStep, path: []const u8) void {
+ pub fn setLinkerScriptPath(self: *LibExeObjStep, path: []const u8) void {
self.linker_script = path;
}
- pub fn linkFramework(self: &LibExeObjStep, framework_name: []const u8) void {
+ pub fn linkFramework(self: *LibExeObjStep, framework_name: []const u8) void {
assert(self.target.isDarwin());
self.frameworks.put(framework_name) catch unreachable;
}
- pub fn linkLibrary(self: &LibExeObjStep, lib: &LibExeObjStep) void {
+ pub fn linkLibrary(self: *LibExeObjStep, lib: *LibExeObjStep) void {
assert(self.kind != Kind.Obj);
assert(lib.kind == Kind.Lib);
@@ -1038,26 +1038,26 @@ pub const LibExeObjStep = struct {
}
}
- pub fn linkSystemLibrary(self: &LibExeObjStep, name: []const u8) void {
+ pub fn linkSystemLibrary(self: *LibExeObjStep, name: []const u8) void {
assert(self.kind != Kind.Obj);
self.link_libs.put(name) catch unreachable;
}
- pub fn addSourceFile(self: &LibExeObjStep, file: []const u8) void {
+ pub fn addSourceFile(self: *LibExeObjStep, file: []const u8) void {
assert(self.kind != Kind.Obj);
assert(!self.is_zig);
self.source_files.append(file) catch unreachable;
}
- pub fn setVerboseLink(self: &LibExeObjStep, value: bool) void {
+ pub fn setVerboseLink(self: *LibExeObjStep, value: bool) void {
self.verbose_link = value;
}
- pub fn setBuildMode(self: &LibExeObjStep, mode: builtin.Mode) void {
+ pub fn setBuildMode(self: *LibExeObjStep, mode: builtin.Mode) void {
self.build_mode = mode;
}
- pub fn setOutputPath(self: &LibExeObjStep, file_path: []const u8) void {
+ pub fn setOutputPath(self: *LibExeObjStep, file_path: []const u8) void {
self.output_path = file_path;
// catch a common mistake
@@ -1066,11 +1066,11 @@ pub const LibExeObjStep = struct {
}
}
- pub fn getOutputPath(self: &LibExeObjStep) []const u8 {
+ pub fn getOutputPath(self: *LibExeObjStep) []const u8 {
return if (self.output_path) |output_path| output_path else os.path.join(self.builder.allocator, self.builder.cache_root, self.out_filename) catch unreachable;
}
- pub fn setOutputHPath(self: &LibExeObjStep, file_path: []const u8) void {
+ pub fn setOutputHPath(self: *LibExeObjStep, file_path: []const u8) void {
self.output_h_path = file_path;
// catch a common mistake
@@ -1079,21 +1079,21 @@ pub const LibExeObjStep = struct {
}
}
- pub fn getOutputHPath(self: &LibExeObjStep) []const u8 {
+ pub fn getOutputHPath(self: *LibExeObjStep) []const u8 {
return if (self.output_h_path) |output_h_path| output_h_path else os.path.join(self.builder.allocator, self.builder.cache_root, self.out_h_filename) catch unreachable;
}
- pub fn addAssemblyFile(self: &LibExeObjStep, path: []const u8) void {
+ pub fn addAssemblyFile(self: *LibExeObjStep, path: []const u8) void {
self.assembly_files.append(path) catch unreachable;
}
- pub fn addObjectFile(self: &LibExeObjStep, path: []const u8) void {
+ pub fn addObjectFile(self: *LibExeObjStep, path: []const u8) void {
assert(self.kind != Kind.Obj);
self.object_files.append(path) catch unreachable;
}
- pub fn addObject(self: &LibExeObjStep, obj: &LibExeObjStep) void {
+ pub fn addObject(self: *LibExeObjStep, obj: *LibExeObjStep) void {
assert(obj.kind == Kind.Obj);
assert(self.kind != Kind.Obj);
@@ -1110,15 +1110,15 @@ pub const LibExeObjStep = struct {
self.include_dirs.append(self.builder.cache_root) catch unreachable;
}
- pub fn addIncludeDir(self: &LibExeObjStep, path: []const u8) void {
+ pub fn addIncludeDir(self: *LibExeObjStep, path: []const u8) void {
self.include_dirs.append(path) catch unreachable;
}
- pub fn addLibPath(self: &LibExeObjStep, path: []const u8) void {
+ pub fn addLibPath(self: *LibExeObjStep, path: []const u8) void {
self.lib_paths.append(path) catch unreachable;
}
- pub fn addPackagePath(self: &LibExeObjStep, name: []const u8, pkg_index_path: []const u8) void {
+ pub fn addPackagePath(self: *LibExeObjStep, name: []const u8, pkg_index_path: []const u8) void {
assert(self.is_zig);
self.packages.append(Pkg{
@@ -1127,23 +1127,23 @@ pub const LibExeObjStep = struct {
}) catch unreachable;
}
- pub fn addCompileFlags(self: &LibExeObjStep, flags: []const []const u8) void {
+ pub fn addCompileFlags(self: *LibExeObjStep, flags: []const []const u8) void {
for (flags) |flag| {
self.cflags.append(flag) catch unreachable;
}
}
- pub fn setNoStdLib(self: &LibExeObjStep, disable: bool) void {
+ pub fn setNoStdLib(self: *LibExeObjStep, disable: bool) void {
assert(!self.is_zig);
self.disable_libc = disable;
}
- fn make(step: &Step) !void {
+ fn make(step: *Step) !void {
const self = @fieldParentPtr(LibExeObjStep, "step", step);
return if (self.is_zig) self.makeZig() else self.makeC();
}
- fn makeZig(self: &LibExeObjStep) !void {
+ fn makeZig(self: *LibExeObjStep) !void {
const builder = self.builder;
assert(self.is_zig);
@@ -1309,7 +1309,7 @@ pub const LibExeObjStep = struct {
}
}
- fn appendCompileFlags(self: &LibExeObjStep, args: &ArrayList([]const u8)) void {
+ fn appendCompileFlags(self: *LibExeObjStep, args: *ArrayList([]const u8)) void {
if (!self.strip) {
args.append("-g") catch unreachable;
}
@@ -1354,7 +1354,7 @@ pub const LibExeObjStep = struct {
}
}
- fn makeC(self: &LibExeObjStep) !void {
+ fn makeC(self: *LibExeObjStep) !void {
const builder = self.builder;
const cc = builder.getCCExe();
@@ -1580,7 +1580,7 @@ pub const LibExeObjStep = struct {
pub const TestStep = struct {
step: Step,
- builder: &Builder,
+ builder: *Builder,
root_src: []const u8,
build_mode: builtin.Mode,
verbose: bool,
@@ -1591,7 +1591,7 @@ pub const TestStep = struct {
exec_cmd_args: ?[]const ?[]const u8,
include_dirs: ArrayList([]const u8),
- pub fn init(builder: &Builder, root_src: []const u8) TestStep {
+ pub fn init(builder: *Builder, root_src: []const u8) TestStep {
const step_name = builder.fmt("test {}", root_src);
return TestStep{
.step = Step.init(step_name, builder.allocator, make),
@@ -1608,31 +1608,31 @@ pub const TestStep = struct {
};
}
- pub fn setVerbose(self: &TestStep, value: bool) void {
+ pub fn setVerbose(self: *TestStep, value: bool) void {
self.verbose = value;
}
- pub fn addIncludeDir(self: &TestStep, path: []const u8) void {
+ pub fn addIncludeDir(self: *TestStep, path: []const u8) void {
self.include_dirs.append(path) catch unreachable;
}
- pub fn setBuildMode(self: &TestStep, mode: builtin.Mode) void {
+ pub fn setBuildMode(self: *TestStep, mode: builtin.Mode) void {
self.build_mode = mode;
}
- pub fn linkSystemLibrary(self: &TestStep, name: []const u8) void {
+ pub fn linkSystemLibrary(self: *TestStep, name: []const u8) void {
self.link_libs.put(name) catch unreachable;
}
- pub fn setNamePrefix(self: &TestStep, text: []const u8) void {
+ pub fn setNamePrefix(self: *TestStep, text: []const u8) void {
self.name_prefix = text;
}
- pub fn setFilter(self: &TestStep, text: ?[]const u8) void {
+ pub fn setFilter(self: *TestStep, text: ?[]const u8) void {
self.filter = text;
}
- pub fn setTarget(self: &TestStep, target_arch: builtin.Arch, target_os: builtin.Os, target_environ: builtin.Environ) void {
+ pub fn setTarget(self: *TestStep, target_arch: builtin.Arch, target_os: builtin.Os, target_environ: builtin.Environ) void {
self.target = Target{
.Cross = CrossTarget{
.arch = target_arch,
@@ -1642,11 +1642,11 @@ pub const TestStep = struct {
};
}
- pub fn setExecCmd(self: &TestStep, args: []const ?[]const u8) void {
+ pub fn setExecCmd(self: *TestStep, args: []const ?[]const u8) void {
self.exec_cmd_args = args;
}
- fn make(step: &Step) !void {
+ fn make(step: *Step) !void {
const self = @fieldParentPtr(TestStep, "step", step);
const builder = self.builder;
@@ -1739,13 +1739,13 @@ pub const TestStep = struct {
pub const CommandStep = struct {
step: Step,
- builder: &Builder,
+ builder: *Builder,
argv: [][]const u8,
cwd: ?[]const u8,
- env_map: &const BufMap,
+ env_map: *const BufMap,
/// ::argv is copied.
- pub fn create(builder: &Builder, cwd: ?[]const u8, env_map: &const BufMap, argv: []const []const u8) &CommandStep {
+ pub fn create(builder: *Builder, cwd: ?[]const u8, env_map: *const BufMap, argv: []const []const u8) *CommandStep {
const self = builder.allocator.create(CommandStep) catch unreachable;
self.* = CommandStep{
.builder = builder,
@@ -1759,7 +1759,7 @@ pub const CommandStep = struct {
return self;
}
- fn make(step: &Step) !void {
+ fn make(step: *Step) !void {
const self = @fieldParentPtr(CommandStep, "step", step);
const cwd = if (self.cwd) |cwd| self.builder.pathFromRoot(cwd) else self.builder.build_root;
@@ -1769,13 +1769,13 @@ pub const CommandStep = struct {
const InstallArtifactStep = struct {
step: Step,
- builder: &Builder,
- artifact: &LibExeObjStep,
+ builder: *Builder,
+ artifact: *LibExeObjStep,
dest_file: []const u8,
const Self = this;
- pub fn create(builder: &Builder, artifact: &LibExeObjStep) &Self {
+ pub fn create(builder: *Builder, artifact: *LibExeObjStep) *Self {
const self = builder.allocator.create(Self) catch unreachable;
const dest_dir = switch (artifact.kind) {
LibExeObjStep.Kind.Obj => unreachable,
@@ -1797,7 +1797,7 @@ const InstallArtifactStep = struct {
return self;
}
- fn make(step: &Step) !void {
+ fn make(step: *Step) !void {
const self = @fieldParentPtr(Self, "step", step);
const builder = self.builder;
@@ -1818,11 +1818,11 @@ const InstallArtifactStep = struct {
pub const InstallFileStep = struct {
step: Step,
- builder: &Builder,
+ builder: *Builder,
src_path: []const u8,
dest_path: []const u8,
- pub fn init(builder: &Builder, src_path: []const u8, dest_path: []const u8) InstallFileStep {
+ pub fn init(builder: *Builder, src_path: []const u8, dest_path: []const u8) InstallFileStep {
return InstallFileStep{
.builder = builder,
.step = Step.init(builder.fmt("install {}", src_path), builder.allocator, make),
@@ -1831,7 +1831,7 @@ pub const InstallFileStep = struct {
};
}
- fn make(step: &Step) !void {
+ fn make(step: *Step) !void {
const self = @fieldParentPtr(InstallFileStep, "step", step);
try self.builder.copyFile(self.src_path, self.dest_path);
}
@@ -1839,11 +1839,11 @@ pub const InstallFileStep = struct {
pub const WriteFileStep = struct {
step: Step,
- builder: &Builder,
+ builder: *Builder,
file_path: []const u8,
data: []const u8,
- pub fn init(builder: &Builder, file_path: []const u8, data: []const u8) WriteFileStep {
+ pub fn init(builder: *Builder, file_path: []const u8, data: []const u8) WriteFileStep {
return WriteFileStep{
.builder = builder,
.step = Step.init(builder.fmt("writefile {}", file_path), builder.allocator, make),
@@ -1852,7 +1852,7 @@ pub const WriteFileStep = struct {
};
}
- fn make(step: &Step) !void {
+ fn make(step: *Step) !void {
const self = @fieldParentPtr(WriteFileStep, "step", step);
const full_path = self.builder.pathFromRoot(self.file_path);
const full_path_dir = os.path.dirname(full_path);
@@ -1869,10 +1869,10 @@ pub const WriteFileStep = struct {
pub const LogStep = struct {
step: Step,
- builder: &Builder,
+ builder: *Builder,
data: []const u8,
- pub fn init(builder: &Builder, data: []const u8) LogStep {
+ pub fn init(builder: *Builder, data: []const u8) LogStep {
return LogStep{
.builder = builder,
.step = Step.init(builder.fmt("log {}", data), builder.allocator, make),
@@ -1880,7 +1880,7 @@ pub const LogStep = struct {
};
}
- fn make(step: &Step) error!void {
+ fn make(step: *Step) error!void {
const self = @fieldParentPtr(LogStep, "step", step);
warn("{}", self.data);
}
@@ -1888,10 +1888,10 @@ pub const LogStep = struct {
pub const RemoveDirStep = struct {
step: Step,
- builder: &Builder,
+ builder: *Builder,
dir_path: []const u8,
- pub fn init(builder: &Builder, dir_path: []const u8) RemoveDirStep {
+ pub fn init(builder: *Builder, dir_path: []const u8) RemoveDirStep {
return RemoveDirStep{
.builder = builder,
.step = Step.init(builder.fmt("RemoveDir {}", dir_path), builder.allocator, make),
@@ -1899,7 +1899,7 @@ pub const RemoveDirStep = struct {
};
}
- fn make(step: &Step) !void {
+ fn make(step: *Step) !void {
const self = @fieldParentPtr(RemoveDirStep, "step", step);
const full_path = self.builder.pathFromRoot(self.dir_path);
@@ -1912,39 +1912,39 @@ pub const RemoveDirStep = struct {
pub const Step = struct {
name: []const u8,
- makeFn: fn (self: &Step) error!void,
- dependencies: ArrayList(&Step),
+ makeFn: fn (self: *Step) error!void,
+ dependencies: ArrayList(*Step),
loop_flag: bool,
done_flag: bool,
- pub fn init(name: []const u8, allocator: &Allocator, makeFn: fn (&Step) error!void) Step {
+ pub fn init(name: []const u8, allocator: *Allocator, makeFn: fn (*Step) error!void) Step {
return Step{
.name = name,
.makeFn = makeFn,
- .dependencies = ArrayList(&Step).init(allocator),
+ .dependencies = ArrayList(*Step).init(allocator),
.loop_flag = false,
.done_flag = false,
};
}
- pub fn initNoOp(name: []const u8, allocator: &Allocator) Step {
+ pub fn initNoOp(name: []const u8, allocator: *Allocator) Step {
return init(name, allocator, makeNoOp);
}
- pub fn make(self: &Step) !void {
+ pub fn make(self: *Step) !void {
if (self.done_flag) return;
try self.makeFn(self);
self.done_flag = true;
}
- pub fn dependOn(self: &Step, other: &Step) void {
+ pub fn dependOn(self: *Step, other: *Step) void {
self.dependencies.append(other) catch unreachable;
}
- fn makeNoOp(self: &Step) error!void {}
+ fn makeNoOp(self: *Step) error!void {}
};
-fn doAtomicSymLinks(allocator: &Allocator, output_path: []const u8, filename_major_only: []const u8, filename_name_only: []const u8) !void {
+fn doAtomicSymLinks(allocator: *Allocator, output_path: []const u8, filename_major_only: []const u8, filename_name_only: []const u8) !void {
const out_dir = os.path.dirname(output_path);
const out_basename = os.path.basename(output_path);
// sym link for libfoo.so.1 to libfoo.so.1.2.3
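
Taken together, the Builder changes mean a downstream build.zig only needs its pointer annotations flipped; the call pattern is untouched. A minimal sketch of such a build script under the new syntax (the names "hello" and "src/main.zig" are illustrative):

    const Builder = @import("std").build.Builder;

    pub fn build(b: *Builder) !void {
        const mode = b.standardReleaseOptions();

        const exe = b.addExecutable("hello", "src/main.zig");
        exe.setBuildMode(mode);

        b.default_step.dependOn(&exe.step);
        b.installArtifact(exe); // hooks the artifact into the install step
    }
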
diff --git a/std/c/darwin.zig b/std/c/darwin.zig
index 6a33c994bf01..69395e6b27bc 100644
--- a/std/c/darwin.zig
+++ b/std/c/darwin.zig
@@ -1,10 +1,10 @@
-extern "c" fn __error() &c_int;
-pub extern "c" fn _NSGetExecutablePath(buf: &u8, bufsize: &u32) c_int;
+extern "c" fn __error() *c_int;
+pub extern "c" fn _NSGetExecutablePath(buf: *u8, bufsize: *u32) c_int;
-pub extern "c" fn __getdirentries64(fd: c_int, buf_ptr: &u8, buf_len: usize, basep: &i64) usize;
+pub extern "c" fn __getdirentries64(fd: c_int, buf_ptr: *u8, buf_len: usize, basep: *i64) usize;
pub extern "c" fn mach_absolute_time() u64;
-pub extern "c" fn mach_timebase_info(tinfo: ?&mach_timebase_info_data) void;
+pub extern "c" fn mach_timebase_info(tinfo: ?*mach_timebase_info_data) void;
pub use @import("../os/darwin_errno.zig");
diff --git a/std/c/index.zig b/std/c/index.zig
index f9704f47388c..114b79cdae28 100644
--- a/std/c/index.zig
+++ b/std/c/index.zig
@@ -13,49 +13,49 @@ pub extern "c" fn abort() noreturn;
pub extern "c" fn exit(code: c_int) noreturn;
pub extern "c" fn isatty(fd: c_int) c_int;
pub extern "c" fn close(fd: c_int) c_int;
-pub extern "c" fn fstat(fd: c_int, buf: &Stat) c_int;
-pub extern "c" fn @"fstat$INODE64"(fd: c_int, buf: &Stat) c_int;
+pub extern "c" fn fstat(fd: c_int, buf: *Stat) c_int;
+pub extern "c" fn @"fstat$INODE64"(fd: c_int, buf: *Stat) c_int;
pub extern "c" fn lseek(fd: c_int, offset: isize, whence: c_int) isize;
-pub extern "c" fn open(path: &const u8, oflag: c_int, ...) c_int;
+pub extern "c" fn open(path: *const u8, oflag: c_int, ...) c_int;
pub extern "c" fn raise(sig: c_int) c_int;
-pub extern "c" fn read(fd: c_int, buf: &c_void, nbyte: usize) isize;
-pub extern "c" fn stat(noalias path: &const u8, noalias buf: &Stat) c_int;
-pub extern "c" fn write(fd: c_int, buf: &const c_void, nbyte: usize) isize;
-pub extern "c" fn mmap(addr: ?&c_void, len: usize, prot: c_int, flags: c_int, fd: c_int, offset: isize) ?&c_void;
-pub extern "c" fn munmap(addr: &c_void, len: usize) c_int;
-pub extern "c" fn unlink(path: &const u8) c_int;
-pub extern "c" fn getcwd(buf: &u8, size: usize) ?&u8;
-pub extern "c" fn waitpid(pid: c_int, stat_loc: &c_int, options: c_int) c_int;
+pub extern "c" fn read(fd: c_int, buf: *c_void, nbyte: usize) isize;
+pub extern "c" fn stat(noalias path: *const u8, noalias buf: *Stat) c_int;
+pub extern "c" fn write(fd: c_int, buf: *const c_void, nbyte: usize) isize;
+pub extern "c" fn mmap(addr: ?*c_void, len: usize, prot: c_int, flags: c_int, fd: c_int, offset: isize) ?*c_void;
+pub extern "c" fn munmap(addr: *c_void, len: usize) c_int;
+pub extern "c" fn unlink(path: *const u8) c_int;
+pub extern "c" fn getcwd(buf: *u8, size: usize) ?*u8;
+pub extern "c" fn waitpid(pid: c_int, stat_loc: *c_int, options: c_int) c_int;
pub extern "c" fn fork() c_int;
-pub extern "c" fn access(path: &const u8, mode: c_uint) c_int;
-pub extern "c" fn pipe(fds: &c_int) c_int;
-pub extern "c" fn mkdir(path: &const u8, mode: c_uint) c_int;
-pub extern "c" fn symlink(existing: &const u8, new: &const u8) c_int;
-pub extern "c" fn rename(old: &const u8, new: &const u8) c_int;
-pub extern "c" fn chdir(path: &const u8) c_int;
-pub extern "c" fn execve(path: &const u8, argv: &const ?&const u8, envp: &const ?&const u8) c_int;
+pub extern "c" fn access(path: *const u8, mode: c_uint) c_int;
+pub extern "c" fn pipe(fds: *c_int) c_int;
+pub extern "c" fn mkdir(path: *const u8, mode: c_uint) c_int;
+pub extern "c" fn symlink(existing: *const u8, new: *const u8) c_int;
+pub extern "c" fn rename(old: *const u8, new: *const u8) c_int;
+pub extern "c" fn chdir(path: *const u8) c_int;
+pub extern "c" fn execve(path: *const u8, argv: *const ?*const u8, envp: *const ?*const u8) c_int;
pub extern "c" fn dup(fd: c_int) c_int;
pub extern "c" fn dup2(old_fd: c_int, new_fd: c_int) c_int;
-pub extern "c" fn readlink(noalias path: &const u8, noalias buf: &u8, bufsize: usize) isize;
-pub extern "c" fn realpath(noalias file_name: &const u8, noalias resolved_name: &u8) ?&u8;
-pub extern "c" fn sigprocmask(how: c_int, noalias set: &const sigset_t, noalias oset: ?&sigset_t) c_int;
-pub extern "c" fn gettimeofday(tv: ?&timeval, tz: ?&timezone) c_int;
-pub extern "c" fn sigaction(sig: c_int, noalias act: &const Sigaction, noalias oact: ?&Sigaction) c_int;
-pub extern "c" fn nanosleep(rqtp: &const timespec, rmtp: ?×pec) c_int;
+pub extern "c" fn readlink(noalias path: *const u8, noalias buf: *u8, bufsize: usize) isize;
+pub extern "c" fn realpath(noalias file_name: *const u8, noalias resolved_name: *u8) ?*u8;
+pub extern "c" fn sigprocmask(how: c_int, noalias set: *const sigset_t, noalias oset: ?*sigset_t) c_int;
+pub extern "c" fn gettimeofday(tv: ?*timeval, tz: ?*timezone) c_int;
+pub extern "c" fn sigaction(sig: c_int, noalias act: *const Sigaction, noalias oact: ?*Sigaction) c_int;
+pub extern "c" fn nanosleep(rqtp: *const timespec, rmtp: ?*timespec) c_int;
pub extern "c" fn setreuid(ruid: c_uint, euid: c_uint) c_int;
pub extern "c" fn setregid(rgid: c_uint, egid: c_uint) c_int;
-pub extern "c" fn rmdir(path: &const u8) c_int;
+pub extern "c" fn rmdir(path: *const u8) c_int;
-pub extern "c" fn aligned_alloc(alignment: usize, size: usize) ?&c_void;
-pub extern "c" fn malloc(usize) ?&c_void;
-pub extern "c" fn realloc(&c_void, usize) ?&c_void;
-pub extern "c" fn free(&c_void) void;
-pub extern "c" fn posix_memalign(memptr: &&c_void, alignment: usize, size: usize) c_int;
+pub extern "c" fn aligned_alloc(alignment: usize, size: usize) ?*c_void;
+pub extern "c" fn malloc(usize) ?*c_void;
+pub extern "c" fn realloc(*c_void, usize) ?*c_void;
+pub extern "c" fn free(*c_void) void;
+pub extern "c" fn posix_memalign(memptr: **c_void, alignment: usize, size: usize) c_int;
-pub extern "pthread" fn pthread_create(noalias newthread: &pthread_t, noalias attr: ?&const pthread_attr_t, start_routine: extern fn (?&c_void) ?&c_void, noalias arg: ?&c_void) c_int;
-pub extern "pthread" fn pthread_attr_init(attr: &pthread_attr_t) c_int;
-pub extern "pthread" fn pthread_attr_setstack(attr: &pthread_attr_t, stackaddr: &c_void, stacksize: usize) c_int;
-pub extern "pthread" fn pthread_attr_destroy(attr: &pthread_attr_t) c_int;
-pub extern "pthread" fn pthread_join(thread: pthread_t, arg_return: ?&?&c_void) c_int;
+pub extern "pthread" fn pthread_create(noalias newthread: *pthread_t, noalias attr: ?*const pthread_attr_t, start_routine: extern fn (?*c_void) ?*c_void, noalias arg: ?*c_void) c_int;
+pub extern "pthread" fn pthread_attr_init(attr: *pthread_attr_t) c_int;
+pub extern "pthread" fn pthread_attr_setstack(attr: *pthread_attr_t, stackaddr: *c_void, stacksize: usize) c_int;
+pub extern "pthread" fn pthread_attr_destroy(attr: *pthread_attr_t) c_int;
+pub extern "pthread" fn pthread_join(thread: pthread_t, arg_return: ?*?*c_void) c_int;
-pub const pthread_t = &@OpaqueType();
+pub const pthread_t = *@OpaqueType();
diff --git a/std/c/linux.zig b/std/c/linux.zig
index 7810fec13060..0ab043533e85 100644
--- a/std/c/linux.zig
+++ b/std/c/linux.zig
@@ -1,7 +1,7 @@
pub use @import("../os/linux/errno.zig");
-pub extern "c" fn getrandom(buf_ptr: &u8, buf_len: usize, flags: c_uint) c_int;
-extern "c" fn __errno_location() &c_int;
+pub extern "c" fn getrandom(buf_ptr: *u8, buf_len: usize, flags: c_uint) c_int;
+extern "c" fn __errno_location() *c_int;
pub const _errno = __errno_location;
pub const pthread_attr_t = extern struct {
diff --git a/std/c/windows.zig b/std/c/windows.zig
index 6e8b17eda80c..35ca21713183 100644
--- a/std/c/windows.zig
+++ b/std/c/windows.zig
@@ -1 +1 @@
-pub extern "c" fn _errno() &c_int;
+pub extern "c" fn _errno() *c_int;
diff --git a/std/crypto/blake2.zig b/std/crypto/blake2.zig
index bf3193b5d998..f0a9766c0069 100644
--- a/std/crypto/blake2.zig
+++ b/std/crypto/blake2.zig
@@ -75,7 +75,7 @@ fn Blake2s(comptime out_len: usize) type {
return s;
}
- pub fn reset(d: &Self) void {
+ pub fn reset(d: *Self) void {
mem.copy(u32, d.h[0..], iv[0..]);
// No key plus default parameters
@@ -90,7 +90,7 @@ fn Blake2s(comptime out_len: usize) type {
d.final(out);
}
- pub fn update(d: &Self, b: []const u8) void {
+ pub fn update(d: *Self, b: []const u8) void {
var off: usize = 0;
// Partial buffer exists from previous update. Copy into buffer then hash.
@@ -113,7 +113,7 @@ fn Blake2s(comptime out_len: usize) type {
d.buf_len += u8(b[off..].len);
}
- pub fn final(d: &Self, out: []u8) void {
+ pub fn final(d: *Self, out: []u8) void {
debug.assert(out.len >= out_len / 8);
mem.set(u8, d.buf[d.buf_len..], 0);
@@ -127,7 +127,7 @@ fn Blake2s(comptime out_len: usize) type {
}
}
- fn round(d: &Self, b: []const u8, last: bool) void {
+ fn round(d: *Self, b: []const u8, last: bool) void {
debug.assert(b.len == 64);
var m: [16]u32 = undefined;
@@ -310,7 +310,7 @@ fn Blake2b(comptime out_len: usize) type {
return s;
}
- pub fn reset(d: &Self) void {
+ pub fn reset(d: *Self) void {
mem.copy(u64, d.h[0..], iv[0..]);
// No key plus default parameters
@@ -325,7 +325,7 @@ fn Blake2b(comptime out_len: usize) type {
d.final(out);
}
- pub fn update(d: &Self, b: []const u8) void {
+ pub fn update(d: *Self, b: []const u8) void {
var off: usize = 0;
// Partial buffer exists from previous update. Copy into buffer then hash.
@@ -348,7 +348,7 @@ fn Blake2b(comptime out_len: usize) type {
d.buf_len += u8(b[off..].len);
}
- pub fn final(d: &Self, out: []u8) void {
+ pub fn final(d: *Self, out: []u8) void {
mem.set(u8, d.buf[d.buf_len..], 0);
d.t += d.buf_len;
d.round(d.buf[0..], true);
@@ -360,7 +360,7 @@ fn Blake2b(comptime out_len: usize) type {
}
}
- fn round(d: &Self, b: []const u8, last: bool) void {
+ fn round(d: *Self, b: []const u8, last: bool) void {
debug.assert(b.len == 128);
var m: [16]u64 = undefined;
diff --git a/std/crypto/md5.zig b/std/crypto/md5.zig
index 3d055972733e..c0d1732d3715 100644
--- a/std/crypto/md5.zig
+++ b/std/crypto/md5.zig
@@ -44,7 +44,7 @@ pub const Md5 = struct {
return d;
}
- pub fn reset(d: &Self) void {
+ pub fn reset(d: *Self) void {
d.s[0] = 0x67452301;
d.s[1] = 0xEFCDAB89;
d.s[2] = 0x98BADCFE;
@@ -59,7 +59,7 @@ pub const Md5 = struct {
d.final(out);
}
- pub fn update(d: &Self, b: []const u8) void {
+ pub fn update(d: *Self, b: []const u8) void {
var off: usize = 0;
// Partial buffer exists from previous update. Copy into buffer then hash.
@@ -84,7 +84,7 @@ pub const Md5 = struct {
d.total_len +%= b.len;
}
- pub fn final(d: &Self, out: []u8) void {
+ pub fn final(d: *Self, out: []u8) void {
debug.assert(out.len >= 16);
// The buffer here will never be completely full.
@@ -116,7 +116,7 @@ pub const Md5 = struct {
}
}
- fn round(d: &Self, b: []const u8) void {
+ fn round(d: *Self, b: []const u8) void {
debug.assert(b.len == 64);
var s: [16]u32 = undefined;
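All of the digests in this diff share the same reset/update/final shape with *Self receivers. A small usage sketch, assuming Md5 is re-exported as std.crypto.Md5 in this tree:
const crypto = @import("std").crypto;

test "md5 incremental update" {
    var out: [16]u8 = undefined;
    var d = crypto.Md5.init();
    d.update("hello "); // update takes the digest by *Self and the bytes as []const u8
    d.update("world");
    d.final(out[0..]); // final asserts out.len >= 16
}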
diff --git a/std/crypto/sha1.zig b/std/crypto/sha1.zig
index e9d8e3e13266..9e46fc92395a 100644
--- a/std/crypto/sha1.zig
+++ b/std/crypto/sha1.zig
@@ -43,7 +43,7 @@ pub const Sha1 = struct {
return d;
}
- pub fn reset(d: &Self) void {
+ pub fn reset(d: *Self) void {
d.s[0] = 0x67452301;
d.s[1] = 0xEFCDAB89;
d.s[2] = 0x98BADCFE;
@@ -59,7 +59,7 @@ pub const Sha1 = struct {
d.final(out);
}
- pub fn update(d: &Self, b: []const u8) void {
+ pub fn update(d: *Self, b: []const u8) void {
var off: usize = 0;
// Partial buffer exists from previous update. Copy into buffer then hash.
@@ -83,7 +83,7 @@ pub const Sha1 = struct {
d.total_len += b.len;
}
- pub fn final(d: &Self, out: []u8) void {
+ pub fn final(d: *Self, out: []u8) void {
debug.assert(out.len >= 20);
// The buffer here will never be completely full.
@@ -115,7 +115,7 @@ pub const Sha1 = struct {
}
}
- fn round(d: &Self, b: []const u8) void {
+ fn round(d: *Self, b: []const u8) void {
debug.assert(b.len == 64);
var s: [16]u32 = undefined;
diff --git a/std/crypto/sha2.zig b/std/crypto/sha2.zig
index aedc820f44be..d1375d73e8bc 100644
--- a/std/crypto/sha2.zig
+++ b/std/crypto/sha2.zig
@@ -93,7 +93,7 @@ fn Sha2_32(comptime params: Sha2Params32) type {
return d;
}
- pub fn reset(d: &Self) void {
+ pub fn reset(d: *Self) void {
d.s[0] = params.iv0;
d.s[1] = params.iv1;
d.s[2] = params.iv2;
@@ -112,7 +112,7 @@ fn Sha2_32(comptime params: Sha2Params32) type {
d.final(out);
}
- pub fn update(d: &Self, b: []const u8) void {
+ pub fn update(d: *Self, b: []const u8) void {
var off: usize = 0;
// Partial buffer exists from previous update. Copy into buffer then hash.
@@ -136,7 +136,7 @@ fn Sha2_32(comptime params: Sha2Params32) type {
d.total_len += b.len;
}
- pub fn final(d: &Self, out: []u8) void {
+ pub fn final(d: *Self, out: []u8) void {
debug.assert(out.len >= params.out_len / 8);
// The buffer here will never be completely full.
@@ -171,7 +171,7 @@ fn Sha2_32(comptime params: Sha2Params32) type {
}
}
- fn round(d: &Self, b: []const u8) void {
+ fn round(d: *Self, b: []const u8) void {
debug.assert(b.len == 64);
var s: [64]u32 = undefined;
@@ -434,7 +434,7 @@ fn Sha2_64(comptime params: Sha2Params64) type {
return d;
}
- pub fn reset(d: &Self) void {
+ pub fn reset(d: *Self) void {
d.s[0] = params.iv0;
d.s[1] = params.iv1;
d.s[2] = params.iv2;
@@ -453,7 +453,7 @@ fn Sha2_64(comptime params: Sha2Params64) type {
d.final(out);
}
- pub fn update(d: &Self, b: []const u8) void {
+ pub fn update(d: *Self, b: []const u8) void {
var off: usize = 0;
// Partial buffer exists from previous update. Copy into buffer then hash.
@@ -477,7 +477,7 @@ fn Sha2_64(comptime params: Sha2Params64) type {
d.total_len += b.len;
}
- pub fn final(d: &Self, out: []u8) void {
+ pub fn final(d: *Self, out: []u8) void {
debug.assert(out.len >= params.out_len / 8);
// The buffer here will never be completely full.
@@ -512,7 +512,7 @@ fn Sha2_64(comptime params: Sha2Params64) type {
}
}
- fn round(d: &Self, b: []const u8) void {
+ fn round(d: *Self, b: []const u8) void {
debug.assert(b.len == 128);
var s: [80]u64 = undefined;
diff --git a/std/crypto/sha3.zig b/std/crypto/sha3.zig
index 75bec57a8763..ae02d7a48203 100644
--- a/std/crypto/sha3.zig
+++ b/std/crypto/sha3.zig
@@ -26,7 +26,7 @@ fn Keccak(comptime bits: usize, comptime delim: u8) type {
return d;
}
- pub fn reset(d: &Self) void {
+ pub fn reset(d: *Self) void {
mem.set(u8, d.s[0..], 0);
d.offset = 0;
d.rate = 200 - (bits / 4);
@@ -38,7 +38,7 @@ fn Keccak(comptime bits: usize, comptime delim: u8) type {
d.final(out);
}
- pub fn update(d: &Self, b: []const u8) void {
+ pub fn update(d: *Self, b: []const u8) void {
var ip: usize = 0;
var len = b.len;
var rate = d.rate - d.offset;
@@ -63,7 +63,7 @@ fn Keccak(comptime bits: usize, comptime delim: u8) type {
d.offset = offset + len;
}
- pub fn final(d: &Self, out: []u8) void {
+ pub fn final(d: *Self, out: []u8) void {
// padding
d.s[d.offset] ^= delim;
d.s[d.rate - 1] ^= 0x80;
diff --git a/std/crypto/throughput_test.zig b/std/crypto/throughput_test.zig
index c5c4f9fe102c..0ad6845d1a81 100644
--- a/std/crypto/throughput_test.zig
+++ b/std/crypto/throughput_test.zig
@@ -15,8 +15,8 @@ const BytesToHash = 1024 * MiB;
pub fn main() !void {
var stdout_file = try std.io.getStdOut();
var stdout_out_stream = std.io.FileOutStream.init(&stdout_file);
const stdout = &stdout_out_stream.stream;
var block: [HashFunction.block_size]u8 = undefined;
std.mem.set(u8, block[0..], 0);
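Worth keeping in mind throughout this diff: the prefix * now appears only in pointer types, while &x remains the address-of operator on values, which is why calls like FileOutStream.init(&stdout_file) above are untouched. A tiny sketch:
const assert = @import("std").debug.assert;

fn bump(x: *i32) void {
    x.* += 1; // dereference stays a .* suffix
}

test "pointer type vs address-of operator" {
    var v: i32 = 1;
    bump(&v); // &v still yields a *i32; only the type syntax changed from &T to *T
    assert(v == 2);
}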
diff --git a/std/cstr.zig b/std/cstr.zig
index c9f302606451..dfbfb8047f2d 100644
--- a/std/cstr.zig
+++ b/std/cstr.zig
@@ -9,13 +9,13 @@ pub const line_sep = switch (builtin.os) {
else => "\n",
};
-pub fn len(ptr: &const u8) usize {
+pub fn len(ptr: *const u8) usize {
var count: usize = 0;
while (ptr[count] != 0) : (count += 1) {}
return count;
}
-pub fn cmp(a: &const u8, b: &const u8) i8 {
+pub fn cmp(a: *const u8, b: *const u8) i8 {
var index: usize = 0;
while (a[index] == b[index] and a[index] != 0) : (index += 1) {}
if (a[index] > b[index]) {
@@ -27,11 +27,11 @@ pub fn cmp(a: &const u8, b: &const u8) i8 {
}
}
-pub fn toSliceConst(str: &const u8) []const u8 {
+pub fn toSliceConst(str: *const u8) []const u8 {
return str[0..len(str)];
}
-pub fn toSlice(str: &u8) []u8 {
+pub fn toSlice(str: *u8) []u8 {
return str[0..len(str)];
}
@@ -47,7 +47,7 @@ fn testCStrFnsImpl() void {
/// Returns a mutable slice with 1 more byte of length which is a null byte.
/// Caller owns the returned memory.
-pub fn addNullByte(allocator: &mem.Allocator, slice: []const u8) ![]u8 {
+pub fn addNullByte(allocator: *mem.Allocator, slice: []const u8) ![]u8 {
const result = try allocator.alloc(u8, slice.len + 1);
mem.copy(u8, result, slice);
result[slice.len] = 0;
@@ -55,13 +55,13 @@ pub fn addNullByte(allocator: &mem.Allocator, slice: []const u8) ![]u8 {
}
pub const NullTerminated2DArray = struct {
- allocator: &mem.Allocator,
+ allocator: *mem.Allocator,
byte_count: usize,
- ptr: ?&?&u8,
+ ptr: ?*?*u8,
/// Takes N lists of strings, concatenates the lists together, and adds a null terminator
/// Caller must deinit result
- pub fn fromSlices(allocator: &mem.Allocator, slices: []const []const []const u8) !NullTerminated2DArray {
+ pub fn fromSlices(allocator: *mem.Allocator, slices: []const []const []const u8) !NullTerminated2DArray {
var new_len: usize = 1; // 1 for the list null
var byte_count: usize = 0;
for (slices) |slice| {
@@ -75,11 +75,11 @@ pub const NullTerminated2DArray = struct {
const index_size = @sizeOf(usize) * new_len; // size of the ptrs
byte_count += index_size;
- const buf = try allocator.alignedAlloc(u8, @alignOf(?&u8), byte_count);
+ const buf = try allocator.alignedAlloc(u8, @alignOf(?*u8), byte_count);
errdefer allocator.free(buf);
var write_index = index_size;
- const index_buf = ([]?&u8)(buf);
+ const index_buf = ([]?*u8)(buf);
var i: usize = 0;
for (slices) |slice| {
@@ -97,12 +97,12 @@ pub const NullTerminated2DArray = struct {
return NullTerminated2DArray{
.allocator = allocator,
.byte_count = byte_count,
- .ptr = @ptrCast(?&?&u8, buf.ptr),
+ .ptr = @ptrCast(?*?*u8, buf.ptr),
};
}
- pub fn deinit(self: &NullTerminated2DArray) void {
- const buf = @ptrCast(&u8, self.ptr);
+ pub fn deinit(self: *NullTerminated2DArray) void {
+ const buf = @ptrCast(*u8, self.ptr);
self.allocator.free(buf[0..self.byte_count]);
}
};
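A quick sketch of the cstr helpers under the new parameter types; it assumes std.debug.global_allocator is available as a *mem.Allocator, as other std tests of this vintage use:
const std = @import("std");

test "cstr round trip" {
    // addNullByte returns the slice plus a trailing 0 byte; caller owns the memory.
    const buf = std.cstr.addNullByte(std.debug.global_allocator, "hello") catch unreachable;
    defer std.debug.global_allocator.free(buf);
    // &buf[0] is an address-of expression of type *u8; len and toSliceConst take *const u8.
    std.debug.assert(std.cstr.len(&buf[0]) == 5);
    std.debug.assert(std.mem.eql(u8, std.cstr.toSliceConst(&buf[0]), "hello"));
}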
diff --git a/std/debug/failing_allocator.zig b/std/debug/failing_allocator.zig
index 6b5edff5bf7b..e16dd21db453 100644
--- a/std/debug/failing_allocator.zig
+++ b/std/debug/failing_allocator.zig
@@ -7,12 +7,12 @@ pub const FailingAllocator = struct {
allocator: mem.Allocator,
index: usize,
fail_index: usize,
- internal_allocator: &mem.Allocator,
+ internal_allocator: *mem.Allocator,
allocated_bytes: usize,
freed_bytes: usize,
deallocations: usize,
- pub fn init(allocator: &mem.Allocator, fail_index: usize) FailingAllocator {
+ pub fn init(allocator: *mem.Allocator, fail_index: usize) FailingAllocator {
return FailingAllocator{
.internal_allocator = allocator,
.fail_index = fail_index,
@@ -28,7 +28,7 @@ pub const FailingAllocator = struct {
};
}
- fn alloc(allocator: &mem.Allocator, n: usize, alignment: u29) ![]u8 {
+ fn alloc(allocator: *mem.Allocator, n: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
if (self.index == self.fail_index) {
return error.OutOfMemory;
@@ -39,7 +39,7 @@ pub const FailingAllocator = struct {
return result;
}
- fn realloc(allocator: &mem.Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+ fn realloc(allocator: *mem.Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
if (new_size <= old_mem.len) {
self.freed_bytes += old_mem.len - new_size;
@@ -55,7 +55,7 @@ pub const FailingAllocator = struct {
return result;
}
- fn free(allocator: &mem.Allocator, bytes: []u8) void {
+ fn free(allocator: *mem.Allocator, bytes: []u8) void {
const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
self.freed_bytes += bytes.len;
self.deallocations += 1;
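FailingAllocator wraps a real allocator and makes the allocation at fail_index return error.OutOfMemory so that error paths can be exercised. A hedged sketch, again assuming std.debug.global_allocator:
const std = @import("std");

test "failing allocator injects OutOfMemory" {
    // fail_index 0 makes the very first allocation hit the index == fail_index check above.
    var failing = std.debug.FailingAllocator.init(std.debug.global_allocator, 0);
    if (failing.allocator.alloc(u8, 16)) |_| {
        unreachable;
    } else |err| {
        std.debug.assert(err == error.OutOfMemory);
    }
}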
diff --git a/std/debug/index.zig b/std/debug/index.zig
index 92e565b39184..00d9bef121f4 100644
--- a/std/debug/index.zig
+++ b/std/debug/index.zig
@@ -16,12 +16,12 @@ pub const FailingAllocator = @import("failing_allocator.zig").FailingAllocator;
/// TODO atomic/multithread support
var stderr_file: os.File = undefined;
var stderr_file_out_stream: io.FileOutStream = undefined;
-var stderr_stream: ?&io.OutStream(io.FileOutStream.Error) = null;
+var stderr_stream: ?*io.OutStream(io.FileOutStream.Error) = null;
pub fn warn(comptime fmt: []const u8, args: ...) void {
const stderr = getStderrStream() catch return;
stderr.print(fmt, args) catch return;
}
-fn getStderrStream() !&io.OutStream(io.FileOutStream.Error) {
+fn getStderrStream() !*io.OutStream(io.FileOutStream.Error) {
if (stderr_stream) |st| {
return st;
} else {
@@ -33,8 +33,8 @@ fn getStderrStream() !&io.OutStream(io.FileOutStream.Error) {
}
}
-var self_debug_info: ?&ElfStackTrace = null;
-pub fn getSelfDebugInfo() !&ElfStackTrace {
+var self_debug_info: ?*ElfStackTrace = null;
+pub fn getSelfDebugInfo() !*ElfStackTrace {
if (self_debug_info) |info| {
return info;
} else {
@@ -58,7 +58,7 @@ pub fn dumpCurrentStackTrace(start_addr: ?usize) void {
}
/// Tries to print a stack trace to stderr, unbuffered, and ignores any error returned.
-pub fn dumpStackTrace(stack_trace: &const builtin.StackTrace) void {
+pub fn dumpStackTrace(stack_trace: *const builtin.StackTrace) void {
const stderr = getStderrStream() catch return;
const debug_info = getSelfDebugInfo() catch |err| {
stderr.print("Unable to dump stack trace: Unable to open debug info: {}\n", @errorName(err)) catch return;
@@ -104,7 +104,7 @@ pub fn panic(comptime format: []const u8, args: ...) noreturn {
var panicking: u8 = 0; // TODO make this a bool
-pub fn panicExtra(trace: ?&const builtin.StackTrace, first_trace_addr: ?usize, comptime format: []const u8, args: ...) noreturn {
+pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, comptime format: []const u8, args: ...) noreturn {
@setCold(true);
if (@atomicRmw(u8, &panicking, builtin.AtomicRmwOp.Xchg, 1, builtin.AtomicOrder.SeqCst) == 1) {
@@ -130,7 +130,7 @@ const WHITE = "\x1b[37;1m";
const DIM = "\x1b[2m";
const RESET = "\x1b[0m";
-pub fn writeStackTrace(stack_trace: &const builtin.StackTrace, out_stream: var, allocator: &mem.Allocator, debug_info: &ElfStackTrace, tty_color: bool) !void {
+pub fn writeStackTrace(stack_trace: *const builtin.StackTrace, out_stream: var, allocator: *mem.Allocator, debug_info: *ElfStackTrace, tty_color: bool) !void {
var frame_index: usize = undefined;
var frames_left: usize = undefined;
if (stack_trace.index < stack_trace.instruction_addresses.len) {
@@ -150,7 +150,7 @@ pub fn writeStackTrace(stack_trace: &const builtin.StackTrace, out_stream: var,
}
}
-pub fn writeCurrentStackTrace(out_stream: var, allocator: &mem.Allocator, debug_info: &ElfStackTrace, tty_color: bool, start_addr: ?usize) !void {
+pub fn writeCurrentStackTrace(out_stream: var, allocator: *mem.Allocator, debug_info: *ElfStackTrace, tty_color: bool, start_addr: ?usize) !void {
const AddressState = union(enum) {
NotLookingForStartAddress,
LookingForStartAddress: usize,
@@ -166,8 +166,8 @@ pub fn writeCurrentStackTrace(out_stream: var, allocator: &mem.Allocator, debug_
}
var fp = @ptrToInt(@frameAddress());
- while (fp != 0) : (fp = @intToPtr(&const usize, fp).*) {
- const return_address = @intToPtr(&const usize, fp + @sizeOf(usize)).*;
+ while (fp != 0) : (fp = @intToPtr(*const usize, fp).*) {
+ const return_address = @intToPtr(*const usize, fp + @sizeOf(usize)).*;
switch (addr_state) {
AddressState.NotLookingForStartAddress => {},
@@ -183,7 +183,7 @@ pub fn writeCurrentStackTrace(out_stream: var, allocator: &mem.Allocator, debug_
}
}
-fn printSourceAtAddress(debug_info: &ElfStackTrace, out_stream: var, address: usize) !void {
+fn printSourceAtAddress(debug_info: *ElfStackTrace, out_stream: var, address: usize) !void {
const ptr_hex = "0x{x}";
switch (builtin.os) {
@@ -236,7 +236,7 @@ fn printSourceAtAddress(debug_info: &ElfStackTrace, out_stream: var, address: us
}
}
-pub fn openSelfDebugInfo(allocator: &mem.Allocator) !&ElfStackTrace {
+pub fn openSelfDebugInfo(allocator: *mem.Allocator) !*ElfStackTrace {
switch (builtin.object_format) {
builtin.ObjectFormat.elf => {
const st = try allocator.create(ElfStackTrace);
@@ -289,7 +289,7 @@ pub fn openSelfDebugInfo(allocator: &mem.Allocator) !&ElfStackTrace {
}
}
-fn printLineFromFile(allocator: &mem.Allocator, out_stream: var, line_info: &const LineInfo) !void {
+fn printLineFromFile(allocator: *mem.Allocator, out_stream: var, line_info: *const LineInfo) !void {
var f = try os.File.openRead(allocator, line_info.file_name);
defer f.close();
// TODO fstat and make sure that the file has the correct size
@@ -325,32 +325,32 @@ pub const ElfStackTrace = switch (builtin.os) {
builtin.Os.macosx => struct {
symbol_table: macho.SymbolTable,
- pub fn close(self: &ElfStackTrace) void {
+ pub fn close(self: *ElfStackTrace) void {
self.symbol_table.deinit();
}
},
else => struct {
self_exe_file: os.File,
elf: elf.Elf,
- debug_info: &elf.SectionHeader,
- debug_abbrev: &elf.SectionHeader,
- debug_str: &elf.SectionHeader,
- debug_line: &elf.SectionHeader,
- debug_ranges: ?&elf.SectionHeader,
+ debug_info: *elf.SectionHeader,
+ debug_abbrev: *elf.SectionHeader,
+ debug_str: *elf.SectionHeader,
+ debug_line: *elf.SectionHeader,
+ debug_ranges: ?*elf.SectionHeader,
abbrev_table_list: ArrayList(AbbrevTableHeader),
compile_unit_list: ArrayList(CompileUnit),
- pub fn allocator(self: &const ElfStackTrace) &mem.Allocator {
+ pub fn allocator(self: *const ElfStackTrace) *mem.Allocator {
return self.abbrev_table_list.allocator;
}
- pub fn readString(self: &ElfStackTrace) ![]u8 {
+ pub fn readString(self: *ElfStackTrace) ![]u8 {
var in_file_stream = io.FileInStream.init(&self.self_exe_file);
const in_stream = &in_file_stream.stream;
return readStringRaw(self.allocator(), in_stream);
}
- pub fn close(self: &ElfStackTrace) void {
+ pub fn close(self: *ElfStackTrace) void {
self.self_exe_file.close();
self.elf.close();
}
@@ -365,7 +365,7 @@ const PcRange = struct {
const CompileUnit = struct {
version: u16,
is_64: bool,
- die: &Die,
+ die: *Die,
index: usize,
pc_range: ?PcRange,
};
@@ -408,7 +408,7 @@ const Constant = struct {
payload: []u8,
signed: bool,
- fn asUnsignedLe(self: &const Constant) !u64 {
+ fn asUnsignedLe(self: *const Constant) !u64 {
if (self.payload.len > @sizeOf(u64)) return error.InvalidDebugInfo;
if (self.signed) return error.InvalidDebugInfo;
return mem.readInt(self.payload, u64, builtin.Endian.Little);
@@ -425,14 +425,14 @@ const Die = struct {
value: FormValue,
};
- fn getAttr(self: &const Die, id: u64) ?&const FormValue {
+ fn getAttr(self: *const Die, id: u64) ?*const FormValue {
for (self.attrs.toSliceConst()) |*attr| {
if (attr.id == id) return &attr.value;
}
return null;
}
- fn getAttrAddr(self: &const Die, id: u64) !u64 {
+ fn getAttrAddr(self: *const Die, id: u64) !u64 {
const form_value = self.getAttr(id) ?? return error.MissingDebugInfo;
return switch (form_value.*) {
FormValue.Address => |value| value,
@@ -440,7 +440,7 @@ const Die = struct {
};
}
- fn getAttrSecOffset(self: &const Die, id: u64) !u64 {
+ fn getAttrSecOffset(self: *const Die, id: u64) !u64 {
const form_value = self.getAttr(id) ?? return error.MissingDebugInfo;
return switch (form_value.*) {
FormValue.Const => |value| value.asUnsignedLe(),
@@ -449,7 +449,7 @@ const Die = struct {
};
}
- fn getAttrUnsignedLe(self: &const Die, id: u64) !u64 {
+ fn getAttrUnsignedLe(self: *const Die, id: u64) !u64 {
const form_value = self.getAttr(id) ?? return error.MissingDebugInfo;
return switch (form_value.*) {
FormValue.Const => |value| value.asUnsignedLe(),
@@ -457,7 +457,7 @@ const Die = struct {
};
}
- fn getAttrString(self: &const Die, st: &ElfStackTrace, id: u64) ![]u8 {
+ fn getAttrString(self: *const Die, st: *ElfStackTrace, id: u64) ![]u8 {
const form_value = self.getAttr(id) ?? return error.MissingDebugInfo;
return switch (form_value.*) {
FormValue.String => |value| value,
@@ -478,9 +478,9 @@ const LineInfo = struct {
line: usize,
column: usize,
file_name: []u8,
- allocator: &mem.Allocator,
+ allocator: *mem.Allocator,
- fn deinit(self: &const LineInfo) void {
+ fn deinit(self: *const LineInfo) void {
self.allocator.free(self.file_name);
}
};
@@ -496,7 +496,7 @@ const LineNumberProgram = struct {
target_address: usize,
include_dirs: []const []const u8,
- file_entries: &ArrayList(FileEntry),
+ file_entries: *ArrayList(FileEntry),
prev_address: usize,
prev_file: usize,
@@ -506,7 +506,7 @@ const LineNumberProgram = struct {
prev_basic_block: bool,
prev_end_sequence: bool,
- pub fn init(is_stmt: bool, include_dirs: []const []const u8, file_entries: &ArrayList(FileEntry), target_address: usize) LineNumberProgram {
+ pub fn init(is_stmt: bool, include_dirs: []const []const u8, file_entries: *ArrayList(FileEntry), target_address: usize) LineNumberProgram {
return LineNumberProgram{
.address = 0,
.file = 1,
@@ -528,7 +528,7 @@ const LineNumberProgram = struct {
};
}
- pub fn checkLineMatch(self: &LineNumberProgram) !?LineInfo {
+ pub fn checkLineMatch(self: *LineNumberProgram) !?LineInfo {
if (self.target_address >= self.prev_address and self.target_address < self.address) {
const file_entry = if (self.prev_file == 0) {
return error.MissingDebugInfo;
@@ -562,7 +562,7 @@ const LineNumberProgram = struct {
}
};
-fn readStringRaw(allocator: &mem.Allocator, in_stream: var) ![]u8 {
+fn readStringRaw(allocator: *mem.Allocator, in_stream: var) ![]u8 {
var buf = ArrayList(u8).init(allocator);
while (true) {
const byte = try in_stream.readByte();
@@ -572,30 +572,30 @@ fn readStringRaw(allocator: &mem.Allocator, in_stream: var) ![]u8 {
return buf.toSlice();
}
-fn getString(st: &ElfStackTrace, offset: u64) ![]u8 {
+fn getString(st: *ElfStackTrace, offset: u64) ![]u8 {
const pos = st.debug_str.offset + offset;
try st.self_exe_file.seekTo(pos);
return st.readString();
}
-fn readAllocBytes(allocator: &mem.Allocator, in_stream: var, size: usize) ![]u8 {
+fn readAllocBytes(allocator: *mem.Allocator, in_stream: var, size: usize) ![]u8 {
const buf = try allocator.alloc(u8, size);
errdefer allocator.free(buf);
if ((try in_stream.read(buf)) < size) return error.EndOfFile;
return buf;
}
-fn parseFormValueBlockLen(allocator: &mem.Allocator, in_stream: var, size: usize) !FormValue {
+fn parseFormValueBlockLen(allocator: *mem.Allocator, in_stream: var, size: usize) !FormValue {
const buf = try readAllocBytes(allocator, in_stream, size);
return FormValue{ .Block = buf };
}
-fn parseFormValueBlock(allocator: &mem.Allocator, in_stream: var, size: usize) !FormValue {
+fn parseFormValueBlock(allocator: *mem.Allocator, in_stream: var, size: usize) !FormValue {
const block_len = try in_stream.readVarInt(builtin.Endian.Little, usize, size);
return parseFormValueBlockLen(allocator, in_stream, block_len);
}
-fn parseFormValueConstant(allocator: &mem.Allocator, in_stream: var, signed: bool, size: usize) !FormValue {
+fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: var, signed: bool, size: usize) !FormValue {
return FormValue{
.Const = Constant{
.signed = signed,
@@ -612,12 +612,12 @@ fn parseFormValueTargetAddrSize(in_stream: var) !u64 {
return if (@sizeOf(usize) == 4) u64(try in_stream.readIntLe(u32)) else if (@sizeOf(usize) == 8) try in_stream.readIntLe(u64) else unreachable;
}
-fn parseFormValueRefLen(allocator: &mem.Allocator, in_stream: var, size: usize) !FormValue {
+fn parseFormValueRefLen(allocator: *mem.Allocator, in_stream: var, size: usize) !FormValue {
const buf = try readAllocBytes(allocator, in_stream, size);
return FormValue{ .Ref = buf };
}
-fn parseFormValueRef(allocator: &mem.Allocator, in_stream: var, comptime T: type) !FormValue {
+fn parseFormValueRef(allocator: *mem.Allocator, in_stream: var, comptime T: type) !FormValue {
const block_len = try in_stream.readIntLe(T);
return parseFormValueRefLen(allocator, in_stream, block_len);
}
@@ -632,7 +632,7 @@ const ParseFormValueError = error{
OutOfMemory,
};
-fn parseFormValue(allocator: &mem.Allocator, in_stream: var, form_id: u64, is_64: bool) ParseFormValueError!FormValue {
+fn parseFormValue(allocator: *mem.Allocator, in_stream: var, form_id: u64, is_64: bool) ParseFormValueError!FormValue {
return switch (form_id) {
DW.FORM_addr => FormValue{ .Address = try parseFormValueTargetAddrSize(in_stream) },
DW.FORM_block1 => parseFormValueBlock(allocator, in_stream, 1),
@@ -682,7 +682,7 @@ fn parseFormValue(allocator: &mem.Allocator, in_stream: var, form_id: u64, is_64
};
}
-fn parseAbbrevTable(st: &ElfStackTrace) !AbbrevTable {
+fn parseAbbrevTable(st: *ElfStackTrace) !AbbrevTable {
const in_file = &st.self_exe_file;
var in_file_stream = io.FileInStream.init(in_file);
const in_stream = &in_file_stream.stream;
@@ -712,7 +712,7 @@ fn parseAbbrevTable(st: &ElfStackTrace) !AbbrevTable {
/// Gets an already existing AbbrevTable given the abbrev_offset, or if not found,
/// seeks in the stream and parses it.
-fn getAbbrevTable(st: &ElfStackTrace, abbrev_offset: u64) !&const AbbrevTable {
+fn getAbbrevTable(st: *ElfStackTrace, abbrev_offset: u64) !*const AbbrevTable {
for (st.abbrev_table_list.toSlice()) |*header| {
if (header.offset == abbrev_offset) {
return &header.table;
@@ -726,14 +726,14 @@ fn getAbbrevTable(st: &ElfStackTrace, abbrev_offset: u64) !&const AbbrevTable {
return &st.abbrev_table_list.items[st.abbrev_table_list.len - 1].table;
}
-fn getAbbrevTableEntry(abbrev_table: &const AbbrevTable, abbrev_code: u64) ?&const AbbrevTableEntry {
+fn getAbbrevTableEntry(abbrev_table: *const AbbrevTable, abbrev_code: u64) ?*const AbbrevTableEntry {
for (abbrev_table.toSliceConst()) |*table_entry| {
if (table_entry.abbrev_code == abbrev_code) return table_entry;
}
return null;
}
-fn parseDie(st: &ElfStackTrace, abbrev_table: &const AbbrevTable, is_64: bool) !Die {
+fn parseDie(st: *ElfStackTrace, abbrev_table: *const AbbrevTable, is_64: bool) !Die {
const in_file = &st.self_exe_file;
var in_file_stream = io.FileInStream.init(in_file);
const in_stream = &in_file_stream.stream;
@@ -755,7 +755,7 @@ fn parseDie(st: &ElfStackTrace, abbrev_table: &const AbbrevTable, is_64: bool) !
return result;
}
-fn getLineNumberInfo(st: &ElfStackTrace, compile_unit: &const CompileUnit, target_address: usize) !LineInfo {
+fn getLineNumberInfo(st: *ElfStackTrace, compile_unit: *const CompileUnit, target_address: usize) !LineInfo {
const compile_unit_cwd = try compile_unit.die.getAttrString(st, DW.AT_comp_dir);
const in_file = &st.self_exe_file;
@@ -934,7 +934,7 @@ fn getLineNumberInfo(st: &ElfStackTrace, compile_unit: &const CompileUnit, targe
return error.MissingDebugInfo;
}
-fn scanAllCompileUnits(st: &ElfStackTrace) !void {
+fn scanAllCompileUnits(st: *ElfStackTrace) !void {
const debug_info_end = st.debug_info.offset + st.debug_info.size;
var this_unit_offset = st.debug_info.offset;
var cu_index: usize = 0;
@@ -1005,7 +1005,7 @@ fn scanAllCompileUnits(st: &ElfStackTrace) !void {
}
}
-fn findCompileUnit(st: &ElfStackTrace, target_address: u64) !&const CompileUnit {
+fn findCompileUnit(st: *ElfStackTrace, target_address: u64) !*const CompileUnit {
var in_file_stream = io.FileInStream.init(&st.self_exe_file);
const in_stream = &in_file_stream.stream;
for (st.compile_unit_list.toSlice()) |*compile_unit| {
@@ -1039,7 +1039,7 @@ fn findCompileUnit(st: &ElfStackTrace, target_address: u64) !&const CompileUnit
return error.MissingDebugInfo;
}
-fn readInitialLength(comptime E: type, in_stream: &io.InStream(E), is_64: &bool) !u64 {
+fn readInitialLength(comptime E: type, in_stream: *io.InStream(E), is_64: *bool) !u64 {
const first_32_bits = try in_stream.readIntLe(u32);
is_64.* = (first_32_bits == 0xffffffff);
if (is_64.*) {
@@ -1096,10 +1096,10 @@ var global_fixed_allocator = std.heap.FixedBufferAllocator.init(global_allocator
var global_allocator_mem: [100 * 1024]u8 = undefined;
// TODO make thread safe
-var debug_info_allocator: ?&mem.Allocator = null;
+var debug_info_allocator: ?*mem.Allocator = null;
var debug_info_direct_allocator: std.heap.DirectAllocator = undefined;
var debug_info_arena_allocator: std.heap.ArenaAllocator = undefined;
-fn getDebugInfoAllocator() &mem.Allocator {
+fn getDebugInfoAllocator() *mem.Allocator {
if (debug_info_allocator) |a| return a;
debug_info_direct_allocator = std.heap.DirectAllocator.init();
diff --git a/std/elf.zig b/std/elf.zig
index 29b9473f98f5..50e97ab27137 100644
--- a/std/elf.zig
+++ b/std/elf.zig
@@ -338,7 +338,7 @@ pub const SectionHeader = struct {
};
pub const Elf = struct {
- in_file: &os.File,
+ in_file: *os.File,
auto_close_stream: bool,
is_64: bool,
endian: builtin.Endian,
@@ -348,20 +348,20 @@ pub const Elf = struct {
program_header_offset: u64,
section_header_offset: u64,
string_section_index: u64,
- string_section: &SectionHeader,
+ string_section: *SectionHeader,
section_headers: []SectionHeader,
- allocator: &mem.Allocator,
+ allocator: *mem.Allocator,
prealloc_file: os.File,
/// Call close when done.
- pub fn openPath(elf: &Elf, allocator: &mem.Allocator, path: []const u8) !void {
+ pub fn openPath(elf: *Elf, allocator: *mem.Allocator, path: []const u8) !void {
try elf.prealloc_file.open(path);
try elf.openFile(allocator, &elf.prealloc_file);
elf.auto_close_stream = true;
}
/// Call close when done.
- pub fn openFile(elf: &Elf, allocator: &mem.Allocator, file: &os.File) !void {
+ pub fn openFile(elf: *Elf, allocator: *mem.Allocator, file: *os.File) !void {
elf.allocator = allocator;
elf.in_file = file;
elf.auto_close_stream = false;
@@ -503,13 +503,13 @@ pub const Elf = struct {
}
}
- pub fn close(elf: &Elf) void {
+ pub fn close(elf: *Elf) void {
elf.allocator.free(elf.section_headers);
if (elf.auto_close_stream) elf.in_file.close();
}
- pub fn findSection(elf: &Elf, name: []const u8) !?&SectionHeader {
+ pub fn findSection(elf: *Elf, name: []const u8) !?*SectionHeader {
var file_stream = io.FileInStream.init(elf.in_file);
const in = &file_stream.stream;
@@ -533,7 +533,7 @@ pub const Elf = struct {
return null;
}
- pub fn seekToSection(elf: &Elf, elf_section: &SectionHeader) !void {
+ pub fn seekToSection(elf: *Elf, elf_section: *SectionHeader) !void {
try elf.in_file.seekTo(elf_section.offset);
}
};
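A sketch of driving the Elf reader with the new signatures, assuming the std.elf re-export; the path argument is just a placeholder:
const std = @import("std");
const mem = std.mem;
const elf = std.elf;

fn seekToDebugInfo(allocator: *mem.Allocator, exe_path: []const u8) !void {
    var elf_file: elf.Elf = undefined;
    try elf_file.openPath(allocator, exe_path); // openPath passes &elf.prealloc_file along internally
    defer elf_file.close();
    // findSection now returns !?*SectionHeader.
    if (try elf_file.findSection(".debug_info")) |section| {
        try elf_file.seekToSection(section);
    }
}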
diff --git a/std/event.zig b/std/event.zig
index 4604eb8d02a6..89ab816bb6ba 100644
--- a/std/event.zig
+++ b/std/event.zig
@@ -6,9 +6,9 @@ const mem = std.mem;
const posix = std.os.posix;
pub const TcpServer = struct {
- handleRequestFn: async<&mem.Allocator> fn (&TcpServer, &const std.net.Address, &const std.os.File) void,
+ handleRequestFn: async<*mem.Allocator> fn (*TcpServer, *const std.net.Address, *const std.os.File) void,
- loop: &Loop,
+ loop: *Loop,
sockfd: i32,
accept_coro: ?promise,
listen_address: std.net.Address,
@@ -17,7 +17,7 @@ pub const TcpServer = struct {
const PromiseNode = std.LinkedList(promise).Node;
- pub fn init(loop: &Loop) !TcpServer {
+ pub fn init(loop: *Loop) !TcpServer {
const sockfd = try std.os.posixSocket(posix.AF_INET, posix.SOCK_STREAM | posix.SOCK_CLOEXEC | posix.SOCK_NONBLOCK, posix.PROTO_tcp);
errdefer std.os.close(sockfd);
@@ -32,7 +32,7 @@ pub const TcpServer = struct {
};
}
- pub fn listen(self: &TcpServer, address: &const std.net.Address, handleRequestFn: async<&mem.Allocator> fn (&TcpServer, &const std.net.Address, &const std.os.File) void) !void {
+ pub fn listen(self: *TcpServer, address: *const std.net.Address, handleRequestFn: async<*mem.Allocator> fn (*TcpServer, *const std.net.Address, *const std.os.File) void) !void {
self.handleRequestFn = handleRequestFn;
try std.os.posixBind(self.sockfd, &address.os_addr);
@@ -46,13 +46,13 @@ pub const TcpServer = struct {
errdefer self.loop.removeFd(self.sockfd);
}
- pub fn deinit(self: &TcpServer) void {
+ pub fn deinit(self: *TcpServer) void {
self.loop.removeFd(self.sockfd);
if (self.accept_coro) |accept_coro| cancel accept_coro;
std.os.close(self.sockfd);
}
- pub async fn handler(self: &TcpServer) void {
+ pub async fn handler(self: *TcpServer) void {
while (true) {
var accepted_addr: std.net.Address = undefined;
if (std.os.posixAccept(self.sockfd, &accepted_addr.os_addr, posix.SOCK_NONBLOCK | posix.SOCK_CLOEXEC)) |accepted_fd| {
@@ -92,11 +92,11 @@ pub const TcpServer = struct {
};
pub const Loop = struct {
- allocator: &mem.Allocator,
+ allocator: *mem.Allocator,
epollfd: i32,
keep_running: bool,
- fn init(allocator: &mem.Allocator) !Loop {
+ fn init(allocator: *mem.Allocator) !Loop {
const epollfd = try std.os.linuxEpollCreate(std.os.linux.EPOLL_CLOEXEC);
return Loop{
.keep_running = true,
@@ -105,7 +105,7 @@ pub const Loop = struct {
};
}
- pub fn addFd(self: &Loop, fd: i32, prom: promise) !void {
+ pub fn addFd(self: *Loop, fd: i32, prom: promise) !void {
var ev = std.os.linux.epoll_event{
.events = std.os.linux.EPOLLIN | std.os.linux.EPOLLOUT | std.os.linux.EPOLLET,
.data = std.os.linux.epoll_data{ .ptr = @ptrToInt(prom) },
@@ -113,23 +113,23 @@ pub const Loop = struct {
try std.os.linuxEpollCtl(self.epollfd, std.os.linux.EPOLL_CTL_ADD, fd, &ev);
}
- pub fn removeFd(self: &Loop, fd: i32) void {
+ pub fn removeFd(self: *Loop, fd: i32) void {
std.os.linuxEpollCtl(self.epollfd, std.os.linux.EPOLL_CTL_DEL, fd, undefined) catch {};
}
- async fn waitFd(self: &Loop, fd: i32) !void {
+ async fn waitFd(self: *Loop, fd: i32) !void {
defer self.removeFd(fd);
suspend |p| {
try self.addFd(fd, p);
}
}
- pub fn stop(self: &Loop) void {
+ pub fn stop(self: *Loop) void {
// TODO make atomic
self.keep_running = false;
// TODO activate an fd in the epoll set
}
- pub fn run(self: &Loop) void {
+ pub fn run(self: *Loop) void {
while (self.keep_running) {
var events: [16]std.os.linux.epoll_event = undefined;
const count = std.os.linuxEpollWait(self.epollfd, events[0..], -1);
@@ -141,7 +141,7 @@ pub const Loop = struct {
}
};
-pub async fn connect(loop: &Loop, _address: &const std.net.Address) !std.os.File {
+pub async fn connect(loop: *Loop, _address: *const std.net.Address) !std.os.File {
var address = _address.*; // TODO https://github.com/ziglang/zig/issues/733
const sockfd = try std.os.posixSocket(posix.AF_INET, posix.SOCK_STREAM | posix.SOCK_CLOEXEC | posix.SOCK_NONBLOCK, posix.PROTO_tcp);
@@ -163,7 +163,7 @@ test "listen on a port, send bytes, receive bytes" {
tcp_server: TcpServer,
const Self = this;
- async<&mem.Allocator> fn handler(tcp_server: &TcpServer, _addr: &const std.net.Address, _socket: &const std.os.File) void {
+ async<*mem.Allocator> fn handler(tcp_server: *TcpServer, _addr: *const std.net.Address, _socket: *const std.os.File) void {
const self = @fieldParentPtr(Self, "tcp_server", tcp_server);
var socket = _socket.*; // TODO https://github.com/ziglang/zig/issues/733
defer socket.close();
@@ -177,7 +177,7 @@ test "listen on a port, send bytes, receive bytes" {
cancel p;
}
}
- async fn errorableHandler(self: &Self, _addr: &const std.net.Address, _socket: &const std.os.File) !void {
+ async fn errorableHandler(self: *Self, _addr: *const std.net.Address, _socket: *const std.os.File) !void {
const addr = _addr.*; // TODO https://github.com/ziglang/zig/issues/733
var socket = _socket.*; // TODO https://github.com/ziglang/zig/issues/733
@@ -199,7 +199,7 @@ test "listen on a port, send bytes, receive bytes" {
defer cancel p;
loop.run();
}
-async fn doAsyncTest(loop: &Loop, address: &const std.net.Address) void {
+async fn doAsyncTest(loop: *Loop, address: *const std.net.Address) void {
errdefer @panic("test failure");
var socket_file = try await try async event.connect(loop, address);
diff --git a/std/fmt/errol/index.zig b/std/fmt/errol/index.zig
index 65e8d448a81d..933958ac182a 100644
--- a/std/fmt/errol/index.zig
+++ b/std/fmt/errol/index.zig
@@ -21,7 +21,7 @@ pub const RoundMode = enum {
/// Round a FloatDecimal as returned by errol3 to the specified fractional precision.
/// All digits after the specified precision should be considered invalid.
-pub fn roundToPrecision(float_decimal: &FloatDecimal, precision: usize, mode: RoundMode) void {
+pub fn roundToPrecision(float_decimal: *FloatDecimal, precision: usize, mode: RoundMode) void {
// The round digit refers to the index which we should look at to determine
// whether we need to round to match the specified precision.
var round_digit: usize = 0;
@@ -59,7 +59,7 @@ pub fn roundToPrecision(float_decimal: &FloatDecimal, precision: usize, mode: Ro
float_decimal.exp += 1;
// Re-size the buffer to use the reserved leading byte.
- const one_before = @intToPtr(&u8, @ptrToInt(&float_decimal.digits[0]) - 1);
+ const one_before = @intToPtr(*u8, @ptrToInt(&float_decimal.digits[0]) - 1);
float_decimal.digits = one_before[0 .. float_decimal.digits.len + 1];
float_decimal.digits[0] = '1';
return;
@@ -217,7 +217,7 @@ fn tableLowerBound(k: u64) usize {
/// @in: The HP number.
/// @val: The double.
/// &returns: The HP number.
-fn hpProd(in: &const HP, val: f64) HP {
+fn hpProd(in: *const HP, val: f64) HP {
var hi: f64 = undefined;
var lo: f64 = undefined;
split(in.val, &hi, &lo);
@@ -239,7 +239,7 @@ fn hpProd(in: &const HP, val: f64) HP {
/// @val: The double.
/// @hi: The high bits.
/// @lo: The low bits.
-fn split(val: f64, hi: &f64, lo: &f64) void {
+fn split(val: f64, hi: *f64, lo: *f64) void {
hi.* = gethi(val);
lo.* = val - hi.*;
}
@@ -252,7 +252,7 @@ fn gethi(in: f64) f64 {
/// Normalize the number by factoring in the error.
/// @hp: The float pair.
-fn hpNormalize(hp: &HP) void {
+fn hpNormalize(hp: *HP) void {
// Required to avoid segfaults causing buffer overrun during errol3 digit output termination.
@setFloatMode(this, @import("builtin").FloatMode.Strict);
@@ -264,7 +264,7 @@ fn hpNormalize(hp: &HP) void {
/// Divide the high-precision number by ten.
/// @hp: The high-precision number
-fn hpDiv10(hp: &HP) void {
+fn hpDiv10(hp: *HP) void {
var val = hp.val;
hp.val /= 10.0;
@@ -280,7 +280,7 @@ fn hpDiv10(hp: &HP) void {
/// Multiply the high-precision number by ten.
/// @hp: The high-precision number
-fn hpMul10(hp: &HP) void {
+fn hpMul10(hp: *HP) void {
const val = hp.val;
hp.val *= 10.0;
diff --git a/std/fmt/index.zig b/std/fmt/index.zig
index 0ffbc5989540..b522d9d37d91 100644
--- a/std/fmt/index.zig
+++ b/std/fmt/index.zig
@@ -679,7 +679,7 @@ const FormatIntBuf = struct {
out_buf: []u8,
index: usize,
};
-fn formatIntCallback(context: &FormatIntBuf, bytes: []const u8) (error{}!void) {
+fn formatIntCallback(context: *FormatIntBuf, bytes: []const u8) (error{}!void) {
mem.copy(u8, context.out_buf[context.index..], bytes);
context.index += bytes.len;
}
@@ -751,7 +751,7 @@ const BufPrintContext = struct {
remaining: []u8,
};
-fn bufPrintWrite(context: &BufPrintContext, bytes: []const u8) !void {
+fn bufPrintWrite(context: *BufPrintContext, bytes: []const u8) !void {
if (context.remaining.len < bytes.len) return error.BufferTooSmall;
mem.copy(u8, context.remaining, bytes);
context.remaining = context.remaining[bytes.len..];
@@ -763,14 +763,14 @@ pub fn bufPrint(buf: []u8, comptime fmt: []const u8, args: ...) ![]u8 {
return buf[0 .. buf.len - context.remaining.len];
}
-pub fn allocPrint(allocator: &mem.Allocator, comptime fmt: []const u8, args: ...) ![]u8 {
+pub fn allocPrint(allocator: *mem.Allocator, comptime fmt: []const u8, args: ...) ![]u8 {
var size: usize = 0;
format(&size, error{}, countSize, fmt, args) catch |err| switch (err) {};
const buf = try allocator.alloc(u8, size);
return bufPrint(buf, fmt, args);
}
-fn countSize(size: &usize, bytes: []const u8) (error{}!void) {
+fn countSize(size: *usize, bytes: []const u8) (error{}!void) {
size.* += bytes.len;
}
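bufPrint and allocPrint keep their shapes; only allocPrint's allocator parameter changes to *mem.Allocator. A quick check:
const std = @import("std");

test "bufPrint into a stack buffer" {
    var buf: [32]u8 = undefined;
    const s = std.fmt.bufPrint(buf[0..], "{} + {} = {}", u32(1), u32(2), u32(3)) catch unreachable;
    std.debug.assert(std.mem.eql(u8, s, "1 + 2 = 3"));
}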
diff --git a/std/hash/adler.zig b/std/hash/adler.zig
index 12dab1457c30..9c5966f89bbe 100644
--- a/std/hash/adler.zig
+++ b/std/hash/adler.zig
@@ -18,7 +18,7 @@ pub const Adler32 = struct {
// This fast variant is taken from zlib. It reduces the required modulos and unrolls longer
// buffer inputs and should be much quicker.
- pub fn update(self: &Adler32, input: []const u8) void {
+ pub fn update(self: *Adler32, input: []const u8) void {
var s1 = self.adler & 0xffff;
var s2 = (self.adler >> 16) & 0xffff;
@@ -77,7 +77,7 @@ pub const Adler32 = struct {
self.adler = s1 | (s2 << 16);
}
- pub fn final(self: &Adler32) u32 {
+ pub fn final(self: *Adler32) u32 {
return self.adler;
}
diff --git a/std/hash/crc.zig b/std/hash/crc.zig
index 45bcb70e8bdb..ec831cdc2ed3 100644
--- a/std/hash/crc.zig
+++ b/std/hash/crc.zig
@@ -58,7 +58,7 @@ pub fn Crc32WithPoly(comptime poly: u32) type {
return Self{ .crc = 0xffffffff };
}
- pub fn update(self: &Self, input: []const u8) void {
+ pub fn update(self: *Self, input: []const u8) void {
var i: usize = 0;
while (i + 8 <= input.len) : (i += 8) {
const p = input[i .. i + 8];
@@ -86,7 +86,7 @@ pub fn Crc32WithPoly(comptime poly: u32) type {
}
}
- pub fn final(self: &Self) u32 {
+ pub fn final(self: *Self) u32 {
return ~self.crc;
}
@@ -143,14 +143,14 @@ pub fn Crc32SmallWithPoly(comptime poly: u32) type {
return Self{ .crc = 0xffffffff };
}
- pub fn update(self: &Self, input: []const u8) void {
+ pub fn update(self: *Self, input: []const u8) void {
for (input) |b| {
self.crc = lookup_table[@truncate(u4, self.crc ^ (b >> 0))] ^ (self.crc >> 4);
self.crc = lookup_table[@truncate(u4, self.crc ^ (b >> 4))] ^ (self.crc >> 4);
}
}
- pub fn final(self: &Self) u32 {
+ pub fn final(self: *Self) u32 {
return ~self.crc;
}
diff --git a/std/hash/fnv.zig b/std/hash/fnv.zig
index c2439e0ebc1b..447c99677229 100644
--- a/std/hash/fnv.zig
+++ b/std/hash/fnv.zig
@@ -21,14 +21,14 @@ fn Fnv1a(comptime T: type, comptime prime: T, comptime offset: T) type {
return Self{ .value = offset };
}
- pub fn update(self: &Self, input: []const u8) void {
+ pub fn update(self: *Self, input: []const u8) void {
for (input) |b| {
self.value ^= b;
self.value *%= prime;
}
}
- pub fn final(self: &Self) T {
+ pub fn final(self: *Self) T {
return self.value;
}
diff --git a/std/hash/siphash.zig b/std/hash/siphash.zig
index 750e23d4c885..8a90308a4699 100644
--- a/std/hash/siphash.zig
+++ b/std/hash/siphash.zig
@@ -63,7 +63,7 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
return d;
}
- pub fn update(d: &Self, b: []const u8) void {
+ pub fn update(d: *Self, b: []const u8) void {
var off: usize = 0;
// Partial from previous.
@@ -85,7 +85,7 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
d.msg_len +%= @truncate(u8, b.len);
}
- pub fn final(d: &Self) T {
+ pub fn final(d: *Self) T {
// Padding
mem.set(u8, d.buf[d.buf_len..], 0);
d.buf[7] = d.msg_len;
@@ -118,7 +118,7 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
return (u128(b2) << 64) | b1;
}
- fn round(d: &Self, b: []const u8) void {
+ fn round(d: *Self, b: []const u8) void {
debug.assert(b.len == 8);
const m = mem.readInt(b[0..], u64, Endian.Little);
@@ -132,7 +132,7 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
d.v0 ^= m;
}
- fn sipRound(d: &Self) void {
+ fn sipRound(d: *Self) void {
d.v0 +%= d.v1;
d.v1 = math.rotl(u64, d.v1, u64(13));
d.v1 ^= d.v0;
diff --git a/std/hash_map.zig b/std/hash_map.zig
index f51b9c66bada..a323cdc197a5 100644
--- a/std/hash_map.zig
+++ b/std/hash_map.zig
@@ -14,7 +14,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
entries: []Entry,
size: usize,
max_distance_from_start_index: usize,
- allocator: &Allocator,
+ allocator: *Allocator,
// this is used to detect bugs where a hashtable is edited while an iterator is running.
modification_count: debug_u32,
@@ -28,7 +28,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
};
pub const Iterator = struct {
- hm: &const Self,
+ hm: *const Self,
// how many items have we returned
count: usize,
// iterator through the entry array
@@ -36,7 +36,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
// used to detect concurrent modification
initial_modification_count: debug_u32,
- pub fn next(it: &Iterator) ?&Entry {
+ pub fn next(it: *Iterator) ?*Entry {
if (want_modification_safety) {
assert(it.initial_modification_count == it.hm.modification_count); // concurrent modification
}
@@ -53,7 +53,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
}
// Reset the iterator to the initial index
- pub fn reset(it: &Iterator) void {
+ pub fn reset(it: *Iterator) void {
it.count = 0;
it.index = 0;
// Resetting the modification count too
@@ -61,7 +61,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
}
};
- pub fn init(allocator: &Allocator) Self {
+ pub fn init(allocator: *Allocator) Self {
return Self{
.entries = []Entry{},
.allocator = allocator,
@@ -71,11 +71,11 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
};
}
- pub fn deinit(hm: &const Self) void {
+ pub fn deinit(hm: *const Self) void {
hm.allocator.free(hm.entries);
}
- pub fn clear(hm: &Self) void {
+ pub fn clear(hm: *Self) void {
for (hm.entries) |*entry| {
entry.used = false;
}
@@ -84,12 +84,12 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
hm.incrementModificationCount();
}
- pub fn count(hm: &const Self) usize {
+ pub fn count(hm: *const Self) usize {
return hm.size;
}
/// Returns the value that was already there.
- pub fn put(hm: &Self, key: K, value: &const V) !?V {
+ pub fn put(hm: *Self, key: K, value: *const V) !?V {
if (hm.entries.len == 0) {
try hm.initCapacity(16);
}
@@ -111,18 +111,18 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
return hm.internalPut(key, value);
}
- pub fn get(hm: &const Self, key: K) ?&Entry {
+ pub fn get(hm: *const Self, key: K) ?*Entry {
if (hm.entries.len == 0) {
return null;
}
return hm.internalGet(key);
}
- pub fn contains(hm: &const Self, key: K) bool {
+ pub fn contains(hm: *const Self, key: K) bool {
return hm.get(key) != null;
}
- pub fn remove(hm: &Self, key: K) ?&Entry {
+ pub fn remove(hm: *Self, key: K) ?*Entry {
if (hm.entries.len == 0) return null;
hm.incrementModificationCount();
const start_index = hm.keyToIndex(key);
@@ -154,7 +154,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
return null;
}
- pub fn iterator(hm: &const Self) Iterator {
+ pub fn iterator(hm: *const Self) Iterator {
return Iterator{
.hm = hm,
.count = 0,
@@ -163,7 +163,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
};
}
- fn initCapacity(hm: &Self, capacity: usize) !void {
+ fn initCapacity(hm: *Self, capacity: usize) !void {
hm.entries = try hm.allocator.alloc(Entry, capacity);
hm.size = 0;
hm.max_distance_from_start_index = 0;
@@ -172,14 +172,14 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
}
}
- fn incrementModificationCount(hm: &Self) void {
+ fn incrementModificationCount(hm: *Self) void {
if (want_modification_safety) {
hm.modification_count +%= 1;
}
}
/// Returns the value that was already there.
- fn internalPut(hm: &Self, orig_key: K, orig_value: &const V) ?V {
+ fn internalPut(hm: *Self, orig_key: K, orig_value: *const V) ?V {
var key = orig_key;
var value = orig_value.*;
const start_index = hm.keyToIndex(key);
@@ -231,7 +231,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
unreachable; // put into a full map
}
- fn internalGet(hm: &const Self, key: K) ?&Entry {
+ fn internalGet(hm: *const Self, key: K) ?*Entry {
const start_index = hm.keyToIndex(key);
{
var roll_over: usize = 0;
@@ -246,7 +246,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
return null;
}
- fn keyToIndex(hm: &const Self, key: K) usize {
+ fn keyToIndex(hm: *const Self, key: K) usize {
return usize(hash(key)) % hm.entries.len;
}
};
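A minimal HashMap sketch under the new signatures; the identity hash and the std.HashMap re-export are assumptions for illustration, and note that put takes its value by *const V:
const std = @import("std");

fn hashU32(x: u32) u32 {
    return x; // identity "hash", purely for illustration
}

fn eqlU32(a: u32, b: u32) bool {
    return a == b;
}

test "hash map basic usage" {
    var map = std.HashMap(u32, []const u8, hashU32, eqlU32).init(std.debug.global_allocator);
    defer map.deinit();
    const value: []const u8 = "answer";
    _ = map.put(42, &value) catch unreachable; // &value matches the *const V parameter
    if (map.get(42)) |entry| {
        std.debug.assert(std.mem.eql(u8, entry.value, "answer"));
    } else {
        unreachable;
    }
}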
diff --git a/std/heap.zig b/std/heap.zig
index 8d4938a7c396..d15a99a75714 100644
--- a/std/heap.zig
+++ b/std/heap.zig
@@ -16,15 +16,15 @@ var c_allocator_state = Allocator{
.freeFn = cFree,
};
-fn cAlloc(self: &Allocator, n: usize, alignment: u29) ![]u8 {
+fn cAlloc(self: *Allocator, n: usize, alignment: u29) ![]u8 {
assert(alignment <= @alignOf(c_longdouble));
- return if (c.malloc(n)) |buf| @ptrCast(&u8, buf)[0..n] else error.OutOfMemory;
+ return if (c.malloc(n)) |buf| @ptrCast(*u8, buf)[0..n] else error.OutOfMemory;
}
-fn cRealloc(self: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
- const old_ptr = @ptrCast(&c_void, old_mem.ptr);
+fn cRealloc(self: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+ const old_ptr = @ptrCast(*c_void, old_mem.ptr);
if (c.realloc(old_ptr, new_size)) |buf| {
- return @ptrCast(&u8, buf)[0..new_size];
+ return @ptrCast(*u8, buf)[0..new_size];
} else if (new_size <= old_mem.len) {
return old_mem[0..new_size];
} else {
@@ -32,8 +32,8 @@ fn cRealloc(self: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![
}
}
-fn cFree(self: &Allocator, old_mem: []u8) void {
- const old_ptr = @ptrCast(&c_void, old_mem.ptr);
+fn cFree(self: *Allocator, old_mem: []u8) void {
+ const old_ptr = @ptrCast(*c_void, old_mem.ptr);
c.free(old_ptr);
}
@@ -55,7 +55,7 @@ pub const DirectAllocator = struct {
};
}
- pub fn deinit(self: &DirectAllocator) void {
+ pub fn deinit(self: *DirectAllocator) void {
switch (builtin.os) {
Os.windows => if (self.heap_handle) |heap_handle| {
_ = os.windows.HeapDestroy(heap_handle);
@@ -64,7 +64,7 @@ pub const DirectAllocator = struct {
}
}
- fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 {
+ fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(DirectAllocator, "allocator", allocator);
switch (builtin.os) {
@@ -74,7 +74,7 @@ pub const DirectAllocator = struct {
const addr = p.mmap(null, alloc_size, p.PROT_READ | p.PROT_WRITE, p.MAP_PRIVATE | p.MAP_ANONYMOUS, -1, 0);
if (addr == p.MAP_FAILED) return error.OutOfMemory;
- if (alloc_size == n) return @intToPtr(&u8, addr)[0..n];
+ if (alloc_size == n) return @intToPtr(*u8, addr)[0..n];
var aligned_addr = addr & ~usize(alignment - 1);
aligned_addr += alignment;
@@ -93,7 +93,7 @@ pub const DirectAllocator = struct {
//It is impossible that there is an unoccupied page at the top of our
// mmap.
- return @intToPtr(&u8, aligned_addr)[0..n];
+ return @intToPtr(*u8, aligned_addr)[0..n];
},
Os.windows => {
const amt = n + alignment + @sizeOf(usize);
@@ -108,14 +108,14 @@ pub const DirectAllocator = struct {
const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
const adjusted_addr = root_addr + march_forward_bytes;
const record_addr = adjusted_addr + n;
- @intToPtr(&align(1) usize, record_addr).* = root_addr;
- return @intToPtr(&u8, adjusted_addr)[0..n];
+ @intToPtr(*align(1) usize, record_addr).* = root_addr;
+ return @intToPtr(*u8, adjusted_addr)[0..n];
},
else => @compileError("Unsupported OS"),
}
}
- fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+ fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(DirectAllocator, "allocator", allocator);
switch (builtin.os) {
@@ -139,13 +139,13 @@ pub const DirectAllocator = struct {
Os.windows => {
const old_adjusted_addr = @ptrToInt(old_mem.ptr);
const old_record_addr = old_adjusted_addr + old_mem.len;
- const root_addr = @intToPtr(&align(1) usize, old_record_addr).*;
+ const root_addr = @intToPtr(*align(1) usize, old_record_addr).*;
const old_ptr = @intToPtr(os.windows.LPVOID, root_addr);
const amt = new_size + alignment + @sizeOf(usize);
const new_ptr = os.windows.HeapReAlloc(??self.heap_handle, 0, old_ptr, amt) ?? blk: {
if (new_size > old_mem.len) return error.OutOfMemory;
const new_record_addr = old_record_addr - new_size + old_mem.len;
- @intToPtr(&align(1) usize, new_record_addr).* = root_addr;
+ @intToPtr(*align(1) usize, new_record_addr).* = root_addr;
return old_mem[0..new_size];
};
const offset = old_adjusted_addr - root_addr;
@@ -153,14 +153,14 @@ pub const DirectAllocator = struct {
const new_adjusted_addr = new_root_addr + offset;
assert(new_adjusted_addr % alignment == 0);
const new_record_addr = new_adjusted_addr + new_size;
- @intToPtr(&align(1) usize, new_record_addr).* = new_root_addr;
- return @intToPtr(&u8, new_adjusted_addr)[0..new_size];
+ @intToPtr(*align(1) usize, new_record_addr).* = new_root_addr;
+ return @intToPtr(*u8, new_adjusted_addr)[0..new_size];
},
else => @compileError("Unsupported OS"),
}
}
- fn free(allocator: &Allocator, bytes: []u8) void {
+ fn free(allocator: *Allocator, bytes: []u8) void {
const self = @fieldParentPtr(DirectAllocator, "allocator", allocator);
switch (builtin.os) {
@@ -169,7 +169,7 @@ pub const DirectAllocator = struct {
},
Os.windows => {
const record_addr = @ptrToInt(bytes.ptr) + bytes.len;
- const root_addr = @intToPtr(&align(1) usize, record_addr).*;
+ const root_addr = @intToPtr(*align(1) usize, record_addr).*;
const ptr = @intToPtr(os.windows.LPVOID, root_addr);
_ = os.windows.HeapFree(??self.heap_handle, 0, ptr);
},
@@ -183,13 +183,13 @@ pub const DirectAllocator = struct {
pub const ArenaAllocator = struct {
pub allocator: Allocator,
- child_allocator: &Allocator,
+ child_allocator: *Allocator,
buffer_list: std.LinkedList([]u8),
end_index: usize,
const BufNode = std.LinkedList([]u8).Node;
- pub fn init(child_allocator: &Allocator) ArenaAllocator {
+ pub fn init(child_allocator: *Allocator) ArenaAllocator {
return ArenaAllocator{
.allocator = Allocator{
.allocFn = alloc,
@@ -202,7 +202,7 @@ pub const ArenaAllocator = struct {
};
}
- pub fn deinit(self: &ArenaAllocator) void {
+ pub fn deinit(self: *ArenaAllocator) void {
var it = self.buffer_list.first;
while (it) |node| {
// this has to occur before the free because the free frees node
@@ -212,7 +212,7 @@ pub const ArenaAllocator = struct {
}
}
- fn createNode(self: &ArenaAllocator, prev_len: usize, minimum_size: usize) !&BufNode {
+ fn createNode(self: *ArenaAllocator, prev_len: usize, minimum_size: usize) !*BufNode {
const actual_min_size = minimum_size + @sizeOf(BufNode);
var len = prev_len;
while (true) {
@@ -233,7 +233,7 @@ pub const ArenaAllocator = struct {
return buf_node;
}
- fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 {
+ fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);
var cur_node = if (self.buffer_list.last) |last_node| last_node else try self.createNode(0, n + alignment);
@@ -254,7 +254,7 @@ pub const ArenaAllocator = struct {
}
}
- fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+ fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
if (new_size <= old_mem.len) {
return old_mem[0..new_size];
} else {
@@ -264,7 +264,7 @@ pub const ArenaAllocator = struct {
}
}
- fn free(allocator: &Allocator, bytes: []u8) void {}
+ fn free(allocator: *Allocator, bytes: []u8) void {}
};
pub const FixedBufferAllocator = struct {
@@ -284,7 +284,7 @@ pub const FixedBufferAllocator = struct {
};
}
- fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 {
+ fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
const addr = @ptrToInt(self.buffer.ptr) + self.end_index;
const rem = @rem(addr, alignment);
@@ -300,7 +300,7 @@ pub const FixedBufferAllocator = struct {
return result;
}
- fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+ fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
if (new_size <= old_mem.len) {
return old_mem[0..new_size];
} else {
@@ -310,7 +310,7 @@ pub const FixedBufferAllocator = struct {
}
}
- fn free(allocator: &Allocator, bytes: []u8) void {}
+ fn free(allocator: *Allocator, bytes: []u8) void {}
};
/// lock free
@@ -331,7 +331,7 @@ pub const ThreadSafeFixedBufferAllocator = struct {
};
}
- fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 {
+ fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator);
var end_index = @atomicLoad(usize, &self.end_index, builtin.AtomicOrder.SeqCst);
while (true) {
@@ -347,7 +347,7 @@ pub const ThreadSafeFixedBufferAllocator = struct {
}
}
- fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+ fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
if (new_size <= old_mem.len) {
return old_mem[0..new_size];
} else {
@@ -357,7 +357,7 @@ pub const ThreadSafeFixedBufferAllocator = struct {
}
}
- fn free(allocator: &Allocator, bytes: []u8) void {}
+ fn free(allocator: *Allocator, bytes: []u8) void {}
};
test "c_allocator" {
@@ -403,8 +403,8 @@ test "ThreadSafeFixedBufferAllocator" {
try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator);
}
-fn testAllocator(allocator: &mem.Allocator) !void {
- var slice = try allocator.alloc(&i32, 100);
+fn testAllocator(allocator: *mem.Allocator) !void {
+ var slice = try allocator.alloc(*i32, 100);
for (slice) |*item, i| {
item.* = try allocator.create(i32);
@@ -415,15 +415,15 @@ fn testAllocator(allocator: &mem.Allocator) !void {
allocator.destroy(item);
}
- slice = try allocator.realloc(&i32, slice, 20000);
- slice = try allocator.realloc(&i32, slice, 50);
- slice = try allocator.realloc(&i32, slice, 25);
- slice = try allocator.realloc(&i32, slice, 10);
+ slice = try allocator.realloc(*i32, slice, 20000);
+ slice = try allocator.realloc(*i32, slice, 50);
+ slice = try allocator.realloc(*i32, slice, 25);
+ slice = try allocator.realloc(*i32, slice, 10);
allocator.free(slice);
}
-fn testAllocatorLargeAlignment(allocator: &mem.Allocator) mem.Allocator.Error!void {
+fn testAllocatorLargeAlignment(allocator: *mem.Allocator) mem.Allocator.Error!void {
//Maybe a platform's page_size is actually the same as or
// very near usize?
if (os.page_size << 2 > @maxValue(usize)) return;
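For callers of std.heap the change is purely at the type level: an allocator handle is now passed as *Allocator instead of &Allocator, and &arena.allocator is still how its address is taken. A small usage sketch, assuming this era's DirectAllocator.init() and the alloc/free helpers shown above (the sumFirstN helper is hypothetical):

const std = @import("std");

// Hypothetical helper: fills a buffer with 0..n-1 and sums it.
fn sumFirstN(allocator: *std.mem.Allocator, n: usize) !usize {
    const buf = try allocator.alloc(usize, n);
    defer allocator.free(buf);
    var total: usize = 0;
    for (buf) |*item, i| {
        item.* = i;
        total += i;
    }
    return total;
}

test "passing *Allocator through an ArenaAllocator" {
    var direct = std.heap.DirectAllocator.init();
    defer direct.deinit();

    var arena = std.heap.ArenaAllocator.init(&direct.allocator);
    defer arena.deinit();

    std.debug.assert((try sumFirstN(&arena.allocator, 10)) == 45);
}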
diff --git a/std/io.zig b/std/io.zig
index 39d319159e2e..e20a284e4e3f 100644
--- a/std/io.zig
+++ b/std/io.zig
@@ -34,20 +34,20 @@ pub fn getStdIn() GetStdIoErrs!File {
/// Implementation of InStream trait for File
pub const FileInStream = struct {
- file: &File,
+ file: *File,
stream: Stream,
pub const Error = @typeOf(File.read).ReturnType.ErrorSet;
pub const Stream = InStream(Error);
- pub fn init(file: &File) FileInStream {
+ pub fn init(file: *File) FileInStream {
return FileInStream{
.file = file,
.stream = Stream{ .readFn = readFn },
};
}
- fn readFn(in_stream: &Stream, buffer: []u8) Error!usize {
+ fn readFn(in_stream: *Stream, buffer: []u8) Error!usize {
const self = @fieldParentPtr(FileInStream, "stream", in_stream);
return self.file.read(buffer);
}
@@ -55,20 +55,20 @@ pub const FileInStream = struct {
/// Implementation of OutStream trait for File
pub const FileOutStream = struct {
- file: &File,
+ file: *File,
stream: Stream,
pub const Error = File.WriteError;
pub const Stream = OutStream(Error);
- pub fn init(file: &File) FileOutStream {
+ pub fn init(file: *File) FileOutStream {
return FileOutStream{
.file = file,
.stream = Stream{ .writeFn = writeFn },
};
}
- fn writeFn(out_stream: &Stream, bytes: []const u8) !void {
+ fn writeFn(out_stream: *Stream, bytes: []const u8) !void {
const self = @fieldParentPtr(FileOutStream, "stream", out_stream);
return self.file.write(bytes);
}
@@ -82,12 +82,12 @@ pub fn InStream(comptime ReadError: type) type {
/// Return the number of bytes read. If the number read is smaller than buf.len, it
/// means the stream reached the end. Reaching the end of a stream is not an error
/// condition.
- readFn: fn (self: &Self, buffer: []u8) Error!usize,
+ readFn: fn (self: *Self, buffer: []u8) Error!usize,
/// Replaces `buffer` contents by reading from the stream until it is finished.
/// If `buffer.len()` would exceed `max_size`, `error.StreamTooLong` is returned and
/// the contents read from the stream are lost.
- pub fn readAllBuffer(self: &Self, buffer: &Buffer, max_size: usize) !void {
+ pub fn readAllBuffer(self: *Self, buffer: *Buffer, max_size: usize) !void {
try buffer.resize(0);
var actual_buf_len: usize = 0;
@@ -111,7 +111,7 @@ pub fn InStream(comptime ReadError: type) type {
/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
/// Caller owns returned memory.
/// If this function returns an error, the contents from the stream read so far are lost.
- pub fn readAllAlloc(self: &Self, allocator: &mem.Allocator, max_size: usize) ![]u8 {
+ pub fn readAllAlloc(self: *Self, allocator: *mem.Allocator, max_size: usize) ![]u8 {
var buf = Buffer.initNull(allocator);
defer buf.deinit();
@@ -123,7 +123,7 @@ pub fn InStream(comptime ReadError: type) type {
/// Does not include the delimiter in the result.
/// If `buffer.len()` would exceed `max_size`, `error.StreamTooLong` is returned and the contents
/// read from the stream so far are lost.
- pub fn readUntilDelimiterBuffer(self: &Self, buffer: &Buffer, delimiter: u8, max_size: usize) !void {
+ pub fn readUntilDelimiterBuffer(self: *Self, buffer: *Buffer, delimiter: u8, max_size: usize) !void {
try buffer.resize(0);
while (true) {
@@ -145,7 +145,7 @@ pub fn InStream(comptime ReadError: type) type {
/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
/// Caller owns returned memory.
/// If this function returns an error, the contents from the stream read so far are lost.
- pub fn readUntilDelimiterAlloc(self: &Self, allocator: &mem.Allocator, delimiter: u8, max_size: usize) ![]u8 {
+ pub fn readUntilDelimiterAlloc(self: *Self, allocator: *mem.Allocator, delimiter: u8, max_size: usize) ![]u8 {
var buf = Buffer.initNull(allocator);
defer buf.deinit();
@@ -156,43 +156,43 @@ pub fn InStream(comptime ReadError: type) type {
/// Returns the number of bytes read. If the number read is smaller than buf.len, it
/// means the stream reached the end. Reaching the end of a stream is not an error
/// condition.
- pub fn read(self: &Self, buffer: []u8) !usize {
+ pub fn read(self: *Self, buffer: []u8) !usize {
return self.readFn(self, buffer);
}
/// Same as `read` but end of stream returns `error.EndOfStream`.
- pub fn readNoEof(self: &Self, buf: []u8) !void {
+ pub fn readNoEof(self: *Self, buf: []u8) !void {
const amt_read = try self.read(buf);
if (amt_read < buf.len) return error.EndOfStream;
}
/// Reads 1 byte from the stream or returns `error.EndOfStream`.
- pub fn readByte(self: &Self) !u8 {
+ pub fn readByte(self: *Self) !u8 {
var result: [1]u8 = undefined;
try self.readNoEof(result[0..]);
return result[0];
}
/// Same as `readByte` except the returned byte is signed.
- pub fn readByteSigned(self: &Self) !i8 {
+ pub fn readByteSigned(self: *Self) !i8 {
return @bitCast(i8, try self.readByte());
}
- pub fn readIntLe(self: &Self, comptime T: type) !T {
+ pub fn readIntLe(self: *Self, comptime T: type) !T {
return self.readInt(builtin.Endian.Little, T);
}
- pub fn readIntBe(self: &Self, comptime T: type) !T {
+ pub fn readIntBe(self: *Self, comptime T: type) !T {
return self.readInt(builtin.Endian.Big, T);
}
- pub fn readInt(self: &Self, endian: builtin.Endian, comptime T: type) !T {
+ pub fn readInt(self: *Self, endian: builtin.Endian, comptime T: type) !T {
var bytes: [@sizeOf(T)]u8 = undefined;
try self.readNoEof(bytes[0..]);
return mem.readInt(bytes, T, endian);
}
- pub fn readVarInt(self: &Self, endian: builtin.Endian, comptime T: type, size: usize) !T {
+ pub fn readVarInt(self: *Self, endian: builtin.Endian, comptime T: type, size: usize) !T {
assert(size <= @sizeOf(T));
assert(size <= 8);
var input_buf: [8]u8 = undefined;
@@ -208,22 +208,22 @@ pub fn OutStream(comptime WriteError: type) type {
const Self = this;
pub const Error = WriteError;
- writeFn: fn (self: &Self, bytes: []const u8) Error!void,
+ writeFn: fn (self: *Self, bytes: []const u8) Error!void,
- pub fn print(self: &Self, comptime format: []const u8, args: ...) !void {
+ pub fn print(self: *Self, comptime format: []const u8, args: ...) !void {
return std.fmt.format(self, Error, self.writeFn, format, args);
}
- pub fn write(self: &Self, bytes: []const u8) !void {
+ pub fn write(self: *Self, bytes: []const u8) !void {
return self.writeFn(self, bytes);
}
- pub fn writeByte(self: &Self, byte: u8) !void {
+ pub fn writeByte(self: *Self, byte: u8) !void {
const slice = (&byte)[0..1];
return self.writeFn(self, slice);
}
- pub fn writeByteNTimes(self: &Self, byte: u8, n: usize) !void {
+ pub fn writeByteNTimes(self: *Self, byte: u8, n: usize) !void {
const slice = (&byte)[0..1];
var i: usize = 0;
while (i < n) : (i += 1) {
@@ -234,14 +234,14 @@ pub fn OutStream(comptime WriteError: type) type {
}
/// `path` needs to be copied in memory to add a null terminating byte, hence the allocator.
-pub fn writeFile(allocator: &mem.Allocator, path: []const u8, data: []const u8) !void {
+pub fn writeFile(allocator: *mem.Allocator, path: []const u8, data: []const u8) !void {
var file = try File.openWrite(allocator, path);
defer file.close();
try file.write(data);
}
/// On success, caller owns returned buffer.
-pub fn readFileAlloc(allocator: &mem.Allocator, path: []const u8) ![]u8 {
+pub fn readFileAlloc(allocator: *mem.Allocator, path: []const u8) ![]u8 {
var file = try File.openRead(allocator, path);
defer file.close();
@@ -265,13 +265,13 @@ pub fn BufferedInStreamCustom(comptime buffer_size: usize, comptime Error: type)
pub stream: Stream,
- unbuffered_in_stream: &Stream,
+ unbuffered_in_stream: *Stream,
buffer: [buffer_size]u8,
start_index: usize,
end_index: usize,
- pub fn init(unbuffered_in_stream: &Stream) Self {
+ pub fn init(unbuffered_in_stream: *Stream) Self {
return Self{
.unbuffered_in_stream = unbuffered_in_stream,
.buffer = undefined,
@@ -287,7 +287,7 @@ pub fn BufferedInStreamCustom(comptime buffer_size: usize, comptime Error: type)
};
}
- fn readFn(in_stream: &Stream, dest: []u8) !usize {
+ fn readFn(in_stream: *Stream, dest: []u8) !usize {
const self = @fieldParentPtr(Self, "stream", in_stream);
var dest_index: usize = 0;
@@ -338,12 +338,12 @@ pub fn BufferedOutStreamCustom(comptime buffer_size: usize, comptime OutStreamEr
pub stream: Stream,
- unbuffered_out_stream: &Stream,
+ unbuffered_out_stream: *Stream,
buffer: [buffer_size]u8,
index: usize,
- pub fn init(unbuffered_out_stream: &Stream) Self {
+ pub fn init(unbuffered_out_stream: *Stream) Self {
return Self{
.unbuffered_out_stream = unbuffered_out_stream,
.buffer = undefined,
@@ -352,12 +352,12 @@ pub fn BufferedOutStreamCustom(comptime buffer_size: usize, comptime OutStreamEr
};
}
- pub fn flush(self: &Self) !void {
+ pub fn flush(self: *Self) !void {
try self.unbuffered_out_stream.write(self.buffer[0..self.index]);
self.index = 0;
}
- fn writeFn(out_stream: &Stream, bytes: []const u8) !void {
+ fn writeFn(out_stream: *Stream, bytes: []const u8) !void {
const self = @fieldParentPtr(Self, "stream", out_stream);
if (bytes.len >= self.buffer.len) {
@@ -383,20 +383,20 @@ pub fn BufferedOutStreamCustom(comptime buffer_size: usize, comptime OutStreamEr
/// Implementation of OutStream trait for Buffer
pub const BufferOutStream = struct {
- buffer: &Buffer,
+ buffer: *Buffer,
stream: Stream,
pub const Error = error{OutOfMemory};
pub const Stream = OutStream(Error);
- pub fn init(buffer: &Buffer) BufferOutStream {
+ pub fn init(buffer: *Buffer) BufferOutStream {
return BufferOutStream{
.buffer = buffer,
.stream = Stream{ .writeFn = writeFn },
};
}
- fn writeFn(out_stream: &Stream, bytes: []const u8) !void {
+ fn writeFn(out_stream: *Stream, bytes: []const u8) !void {
const self = @fieldParentPtr(BufferOutStream, "stream", out_stream);
return self.buffer.append(bytes);
}
@@ -407,7 +407,7 @@ pub const BufferedAtomicFile = struct {
file_stream: FileOutStream,
buffered_stream: BufferedOutStream(FileOutStream.Error),
- pub fn create(allocator: &mem.Allocator, dest_path: []const u8) !&BufferedAtomicFile {
+ pub fn create(allocator: *mem.Allocator, dest_path: []const u8) !*BufferedAtomicFile {
// TODO with well defined copy elision we don't need this allocation
var self = try allocator.create(BufferedAtomicFile);
errdefer allocator.destroy(self);
@@ -427,18 +427,18 @@ pub const BufferedAtomicFile = struct {
}
/// always call destroy, even after successful finish()
- pub fn destroy(self: &BufferedAtomicFile) void {
+ pub fn destroy(self: *BufferedAtomicFile) void {
const allocator = self.atomic_file.allocator;
self.atomic_file.deinit();
allocator.destroy(self);
}
- pub fn finish(self: &BufferedAtomicFile) !void {
+ pub fn finish(self: *BufferedAtomicFile) !void {
try self.buffered_stream.flush();
try self.atomic_file.finish();
}
- pub fn stream(self: &BufferedAtomicFile) &OutStream(FileOutStream.Error) {
+ pub fn stream(self: *BufferedAtomicFile) *OutStream(FileOutStream.Error) {
return &self.buffered_stream.stream;
}
};
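The stream traits keep the same shape; only their fields and parameters move from & to *. A hypothetical sketch of writing into a Buffer through BufferOutStream after this change (Buffer.toSliceConst is assumed from this era's std.Buffer API; it does not appear in the patch):

const std = @import("std");

test "BufferOutStream now stores a *Buffer" {
    var direct = std.heap.DirectAllocator.init();
    defer direct.deinit();

    var buf = try std.Buffer.initSize(&direct.allocator, 0);
    defer buf.deinit();

    // init takes *Buffer; the address-of expression is unchanged.
    var buf_stream = std.io.BufferOutStream.init(&buf);
    const out = &buf_stream.stream; // *OutStream(BufferOutStream.Error)

    try out.write("hello, ");
    try out.print("{}", u32(42));

    std.debug.assert(std.mem.eql(u8, buf.toSliceConst(), "hello, 42"));
}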
diff --git a/std/json.zig b/std/json.zig
index 9de8f0b53e83..c8aef7688b9a 100644
--- a/std/json.zig
+++ b/std/json.zig
@@ -76,7 +76,7 @@ pub const Token = struct {
}
// Slice into the underlying input string.
- pub fn slice(self: &const Token, input: []const u8, i: usize) []const u8 {
+ pub fn slice(self: *const Token, input: []const u8, i: usize) []const u8 {
return input[i + self.offset - self.count .. i + self.offset];
}
};
@@ -115,7 +115,7 @@ pub const StreamingJsonParser = struct {
return p;
}
- pub fn reset(p: &StreamingJsonParser) void {
+ pub fn reset(p: *StreamingJsonParser) void {
p.state = State.TopLevelBegin;
p.count = 0;
// Set before ever read in main transition function
@@ -205,7 +205,7 @@ pub const StreamingJsonParser = struct {
// tokens. token2 is always null if token1 is null.
//
// There is currently no error recovery on a bad stream.
- pub fn feed(p: &StreamingJsonParser, c: u8, token1: &?Token, token2: &?Token) Error!void {
+ pub fn feed(p: *StreamingJsonParser, c: u8, token1: *?Token, token2: *?Token) Error!void {
token1.* = null;
token2.* = null;
p.count += 1;
@@ -217,7 +217,7 @@ pub const StreamingJsonParser = struct {
}
// Perform a single transition on the state machine and return any possible token.
- fn transition(p: &StreamingJsonParser, c: u8, token: &?Token) Error!bool {
+ fn transition(p: *StreamingJsonParser, c: u8, token: *?Token) Error!bool {
switch (p.state) {
State.TopLevelBegin => switch (c) {
'{' => {
@@ -861,7 +861,7 @@ pub fn validate(s: []const u8) bool {
var token1: ?Token = undefined;
var token2: ?Token = undefined;
- p.feed(c, &token1, &token2) catch |err| {
+ p.feed(c, &token1, &token2) catch |err| {
return false;
};
}
@@ -878,7 +878,7 @@ pub const ValueTree = struct {
arena: ArenaAllocator,
root: Value,
- pub fn deinit(self: &ValueTree) void {
+ pub fn deinit(self: *ValueTree) void {
self.arena.deinit();
}
};
@@ -894,7 +894,7 @@ pub const Value = union(enum) {
Array: ArrayList(Value),
Object: ObjectMap,
- pub fn dump(self: &const Value) void {
+ pub fn dump(self: *const Value) void {
switch (self.*) {
Value.Null => {
std.debug.warn("null");
@@ -941,7 +941,7 @@ pub const Value = union(enum) {
}
}
- pub fn dumpIndent(self: &const Value, indent: usize) void {
+ pub fn dumpIndent(self: *const Value, indent: usize) void {
if (indent == 0) {
self.dump();
} else {
@@ -949,7 +949,7 @@ pub const Value = union(enum) {
}
}
- fn dumpIndentLevel(self: &const Value, indent: usize, level: usize) void {
+ fn dumpIndentLevel(self: *const Value, indent: usize, level: usize) void {
switch (self.*) {
Value.Null => {
std.debug.warn("null");
@@ -1013,7 +1013,7 @@ pub const Value = union(enum) {
// A non-stream JSON parser which constructs a tree of Value's.
pub const JsonParser = struct {
- allocator: &Allocator,
+ allocator: *Allocator,
state: State,
copy_strings: bool,
// Stores parent nodes and un-combined Values.
@@ -1026,7 +1026,7 @@ pub const JsonParser = struct {
Simple,
};
- pub fn init(allocator: &Allocator, copy_strings: bool) JsonParser {
+ pub fn init(allocator: *Allocator, copy_strings: bool) JsonParser {
return JsonParser{
.allocator = allocator,
.state = State.Simple,
@@ -1035,16 +1035,16 @@ pub const JsonParser = struct {
};
}
- pub fn deinit(p: &JsonParser) void {
+ pub fn deinit(p: *JsonParser) void {
p.stack.deinit();
}
- pub fn reset(p: &JsonParser) void {
+ pub fn reset(p: *JsonParser) void {
p.state = State.Simple;
p.stack.shrink(0);
}
- pub fn parse(p: &JsonParser, input: []const u8) !ValueTree {
+ pub fn parse(p: *JsonParser, input: []const u8) !ValueTree {
var mp = StreamingJsonParser.init();
var arena = ArenaAllocator.init(p.allocator);
@@ -1090,7 +1090,7 @@ pub const JsonParser = struct {
// Even though p.allocator exists, we take an explicit allocator so that allocation state
// can be cleaned up on error correctly during a `parse` on call.
- fn transition(p: &JsonParser, allocator: &Allocator, input: []const u8, i: usize, token: &const Token) !void {
+ fn transition(p: *JsonParser, allocator: *Allocator, input: []const u8, i: usize, token: *const Token) !void {
switch (p.state) {
State.ObjectKey => switch (token.id) {
Token.Id.ObjectEnd => {
@@ -1223,7 +1223,7 @@ pub const JsonParser = struct {
}
}
- fn pushToParent(p: &JsonParser, value: &const Value) !void {
+ fn pushToParent(p: *JsonParser, value: *const Value) !void {
switch (p.stack.at(p.stack.len - 1)) {
// Object Parent -> [ ..., object, <key>, value ]
Value.String => |key| {
@@ -1244,14 +1244,14 @@ pub const JsonParser = struct {
}
}
- fn parseString(p: &JsonParser, allocator: &Allocator, token: &const Token, input: []const u8, i: usize) !Value {
+ fn parseString(p: *JsonParser, allocator: *Allocator, token: *const Token, input: []const u8, i: usize) !Value {
// TODO: We don't strictly have to copy values which do not contain any escape
// characters if flagged with the option.
const slice = token.slice(input, i);
return Value{ .String = try mem.dupe(p.allocator, u8, slice) };
}
- fn parseNumber(p: &JsonParser, token: &const Token, input: []const u8, i: usize) !Value {
+ fn parseNumber(p: *JsonParser, token: *const Token, input: []const u8, i: usize) !Value {
return if (token.number_is_integer)
Value{ .Integer = try std.fmt.parseInt(i64, token.slice(input, i), 10) }
else
diff --git a/std/linked_list.zig b/std/linked_list.zig
index c6be08171ed7..fbc0a0c42a80 100644
--- a/std/linked_list.zig
+++ b/std/linked_list.zig
@@ -21,11 +21,11 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// Node inside the linked list wrapping the actual data.
pub const Node = struct {
- prev: ?&Node,
- next: ?&Node,
+ prev: ?*Node,
+ next: ?*Node,
data: T,
- pub fn init(value: &const T) Node {
+ pub fn init(value: *const T) Node {
return Node{
.prev = null,
.next = null,
@@ -38,14 +38,14 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
return Node.init({});
}
- pub fn toData(node: &Node) &ParentType {
+ pub fn toData(node: *Node) *ParentType {
comptime assert(isIntrusive());
return @fieldParentPtr(ParentType, field_name, node);
}
};
- first: ?&Node,
- last: ?&Node,
+ first: ?*Node,
+ last: ?*Node,
len: usize,
/// Initialize a linked list.
@@ -69,7 +69,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// Arguments:
/// node: Pointer to a node in the list.
/// new_node: Pointer to the new node to insert.
- pub fn insertAfter(list: &Self, node: &Node, new_node: &Node) void {
+ pub fn insertAfter(list: *Self, node: *Node, new_node: *Node) void {
new_node.prev = node;
if (node.next) |next_node| {
// Intermediate node.
@@ -90,7 +90,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// Arguments:
/// node: Pointer to a node in the list.
/// new_node: Pointer to the new node to insert.
- pub fn insertBefore(list: &Self, node: &Node, new_node: &Node) void {
+ pub fn insertBefore(list: *Self, node: *Node, new_node: *Node) void {
new_node.next = node;
if (node.prev) |prev_node| {
// Intermediate node.
@@ -110,7 +110,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
///
/// Arguments:
/// new_node: Pointer to the new node to insert.
- pub fn append(list: &Self, new_node: &Node) void {
+ pub fn append(list: *Self, new_node: *Node) void {
if (list.last) |last| {
// Insert after last.
list.insertAfter(last, new_node);
@@ -124,7 +124,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
///
/// Arguments:
/// new_node: Pointer to the new node to insert.
- pub fn prepend(list: &Self, new_node: &Node) void {
+ pub fn prepend(list: *Self, new_node: *Node) void {
if (list.first) |first| {
// Insert before first.
list.insertBefore(first, new_node);
@@ -143,7 +143,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
///
/// Arguments:
/// node: Pointer to the node to be removed.
- pub fn remove(list: &Self, node: &Node) void {
+ pub fn remove(list: *Self, node: *Node) void {
if (node.prev) |prev_node| {
// Intermediate node.
prev_node.next = node.next;
@@ -168,7 +168,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
///
/// Returns:
/// A pointer to the last node in the list.
- pub fn pop(list: &Self) ?&Node {
+ pub fn pop(list: *Self) ?*Node {
const last = list.last ?? return null;
list.remove(last);
return last;
@@ -178,7 +178,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
///
/// Returns:
/// A pointer to the first node in the list.
- pub fn popFirst(list: &Self) ?&Node {
+ pub fn popFirst(list: *Self) ?*Node {
const first = list.first ?? return null;
list.remove(first);
return first;
@@ -191,7 +191,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
///
/// Returns:
/// A pointer to the new node.
- pub fn allocateNode(list: &Self, allocator: &Allocator) !&Node {
+ pub fn allocateNode(list: *Self, allocator: *Allocator) !*Node {
comptime assert(!isIntrusive());
return allocator.create(Node);
}
@@ -201,7 +201,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// Arguments:
/// node: Pointer to the node to deallocate.
/// allocator: Dynamic memory allocator.
- pub fn destroyNode(list: &Self, node: &Node, allocator: &Allocator) void {
+ pub fn destroyNode(list: *Self, node: *Node, allocator: *Allocator) void {
comptime assert(!isIntrusive());
allocator.destroy(node);
}
@@ -214,7 +214,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
///
/// Returns:
/// A pointer to the new node.
- pub fn createNode(list: &Self, data: &const T, allocator: &Allocator) !&Node {
+ pub fn createNode(list: *Self, data: *const T, allocator: *Allocator) !*Node {
comptime assert(!isIntrusive());
var node = try list.allocateNode(allocator);
node.* = Node.init(data);
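List usage is unchanged apart from node handles being spelled ?*Node; taking a node's address is still &node. A sketch against this era's non-intrusive std.LinkedList wrapper, assuming it exposes init(), append(), pop(), and len as shown above:

const std = @import("std");
const assert = std.debug.assert;

test "LinkedList with *Node pointers" {
    const L = std.LinkedList(u32);
    var list = L.init();

    var node_a = L.Node.init(1);
    var node_b = L.Node.init(2);

    list.append(&node_a); // append takes *Node
    list.append(&node_b);
    assert(list.len == 2);

    const last = list.pop(); // ?*Node
    assert((??last).data == 2);
}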
diff --git a/std/macho.zig b/std/macho.zig
index 615569e4b427..e71ac76b1a5a 100644
--- a/std/macho.zig
+++ b/std/macho.zig
@@ -42,13 +42,13 @@ pub const Symbol = struct {
name: []const u8,
address: u64,
- fn addressLessThan(lhs: &const Symbol, rhs: &const Symbol) bool {
+ fn addressLessThan(lhs: *const Symbol, rhs: *const Symbol) bool {
return lhs.address < rhs.address;
}
};
pub const SymbolTable = struct {
- allocator: &mem.Allocator,
+ allocator: *mem.Allocator,
symbols: []const Symbol,
strings: []const u8,
@@ -56,7 +56,7 @@ pub const SymbolTable = struct {
// Ideally we'd use _mh_execute_header because it's always at 0x100000000
// in the image but as it's located in a different section than executable
// code, its displacement is different.
- pub fn deinit(self: &SymbolTable) void {
+ pub fn deinit(self: *SymbolTable) void {
self.allocator.free(self.symbols);
self.symbols = []const Symbol{};
@@ -64,7 +64,7 @@ pub const SymbolTable = struct {
self.strings = []const u8{};
}
- pub fn search(self: &const SymbolTable, address: usize) ?&const Symbol {
+ pub fn search(self: *const SymbolTable, address: usize) ?*const Symbol {
var min: usize = 0;
var max: usize = self.symbols.len - 1; // Exclude sentinel.
while (min < max) {
@@ -83,7 +83,7 @@ pub const SymbolTable = struct {
}
};
-pub fn loadSymbols(allocator: &mem.Allocator, in: &io.FileInStream) !SymbolTable {
+pub fn loadSymbols(allocator: *mem.Allocator, in: *io.FileInStream) !SymbolTable {
var file = in.file;
try file.seekTo(0);
@@ -160,13 +160,13 @@ pub fn loadSymbols(allocator: &mem.Allocator, in: &io.FileInStream) !SymbolTable
};
}
-fn readNoEof(in: &io.FileInStream, comptime T: type, result: []T) !void {
+fn readNoEof(in: *io.FileInStream, comptime T: type, result: []T) !void {
return in.stream.readNoEof(([]u8)(result));
}
-fn readOneNoEof(in: &io.FileInStream, comptime T: type, result: &T) !void {
+fn readOneNoEof(in: *io.FileInStream, comptime T: type, result: *T) !void {
return readNoEof(in, T, result[0..1]);
}
-fn isSymbol(sym: &const Nlist64) bool {
+fn isSymbol(sym: *const Nlist64) bool {
return sym.n_value != 0 and sym.n_desc == 0;
}
diff --git a/std/math/complex/atan.zig b/std/math/complex/atan.zig
index b7bbf930ebd5..9bfe5fe724b2 100644
--- a/std/math/complex/atan.zig
+++ b/std/math/complex/atan.zig
@@ -29,7 +29,7 @@ fn redupif32(x: f32) f32 {
return ((x - u * DP1) - u * DP2) - t * DP3;
}
-fn atan32(z: &const Complex(f32)) Complex(f32) {
+fn atan32(z: *const Complex(f32)) Complex(f32) {
const maxnum = 1.0e38;
const x = z.re;
@@ -78,7 +78,7 @@ fn redupif64(x: f64) f64 {
return ((x - u * DP1) - u * DP2) - t * DP3;
}
-fn atan64(z: &const Complex(f64)) Complex(f64) {
+fn atan64(z: *const Complex(f64)) Complex(f64) {
const maxnum = 1.0e308;
const x = z.re;
diff --git a/std/math/complex/cosh.zig b/std/math/complex/cosh.zig
index 96eac685564d..c2f9a47b8db7 100644
--- a/std/math/complex/cosh.zig
+++ b/std/math/complex/cosh.zig
@@ -15,7 +15,7 @@ pub fn cosh(z: var) Complex(@typeOf(z.re)) {
};
}
-fn cosh32(z: &const Complex(f32)) Complex(f32) {
+fn cosh32(z: *const Complex(f32)) Complex(f32) {
const x = z.re;
const y = z.im;
@@ -78,7 +78,7 @@ fn cosh32(z: &const Complex(f32)) Complex(f32) {
return Complex(f32).new((x * x) * (y - y), (x + x) * (y - y));
}
-fn cosh64(z: &const Complex(f64)) Complex(f64) {
+fn cosh64(z: *const Complex(f64)) Complex(f64) {
const x = z.re;
const y = z.im;
diff --git a/std/math/complex/exp.zig b/std/math/complex/exp.zig
index 8fe069a43d60..44c354f2469e 100644
--- a/std/math/complex/exp.zig
+++ b/std/math/complex/exp.zig
@@ -16,7 +16,7 @@ pub fn exp(z: var) Complex(@typeOf(z.re)) {
};
}
-fn exp32(z: &const Complex(f32)) Complex(f32) {
+fn exp32(z: *const Complex(f32)) Complex(f32) {
@setFloatMode(this, @import("builtin").FloatMode.Strict);
const exp_overflow = 0x42b17218; // max_exp * ln2 ~= 88.72283955
@@ -63,7 +63,7 @@ fn exp32(z: &const Complex(f32)) Complex(f32) {
}
}
-fn exp64(z: &const Complex(f64)) Complex(f64) {
+fn exp64(z: *const Complex(f64)) Complex(f64) {
const exp_overflow = 0x40862e42; // high bits of max_exp * ln2 ~= 710
const cexp_overflow = 0x4096b8e4; // (max_exp - min_denorm_exp) * ln2
diff --git a/std/math/complex/index.zig b/std/math/complex/index.zig
index 5902ffaa193b..b00296bedae6 100644
--- a/std/math/complex/index.zig
+++ b/std/math/complex/index.zig
@@ -37,28 +37,28 @@ pub fn Complex(comptime T: type) type {
};
}
- pub fn add(self: &const Self, other: &const Self) Self {
+ pub fn add(self: *const Self, other: *const Self) Self {
return Self{
.re = self.re + other.re,
.im = self.im + other.im,
};
}
- pub fn sub(self: &const Self, other: &const Self) Self {
+ pub fn sub(self: *const Self, other: *const Self) Self {
return Self{
.re = self.re - other.re,
.im = self.im - other.im,
};
}
- pub fn mul(self: &const Self, other: &const Self) Self {
+ pub fn mul(self: *const Self, other: *const Self) Self {
return Self{
.re = self.re * other.re - self.im * other.im,
.im = self.im * other.re + self.re * other.im,
};
}
- pub fn div(self: &const Self, other: &const Self) Self {
+ pub fn div(self: *const Self, other: *const Self) Self {
const re_num = self.re * other.re + self.im * other.im;
const im_num = self.im * other.re - self.re * other.im;
const den = other.re * other.re + other.im * other.im;
@@ -69,14 +69,14 @@ pub fn Complex(comptime T: type) type {
};
}
- pub fn conjugate(self: &const Self) Self {
+ pub fn conjugate(self: *const Self) Self {
return Self{
.re = self.re,
.im = -self.im,
};
}
- pub fn reciprocal(self: &const Self) Self {
+ pub fn reciprocal(self: *const Self) Self {
const m = self.re * self.re + self.im * self.im;
return Self{
.re = self.re / m,
@@ -84,7 +84,7 @@ pub fn Complex(comptime T: type) type {
};
}
- pub fn magnitude(self: &const Self) T {
+ pub fn magnitude(self: *const Self) T {
return math.sqrt(self.re * self.re + self.im * self.im);
}
};
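The Complex methods keep value semantics at the call site; only the self/other parameters change to *const Self, and value arguments still coerce to const pointer parameters in this era of the language. A small sketch, not taken from the patch:

const std = @import("std");
const Complex = std.math.complex.Complex;

test "Complex arithmetic with *const Self parameters" {
    const a = Complex(f32).new(1.0, 2.0);
    const b = Complex(f32).new(3.0, -1.0);

    // (1 + 2i) * (3 - i) = 5 + 5i; `b` coerces to *const Complex(f32).
    const c = a.mul(b);

    std.debug.assert(c.re == 5.0 and c.im == 5.0);
}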
diff --git a/std/math/complex/ldexp.zig b/std/math/complex/ldexp.zig
index 7ebefff40c77..a56c2ef2eb26 100644
--- a/std/math/complex/ldexp.zig
+++ b/std/math/complex/ldexp.zig
@@ -14,7 +14,7 @@ pub fn ldexp_cexp(z: var, expt: i32) Complex(@typeOf(z.re)) {
};
}
-fn frexp_exp32(x: f32, expt: &i32) f32 {
+fn frexp_exp32(x: f32, expt: *i32) f32 {
const k = 235; // reduction constant
const kln2 = 162.88958740; // k * ln2
@@ -24,7 +24,7 @@ fn frexp_exp32(x: f32, expt: &i32) f32 {
return @bitCast(f32, (hx & 0x7fffff) | ((0x7f + 127) << 23));
}
-fn ldexp_cexp32(z: &const Complex(f32), expt: i32) Complex(f32) {
+fn ldexp_cexp32(z: *const Complex(f32), expt: i32) Complex(f32) {
var ex_expt: i32 = undefined;
const exp_x = frexp_exp32(z.re, &ex_expt);
const exptf = expt + ex_expt;
@@ -38,7 +38,7 @@ fn ldexp_cexp32(z: &const Complex(f32), expt: i32) Complex(f32) {
return Complex(f32).new(math.cos(z.im) * exp_x * scale1 * scale2, math.sin(z.im) * exp_x * scale1 * scale2);
}
-fn frexp_exp64(x: f64, expt: &i32) f64 {
+fn frexp_exp64(x: f64, expt: *i32) f64 {
const k = 1799; // reduction constant
const kln2 = 1246.97177782734161156; // k * ln2
@@ -54,7 +54,7 @@ fn frexp_exp64(x: f64, expt: &i32) f64 {
return @bitCast(f64, (u64(high_word) << 32) | lx);
}
-fn ldexp_cexp64(z: &const Complex(f64), expt: i32) Complex(f64) {
+fn ldexp_cexp64(z: *const Complex(f64), expt: i32) Complex(f64) {
var ex_expt: i32 = undefined;
const exp_x = frexp_exp64(z.re, &ex_expt);
const exptf = i64(expt + ex_expt);
diff --git a/std/math/complex/pow.zig b/std/math/complex/pow.zig
index bef9fde54203..4c2cd9cf3459 100644
--- a/std/math/complex/pow.zig
+++ b/std/math/complex/pow.zig
@@ -4,7 +4,7 @@ const math = std.math;
const cmath = math.complex;
const Complex = cmath.Complex;
-pub fn pow(comptime T: type, z: &const T, c: &const T) T {
+pub fn pow(comptime T: type, z: *const T, c: *const T) T {
const p = cmath.log(z);
const q = c.mul(p);
return cmath.exp(q);
diff --git a/std/math/complex/sinh.zig b/std/math/complex/sinh.zig
index 09a62ca05826..3d196bfd5020 100644
--- a/std/math/complex/sinh.zig
+++ b/std/math/complex/sinh.zig
@@ -15,7 +15,7 @@ pub fn sinh(z: var) Complex(@typeOf(z.re)) {
};
}
-fn sinh32(z: &const Complex(f32)) Complex(f32) {
+fn sinh32(z: *const Complex(f32)) Complex(f32) {
const x = z.re;
const y = z.im;
@@ -78,7 +78,7 @@ fn sinh32(z: &const Complex(f32)) Complex(f32) {
return Complex(f32).new((x * x) * (y - y), (x + x) * (y - y));
}
-fn sinh64(z: &const Complex(f64)) Complex(f64) {
+fn sinh64(z: *const Complex(f64)) Complex(f64) {
const x = z.re;
const y = z.im;
diff --git a/std/math/complex/sqrt.zig b/std/math/complex/sqrt.zig
index afda69f7c9e4..d4f5a6752813 100644
--- a/std/math/complex/sqrt.zig
+++ b/std/math/complex/sqrt.zig
@@ -15,7 +15,7 @@ pub fn sqrt(z: var) Complex(@typeOf(z.re)) {
};
}
-fn sqrt32(z: &const Complex(f32)) Complex(f32) {
+fn sqrt32(z: *const Complex(f32)) Complex(f32) {
const x = z.re;
const y = z.im;
@@ -57,7 +57,7 @@ fn sqrt32(z: &const Complex(f32)) Complex(f32) {
}
}
-fn sqrt64(z: &const Complex(f64)) Complex(f64) {
+fn sqrt64(z: *const Complex(f64)) Complex(f64) {
// may encounter overflow for im,re >= DBL_MAX / (1 + sqrt(2))
const threshold = 0x1.a827999fcef32p+1022;
diff --git a/std/math/complex/tanh.zig b/std/math/complex/tanh.zig
index 34250b1b4aa6..1d754838a3d5 100644
--- a/std/math/complex/tanh.zig
+++ b/std/math/complex/tanh.zig
@@ -13,7 +13,7 @@ pub fn tanh(z: var) Complex(@typeOf(z.re)) {
};
}
-fn tanh32(z: &const Complex(f32)) Complex(f32) {
+fn tanh32(z: *const Complex(f32)) Complex(f32) {
const x = z.re;
const y = z.im;
@@ -51,7 +51,7 @@ fn tanh32(z: &const Complex(f32)) Complex(f32) {
return Complex(f32).new((beta * rho * s) / den, t / den);
}
-fn tanh64(z: &const Complex(f64)) Complex(f64) {
+fn tanh64(z: *const Complex(f64)) Complex(f64) {
const x = z.re;
const y = z.im;
diff --git a/std/math/hypot.zig b/std/math/hypot.zig
index fe0de3a1ead0..494df22ba6ce 100644
--- a/std/math/hypot.zig
+++ b/std/math/hypot.zig
@@ -52,7 +52,7 @@ fn hypot32(x: f32, y: f32) f32 {
return z * math.sqrt(f32(f64(x) * x + f64(y) * y));
}
-fn sq(hi: &f64, lo: &f64, x: f64) void {
+fn sq(hi: *f64, lo: *f64, x: f64) void {
const split: f64 = 0x1.0p27 + 1.0;
const xc = x * split;
const xh = x - xc + xc;
diff --git a/std/math/index.zig b/std/math/index.zig
index 847e9725006b..33bc1082f7e3 100644
--- a/std/math/index.zig
+++ b/std/math/index.zig
@@ -46,12 +46,12 @@ pub fn forceEval(value: var) void {
switch (T) {
f32 => {
var x: f32 = undefined;
- const p = @ptrCast(&volatile f32, &x);
+ const p = @ptrCast(*volatile f32, &x);
p.* = x;
},
f64 => {
var x: f64 = undefined;
- const p = @ptrCast(&volatile f64, &x);
+ const p = @ptrCast(*volatile f64, &x);
p.* = x;
},
else => {
diff --git a/std/mem.zig b/std/mem.zig
index f4696cff9f2f..aec24e849132 100644
--- a/std/mem.zig
+++ b/std/mem.zig
@@ -13,7 +13,7 @@ pub const Allocator = struct {
/// The returned newly allocated memory is undefined.
/// `alignment` is guaranteed to be >= 1
/// `alignment` is guaranteed to be a power of 2
- allocFn: fn (self: &Allocator, byte_count: usize, alignment: u29) Error![]u8,
+ allocFn: fn (self: *Allocator, byte_count: usize, alignment: u29) Error![]u8,
/// If `new_byte_count > old_mem.len`:
/// * `old_mem.len` is the same as what was returned from allocFn or reallocFn.
@@ -26,22 +26,22 @@ pub const Allocator = struct {
/// The returned newly allocated memory is undefined.
/// `alignment` is guaranteed to be >= 1
/// `alignment` is guaranteed to be a power of 2
- reallocFn: fn (self: &Allocator, old_mem: []u8, new_byte_count: usize, alignment: u29) Error![]u8,
+ reallocFn: fn (self: *Allocator, old_mem: []u8, new_byte_count: usize, alignment: u29) Error![]u8,
/// Guaranteed: `old_mem.len` is the same as what was returned from `allocFn` or `reallocFn`
- freeFn: fn (self: &Allocator, old_mem: []u8) void,
+ freeFn: fn (self: *Allocator, old_mem: []u8) void,
- fn create(self: &Allocator, comptime T: type) !&T {
- if (@sizeOf(T) == 0) return &{};
+ fn create(self: *Allocator, comptime T: type) !*T {
+ if (@sizeOf(T) == 0) return *{};
const slice = try self.alloc(T, 1);
return &slice[0];
}
// TODO once #733 is solved, this will replace create
- fn construct(self: &Allocator, init: var) t: {
+ fn construct(self: *Allocator, init: var) t: {
// TODO this is a workaround for type getting parsed as Error!&const T
const T = @typeOf(init).Child;
- break :t Error!&T;
+ break :t Error!*T;
} {
const T = @typeOf(init).Child;
if (@sizeOf(T) == 0) return &{};
@@ -51,17 +51,17 @@ pub const Allocator = struct {
return ptr;
}
- fn destroy(self: &Allocator, ptr: var) void {
+ fn destroy(self: *Allocator, ptr: var) void {
self.free(ptr[0..1]);
}
- fn alloc(self: &Allocator, comptime T: type, n: usize) ![]T {
+ fn alloc(self: *Allocator, comptime T: type, n: usize) ![]T {
return self.alignedAlloc(T, @alignOf(T), n);
}
- fn alignedAlloc(self: &Allocator, comptime T: type, comptime alignment: u29, n: usize) ![]align(alignment) T {
+ fn alignedAlloc(self: *Allocator, comptime T: type, comptime alignment: u29, n: usize) ![]align(alignment) T {
if (n == 0) {
- return (&align(alignment) T)(undefined)[0..0];
+ return (*align(alignment) T)(undefined)[0..0];
}
const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
const byte_slice = try self.allocFn(self, byte_count, alignment);
@@ -73,17 +73,17 @@ pub const Allocator = struct {
return ([]align(alignment) T)(@alignCast(alignment, byte_slice));
}
- fn realloc(self: &Allocator, comptime T: type, old_mem: []T, n: usize) ![]T {
+ fn realloc(self: *Allocator, comptime T: type, old_mem: []T, n: usize) ![]T {
return self.alignedRealloc(T, @alignOf(T), @alignCast(@alignOf(T), old_mem), n);
}
- fn alignedRealloc(self: &Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) ![]align(alignment) T {
+ fn alignedRealloc(self: *Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) ![]align(alignment) T {
if (old_mem.len == 0) {
return self.alloc(T, n);
}
if (n == 0) {
self.free(old_mem);
- return (&align(alignment) T)(undefined)[0..0];
+ return (*align(alignment) T)(undefined)[0..0];
}
const old_byte_slice = ([]u8)(old_mem);
@@ -102,11 +102,11 @@ pub const Allocator = struct {
/// Reallocate, but `n` must be less than or equal to `old_mem.len`.
/// Unlike `realloc`, this function cannot fail.
/// Shrinking to 0 is the same as calling `free`.
- fn shrink(self: &Allocator, comptime T: type, old_mem: []T, n: usize) []T {
+ fn shrink(self: *Allocator, comptime T: type, old_mem: []T, n: usize) []T {
return self.alignedShrink(T, @alignOf(T), @alignCast(@alignOf(T), old_mem), n);
}
- fn alignedShrink(self: &Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) []align(alignment) T {
+ fn alignedShrink(self: *Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) []align(alignment) T {
if (n == 0) {
self.free(old_mem);
return old_mem[0..0];
@@ -123,10 +123,10 @@ pub const Allocator = struct {
return ([]align(alignment) T)(@alignCast(alignment, byte_slice));
}
- fn free(self: &Allocator, memory: var) void {
+ fn free(self: *Allocator, memory: var) void {
const bytes = ([]const u8)(memory);
if (bytes.len == 0) return;
- const non_const_ptr = @intToPtr(&u8, @ptrToInt(bytes.ptr));
+ const non_const_ptr = @intToPtr(*u8, @ptrToInt(bytes.ptr));
self.freeFn(self, non_const_ptr[0..bytes.len]);
}
};
@@ -186,7 +186,7 @@ pub fn allEqual(comptime T: type, slice: []const T, scalar: T) bool {
}
/// Copies ::m to newly allocated memory. Caller is responsible to free it.
-pub fn dupe(allocator: &Allocator, comptime T: type, m: []const T) ![]T {
+pub fn dupe(allocator: *Allocator, comptime T: type, m: []const T) ![]T {
const new_buf = try allocator.alloc(T, m.len);
copy(T, new_buf, m);
return new_buf;
@@ -457,7 +457,7 @@ pub const SplitIterator = struct {
split_bytes: []const u8,
index: usize,
- pub fn next(self: &SplitIterator) ?[]const u8 {
+ pub fn next(self: *SplitIterator) ?[]const u8 {
// move to beginning of token
while (self.index < self.buffer.len and self.isSplitByte(self.buffer[self.index])) : (self.index += 1) {}
const start = self.index;
@@ -473,14 +473,14 @@ pub const SplitIterator = struct {
}
/// Returns a slice of the remaining bytes. Does not affect iterator state.
- pub fn rest(self: &const SplitIterator) []const u8 {
+ pub fn rest(self: *const SplitIterator) []const u8 {
// move to beginning of token
var index: usize = self.index;
while (index < self.buffer.len and self.isSplitByte(self.buffer[index])) : (index += 1) {}
return self.buffer[index..];
}
- fn isSplitByte(self: &const SplitIterator, byte: u8) bool {
+ fn isSplitByte(self: *const SplitIterator, byte: u8) bool {
for (self.split_bytes) |split_byte| {
if (byte == split_byte) {
return true;
@@ -492,7 +492,7 @@ pub const SplitIterator = struct {
/// Naively combines a series of strings with a separator.
/// Allocates memory for the result, which must be freed by the caller.
-pub fn join(allocator: &Allocator, sep: u8, strings: ...) ![]u8 {
+pub fn join(allocator: *Allocator, sep: u8, strings: ...) ![]u8 {
comptime assert(strings.len >= 1);
var total_strings_len: usize = strings.len; // 1 sep per string
{
@@ -649,7 +649,7 @@ test "mem.max" {
assert(max(u8, "abcdefg") == 'g');
}
-pub fn swap(comptime T: type, a: &T, b: &T) void {
+pub fn swap(comptime T: type, a: *T, b: *T) void {
const tmp = a.*;
a.* = b.*;
b.* = tmp;
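mem.swap is representative of the whole patch: the parameter types become *T, but call sites keep passing &x and &y exactly as before. A minimal sketch:

const std = @import("std");

test "mem.swap call sites are unchanged" {
    var x: i32 = 1;
    var y: i32 = 2;
    std.mem.swap(i32, &x, &y); // only the signature changed, not the call
    std.debug.assert(x == 2 and y == 1);
}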
diff --git a/std/net.zig b/std/net.zig
index 3af4e0b5259b..bfe4b1c2a06e 100644
--- a/std/net.zig
+++ b/std/net.zig
@@ -31,7 +31,7 @@ pub const Address = struct {
};
}
- pub fn initIp6(ip6: &const Ip6Addr, port: u16) Address {
+ pub fn initIp6(ip6: *const Ip6Addr, port: u16) Address {
return Address{
.family = posix.AF_INET6,
.os_addr = posix.sockaddr{
@@ -46,15 +46,15 @@ pub const Address = struct {
};
}
- pub fn initPosix(addr: &const posix.sockaddr) Address {
+ pub fn initPosix(addr: *const posix.sockaddr) Address {
return Address{ .os_addr = addr.* };
}
- pub fn format(self: &const Address, out_stream: var) !void {
+ pub fn format(self: *const Address, out_stream: var) !void {
switch (self.os_addr.in.family) {
posix.AF_INET => {
const native_endian_port = std.mem.endianSwapIfLe(u16, self.os_addr.in.port);
- const bytes = ([]const u8)((&self.os_addr.in.addr)[0..1]);
+ const bytes = ([]const u8)((&self.os_addr.in.addr)[0..1]);
try out_stream.print("{}.{}.{}.{}:{}", bytes[0], bytes[1], bytes[2], bytes[3], native_endian_port);
},
posix.AF_INET6 => {
diff --git a/std/os/child_process.zig b/std/os/child_process.zig
index 51f1bd96e5f6..0e80ae09c1a5 100644
--- a/std/os/child_process.zig
+++ b/std/os/child_process.zig
@@ -20,7 +20,7 @@ pub const ChildProcess = struct {
pub handle: if (is_windows) windows.HANDLE else void,
pub thread_handle: if (is_windows) windows.HANDLE else void,
- pub allocator: &mem.Allocator,
+ pub allocator: *mem.Allocator,
pub stdin: ?os.File,
pub stdout: ?os.File,
@@ -31,7 +31,7 @@ pub const ChildProcess = struct {
pub argv: []const []const u8,
/// Leave as null to use the current env map using the supplied allocator.
- pub env_map: ?&const BufMap,
+ pub env_map: ?*const BufMap,
pub stdin_behavior: StdIo,
pub stdout_behavior: StdIo,
@@ -47,7 +47,7 @@ pub const ChildProcess = struct {
pub cwd: ?[]const u8,
err_pipe: if (is_windows) void else [2]i32,
- llnode: if (is_windows) void else LinkedList(&ChildProcess).Node,
+ llnode: if (is_windows) void else LinkedList(*ChildProcess).Node,
pub const SpawnError = error{
ProcessFdQuotaExceeded,
@@ -84,7 +84,7 @@ pub const ChildProcess = struct {
/// First argument in argv is the executable.
/// On success must call deinit.
- pub fn init(argv: []const []const u8, allocator: &mem.Allocator) !&ChildProcess {
+ pub fn init(argv: []const []const u8, allocator: *mem.Allocator) !*ChildProcess {
const child = try allocator.create(ChildProcess);
errdefer allocator.destroy(child);
@@ -114,14 +114,14 @@ pub const ChildProcess = struct {
return child;
}
- pub fn setUserName(self: &ChildProcess, name: []const u8) !void {
+ pub fn setUserName(self: *ChildProcess, name: []const u8) !void {
const user_info = try os.getUserInfo(name);
self.uid = user_info.uid;
self.gid = user_info.gid;
}
/// On success must call `kill` or `wait`.
- pub fn spawn(self: &ChildProcess) !void {
+ pub fn spawn(self: *ChildProcess) !void {
if (is_windows) {
return self.spawnWindows();
} else {
@@ -129,13 +129,13 @@ pub const ChildProcess = struct {
}
}
- pub fn spawnAndWait(self: &ChildProcess) !Term {
+ pub fn spawnAndWait(self: *ChildProcess) !Term {
try self.spawn();
return self.wait();
}
/// Forcibly terminates child process and then cleans up all resources.
- pub fn kill(self: &ChildProcess) !Term {
+ pub fn kill(self: *ChildProcess) !Term {
if (is_windows) {
return self.killWindows(1);
} else {
@@ -143,7 +143,7 @@ pub const ChildProcess = struct {
}
}
- pub fn killWindows(self: &ChildProcess, exit_code: windows.UINT) !Term {
+ pub fn killWindows(self: *ChildProcess, exit_code: windows.UINT) !Term {
if (self.term) |term| {
self.cleanupStreams();
return term;
@@ -159,7 +159,7 @@ pub const ChildProcess = struct {
return ??self.term;
}
- pub fn killPosix(self: &ChildProcess) !Term {
+ pub fn killPosix(self: *ChildProcess) !Term {
if (self.term) |term| {
self.cleanupStreams();
return term;
@@ -179,7 +179,7 @@ pub const ChildProcess = struct {
}
/// Blocks until child process terminates and then cleans up all resources.
- pub fn wait(self: &ChildProcess) !Term {
+ pub fn wait(self: *ChildProcess) !Term {
if (is_windows) {
return self.waitWindows();
} else {
@@ -195,7 +195,7 @@ pub const ChildProcess = struct {
/// Spawns a child process, waits for it, collecting stdout and stderr, and then returns.
/// If it succeeds, the caller owns result.stdout and result.stderr memory.
- pub fn exec(allocator: &mem.Allocator, argv: []const []const u8, cwd: ?[]const u8, env_map: ?&const BufMap, max_output_size: usize) !ExecResult {
+ pub fn exec(allocator: *mem.Allocator, argv: []const []const u8, cwd: ?[]const u8, env_map: ?*const BufMap, max_output_size: usize) !ExecResult {
const child = try ChildProcess.init(argv, allocator);
defer child.deinit();
@@ -225,7 +225,7 @@ pub const ChildProcess = struct {
};
}
- fn waitWindows(self: &ChildProcess) !Term {
+ fn waitWindows(self: *ChildProcess) !Term {
if (self.term) |term| {
self.cleanupStreams();
return term;
@@ -235,7 +235,7 @@ pub const ChildProcess = struct {
return ??self.term;
}
- fn waitPosix(self: &ChildProcess) !Term {
+ fn waitPosix(self: *ChildProcess) !Term {
if (self.term) |term| {
self.cleanupStreams();
return term;
@@ -245,11 +245,11 @@ pub const ChildProcess = struct {
return ??self.term;
}
- pub fn deinit(self: &ChildProcess) void {
+ pub fn deinit(self: *ChildProcess) void {
self.allocator.destroy(self);
}
- fn waitUnwrappedWindows(self: &ChildProcess) !void {
+ fn waitUnwrappedWindows(self: *ChildProcess) !void {
const result = os.windowsWaitSingle(self.handle, windows.INFINITE);
self.term = (SpawnError!Term)(x: {
@@ -267,7 +267,7 @@ pub const ChildProcess = struct {
return result;
}
- fn waitUnwrapped(self: &ChildProcess) void {
+ fn waitUnwrapped(self: *ChildProcess) void {
var status: i32 = undefined;
while (true) {
const err = posix.getErrno(posix.waitpid(self.pid, &status, 0));
@@ -283,11 +283,11 @@ pub const ChildProcess = struct {
}
}
- fn handleWaitResult(self: &ChildProcess, status: i32) void {
+ fn handleWaitResult(self: *ChildProcess, status: i32) void {
self.term = self.cleanupAfterWait(status);
}
- fn cleanupStreams(self: &ChildProcess) void {
+ fn cleanupStreams(self: *ChildProcess) void {
if (self.stdin) |*stdin| {
stdin.close();
self.stdin = null;
@@ -302,7 +302,7 @@ pub const ChildProcess = struct {
}
}
- fn cleanupAfterWait(self: &ChildProcess, status: i32) !Term {
+ fn cleanupAfterWait(self: *ChildProcess, status: i32) !Term {
defer {
os.close(self.err_pipe[0]);
os.close(self.err_pipe[1]);
@@ -335,7 +335,7 @@ pub const ChildProcess = struct {
Term{ .Unknown = status };
}
- fn spawnPosix(self: &ChildProcess) !void {
+ fn spawnPosix(self: *ChildProcess) !void {
const stdin_pipe = if (self.stdin_behavior == StdIo.Pipe) try makePipe() else undefined;
errdefer if (self.stdin_behavior == StdIo.Pipe) {
destroyPipe(stdin_pipe);
@@ -432,7 +432,7 @@ pub const ChildProcess = struct {
self.pid = pid;
self.err_pipe = err_pipe;
- self.llnode = LinkedList(&ChildProcess).Node.init(self);
+ self.llnode = LinkedList(*ChildProcess).Node.init(self);
self.term = null;
if (self.stdin_behavior == StdIo.Pipe) {
@@ -446,7 +446,7 @@ pub const ChildProcess = struct {
}
}
- fn spawnWindows(self: &ChildProcess) !void {
+ fn spawnWindows(self: *ChildProcess) !void {
const saAttr = windows.SECURITY_ATTRIBUTES{
.nLength = @sizeOf(windows.SECURITY_ATTRIBUTES),
.bInheritHandle = windows.TRUE,
@@ -639,8 +639,8 @@ pub const ChildProcess = struct {
}
};
-fn windowsCreateProcess(app_name: &u8, cmd_line: &u8, envp_ptr: ?&u8, cwd_ptr: ?&u8, lpStartupInfo: &windows.STARTUPINFOA, lpProcessInformation: &windows.PROCESS_INFORMATION) !void {
- if (windows.CreateProcessA(app_name, cmd_line, null, null, windows.TRUE, 0, @ptrCast(?&c_void, envp_ptr), cwd_ptr, lpStartupInfo, lpProcessInformation) == 0) {
+fn windowsCreateProcess(app_name: *u8, cmd_line: *u8, envp_ptr: ?*u8, cwd_ptr: ?*u8, lpStartupInfo: *windows.STARTUPINFOA, lpProcessInformation: *windows.PROCESS_INFORMATION) !void {
+ if (windows.CreateProcessA(app_name, cmd_line, null, null, windows.TRUE, 0, @ptrCast(?*c_void, envp_ptr), cwd_ptr, lpStartupInfo, lpProcessInformation) == 0) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.FILE_NOT_FOUND, windows.ERROR.PATH_NOT_FOUND => error.FileNotFound,
@@ -653,7 +653,7 @@ fn windowsCreateProcess(app_name: &u8, cmd_line: &u8, envp_ptr: ?&u8, cwd_ptr: ?
/// Caller must dealloc.
/// Guarantees a null byte at result[result.len].
-fn windowsCreateCommandLine(allocator: &mem.Allocator, argv: []const []const u8) ![]u8 {
+fn windowsCreateCommandLine(allocator: *mem.Allocator, argv: []const []const u8) ![]u8 {
var buf = try Buffer.initSize(allocator, 0);
defer buf.deinit();
@@ -698,7 +698,7 @@ fn windowsDestroyPipe(rd: ?windows.HANDLE, wr: ?windows.HANDLE) void {
// a namespace field lookup
const SECURITY_ATTRIBUTES = windows.SECURITY_ATTRIBUTES;
-fn windowsMakePipe(rd: &windows.HANDLE, wr: &windows.HANDLE, sattr: &const SECURITY_ATTRIBUTES) !void {
+fn windowsMakePipe(rd: *windows.HANDLE, wr: *windows.HANDLE, sattr: *const SECURITY_ATTRIBUTES) !void {
if (windows.CreatePipe(rd, wr, sattr, 0) == 0) {
const err = windows.GetLastError();
return switch (err) {
@@ -716,7 +716,7 @@ fn windowsSetHandleInfo(h: windows.HANDLE, mask: windows.DWORD, flags: windows.D
}
}
-fn windowsMakePipeIn(rd: &?windows.HANDLE, wr: &?windows.HANDLE, sattr: &const SECURITY_ATTRIBUTES) !void {
+fn windowsMakePipeIn(rd: *?windows.HANDLE, wr: *?windows.HANDLE, sattr: *const SECURITY_ATTRIBUTES) !void {
var rd_h: windows.HANDLE = undefined;
var wr_h: windows.HANDLE = undefined;
try windowsMakePipe(&rd_h, &wr_h, sattr);
@@ -726,7 +726,7 @@ fn windowsMakePipeIn(rd: &?windows.HANDLE, wr: &?windows.HANDLE, sattr: &const S
wr.* = wr_h;
}
-fn windowsMakePipeOut(rd: &?windows.HANDLE, wr: &?windows.HANDLE, sattr: &const SECURITY_ATTRIBUTES) !void {
+fn windowsMakePipeOut(rd: *?windows.HANDLE, wr: *?windows.HANDLE, sattr: *const SECURITY_ATTRIBUTES) !void {
var rd_h: windows.HANDLE = undefined;
var wr_h: windows.HANDLE = undefined;
try windowsMakePipe(&rd_h, &wr_h, sattr);
@@ -748,7 +748,7 @@ fn makePipe() ![2]i32 {
return fds;
}
-fn destroyPipe(pipe: &const [2]i32) void {
+fn destroyPipe(pipe: *const [2]i32) void {
os.close((pipe.*)[0]);
os.close((pipe.*)[1]);
}
diff --git a/std/os/darwin.zig b/std/os/darwin.zig
index a3fc230ac51c..77e8b6bb6abf 100644
--- a/std/os/darwin.zig
+++ b/std/os/darwin.zig
@@ -309,7 +309,7 @@ pub fn isatty(fd: i32) bool {
return c.isatty(fd) != 0;
}
-pub fn fstat(fd: i32, buf: &c.Stat) usize {
+pub fn fstat(fd: i32, buf: *c.Stat) usize {
return errnoWrap(c.@"fstat$INODE64"(fd, buf));
}
@@ -317,7 +317,7 @@ pub fn lseek(fd: i32, offset: isize, whence: c_int) usize {
return errnoWrap(c.lseek(fd, offset, whence));
}
-pub fn open(path: &const u8, flags: u32, mode: usize) usize {
+pub fn open(path: *const u8, flags: u32, mode: usize) usize {
return errnoWrap(c.open(path, @bitCast(c_int, flags), mode));
}
@@ -325,79 +325,79 @@ pub fn raise(sig: i32) usize {
return errnoWrap(c.raise(sig));
}
-pub fn read(fd: i32, buf: &u8, nbyte: usize) usize {
- return errnoWrap(c.read(fd, @ptrCast(&c_void, buf), nbyte));
+pub fn read(fd: i32, buf: *u8, nbyte: usize) usize {
+ return errnoWrap(c.read(fd, @ptrCast(*c_void, buf), nbyte));
}
-pub fn stat(noalias path: &const u8, noalias buf: &stat) usize {
+pub fn stat(noalias path: *const u8, noalias buf: *stat) usize {
return errnoWrap(c.stat(path, buf));
}
-pub fn write(fd: i32, buf: &const u8, nbyte: usize) usize {
- return errnoWrap(c.write(fd, @ptrCast(&const c_void, buf), nbyte));
+pub fn write(fd: i32, buf: *const u8, nbyte: usize) usize {
+ return errnoWrap(c.write(fd, @ptrCast(*const c_void, buf), nbyte));
}
-pub fn mmap(address: ?&u8, length: usize, prot: usize, flags: u32, fd: i32, offset: isize) usize {
- const ptr_result = c.mmap(@ptrCast(&c_void, address), length, @bitCast(c_int, c_uint(prot)), @bitCast(c_int, c_uint(flags)), fd, offset);
+pub fn mmap(address: ?*u8, length: usize, prot: usize, flags: u32, fd: i32, offset: isize) usize {
+ const ptr_result = c.mmap(@ptrCast(*c_void, address), length, @bitCast(c_int, c_uint(prot)), @bitCast(c_int, c_uint(flags)), fd, offset);
const isize_result = @bitCast(isize, @ptrToInt(ptr_result));
return errnoWrap(isize_result);
}
pub fn munmap(address: usize, length: usize) usize {
- return errnoWrap(c.munmap(@intToPtr(&c_void, address), length));
+ return errnoWrap(c.munmap(@intToPtr(*c_void, address), length));
}
-pub fn unlink(path: &const u8) usize {
+pub fn unlink(path: *const u8) usize {
return errnoWrap(c.unlink(path));
}
-pub fn getcwd(buf: &u8, size: usize) usize {
+pub fn getcwd(buf: *u8, size: usize) usize {
return if (c.getcwd(buf, size) == null) @bitCast(usize, -isize(c._errno().*)) else 0;
}
-pub fn waitpid(pid: i32, status: &i32, options: u32) usize {
+pub fn waitpid(pid: i32, status: *i32, options: u32) usize {
comptime assert(i32.bit_count == c_int.bit_count);
- return errnoWrap(c.waitpid(pid, @ptrCast(&c_int, status), @bitCast(c_int, options)));
+ return errnoWrap(c.waitpid(pid, @ptrCast(*c_int, status), @bitCast(c_int, options)));
}
pub fn fork() usize {
return errnoWrap(c.fork());
}
-pub fn access(path: &const u8, mode: u32) usize {
+pub fn access(path: *const u8, mode: u32) usize {
return errnoWrap(c.access(path, mode));
}
-pub fn pipe(fds: &[2]i32) usize {
+pub fn pipe(fds: *[2]i32) usize {
comptime assert(i32.bit_count == c_int.bit_count);
- return errnoWrap(c.pipe(@ptrCast(&c_int, fds)));
+ return errnoWrap(c.pipe(@ptrCast(*c_int, fds)));
}
-pub fn getdirentries64(fd: i32, buf_ptr: &u8, buf_len: usize, basep: &i64) usize {
+pub fn getdirentries64(fd: i32, buf_ptr: *u8, buf_len: usize, basep: *i64) usize {
return errnoWrap(@bitCast(isize, c.__getdirentries64(fd, buf_ptr, buf_len, basep)));
}
-pub fn mkdir(path: &const u8, mode: u32) usize {
+pub fn mkdir(path: *const u8, mode: u32) usize {
return errnoWrap(c.mkdir(path, mode));
}
-pub fn symlink(existing: &const u8, new: &const u8) usize {
+pub fn symlink(existing: *const u8, new: *const u8) usize {
return errnoWrap(c.symlink(existing, new));
}
-pub fn rename(old: &const u8, new: &const u8) usize {
+pub fn rename(old: *const u8, new: *const u8) usize {
return errnoWrap(c.rename(old, new));
}
-pub fn rmdir(path: &const u8) usize {
+pub fn rmdir(path: *const u8) usize {
return errnoWrap(c.rmdir(path));
}
-pub fn chdir(path: &const u8) usize {
+pub fn chdir(path: *const u8) usize {
return errnoWrap(c.chdir(path));
}
-pub fn execve(path: &const u8, argv: &const ?&const u8, envp: &const ?&const u8) usize {
+pub fn execve(path: *const u8, argv: *const ?*const u8, envp: *const ?*const u8) usize {
return errnoWrap(c.execve(path, argv, envp));
}
@@ -405,19 +405,19 @@ pub fn dup2(old: i32, new: i32) usize {
return errnoWrap(c.dup2(old, new));
}
-pub fn readlink(noalias path: &const u8, noalias buf_ptr: &u8, buf_len: usize) usize {
+pub fn readlink(noalias path: *const u8, noalias buf_ptr: *u8, buf_len: usize) usize {
return errnoWrap(c.readlink(path, buf_ptr, buf_len));
}
-pub fn gettimeofday(tv: ?&timeval, tz: ?&timezone) usize {
+pub fn gettimeofday(tv: ?*timeval, tz: ?*timezone) usize {
return errnoWrap(c.gettimeofday(tv, tz));
}
-pub fn nanosleep(req: &const timespec, rem: ?&timespec) usize {
+pub fn nanosleep(req: *const timespec, rem: ?*timespec) usize {
return errnoWrap(c.nanosleep(req, rem));
}
-pub fn realpath(noalias filename: &const u8, noalias resolved_name: &u8) usize {
+pub fn realpath(noalias filename: *const u8, noalias resolved_name: *u8) usize {
return if (c.realpath(filename, resolved_name) == null) @bitCast(usize, -isize(c._errno().*)) else 0;
}
@@ -429,11 +429,11 @@ pub fn setregid(rgid: u32, egid: u32) usize {
return errnoWrap(c.setregid(rgid, egid));
}
-pub fn sigprocmask(flags: u32, noalias set: &const sigset_t, noalias oldset: ?&sigset_t) usize {
+pub fn sigprocmask(flags: u32, noalias set: *const sigset_t, noalias oldset: ?*sigset_t) usize {
return errnoWrap(c.sigprocmask(@bitCast(c_int, flags), set, oldset));
}
-pub fn sigaction(sig: u5, noalias act: &const Sigaction, noalias oact: ?&Sigaction) usize {
+pub fn sigaction(sig: u5, noalias act: *const Sigaction, noalias oact: ?*Sigaction) usize {
assert(sig != SIGKILL);
assert(sig != SIGSTOP);
var cact = c.Sigaction{
@@ -442,7 +442,7 @@ pub fn sigaction(sig: u5, noalias act: &const Sigaction, noalias oact: ?&Sigacti
.sa_mask = act.mask,
};
var coact: c.Sigaction = undefined;
- const result = errnoWrap(c.sigaction(sig, &cact, &coact));
+ const result = errnoWrap(c.sigaction(sig, &cact, &coact));
if (result != 0) {
return result;
}
@@ -473,7 +473,7 @@ pub const Sigaction = struct {
flags: u32,
};
-pub fn sigaddset(set: &sigset_t, signo: u5) void {
+pub fn sigaddset(set: *sigset_t, signo: u5) void {
set.* |= u32(1) << (signo - 1);
}
diff --git a/std/os/file.zig b/std/os/file.zig
index c07e2c5c8bd0..d943da30cad2 100644
--- a/std/os/file.zig
+++ b/std/os/file.zig
@@ -19,7 +19,7 @@ pub const File = struct {
/// `path` needs to be copied in memory to add a null terminating byte, hence the allocator.
/// Call close to clean up.
- pub fn openRead(allocator: &mem.Allocator, path: []const u8) OpenError!File {
+ pub fn openRead(allocator: *mem.Allocator, path: []const u8) OpenError!File {
if (is_posix) {
const flags = posix.O_LARGEFILE | posix.O_RDONLY;
const fd = try os.posixOpen(allocator, path, flags, 0);
@@ -40,7 +40,7 @@ pub const File = struct {
}
/// Calls `openWriteMode` with os.default_file_mode for the mode.
- pub fn openWrite(allocator: &mem.Allocator, path: []const u8) OpenError!File {
+ pub fn openWrite(allocator: *mem.Allocator, path: []const u8) OpenError!File {
return openWriteMode(allocator, path, os.default_file_mode);
}
@@ -48,7 +48,7 @@ pub const File = struct {
/// If a file already exists in the destination it will be truncated.
/// `path` needs to be copied in memory to add a null terminating byte, hence the allocator.
/// Call close to clean up.
- pub fn openWriteMode(allocator: &mem.Allocator, path: []const u8, file_mode: os.FileMode) OpenError!File {
+ pub fn openWriteMode(allocator: *mem.Allocator, path: []const u8, file_mode: os.FileMode) OpenError!File {
if (is_posix) {
const flags = posix.O_LARGEFILE | posix.O_WRONLY | posix.O_CREAT | posix.O_CLOEXEC | posix.O_TRUNC;
const fd = try os.posixOpen(allocator, path, flags, file_mode);
@@ -72,7 +72,7 @@ pub const File = struct {
/// If a file already exists in the destination this returns OpenError.PathAlreadyExists
/// `path` needs to be copied in memory to add a null terminating byte, hence the allocator.
/// Call close to clean up.
- pub fn openWriteNoClobber(allocator: &mem.Allocator, path: []const u8, file_mode: os.FileMode) OpenError!File {
+ pub fn openWriteNoClobber(allocator: *mem.Allocator, path: []const u8, file_mode: os.FileMode) OpenError!File {
if (is_posix) {
const flags = posix.O_LARGEFILE | posix.O_WRONLY | posix.O_CREAT | posix.O_CLOEXEC | posix.O_EXCL;
const fd = try os.posixOpen(allocator, path, flags, file_mode);
@@ -96,7 +96,7 @@ pub const File = struct {
return File{ .handle = handle };
}
- pub fn access(allocator: &mem.Allocator, path: []const u8, file_mode: os.FileMode) !bool {
+ pub fn access(allocator: *mem.Allocator, path: []const u8, file_mode: os.FileMode) !bool {
const path_with_null = try std.cstr.addNullByte(allocator, path);
defer allocator.free(path_with_null);
@@ -140,17 +140,17 @@ pub const File = struct {
/// Upon success, the stream is in an uninitialized state. To continue using it,
/// you must use the open() function.
- pub fn close(self: &File) void {
+ pub fn close(self: *File) void {
os.close(self.handle);
self.handle = undefined;
}
/// Calls `os.isTty` on `self.handle`.
- pub fn isTty(self: &File) bool {
+ pub fn isTty(self: *File) bool {
return os.isTty(self.handle);
}
- pub fn seekForward(self: &File, amount: isize) !void {
+ pub fn seekForward(self: *File, amount: isize) !void {
switch (builtin.os) {
Os.linux, Os.macosx, Os.ios => {
const result = posix.lseek(self.handle, amount, posix.SEEK_CUR);
@@ -179,7 +179,7 @@ pub const File = struct {
}
}
- pub fn seekTo(self: &File, pos: usize) !void {
+ pub fn seekTo(self: *File, pos: usize) !void {
switch (builtin.os) {
Os.linux, Os.macosx, Os.ios => {
const ipos = try math.cast(isize, pos);
@@ -210,7 +210,7 @@ pub const File = struct {
}
}
- pub fn getPos(self: &File) !usize {
+ pub fn getPos(self: *File) !usize {
switch (builtin.os) {
Os.linux, Os.macosx, Os.ios => {
const result = posix.lseek(self.handle, 0, posix.SEEK_CUR);
@@ -229,7 +229,7 @@ pub const File = struct {
},
Os.windows => {
var pos: windows.LARGE_INTEGER = undefined;
- if (windows.SetFilePointerEx(self.handle, 0, &pos, windows.FILE_CURRENT) == 0) {
+ if (windows.SetFilePointerEx(self.handle, 0, &pos, windows.FILE_CURRENT) == 0) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.INVALID_PARAMETER => error.BadFd,
@@ -250,7 +250,7 @@ pub const File = struct {
}
}
- pub fn getEndPos(self: &File) !usize {
+ pub fn getEndPos(self: *File) !usize {
if (is_posix) {
var stat: posix.Stat = undefined;
const err = posix.getErrno(posix.fstat(self.handle, &stat));
@@ -285,7 +285,7 @@ pub const File = struct {
Unexpected,
};
- fn mode(self: &File) ModeError!os.FileMode {
+ fn mode(self: *File) ModeError!os.FileMode {
if (is_posix) {
var stat: posix.Stat = undefined;
const err = posix.getErrno(posix.fstat(self.handle, &stat));
@@ -309,7 +309,7 @@ pub const File = struct {
pub const ReadError = error{};
- pub fn read(self: &File, buffer: []u8) !usize {
+ pub fn read(self: *File, buffer: []u8) !usize {
if (is_posix) {
var index: usize = 0;
while (index < buffer.len) {
@@ -334,7 +334,7 @@ pub const File = struct {
while (index < buffer.len) {
const want_read_count = windows.DWORD(math.min(windows.DWORD(@maxValue(windows.DWORD)), buffer.len - index));
var amt_read: windows.DWORD = undefined;
- if (windows.ReadFile(self.handle, @ptrCast(&c_void, &buffer[index]), want_read_count, &amt_read, null) == 0) {
+ if (windows.ReadFile(self.handle, @ptrCast(*c_void, &buffer[index]), want_read_count, &amt_read, null) == 0) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.OPERATION_ABORTED => continue,
@@ -353,7 +353,7 @@ pub const File = struct {
pub const WriteError = os.WindowsWriteError || os.PosixWriteError;
- fn write(self: &File, bytes: []const u8) WriteError!void {
+ fn write(self: *File, bytes: []const u8) WriteError!void {
if (is_posix) {
try os.posixWrite(self.handle, bytes);
} else if (is_windows) {
diff --git a/std/os/get_user_id.zig b/std/os/get_user_id.zig
index 2a15e1d49507..c0c1b1cc4b7a 100644
--- a/std/os/get_user_id.zig
+++ b/std/os/get_user_id.zig
@@ -77,8 +77,8 @@ pub fn posixGetUserInfo(name: []const u8) !UserInfo {
'0'...'9' => byte - '0',
else => return error.CorruptPasswordFile,
};
- if (@mulWithOverflow(u32, uid, 10, &uid)) return error.CorruptPasswordFile;
- if (@addWithOverflow(u32, uid, digit, &uid)) return error.CorruptPasswordFile;
+ if (@mulWithOverflow(u32, uid, 10, &uid)) return error.CorruptPasswordFile;
+ if (@addWithOverflow(u32, uid, digit, &uid)) return error.CorruptPasswordFile;
},
},
State.ReadGroupId => switch (byte) {
@@ -93,8 +93,8 @@ pub fn posixGetUserInfo(name: []const u8) !UserInfo {
'0'...'9' => byte - '0',
else => return error.CorruptPasswordFile,
};
- if (@mulWithOverflow(u32, gid, 10, &gid)) return error.CorruptPasswordFile;
- if (@addWithOverflow(u32, gid, digit, &gid)) return error.CorruptPasswordFile;
+ if (@mulWithOverflow(u32, gid, 10, &gid)) return error.CorruptPasswordFile;
+ if (@addWithOverflow(u32, gid, digit, &gid)) return error.CorruptPasswordFile;
},
},
}
diff --git a/std/os/index.zig b/std/os/index.zig
index 70e654bcd997..ff638c670b1a 100644
--- a/std/os/index.zig
+++ b/std/os/index.zig
@@ -321,14 +321,14 @@ pub const PosixOpenError = error{
/// ::file_path needs to be copied in memory to add a null terminating byte.
/// Calls POSIX open, keeps trying if it gets interrupted, and translates
/// the return value into zig errors.
-pub fn posixOpen(allocator: &Allocator, file_path: []const u8, flags: u32, perm: usize) PosixOpenError!i32 {
+pub fn posixOpen(allocator: *Allocator, file_path: []const u8, flags: u32, perm: usize) PosixOpenError!i32 {
const path_with_null = try cstr.addNullByte(allocator, file_path);
defer allocator.free(path_with_null);
return posixOpenC(path_with_null.ptr, flags, perm);
}
-pub fn posixOpenC(file_path: &const u8, flags: u32, perm: usize) !i32 {
+pub fn posixOpenC(file_path: *const u8, flags: u32, perm: usize) !i32 {
while (true) {
const result = posix.open(file_path, flags, perm);
const err = posix.getErrno(result);
@@ -374,10 +374,10 @@ pub fn posixDup2(old_fd: i32, new_fd: i32) !void {
}
}
-pub fn createNullDelimitedEnvMap(allocator: &Allocator, env_map: &const BufMap) ![]?&u8 {
+pub fn createNullDelimitedEnvMap(allocator: *Allocator, env_map: *const BufMap) ![]?*u8 {
const envp_count = env_map.count();
- const envp_buf = try allocator.alloc(?&u8, envp_count + 1);
- mem.set(?&u8, envp_buf, null);
+ const envp_buf = try allocator.alloc(?*u8, envp_count + 1);
+ mem.set(?*u8, envp_buf, null);
errdefer freeNullDelimitedEnvMap(allocator, envp_buf);
{
var it = env_map.iterator();
@@ -397,7 +397,7 @@ pub fn createNullDelimitedEnvMap(allocator: &Allocator, env_map: &const BufMap)
return envp_buf;
}
-pub fn freeNullDelimitedEnvMap(allocator: &Allocator, envp_buf: []?&u8) void {
+pub fn freeNullDelimitedEnvMap(allocator: *Allocator, envp_buf: []?*u8) void {
for (envp_buf) |env| {
const env_buf = if (env) |ptr| ptr[0 .. cstr.len(ptr) + 1] else break;
allocator.free(env_buf);
@@ -410,9 +410,9 @@ pub fn freeNullDelimitedEnvMap(allocator: &Allocator, envp_buf: []?&u8) void {
/// pointers after the args and after the environment variables.
/// `argv[0]` is the executable path.
/// This function also uses the PATH environment variable to get the full path to the executable.
-pub fn posixExecve(argv: []const []const u8, env_map: &const BufMap, allocator: &Allocator) !void {
- const argv_buf = try allocator.alloc(?&u8, argv.len + 1);
- mem.set(?&u8, argv_buf, null);
+pub fn posixExecve(argv: []const []const u8, env_map: *const BufMap, allocator: *Allocator) !void {
+ const argv_buf = try allocator.alloc(?*u8, argv.len + 1);
+ mem.set(?*u8, argv_buf, null);
defer {
for (argv_buf) |arg| {
const arg_buf = if (arg) |ptr| cstr.toSlice(ptr) else break;
@@ -494,10 +494,10 @@ fn posixExecveErrnoToErr(err: usize) PosixExecveError {
}
pub var linux_aux_raw = []usize{0} ** 38;
-pub var posix_environ_raw: []&u8 = undefined;
+pub var posix_environ_raw: []*u8 = undefined;
/// Caller must free result when done.
-pub fn getEnvMap(allocator: &Allocator) !BufMap {
+pub fn getEnvMap(allocator: *Allocator) !BufMap {
var result = BufMap.init(allocator);
errdefer result.deinit();
@@ -557,7 +557,7 @@ pub fn getEnvPosix(key: []const u8) ?[]const u8 {
}
/// Caller must free returned memory.
-pub fn getEnvVarOwned(allocator: &mem.Allocator, key: []const u8) ![]u8 {
+pub fn getEnvVarOwned(allocator: *mem.Allocator, key: []const u8) ![]u8 {
if (is_windows) {
const key_with_null = try cstr.addNullByte(allocator, key);
defer allocator.free(key_with_null);
@@ -591,7 +591,7 @@ pub fn getEnvVarOwned(allocator: &mem.Allocator, key: []const u8) ![]u8 {
}
/// Caller must free the returned memory.
-pub fn getCwd(allocator: &Allocator) ![]u8 {
+pub fn getCwd(allocator: *Allocator) ![]u8 {
switch (builtin.os) {
Os.windows => {
var buf = try allocator.alloc(u8, 256);
@@ -640,7 +640,7 @@ test "os.getCwd" {
pub const SymLinkError = PosixSymLinkError || WindowsSymLinkError;
-pub fn symLink(allocator: &Allocator, existing_path: []const u8, new_path: []const u8) SymLinkError!void {
+pub fn symLink(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) SymLinkError!void {
if (is_windows) {
return symLinkWindows(allocator, existing_path, new_path);
} else {
@@ -653,7 +653,7 @@ pub const WindowsSymLinkError = error{
Unexpected,
};
-pub fn symLinkWindows(allocator: &Allocator, existing_path: []const u8, new_path: []const u8) WindowsSymLinkError!void {
+pub fn symLinkWindows(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) WindowsSymLinkError!void {
const existing_with_null = try cstr.addNullByte(allocator, existing_path);
defer allocator.free(existing_with_null);
const new_with_null = try cstr.addNullByte(allocator, new_path);
@@ -683,7 +683,7 @@ pub const PosixSymLinkError = error{
Unexpected,
};
-pub fn symLinkPosix(allocator: &Allocator, existing_path: []const u8, new_path: []const u8) PosixSymLinkError!void {
+pub fn symLinkPosix(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) PosixSymLinkError!void {
const full_buf = try allocator.alloc(u8, existing_path.len + new_path.len + 2);
defer allocator.free(full_buf);
@@ -718,7 +718,7 @@ pub fn symLinkPosix(allocator: &Allocator, existing_path: []const u8, new_path:
// here we replace the standard +/ with -_ so that it can be used in a file name
const b64_fs_encoder = base64.Base64Encoder.init("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_", base64.standard_pad_char);
-pub fn atomicSymLink(allocator: &Allocator, existing_path: []const u8, new_path: []const u8) !void {
+pub fn atomicSymLink(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) !void {
if (symLink(allocator, existing_path, new_path)) {
return;
} else |err| switch (err) {
@@ -746,7 +746,7 @@ pub fn atomicSymLink(allocator: &Allocator, existing_path: []const u8, new_path:
}
}
-pub fn deleteFile(allocator: &Allocator, file_path: []const u8) !void {
+pub fn deleteFile(allocator: *Allocator, file_path: []const u8) !void {
if (builtin.os == Os.windows) {
return deleteFileWindows(allocator, file_path);
} else {
@@ -754,7 +754,7 @@ pub fn deleteFile(allocator: &Allocator, file_path: []const u8) !void {
}
}
-pub fn deleteFileWindows(allocator: &Allocator, file_path: []const u8) !void {
+pub fn deleteFileWindows(allocator: *Allocator, file_path: []const u8) !void {
const buf = try allocator.alloc(u8, file_path.len + 1);
defer allocator.free(buf);
@@ -772,7 +772,7 @@ pub fn deleteFileWindows(allocator: &Allocator, file_path: []const u8) !void {
}
}
-pub fn deleteFilePosix(allocator: &Allocator, file_path: []const u8) !void {
+pub fn deleteFilePosix(allocator: *Allocator, file_path: []const u8) !void {
const buf = try allocator.alloc(u8, file_path.len + 1);
defer allocator.free(buf);
@@ -803,7 +803,7 @@ pub fn deleteFilePosix(allocator: &Allocator, file_path: []const u8) !void {
/// there is a possibility of power loss or application termination leaving temporary files present
/// in the same directory as dest_path.
/// Destination file will have the same mode as the source file.
-pub fn copyFile(allocator: &Allocator, source_path: []const u8, dest_path: []const u8) !void {
+pub fn copyFile(allocator: *Allocator, source_path: []const u8, dest_path: []const u8) !void {
var in_file = try os.File.openRead(allocator, source_path);
defer in_file.close();
@@ -825,7 +825,7 @@ pub fn copyFile(allocator: &Allocator, source_path: []const u8, dest_path: []con
/// Guaranteed to be atomic. However until https://patchwork.kernel.org/patch/9636735/ is
/// merged and readily available,
/// there is a possibility of power loss or application termination leaving temporary files present
-pub fn copyFileMode(allocator: &Allocator, source_path: []const u8, dest_path: []const u8, mode: FileMode) !void {
+pub fn copyFileMode(allocator: *Allocator, source_path: []const u8, dest_path: []const u8, mode: FileMode) !void {
var in_file = try os.File.openRead(allocator, source_path);
defer in_file.close();
@@ -843,7 +843,7 @@ pub fn copyFileMode(allocator: &Allocator, source_path: []const u8, dest_path: [
}
pub const AtomicFile = struct {
- allocator: &Allocator,
+ allocator: *Allocator,
file: os.File,
tmp_path: []u8,
dest_path: []const u8,
@@ -851,7 +851,7 @@ pub const AtomicFile = struct {
/// dest_path must remain valid for the lifetime of AtomicFile
/// call finish to atomically replace dest_path with contents
- pub fn init(allocator: &Allocator, dest_path: []const u8, mode: FileMode) !AtomicFile {
+ pub fn init(allocator: *Allocator, dest_path: []const u8, mode: FileMode) !AtomicFile {
const dirname = os.path.dirname(dest_path);
var rand_buf: [12]u8 = undefined;
@@ -888,7 +888,7 @@ pub const AtomicFile = struct {
}
/// always call deinit, even after successful finish()
- pub fn deinit(self: &AtomicFile) void {
+ pub fn deinit(self: *AtomicFile) void {
if (!self.finished) {
self.file.close();
deleteFile(self.allocator, self.tmp_path) catch {};
@@ -897,7 +897,7 @@ pub const AtomicFile = struct {
}
}
- pub fn finish(self: &AtomicFile) !void {
+ pub fn finish(self: *AtomicFile) !void {
assert(!self.finished);
self.file.close();
try rename(self.allocator, self.tmp_path, self.dest_path);
@@ -906,7 +906,7 @@ pub const AtomicFile = struct {
}
};
-pub fn rename(allocator: &Allocator, old_path: []const u8, new_path: []const u8) !void {
+pub fn rename(allocator: *Allocator, old_path: []const u8, new_path: []const u8) !void {
const full_buf = try allocator.alloc(u8, old_path.len + new_path.len + 2);
defer allocator.free(full_buf);
@@ -951,7 +951,7 @@ pub fn rename(allocator: &Allocator, old_path: []const u8, new_path: []const u8)
}
}
-pub fn makeDir(allocator: &Allocator, dir_path: []const u8) !void {
+pub fn makeDir(allocator: *Allocator, dir_path: []const u8) !void {
if (is_windows) {
return makeDirWindows(allocator, dir_path);
} else {
@@ -959,7 +959,7 @@ pub fn makeDir(allocator: &Allocator, dir_path: []const u8) !void {
}
}
-pub fn makeDirWindows(allocator: &Allocator, dir_path: []const u8) !void {
+pub fn makeDirWindows(allocator: *Allocator, dir_path: []const u8) !void {
const path_buf = try cstr.addNullByte(allocator, dir_path);
defer allocator.free(path_buf);
@@ -973,7 +973,7 @@ pub fn makeDirWindows(allocator: &Allocator, dir_path: []const u8) !void {
}
}
-pub fn makeDirPosix(allocator: &Allocator, dir_path: []const u8) !void {
+pub fn makeDirPosix(allocator: *Allocator, dir_path: []const u8) !void {
const path_buf = try cstr.addNullByte(allocator, dir_path);
defer allocator.free(path_buf);
@@ -999,7 +999,7 @@ pub fn makeDirPosix(allocator: &Allocator, dir_path: []const u8) !void {
/// Calls makeDir recursively to make an entire path. Returns success if the path
/// already exists and is a directory.
-pub fn makePath(allocator: &Allocator, full_path: []const u8) !void {
+pub fn makePath(allocator: *Allocator, full_path: []const u8) !void {
const resolved_path = try path.resolve(allocator, full_path);
defer allocator.free(resolved_path);
@@ -1033,7 +1033,7 @@ pub fn makePath(allocator: &Allocator, full_path: []const u8) !void {
/// Returns ::error.DirNotEmpty if the directory is not empty.
/// To delete a directory recursively, see ::deleteTree
-pub fn deleteDir(allocator: &Allocator, dir_path: []const u8) !void {
+pub fn deleteDir(allocator: *Allocator, dir_path: []const u8) !void {
const path_buf = try allocator.alloc(u8, dir_path.len + 1);
defer allocator.free(path_buf);
@@ -1084,7 +1084,7 @@ const DeleteTreeError = error{
DirNotEmpty,
Unexpected,
};
-pub fn deleteTree(allocator: &Allocator, full_path: []const u8) DeleteTreeError!void {
+pub fn deleteTree(allocator: *Allocator, full_path: []const u8) DeleteTreeError!void {
start_over: while (true) {
var got_access_denied = false;
// First, try deleting the item as a file. This way we don't follow sym links.
@@ -1153,7 +1153,7 @@ pub fn deleteTree(allocator: &Allocator, full_path: []const u8) DeleteTreeError!
pub const Dir = struct {
fd: i32,
darwin_seek: darwin_seek_t,
- allocator: &Allocator,
+ allocator: *Allocator,
buf: []u8,
index: usize,
end_index: usize,
@@ -1180,7 +1180,7 @@ pub const Dir = struct {
};
};
- pub fn open(allocator: &Allocator, dir_path: []const u8) !Dir {
+ pub fn open(allocator: *Allocator, dir_path: []const u8) !Dir {
const fd = switch (builtin.os) {
Os.windows => @compileError("TODO support Dir.open for windows"),
Os.linux => try posixOpen(allocator, dir_path, posix.O_RDONLY | posix.O_DIRECTORY | posix.O_CLOEXEC, 0),
@@ -1206,14 +1206,14 @@ pub const Dir = struct {
};
}
- pub fn close(self: &Dir) void {
+ pub fn close(self: *Dir) void {
self.allocator.free(self.buf);
os.close(self.fd);
}
/// Memory such as file names referenced in this returned entry becomes invalid
/// with subsequent calls to next, as well as when this ::Dir is deinitialized.
- pub fn next(self: &Dir) !?Entry {
+ pub fn next(self: *Dir) !?Entry {
switch (builtin.os) {
Os.linux => return self.nextLinux(),
Os.macosx, Os.ios => return self.nextDarwin(),
@@ -1222,7 +1222,7 @@ pub const Dir = struct {
}
}
- fn nextDarwin(self: &Dir) !?Entry {
+ fn nextDarwin(self: *Dir) !?Entry {
start_over: while (true) {
if (self.index >= self.end_index) {
if (self.buf.len == 0) {
@@ -1248,7 +1248,7 @@ pub const Dir = struct {
break;
}
}
- const darwin_entry = @ptrCast(&align(1) posix.dirent, &self.buf[self.index]);
+ const darwin_entry = @ptrCast(*align(1) posix.dirent, &self.buf[self.index]);
const next_index = self.index + darwin_entry.d_reclen;
self.index = next_index;
@@ -1277,11 +1277,11 @@ pub const Dir = struct {
}
}
- fn nextWindows(self: &Dir) !?Entry {
+ fn nextWindows(self: *Dir) !?Entry {
@compileError("TODO support Dir.next for windows");
}
- fn nextLinux(self: &Dir) !?Entry {
+ fn nextLinux(self: *Dir) !?Entry {
start_over: while (true) {
if (self.index >= self.end_index) {
if (self.buf.len == 0) {
@@ -1307,7 +1307,7 @@ pub const Dir = struct {
break;
}
}
- const linux_entry = @ptrCast(&align(1) posix.dirent, &self.buf[self.index]);
+ const linux_entry = @ptrCast(*align(1) posix.dirent, &self.buf[self.index]);
const next_index = self.index + linux_entry.d_reclen;
self.index = next_index;
@@ -1337,7 +1337,7 @@ pub const Dir = struct {
}
};
-pub fn changeCurDir(allocator: &Allocator, dir_path: []const u8) !void {
+pub fn changeCurDir(allocator: *Allocator, dir_path: []const u8) !void {
const path_buf = try allocator.alloc(u8, dir_path.len + 1);
defer allocator.free(path_buf);
@@ -1361,7 +1361,7 @@ pub fn changeCurDir(allocator: &Allocator, dir_path: []const u8) !void {
}
/// Read value of a symbolic link.
-pub fn readLink(allocator: &Allocator, pathname: []const u8) ![]u8 {
+pub fn readLink(allocator: *Allocator, pathname: []const u8) ![]u8 {
const path_buf = try allocator.alloc(u8, pathname.len + 1);
defer allocator.free(path_buf);
@@ -1468,7 +1468,7 @@ pub const ArgIteratorPosix = struct {
};
}
- pub fn next(self: &ArgIteratorPosix) ?[]const u8 {
+ pub fn next(self: *ArgIteratorPosix) ?[]const u8 {
if (self.index == self.count) return null;
const s = raw[self.index];
@@ -1476,7 +1476,7 @@ pub const ArgIteratorPosix = struct {
return cstr.toSlice(s);
}
- pub fn skip(self: &ArgIteratorPosix) bool {
+ pub fn skip(self: *ArgIteratorPosix) bool {
if (self.index == self.count) return false;
self.index += 1;
@@ -1485,12 +1485,12 @@ pub const ArgIteratorPosix = struct {
/// This is marked as public but actually it's only meant to be used
/// internally by zig's startup code.
- pub var raw: []&u8 = undefined;
+ pub var raw: []*u8 = undefined;
};
pub const ArgIteratorWindows = struct {
index: usize,
- cmd_line: &const u8,
+ cmd_line: *const u8,
in_quote: bool,
quote_count: usize,
seen_quote_count: usize,
@@ -1501,7 +1501,7 @@ pub const ArgIteratorWindows = struct {
return initWithCmdLine(windows.GetCommandLineA());
}
- pub fn initWithCmdLine(cmd_line: &const u8) ArgIteratorWindows {
+ pub fn initWithCmdLine(cmd_line: *const u8) ArgIteratorWindows {
return ArgIteratorWindows{
.index = 0,
.cmd_line = cmd_line,
@@ -1512,7 +1512,7 @@ pub const ArgIteratorWindows = struct {
}
/// You must free the returned memory when done.
- pub fn next(self: &ArgIteratorWindows, allocator: &Allocator) ?(NextError![]u8) {
+ pub fn next(self: *ArgIteratorWindows, allocator: *Allocator) ?(NextError![]u8) {
// march forward over whitespace
while (true) : (self.index += 1) {
const byte = self.cmd_line[self.index];
@@ -1526,7 +1526,7 @@ pub const ArgIteratorWindows = struct {
return self.internalNext(allocator);
}
- pub fn skip(self: &ArgIteratorWindows) bool {
+ pub fn skip(self: *ArgIteratorWindows) bool {
// march forward over whitespace
while (true) : (self.index += 1) {
const byte = self.cmd_line[self.index];
@@ -1565,7 +1565,7 @@ pub const ArgIteratorWindows = struct {
}
}
- fn internalNext(self: &ArgIteratorWindows, allocator: &Allocator) NextError![]u8 {
+ fn internalNext(self: *ArgIteratorWindows, allocator: *Allocator) NextError![]u8 {
var buf = try Buffer.initSize(allocator, 0);
defer buf.deinit();
@@ -1609,14 +1609,14 @@ pub const ArgIteratorWindows = struct {
}
}
- fn emitBackslashes(self: &ArgIteratorWindows, buf: &Buffer, emit_count: usize) !void {
+ fn emitBackslashes(self: *ArgIteratorWindows, buf: *Buffer, emit_count: usize) !void {
var i: usize = 0;
while (i < emit_count) : (i += 1) {
try buf.appendByte('\\');
}
}
- fn countQuotes(cmd_line: &const u8) usize {
+ fn countQuotes(cmd_line: *const u8) usize {
var result: usize = 0;
var backslash_count: usize = 0;
var index: usize = 0;
@@ -1649,7 +1649,7 @@ pub const ArgIterator = struct {
pub const NextError = ArgIteratorWindows.NextError;
/// You must free the returned memory when done.
- pub fn next(self: &ArgIterator, allocator: &Allocator) ?(NextError![]u8) {
+ pub fn next(self: *ArgIterator, allocator: *Allocator) ?(NextError![]u8) {
if (builtin.os == Os.windows) {
return self.inner.next(allocator);
} else {
@@ -1658,13 +1658,13 @@ pub const ArgIterator = struct {
}
/// If you only are targeting posix you can call this and not need an allocator.
- pub fn nextPosix(self: &ArgIterator) ?[]const u8 {
+ pub fn nextPosix(self: *ArgIterator) ?[]const u8 {
return self.inner.next();
}
/// Parse past 1 argument without capturing it.
/// Returns `true` if skipped an arg, `false` if we are at the end.
- pub fn skip(self: &ArgIterator) bool {
+ pub fn skip(self: *ArgIterator) bool {
return self.inner.skip();
}
};
@@ -1674,7 +1674,7 @@ pub fn args() ArgIterator {
}
/// Caller must call freeArgs on result.
-pub fn argsAlloc(allocator: &mem.Allocator) ![]const []u8 {
+pub fn argsAlloc(allocator: *mem.Allocator) ![]const []u8 {
// TODO refactor to only make 1 allocation.
var it = args();
var contents = try Buffer.initSize(allocator, 0);
@@ -1711,12 +1711,12 @@ pub fn argsAlloc(allocator: &mem.Allocator) ![]const []u8 {
return result_slice_list;
}
-pub fn argsFree(allocator: &mem.Allocator, args_alloc: []const []u8) void {
+pub fn argsFree(allocator: *mem.Allocator, args_alloc: []const []u8) void {
var total_bytes: usize = 0;
for (args_alloc) |arg| {
total_bytes += @sizeOf([]u8) + arg.len;
}
- const unaligned_allocated_buf = @ptrCast(&const u8, args_alloc.ptr)[0..total_bytes];
+ const unaligned_allocated_buf = @ptrCast(*const u8, args_alloc.ptr)[0..total_bytes];
const aligned_allocated_buf = @alignCast(@alignOf([]u8), unaligned_allocated_buf);
return allocator.free(aligned_allocated_buf);
}
@@ -1765,7 +1765,7 @@ test "windows arg parsing" {
});
}
-fn testWindowsCmdLine(input_cmd_line: &const u8, expected_args: []const []const u8) void {
+fn testWindowsCmdLine(input_cmd_line: *const u8, expected_args: []const []const u8) void {
var it = ArgIteratorWindows.initWithCmdLine(input_cmd_line);
for (expected_args) |expected_arg| {
const arg = ??it.next(debug.global_allocator) catch unreachable;
@@ -1832,7 +1832,7 @@ test "openSelfExe" {
/// This function may return an error if the current executable
/// was deleted after spawning.
/// Caller owns returned memory.
-pub fn selfExePath(allocator: &mem.Allocator) ![]u8 {
+pub fn selfExePath(allocator: *mem.Allocator) ![]u8 {
switch (builtin.os) {
Os.linux => {
// If the currently executing binary has been deleted,
@@ -1875,7 +1875,7 @@ pub fn selfExePath(allocator: &mem.Allocator) ![]u8 {
/// Get the directory path that contains the current executable.
/// Caller owns returned memory.
-pub fn selfExeDirPath(allocator: &mem.Allocator) ![]u8 {
+pub fn selfExeDirPath(allocator: *mem.Allocator) ![]u8 {
switch (builtin.os) {
Os.linux => {
// If the currently executing binary has been deleted,
@@ -2001,7 +2001,7 @@ pub const PosixBindError = error{
};
/// addr is `&const T` where T is one of the sockaddr
-pub fn posixBind(fd: i32, addr: &const posix.sockaddr) PosixBindError!void {
+pub fn posixBind(fd: i32, addr: *const posix.sockaddr) PosixBindError!void {
const rc = posix.bind(fd, addr, @sizeOf(posix.sockaddr));
const err = posix.getErrno(rc);
switch (err) {
@@ -2096,7 +2096,7 @@ pub const PosixAcceptError = error{
Unexpected,
};
-pub fn posixAccept(fd: i32, addr: &posix.sockaddr, flags: u32) PosixAcceptError!i32 {
+pub fn posixAccept(fd: i32, addr: *posix.sockaddr, flags: u32) PosixAcceptError!i32 {
while (true) {
var sockaddr_size = u32(@sizeOf(posix.sockaddr));
const rc = posix.accept4(fd, addr, &sockaddr_size, flags);
@@ -2195,7 +2195,7 @@ pub const LinuxEpollCtlError = error{
Unexpected,
};
-pub fn linuxEpollCtl(epfd: i32, op: u32, fd: i32, event: &linux.epoll_event) LinuxEpollCtlError!void {
+pub fn linuxEpollCtl(epfd: i32, op: u32, fd: i32, event: *linux.epoll_event) LinuxEpollCtlError!void {
const rc = posix.epoll_ctl(epfd, op, fd, event);
const err = posix.getErrno(rc);
switch (err) {
@@ -2288,7 +2288,7 @@ pub const PosixConnectError = error{
Unexpected,
};
-pub fn posixConnect(sockfd: i32, sockaddr: &const posix.sockaddr) PosixConnectError!void {
+pub fn posixConnect(sockfd: i32, sockaddr: *const posix.sockaddr) PosixConnectError!void {
while (true) {
const rc = posix.connect(sockfd, sockaddr, @sizeOf(posix.sockaddr));
const err = posix.getErrno(rc);
@@ -2319,7 +2319,7 @@ pub fn posixConnect(sockfd: i32, sockaddr: &const posix.sockaddr) PosixConnectEr
/// Same as posixConnect except it is for blocking socket file descriptors.
/// It expects to receive EINPROGRESS.
-pub fn posixConnectAsync(sockfd: i32, sockaddr: &const posix.sockaddr) PosixConnectError!void {
+pub fn posixConnectAsync(sockfd: i32, sockaddr: *const posix.sockaddr) PosixConnectError!void {
while (true) {
const rc = posix.connect(sockfd, sockaddr, @sizeOf(posix.sockaddr));
const err = posix.getErrno(rc);
@@ -2350,7 +2350,7 @@ pub fn posixConnectAsync(sockfd: i32, sockaddr: &const posix.sockaddr) PosixConn
pub fn posixGetSockOptConnectError(sockfd: i32) PosixConnectError!void {
var err_code: i32 = undefined;
var size: u32 = @sizeOf(i32);
- const rc = posix.getsockopt(sockfd, posix.SOL_SOCKET, posix.SO_ERROR, @ptrCast(&u8, &err_code), &size);
+ const rc = posix.getsockopt(sockfd, posix.SOL_SOCKET, posix.SO_ERROR, @ptrCast(*u8, &err_code), &size);
assert(size == 4);
const err = posix.getErrno(rc);
switch (err) {
@@ -2401,13 +2401,13 @@ pub const Thread = struct {
},
builtin.Os.windows => struct {
handle: windows.HANDLE,
- alloc_start: &c_void,
+ alloc_start: *c_void,
heap_handle: windows.HANDLE,
},
else => @compileError("Unsupported OS"),
};
- pub fn wait(self: &const Thread) void {
+ pub fn wait(self: *const Thread) void {
if (use_pthreads) {
const err = c.pthread_join(self.data.handle, null);
switch (err) {
@@ -2473,7 +2473,7 @@ pub const SpawnThreadError = error{
/// fn startFn(@typeOf(context)) T
/// where T is u8, noreturn, void, or !void
/// caller must call wait on the returned thread
-pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!&Thread {
+pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!*Thread {
// TODO compile-time call graph analysis to determine stack upper bound
// https://github.com/ziglang/zig/issues/157
const default_stack_size = 8 * 1024 * 1024;
@@ -2491,7 +2491,7 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!&Thread
if (@sizeOf(Context) == 0) {
return startFn({});
} else {
- return startFn(@ptrCast(&Context, @alignCast(@alignOf(Context), arg)).*);
+ return startFn(@ptrCast(*Context, @alignCast(@alignOf(Context), arg)).*);
}
}
};
@@ -2500,13 +2500,13 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!&Thread
const byte_count = @alignOf(WinThread.OuterContext) + @sizeOf(WinThread.OuterContext);
const bytes_ptr = windows.HeapAlloc(heap_handle, 0, byte_count) ?? return SpawnThreadError.OutOfMemory;
errdefer assert(windows.HeapFree(heap_handle, 0, bytes_ptr) != 0);
- const bytes = @ptrCast(&u8, bytes_ptr)[0..byte_count];
+ const bytes = @ptrCast(*u8, bytes_ptr)[0..byte_count];
const outer_context = std.heap.FixedBufferAllocator.init(bytes).allocator.create(WinThread.OuterContext) catch unreachable;
outer_context.inner = context;
outer_context.thread.data.heap_handle = heap_handle;
outer_context.thread.data.alloc_start = bytes_ptr;
- const parameter = if (@sizeOf(Context) == 0) null else @ptrCast(&c_void, &outer_context.inner);
+ const parameter = if (@sizeOf(Context) == 0) null else @ptrCast(*c_void, &outer_context.inner);
outer_context.thread.data.handle = windows.CreateThread(null, default_stack_size, WinThread.threadMain, parameter, 0, null) ?? {
const err = windows.GetLastError();
return switch (err) {
@@ -2521,15 +2521,15 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!&Thread
if (@sizeOf(Context) == 0) {
return startFn({});
} else {
- return startFn(@intToPtr(&const Context, ctx_addr).*);
+ return startFn(@intToPtr(*const Context, ctx_addr).*);
}
}
- extern fn posixThreadMain(ctx: ?&c_void) ?&c_void {
+ extern fn posixThreadMain(ctx: ?*c_void) ?*c_void {
if (@sizeOf(Context) == 0) {
_ = startFn({});
return null;
} else {
- _ = startFn(@ptrCast(&const Context, @alignCast(@alignOf(Context), ctx)).*);
+ _ = startFn(@ptrCast(*const Context, @alignCast(@alignOf(Context), ctx)).*);
return null;
}
}
@@ -2548,7 +2548,7 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!&Thread
stack_end -= @sizeOf(Context);
stack_end -= stack_end % @alignOf(Context);
assert(stack_end >= stack_addr);
- const context_ptr = @alignCast(@alignOf(Context), @intToPtr(&Context, stack_end));
+ const context_ptr = @alignCast(@alignOf(Context), @intToPtr(*Context, stack_end));
context_ptr.* = context;
arg = stack_end;
}
@@ -2556,7 +2556,7 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!&Thread
stack_end -= @sizeOf(Thread);
stack_end -= stack_end % @alignOf(Thread);
assert(stack_end >= stack_addr);
- const thread_ptr = @alignCast(@alignOf(Thread), @intToPtr(&Thread, stack_end));
+ const thread_ptr = @alignCast(@alignOf(Thread), @intToPtr(*Thread, stack_end));
thread_ptr.data.stack_addr = stack_addr;
thread_ptr.data.stack_len = mmap_len;
@@ -2572,9 +2572,9 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!&Thread
// align to page
stack_end -= stack_end % os.page_size;
- assert(c.pthread_attr_setstack(&attr, @intToPtr(&c_void, stack_addr), stack_end - stack_addr) == 0);
+ assert(c.pthread_attr_setstack(&attr, @intToPtr(*c_void, stack_addr), stack_end - stack_addr) == 0);
- const err = c.pthread_create(&thread_ptr.data.handle, &attr, MainFuncs.posixThreadMain, @intToPtr(&c_void, arg));
+ const err = c.pthread_create(&thread_ptr.data.handle, &attr, MainFuncs.posixThreadMain, @intToPtr(*c_void, arg));
switch (err) {
0 => return thread_ptr,
posix.EAGAIN => return SpawnThreadError.SystemResources,
diff --git a/std/os/linux/index.zig b/std/os/linux/index.zig
index 5186ff32d394..3e7b836ac7b8 100644
--- a/std/os/linux/index.zig
+++ b/std/os/linux/index.zig
@@ -665,15 +665,15 @@ pub fn dup2(old: i32, new: i32) usize {
return syscall2(SYS_dup2, usize(old), usize(new));
}
-pub fn chdir(path: &const u8) usize {
+pub fn chdir(path: *const u8) usize {
return syscall1(SYS_chdir, @ptrToInt(path));
}
-pub fn chroot(path: &const u8) usize {
+pub fn chroot(path: *const u8) usize {
return syscall1(SYS_chroot, @ptrToInt(path));
}
-pub fn execve(path: &const u8, argv: &const ?&const u8, envp: &const ?&const u8) usize {
+pub fn execve(path: *const u8, argv: *const ?*const u8, envp: *const ?*const u8) usize {
return syscall3(SYS_execve, @ptrToInt(path), @ptrToInt(argv), @ptrToInt(envp));
}
@@ -681,15 +681,15 @@ pub fn fork() usize {
return syscall0(SYS_fork);
}
-pub fn futex_wait(uaddr: usize, futex_op: u32, val: i32, timeout: ?&timespec) usize {
+pub fn futex_wait(uaddr: usize, futex_op: u32, val: i32, timeout: ?*timespec) usize {
return syscall4(SYS_futex, uaddr, futex_op, @bitCast(u32, val), @ptrToInt(timeout));
}
-pub fn getcwd(buf: &u8, size: usize) usize {
+pub fn getcwd(buf: *u8, size: usize) usize {
return syscall2(SYS_getcwd, @ptrToInt(buf), size);
}
-pub fn getdents(fd: i32, dirp: &u8, count: usize) usize {
+pub fn getdents(fd: i32, dirp: *u8, count: usize) usize {
return syscall3(SYS_getdents, usize(fd), @ptrToInt(dirp), count);
}
@@ -698,27 +698,27 @@ pub fn isatty(fd: i32) bool {
return syscall3(SYS_ioctl, usize(fd), TIOCGWINSZ, @ptrToInt(&wsz)) == 0;
}
-pub fn readlink(noalias path: &const u8, noalias buf_ptr: &u8, buf_len: usize) usize {
+pub fn readlink(noalias path: *const u8, noalias buf_ptr: *u8, buf_len: usize) usize {
return syscall3(SYS_readlink, @ptrToInt(path), @ptrToInt(buf_ptr), buf_len);
}
-pub fn mkdir(path: &const u8, mode: u32) usize {
+pub fn mkdir(path: *const u8, mode: u32) usize {
return syscall2(SYS_mkdir, @ptrToInt(path), mode);
}
-pub fn mount(special: &const u8, dir: &const u8, fstype: &const u8, flags: usize, data: usize) usize {
+pub fn mount(special: *const u8, dir: *const u8, fstype: *const u8, flags: usize, data: usize) usize {
return syscall5(SYS_mount, @ptrToInt(special), @ptrToInt(dir), @ptrToInt(fstype), flags, data);
}
-pub fn umount(special: &const u8) usize {
+pub fn umount(special: *const u8) usize {
return syscall2(SYS_umount2, @ptrToInt(special), 0);
}
-pub fn umount2(special: &const u8, flags: u32) usize {
+pub fn umount2(special: *const u8, flags: u32) usize {
return syscall2(SYS_umount2, @ptrToInt(special), flags);
}
-pub fn mmap(address: ?&u8, length: usize, prot: usize, flags: u32, fd: i32, offset: isize) usize {
+pub fn mmap(address: ?*u8, length: usize, prot: usize, flags: u32, fd: i32, offset: isize) usize {
return syscall6(SYS_mmap, @ptrToInt(address), length, prot, flags, usize(fd), @bitCast(usize, offset));
}
@@ -726,60 +726,60 @@ pub fn munmap(address: usize, length: usize) usize {
return syscall2(SYS_munmap, address, length);
}
-pub fn read(fd: i32, buf: &u8, count: usize) usize {
+pub fn read(fd: i32, buf: *u8, count: usize) usize {
return syscall3(SYS_read, usize(fd), @ptrToInt(buf), count);
}
-pub fn rmdir(path: &const u8) usize {
+pub fn rmdir(path: *const u8) usize {
return syscall1(SYS_rmdir, @ptrToInt(path));
}
-pub fn symlink(existing: &const u8, new: &const u8) usize {
+pub fn symlink(existing: *const u8, new: *const u8) usize {
return syscall2(SYS_symlink, @ptrToInt(existing), @ptrToInt(new));
}
-pub fn pread(fd: i32, buf: &u8, count: usize, offset: usize) usize {
+pub fn pread(fd: i32, buf: *u8, count: usize, offset: usize) usize {
return syscall4(SYS_pread, usize(fd), @ptrToInt(buf), count, offset);
}
-pub fn access(path: &const u8, mode: u32) usize {
+pub fn access(path: *const u8, mode: u32) usize {
return syscall2(SYS_access, @ptrToInt(path), mode);
}
-pub fn pipe(fd: &[2]i32) usize {
+pub fn pipe(fd: *[2]i32) usize {
return pipe2(fd, 0);
}
-pub fn pipe2(fd: &[2]i32, flags: usize) usize {
+pub fn pipe2(fd: *[2]i32, flags: usize) usize {
return syscall2(SYS_pipe2, @ptrToInt(fd), flags);
}
-pub fn write(fd: i32, buf: &const u8, count: usize) usize {
+pub fn write(fd: i32, buf: *const u8, count: usize) usize {
return syscall3(SYS_write, usize(fd), @ptrToInt(buf), count);
}
-pub fn pwrite(fd: i32, buf: &const u8, count: usize, offset: usize) usize {
+pub fn pwrite(fd: i32, buf: *const u8, count: usize, offset: usize) usize {
return syscall4(SYS_pwrite, usize(fd), @ptrToInt(buf), count, offset);
}
-pub fn rename(old: &const u8, new: &const u8) usize {
+pub fn rename(old: *const u8, new: *const u8) usize {
return syscall2(SYS_rename, @ptrToInt(old), @ptrToInt(new));
}
-pub fn open(path: &const u8, flags: u32, perm: usize) usize {
+pub fn open(path: *const u8, flags: u32, perm: usize) usize {
return syscall3(SYS_open, @ptrToInt(path), flags, perm);
}
-pub fn create(path: &const u8, perm: usize) usize {
+pub fn create(path: *const u8, perm: usize) usize {
return syscall2(SYS_creat, @ptrToInt(path), perm);
}
-pub fn openat(dirfd: i32, path: &const u8, flags: usize, mode: usize) usize {
+pub fn openat(dirfd: i32, path: *const u8, flags: usize, mode: usize) usize {
return syscall4(SYS_openat, usize(dirfd), @ptrToInt(path), flags, mode);
}
/// See also `clone` (from the arch-specific include)
-pub fn clone5(flags: usize, child_stack_ptr: usize, parent_tid: &i32, child_tid: &i32, newtls: usize) usize {
+pub fn clone5(flags: usize, child_stack_ptr: usize, parent_tid: *i32, child_tid: *i32, newtls: usize) usize {
return syscall5(SYS_clone, flags, child_stack_ptr, @ptrToInt(parent_tid), @ptrToInt(child_tid), newtls);
}
@@ -801,7 +801,7 @@ pub fn exit(status: i32) noreturn {
unreachable;
}
-pub fn getrandom(buf: &u8, count: usize, flags: u32) usize {
+pub fn getrandom(buf: *u8, count: usize, flags: u32) usize {
return syscall3(SYS_getrandom, @ptrToInt(buf), count, usize(flags));
}
@@ -809,15 +809,15 @@ pub fn kill(pid: i32, sig: i32) usize {
return syscall2(SYS_kill, @bitCast(usize, isize(pid)), usize(sig));
}
-pub fn unlink(path: &const u8) usize {
+pub fn unlink(path: *const u8) usize {
return syscall1(SYS_unlink, @ptrToInt(path));
}
-pub fn waitpid(pid: i32, status: &i32, options: i32) usize {
+pub fn waitpid(pid: i32, status: *i32, options: i32) usize {
return syscall4(SYS_wait4, @bitCast(usize, isize(pid)), @ptrToInt(status), @bitCast(usize, isize(options)), 0);
}
-pub fn clock_gettime(clk_id: i32, tp: &timespec) usize {
+pub fn clock_gettime(clk_id: i32, tp: *timespec) usize {
if (VDSO_CGT_SYM.len != 0) {
const f = @atomicLoad(@typeOf(init_vdso_clock_gettime), &vdso_clock_gettime, builtin.AtomicOrder.Unordered);
if (@ptrToInt(f) != 0) {
@@ -831,7 +831,7 @@ pub fn clock_gettime(clk_id: i32, tp: &timespec) usize {
return syscall2(SYS_clock_gettime, @bitCast(usize, isize(clk_id)), @ptrToInt(tp));
}
var vdso_clock_gettime = init_vdso_clock_gettime;
-extern fn init_vdso_clock_gettime(clk: i32, ts: &timespec) usize {
+extern fn init_vdso_clock_gettime(clk: i32, ts: *timespec) usize {
const addr = vdso.lookup(VDSO_CGT_VER, VDSO_CGT_SYM);
var f = @intToPtr(@typeOf(init_vdso_clock_gettime), addr);
_ = @cmpxchgStrong(@typeOf(init_vdso_clock_gettime), &vdso_clock_gettime, init_vdso_clock_gettime, f, builtin.AtomicOrder.Monotonic, builtin.AtomicOrder.Monotonic);
@@ -839,23 +839,23 @@ extern fn init_vdso_clock_gettime(clk: i32, ts: &timespec) usize {
return f(clk, ts);
}
-pub fn clock_getres(clk_id: i32, tp: &timespec) usize {
+pub fn clock_getres(clk_id: i32, tp: *timespec) usize {
return syscall2(SYS_clock_getres, @bitCast(usize, isize(clk_id)), @ptrToInt(tp));
}
-pub fn clock_settime(clk_id: i32, tp: &const timespec) usize {
+pub fn clock_settime(clk_id: i32, tp: *const timespec) usize {
return syscall2(SYS_clock_settime, @bitCast(usize, isize(clk_id)), @ptrToInt(tp));
}
-pub fn gettimeofday(tv: &timeval, tz: &timezone) usize {
+pub fn gettimeofday(tv: *timeval, tz: *timezone) usize {
return syscall2(SYS_gettimeofday, @ptrToInt(tv), @ptrToInt(tz));
}
-pub fn settimeofday(tv: &const timeval, tz: &const timezone) usize {
+pub fn settimeofday(tv: *const timeval, tz: *const timezone) usize {
return syscall2(SYS_settimeofday, @ptrToInt(tv), @ptrToInt(tz));
}
-pub fn nanosleep(req: &const timespec, rem: ?&timespec) usize {
+pub fn nanosleep(req: *const timespec, rem: ?*timespec) usize {
return syscall2(SYS_nanosleep, @ptrToInt(req), @ptrToInt(rem));
}
@@ -899,11 +899,11 @@ pub fn setegid(egid: u32) usize {
return syscall1(SYS_setegid, egid);
}
-pub fn getresuid(ruid: &u32, euid: &u32, suid: &u32) usize {
+pub fn getresuid(ruid: *u32, euid: *u32, suid: *u32) usize {
return syscall3(SYS_getresuid, @ptrToInt(ruid), @ptrToInt(euid), @ptrToInt(suid));
}
-pub fn getresgid(rgid: &u32, egid: &u32, sgid: &u32) usize {
+pub fn getresgid(rgid: *u32, egid: *u32, sgid: *u32) usize {
return syscall3(SYS_getresgid, @ptrToInt(rgid), @ptrToInt(egid), @ptrToInt(sgid));
}
@@ -915,11 +915,11 @@ pub fn setresgid(rgid: u32, egid: u32, sgid: u32) usize {
return syscall3(SYS_setresgid, rgid, egid, sgid);
}
-pub fn getgroups(size: usize, list: &u32) usize {
+pub fn getgroups(size: usize, list: *u32) usize {
return syscall2(SYS_getgroups, size, @ptrToInt(list));
}
-pub fn setgroups(size: usize, list: &const u32) usize {
+pub fn setgroups(size: usize, list: *const u32) usize {
return syscall2(SYS_setgroups, size, @ptrToInt(list));
}
@@ -927,11 +927,11 @@ pub fn getpid() i32 {
return @bitCast(i32, u32(syscall0(SYS_getpid)));
}
-pub fn sigprocmask(flags: u32, noalias set: &const sigset_t, noalias oldset: ?&sigset_t) usize {
+pub fn sigprocmask(flags: u32, noalias set: *const sigset_t, noalias oldset: ?*sigset_t) usize {
return syscall4(SYS_rt_sigprocmask, flags, @ptrToInt(set), @ptrToInt(oldset), NSIG / 8);
}
-pub fn sigaction(sig: u6, noalias act: &const Sigaction, noalias oact: ?&Sigaction) usize {
+pub fn sigaction(sig: u6, noalias act: *const Sigaction, noalias oact: ?*Sigaction) usize {
assert(sig >= 1);
assert(sig != SIGKILL);
assert(sig != SIGSTOP);
@@ -942,8 +942,8 @@ pub fn sigaction(sig: u6, noalias act: &const Sigaction, noalias oact: ?&Sigacti
.restorer = @ptrCast(extern fn () void, restore_rt),
};
var ksa_old: k_sigaction = undefined;
- @memcpy(@ptrCast(&u8, &ksa.mask), @ptrCast(&const u8, &act.mask), 8);
- const result = syscall4(SYS_rt_sigaction, sig, @ptrToInt(&ksa), @ptrToInt(&ksa_old), @sizeOf(@typeOf(ksa.mask)));
+ @memcpy(@ptrCast(*u8, &ksa.mask), @ptrCast(*const u8, &act.mask), 8);
+ const result = syscall4(SYS_rt_sigaction, sig, @ptrToInt(&ksa), @ptrToInt(&ksa_old), @sizeOf(@typeOf(ksa.mask)));
const err = getErrno(result);
if (err != 0) {
return result;
@@ -951,7 +951,7 @@ pub fn sigaction(sig: u6, noalias act: &const Sigaction, noalias oact: ?&Sigacti
if (oact) |old| {
old.handler = ksa_old.handler;
old.flags = @truncate(u32, ksa_old.flags);
- @memcpy(@ptrCast(&u8, &old.mask), @ptrCast(&const u8, &ksa_old.mask), @sizeOf(@typeOf(ksa_old.mask)));
+ @memcpy(@ptrCast(*u8, &old.mask), @ptrCast(*const u8, &ksa_old.mask), @sizeOf(@typeOf(ksa_old.mask)));
}
return 0;
}
@@ -989,24 +989,24 @@ pub fn raise(sig: i32) usize {
return ret;
}
-fn blockAllSignals(set: &sigset_t) void {
+fn blockAllSignals(set: *sigset_t) void {
_ = syscall4(SYS_rt_sigprocmask, SIG_BLOCK, @ptrToInt(&all_mask), @ptrToInt(set), NSIG / 8);
}
-fn blockAppSignals(set: &sigset_t) void {
+fn blockAppSignals(set: *sigset_t) void {
_ = syscall4(SYS_rt_sigprocmask, SIG_BLOCK, @ptrToInt(&app_mask), @ptrToInt(set), NSIG / 8);
}
-fn restoreSignals(set: &sigset_t) void {
+fn restoreSignals(set: *sigset_t) void {
_ = syscall4(SYS_rt_sigprocmask, SIG_SETMASK, @ptrToInt(set), 0, NSIG / 8);
}
-pub fn sigaddset(set: &sigset_t, sig: u6) void {
+pub fn sigaddset(set: *sigset_t, sig: u6) void {
const s = sig - 1;
(set.*)[usize(s) / usize.bit_count] |= usize(1) << (s & (usize.bit_count - 1));
}
-pub fn sigismember(set: &const sigset_t, sig: u6) bool {
+pub fn sigismember(set: *const sigset_t, sig: u6) bool {
const s = sig - 1;
return ((set.*)[usize(s) / usize.bit_count] & (usize(1) << (s & (usize.bit_count - 1)))) != 0;
}
@@ -1036,15 +1036,15 @@ pub const sockaddr_in6 = extern struct {
};
pub const iovec = extern struct {
- iov_base: &u8,
+ iov_base: *u8,
iov_len: usize,
};
-pub fn getsockname(fd: i32, noalias addr: &sockaddr, noalias len: &socklen_t) usize {
+pub fn getsockname(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize {
return syscall3(SYS_getsockname, usize(fd), @ptrToInt(addr), @ptrToInt(len));
}
-pub fn getpeername(fd: i32, noalias addr: &sockaddr, noalias len: &socklen_t) usize {
+pub fn getpeername(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize {
return syscall3(SYS_getpeername, usize(fd), @ptrToInt(addr), @ptrToInt(len));
}
@@ -1052,27 +1052,27 @@ pub fn socket(domain: u32, socket_type: u32, protocol: u32) usize {
return syscall3(SYS_socket, domain, socket_type, protocol);
}
-pub fn setsockopt(fd: i32, level: u32, optname: u32, optval: &const u8, optlen: socklen_t) usize {
+pub fn setsockopt(fd: i32, level: u32, optname: u32, optval: *const u8, optlen: socklen_t) usize {
return syscall5(SYS_setsockopt, usize(fd), level, optname, usize(optval), @ptrToInt(optlen));
}
-pub fn getsockopt(fd: i32, level: u32, optname: u32, noalias optval: &u8, noalias optlen: &socklen_t) usize {
+pub fn getsockopt(fd: i32, level: u32, optname: u32, noalias optval: *u8, noalias optlen: *socklen_t) usize {
return syscall5(SYS_getsockopt, usize(fd), level, optname, @ptrToInt(optval), @ptrToInt(optlen));
}
-pub fn sendmsg(fd: i32, msg: &const msghdr, flags: u32) usize {
+pub fn sendmsg(fd: i32, msg: *const msghdr, flags: u32) usize {
return syscall3(SYS_sendmsg, usize(fd), @ptrToInt(msg), flags);
}
-pub fn connect(fd: i32, addr: &const sockaddr, len: socklen_t) usize {
+pub fn connect(fd: i32, addr: *const sockaddr, len: socklen_t) usize {
return syscall3(SYS_connect, usize(fd), @ptrToInt(addr), usize(len));
}
-pub fn recvmsg(fd: i32, msg: &msghdr, flags: u32) usize {
+pub fn recvmsg(fd: i32, msg: *msghdr, flags: u32) usize {
return syscall3(SYS_recvmsg, usize(fd), @ptrToInt(msg), flags);
}
-pub fn recvfrom(fd: i32, noalias buf: &u8, len: usize, flags: u32, noalias addr: ?&sockaddr, noalias alen: ?&socklen_t) usize {
+pub fn recvfrom(fd: i32, noalias buf: *u8, len: usize, flags: u32, noalias addr: ?*sockaddr, noalias alen: ?*socklen_t) usize {
return syscall6(SYS_recvfrom, usize(fd), @ptrToInt(buf), len, flags, @ptrToInt(addr), @ptrToInt(alen));
}
@@ -1080,7 +1080,7 @@ pub fn shutdown(fd: i32, how: i32) usize {
return syscall2(SYS_shutdown, usize(fd), usize(how));
}
-pub fn bind(fd: i32, addr: &const sockaddr, len: socklen_t) usize {
+pub fn bind(fd: i32, addr: *const sockaddr, len: socklen_t) usize {
return syscall3(SYS_bind, usize(fd), @ptrToInt(addr), usize(len));
}
@@ -1088,79 +1088,79 @@ pub fn listen(fd: i32, backlog: u32) usize {
return syscall2(SYS_listen, usize(fd), backlog);
}
-pub fn sendto(fd: i32, buf: &const u8, len: usize, flags: u32, addr: ?&const sockaddr, alen: socklen_t) usize {
+pub fn sendto(fd: i32, buf: *const u8, len: usize, flags: u32, addr: ?*const sockaddr, alen: socklen_t) usize {
return syscall6(SYS_sendto, usize(fd), @ptrToInt(buf), len, flags, @ptrToInt(addr), usize(alen));
}
pub fn socketpair(domain: i32, socket_type: i32, protocol: i32, fd: [2]i32) usize {
- return syscall4(SYS_socketpair, usize(domain), usize(socket_type), usize(protocol), @ptrToInt(&fd[0]));
+ return syscall4(SYS_socketpair, usize(domain), usize(socket_type), usize(protocol), @ptrToInt(&fd[0]));
}
-pub fn accept(fd: i32, noalias addr: &sockaddr, noalias len: &socklen_t) usize {
+pub fn accept(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize {
return accept4(fd, addr, len, 0);
}
-pub fn accept4(fd: i32, noalias addr: &sockaddr, noalias len: &socklen_t, flags: u32) usize {
+pub fn accept4(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t, flags: u32) usize {
return syscall4(SYS_accept4, usize(fd), @ptrToInt(addr), @ptrToInt(len), flags);
}
-pub fn fstat(fd: i32, stat_buf: &Stat) usize {
+pub fn fstat(fd: i32, stat_buf: *Stat) usize {
return syscall2(SYS_fstat, usize(fd), @ptrToInt(stat_buf));
}
-pub fn stat(pathname: &const u8, statbuf: &Stat) usize {
+pub fn stat(pathname: *const u8, statbuf: *Stat) usize {
return syscall2(SYS_stat, @ptrToInt(pathname), @ptrToInt(statbuf));
}
-pub fn lstat(pathname: &const u8, statbuf: &Stat) usize {
+pub fn lstat(pathname: *const u8, statbuf: *Stat) usize {
return syscall2(SYS_lstat, @ptrToInt(pathname), @ptrToInt(statbuf));
}
-pub fn listxattr(path: &const u8, list: &u8, size: usize) usize {
+pub fn listxattr(path: *const u8, list: *u8, size: usize) usize {
return syscall3(SYS_listxattr, @ptrToInt(path), @ptrToInt(list), size);
}
-pub fn llistxattr(path: &const u8, list: &u8, size: usize) usize {
+pub fn llistxattr(path: *const u8, list: *u8, size: usize) usize {
return syscall3(SYS_llistxattr, @ptrToInt(path), @ptrToInt(list), size);
}
-pub fn flistxattr(fd: usize, list: &u8, size: usize) usize {
+pub fn flistxattr(fd: usize, list: *u8, size: usize) usize {
return syscall3(SYS_flistxattr, fd, @ptrToInt(list), size);
}
-pub fn getxattr(path: &const u8, name: &const u8, value: &void, size: usize) usize {
+pub fn getxattr(path: *const u8, name: *const u8, value: *void, size: usize) usize {
return syscall4(SYS_getxattr, @ptrToInt(path), @ptrToInt(name), @ptrToInt(value), size);
}
-pub fn lgetxattr(path: &const u8, name: &const u8, value: &void, size: usize) usize {
+pub fn lgetxattr(path: *const u8, name: *const u8, value: *void, size: usize) usize {
return syscall4(SYS_lgetxattr, @ptrToInt(path), @ptrToInt(name), @ptrToInt(value), size);
}
-pub fn fgetxattr(fd: usize, name: &const u8, value: &void, size: usize) usize {
+pub fn fgetxattr(fd: usize, name: *const u8, value: *void, size: usize) usize {
return syscall4(SYS_lgetxattr, fd, @ptrToInt(name), @ptrToInt(value), size);
}
-pub fn setxattr(path: &const u8, name: &const u8, value: &const void, size: usize, flags: usize) usize {
+pub fn setxattr(path: *const u8, name: *const u8, value: *const void, size: usize, flags: usize) usize {
return syscall5(SYS_setxattr, @ptrToInt(path), @ptrToInt(name), @ptrToInt(value), size, flags);
}
-pub fn lsetxattr(path: &const u8, name: &const u8, value: &const void, size: usize, flags: usize) usize {
+pub fn lsetxattr(path: *const u8, name: *const u8, value: *const void, size: usize, flags: usize) usize {
return syscall5(SYS_lsetxattr, @ptrToInt(path), @ptrToInt(name), @ptrToInt(value), size, flags);
}
-pub fn fsetxattr(fd: usize, name: &const u8, value: &const void, size: usize, flags: usize) usize {
+pub fn fsetxattr(fd: usize, name: *const u8, value: *const void, size: usize, flags: usize) usize {
return syscall5(SYS_fsetxattr, fd, @ptrToInt(name), @ptrToInt(value), size, flags);
}
-pub fn removexattr(path: &const u8, name: &const u8) usize {
+pub fn removexattr(path: *const u8, name: *const u8) usize {
return syscall2(SYS_removexattr, @ptrToInt(path), @ptrToInt(name));
}
-pub fn lremovexattr(path: &const u8, name: &const u8) usize {
+pub fn lremovexattr(path: *const u8, name: *const u8) usize {
return syscall2(SYS_lremovexattr, @ptrToInt(path), @ptrToInt(name));
}
-pub fn fremovexattr(fd: usize, name: &const u8) usize {
+pub fn fremovexattr(fd: usize, name: *const u8) usize {
return syscall2(SYS_fremovexattr, fd, @ptrToInt(name));
}
@@ -1184,11 +1184,11 @@ pub fn epoll_create1(flags: usize) usize {
return syscall1(SYS_epoll_create1, flags);
}
-pub fn epoll_ctl(epoll_fd: i32, op: u32, fd: i32, ev: &epoll_event) usize {
+pub fn epoll_ctl(epoll_fd: i32, op: u32, fd: i32, ev: *epoll_event) usize {
return syscall4(SYS_epoll_ctl, usize(epoll_fd), usize(op), usize(fd), @ptrToInt(ev));
}
-pub fn epoll_wait(epoll_fd: i32, events: &epoll_event, maxevents: u32, timeout: i32) usize {
+pub fn epoll_wait(epoll_fd: i32, events: *epoll_event, maxevents: u32, timeout: i32) usize {
return syscall4(SYS_epoll_wait, usize(epoll_fd), @ptrToInt(events), usize(maxevents), usize(timeout));
}
@@ -1201,11 +1201,11 @@ pub const itimerspec = extern struct {
it_value: timespec,
};
-pub fn timerfd_gettime(fd: i32, curr_value: &itimerspec) usize {
+pub fn timerfd_gettime(fd: i32, curr_value: *itimerspec) usize {
return syscall2(SYS_timerfd_gettime, usize(fd), @ptrToInt(curr_value));
}
-pub fn timerfd_settime(fd: i32, flags: u32, new_value: &const itimerspec, old_value: ?&itimerspec) usize {
+pub fn timerfd_settime(fd: i32, flags: u32, new_value: *const itimerspec, old_value: ?*itimerspec) usize {
return syscall4(SYS_timerfd_settime, usize(fd), usize(flags), @ptrToInt(new_value), @ptrToInt(old_value));
}
@@ -1300,8 +1300,8 @@ pub fn CAP_TO_INDEX(cap: u8) u8 {
}
pub const cap_t = extern struct {
- hdrp: &cap_user_header_t,
- datap: &cap_user_data_t,
+ hdrp: *cap_user_header_t,
+ datap: *cap_user_data_t,
};
pub const cap_user_header_t = extern struct {
@@ -1319,11 +1319,11 @@ pub fn unshare(flags: usize) usize {
return syscall1(SYS_unshare, usize(flags));
}
-pub fn capget(hdrp: &cap_user_header_t, datap: &cap_user_data_t) usize {
+pub fn capget(hdrp: *cap_user_header_t, datap: *cap_user_data_t) usize {
return syscall2(SYS_capget, @ptrToInt(hdrp), @ptrToInt(datap));
}
-pub fn capset(hdrp: &cap_user_header_t, datap: &const cap_user_data_t) usize {
+pub fn capset(hdrp: *cap_user_header_t, datap: *const cap_user_data_t) usize {
return syscall2(SYS_capset, @ptrToInt(hdrp), @ptrToInt(datap));
}
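
A quick orientation for readers of these wrappers: only the parameter types change spelling here (&sockaddr becomes *sockaddr, and so on); taking an address at a call site still uses &. A minimal caller sketch, assuming the usual pub declarations of sockaddr and socklen_t in this module and a hypothetical fd value:

    const linux = @import("std").os.linux;

    fn exampleGetSockName(fd: i32) usize {
        // The parameter types are now *sockaddr and *socklen_t, but &addr
        // and &len are still how those pointers are produced at the call site.
        var addr: linux.sockaddr = undefined;
        var len = linux.socklen_t(@sizeOf(linux.sockaddr));
        return linux.getsockname(fd, &addr, &len);
    }
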
diff --git a/std/os/linux/vdso.zig b/std/os/linux/vdso.zig
index 8e0a285841a7..1317da6388b5 100644
--- a/std/os/linux/vdso.zig
+++ b/std/os/linux/vdso.zig
@@ -8,11 +8,11 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
const vdso_addr = std.os.linux_aux_raw[std.elf.AT_SYSINFO_EHDR];
if (vdso_addr == 0) return 0;
- const eh = @intToPtr(&elf.Ehdr, vdso_addr);
+ const eh = @intToPtr(*elf.Ehdr, vdso_addr);
var ph_addr: usize = vdso_addr + eh.e_phoff;
- const ph = @intToPtr(&elf.Phdr, ph_addr);
+ const ph = @intToPtr(*elf.Phdr, ph_addr);
- var maybe_dynv: ?&usize = null;
+ var maybe_dynv: ?*usize = null;
var base: usize = @maxValue(usize);
{
var i: usize = 0;
@@ -20,10 +20,10 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
i += 1;
ph_addr += eh.e_phentsize;
}) {
- const this_ph = @intToPtr(&elf.Phdr, ph_addr);
+ const this_ph = @intToPtr(*elf.Phdr, ph_addr);
switch (this_ph.p_type) {
elf.PT_LOAD => base = vdso_addr + this_ph.p_offset - this_ph.p_vaddr,
- elf.PT_DYNAMIC => maybe_dynv = @intToPtr(&usize, vdso_addr + this_ph.p_offset),
+ elf.PT_DYNAMIC => maybe_dynv = @intToPtr(*usize, vdso_addr + this_ph.p_offset),
else => {},
}
}
@@ -31,22 +31,22 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
const dynv = maybe_dynv ?? return 0;
if (base == @maxValue(usize)) return 0;
- var maybe_strings: ?&u8 = null;
- var maybe_syms: ?&elf.Sym = null;
- var maybe_hashtab: ?&linux.Elf_Symndx = null;
- var maybe_versym: ?&u16 = null;
- var maybe_verdef: ?&elf.Verdef = null;
+ var maybe_strings: ?*u8 = null;
+ var maybe_syms: ?*elf.Sym = null;
+ var maybe_hashtab: ?*linux.Elf_Symndx = null;
+ var maybe_versym: ?*u16 = null;
+ var maybe_verdef: ?*elf.Verdef = null;
{
var i: usize = 0;
while (dynv[i] != 0) : (i += 2) {
const p = base + dynv[i + 1];
switch (dynv[i]) {
- elf.DT_STRTAB => maybe_strings = @intToPtr(&u8, p),
- elf.DT_SYMTAB => maybe_syms = @intToPtr(&elf.Sym, p),
- elf.DT_HASH => maybe_hashtab = @intToPtr(&linux.Elf_Symndx, p),
- elf.DT_VERSYM => maybe_versym = @intToPtr(&u16, p),
- elf.DT_VERDEF => maybe_verdef = @intToPtr(&elf.Verdef, p),
+ elf.DT_STRTAB => maybe_strings = @intToPtr(*u8, p),
+ elf.DT_SYMTAB => maybe_syms = @intToPtr(*elf.Sym, p),
+ elf.DT_HASH => maybe_hashtab = @intToPtr(*linux.Elf_Symndx, p),
+ elf.DT_VERSYM => maybe_versym = @intToPtr(*u16, p),
+ elf.DT_VERDEF => maybe_verdef = @intToPtr(*elf.Verdef, p),
else => {},
}
}
@@ -76,7 +76,7 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
return 0;
}
-fn checkver(def_arg: &elf.Verdef, vsym_arg: i32, vername: []const u8, strings: &u8) bool {
+fn checkver(def_arg: *elf.Verdef, vsym_arg: i32, vername: []const u8, strings: *u8) bool {
var def = def_arg;
const vsym = @bitCast(u32, vsym_arg) & 0x7fff;
while (true) {
@@ -84,8 +84,8 @@ fn checkver(def_arg: &elf.Verdef, vsym_arg: i32, vername: []const u8, strings: &
break;
if (def.vd_next == 0)
return false;
- def = @intToPtr(&elf.Verdef, @ptrToInt(def) + def.vd_next);
+ def = @intToPtr(*elf.Verdef, @ptrToInt(def) + def.vd_next);
}
- const aux = @intToPtr(&elf.Verdaux, @ptrToInt(def) + def.vd_aux);
+ const aux = @intToPtr(*elf.Verdaux, @ptrToInt(def) + def.vd_aux);
return mem.eql(u8, vername, cstr.toSliceConst(&strings[aux.vda_name]));
}
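
The mechanical rule applied throughout this file: builtins such as @intToPtr and @ptrCast take a pointer type, which is now written *T, while value-level address-of (as in &strings[aux.vda_name] just above) keeps using &. A tiny self-contained sketch of the distinction, with a hypothetical local buffer:

    fn derefFirstByte() u8 {
        var buffer = []u8{ 42, 0, 0 };      // hypothetical local data
        const addr = @ptrToInt(&buffer[0]); // & still takes an address
        const ptr = @intToPtr(*u8, addr);   // *u8 is the pointer type
        return ptr.*;                       // postfix .* dereferences
    }
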
diff --git a/std/os/linux/x86_64.zig b/std/os/linux/x86_64.zig
index b43a642038f1..9a90e6475706 100644
--- a/std/os/linux/x86_64.zig
+++ b/std/os/linux/x86_64.zig
@@ -463,7 +463,7 @@ pub fn syscall6(
}
/// This matches the libc clone function.
-pub extern fn clone(func: extern fn (arg: usize) u8, stack: usize, flags: usize, arg: usize, ptid: &i32, tls: usize, ctid: &i32) usize;
+pub extern fn clone(func: extern fn (arg: usize) u8, stack: usize, flags: usize, arg: usize, ptid: *i32, tls: usize, ctid: *i32) usize;
pub nakedcc fn restore_rt() void {
return asm volatile ("syscall"
@@ -474,12 +474,12 @@ pub nakedcc fn restore_rt() void {
}
pub const msghdr = extern struct {
- msg_name: &u8,
+ msg_name: *u8,
msg_namelen: socklen_t,
- msg_iov: &iovec,
+ msg_iov: *iovec,
msg_iovlen: i32,
__pad1: i32,
- msg_control: &u8,
+ msg_control: *u8,
msg_controllen: socklen_t,
__pad2: socklen_t,
msg_flags: i32,
diff --git a/std/os/path.zig b/std/os/path.zig
index 162faffc42e8..4df6179bf5e9 100644
--- a/std/os/path.zig
+++ b/std/os/path.zig
@@ -32,7 +32,7 @@ pub fn isSep(byte: u8) bool {
/// Naively combines a series of paths with the native path separator.
/// Allocates memory for the result, which must be freed by the caller.
-pub fn join(allocator: &Allocator, paths: ...) ![]u8 {
+pub fn join(allocator: *Allocator, paths: ...) ![]u8 {
if (is_windows) {
return joinWindows(allocator, paths);
} else {
@@ -40,11 +40,11 @@ pub fn join(allocator: &Allocator, paths: ...) ![]u8 {
}
}
-pub fn joinWindows(allocator: &Allocator, paths: ...) ![]u8 {
+pub fn joinWindows(allocator: *Allocator, paths: ...) ![]u8 {
return mem.join(allocator, sep_windows, paths);
}
-pub fn joinPosix(allocator: &Allocator, paths: ...) ![]u8 {
+pub fn joinPosix(allocator: *Allocator, paths: ...) ![]u8 {
return mem.join(allocator, sep_posix, paths);
}
@@ -310,7 +310,7 @@ fn asciiEqlIgnoreCase(s1: []const u8, s2: []const u8) bool {
}
/// Converts the command line arguments into a slice and calls `resolveSlice`.
-pub fn resolve(allocator: &Allocator, args: ...) ![]u8 {
+pub fn resolve(allocator: *Allocator, args: ...) ![]u8 {
var paths: [args.len][]const u8 = undefined;
comptime var arg_i = 0;
inline while (arg_i < args.len) : (arg_i += 1) {
@@ -320,7 +320,7 @@ pub fn resolve(allocator: &Allocator, args: ...) ![]u8 {
}
/// On Windows, this calls `resolveWindows` and on POSIX it calls `resolvePosix`.
-pub fn resolveSlice(allocator: &Allocator, paths: []const []const u8) ![]u8 {
+pub fn resolveSlice(allocator: *Allocator, paths: []const []const u8) ![]u8 {
if (is_windows) {
return resolveWindows(allocator, paths);
} else {
@@ -334,7 +334,7 @@ pub fn resolveSlice(allocator: &Allocator, paths: []const []const u8) ![]u8 {
/// If all paths are relative it uses the current working directory as a starting point.
/// Each drive has its own current working directory.
/// Path separators are canonicalized to '\\' and drives are canonicalized to capital letters.
-pub fn resolveWindows(allocator: &Allocator, paths: []const []const u8) ![]u8 {
+pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
if (paths.len == 0) {
assert(is_windows); // resolveWindows called on non windows can't use getCwd
return os.getCwd(allocator);
@@ -513,7 +513,7 @@ pub fn resolveWindows(allocator: &Allocator, paths: []const []const u8) ![]u8 {
/// It resolves "." and "..".
/// The result does not have a trailing path separator.
/// If all paths are relative it uses the current working directory as a starting point.
-pub fn resolvePosix(allocator: &Allocator, paths: []const []const u8) ![]u8 {
+pub fn resolvePosix(allocator: *Allocator, paths: []const []const u8) ![]u8 {
if (paths.len == 0) {
assert(!is_windows); // resolvePosix called on windows can't use getCwd
return os.getCwd(allocator);
@@ -883,7 +883,7 @@ fn testBasenameWindows(input: []const u8, expected_output: []const u8) void {
/// resolve to the same path (after calling `resolve` on each), a zero-length
/// string is returned.
/// On Windows this canonicalizes the drive to a capital letter and paths to `\\`.
-pub fn relative(allocator: &Allocator, from: []const u8, to: []const u8) ![]u8 {
+pub fn relative(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
if (is_windows) {
return relativeWindows(allocator, from, to);
} else {
@@ -891,7 +891,7 @@ pub fn relative(allocator: &Allocator, from: []const u8, to: []const u8) ![]u8 {
}
}
-pub fn relativeWindows(allocator: &Allocator, from: []const u8, to: []const u8) ![]u8 {
+pub fn relativeWindows(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
const resolved_from = try resolveWindows(allocator, [][]const u8{from});
defer allocator.free(resolved_from);
@@ -964,7 +964,7 @@ pub fn relativeWindows(allocator: &Allocator, from: []const u8, to: []const u8)
return []u8{};
}
-pub fn relativePosix(allocator: &Allocator, from: []const u8, to: []const u8) ![]u8 {
+pub fn relativePosix(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
const resolved_from = try resolvePosix(allocator, [][]const u8{from});
defer allocator.free(resolved_from);
@@ -1063,7 +1063,7 @@ fn testRelativeWindows(from: []const u8, to: []const u8, expected_output: []cons
/// Expands all symbolic links and resolves references to `.`, `..`, and
/// extra `/` characters in ::pathname.
/// Caller must deallocate result.
-pub fn real(allocator: &Allocator, pathname: []const u8) ![]u8 {
+pub fn real(allocator: *Allocator, pathname: []const u8) ![]u8 {
switch (builtin.os) {
Os.windows => {
const pathname_buf = try allocator.alloc(u8, pathname.len + 1);
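
Callers of these path helpers now receive the allocator as *Allocator; everything else about calling them is unchanged. A hedged sketch, with placeholder path components:

    const std = @import("std");

    fn exampleJoin(allocator: *std.mem.Allocator) ![]u8 {
        // join still takes varargs path components; only the allocator
        // parameter spelling changed from &Allocator to *Allocator.
        return std.os.path.join(allocator, "usr", "local", "bin");
    }
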
diff --git a/std/os/test.zig b/std/os/test.zig
index 4dfe76224adc..4aa353582991 100644
--- a/std/os/test.zig
+++ b/std/os/test.zig
@@ -63,7 +63,7 @@ fn start1(ctx: void) u8 {
return 0;
}
-fn start2(ctx: &i32) u8 {
+fn start2(ctx: *i32) u8 {
_ = @atomicRmw(i32, ctx, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
return 0;
}
diff --git a/std/os/time.zig b/std/os/time.zig
index 9a7c682483b5..8629504323ec 100644
--- a/std/os/time.zig
+++ b/std/os/time.zig
@@ -200,7 +200,7 @@ pub const Timer = struct {
}
/// Reads the timer value since start or the last reset in nanoseconds
- pub fn read(self: &Timer) u64 {
+ pub fn read(self: *Timer) u64 {
var clock = clockNative() - self.start_time;
return switch (builtin.os) {
Os.windows => @divFloor(clock * ns_per_s, self.frequency),
@@ -211,12 +211,12 @@ pub const Timer = struct {
}
/// Resets the timer value to 0/now.
- pub fn reset(self: &Timer) void {
+ pub fn reset(self: *Timer) void {
self.start_time = clockNative();
}
/// Returns the current value of the timer in nanoseconds, then resets it
- pub fn lap(self: &Timer) u64 {
+ pub fn lap(self: *Timer) u64 {
var now = clockNative();
var lap_time = self.read();
self.start_time = now;
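
Because these methods now take *Timer, the timer has to live in a mutable var so the implicit address-of at the call site is possible. A minimal sketch, assuming Timer.start() is the constructor defined earlier in this file:

    const std = @import("std");

    fn elapsedNanoseconds() !u64 {
        var timer = try std.os.time.Timer.start(); // start() assumed from this module
        busyWork();
        return timer.read(); // read(), reset(), and lap() now take *Timer
    }

    fn busyWork() void {}
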
diff --git a/std/os/windows/index.zig b/std/os/windows/index.zig
index 264ea391c42c..85f69836d503 100644
--- a/std/os/windows/index.zig
+++ b/std/os/windows/index.zig
@@ -1,7 +1,7 @@
pub const ERROR = @import("error.zig");
pub extern "advapi32" stdcallcc fn CryptAcquireContextA(
- phProv: &HCRYPTPROV,
+ phProv: *HCRYPTPROV,
pszContainer: ?LPCSTR,
pszProvider: ?LPCSTR,
dwProvType: DWORD,
@@ -10,13 +10,13 @@ pub extern "advapi32" stdcallcc fn CryptAcquireContextA(
pub extern "advapi32" stdcallcc fn CryptReleaseContext(hProv: HCRYPTPROV, dwFlags: DWORD) BOOL;
-pub extern "advapi32" stdcallcc fn CryptGenRandom(hProv: HCRYPTPROV, dwLen: DWORD, pbBuffer: &BYTE) BOOL;
+pub extern "advapi32" stdcallcc fn CryptGenRandom(hProv: HCRYPTPROV, dwLen: DWORD, pbBuffer: *BYTE) BOOL;
pub extern "kernel32" stdcallcc fn CloseHandle(hObject: HANDLE) BOOL;
pub extern "kernel32" stdcallcc fn CreateDirectoryA(
lpPathName: LPCSTR,
- lpSecurityAttributes: ?&SECURITY_ATTRIBUTES,
+ lpSecurityAttributes: ?*SECURITY_ATTRIBUTES,
) BOOL;
pub extern "kernel32" stdcallcc fn CreateFileA(
@@ -30,23 +30,23 @@ pub extern "kernel32" stdcallcc fn CreateFileA(
) HANDLE;
pub extern "kernel32" stdcallcc fn CreatePipe(
- hReadPipe: &HANDLE,
- hWritePipe: &HANDLE,
- lpPipeAttributes: &const SECURITY_ATTRIBUTES,
+ hReadPipe: *HANDLE,
+ hWritePipe: *HANDLE,
+ lpPipeAttributes: *const SECURITY_ATTRIBUTES,
nSize: DWORD,
) BOOL;
pub extern "kernel32" stdcallcc fn CreateProcessA(
lpApplicationName: ?LPCSTR,
lpCommandLine: LPSTR,
- lpProcessAttributes: ?&SECURITY_ATTRIBUTES,
- lpThreadAttributes: ?&SECURITY_ATTRIBUTES,
+ lpProcessAttributes: ?*SECURITY_ATTRIBUTES,
+ lpThreadAttributes: ?*SECURITY_ATTRIBUTES,
bInheritHandles: BOOL,
dwCreationFlags: DWORD,
- lpEnvironment: ?&c_void,
+ lpEnvironment: ?*c_void,
lpCurrentDirectory: ?LPCSTR,
- lpStartupInfo: &STARTUPINFOA,
- lpProcessInformation: &PROCESS_INFORMATION,
+ lpStartupInfo: *STARTUPINFOA,
+ lpProcessInformation: *PROCESS_INFORMATION,
) BOOL;
pub extern "kernel32" stdcallcc fn CreateSymbolicLinkA(
@@ -65,7 +65,7 @@ pub extern "kernel32" stdcallcc fn FreeEnvironmentStringsA(penv: LPCH) BOOL;
pub extern "kernel32" stdcallcc fn GetCommandLineA() LPSTR;
-pub extern "kernel32" stdcallcc fn GetConsoleMode(in_hConsoleHandle: HANDLE, out_lpMode: &DWORD) BOOL;
+pub extern "kernel32" stdcallcc fn GetConsoleMode(in_hConsoleHandle: HANDLE, out_lpMode: *DWORD) BOOL;
pub extern "kernel32" stdcallcc fn GetCurrentDirectoryA(nBufferLength: WORD, lpBuffer: ?LPSTR) DWORD;
@@ -73,9 +73,9 @@ pub extern "kernel32" stdcallcc fn GetEnvironmentStringsA() ?LPCH;
pub extern "kernel32" stdcallcc fn GetEnvironmentVariableA(lpName: LPCSTR, lpBuffer: LPSTR, nSize: DWORD) DWORD;
-pub extern "kernel32" stdcallcc fn GetExitCodeProcess(hProcess: HANDLE, lpExitCode: &DWORD) BOOL;
+pub extern "kernel32" stdcallcc fn GetExitCodeProcess(hProcess: HANDLE, lpExitCode: *DWORD) BOOL;
-pub extern "kernel32" stdcallcc fn GetFileSizeEx(hFile: HANDLE, lpFileSize: &LARGE_INTEGER) BOOL;
+pub extern "kernel32" stdcallcc fn GetFileSizeEx(hFile: HANDLE, lpFileSize: *LARGE_INTEGER) BOOL;
pub extern "kernel32" stdcallcc fn GetModuleFileNameA(hModule: ?HMODULE, lpFilename: LPSTR, nSize: DWORD) DWORD;
@@ -84,7 +84,7 @@ pub extern "kernel32" stdcallcc fn GetLastError() DWORD;
pub extern "kernel32" stdcallcc fn GetFileInformationByHandleEx(
in_hFile: HANDLE,
in_FileInformationClass: FILE_INFO_BY_HANDLE_CLASS,
- out_lpFileInformation: &c_void,
+ out_lpFileInformation: *c_void,
in_dwBufferSize: DWORD,
) BOOL;
@@ -97,21 +97,21 @@ pub extern "kernel32" stdcallcc fn GetFinalPathNameByHandleA(
pub extern "kernel32" stdcallcc fn GetProcessHeap() ?HANDLE;
-pub extern "kernel32" stdcallcc fn GetSystemTimeAsFileTime(?&FILETIME) void;
+pub extern "kernel32" stdcallcc fn GetSystemTimeAsFileTime(?*FILETIME) void;
pub extern "kernel32" stdcallcc fn HeapCreate(flOptions: DWORD, dwInitialSize: SIZE_T, dwMaximumSize: SIZE_T) ?HANDLE;
pub extern "kernel32" stdcallcc fn HeapDestroy(hHeap: HANDLE) BOOL;
-pub extern "kernel32" stdcallcc fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: &c_void, dwBytes: SIZE_T) ?&c_void;
-pub extern "kernel32" stdcallcc fn HeapSize(hHeap: HANDLE, dwFlags: DWORD, lpMem: &const c_void) SIZE_T;
-pub extern "kernel32" stdcallcc fn HeapValidate(hHeap: HANDLE, dwFlags: DWORD, lpMem: &const c_void) BOOL;
+pub extern "kernel32" stdcallcc fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void, dwBytes: SIZE_T) ?*c_void;
+pub extern "kernel32" stdcallcc fn HeapSize(hHeap: HANDLE, dwFlags: DWORD, lpMem: *const c_void) SIZE_T;
+pub extern "kernel32" stdcallcc fn HeapValidate(hHeap: HANDLE, dwFlags: DWORD, lpMem: *const c_void) BOOL;
pub extern "kernel32" stdcallcc fn HeapCompact(hHeap: HANDLE, dwFlags: DWORD) SIZE_T;
pub extern "kernel32" stdcallcc fn HeapSummary(hHeap: HANDLE, dwFlags: DWORD, lpSummary: LPHEAP_SUMMARY) BOOL;
pub extern "kernel32" stdcallcc fn GetStdHandle(in_nStdHandle: DWORD) ?HANDLE;
-pub extern "kernel32" stdcallcc fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) ?&c_void;
+pub extern "kernel32" stdcallcc fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) ?*c_void;
-pub extern "kernel32" stdcallcc fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: &c_void) BOOL;
+pub extern "kernel32" stdcallcc fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void) BOOL;
pub extern "kernel32" stdcallcc fn MoveFileExA(
lpExistingFileName: LPCSTR,
@@ -119,24 +119,24 @@ pub extern "kernel32" stdcallcc fn MoveFileExA(
dwFlags: DWORD,
) BOOL;
-pub extern "kernel32" stdcallcc fn QueryPerformanceCounter(lpPerformanceCount: &LARGE_INTEGER) BOOL;
+pub extern "kernel32" stdcallcc fn QueryPerformanceCounter(lpPerformanceCount: *LARGE_INTEGER) BOOL;
-pub extern "kernel32" stdcallcc fn QueryPerformanceFrequency(lpFrequency: &LARGE_INTEGER) BOOL;
+pub extern "kernel32" stdcallcc fn QueryPerformanceFrequency(lpFrequency: *LARGE_INTEGER) BOOL;
pub extern "kernel32" stdcallcc fn PathFileExists(pszPath: ?LPCTSTR) BOOL;
pub extern "kernel32" stdcallcc fn ReadFile(
in_hFile: HANDLE,
- out_lpBuffer: &c_void,
+ out_lpBuffer: *c_void,
in_nNumberOfBytesToRead: DWORD,
- out_lpNumberOfBytesRead: &DWORD,
- in_out_lpOverlapped: ?&OVERLAPPED,
+ out_lpNumberOfBytesRead: *DWORD,
+ in_out_lpOverlapped: ?*OVERLAPPED,
) BOOL;
pub extern "kernel32" stdcallcc fn SetFilePointerEx(
in_fFile: HANDLE,
in_liDistanceToMove: LARGE_INTEGER,
- out_opt_ldNewFilePointer: ?&LARGE_INTEGER,
+ out_opt_ldNewFilePointer: ?*LARGE_INTEGER,
in_dwMoveMethod: DWORD,
) BOOL;
@@ -150,10 +150,10 @@ pub extern "kernel32" stdcallcc fn WaitForSingleObject(hHandle: HANDLE, dwMillis
pub extern "kernel32" stdcallcc fn WriteFile(
in_hFile: HANDLE,
- in_lpBuffer: &const c_void,
+ in_lpBuffer: *const c_void,
in_nNumberOfBytesToWrite: DWORD,
- out_lpNumberOfBytesWritten: ?&DWORD,
- in_out_lpOverlapped: ?&OVERLAPPED,
+ out_lpNumberOfBytesWritten: ?*DWORD,
+ in_out_lpOverlapped: ?*OVERLAPPED,
) BOOL;
//TODO: call unicode versions instead of relying on ANSI code page
@@ -171,23 +171,23 @@ pub const BYTE = u8;
pub const CHAR = u8;
pub const DWORD = u32;
pub const FLOAT = f32;
-pub const HANDLE = &c_void;
+pub const HANDLE = *c_void;
pub const HCRYPTPROV = ULONG_PTR;
-pub const HINSTANCE = &@OpaqueType();
-pub const HMODULE = &@OpaqueType();
+pub const HINSTANCE = *@OpaqueType();
+pub const HMODULE = *@OpaqueType();
pub const INT = c_int;
-pub const LPBYTE = &BYTE;
-pub const LPCH = &CHAR;
-pub const LPCSTR = &const CHAR;
-pub const LPCTSTR = &const TCHAR;
-pub const LPCVOID = &const c_void;
-pub const LPDWORD = &DWORD;
-pub const LPSTR = &CHAR;
+pub const LPBYTE = *BYTE;
+pub const LPCH = *CHAR;
+pub const LPCSTR = *const CHAR;
+pub const LPCTSTR = *const TCHAR;
+pub const LPCVOID = *const c_void;
+pub const LPDWORD = *DWORD;
+pub const LPSTR = *CHAR;
pub const LPTSTR = if (UNICODE) LPWSTR else LPSTR;
-pub const LPVOID = &c_void;
-pub const LPWSTR = &WCHAR;
-pub const PVOID = &c_void;
-pub const PWSTR = &WCHAR;
+pub const LPVOID = *c_void;
+pub const LPWSTR = *WCHAR;
+pub const PVOID = *c_void;
+pub const PWSTR = *WCHAR;
pub const SIZE_T = usize;
pub const TCHAR = if (UNICODE) WCHAR else u8;
pub const UINT = c_uint;
@@ -218,7 +218,7 @@ pub const OVERLAPPED = extern struct {
Pointer: PVOID,
hEvent: HANDLE,
};
-pub const LPOVERLAPPED = &OVERLAPPED;
+pub const LPOVERLAPPED = *OVERLAPPED;
pub const MAX_PATH = 260;
@@ -271,11 +271,11 @@ pub const VOLUME_NAME_NT = 0x2;
pub const SECURITY_ATTRIBUTES = extern struct {
nLength: DWORD,
- lpSecurityDescriptor: ?&c_void,
+ lpSecurityDescriptor: ?*c_void,
bInheritHandle: BOOL,
};
-pub const PSECURITY_ATTRIBUTES = &SECURITY_ATTRIBUTES;
-pub const LPSECURITY_ATTRIBUTES = &SECURITY_ATTRIBUTES;
+pub const PSECURITY_ATTRIBUTES = *SECURITY_ATTRIBUTES;
+pub const LPSECURITY_ATTRIBUTES = *SECURITY_ATTRIBUTES;
pub const GENERIC_READ = 0x80000000;
pub const GENERIC_WRITE = 0x40000000;
diff --git a/std/os/windows/util.zig b/std/os/windows/util.zig
index 2bd8a157e49a..7170346108f8 100644
--- a/std/os/windows/util.zig
+++ b/std/os/windows/util.zig
@@ -42,7 +42,7 @@ pub const WriteError = error{
};
pub fn windowsWrite(handle: windows.HANDLE, bytes: []const u8) WriteError!void {
- if (windows.WriteFile(handle, @ptrCast(&const c_void, bytes.ptr), u32(bytes.len), null, null) == 0) {
+ if (windows.WriteFile(handle, @ptrCast(*const c_void, bytes.ptr), u32(bytes.len), null, null) == 0) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.INVALID_USER_BUFFER => WriteError.SystemResources,
@@ -68,11 +68,11 @@ pub fn windowsIsCygwinPty(handle: windows.HANDLE) bool {
const size = @sizeOf(windows.FILE_NAME_INFO);
var name_info_bytes align(@alignOf(windows.FILE_NAME_INFO)) = []u8{0} ** (size + windows.MAX_PATH);
- if (windows.GetFileInformationByHandleEx(handle, windows.FileNameInfo, @ptrCast(&c_void, &name_info_bytes[0]), u32(name_info_bytes.len)) == 0) {
+ if (windows.GetFileInformationByHandleEx(handle, windows.FileNameInfo, @ptrCast(*c_void, &name_info_bytes[0]), u32(name_info_bytes.len)) == 0) {
return true;
}
- const name_info = @ptrCast(&const windows.FILE_NAME_INFO, &name_info_bytes[0]);
+ const name_info = @ptrCast(*const windows.FILE_NAME_INFO, &name_info_bytes[0]);
const name_bytes = name_info_bytes[size .. size + usize(name_info.FileNameLength)];
const name_wide = ([]u16)(name_bytes);
return mem.indexOf(u16, name_wide, []u16{ 'm', 's', 'y', 's', '-' }) != null or
@@ -91,7 +91,7 @@ pub const OpenError = error{
/// `file_path` needs to be copied in memory to add a null terminating byte, hence the allocator.
pub fn windowsOpen(
- allocator: &mem.Allocator,
+ allocator: *mem.Allocator,
file_path: []const u8,
desired_access: windows.DWORD,
share_mode: windows.DWORD,
@@ -119,7 +119,7 @@ pub fn windowsOpen(
}
/// Caller must free result.
-pub fn createWindowsEnvBlock(allocator: &mem.Allocator, env_map: &const BufMap) ![]u8 {
+pub fn createWindowsEnvBlock(allocator: *mem.Allocator, env_map: *const BufMap) ![]u8 {
// count bytes needed
const bytes_needed = x: {
var bytes_needed: usize = 1; // 1 for the final null byte
@@ -150,7 +150,7 @@ pub fn createWindowsEnvBlock(allocator: &mem.Allocator, env_map: &const BufMap)
return result;
}
-pub fn windowsLoadDll(allocator: &mem.Allocator, dll_path: []const u8) !windows.HMODULE {
+pub fn windowsLoadDll(allocator: *mem.Allocator, dll_path: []const u8) !windows.HMODULE {
const padded_buff = try cstr.addNullByte(allocator, dll_path);
defer allocator.free(padded_buff);
return windows.LoadLibraryA(padded_buff.ptr) ?? error.DllNotFound;
diff --git a/std/os/zen.zig b/std/os/zen.zig
index 2411c5363ee9..2312b36deac6 100644
--- a/std/os/zen.zig
+++ b/std/os/zen.zig
@@ -8,7 +8,7 @@ pub const Message = struct {
type: usize,
payload: usize,
- pub fn from(mailbox_id: &const MailboxId) Message {
+ pub fn from(mailbox_id: *const MailboxId) Message {
return Message{
.sender = MailboxId.Undefined,
.receiver = *mailbox_id,
@@ -17,7 +17,7 @@ pub const Message = struct {
};
}
- pub fn to(mailbox_id: &const MailboxId, msg_type: usize) Message {
+ pub fn to(mailbox_id: *const MailboxId, msg_type: usize) Message {
return Message{
.sender = MailboxId.This,
.receiver = *mailbox_id,
@@ -26,7 +26,7 @@ pub const Message = struct {
};
}
- pub fn withData(mailbox_id: &const MailboxId, msg_type: usize, payload: usize) Message {
+ pub fn withData(mailbox_id: *const MailboxId, msg_type: usize, payload: usize) Message {
return Message{
.sender = MailboxId.This,
.receiver = *mailbox_id,
@@ -67,7 +67,7 @@ pub const getErrno = @import("linux/index.zig").getErrno;
use @import("linux/errno.zig");
// TODO: implement this correctly.
-pub fn read(fd: i32, buf: &u8, count: usize) usize {
+pub fn read(fd: i32, buf: *u8, count: usize) usize {
switch (fd) {
STDIN_FILENO => {
var i: usize = 0;
@@ -75,7 +75,7 @@ pub fn read(fd: i32, buf: &u8, count: usize) usize {
send(Message.to(Server.Keyboard, 0));
var message = Message.from(MailboxId.This);
receive(&message);
buf[i] = u8(message.payload);
}
@@ -86,7 +86,7 @@ pub fn read(fd: i32, buf: &u8, count: usize) usize {
}
// TODO: implement this correctly.
-pub fn write(fd: i32, buf: &const u8, count: usize) usize {
+pub fn write(fd: i32, buf: *const u8, count: usize) usize {
switch (fd) {
STDOUT_FILENO, STDERR_FILENO => {
var i: usize = 0;
@@ -126,22 +126,22 @@ pub fn exit(status: i32) noreturn {
unreachable;
}
-pub fn createPort(mailbox_id: &const MailboxId) void {
+pub fn createPort(mailbox_id: *const MailboxId) void {
_ = switch (*mailbox_id) {
MailboxId.Port => |id| syscall1(Syscall.createPort, id),
else => unreachable,
};
}
-pub fn send(message: &const Message) void {
+pub fn send(message: *const Message) void {
_ = syscall1(Syscall.send, @ptrToInt(message));
}
-pub fn receive(destination: &Message) void {
+pub fn receive(destination: *Message) void {
_ = syscall1(Syscall.receive, @ptrToInt(destination));
}
-pub fn subscribeIRQ(irq: u8, mailbox_id: &const MailboxId) void {
+pub fn subscribeIRQ(irq: u8, mailbox_id: *const MailboxId) void {
_ = syscall2(Syscall.subscribeIRQ, irq, @ptrToInt(mailbox_id));
}
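
Call sites inside this module keep the same shape: &message still yields the *Message that receive expects, and an rvalue such as Message.to(...) still satisfies a *const Message parameter, exactly as read above does. A one-shot sketch using only names defined in this file:

    fn readOneKey() u8 {
        send(Message.to(Server.Keyboard, 0));
        var message = Message.from(MailboxId.This);
        receive(&message); // & produces the *Message argument
        return u8(message.payload);
    }
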
diff --git a/std/rand/index.zig b/std/rand/index.zig
index c32309a0fdcb..3a1a559cd92e 100644
--- a/std/rand/index.zig
+++ b/std/rand/index.zig
@@ -28,15 +28,15 @@ pub const DefaultPrng = Xoroshiro128;
pub const DefaultCsprng = Isaac64;
pub const Random = struct {
- fillFn: fn (r: &Random, buf: []u8) void,
+ fillFn: fn (r: *Random, buf: []u8) void,
/// Read random bytes into the specified buffer until fill.
- pub fn bytes(r: &Random, buf: []u8) void {
+ pub fn bytes(r: *Random, buf: []u8) void {
r.fillFn(r, buf);
}
/// Return a random integer/boolean type.
- pub fn scalar(r: &Random, comptime T: type) T {
+ pub fn scalar(r: *Random, comptime T: type) T {
var rand_bytes: [@sizeOf(T)]u8 = undefined;
r.bytes(rand_bytes[0..]);
@@ -50,7 +50,7 @@ pub const Random = struct {
/// Get a random unsigned integer with even distribution between `start`
/// inclusive and `end` exclusive.
- pub fn range(r: &Random, comptime T: type, start: T, end: T) T {
+ pub fn range(r: *Random, comptime T: type, start: T, end: T) T {
assert(start <= end);
if (T.is_signed) {
const uint = @IntType(false, T.bit_count);
@@ -92,7 +92,7 @@ pub const Random = struct {
}
/// Return a floating point value evenly distributed in the range [0, 1).
- pub fn float(r: &Random, comptime T: type) T {
+ pub fn float(r: *Random, comptime T: type) T {
// Generate a uniform value between [1, 2) and scale down to [0, 1).
// Note: The lowest mantissa bit is always set to 0 so we only use half the available range.
switch (T) {
@@ -113,7 +113,7 @@ pub const Random = struct {
/// Return a floating point value normally distributed with mean = 0, stddev = 1.
///
/// To use different parameters, use: floatNorm(...) * desiredStddev + desiredMean.
- pub fn floatNorm(r: &Random, comptime T: type) T {
+ pub fn floatNorm(r: *Random, comptime T: type) T {
const value = ziggurat.next_f64(r, ziggurat.NormDist);
switch (T) {
f32 => return f32(value),
@@ -125,7 +125,7 @@ pub const Random = struct {
/// Return an exponentially distributed float with a rate parameter of 1.
///
/// To use a different rate parameter, use: floatExp(...) / desiredRate.
- pub fn floatExp(r: &Random, comptime T: type) T {
+ pub fn floatExp(r: *Random, comptime T: type) T {
const value = ziggurat.next_f64(r, ziggurat.ExpDist);
switch (T) {
f32 => return f32(value),
@@ -135,7 +135,7 @@ pub const Random = struct {
}
/// Shuffle a slice into a random order.
- pub fn shuffle(r: &Random, comptime T: type, buf: []T) void {
+ pub fn shuffle(r: *Random, comptime T: type, buf: []T) void {
if (buf.len < 2) {
return;
}
@@ -159,7 +159,7 @@ const SplitMix64 = struct {
return SplitMix64{ .s = seed };
}
- pub fn next(self: &SplitMix64) u64 {
+ pub fn next(self: *SplitMix64) u64 {
self.s +%= 0x9e3779b97f4a7c15;
var z = self.s;
@@ -208,7 +208,7 @@ pub const Pcg = struct {
return pcg;
}
- fn next(self: &Pcg) u32 {
+ fn next(self: *Pcg) u32 {
const l = self.s;
self.s = l *% default_multiplier +% (self.i | 1);
@@ -218,13 +218,13 @@ pub const Pcg = struct {
return (xor_s >> u5(rot)) | (xor_s << u5((0 -% rot) & 31));
}
- fn seed(self: &Pcg, init_s: u64) void {
+ fn seed(self: *Pcg, init_s: u64) void {
// Pcg requires 128-bits of seed.
var gen = SplitMix64.init(init_s);
self.seedTwo(gen.next(), gen.next());
}
- fn seedTwo(self: &Pcg, init_s: u64, init_i: u64) void {
+ fn seedTwo(self: *Pcg, init_s: u64, init_i: u64) void {
self.s = 0;
self.i = (init_s << 1) | 1;
self.s = self.s *% default_multiplier +% self.i;
@@ -232,7 +232,7 @@ pub const Pcg = struct {
self.s = self.s *% default_multiplier +% self.i;
}
- fn fill(r: &Random, buf: []u8) void {
+ fn fill(r: *Random, buf: []u8) void {
const self = @fieldParentPtr(Pcg, "random", r);
var i: usize = 0;
@@ -297,7 +297,7 @@ pub const Xoroshiro128 = struct {
return x;
}
- fn next(self: &Xoroshiro128) u64 {
+ fn next(self: *Xoroshiro128) u64 {
const s0 = self.s[0];
var s1 = self.s[1];
const r = s0 +% s1;
@@ -310,7 +310,7 @@ pub const Xoroshiro128 = struct {
}
// Skip 2^64 places ahead in the sequence
- fn jump(self: &Xoroshiro128) void {
+ fn jump(self: *Xoroshiro128) void {
var s0: u64 = 0;
var s1: u64 = 0;
@@ -334,7 +334,7 @@ pub const Xoroshiro128 = struct {
self.s[1] = s1;
}
- fn seed(self: &Xoroshiro128, init_s: u64) void {
+ fn seed(self: *Xoroshiro128, init_s: u64) void {
// Xoroshiro requires 128-bits of seed.
var gen = SplitMix64.init(init_s);
@@ -342,7 +342,7 @@ pub const Xoroshiro128 = struct {
self.s[1] = gen.next();
}
- fn fill(r: &Random, buf: []u8) void {
+ fn fill(r: *Random, buf: []u8) void {
const self = @fieldParentPtr(Xoroshiro128, "random", r);
var i: usize = 0;
@@ -435,7 +435,7 @@ pub const Isaac64 = struct {
return isaac;
}
- fn step(self: &Isaac64, mix: u64, base: usize, comptime m1: usize, comptime m2: usize) void {
+ fn step(self: *Isaac64, mix: u64, base: usize, comptime m1: usize, comptime m2: usize) void {
const x = self.m[base + m1];
self.a = mix +% self.m[base + m2];
@@ -446,7 +446,7 @@ pub const Isaac64 = struct {
self.r[self.r.len - 1 - base - m1] = self.b;
}
- fn refill(self: &Isaac64) void {
+ fn refill(self: *Isaac64) void {
const midpoint = self.r.len / 2;
self.c +%= 1;
@@ -475,7 +475,7 @@ pub const Isaac64 = struct {
self.i = 0;
}
- fn next(self: &Isaac64) u64 {
+ fn next(self: *Isaac64) u64 {
if (self.i >= self.r.len) {
self.refill();
}
@@ -485,7 +485,7 @@ pub const Isaac64 = struct {
return value;
}
- fn seed(self: &Isaac64, init_s: u64, comptime rounds: usize) void {
+ fn seed(self: *Isaac64, init_s: u64, comptime rounds: usize) void {
// We ignore the multi-pass requirement since we don't currently expose full access to
// seeding the self.m array completely.
mem.set(u64, self.m[0..], 0);
@@ -551,7 +551,7 @@ pub const Isaac64 = struct {
self.i = self.r.len; // trigger refill on first value
}
- fn fill(r: &Random, buf: []u8) void {
+ fn fill(r: *Random, buf: []u8) void {
const self = @fieldParentPtr(Isaac64, "random", r);
var i: usize = 0;
@@ -666,7 +666,7 @@ test "Random range" {
testRange(&prng.random, 10, 14);
}
-fn testRange(r: &Random, start: i32, end: i32) void {
+fn testRange(r: *Random, start: i32, end: i32) void {
const count = usize(end - start);
var values_buffer = []bool{false} ** 20;
const values = values_buffer[0..count];
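
Consumers of the Random interface now declare *Random parameters; producing that pointer with &prng.random, as the test above does when calling testRange, is unchanged. A small sketch under those assumptions (the seed is arbitrary):

    const std = @import("std");

    fn rollDie(r: *std.rand.Random) i32 {
        // range() is the method patched above; the end bound is exclusive.
        return r.range(i32, 1, 7);
    }

    test "rollDie sketch" {
        var prng = std.rand.DefaultPrng.init(0x1234);
        const face = rollDie(&prng.random);
        std.debug.assert(face >= 1 and face <= 6);
    }
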
diff --git a/std/rand/ziggurat.zig b/std/rand/ziggurat.zig
index 7daeb59165a3..774d3bd52a06 100644
--- a/std/rand/ziggurat.zig
+++ b/std/rand/ziggurat.zig
@@ -12,7 +12,7 @@ const std = @import("../index.zig");
const math = std.math;
const Random = std.rand.Random;
-pub fn next_f64(random: &Random, comptime tables: &const ZigTable) f64 {
+pub fn next_f64(random: *Random, comptime tables: *const ZigTable) f64 {
while (true) {
// We manually construct a float from parts as we can avoid an extra random lookup here by
// using the unused exponent for the lookup table entry.
@@ -60,7 +60,7 @@ pub const ZigTable = struct {
// whether the distribution is symmetric
is_symmetric: bool,
// fallback calculation in the case we are in the 0 block
- zero_case: fn (&Random, f64) f64,
+ zero_case: fn (*Random, f64) f64,
};
// zigNorInit
@@ -70,7 +70,7 @@ fn ZigTableGen(
comptime v: f64,
comptime f: fn (f64) f64,
comptime f_inv: fn (f64) f64,
- comptime zero_case: fn (&Random, f64) f64,
+ comptime zero_case: fn (*Random, f64) f64,
) ZigTable {
var tables: ZigTable = undefined;
@@ -110,7 +110,7 @@ fn norm_f(x: f64) f64 {
fn norm_f_inv(y: f64) f64 {
return math.sqrt(-2.0 * math.ln(y));
}
-fn norm_zero_case(random: &Random, u: f64) f64 {
+fn norm_zero_case(random: *Random, u: f64) f64 {
var x: f64 = 1;
var y: f64 = 0;
@@ -149,7 +149,7 @@ fn exp_f(x: f64) f64 {
fn exp_f_inv(y: f64) f64 {
return -math.ln(y);
}
-fn exp_zero_case(random: &Random, _: f64) f64 {
+fn exp_zero_case(random: *Random, _: f64) f64 {
return exp_r - math.ln(random.float(f64));
}
diff --git a/std/segmented_list.zig b/std/segmented_list.zig
index d755135fe8c5..be9a2071a0e8 100644
--- a/std/segmented_list.zig
+++ b/std/segmented_list.zig
@@ -87,49 +87,49 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
const ShelfIndex = std.math.Log2Int(usize);
prealloc_segment: [prealloc_item_count]T,
- dynamic_segments: []&T,
- allocator: &Allocator,
+ dynamic_segments: []*T,
+ allocator: *Allocator,
len: usize,
pub const prealloc_count = prealloc_item_count;
/// Deinitialize with `deinit`
- pub fn init(allocator: &Allocator) Self {
+ pub fn init(allocator: *Allocator) Self {
return Self{
.allocator = allocator,
.len = 0,
.prealloc_segment = undefined,
- .dynamic_segments = []&T{},
+ .dynamic_segments = []*T{},
};
}
- pub fn deinit(self: &Self) void {
+ pub fn deinit(self: *Self) void {
self.freeShelves(ShelfIndex(self.dynamic_segments.len), 0);
self.allocator.free(self.dynamic_segments);
self.* = undefined;
}
- pub fn at(self: &Self, i: usize) &T {
+ pub fn at(self: *Self, i: usize) *T {
assert(i < self.len);
return self.uncheckedAt(i);
}
- pub fn count(self: &const Self) usize {
+ pub fn count(self: *const Self) usize {
return self.len;
}
- pub fn push(self: &Self, item: &const T) !void {
+ pub fn push(self: *Self, item: *const T) !void {
const new_item_ptr = try self.addOne();
new_item_ptr.* = item.*;
}
- pub fn pushMany(self: &Self, items: []const T) !void {
+ pub fn pushMany(self: *Self, items: []const T) !void {
for (items) |item| {
try self.push(item);
}
}
- pub fn pop(self: &Self) ?T {
+ pub fn pop(self: *Self) ?T {
if (self.len == 0) return null;
const index = self.len - 1;
@@ -138,7 +138,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
return result;
}
- pub fn addOne(self: &Self) !&T {
+ pub fn addOne(self: *Self) !*T {
const new_length = self.len + 1;
try self.growCapacity(new_length);
const result = self.uncheckedAt(self.len);
@@ -147,7 +147,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
}
/// Grows or shrinks capacity to match usage.
- pub fn setCapacity(self: &Self, new_capacity: usize) !void {
+ pub fn setCapacity(self: *Self, new_capacity: usize) !void {
if (new_capacity <= usize(1) << (prealloc_exp + self.dynamic_segments.len)) {
return self.shrinkCapacity(new_capacity);
} else {
@@ -156,15 +156,15 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
}
/// Only grows capacity, or retains current capacity
- pub fn growCapacity(self: &Self, new_capacity: usize) !void {
+ pub fn growCapacity(self: *Self, new_capacity: usize) !void {
const new_cap_shelf_count = shelfCount(new_capacity);
const old_shelf_count = ShelfIndex(self.dynamic_segments.len);
if (new_cap_shelf_count > old_shelf_count) {
- self.dynamic_segments = try self.allocator.realloc(&T, self.dynamic_segments, new_cap_shelf_count);
+ self.dynamic_segments = try self.allocator.realloc(*T, self.dynamic_segments, new_cap_shelf_count);
var i = old_shelf_count;
errdefer {
self.freeShelves(i, old_shelf_count);
- self.dynamic_segments = self.allocator.shrink(&T, self.dynamic_segments, old_shelf_count);
+ self.dynamic_segments = self.allocator.shrink(*T, self.dynamic_segments, old_shelf_count);
}
while (i < new_cap_shelf_count) : (i += 1) {
self.dynamic_segments[i] = (try self.allocator.alloc(T, shelfSize(i))).ptr;
@@ -173,12 +173,12 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
}
/// Only shrinks capacity or retains current capacity
- pub fn shrinkCapacity(self: &Self, new_capacity: usize) void {
+ pub fn shrinkCapacity(self: *Self, new_capacity: usize) void {
if (new_capacity <= prealloc_item_count) {
const len = ShelfIndex(self.dynamic_segments.len);
self.freeShelves(len, 0);
self.allocator.free(self.dynamic_segments);
- self.dynamic_segments = []&T{};
+ self.dynamic_segments = []*T{};
return;
}
@@ -190,10 +190,10 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
}
self.freeShelves(old_shelf_count, new_cap_shelf_count);
- self.dynamic_segments = self.allocator.shrink(&T, self.dynamic_segments, new_cap_shelf_count);
+ self.dynamic_segments = self.allocator.shrink(*T, self.dynamic_segments, new_cap_shelf_count);
}
- pub fn uncheckedAt(self: &Self, index: usize) &T {
+ pub fn uncheckedAt(self: *Self, index: usize) *T {
if (index < prealloc_item_count) {
return &self.prealloc_segment[index];
}
@@ -230,7 +230,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
return list_index + prealloc_item_count - (usize(1) << ((prealloc_exp + 1) + shelf_index));
}
- fn freeShelves(self: &Self, from_count: ShelfIndex, to_count: ShelfIndex) void {
+ fn freeShelves(self: *Self, from_count: ShelfIndex, to_count: ShelfIndex) void {
var i = from_count;
while (i != to_count) {
i -= 1;
@@ -239,13 +239,13 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
}
pub const Iterator = struct {
- list: &Self,
+ list: *Self,
index: usize,
box_index: usize,
shelf_index: ShelfIndex,
shelf_size: usize,
- pub fn next(it: &Iterator) ?&T {
+ pub fn next(it: *Iterator) ?*T {
if (it.index >= it.list.len) return null;
if (it.index < prealloc_item_count) {
const ptr = &it.list.prealloc_segment[it.index];
@@ -269,7 +269,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
return ptr;
}
- pub fn prev(it: &Iterator) ?&T {
+ pub fn prev(it: *Iterator) ?*T {
if (it.index == 0) return null;
it.index -= 1;
@@ -286,7 +286,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
return &it.list.dynamic_segments[it.shelf_index][it.box_index];
}
- pub fn peek(it: &Iterator) ?&T {
+ pub fn peek(it: *Iterator) ?*T {
if (it.index >= it.list.len)
return null;
if (it.index < prealloc_item_count)
@@ -295,7 +295,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
return &it.list.dynamic_segments[it.shelf_index][it.box_index];
}
- pub fn set(it: &Iterator, index: usize) void {
+ pub fn set(it: *Iterator, index: usize) void {
it.index = index;
if (index < prealloc_item_count) return;
it.shelf_index = shelfIndex(index);
@@ -304,7 +304,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
}
};
- pub fn iterator(self: &Self, start_index: usize) Iterator {
+ pub fn iterator(self: *Self, start_index: usize) Iterator {
var it = Iterator{
.list = self,
.index = undefined,
@@ -331,7 +331,7 @@ test "std.SegmentedList" {
try testSegmentedList(16, a);
}
-fn testSegmentedList(comptime prealloc: usize, allocator: &Allocator) !void {
+fn testSegmentedList(comptime prealloc: usize, allocator: *Allocator) !void {
var list = SegmentedList(i32, prealloc).init(allocator);
defer list.deinit();
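
With the *Allocator and *Self signatures above, typical use looks like the sketch below. The prealloc size of 4 is arbitrary, and std.SegmentedList is assumed to be the public alias for this file:

    const std = @import("std");

    fn sumFirstFour(allocator: *std.mem.Allocator) !i32 {
        var list = std.SegmentedList(i32, 4).init(allocator);
        defer list.deinit();

        var i: i32 = 1;
        while (i <= 4) : (i += 1) {
            try list.push(i); // push takes *const i32; a value still coerces
        }

        var sum: i32 = 0;
        var it = list.iterator(0);
        while (it.next()) |item| {
            sum += item.*; // the iterator hands out *i32
        }
        return sum;
    }
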
diff --git a/std/sort.zig b/std/sort.zig
index 4e17718241bb..1b44c18dd9f6 100644
--- a/std/sort.zig
+++ b/std/sort.zig
@@ -5,7 +5,7 @@ const math = std.math;
const builtin = @import("builtin");
/// Stable in-place sort. O(n) best case, O(pow(n, 2)) worst case. O(1) memory (no allocator required).
-pub fn insertionSort(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &const T) bool) void {
+pub fn insertionSort(comptime T: type, items: []T, lessThan: fn (lhs: *const T, rhs: *const T) bool) void {
{
var i: usize = 1;
while (i < items.len) : (i += 1) {
@@ -30,7 +30,7 @@ const Range = struct {
};
}
- fn length(self: &const Range) usize {
+ fn length(self: *const Range) usize {
return self.end - self.start;
}
};
@@ -58,12 +58,12 @@ const Iterator = struct {
};
}
- fn begin(self: &Iterator) void {
+ fn begin(self: *Iterator) void {
self.numerator = 0;
self.decimal = 0;
}
- fn nextRange(self: &Iterator) Range {
+ fn nextRange(self: *Iterator) Range {
const start = self.decimal;
self.decimal += self.decimal_step;
@@ -79,11 +79,11 @@ const Iterator = struct {
};
}
- fn finished(self: &Iterator) bool {
+ fn finished(self: *Iterator) bool {
return self.decimal >= self.size;
}
- fn nextLevel(self: &Iterator) bool {
+ fn nextLevel(self: *Iterator) bool {
self.decimal_step += self.decimal_step;
self.numerator_step += self.numerator_step;
if (self.numerator_step >= self.denominator) {
@@ -94,7 +94,7 @@ const Iterator = struct {
return (self.decimal_step < self.size);
}
- fn length(self: &Iterator) usize {
+ fn length(self: *Iterator) usize {
return self.decimal_step;
}
};
@@ -108,7 +108,7 @@ const Pull = struct {
/// Stable in-place sort. O(n) best case, O(n*log(n)) worst case and average case. O(1) memory (no allocator required).
/// Currently implemented as block sort.
-pub fn sort(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &const T) bool) void {
+pub fn sort(comptime T: type, items: []T, lessThan: fn (lhs: *const T, rhs: *const T) bool) void {
// Implementation ported from https://github.com/BonzaiThePenguin/WikiSort/blob/master/WikiSort.c
var cache: [512]T = undefined;
@@ -741,7 +741,7 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &con
}
// merge operation without a buffer
-fn mergeInPlace(comptime T: type, items: []T, A_arg: &const Range, B_arg: &const Range, lessThan: fn (&const T, &const T) bool) void {
+fn mergeInPlace(comptime T: type, items: []T, A_arg: *const Range, B_arg: *const Range, lessThan: fn (*const T, *const T) bool) void {
if (A_arg.length() == 0 or B_arg.length() == 0) return;
// this just repeatedly binary searches into B and rotates A into position.
@@ -783,7 +783,7 @@ fn mergeInPlace(comptime T: type, items: []T, A_arg: &const Range, B_arg: &const
}
// merge operation using an internal buffer
-fn mergeInternal(comptime T: type, items: []T, A: &const Range, B: &const Range, lessThan: fn (&const T, &const T) bool, buffer: &const Range) void {
+fn mergeInternal(comptime T: type, items: []T, A: *const Range, B: *const Range, lessThan: fn (*const T, *const T) bool, buffer: *const Range) void {
// whenever we find a value to add to the final array, swap it with the value that's already in that spot
// when this algorithm is finished, 'buffer' will contain its original contents, but in a different order
var A_count: usize = 0;
@@ -819,7 +819,7 @@ fn blockSwap(comptime T: type, items: []T, start1: usize, start2: usize, block_s
// combine a linear search with a binary search to reduce the number of comparisons in situations
// where we have some idea as to how many unique values there are and where the next value might be
-fn findFirstForward(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn (&const T, &const T) bool, unique: usize) usize {
+fn findFirstForward(comptime T: type, items: []T, value: *const T, range: *const Range, lessThan: fn (*const T, *const T) bool, unique: usize) usize {
if (range.length() == 0) return range.start;
const skip = math.max(range.length() / unique, usize(1));
@@ -833,7 +833,7 @@ fn findFirstForward(comptime T: type, items: []T, value: &const T, range: &const
return binaryFirst(T, items, value, Range.init(index - skip, index), lessThan);
}
-fn findFirstBackward(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn (&const T, &const T) bool, unique: usize) usize {
+fn findFirstBackward(comptime T: type, items: []T, value: *const T, range: *const Range, lessThan: fn (*const T, *const T) bool, unique: usize) usize {
if (range.length() == 0) return range.start;
const skip = math.max(range.length() / unique, usize(1));
@@ -847,7 +847,7 @@ fn findFirstBackward(comptime T: type, items: []T, value: &const T, range: &cons
return binaryFirst(T, items, value, Range.init(index, index + skip), lessThan);
}
-fn findLastForward(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn (&const T, &const T) bool, unique: usize) usize {
+fn findLastForward(comptime T: type, items: []T, value: *const T, range: *const Range, lessThan: fn (*const T, *const T) bool, unique: usize) usize {
if (range.length() == 0) return range.start;
const skip = math.max(range.length() / unique, usize(1));
@@ -861,7 +861,7 @@ fn findLastForward(comptime T: type, items: []T, value: &const T, range: &const
return binaryLast(T, items, value, Range.init(index - skip, index), lessThan);
}
-fn findLastBackward(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn (&const T, &const T) bool, unique: usize) usize {
+fn findLastBackward(comptime T: type, items: []T, value: *const T, range: *const Range, lessThan: fn (*const T, *const T) bool, unique: usize) usize {
if (range.length() == 0) return range.start;
const skip = math.max(range.length() / unique, usize(1));
@@ -875,7 +875,7 @@ fn findLastBackward(comptime T: type, items: []T, value: &const T, range: &const
return binaryLast(T, items, value, Range.init(index, index + skip), lessThan);
}
-fn binaryFirst(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn (&const T, &const T) bool) usize {
+fn binaryFirst(comptime T: type, items: []T, value: *const T, range: *const Range, lessThan: fn (*const T, *const T) bool) usize {
var start = range.start;
var end = range.end - 1;
if (range.start >= range.end) return range.end;
@@ -893,7 +893,7 @@ fn binaryFirst(comptime T: type, items: []T, value: &const T, range: &const Rang
return start;
}
-fn binaryLast(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn (&const T, &const T) bool) usize {
+fn binaryLast(comptime T: type, items: []T, value: *const T, range: *const Range, lessThan: fn (*const T, *const T) bool) usize {
var start = range.start;
var end = range.end - 1;
if (range.start >= range.end) return range.end;
@@ -911,7 +911,7 @@ fn binaryLast(comptime T: type, items: []T, value: &const T, range: &const Range
return start;
}
-fn mergeInto(comptime T: type, from: []T, A: &const Range, B: &const Range, lessThan: fn (&const T, &const T) bool, into: []T) void {
+fn mergeInto(comptime T: type, from: []T, A: *const Range, B: *const Range, lessThan: fn (*const T, *const T) bool, into: []T) void {
var A_index: usize = A.start;
var B_index: usize = B.start;
const A_last = A.end;
@@ -941,7 +941,7 @@ fn mergeInto(comptime T: type, from: []T, A: &const Range, B: &const Range, less
}
}
-fn mergeExternal(comptime T: type, items: []T, A: &const Range, B: &const Range, lessThan: fn (&const T, &const T) bool, cache: []T) void {
+fn mergeExternal(comptime T: type, items: []T, A: *const Range, B: *const Range, lessThan: fn (*const T, *const T) bool, cache: []T) void {
// A fits into the cache, so use that instead of the internal buffer
var A_index: usize = 0;
var B_index: usize = B.start;
@@ -969,26 +969,26 @@ fn mergeExternal(comptime T: type, items: []T, A: &const Range, B: &const Range,
mem.copy(T, items[insert_index..], cache[A_index..A_last]);
}
-fn swap(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &const T) bool, order: &[8]u8, x: usize, y: usize) void {
+fn swap(comptime T: type, items: []T, lessThan: fn (lhs: *const T, rhs: *const T) bool, order: *[8]u8, x: usize, y: usize) void {
if (lessThan(items[y], items[x]) or ((order.*)[x] > (order.*)[y] and !lessThan(items[x], items[y]))) {
mem.swap(T, &items[x], &items[y]);
mem.swap(u8, &(order.*)[x], &(order.*)[y]);
}
}
-fn i32asc(lhs: &const i32, rhs: &const i32) bool {
+fn i32asc(lhs: *const i32, rhs: *const i32) bool {
return lhs.* < rhs.*;
}
-fn i32desc(lhs: &const i32, rhs: &const i32) bool {
+fn i32desc(lhs: *const i32, rhs: *const i32) bool {
return rhs.* < lhs.*;
}
-fn u8asc(lhs: &const u8, rhs: &const u8) bool {
+fn u8asc(lhs: *const u8, rhs: *const u8) bool {
return lhs.* < rhs.*;
}
-fn u8desc(lhs: &const u8, rhs: &const u8) bool {
+fn u8desc(lhs: *const u8, rhs: *const u8) bool {
return rhs.* < lhs.*;
}
@@ -1125,7 +1125,7 @@ const IdAndValue = struct {
id: usize,
value: i32,
};
-fn cmpByValue(a: &const IdAndValue, b: &const IdAndValue) bool {
+fn cmpByValue(a: *const IdAndValue, b: *const IdAndValue) bool {
return i32asc(a.value, b.value);
}
@@ -1324,7 +1324,7 @@ test "sort fuzz testing" {
var fixed_buffer_mem: [100 * 1024]u8 = undefined;
-fn fuzzTest(rng: &std.rand.Random) void {
+fn fuzzTest(rng: *std.rand.Random) void {
const array_size = rng.range(usize, 0, 1000);
var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
var array = fixed_allocator.allocator.alloc(IdAndValue, array_size) catch unreachable;
@@ -1345,7 +1345,7 @@ fn fuzzTest(rng: &std.rand.Random) void {
}
}
-pub fn min(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &const T) bool) T {
+pub fn min(comptime T: type, items: []T, lessThan: fn (lhs: *const T, rhs: *const T) bool) T {
var i: usize = 0;
var smallest = items[0];
for (items[1..]) |item| {
@@ -1356,7 +1356,7 @@ pub fn min(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &cons
return smallest;
}
-pub fn max(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &const T) bool) T {
+pub fn max(comptime T: type, items: []T, lessThan: fn (lhs: *const T, rhs: *const T) bool) T {
var i: usize = 0;
var biggest = items[0];
for (items[1..]) |item| {
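
Comparators are now declared with *const T parameters, exactly like i32asc above. A hedged sketch of sorting a slice with such a comparator:

    const std = @import("std");

    fn u32asc(lhs: *const u32, rhs: *const u32) bool {
        return lhs.* < rhs.*;
    }

    test "sort with *const comparator" {
        var values = []u32{ 5, 1, 4, 2, 3 };
        std.sort.sort(u32, values[0..], u32asc);
        std.debug.assert(values[0] == 1 and values[4] == 5);
    }
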
diff --git a/std/special/bootstrap.zig b/std/special/bootstrap.zig
index c10f4aa80694..5ed7874ca583 100644
--- a/std/special/bootstrap.zig
+++ b/std/special/bootstrap.zig
@@ -5,7 +5,7 @@ const root = @import("@root");
const std = @import("std");
const builtin = @import("builtin");
-var argc_ptr: &usize = undefined;
+var argc_ptr: *usize = undefined;
comptime {
const strong_linkage = builtin.GlobalLinkage.Strong;
@@ -28,12 +28,12 @@ nakedcc fn _start() noreturn {
switch (builtin.arch) {
builtin.Arch.x86_64 => {
argc_ptr = asm ("lea (%%rsp), %[argc]"
- : [argc] "=r" (-> &usize)
+ : [argc] "=r" (-> *usize)
);
},
builtin.Arch.i386 => {
argc_ptr = asm ("lea (%%esp), %[argc]"
- : [argc] "=r" (-> &usize)
+ : [argc] "=r" (-> *usize)
);
},
else => @compileError("unsupported arch"),
@@ -51,13 +51,13 @@ extern fn WinMainCRTStartup() noreturn {
fn posixCallMainAndExit() noreturn {
const argc = argc_ptr.*;
- const argv = @ptrCast(&&u8, &argc_ptr[1]);
- const envp_nullable = @ptrCast(&?&u8, &argv[argc + 1]);
+ const argv = @ptrCast(**u8, &argc_ptr[1]);
+ const envp_nullable = @ptrCast(*?*u8, &argv[argc + 1]);
var envp_count: usize = 0;
while (envp_nullable[envp_count]) |_| : (envp_count += 1) {}
- const envp = @ptrCast(&&u8, envp_nullable)[0..envp_count];
+ const envp = @ptrCast(**u8, envp_nullable)[0..envp_count];
if (builtin.os == builtin.Os.linux) {
- const auxv = &@ptrCast(&usize, envp.ptr)[envp_count + 1];
+ const auxv = &@ptrCast(*usize, envp.ptr)[envp_count + 1];
var i: usize = 0;
while (auxv[i] != 0) : (i += 2) {
if (auxv[i] < std.os.linux_aux_raw.len) std.os.linux_aux_raw[auxv[i]] = auxv[i + 1];
@@ -68,16 +68,16 @@ fn posixCallMainAndExit() noreturn {
std.os.posix.exit(callMainWithArgs(argc, argv, envp));
}
-fn callMainWithArgs(argc: usize, argv: &&u8, envp: []&u8) u8 {
+fn callMainWithArgs(argc: usize, argv: **u8, envp: []*u8) u8 {
std.os.ArgIteratorPosix.raw = argv[0..argc];
std.os.posix_environ_raw = envp;
return callMain();
}
-extern fn main(c_argc: i32, c_argv: &&u8, c_envp: &?&u8) i32 {
+extern fn main(c_argc: i32, c_argv: **u8, c_envp: *?*u8) i32 {
var env_count: usize = 0;
while (c_envp[env_count] != null) : (env_count += 1) {}
- const envp = @ptrCast(&&u8, c_envp)[0..env_count];
+ const envp = @ptrCast(**u8, c_envp)[0..env_count];
return callMainWithArgs(usize(c_argc), c_argv, envp);
}
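(The bootstrap hunks lean on @ptrCast and optional pointers; a small sketch in the same era's builtin style, using a throwaway local value rather than the real argc/argv plumbing.)

const std = @import("std");

test "casts spell the destination pointer type as *T" {
    var value: u32 = 0x01020304;
    var maybe_byte: ?*u8 = null; // optional pointer: was ?&u8, now ?*u8
    maybe_byte = @ptrCast(*u8, &value); // destination type is *u8; &value is still address-of
    if (maybe_byte) |byte_ptr| {
        // the first byte is 0x04 or 0x01 depending on target endianness
        std.debug.assert(byte_ptr.* == 0x04 or byte_ptr.* == 0x01);
    }
}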
diff --git a/std/special/build_file_template.zig b/std/special/build_file_template.zig
index 1c06c93cdcc8..1e3eb01136a1 100644
--- a/std/special/build_file_template.zig
+++ b/std/special/build_file_template.zig
@@ -1,10 +1,10 @@
const Builder = @import("std").build.Builder;
-pub fn build(b: &Builder) void {
+pub fn build(b: *Builder) void {
const mode = b.standardReleaseOptions();
const exe = b.addExecutable("YOUR_NAME_HERE", "src/main.zig");
exe.setBuildMode(mode);
b.default_step.dependOn(&exe.step);
b.installArtifact(exe);
}
diff --git a/std/special/build_runner.zig b/std/special/build_runner.zig
index 3ff11bbee4c4..3471d6ed2175 100644
--- a/std/special/build_runner.zig
+++ b/std/special/build_runner.zig
@@ -129,7 +129,7 @@ pub fn main() !void {
};
}
-fn runBuild(builder: &Builder) error!void {
+fn runBuild(builder: *Builder) error!void {
switch (@typeId(@typeOf(root.build).ReturnType)) {
builtin.TypeId.Void => root.build(builder),
builtin.TypeId.ErrorUnion => try root.build(builder),
@@ -137,7 +137,7 @@ fn runBuild(builder: &Builder) error!void {
}
}
-fn usage(builder: &Builder, already_ran_build: bool, out_stream: var) !void {
+fn usage(builder: *Builder, already_ran_build: bool, out_stream: var) !void {
// run the build script to collect the options
if (!already_ran_build) {
builder.setInstallPrefix(null);
@@ -195,7 +195,7 @@ fn usage(builder: &Builder, already_ran_build: bool, out_stream: var) !void {
);
}
-fn usageAndErr(builder: &Builder, already_ran_build: bool, out_stream: var) error {
+fn usageAndErr(builder: *Builder, already_ran_build: bool, out_stream: var) error {
usage(builder, already_ran_build, out_stream) catch {};
return error.InvalidArgs;
}
diff --git a/std/special/builtin.zig b/std/special/builtin.zig
index 63149d5161c4..9c9cd3510312 100644
--- a/std/special/builtin.zig
+++ b/std/special/builtin.zig
@@ -5,7 +5,7 @@ const builtin = @import("builtin");
// Avoid dragging in the runtime safety mechanisms into this .o file,
// unless we're trying to test this file.
-pub fn panic(msg: []const u8, error_return_trace: ?&builtin.StackTrace) noreturn {
+pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn {
if (builtin.is_test) {
@setCold(true);
@import("std").debug.panic("{}", msg);
@@ -14,7 +14,7 @@ pub fn panic(msg: []const u8, error_return_trace: ?&builtin.StackTrace) noreturn
}
}
-export fn memset(dest: ?&u8, c: u8, n: usize) ?&u8 {
+export fn memset(dest: ?*u8, c: u8, n: usize) ?*u8 {
@setRuntimeSafety(false);
var index: usize = 0;
@@ -24,7 +24,7 @@ export fn memset(dest: ?&u8, c: u8, n: usize) ?&u8 {
return dest;
}
-export fn memcpy(noalias dest: ?&u8, noalias src: ?&const u8, n: usize) ?&u8 {
+export fn memcpy(noalias dest: ?*u8, noalias src: ?*const u8, n: usize) ?*u8 {
@setRuntimeSafety(false);
var index: usize = 0;
@@ -34,7 +34,7 @@ export fn memcpy(noalias dest: ?&u8, noalias src: ?&const u8, n: usize) ?&u8 {
return dest;
}
-export fn memmove(dest: ?&u8, src: ?&const u8, n: usize) ?&u8 {
+export fn memmove(dest: ?*u8, src: ?*const u8, n: usize) ?*u8 {
@setRuntimeSafety(false);
if (@ptrToInt(dest) < @ptrToInt(src)) {
diff --git a/std/special/compiler_rt/index.zig b/std/special/compiler_rt/index.zig
index 3e014d4d16c8..d328324320a4 100644
--- a/std/special/compiler_rt/index.zig
+++ b/std/special/compiler_rt/index.zig
@@ -78,7 +78,7 @@ const __udivmoddi4 = @import("udivmoddi4.zig").__udivmoddi4;
// Avoid dragging in the runtime safety mechanisms into this .o file,
// unless we're trying to test this file.
-pub fn panic(msg: []const u8, error_return_trace: ?&builtin.StackTrace) noreturn {
+pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn {
@setCold(true);
if (is_test) {
std.debug.panic("{}", msg);
@@ -284,7 +284,7 @@ nakedcc fn ___chkstk_ms() align(4) void {
);
}
-extern fn __udivmodsi4(a: u32, b: u32, rem: &u32) u32 {
+extern fn __udivmodsi4(a: u32, b: u32, rem: *u32) u32 {
@setRuntimeSafety(is_test);
const d = __udivsi3(a, b);
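(__udivmodsi4 keeps its out-parameter; only the type spelling changes. A self-contained sketch with a hypothetical local stand-in, so nothing here depends on linking the real compiler_rt symbol.)

const std = @import("std");

// Local stand-in with the same shape as __udivmodsi4 above, for illustration only.
fn udivmod32(a: u32, b: u32, rem: *u32) u32 {
    rem.* = a % b;
    return a / b;
}

test "out-parameters keep & at the call site" {
    var rem: u32 = undefined;
    const q = udivmod32(17, 5, &rem); // parameter type is *u32; the caller still writes &rem
    std.debug.assert(q == 3 and rem == 2);
}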
diff --git a/std/special/compiler_rt/udivmod.zig b/std/special/compiler_rt/udivmod.zig
index 0dee5e45f68c..894dd022393d 100644
--- a/std/special/compiler_rt/udivmod.zig
+++ b/std/special/compiler_rt/udivmod.zig
@@ -7,15 +7,15 @@ const low = switch (builtin.endian) {
};
const high = 1 - low;
-pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem: ?&DoubleInt) DoubleInt {
+pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem: ?*DoubleInt) DoubleInt {
@setRuntimeSafety(is_test);
const SingleInt = @IntType(false, @divExact(DoubleInt.bit_count, 2));
const SignedDoubleInt = @IntType(true, DoubleInt.bit_count);
const Log2SingleInt = @import("std").math.Log2Int(SingleInt);
- const n = @ptrCast(&const [2]SingleInt, &a).*; // TODO issue #421
- const d = @ptrCast(&const [2]SingleInt, &b).*; // TODO issue #421
+ const n = @ptrCast(*const [2]SingleInt, &a).*; // TODO issue #421
+ const d = @ptrCast(*const [2]SingleInt, &b).*; // TODO issue #421
var q: [2]SingleInt = undefined;
var r: [2]SingleInt = undefined;
var sr: c_uint = undefined;
@@ -57,7 +57,7 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
if (maybe_rem) |rem| {
r[high] = n[high] % d[high];
r[low] = 0;
- rem.* = @ptrCast(&align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421
+ rem.* = @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421
}
return n[high] / d[high];
}
@@ -69,7 +69,7 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
if (maybe_rem) |rem| {
r[low] = n[low];
r[high] = n[high] & (d[high] - 1);
- rem.* = @ptrCast(&align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421
+ rem.* = @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421
}
return n[high] >> Log2SingleInt(@ctz(d[high]));
}
@@ -109,7 +109,7 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
sr = @ctz(d[low]);
q[high] = n[high] >> Log2SingleInt(sr);
q[low] = (n[high] << Log2SingleInt(SingleInt.bit_count - sr)) | (n[low] >> Log2SingleInt(sr));
- return @ptrCast(&align(@alignOf(SingleInt)) DoubleInt, &q[0]).*; // TODO issue #421
+ return @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &q[0]).*; // TODO issue #421
}
// K X
// ---
@@ -183,13 +183,13 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
// r.all -= b;
// carry = 1;
// }
- r_all = @ptrCast(&align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421
+ r_all = @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421
const s: SignedDoubleInt = SignedDoubleInt(b -% r_all -% 1) >> (DoubleInt.bit_count - 1);
carry = u32(s & 1);
r_all -= b & @bitCast(DoubleInt, s);
- r = @ptrCast(&[2]SingleInt, &r_all).*; // TODO issue #421
+ r = @ptrCast(*[2]SingleInt, &r_all).*; // TODO issue #421
}
- const q_all = ((@ptrCast(&align(@alignOf(SingleInt)) DoubleInt, &q[0]).*) << 1) | carry; // TODO issue #421
+ const q_all = ((@ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &q[0]).*) << 1) | carry; // TODO issue #421
if (maybe_rem) |rem| {
rem.* = r_all;
}
diff --git a/std/special/compiler_rt/udivmoddi4.zig b/std/special/compiler_rt/udivmoddi4.zig
index 6cc54bb6bf10..de86c845e533 100644
--- a/std/special/compiler_rt/udivmoddi4.zig
+++ b/std/special/compiler_rt/udivmoddi4.zig
@@ -1,7 +1,7 @@
const udivmod = @import("udivmod.zig").udivmod;
const builtin = @import("builtin");
-pub extern fn __udivmoddi4(a: u64, b: u64, maybe_rem: ?&u64) u64 {
+pub extern fn __udivmoddi4(a: u64, b: u64, maybe_rem: ?*u64) u64 {
@setRuntimeSafety(builtin.is_test);
return udivmod(u64, a, b, maybe_rem);
}
diff --git a/std/special/compiler_rt/udivmodti4.zig b/std/special/compiler_rt/udivmodti4.zig
index 816f82b90034..3fa596442f27 100644
--- a/std/special/compiler_rt/udivmodti4.zig
+++ b/std/special/compiler_rt/udivmodti4.zig
@@ -2,12 +2,12 @@ const udivmod = @import("udivmod.zig").udivmod;
const builtin = @import("builtin");
const compiler_rt = @import("index.zig");
-pub extern fn __udivmodti4(a: u128, b: u128, maybe_rem: ?&u128) u128 {
+pub extern fn __udivmodti4(a: u128, b: u128, maybe_rem: ?*u128) u128 {
@setRuntimeSafety(builtin.is_test);
return udivmod(u128, a, b, maybe_rem);
}
-pub extern fn __udivmodti4_windows_x86_64(a: &const u128, b: &const u128, maybe_rem: ?&u128) void {
+pub extern fn __udivmodti4_windows_x86_64(a: *const u128, b: *const u128, maybe_rem: ?*u128) void {
@setRuntimeSafety(builtin.is_test);
compiler_rt.setXmm0(u128, udivmod(u128, a.*, b.*, maybe_rem));
}
diff --git a/std/special/compiler_rt/udivti3.zig b/std/special/compiler_rt/udivti3.zig
index ad0f09e733c9..510e21ac1dac 100644
--- a/std/special/compiler_rt/udivti3.zig
+++ b/std/special/compiler_rt/udivti3.zig
@@ -6,7 +6,7 @@ pub extern fn __udivti3(a: u128, b: u128) u128 {
return udivmodti4.__udivmodti4(a, b, null);
}
-pub extern fn __udivti3_windows_x86_64(a: &const u128, b: &const u128) void {
+pub extern fn __udivti3_windows_x86_64(a: *const u128, b: *const u128) void {
@setRuntimeSafety(builtin.is_test);
udivmodti4.__udivmodti4_windows_x86_64(a, b, null);
}
diff --git a/std/special/compiler_rt/umodti3.zig b/std/special/compiler_rt/umodti3.zig
index 11e2955bb346..9551e63a6f8e 100644
--- a/std/special/compiler_rt/umodti3.zig
+++ b/std/special/compiler_rt/umodti3.zig
@@ -9,7 +9,7 @@ pub extern fn __umodti3(a: u128, b: u128) u128 {
return r;
}
-pub extern fn __umodti3_windows_x86_64(a: &const u128, b: &const u128) void {
+pub extern fn __umodti3_windows_x86_64(a: *const u128, b: *const u128) void {
@setRuntimeSafety(builtin.is_test);
compiler_rt.setXmm0(u128, __umodti3(a.*, b.*));
}
diff --git a/std/special/panic.zig b/std/special/panic.zig
index 8f933ddd97a1..ca1caea73cae 100644
--- a/std/special/panic.zig
+++ b/std/special/panic.zig
@@ -6,7 +6,7 @@
const builtin = @import("builtin");
const std = @import("std");
-pub fn panic(msg: []const u8, error_return_trace: ?&builtin.StackTrace) noreturn {
+pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn {
@setCold(true);
switch (builtin.os) {
// TODO: fix panic in zen.
diff --git a/std/unicode.zig b/std/unicode.zig
index 36f04778f405..3d1bebdb5500 100644
--- a/std/unicode.zig
+++ b/std/unicode.zig
@@ -196,7 +196,7 @@ pub const Utf8View = struct {
}
}
- pub fn iterator(s: &const Utf8View) Utf8Iterator {
+ pub fn iterator(s: *const Utf8View) Utf8Iterator {
return Utf8Iterator{
.bytes = s.bytes,
.i = 0,
@@ -208,7 +208,7 @@ const Utf8Iterator = struct {
bytes: []const u8,
i: usize,
- pub fn nextCodepointSlice(it: &Utf8Iterator) ?[]const u8 {
+ pub fn nextCodepointSlice(it: *Utf8Iterator) ?[]const u8 {
if (it.i >= it.bytes.len) {
return null;
}
@@ -219,7 +219,7 @@ const Utf8Iterator = struct {
return it.bytes[it.i - cp_len .. it.i];
}
- pub fn nextCodepoint(it: &Utf8Iterator) ?u32 {
+ pub fn nextCodepoint(it: *Utf8Iterator) ?u32 {
const slice = it.nextCodepointSlice() ?? return null;
switch (slice.len) {
diff --git a/std/zig/ast.zig b/std/zig/ast.zig
index 56d4f9c3934d..a4b64d5db2ee 100644
--- a/std/zig/ast.zig
+++ b/std/zig/ast.zig
@@ -9,26 +9,26 @@ pub const TokenIndex = usize;
pub const Tree = struct {
source: []const u8,
tokens: TokenList,
- root_node: &Node.Root,
+ root_node: *Node.Root,
arena_allocator: std.heap.ArenaAllocator,
errors: ErrorList,
pub const TokenList = SegmentedList(Token, 64);
pub const ErrorList = SegmentedList(Error, 0);
- pub fn deinit(self: &Tree) void {
+ pub fn deinit(self: *Tree) void {
self.arena_allocator.deinit();
}
- pub fn renderError(self: &Tree, parse_error: &Error, stream: var) !void {
+ pub fn renderError(self: *Tree, parse_error: *Error, stream: var) !void {
return parse_error.render(&self.tokens, stream);
}
- pub fn tokenSlice(self: &Tree, token_index: TokenIndex) []const u8 {
+ pub fn tokenSlice(self: *Tree, token_index: TokenIndex) []const u8 {
return self.tokenSlicePtr(self.tokens.at(token_index));
}
- pub fn tokenSlicePtr(self: &Tree, token: &const Token) []const u8 {
+ pub fn tokenSlicePtr(self: *Tree, token: *const Token) []const u8 {
return self.source[token.start..token.end];
}
@@ -39,7 +39,7 @@ pub const Tree = struct {
line_end: usize,
};
- pub fn tokenLocationPtr(self: &Tree, start_index: usize, token: &const Token) Location {
+ pub fn tokenLocationPtr(self: *Tree, start_index: usize, token: *const Token) Location {
var loc = Location{
.line = 0,
.column = 0,
@@ -64,24 +64,24 @@ pub const Tree = struct {
return loc;
}
- pub fn tokenLocation(self: &Tree, start_index: usize, token_index: TokenIndex) Location {
+ pub fn tokenLocation(self: *Tree, start_index: usize, token_index: TokenIndex) Location {
return self.tokenLocationPtr(start_index, self.tokens.at(token_index));
}
- pub fn tokensOnSameLine(self: &Tree, token1_index: TokenIndex, token2_index: TokenIndex) bool {
+ pub fn tokensOnSameLine(self: *Tree, token1_index: TokenIndex, token2_index: TokenIndex) bool {
return self.tokensOnSameLinePtr(self.tokens.at(token1_index), self.tokens.at(token2_index));
}
- pub fn tokensOnSameLinePtr(self: &Tree, token1: &const Token, token2: &const Token) bool {
+ pub fn tokensOnSameLinePtr(self: *Tree, token1: *const Token, token2: *const Token) bool {
return mem.indexOfScalar(u8, self.source[token1.end..token2.start], '\n') == null;
}
- pub fn dump(self: &Tree) void {
+ pub fn dump(self: *Tree) void {
self.root_node.base.dump(0);
}
/// Skips over comments
- pub fn prevToken(self: &Tree, token_index: TokenIndex) TokenIndex {
+ pub fn prevToken(self: *Tree, token_index: TokenIndex) TokenIndex {
var index = token_index - 1;
while (self.tokens.at(index).id == Token.Id.LineComment) {
index -= 1;
@@ -90,7 +90,7 @@ pub const Tree = struct {
}
/// Skips over comments
- pub fn nextToken(self: &Tree, token_index: TokenIndex) TokenIndex {
+ pub fn nextToken(self: *Tree, token_index: TokenIndex) TokenIndex {
var index = token_index + 1;
while (self.tokens.at(index).id == Token.Id.LineComment) {
index += 1;
@@ -120,7 +120,7 @@ pub const Error = union(enum) {
ExpectedToken: ExpectedToken,
ExpectedCommaOrEnd: ExpectedCommaOrEnd,
- pub fn render(self: &const Error, tokens: &Tree.TokenList, stream: var) !void {
+ pub fn render(self: *const Error, tokens: *Tree.TokenList, stream: var) !void {
switch (self.*) {
// TODO https://github.com/ziglang/zig/issues/683
@TagType(Error).InvalidToken => |*x| return x.render(tokens, stream),
@@ -145,7 +145,7 @@ pub const Error = union(enum) {
}
}
- pub fn loc(self: &const Error) TokenIndex {
+ pub fn loc(self: *const Error) TokenIndex {
switch (self.*) {
// TODO https://github.com/ziglang/zig/issues/683
@TagType(Error).InvalidToken => |x| return x.token,
@@ -188,17 +188,17 @@ pub const Error = union(enum) {
pub const ExtraVolatileQualifier = SimpleError("Extra volatile qualifier");
pub const ExpectedCall = struct {
- node: &Node,
+ node: *Node,
- pub fn render(self: &const ExpectedCall, tokens: &Tree.TokenList, stream: var) !void {
+ pub fn render(self: *const ExpectedCall, tokens: *Tree.TokenList, stream: var) !void {
return stream.print("expected " ++ @tagName(@TagType(Node.SuffixOp.Op).Call) ++ ", found {}", @tagName(self.node.id));
}
};
pub const ExpectedCallOrFnProto = struct {
- node: &Node,
+ node: *Node,
- pub fn render(self: &const ExpectedCallOrFnProto, tokens: &Tree.TokenList, stream: var) !void {
+ pub fn render(self: *const ExpectedCallOrFnProto, tokens: *Tree.TokenList, stream: var) !void {
return stream.print("expected " ++ @tagName(@TagType(Node.SuffixOp.Op).Call) ++ " or " ++ @tagName(Node.Id.FnProto) ++ ", found {}", @tagName(self.node.id));
}
};
@@ -207,7 +207,7 @@ pub const Error = union(enum) {
token: TokenIndex,
expected_id: @TagType(Token.Id),
- pub fn render(self: &const ExpectedToken, tokens: &Tree.TokenList, stream: var) !void {
+ pub fn render(self: *const ExpectedToken, tokens: *Tree.TokenList, stream: var) !void {
const token_name = @tagName(tokens.at(self.token).id);
return stream.print("expected {}, found {}", @tagName(self.expected_id), token_name);
}
@@ -217,7 +217,7 @@ pub const Error = union(enum) {
token: TokenIndex,
end_id: @TagType(Token.Id),
- pub fn render(self: &const ExpectedCommaOrEnd, tokens: &Tree.TokenList, stream: var) !void {
+ pub fn render(self: *const ExpectedCommaOrEnd, tokens: *Tree.TokenList, stream: var) !void {
const token_name = @tagName(tokens.at(self.token).id);
return stream.print("expected ',' or {}, found {}", @tagName(self.end_id), token_name);
}
@@ -229,7 +229,7 @@ pub const Error = union(enum) {
token: TokenIndex,
- pub fn render(self: &const ThisError, tokens: &Tree.TokenList, stream: var) !void {
+ pub fn render(self: *const ThisError, tokens: *Tree.TokenList, stream: var) !void {
const token_name = @tagName(tokens.at(self.token).id);
return stream.print(msg, token_name);
}
@@ -242,7 +242,7 @@ pub const Error = union(enum) {
token: TokenIndex,
- pub fn render(self: &const ThisError, tokens: &Tree.TokenList, stream: var) !void {
+ pub fn render(self: *const ThisError, tokens: *Tree.TokenList, stream: var) !void {
return stream.write(msg);
}
};
@@ -320,14 +320,14 @@ pub const Node = struct {
FieldInitializer,
};
- pub fn cast(base: &Node, comptime T: type) ?&T {
+ pub fn cast(base: *Node, comptime T: type) ?*T {
if (base.id == comptime typeToId(T)) {
return @fieldParentPtr(T, "base", base);
}
return null;
}
- pub fn iterate(base: &Node, index: usize) ?&Node {
+ pub fn iterate(base: *Node, index: usize) ?*Node {
comptime var i = 0;
inline while (i < @memberCount(Id)) : (i += 1) {
if (base.id == @field(Id, @memberName(Id, i))) {
@@ -338,7 +338,7 @@ pub const Node = struct {
unreachable;
}
- pub fn firstToken(base: &Node) TokenIndex {
+ pub fn firstToken(base: *Node) TokenIndex {
comptime var i = 0;
inline while (i < @memberCount(Id)) : (i += 1) {
if (base.id == @field(Id, @memberName(Id, i))) {
@@ -349,7 +349,7 @@ pub const Node = struct {
unreachable;
}
- pub fn lastToken(base: &Node) TokenIndex {
+ pub fn lastToken(base: *Node) TokenIndex {
comptime var i = 0;
inline while (i < @memberCount(Id)) : (i += 1) {
if (base.id == @field(Id, @memberName(Id, i))) {
@@ -370,7 +370,7 @@ pub const Node = struct {
unreachable;
}
- pub fn requireSemiColon(base: &const Node) bool {
+ pub fn requireSemiColon(base: *const Node) bool {
var n = base;
while (true) {
switch (n.id) {
@@ -443,7 +443,7 @@ pub const Node = struct {
}
}
- pub fn dump(self: &Node, indent: usize) void {
+ pub fn dump(self: *Node, indent: usize) void {
{
var i: usize = 0;
while (i < indent) : (i += 1) {
@@ -460,44 +460,44 @@ pub const Node = struct {
pub const Root = struct {
base: Node,
- doc_comments: ?&DocComment,
+ doc_comments: ?*DocComment,
decls: DeclList,
eof_token: TokenIndex,
- pub const DeclList = SegmentedList(&Node, 4);
+ pub const DeclList = SegmentedList(*Node, 4);
- pub fn iterate(self: &Root, index: usize) ?&Node {
+ pub fn iterate(self: *Root, index: usize) ?*Node {
if (index < self.decls.len) {
return self.decls.at(index).*;
}
return null;
}
- pub fn firstToken(self: &Root) TokenIndex {
+ pub fn firstToken(self: *Root) TokenIndex {
return if (self.decls.len == 0) self.eof_token else (self.decls.at(0).*).firstToken();
}
- pub fn lastToken(self: &Root) TokenIndex {
+ pub fn lastToken(self: *Root) TokenIndex {
return if (self.decls.len == 0) self.eof_token else (self.decls.at(self.decls.len - 1).*).lastToken();
}
};
pub const VarDecl = struct {
base: Node,
- doc_comments: ?&DocComment,
+ doc_comments: ?*DocComment,
visib_token: ?TokenIndex,
name_token: TokenIndex,
eq_token: TokenIndex,
mut_token: TokenIndex,
comptime_token: ?TokenIndex,
extern_export_token: ?TokenIndex,
- lib_name: ?&Node,
- type_node: ?&Node,
- align_node: ?&Node,
- init_node: ?&Node,
+ lib_name: ?*Node,
+ type_node: ?*Node,
+ align_node: ?*Node,
+ init_node: ?*Node,
semicolon_token: TokenIndex,
- pub fn iterate(self: &VarDecl, index: usize) ?&Node {
+ pub fn iterate(self: *VarDecl, index: usize) ?*Node {
var i = index;
if (self.type_node) |type_node| {
@@ -518,7 +518,7 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &VarDecl) TokenIndex {
+ pub fn firstToken(self: *VarDecl) TokenIndex {
if (self.visib_token) |visib_token| return visib_token;
if (self.comptime_token) |comptime_token| return comptime_token;
if (self.extern_export_token) |extern_export_token| return extern_export_token;
@@ -526,20 +526,20 @@ pub const Node = struct {
return self.mut_token;
}
- pub fn lastToken(self: &VarDecl) TokenIndex {
+ pub fn lastToken(self: *VarDecl) TokenIndex {
return self.semicolon_token;
}
};
pub const Use = struct {
base: Node,
- doc_comments: ?&DocComment,
+ doc_comments: ?*DocComment,
visib_token: ?TokenIndex,
use_token: TokenIndex,
- expr: &Node,
+ expr: *Node,
semicolon_token: TokenIndex,
- pub fn iterate(self: &Use, index: usize) ?&Node {
+ pub fn iterate(self: *Use, index: usize) ?*Node {
var i = index;
if (i < 1) return self.expr;
@@ -548,12 +548,12 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &Use) TokenIndex {
+ pub fn firstToken(self: *Use) TokenIndex {
if (self.visib_token) |visib_token| return visib_token;
return self.use_token;
}
- pub fn lastToken(self: &Use) TokenIndex {
+ pub fn lastToken(self: *Use) TokenIndex {
return self.semicolon_token;
}
};
@@ -564,9 +564,9 @@ pub const Node = struct {
decls: DeclList,
rbrace_token: TokenIndex,
- pub const DeclList = SegmentedList(&Node, 2);
+ pub const DeclList = SegmentedList(*Node, 2);
- pub fn iterate(self: &ErrorSetDecl, index: usize) ?&Node {
+ pub fn iterate(self: *ErrorSetDecl, index: usize) ?*Node {
var i = index;
if (i < self.decls.len) return self.decls.at(i).*;
@@ -575,11 +575,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &ErrorSetDecl) TokenIndex {
+ pub fn firstToken(self: *ErrorSetDecl) TokenIndex {
return self.error_token;
}
- pub fn lastToken(self: &ErrorSetDecl) TokenIndex {
+ pub fn lastToken(self: *ErrorSetDecl) TokenIndex {
return self.rbrace_token;
}
};
@@ -597,11 +597,11 @@ pub const Node = struct {
const InitArg = union(enum) {
None,
- Enum: ?&Node,
- Type: &Node,
+ Enum: ?*Node,
+ Type: *Node,
};
- pub fn iterate(self: &ContainerDecl, index: usize) ?&Node {
+ pub fn iterate(self: *ContainerDecl, index: usize) ?*Node {
var i = index;
switch (self.init_arg_expr) {
@@ -618,26 +618,26 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &ContainerDecl) TokenIndex {
+ pub fn firstToken(self: *ContainerDecl) TokenIndex {
if (self.layout_token) |layout_token| {
return layout_token;
}
return self.kind_token;
}
- pub fn lastToken(self: &ContainerDecl) TokenIndex {
+ pub fn lastToken(self: *ContainerDecl) TokenIndex {
return self.rbrace_token;
}
};
pub const StructField = struct {
base: Node,
- doc_comments: ?&DocComment,
+ doc_comments: ?*DocComment,
visib_token: ?TokenIndex,
name_token: TokenIndex,
- type_expr: &Node,
+ type_expr: *Node,
- pub fn iterate(self: &StructField, index: usize) ?&Node {
+ pub fn iterate(self: *StructField, index: usize) ?*Node {
var i = index;
if (i < 1) return self.type_expr;
@@ -646,24 +646,24 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &StructField) TokenIndex {
+ pub fn firstToken(self: *StructField) TokenIndex {
if (self.visib_token) |visib_token| return visib_token;
return self.name_token;
}
- pub fn lastToken(self: &StructField) TokenIndex {
+ pub fn lastToken(self: *StructField) TokenIndex {
return self.type_expr.lastToken();
}
};
pub const UnionTag = struct {
base: Node,
- doc_comments: ?&DocComment,
+ doc_comments: ?*DocComment,
name_token: TokenIndex,
- type_expr: ?&Node,
- value_expr: ?&Node,
+ type_expr: ?*Node,
+ value_expr: ?*Node,
- pub fn iterate(self: &UnionTag, index: usize) ?&Node {
+ pub fn iterate(self: *UnionTag, index: usize) ?*Node {
var i = index;
if (self.type_expr) |type_expr| {
@@ -679,11 +679,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &UnionTag) TokenIndex {
+ pub fn firstToken(self: *UnionTag) TokenIndex {
return self.name_token;
}
- pub fn lastToken(self: &UnionTag) TokenIndex {
+ pub fn lastToken(self: *UnionTag) TokenIndex {
if (self.value_expr) |value_expr| {
return value_expr.lastToken();
}
@@ -697,11 +697,11 @@ pub const Node = struct {
pub const EnumTag = struct {
base: Node,
- doc_comments: ?&DocComment,
+ doc_comments: ?*DocComment,
name_token: TokenIndex,
- value: ?&Node,
+ value: ?*Node,
- pub fn iterate(self: &EnumTag, index: usize) ?&Node {
+ pub fn iterate(self: *EnumTag, index: usize) ?*Node {
var i = index;
if (self.value) |value| {
@@ -712,11 +712,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &EnumTag) TokenIndex {
+ pub fn firstToken(self: *EnumTag) TokenIndex {
return self.name_token;
}
- pub fn lastToken(self: &EnumTag) TokenIndex {
+ pub fn lastToken(self: *EnumTag) TokenIndex {
if (self.value) |value| {
return value.lastToken();
}
@@ -727,25 +727,25 @@ pub const Node = struct {
pub const ErrorTag = struct {
base: Node,
- doc_comments: ?&DocComment,
+ doc_comments: ?*DocComment,
name_token: TokenIndex,
- pub fn iterate(self: &ErrorTag, index: usize) ?&Node {
+ pub fn iterate(self: *ErrorTag, index: usize) ?*Node {
var i = index;
if (self.doc_comments) |comments| {
if (i < 1) return &comments.base;
i -= 1;
}
return null;
}
- pub fn firstToken(self: &ErrorTag) TokenIndex {
+ pub fn firstToken(self: *ErrorTag) TokenIndex {
return self.name_token;
}
- pub fn lastToken(self: &ErrorTag) TokenIndex {
+ pub fn lastToken(self: *ErrorTag) TokenIndex {
return self.name_token;
}
};
@@ -754,15 +754,15 @@ pub const Node = struct {
base: Node,
token: TokenIndex,
- pub fn iterate(self: &Identifier, index: usize) ?&Node {
+ pub fn iterate(self: *Identifier, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: &Identifier) TokenIndex {
+ pub fn firstToken(self: *Identifier) TokenIndex {
return self.token;
}
- pub fn lastToken(self: &Identifier) TokenIndex {
+ pub fn lastToken(self: *Identifier) TokenIndex {
return self.token;
}
};
@@ -770,10 +770,10 @@ pub const Node = struct {
pub const AsyncAttribute = struct {
base: Node,
async_token: TokenIndex,
- allocator_type: ?&Node,
+ allocator_type: ?*Node,
rangle_bracket: ?TokenIndex,
- pub fn iterate(self: &AsyncAttribute, index: usize) ?&Node {
+ pub fn iterate(self: *AsyncAttribute, index: usize) ?*Node {
var i = index;
if (self.allocator_type) |allocator_type| {
@@ -784,11 +784,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &AsyncAttribute) TokenIndex {
+ pub fn firstToken(self: *AsyncAttribute) TokenIndex {
return self.async_token;
}
- pub fn lastToken(self: &AsyncAttribute) TokenIndex {
+ pub fn lastToken(self: *AsyncAttribute) TokenIndex {
if (self.rangle_bracket) |rangle_bracket| {
return rangle_bracket;
}
@@ -799,7 +799,7 @@ pub const Node = struct {
pub const FnProto = struct {
base: Node,
- doc_comments: ?&DocComment,
+ doc_comments: ?*DocComment,
visib_token: ?TokenIndex,
fn_token: TokenIndex,
name_token: ?TokenIndex,
@@ -808,19 +808,19 @@ pub const Node = struct {
var_args_token: ?TokenIndex,
extern_export_inline_token: ?TokenIndex,
cc_token: ?TokenIndex,
- async_attr: ?&AsyncAttribute,
- body_node: ?&Node,
- lib_name: ?&Node, // populated if this is an extern declaration
- align_expr: ?&Node, // populated if align(A) is present
+ async_attr: ?*AsyncAttribute,
+ body_node: ?*Node,
+ lib_name: ?*Node, // populated if this is an extern declaration
+ align_expr: ?*Node, // populated if align(A) is present
- pub const ParamList = SegmentedList(&Node, 2);
+ pub const ParamList = SegmentedList(*Node, 2);
pub const ReturnType = union(enum) {
- Explicit: &Node,
- InferErrorSet: &Node,
+ Explicit: *Node,
+ InferErrorSet: *Node,
};
- pub fn iterate(self: &FnProto, index: usize) ?&Node {
+ pub fn iterate(self: *FnProto, index: usize) ?*Node {
var i = index;
if (self.lib_name) |lib_name| {
@@ -856,7 +856,7 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &FnProto) TokenIndex {
+ pub fn firstToken(self: *FnProto) TokenIndex {
if (self.visib_token) |visib_token| return visib_token;
if (self.extern_export_inline_token) |extern_export_inline_token| return extern_export_inline_token;
assert(self.lib_name == null);
@@ -864,7 +864,7 @@ pub const Node = struct {
return self.fn_token;
}
- pub fn lastToken(self: &FnProto) TokenIndex {
+ pub fn lastToken(self: *FnProto) TokenIndex {
if (self.body_node) |body_node| return body_node.lastToken();
switch (self.return_type) {
// TODO allow this and next prong to share bodies since the types are the same
@@ -881,10 +881,10 @@ pub const Node = struct {
pub const Result = struct {
arrow_token: TokenIndex,
- return_type: &Node,
+ return_type: *Node,
};
- pub fn iterate(self: &PromiseType, index: usize) ?&Node {
+ pub fn iterate(self: *PromiseType, index: usize) ?*Node {
var i = index;
if (self.result) |result| {
@@ -895,11 +895,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &PromiseType) TokenIndex {
+ pub fn firstToken(self: *PromiseType) TokenIndex {
return self.promise_token;
}
- pub fn lastToken(self: &PromiseType) TokenIndex {
+ pub fn lastToken(self: *PromiseType) TokenIndex {
if (self.result) |result| return result.return_type.lastToken();
return self.promise_token;
}
@@ -910,10 +910,10 @@ pub const Node = struct {
comptime_token: ?TokenIndex,
noalias_token: ?TokenIndex,
name_token: ?TokenIndex,
- type_node: &Node,
+ type_node: *Node,
var_args_token: ?TokenIndex,
- pub fn iterate(self: &ParamDecl, index: usize) ?&Node {
+ pub fn iterate(self: *ParamDecl, index: usize) ?*Node {
var i = index;
if (i < 1) return self.type_node;
@@ -922,14 +922,14 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &ParamDecl) TokenIndex {
+ pub fn firstToken(self: *ParamDecl) TokenIndex {
if (self.comptime_token) |comptime_token| return comptime_token;
if (self.noalias_token) |noalias_token| return noalias_token;
if (self.name_token) |name_token| return name_token;
return self.type_node.firstToken();
}
- pub fn lastToken(self: &ParamDecl) TokenIndex {
+ pub fn lastToken(self: *ParamDecl) TokenIndex {
if (self.var_args_token) |var_args_token| return var_args_token;
return self.type_node.lastToken();
}
@@ -944,7 +944,7 @@ pub const Node = struct {
pub const StatementList = Root.DeclList;
- pub fn iterate(self: &Block, index: usize) ?&Node {
+ pub fn iterate(self: *Block, index: usize) ?*Node {
var i = index;
if (i < self.statements.len) return self.statements.at(i).*;
@@ -953,7 +953,7 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &Block) TokenIndex {
+ pub fn firstToken(self: *Block) TokenIndex {
if (self.label) |label| {
return label;
}
@@ -961,7 +961,7 @@ pub const Node = struct {
return self.lbrace;
}
- pub fn lastToken(self: &Block) TokenIndex {
+ pub fn lastToken(self: *Block) TokenIndex {
return self.rbrace;
}
};
@@ -970,14 +970,14 @@ pub const Node = struct {
base: Node,
defer_token: TokenIndex,
kind: Kind,
- expr: &Node,
+ expr: *Node,
const Kind = enum {
Error,
Unconditional,
};
- pub fn iterate(self: &Defer, index: usize) ?&Node {
+ pub fn iterate(self: *Defer, index: usize) ?*Node {
var i = index;
if (i < 1) return self.expr;
@@ -986,22 +986,22 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &Defer) TokenIndex {
+ pub fn firstToken(self: *Defer) TokenIndex {
return self.defer_token;
}
- pub fn lastToken(self: &Defer) TokenIndex {
+ pub fn lastToken(self: *Defer) TokenIndex {
return self.expr.lastToken();
}
};
pub const Comptime = struct {
base: Node,
- doc_comments: ?&DocComment,
+ doc_comments: ?*DocComment,
comptime_token: TokenIndex,
- expr: &Node,
+ expr: *Node,
- pub fn iterate(self: &Comptime, index: usize) ?&Node {
+ pub fn iterate(self: *Comptime, index: usize) ?*Node {
var i = index;
if (i < 1) return self.expr;
@@ -1010,11 +1010,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &Comptime) TokenIndex {
+ pub fn firstToken(self: *Comptime) TokenIndex {
return self.comptime_token;
}
- pub fn lastToken(self: &Comptime) TokenIndex {
+ pub fn lastToken(self: *Comptime) TokenIndex {
return self.expr.lastToken();
}
};
@@ -1022,10 +1022,10 @@ pub const Node = struct {
pub const Payload = struct {
base: Node,
lpipe: TokenIndex,
- error_symbol: &Node,
+ error_symbol: *Node,
rpipe: TokenIndex,
- pub fn iterate(self: &Payload, index: usize) ?&Node {
+ pub fn iterate(self: *Payload, index: usize) ?*Node {
var i = index;
if (i < 1) return self.error_symbol;
@@ -1034,11 +1034,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &Payload) TokenIndex {
+ pub fn firstToken(self: *Payload) TokenIndex {
return self.lpipe;
}
- pub fn lastToken(self: &Payload) TokenIndex {
+ pub fn lastToken(self: *Payload) TokenIndex {
return self.rpipe;
}
};
@@ -1047,10 +1047,10 @@ pub const Node = struct {
base: Node,
lpipe: TokenIndex,
ptr_token: ?TokenIndex,
- value_symbol: &Node,
+ value_symbol: *Node,
rpipe: TokenIndex,
- pub fn iterate(self: &PointerPayload, index: usize) ?&Node {
+ pub fn iterate(self: *PointerPayload, index: usize) ?*Node {
var i = index;
if (i < 1) return self.value_symbol;
@@ -1059,11 +1059,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &PointerPayload) TokenIndex {
+ pub fn firstToken(self: *PointerPayload) TokenIndex {
return self.lpipe;
}
- pub fn lastToken(self: &PointerPayload) TokenIndex {
+ pub fn lastToken(self: *PointerPayload) TokenIndex {
return self.rpipe;
}
};
@@ -1072,11 +1072,11 @@ pub const Node = struct {
base: Node,
lpipe: TokenIndex,
ptr_token: ?TokenIndex,
- value_symbol: &Node,
- index_symbol: ?&Node,
+ value_symbol: *Node,
+ index_symbol: ?*Node,
rpipe: TokenIndex,
- pub fn iterate(self: &PointerIndexPayload, index: usize) ?&Node {
+ pub fn iterate(self: *PointerIndexPayload, index: usize) ?*Node {
var i = index;
if (i < 1) return self.value_symbol;
@@ -1090,11 +1090,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &PointerIndexPayload) TokenIndex {
+ pub fn firstToken(self: *PointerIndexPayload) TokenIndex {
return self.lpipe;
}
- pub fn lastToken(self: &PointerIndexPayload) TokenIndex {
+ pub fn lastToken(self: *PointerIndexPayload) TokenIndex {
return self.rpipe;
}
};
@@ -1102,10 +1102,10 @@ pub const Node = struct {
pub const Else = struct {
base: Node,
else_token: TokenIndex,
- payload: ?&Node,
- body: &Node,
+ payload: ?*Node,
+ body: *Node,
- pub fn iterate(self: &Else, index: usize) ?&Node {
+ pub fn iterate(self: *Else, index: usize) ?*Node {
var i = index;
if (self.payload) |payload| {
@@ -1119,11 +1119,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &Else) TokenIndex {
+ pub fn firstToken(self: *Else) TokenIndex {
return self.else_token;
}
- pub fn lastToken(self: &Else) TokenIndex {
+ pub fn lastToken(self: *Else) TokenIndex {
return self.body.lastToken();
}
};
@@ -1131,15 +1131,15 @@ pub const Node = struct {
pub const Switch = struct {
base: Node,
switch_token: TokenIndex,
- expr: &Node,
+ expr: *Node,
/// these must be SwitchCase nodes
cases: CaseList,
rbrace: TokenIndex,
- pub const CaseList = SegmentedList(&Node, 2);
+ pub const CaseList = SegmentedList(*Node, 2);
- pub fn iterate(self: &Switch, index: usize) ?&Node {
+ pub fn iterate(self: *Switch, index: usize) ?*Node {
var i = index;
if (i < 1) return self.expr;
@@ -1151,11 +1151,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &Switch) TokenIndex {
+ pub fn firstToken(self: *Switch) TokenIndex {
return self.switch_token;
}
- pub fn lastToken(self: &Switch) TokenIndex {
+ pub fn lastToken(self: *Switch) TokenIndex {
return self.rbrace;
}
};
@@ -1164,12 +1164,12 @@ pub const Node = struct {
base: Node,
items: ItemList,
arrow_token: TokenIndex,
- payload: ?&Node,
- expr: &Node,
+ payload: ?*Node,
+ expr: *Node,
- pub const ItemList = SegmentedList(&Node, 1);
+ pub const ItemList = SegmentedList(*Node, 1);
- pub fn iterate(self: &SwitchCase, index: usize) ?&Node {
+ pub fn iterate(self: *SwitchCase, index: usize) ?*Node {
var i = index;
if (i < self.items.len) return self.items.at(i).*;
@@ -1186,11 +1186,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &SwitchCase) TokenIndex {
+ pub fn firstToken(self: *SwitchCase) TokenIndex {
return (self.items.at(0).*).firstToken();
}
- pub fn lastToken(self: &SwitchCase) TokenIndex {
+ pub fn lastToken(self: *SwitchCase) TokenIndex {
return self.expr.lastToken();
}
};
@@ -1199,15 +1199,15 @@ pub const Node = struct {
base: Node,
token: TokenIndex,
- pub fn iterate(self: &SwitchElse, index: usize) ?&Node {
+ pub fn iterate(self: *SwitchElse, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: &SwitchElse) TokenIndex {
+ pub fn firstToken(self: *SwitchElse) TokenIndex {
return self.token;
}
- pub fn lastToken(self: &SwitchElse) TokenIndex {
+ pub fn lastToken(self: *SwitchElse) TokenIndex {
return self.token;
}
};
@@ -1217,13 +1217,13 @@ pub const Node = struct {
label: ?TokenIndex,
inline_token: ?TokenIndex,
while_token: TokenIndex,
- condition: &Node,
- payload: ?&Node,
- continue_expr: ?&Node,
- body: &Node,
- @"else": ?&Else,
+ condition: *Node,
+ payload: ?*Node,
+ continue_expr: ?*Node,
+ body: *Node,
+ @"else": ?*Else,
- pub fn iterate(self: &While, index: usize) ?&Node {
+ pub fn iterate(self: *While, index: usize) ?*Node {
var i = index;
if (i < 1) return self.condition;
@@ -1243,14 +1243,14 @@ pub const Node = struct {
i -= 1;
if (self.@"else") |@"else"| {
if (i < 1) return &@"else".base;
i -= 1;
}
return null;
}
- pub fn firstToken(self: &While) TokenIndex {
+ pub fn firstToken(self: *While) TokenIndex {
if (self.label) |label| {
return label;
}
@@ -1262,7 +1262,7 @@ pub const Node = struct {
return self.while_token;
}
- pub fn lastToken(self: &While) TokenIndex {
+ pub fn lastToken(self: *While) TokenIndex {
if (self.@"else") |@"else"| {
return @"else".body.lastToken();
}
@@ -1276,12 +1276,12 @@ pub const Node = struct {
label: ?TokenIndex,
inline_token: ?TokenIndex,
for_token: TokenIndex,
- array_expr: &Node,
- payload: ?&Node,
- body: &Node,
- @"else": ?&Else,
+ array_expr: *Node,
+ payload: ?*Node,
+ body: *Node,
+ @"else": ?*Else,
- pub fn iterate(self: &For, index: usize) ?&Node {
+ pub fn iterate(self: *For, index: usize) ?*Node {
var i = index;
if (i < 1) return self.array_expr;
@@ -1296,14 +1296,14 @@ pub const Node = struct {
i -= 1;
if (self.@"else") |@"else"| {
if (i < 1) return &@"else".base;
i -= 1;
}
return null;
}
- pub fn firstToken(self: &For) TokenIndex {
+ pub fn firstToken(self: *For) TokenIndex {
if (self.label) |label| {
return label;
}
@@ -1315,7 +1315,7 @@ pub const Node = struct {
return self.for_token;
}
- pub fn lastToken(self: &For) TokenIndex {
+ pub fn lastToken(self: *For) TokenIndex {
if (self.@"else") |@"else"| {
return @"else".body.lastToken();
}
@@ -1327,12 +1327,12 @@ pub const Node = struct {
pub const If = struct {
base: Node,
if_token: TokenIndex,
- condition: &Node,
- payload: ?&Node,
- body: &Node,
- @"else": ?&Else,
+ condition: *Node,
+ payload: ?*Node,
+ body: *Node,
+ @"else": ?*Else,
- pub fn iterate(self: &If, index: usize) ?&Node {
+ pub fn iterate(self: *If, index: usize) ?*Node {
var i = index;
if (i < 1) return self.condition;
@@ -1347,18 +1347,18 @@ pub const Node = struct {
i -= 1;
if (self.@"else") |@"else"| {
if (i < 1) return &@"else".base;
i -= 1;
}
return null;
}
- pub fn firstToken(self: &If) TokenIndex {
+ pub fn firstToken(self: *If) TokenIndex {
return self.if_token;
}
- pub fn lastToken(self: &If) TokenIndex {
+ pub fn lastToken(self: *If) TokenIndex {
if (self.@"else") |@"else"| {
return @"else".body.lastToken();
}
@@ -1370,9 +1370,9 @@ pub const Node = struct {
pub const InfixOp = struct {
base: Node,
op_token: TokenIndex,
- lhs: &Node,
+ lhs: *Node,
op: Op,
- rhs: &Node,
+ rhs: *Node,
pub const Op = union(enum) {
Add,
@@ -1401,7 +1401,7 @@ pub const Node = struct {
BitXor,
BoolAnd,
BoolOr,
- Catch: ?&Node,
+ Catch: ?*Node,
Div,
EqualEqual,
ErrorUnion,
@@ -1420,7 +1420,7 @@ pub const Node = struct {
UnwrapMaybe,
};
- pub fn iterate(self: &InfixOp, index: usize) ?&Node {
+ pub fn iterate(self: *InfixOp, index: usize) ?*Node {
var i = index;
if (i < 1) return self.lhs;
@@ -1485,11 +1485,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &InfixOp) TokenIndex {
+ pub fn firstToken(self: *InfixOp) TokenIndex {
return self.lhs.firstToken();
}
- pub fn lastToken(self: &InfixOp) TokenIndex {
+ pub fn lastToken(self: *InfixOp) TokenIndex {
return self.rhs.lastToken();
}
};
@@ -1498,42 +1498,42 @@ pub const Node = struct {
base: Node,
op_token: TokenIndex,
op: Op,
- rhs: &Node,
+ rhs: *Node,
pub const Op = union(enum) {
- AddrOf: AddrOfInfo,
- ArrayType: &Node,
+ AddressOf,
+ ArrayType: *Node,
Await,
BitNot,
BoolNot,
Cancel,
- PointerType,
MaybeType,
Negation,
NegationWrap,
Resume,
- SliceType: AddrOfInfo,
+ PtrType: PtrInfo,
+ SliceType: PtrInfo,
Try,
UnwrapMaybe,
};
- pub const AddrOfInfo = struct {
+ pub const PtrInfo = struct {
align_info: ?Align,
const_token: ?TokenIndex,
volatile_token: ?TokenIndex,
pub const Align = struct {
- node: &Node,
+ node: *Node,
bit_range: ?BitRange,
pub const BitRange = struct {
- start: &Node,
- end: &Node,
+ start: *Node,
+ end: *Node,
};
};
};
- pub fn iterate(self: &PrefixOp, index: usize) ?&Node {
+ pub fn iterate(self: *PrefixOp, index: usize) ?*Node {
var i = index;
switch (self.op) {
@@ -1573,11 +1573,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &PrefixOp) TokenIndex {
+ pub fn firstToken(self: *PrefixOp) TokenIndex {
return self.op_token;
}
- pub fn lastToken(self: &PrefixOp) TokenIndex {
+ pub fn lastToken(self: *PrefixOp) TokenIndex {
return self.rhs.lastToken();
}
};
@@ -1586,9 +1586,9 @@ pub const Node = struct {
base: Node,
period_token: TokenIndex,
name_token: TokenIndex,
- expr: &Node,
+ expr: *Node,
- pub fn iterate(self: &FieldInitializer, index: usize) ?&Node {
+ pub fn iterate(self: *FieldInitializer, index: usize) ?*Node {
var i = index;
if (i < 1) return self.expr;
@@ -1597,45 +1597,45 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &FieldInitializer) TokenIndex {
+ pub fn firstToken(self: *FieldInitializer) TokenIndex {
return self.period_token;
}
- pub fn lastToken(self: &FieldInitializer) TokenIndex {
+ pub fn lastToken(self: *FieldInitializer) TokenIndex {
return self.expr.lastToken();
}
};
pub const SuffixOp = struct {
base: Node,
- lhs: &Node,
+ lhs: *Node,
op: Op,
rtoken: TokenIndex,
pub const Op = union(enum) {
Call: Call,
- ArrayAccess: &Node,
+ ArrayAccess: *Node,
Slice: Slice,
ArrayInitializer: InitList,
StructInitializer: InitList,
Deref,
- pub const InitList = SegmentedList(&Node, 2);
+ pub const InitList = SegmentedList(*Node, 2);
pub const Call = struct {
params: ParamList,
- async_attr: ?&AsyncAttribute,
+ async_attr: ?*AsyncAttribute,
- pub const ParamList = SegmentedList(&Node, 2);
+ pub const ParamList = SegmentedList(*Node, 2);
};
pub const Slice = struct {
- start: &Node,
- end: ?&Node,
+ start: *Node,
+ end: ?*Node,
};
};
- pub fn iterate(self: &SuffixOp, index: usize) ?&Node {
+ pub fn iterate(self: *SuffixOp, index: usize) ?*Node {
var i = index;
if (i < 1) return self.lhs;
@@ -1673,7 +1673,7 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &SuffixOp) TokenIndex {
+ pub fn firstToken(self: *SuffixOp) TokenIndex {
switch (self.op) {
@TagType(Op).Call => |*call_info| if (call_info.async_attr) |async_attr| return async_attr.firstToken(),
else => {},
@@ -1681,7 +1681,7 @@ pub const Node = struct {
return self.lhs.firstToken();
}
- pub fn lastToken(self: &SuffixOp) TokenIndex {
+ pub fn lastToken(self: *SuffixOp) TokenIndex {
return self.rtoken;
}
};
@@ -1689,10 +1689,10 @@ pub const Node = struct {
pub const GroupedExpression = struct {
base: Node,
lparen: TokenIndex,
- expr: &Node,
+ expr: *Node,
rparen: TokenIndex,
- pub fn iterate(self: &GroupedExpression, index: usize) ?&Node {
+ pub fn iterate(self: *GroupedExpression, index: usize) ?*Node {
var i = index;
if (i < 1) return self.expr;
@@ -1701,11 +1701,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &GroupedExpression) TokenIndex {
+ pub fn firstToken(self: *GroupedExpression) TokenIndex {
return self.lparen;
}
- pub fn lastToken(self: &GroupedExpression) TokenIndex {
+ pub fn lastToken(self: *GroupedExpression) TokenIndex {
return self.rparen;
}
};
@@ -1714,15 +1714,15 @@ pub const Node = struct {
base: Node,
ltoken: TokenIndex,
kind: Kind,
- rhs: ?&Node,
+ rhs: ?*Node,
const Kind = union(enum) {
- Break: ?&Node,
- Continue: ?&Node,
+ Break: ?*Node,
+ Continue: ?*Node,
Return,
};
- pub fn iterate(self: &ControlFlowExpression, index: usize) ?&Node {
+ pub fn iterate(self: *ControlFlowExpression, index: usize) ?*Node {
var i = index;
switch (self.kind) {
@@ -1749,11 +1749,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &ControlFlowExpression) TokenIndex {
+ pub fn firstToken(self: *ControlFlowExpression) TokenIndex {
return self.ltoken;
}
- pub fn lastToken(self: &ControlFlowExpression) TokenIndex {
+ pub fn lastToken(self: *ControlFlowExpression) TokenIndex {
if (self.rhs) |rhs| {
return rhs.lastToken();
}
@@ -1780,10 +1780,10 @@ pub const Node = struct {
base: Node,
label: ?TokenIndex,
suspend_token: TokenIndex,
- payload: ?&Node,
- body: ?&Node,
+ payload: ?*Node,
+ body: ?*Node,
- pub fn iterate(self: &Suspend, index: usize) ?&Node {
+ pub fn iterate(self: *Suspend, index: usize) ?*Node {
var i = index;
if (self.payload) |payload| {
@@ -1799,12 +1799,12 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &Suspend) TokenIndex {
+ pub fn firstToken(self: *Suspend) TokenIndex {
if (self.label) |label| return label;
return self.suspend_token;
}
- pub fn lastToken(self: &Suspend) TokenIndex {
+ pub fn lastToken(self: *Suspend) TokenIndex {
if (self.body) |body| {
return body.lastToken();
}
@@ -1821,15 +1821,15 @@ pub const Node = struct {
base: Node,
token: TokenIndex,
- pub fn iterate(self: &IntegerLiteral, index: usize) ?&Node {
+ pub fn iterate(self: *IntegerLiteral, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: &IntegerLiteral) TokenIndex {
+ pub fn firstToken(self: *IntegerLiteral) TokenIndex {
return self.token;
}
- pub fn lastToken(self: &IntegerLiteral) TokenIndex {
+ pub fn lastToken(self: *IntegerLiteral) TokenIndex {
return self.token;
}
};
@@ -1838,15 +1838,15 @@ pub const Node = struct {
base: Node,
token: TokenIndex,
- pub fn iterate(self: &FloatLiteral, index: usize) ?&Node {
+ pub fn iterate(self: *FloatLiteral, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: &FloatLiteral) TokenIndex {
+ pub fn firstToken(self: *FloatLiteral) TokenIndex {
return self.token;
}
- pub fn lastToken(self: &FloatLiteral) TokenIndex {
+ pub fn lastToken(self: *FloatLiteral) TokenIndex {
return self.token;
}
};
@@ -1857,9 +1857,9 @@ pub const Node = struct {
params: ParamList,
rparen_token: TokenIndex,
- pub const ParamList = SegmentedList(&Node, 2);
+ pub const ParamList = SegmentedList(*Node, 2);
- pub fn iterate(self: &BuiltinCall, index: usize) ?&Node {
+ pub fn iterate(self: *BuiltinCall, index: usize) ?*Node {
var i = index;
if (i < self.params.len) return self.params.at(i).*;
@@ -1868,11 +1868,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &BuiltinCall) TokenIndex {
+ pub fn firstToken(self: *BuiltinCall) TokenIndex {
return self.builtin_token;
}
- pub fn lastToken(self: &BuiltinCall) TokenIndex {
+ pub fn lastToken(self: *BuiltinCall) TokenIndex {
return self.rparen_token;
}
};
@@ -1881,15 +1881,15 @@ pub const Node = struct {
base: Node,
token: TokenIndex,
- pub fn iterate(self: &StringLiteral, index: usize) ?&Node {
+ pub fn iterate(self: *StringLiteral, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: &StringLiteral) TokenIndex {
+ pub fn firstToken(self: *StringLiteral) TokenIndex {
return self.token;
}
- pub fn lastToken(self: &StringLiteral) TokenIndex {
+ pub fn lastToken(self: *StringLiteral) TokenIndex {
return self.token;
}
};
@@ -1900,15 +1900,15 @@ pub const Node = struct {
pub const LineList = SegmentedList(TokenIndex, 4);
- pub fn iterate(self: &MultilineStringLiteral, index: usize) ?&Node {
+ pub fn iterate(self: *MultilineStringLiteral, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: &MultilineStringLiteral) TokenIndex {
+ pub fn firstToken(self: *MultilineStringLiteral) TokenIndex {
return self.lines.at(0).*;
}
- pub fn lastToken(self: &MultilineStringLiteral) TokenIndex {
+ pub fn lastToken(self: *MultilineStringLiteral) TokenIndex {
return self.lines.at(self.lines.len - 1).*;
}
};
@@ -1917,15 +1917,15 @@ pub const Node = struct {
base: Node,
token: TokenIndex,
- pub fn iterate(self: &CharLiteral, index: usize) ?&Node {
+ pub fn iterate(self: *CharLiteral, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: &CharLiteral) TokenIndex {
+ pub fn firstToken(self: *CharLiteral) TokenIndex {
return self.token;
}
- pub fn lastToken(self: &CharLiteral) TokenIndex {
+ pub fn lastToken(self: *CharLiteral) TokenIndex {
return self.token;
}
};
@@ -1934,15 +1934,15 @@ pub const Node = struct {
base: Node,
token: TokenIndex,
- pub fn iterate(self: &BoolLiteral, index: usize) ?&Node {
+ pub fn iterate(self: *BoolLiteral, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: &BoolLiteral) TokenIndex {
+ pub fn firstToken(self: *BoolLiteral) TokenIndex {
return self.token;
}
- pub fn lastToken(self: &BoolLiteral) TokenIndex {
+ pub fn lastToken(self: *BoolLiteral) TokenIndex {
return self.token;
}
};
@@ -1951,15 +1951,15 @@ pub const Node = struct {
base: Node,
token: TokenIndex,
- pub fn iterate(self: &NullLiteral, index: usize) ?&Node {
+ pub fn iterate(self: *NullLiteral, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: &NullLiteral) TokenIndex {
+ pub fn firstToken(self: *NullLiteral) TokenIndex {
return self.token;
}
- pub fn lastToken(self: &NullLiteral) TokenIndex {
+ pub fn lastToken(self: *NullLiteral) TokenIndex {
return self.token;
}
};
@@ -1968,15 +1968,15 @@ pub const Node = struct {
base: Node,
token: TokenIndex,
- pub fn iterate(self: &UndefinedLiteral, index: usize) ?&Node {
+ pub fn iterate(self: *UndefinedLiteral, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: &UndefinedLiteral) TokenIndex {
+ pub fn firstToken(self: *UndefinedLiteral) TokenIndex {
return self.token;
}
- pub fn lastToken(self: &UndefinedLiteral) TokenIndex {
+ pub fn lastToken(self: *UndefinedLiteral) TokenIndex {
return self.token;
}
};
@@ -1985,15 +1985,15 @@ pub const Node = struct {
base: Node,
token: TokenIndex,
- pub fn iterate(self: &ThisLiteral, index: usize) ?&Node {
+ pub fn iterate(self: *ThisLiteral, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: &ThisLiteral) TokenIndex {
+ pub fn firstToken(self: *ThisLiteral) TokenIndex {
return self.token;
}
- pub fn lastToken(self: &ThisLiteral) TokenIndex {
+ pub fn lastToken(self: *ThisLiteral) TokenIndex {
return self.token;
}
};
@@ -2001,17 +2001,17 @@ pub const Node = struct {
pub const AsmOutput = struct {
base: Node,
lbracket: TokenIndex,
- symbolic_name: &Node,
- constraint: &Node,
+ symbolic_name: *Node,
+ constraint: *Node,
kind: Kind,
rparen: TokenIndex,
const Kind = union(enum) {
- Variable: &Identifier,
- Return: &Node,
+ Variable: *Identifier,
+ Return: *Node,
};
- pub fn iterate(self: &AsmOutput, index: usize) ?&Node {
+ pub fn iterate(self: *AsmOutput, index: usize) ?*Node {
var i = index;
if (i < 1) return self.symbolic_name;
@@ -2022,7 +2022,7 @@ pub const Node = struct {
switch (self.kind) {
Kind.Variable => |variable_name| {
if (i < 1) return &variable_name.base;
i -= 1;
},
Kind.Return => |return_type| {
@@ -2034,11 +2034,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &AsmOutput) TokenIndex {
+ pub fn firstToken(self: *AsmOutput) TokenIndex {
return self.lbracket;
}
- pub fn lastToken(self: &AsmOutput) TokenIndex {
+ pub fn lastToken(self: *AsmOutput) TokenIndex {
return self.rparen;
}
};
@@ -2046,12 +2046,12 @@ pub const Node = struct {
pub const AsmInput = struct {
base: Node,
lbracket: TokenIndex,
- symbolic_name: &Node,
- constraint: &Node,
- expr: &Node,
+ symbolic_name: *Node,
+ constraint: *Node,
+ expr: *Node,
rparen: TokenIndex,
- pub fn iterate(self: &AsmInput, index: usize) ?&Node {
+ pub fn iterate(self: *AsmInput, index: usize) ?*Node {
var i = index;
if (i < 1) return self.symbolic_name;
@@ -2066,11 +2066,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &AsmInput) TokenIndex {
+ pub fn firstToken(self: *AsmInput) TokenIndex {
return self.lbracket;
}
- pub fn lastToken(self: &AsmInput) TokenIndex {
+ pub fn lastToken(self: *AsmInput) TokenIndex {
return self.rparen;
}
};
@@ -2079,33 +2079,33 @@ pub const Node = struct {
base: Node,
asm_token: TokenIndex,
volatile_token: ?TokenIndex,
- template: &Node,
+ template: *Node,
outputs: OutputList,
inputs: InputList,
clobbers: ClobberList,
rparen: TokenIndex,
- const OutputList = SegmentedList(&AsmOutput, 2);
- const InputList = SegmentedList(&AsmInput, 2);
+ const OutputList = SegmentedList(*AsmOutput, 2);
+ const InputList = SegmentedList(*AsmInput, 2);
const ClobberList = SegmentedList(TokenIndex, 2);
- pub fn iterate(self: &Asm, index: usize) ?&Node {
+ pub fn iterate(self: *Asm, index: usize) ?*Node {
var i = index;
if (i < self.outputs.len) return &(self.outputs.at(index).*).base;
i -= self.outputs.len;
if (i < self.inputs.len) return &(self.inputs.at(index).*).base;
i -= self.inputs.len;
return null;
}
- pub fn firstToken(self: &Asm) TokenIndex {
+ pub fn firstToken(self: *Asm) TokenIndex {
return self.asm_token;
}
- pub fn lastToken(self: &Asm) TokenIndex {
+ pub fn lastToken(self: *Asm) TokenIndex {
return self.rparen;
}
};
@@ -2114,15 +2114,15 @@ pub const Node = struct {
base: Node,
token: TokenIndex,
- pub fn iterate(self: &Unreachable, index: usize) ?&Node {
+ pub fn iterate(self: *Unreachable, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: &Unreachable) TokenIndex {
+ pub fn firstToken(self: *Unreachable) TokenIndex {
return self.token;
}
- pub fn lastToken(self: &Unreachable) TokenIndex {
+ pub fn lastToken(self: *Unreachable) TokenIndex {
return self.token;
}
};
@@ -2131,15 +2131,15 @@ pub const Node = struct {
base: Node,
token: TokenIndex,
- pub fn iterate(self: &ErrorType, index: usize) ?&Node {
+ pub fn iterate(self: *ErrorType, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: &ErrorType) TokenIndex {
+ pub fn firstToken(self: *ErrorType) TokenIndex {
return self.token;
}
- pub fn lastToken(self: &ErrorType) TokenIndex {
+ pub fn lastToken(self: *ErrorType) TokenIndex {
return self.token;
}
};
@@ -2148,15 +2148,15 @@ pub const Node = struct {
base: Node,
token: TokenIndex,
- pub fn iterate(self: &VarType, index: usize) ?&Node {
+ pub fn iterate(self: *VarType, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: &VarType) TokenIndex {
+ pub fn firstToken(self: *VarType) TokenIndex {
return self.token;
}
- pub fn lastToken(self: &VarType) TokenIndex {
+ pub fn lastToken(self: *VarType) TokenIndex {
return self.token;
}
};
@@ -2167,27 +2167,27 @@ pub const Node = struct {
pub const LineList = SegmentedList(TokenIndex, 4);
- pub fn iterate(self: &DocComment, index: usize) ?&Node {
+ pub fn iterate(self: *DocComment, index: usize) ?*Node {
return null;
}
- pub fn firstToken(self: &DocComment) TokenIndex {
+ pub fn firstToken(self: *DocComment) TokenIndex {
return self.lines.at(0).*;
}
- pub fn lastToken(self: &DocComment) TokenIndex {
+ pub fn lastToken(self: *DocComment) TokenIndex {
return self.lines.at(self.lines.len - 1).*;
}
};
pub const TestDecl = struct {
base: Node,
- doc_comments: ?&DocComment,
+ doc_comments: ?*DocComment,
test_token: TokenIndex,
- name: &Node,
- body_node: &Node,
+ name: *Node,
+ body_node: *Node,
- pub fn iterate(self: &TestDecl, index: usize) ?&Node {
+ pub fn iterate(self: *TestDecl, index: usize) ?*Node {
var i = index;
if (i < 1) return self.body_node;
@@ -2196,11 +2196,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: &TestDecl) TokenIndex {
+ pub fn firstToken(self: *TestDecl) TokenIndex {
return self.test_token;
}
- pub fn lastToken(self: &TestDecl) TokenIndex {
+ pub fn lastToken(self: *TestDecl) TokenIndex {
return self.body_node.lastToken();
}
};
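One subtlety running through the ast.zig hunks above: field and return types move to *Node / ?*DocComment, but taking the address of an embedded base field inside a method body still uses &, as in Asm.iterate. A minimal sketch of that pattern under the new syntax, using a hypothetical Wrapper type rather than the real AST nodes:

    const assert = @import("std").debug.assert;

    const Base = struct {
        id: u8,
    };

    const Wrapper = struct {
        base: Base,

        // The return type is now written *Base, but the body still uses the
        // address-of operator & to point at the embedded field.
        fn basePtr(self: *Wrapper) *Base {
            return &self.base;
        }
    };

    test "address-of an embedded field keeps &" {
        var w = Wrapper{ .base = Base{ .id = 7 } };
        assert(Wrapper.basePtr(&w).id == 7);
    }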
diff --git a/std/zig/bench.zig b/std/zig/bench.zig
index c3b6b0d3d37e..59392889a686 100644
--- a/std/zig/bench.zig
+++ b/std/zig/bench.zig
@@ -24,15 +24,15 @@ pub fn main() !void {
const mb_per_sec = bytes_per_sec / (1024 * 1024);
var stdout_file = try std.io.getStdOut();
- const stdout = &std.io.FileOutStream.init(&stdout_file).stream;
+ const stdout = &std.io.FileOutStream.init(&stdout_file).stream;
try stdout.print("{.3} MB/s, {} KB used \n", mb_per_sec, memory_used / 1024);
}
fn testOnce() usize {
var fixed_buf_alloc = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
- var allocator = &fixed_buf_alloc.allocator;
+ var allocator = &fixed_buf_alloc.allocator;
var tokenizer = Tokenizer.init(source);
- var parser = Parser.init(&tokenizer, allocator, "(memory buffer)");
+ var parser = Parser.init(&tokenizer, allocator, "(memory buffer)");
_ = parser.parse() catch @panic("parse failure");
return fixed_buf_alloc.end_index;
}
diff --git a/std/zig/parse.zig b/std/zig/parse.zig
index 05554f5d3491..6adcf34c956f 100644
--- a/std/zig/parse.zig
+++ b/std/zig/parse.zig
@@ -9,7 +9,7 @@ const Error = ast.Error;
/// Result should be freed with tree.deinit() when there are
/// no more references to any of the tokens or nodes.
-pub fn parse(allocator: &mem.Allocator, source: []const u8) !ast.Tree {
+pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
var tree_arena = std.heap.ArenaAllocator.init(allocator);
errdefer tree_arena.deinit();
@@ -1533,14 +1533,14 @@ pub fn parse(allocator: &mem.Allocator, source: []const u8) !ast.Tree {
State.SliceOrArrayType => |node| {
if (eatToken(&tok_it, &tree, Token.Id.RBracket)) |_| {
node.op = ast.Node.PrefixOp.Op{
- .SliceType = ast.Node.PrefixOp.AddrOfInfo{
+ .SliceType = ast.Node.PrefixOp.PtrInfo{
.align_info = null,
.const_token = null,
.volatile_token = null,
},
};
stack.append(State{ .TypeExprBegin = OptionalCtx{ .Required = &node.rhs } }) catch unreachable;
- try stack.append(State{ .AddrOfModifiers = &node.op.SliceType });
+ try stack.append(State{ .PtrTypeModifiers = &node.op.SliceType });
continue;
}
@@ -1551,7 +1551,7 @@ pub fn parse(allocator: &mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
- State.AddrOfModifiers => |addr_of_info| {
+ State.PtrTypeModifiers => |addr_of_info| {
const token = nextToken(&tok_it, &tree);
const token_index = token.index;
const token_ptr = token.ptr;
@@ -1562,7 +1562,7 @@ pub fn parse(allocator: &mem.Allocator, source: []const u8) !ast.Tree {
((try tree.errors.addOne())).* = Error{ .ExtraAlignQualifier = Error.ExtraAlignQualifier{ .token = token_index } };
return tree;
}
- addr_of_info.align_info = ast.Node.PrefixOp.AddrOfInfo.Align{
+ addr_of_info.align_info = ast.Node.PrefixOp.PtrInfo.Align{
.node = undefined,
.bit_range = null,
};
@@ -1603,7 +1603,7 @@ pub fn parse(allocator: &mem.Allocator, source: []const u8) !ast.Tree {
const token = nextToken(&tok_it, &tree);
switch (token.ptr.id) {
Token.Id.Colon => {
- align_info.bit_range = ast.Node.PrefixOp.AddrOfInfo.Align.BitRange(undefined);
+ align_info.bit_range = ast.Node.PrefixOp.PtrInfo.Align.BitRange(undefined);
const bit_range = &??align_info.bit_range;
try stack.append(State{ .ExpectToken = Token.Id.RParen });
@@ -2220,7 +2220,7 @@ pub fn parse(allocator: &mem.Allocator, source: []const u8) !ast.Tree {
});
opt_ctx.store(&node.base);
- // Treat '**' token as two derefs
+ // Treat '**' token as two pointer types
if (token_ptr.id == Token.Id.AsteriskAsterisk) {
const child = try arena.construct(ast.Node.PrefixOp{
.base = ast.Node{ .id = ast.Node.Id.PrefixOp },
@@ -2233,8 +2233,8 @@ pub fn parse(allocator: &mem.Allocator, source: []const u8) !ast.Tree {
}
stack.append(State{ .TypeExprBegin = OptionalCtx{ .Required = &node.rhs } }) catch unreachable;
- if (node.op == ast.Node.PrefixOp.Op.AddrOf) {
- try stack.append(State{ .AddrOfModifiers = &node.op.AddrOf });
+ if (node.op == ast.Node.PrefixOp.Op.PtrType) {
+ try stack.append(State{ .PtrTypeModifiers = &node.op.PtrType });
}
continue;
} else {
@@ -2754,16 +2754,16 @@ pub fn parse(allocator: &mem.Allocator, source: []const u8) !ast.Tree {
}
const AnnotatedToken = struct {
- ptr: &Token,
+ ptr: *Token,
index: TokenIndex,
};
const TopLevelDeclCtx = struct {
- decls: &ast.Node.Root.DeclList,
+ decls: *ast.Node.Root.DeclList,
visib_token: ?TokenIndex,
extern_export_inline_token: ?AnnotatedToken,
- lib_name: ?&ast.Node,
- comments: ?&ast.Node.DocComment,
+ lib_name: ?*ast.Node,
+ comments: ?*ast.Node.DocComment,
};
const VarDeclCtx = struct {
@@ -2771,21 +2771,21 @@ const VarDeclCtx = struct {
visib_token: ?TokenIndex,
comptime_token: ?TokenIndex,
extern_export_token: ?TokenIndex,
- lib_name: ?&ast.Node,
- list: &ast.Node.Root.DeclList,
- comments: ?&ast.Node.DocComment,
+ lib_name: ?*ast.Node,
+ list: *ast.Node.Root.DeclList,
+ comments: ?*ast.Node.DocComment,
};
const TopLevelExternOrFieldCtx = struct {
visib_token: TokenIndex,
- container_decl: &ast.Node.ContainerDecl,
- comments: ?&ast.Node.DocComment,
+ container_decl: *ast.Node.ContainerDecl,
+ comments: ?*ast.Node.DocComment,
};
const ExternTypeCtx = struct {
opt_ctx: OptionalCtx,
extern_token: TokenIndex,
- comments: ?&ast.Node.DocComment,
+ comments: ?*ast.Node.DocComment,
};
const ContainerKindCtx = struct {
@@ -2795,24 +2795,24 @@ const ContainerKindCtx = struct {
const ExpectTokenSave = struct {
id: @TagType(Token.Id),
- ptr: &TokenIndex,
+ ptr: *TokenIndex,
};
const OptionalTokenSave = struct {
id: @TagType(Token.Id),
- ptr: &?TokenIndex,
+ ptr: *?TokenIndex,
};
const ExprListCtx = struct {
- list: &ast.Node.SuffixOp.Op.InitList,
+ list: *ast.Node.SuffixOp.Op.InitList,
end: Token.Id,
- ptr: &TokenIndex,
+ ptr: *TokenIndex,
};
fn ListSave(comptime List: type) type {
return struct {
- list: &List,
- ptr: &TokenIndex,
+ list: *List,
+ ptr: *TokenIndex,
};
}
@@ -2841,7 +2841,7 @@ const LoopCtx = struct {
const AsyncEndCtx = struct {
ctx: OptionalCtx,
- attribute: &ast.Node.AsyncAttribute,
+ attribute: *ast.Node.AsyncAttribute,
};
const ErrorTypeOrSetDeclCtx = struct {
@@ -2850,21 +2850,21 @@ const ErrorTypeOrSetDeclCtx = struct {
};
const ParamDeclEndCtx = struct {
- fn_proto: &ast.Node.FnProto,
- param_decl: &ast.Node.ParamDecl,
+ fn_proto: *ast.Node.FnProto,
+ param_decl: *ast.Node.ParamDecl,
};
const ComptimeStatementCtx = struct {
comptime_token: TokenIndex,
- block: &ast.Node.Block,
+ block: *ast.Node.Block,
};
const OptionalCtx = union(enum) {
- Optional: &?&ast.Node,
- RequiredNull: &?&ast.Node,
- Required: &&ast.Node,
+ Optional: *?*ast.Node,
+ RequiredNull: *?*ast.Node,
+ Required: **ast.Node,
- pub fn store(self: &const OptionalCtx, value: &ast.Node) void {
+ pub fn store(self: *const OptionalCtx, value: *ast.Node) void {
switch (self.*) {
OptionalCtx.Optional => |ptr| ptr.* = value,
OptionalCtx.RequiredNull => |ptr| ptr.* = value,
@@ -2872,7 +2872,7 @@ const OptionalCtx = union(enum) {
}
}
- pub fn get(self: &const OptionalCtx) ?&ast.Node {
+ pub fn get(self: *const OptionalCtx) ?*ast.Node {
switch (self.*) {
OptionalCtx.Optional => |ptr| return ptr.*,
OptionalCtx.RequiredNull => |ptr| return ??ptr.*,
@@ -2880,7 +2880,7 @@ const OptionalCtx = union(enum) {
}
}
- pub fn toRequired(self: &const OptionalCtx) OptionalCtx {
+ pub fn toRequired(self: *const OptionalCtx) OptionalCtx {
switch (self.*) {
OptionalCtx.Optional => |ptr| {
return OptionalCtx{ .RequiredNull = ptr };
@@ -2892,8 +2892,8 @@ const OptionalCtx = union(enum) {
};
const AddCommentsCtx = struct {
- node_ptr: &&ast.Node,
- comments: ?&ast.Node.DocComment,
+ node_ptr: **ast.Node,
+ comments: ?*ast.Node.DocComment,
};
const State = union(enum) {
@@ -2904,67 +2904,67 @@ const State = union(enum) {
TopLevelExternOrField: TopLevelExternOrFieldCtx,
ContainerKind: ContainerKindCtx,
- ContainerInitArgStart: &ast.Node.ContainerDecl,
- ContainerInitArg: &ast.Node.ContainerDecl,
- ContainerDecl: &ast.Node.ContainerDecl,
+ ContainerInitArgStart: *ast.Node.ContainerDecl,
+ ContainerInitArg: *ast.Node.ContainerDecl,
+ ContainerDecl: *ast.Node.ContainerDecl,
VarDecl: VarDeclCtx,
- VarDeclAlign: &ast.Node.VarDecl,
- VarDeclEq: &ast.Node.VarDecl,
- VarDeclSemiColon: &ast.Node.VarDecl,
-
- FnDef: &ast.Node.FnProto,
- FnProto: &ast.Node.FnProto,
- FnProtoAlign: &ast.Node.FnProto,
- FnProtoReturnType: &ast.Node.FnProto,
-
- ParamDecl: &ast.Node.FnProto,
- ParamDeclAliasOrComptime: &ast.Node.ParamDecl,
- ParamDeclName: &ast.Node.ParamDecl,
+ VarDeclAlign: *ast.Node.VarDecl,
+ VarDeclEq: *ast.Node.VarDecl,
+ VarDeclSemiColon: *ast.Node.VarDecl,
+
+ FnDef: *ast.Node.FnProto,
+ FnProto: *ast.Node.FnProto,
+ FnProtoAlign: *ast.Node.FnProto,
+ FnProtoReturnType: *ast.Node.FnProto,
+
+ ParamDecl: *ast.Node.FnProto,
+ ParamDeclAliasOrComptime: *ast.Node.ParamDecl,
+ ParamDeclName: *ast.Node.ParamDecl,
ParamDeclEnd: ParamDeclEndCtx,
- ParamDeclComma: &ast.Node.FnProto,
+ ParamDeclComma: *ast.Node.FnProto,
MaybeLabeledExpression: MaybeLabeledExpressionCtx,
LabeledExpression: LabelCtx,
Inline: InlineCtx,
While: LoopCtx,
- WhileContinueExpr: &?&ast.Node,
+ WhileContinueExpr: *?*ast.Node,
For: LoopCtx,
- Else: &?&ast.Node.Else,
+ Else: *?*ast.Node.Else,
- Block: &ast.Node.Block,
- Statement: &ast.Node.Block,
+ Block: *ast.Node.Block,
+ Statement: *ast.Node.Block,
ComptimeStatement: ComptimeStatementCtx,
- Semicolon: &&ast.Node,
+ Semicolon: **ast.Node,
- AsmOutputItems: &ast.Node.Asm.OutputList,
- AsmOutputReturnOrType: &ast.Node.AsmOutput,
- AsmInputItems: &ast.Node.Asm.InputList,
- AsmClobberItems: &ast.Node.Asm.ClobberList,
+ AsmOutputItems: *ast.Node.Asm.OutputList,
+ AsmOutputReturnOrType: *ast.Node.AsmOutput,
+ AsmInputItems: *ast.Node.Asm.InputList,
+ AsmClobberItems: *ast.Node.Asm.ClobberList,
ExprListItemOrEnd: ExprListCtx,
ExprListCommaOrEnd: ExprListCtx,
FieldInitListItemOrEnd: ListSave(ast.Node.SuffixOp.Op.InitList),
FieldInitListCommaOrEnd: ListSave(ast.Node.SuffixOp.Op.InitList),
- FieldListCommaOrEnd: &ast.Node.ContainerDecl,
+ FieldListCommaOrEnd: *ast.Node.ContainerDecl,
FieldInitValue: OptionalCtx,
ErrorTagListItemOrEnd: ListSave(ast.Node.ErrorSetDecl.DeclList),
ErrorTagListCommaOrEnd: ListSave(ast.Node.ErrorSetDecl.DeclList),
SwitchCaseOrEnd: ListSave(ast.Node.Switch.CaseList),
SwitchCaseCommaOrEnd: ListSave(ast.Node.Switch.CaseList),
- SwitchCaseFirstItem: &ast.Node.SwitchCase,
- SwitchCaseItemCommaOrEnd: &ast.Node.SwitchCase,
- SwitchCaseItemOrEnd: &ast.Node.SwitchCase,
+ SwitchCaseFirstItem: *ast.Node.SwitchCase,
+ SwitchCaseItemCommaOrEnd: *ast.Node.SwitchCase,
+ SwitchCaseItemOrEnd: *ast.Node.SwitchCase,
- SuspendBody: &ast.Node.Suspend,
- AsyncAllocator: &ast.Node.AsyncAttribute,
+ SuspendBody: *ast.Node.Suspend,
+ AsyncAllocator: *ast.Node.AsyncAttribute,
AsyncEnd: AsyncEndCtx,
ExternType: ExternTypeCtx,
- SliceOrArrayAccess: &ast.Node.SuffixOp,
- SliceOrArrayType: &ast.Node.PrefixOp,
- AddrOfModifiers: &ast.Node.PrefixOp.AddrOfInfo,
- AlignBitRange: &ast.Node.PrefixOp.AddrOfInfo.Align,
+ SliceOrArrayAccess: *ast.Node.SuffixOp,
+ SliceOrArrayType: *ast.Node.PrefixOp,
+ PtrTypeModifiers: *ast.Node.PrefixOp.PtrInfo,
+ AlignBitRange: *ast.Node.PrefixOp.PtrInfo.Align,
Payload: OptionalCtx,
PointerPayload: OptionalCtx,
@@ -3007,7 +3007,7 @@ const State = union(enum) {
ErrorTypeOrSetDecl: ErrorTypeOrSetDeclCtx,
StringLiteral: OptionalCtx,
Identifier: OptionalCtx,
- ErrorTag: &&ast.Node,
+ ErrorTag: **ast.Node,
IfToken: @TagType(Token.Id),
IfTokenSave: ExpectTokenSave,
@@ -3016,7 +3016,7 @@ const State = union(enum) {
OptionalTokenSave: OptionalTokenSave,
};
-fn pushDocComment(arena: &mem.Allocator, line_comment: TokenIndex, result: &?&ast.Node.DocComment) !void {
+fn pushDocComment(arena: *mem.Allocator, line_comment: TokenIndex, result: *?*ast.Node.DocComment) !void {
const node = blk: {
if (result.*) |comment_node| {
break :blk comment_node;
@@ -3032,8 +3032,8 @@ fn pushDocComment(arena: &mem.Allocator, line_comment: TokenIndex, result: &?&as
try node.lines.push(line_comment);
}
-fn eatDocComments(arena: &mem.Allocator, tok_it: &ast.Tree.TokenList.Iterator, tree: &ast.Tree) !?&ast.Node.DocComment {
- var result: ?&ast.Node.DocComment = null;
+fn eatDocComments(arena: *mem.Allocator, tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree) !?*ast.Node.DocComment {
+ var result: ?*ast.Node.DocComment = null;
while (true) {
if (eatToken(tok_it, tree, Token.Id.DocComment)) |line_comment| {
try pushDocComment(arena, line_comment, &result);
@@ -3044,7 +3044,7 @@ fn eatDocComments(arena: &mem.Allocator, tok_it: &ast.Tree.TokenList.Iterator, t
return result;
}
-fn parseStringLiteral(arena: &mem.Allocator, tok_it: &ast.Tree.TokenList.Iterator, token_ptr: &const Token, token_index: TokenIndex, tree: &ast.Tree) !?&ast.Node {
+fn parseStringLiteral(arena: *mem.Allocator, tok_it: *ast.Tree.TokenList.Iterator, token_ptr: *const Token, token_index: TokenIndex, tree: *ast.Tree) !?*ast.Node {
switch (token_ptr.id) {
Token.Id.StringLiteral => {
return &(try createLiteral(arena, ast.Node.StringLiteral, token_index)).base;
@@ -3071,11 +3071,11 @@ fn parseStringLiteral(arena: &mem.Allocator, tok_it: &ast.Tree.TokenList.Iterato
},
// TODO: We shouldn't need a cast, but:
// zig: /home/jc/Documents/zig/src/ir.cpp:7962: TypeTableEntry* ir_resolve_peer_types(IrAnalyze*, AstNode*, IrInstruction**, size_t): Assertion `err_set_type != nullptr' failed.
- else => return (?&ast.Node)(null),
+ else => return (?*ast.Node)(null),
}
}
-fn parseBlockExpr(stack: &std.ArrayList(State), arena: &mem.Allocator, ctx: &const OptionalCtx, token_ptr: &const Token, token_index: TokenIndex) !bool {
+fn parseBlockExpr(stack: *std.ArrayList(State), arena: *mem.Allocator, ctx: *const OptionalCtx, token_ptr: *const Token, token_index: TokenIndex) !bool {
switch (token_ptr.id) {
Token.Id.Keyword_suspend => {
const node = try arena.construct(ast.Node.Suspend{
@@ -3189,7 +3189,7 @@ const ExpectCommaOrEndResult = union(enum) {
parse_error: Error,
};
-fn expectCommaOrEnd(tok_it: &ast.Tree.TokenList.Iterator, tree: &ast.Tree, end: @TagType(Token.Id)) ExpectCommaOrEndResult {
+fn expectCommaOrEnd(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree, end: @TagType(Token.Id)) ExpectCommaOrEndResult {
const token = nextToken(tok_it, tree);
const token_index = token.index;
const token_ptr = token.ptr;
@@ -3212,7 +3212,7 @@ fn expectCommaOrEnd(tok_it: &ast.Tree.TokenList.Iterator, tree: &ast.Tree, end:
}
}
-fn tokenIdToAssignment(id: &const Token.Id) ?ast.Node.InfixOp.Op {
+fn tokenIdToAssignment(id: *const Token.Id) ?ast.Node.InfixOp.Op {
// TODO: We have to cast all cases because of this:
// error: expected type '?InfixOp', found '?@TagType(InfixOp)'
return switch (id.*) {
@@ -3291,9 +3291,9 @@ fn tokenIdToPrefixOp(id: @TagType(Token.Id)) ?ast.Node.PrefixOp.Op {
Token.Id.Tilde => ast.Node.PrefixOp.Op{ .BitNot = void{} },
Token.Id.Minus => ast.Node.PrefixOp.Op{ .Negation = void{} },
Token.Id.MinusPercent => ast.Node.PrefixOp.Op{ .NegationWrap = void{} },
- Token.Id.Asterisk, Token.Id.AsteriskAsterisk => ast.Node.PrefixOp.Op{ .PointerType = void{} },
- Token.Id.Ampersand => ast.Node.PrefixOp.Op{
- .AddrOf = ast.Node.PrefixOp.AddrOfInfo{
+ Token.Id.Ampersand => ast.Node.PrefixOp.Op{ .AddressOf = void{} },
+ Token.Id.Asterisk, Token.Id.AsteriskAsterisk => ast.Node.PrefixOp.Op{
+ .PtrType = ast.Node.PrefixOp.PtrInfo{
.align_info = null,
.const_token = null,
.volatile_token = null,
@@ -3307,21 +3307,21 @@ fn tokenIdToPrefixOp(id: @TagType(Token.Id)) ?ast.Node.PrefixOp.Op {
};
}
-fn createLiteral(arena: &mem.Allocator, comptime T: type, token_index: TokenIndex) !&T {
+fn createLiteral(arena: *mem.Allocator, comptime T: type, token_index: TokenIndex) !*T {
return arena.construct(T{
.base = ast.Node{ .id = ast.Node.typeToId(T) },
.token = token_index,
});
}
-fn createToCtxLiteral(arena: &mem.Allocator, opt_ctx: &const OptionalCtx, comptime T: type, token_index: TokenIndex) !&T {
+fn createToCtxLiteral(arena: *mem.Allocator, opt_ctx: *const OptionalCtx, comptime T: type, token_index: TokenIndex) !*T {
const node = try createLiteral(arena, T, token_index);
opt_ctx.store(&node.base);
return node;
}
-fn eatToken(tok_it: &ast.Tree.TokenList.Iterator, tree: &ast.Tree, id: @TagType(Token.Id)) ?TokenIndex {
+fn eatToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree, id: @TagType(Token.Id)) ?TokenIndex {
const token = ??tok_it.peek();
if (token.id == id) {
@@ -3331,7 +3331,7 @@ fn eatToken(tok_it: &ast.Tree.TokenList.Iterator, tree: &ast.Tree, id: @TagType(
return null;
}
-fn nextToken(tok_it: &ast.Tree.TokenList.Iterator, tree: &ast.Tree) AnnotatedToken {
+fn nextToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree) AnnotatedToken {
const result = AnnotatedToken{
.index = tok_it.index,
.ptr = ??tok_it.next(),
@@ -3345,7 +3345,7 @@ fn nextToken(tok_it: &ast.Tree.TokenList.Iterator, tree: &ast.Tree) AnnotatedTok
}
}
-fn prevToken(tok_it: &ast.Tree.TokenList.Iterator, tree: &ast.Tree) void {
+fn prevToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree) void {
while (true) {
const prev_tok = tok_it.prev() ?? return;
if (prev_tok.id == Token.Id.LineComment) continue;
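To summarize what the tokenIdToPrefixOp and PtrTypeModifiers changes above mean at the source level: & now maps only to PrefixOp.Op.AddressOf, while * and ** build PrefixOp.Op.PtrType nodes whose PtrInfo can carry align, const and volatile modifiers. A rough sketch of the forms the parser now distinguishes (illustrative only, not taken from the patch):

    const assert = @import("std").debug.assert;

    test "prefix '&' versus prefix '*' after this change" {
        var a: i32 = 0;
        const p = &a;                     // Op.AddressOf: a plain address-of expression
        const T1 = *const i32;            // Op.PtrType with const_token set
        const T2 = *align(4) volatile u8; // Op.PtrType with align_info and volatile_token
        const T3 = **u8;                  // a '**' token parses as two nested PtrType nodes
        comptime assert(T3 == *(*u8));
        assert(p.* == 0);
    }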
diff --git a/std/zig/parser_test.zig b/std/zig/parser_test.zig
index 69903bc3fd82..bad677580cb3 100644
--- a/std/zig/parser_test.zig
+++ b/std/zig/parser_test.zig
@@ -529,7 +529,7 @@ test "zig fmt: line comment after doc comment" {
test "zig fmt: float literal with exponent" {
try testCanonical(
\\test "bit field alignment" {
- \\ assert(@typeOf(&blah.b) == &align(1:3:6) const u3);
+ \\ assert(@typeOf(&blah.b) == *align(1:3:6) const u3);
\\}
\\
);
@@ -1040,7 +1040,7 @@ test "zig fmt: alignment" {
test "zig fmt: C main" {
try testCanonical(
- \\fn main(argc: c_int, argv: &&u8) c_int {
+ \\fn main(argc: c_int, argv: **u8) c_int {
\\ const a = b;
\\}
\\
@@ -1049,7 +1049,7 @@ test "zig fmt: C main" {
test "zig fmt: return" {
try testCanonical(
- \\fn foo(argc: c_int, argv: &&u8) c_int {
+ \\fn foo(argc: c_int, argv: **u8) c_int {
\\ return 0;
\\}
\\
@@ -1062,20 +1062,20 @@ test "zig fmt: return" {
test "zig fmt: pointer attributes" {
try testCanonical(
- \\extern fn f1(s: &align(&u8) u8) c_int;
- \\extern fn f2(s: &&align(1) &const &volatile u8) c_int;
- \\extern fn f3(s: &align(1) const &align(1) volatile &const volatile u8) c_int;
- \\extern fn f4(s: &align(1) const volatile u8) c_int;
+ \\extern fn f1(s: *align(*u8) u8) c_int;
+ \\extern fn f2(s: **align(1) *const *volatile u8) c_int;
+ \\extern fn f3(s: *align(1) const *align(1) volatile *const volatile u8) c_int;
+ \\extern fn f4(s: *align(1) const volatile u8) c_int;
\\
);
}
test "zig fmt: slice attributes" {
try testCanonical(
- \\extern fn f1(s: &align(&u8) u8) c_int;
- \\extern fn f2(s: &&align(1) &const &volatile u8) c_int;
- \\extern fn f3(s: &align(1) const &align(1) volatile &const volatile u8) c_int;
- \\extern fn f4(s: &align(1) const volatile u8) c_int;
+ \\extern fn f1(s: *align(*u8) u8) c_int;
+ \\extern fn f2(s: **align(1) *const *volatile u8) c_int;
+ \\extern fn f3(s: *align(1) const *align(1) volatile *const volatile u8) c_int;
+ \\extern fn f4(s: *align(1) const volatile u8) c_int;
\\
);
}
@@ -1212,18 +1212,18 @@ test "zig fmt: var type" {
test "zig fmt: functions" {
try testCanonical(
- \\extern fn puts(s: &const u8) c_int;
- \\extern "c" fn puts(s: &const u8) c_int;
- \\export fn puts(s: &const u8) c_int;
- \\inline fn puts(s: &const u8) c_int;
- \\pub extern fn puts(s: &const u8) c_int;
- \\pub extern "c" fn puts(s: &const u8) c_int;
- \\pub export fn puts(s: &const u8) c_int;
- \\pub inline fn puts(s: &const u8) c_int;
- \\pub extern fn puts(s: &const u8) align(2 + 2) c_int;
- \\pub extern "c" fn puts(s: &const u8) align(2 + 2) c_int;
- \\pub export fn puts(s: &const u8) align(2 + 2) c_int;
- \\pub inline fn puts(s: &const u8) align(2 + 2) c_int;
+ \\extern fn puts(s: *const u8) c_int;
+ \\extern "c" fn puts(s: *const u8) c_int;
+ \\export fn puts(s: *const u8) c_int;
+ \\inline fn puts(s: *const u8) c_int;
+ \\pub extern fn puts(s: *const u8) c_int;
+ \\pub extern "c" fn puts(s: *const u8) c_int;
+ \\pub export fn puts(s: *const u8) c_int;
+ \\pub inline fn puts(s: *const u8) c_int;
+ \\pub extern fn puts(s: *const u8) align(2 + 2) c_int;
+ \\pub extern "c" fn puts(s: *const u8) align(2 + 2) c_int;
+ \\pub export fn puts(s: *const u8) align(2 + 2) c_int;
+ \\pub inline fn puts(s: *const u8) align(2 + 2) c_int;
\\
);
}
@@ -1298,8 +1298,8 @@ test "zig fmt: struct declaration" {
\\ f1: u8,
\\ pub f3: u8,
\\
- \\ fn method(self: &Self) Self {
- \\ return *self;
+ \\ fn method(self: *Self) Self {
+ \\ return self.*;
\\ }
\\
\\ f2: u8,
@@ -1803,7 +1803,7 @@ const io = std.io;
var fixed_buffer_mem: [100 * 1024]u8 = undefined;
-fn testParse(source: []const u8, allocator: &mem.Allocator, anything_changed: &bool) ![]u8 {
+fn testParse(source: []const u8, allocator: *mem.Allocator, anything_changed: *bool) ![]u8 {
var stderr_file = try io.getStdErr();
var stderr = &io.FileOutStream.init(&stderr_file).stream;
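A note on the parser_test.zig expectations above: the canonical form no longer contains a prefix dereference (return *self becomes return self.*), and argv: **u8 now denotes a pointer to a pointer rather than two derefs. A small standalone example in the new style:

    const assert = @import("std").debug.assert;

    test "postfix dereference and '**' in the new syntax" {
        var value: u32 = 0;
        var p: *u32 = &value;  // pointer type is written *u32
        p.* += 1;              // dereference is postfix .*; prefix *p is no longer a deref
        assert(value == 1);
        const pp: **u32 = &p;  // '**' means pointer-to-pointer
        assert(pp.*.* == 1);
    }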
diff --git a/std/zig/render.zig b/std/zig/render.zig
index ac07917ff141..147adc62216c 100644
--- a/std/zig/render.zig
+++ b/std/zig/render.zig
@@ -13,7 +13,7 @@ pub const Error = error{
};
/// Returns whether anything changed
-pub fn render(allocator: &mem.Allocator, stream: var, tree: &ast.Tree) (@typeOf(stream).Child.Error || Error)!bool {
+pub fn render(allocator: *mem.Allocator, stream: var, tree: *ast.Tree) (@typeOf(stream).Child.Error || Error)!bool {
comptime assert(@typeId(@typeOf(stream)) == builtin.TypeId.Pointer);
var anything_changed: bool = false;
@@ -24,13 +24,13 @@ pub fn render(allocator: &mem.Allocator, stream: var, tree: &ast.Tree) (@typeOf(
const StreamError = @typeOf(stream).Child.Error;
const Stream = std.io.OutStream(StreamError);
- anything_changed_ptr: &bool,
+ anything_changed_ptr: *bool,
child_stream: @typeOf(stream),
stream: Stream,
source_index: usize,
source: []const u8,
- fn write(iface_stream: &Stream, bytes: []const u8) StreamError!void {
+ fn write(iface_stream: *Stream, bytes: []const u8) StreamError!void {
const self = @fieldParentPtr(MyStream, "stream", iface_stream);
if (!self.anything_changed_ptr.*) {
@@ -63,9 +63,9 @@ pub fn render(allocator: &mem.Allocator, stream: var, tree: &ast.Tree) (@typeOf(
}
fn renderRoot(
- allocator: &mem.Allocator,
+ allocator: *mem.Allocator,
stream: var,
- tree: &ast.Tree,
+ tree: *ast.Tree,
) (@typeOf(stream).Child.Error || Error)!void {
// render all the line comments at the beginning of the file
var tok_it = tree.tokens.iterator(0);
@@ -90,7 +90,7 @@ fn renderRoot(
}
}
-fn renderExtraNewline(tree: &ast.Tree, stream: var, start_col: &usize, node: &ast.Node) !void {
+fn renderExtraNewline(tree: *ast.Tree, stream: var, start_col: *usize, node: *ast.Node) !void {
const first_token = node.firstToken();
var prev_token = first_token;
while (tree.tokens.at(prev_token - 1).id == Token.Id.DocComment) {
@@ -104,7 +104,7 @@ fn renderExtraNewline(tree: &ast.Tree, stream: var, start_col: &usize, node: &as
}
}
-fn renderTopLevelDecl(allocator: &mem.Allocator, stream: var, tree: &ast.Tree, indent: usize, start_col: &usize, decl: &ast.Node) (@typeOf(stream).Child.Error || Error)!void {
+fn renderTopLevelDecl(allocator: *mem.Allocator, stream: var, tree: *ast.Tree, indent: usize, start_col: *usize, decl: *ast.Node) (@typeOf(stream).Child.Error || Error)!void {
switch (decl.id) {
ast.Node.Id.FnProto => {
const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", decl);
@@ -214,12 +214,12 @@ fn renderTopLevelDecl(allocator: &mem.Allocator, stream: var, tree: &ast.Tree, i
}
fn renderExpression(
- allocator: &mem.Allocator,
+ allocator: *mem.Allocator,
stream: var,
- tree: &ast.Tree,
+ tree: *ast.Tree,
indent: usize,
- start_col: &usize,
- base: &ast.Node,
+ start_col: *usize,
+ base: *ast.Node,
space: Space,
) (@typeOf(stream).Child.Error || Error)!void {
switch (base.id) {
@@ -343,9 +343,13 @@ fn renderExpression(
const prefix_op_node = @fieldParentPtr(ast.Node.PrefixOp, "base", base);
switch (prefix_op_node.op) {
- ast.Node.PrefixOp.Op.AddrOf => |addr_of_info| {
- try renderToken(tree, stream, prefix_op_node.op_token, indent, start_col, Space.None); // &
- if (addr_of_info.align_info) |align_info| {
+ ast.Node.PrefixOp.Op.PtrType => |ptr_info| {
+ const star_offset = switch (tree.tokens.at(prefix_op_node.op_token).id) {
+ Token.Id.AsteriskAsterisk => usize(1),
+ else => usize(0),
+ };
+ try renderTokenOffset(tree, stream, prefix_op_node.op_token, indent, start_col, Space.None, star_offset); // *
+ if (ptr_info.align_info) |align_info| {
const lparen_token = tree.prevToken(align_info.node.firstToken());
const align_token = tree.prevToken(lparen_token);
@@ -370,19 +374,19 @@ fn renderExpression(
try renderToken(tree, stream, rparen_token, indent, start_col, Space.Space); // )
}
}
- if (addr_of_info.const_token) |const_token| {
+ if (ptr_info.const_token) |const_token| {
try renderToken(tree, stream, const_token, indent, start_col, Space.Space); // const
}
- if (addr_of_info.volatile_token) |volatile_token| {
+ if (ptr_info.volatile_token) |volatile_token| {
try renderToken(tree, stream, volatile_token, indent, start_col, Space.Space); // volatile
}
},
- ast.Node.PrefixOp.Op.SliceType => |addr_of_info| {
+ ast.Node.PrefixOp.Op.SliceType => |ptr_info| {
try renderToken(tree, stream, prefix_op_node.op_token, indent, start_col, Space.None); // [
try renderToken(tree, stream, tree.nextToken(prefix_op_node.op_token), indent, start_col, Space.None); // ]
- if (addr_of_info.align_info) |align_info| {
+ if (ptr_info.align_info) |align_info| {
const lparen_token = tree.prevToken(align_info.node.firstToken());
const align_token = tree.prevToken(lparen_token);
@@ -407,10 +411,10 @@ fn renderExpression(
try renderToken(tree, stream, rparen_token, indent, start_col, Space.Space); // )
}
}
- if (addr_of_info.const_token) |const_token| {
+ if (ptr_info.const_token) |const_token| {
try renderToken(tree, stream, const_token, indent, start_col, Space.Space);
}
- if (addr_of_info.volatile_token) |volatile_token| {
+ if (ptr_info.volatile_token) |volatile_token| {
try renderToken(tree, stream, volatile_token, indent, start_col, Space.Space);
}
},
@@ -426,7 +430,7 @@ fn renderExpression(
ast.Node.PrefixOp.Op.NegationWrap,
ast.Node.PrefixOp.Op.UnwrapMaybe,
ast.Node.PrefixOp.Op.MaybeType,
- ast.Node.PrefixOp.Op.PointerType,
+ ast.Node.PrefixOp.Op.AddressOf,
=> {
try renderToken(tree, stream, prefix_op_node.op_token, indent, start_col, Space.None);
},
@@ -1640,12 +1644,12 @@ fn renderExpression(
}
fn renderVarDecl(
- allocator: &mem.Allocator,
+ allocator: *mem.Allocator,
stream: var,
- tree: &ast.Tree,
+ tree: *ast.Tree,
indent: usize,
- start_col: &usize,
- var_decl: &ast.Node.VarDecl,
+ start_col: *usize,
+ var_decl: *ast.Node.VarDecl,
) (@typeOf(stream).Child.Error || Error)!void {
if (var_decl.visib_token) |visib_token| {
try renderToken(tree, stream, visib_token, indent, start_col, Space.Space); // pub
@@ -1696,12 +1700,12 @@ fn renderVarDecl(
}
fn renderParamDecl(
- allocator: &mem.Allocator,
+ allocator: *mem.Allocator,
stream: var,
- tree: &ast.Tree,
+ tree: *ast.Tree,
indent: usize,
- start_col: &usize,
- base: &ast.Node,
+ start_col: *usize,
+ base: *ast.Node,
space: Space,
) (@typeOf(stream).Child.Error || Error)!void {
const param_decl = @fieldParentPtr(ast.Node.ParamDecl, "base", base);
@@ -1724,12 +1728,12 @@ fn renderParamDecl(
}
fn renderStatement(
- allocator: &mem.Allocator,
+ allocator: *mem.Allocator,
stream: var,
- tree: &ast.Tree,
+ tree: *ast.Tree,
indent: usize,
- start_col: &usize,
- base: &ast.Node,
+ start_col: *usize,
+ base: *ast.Node,
) (@typeOf(stream).Child.Error || Error)!void {
switch (base.id) {
ast.Node.Id.VarDecl => {
@@ -1761,7 +1765,15 @@ const Space = enum {
BlockStart,
};
-fn renderToken(tree: &ast.Tree, stream: var, token_index: ast.TokenIndex, indent: usize, start_col: &usize, space: Space) (@typeOf(stream).Child.Error || Error)!void {
+fn renderTokenOffset(
+ tree: *ast.Tree,
+ stream: var,
+ token_index: ast.TokenIndex,
+ indent: usize,
+ start_col: *usize,
+ space: Space,
+ token_skip_bytes: usize,
+) (@typeOf(stream).Child.Error || Error)!void {
if (space == Space.BlockStart) {
if (start_col.* < indent + indent_delta)
return renderToken(tree, stream, token_index, indent, start_col, Space.Space);
@@ -1772,7 +1784,7 @@ fn renderToken(tree: &ast.Tree, stream: var, token_index: ast.TokenIndex, indent
}
var token = tree.tokens.at(token_index);
- try stream.write(mem.trimRight(u8, tree.tokenSlicePtr(token), " "));
+ try stream.write(mem.trimRight(u8, tree.tokenSlicePtr(token)[token_skip_bytes..], " "));
if (space == Space.NoComment)
return;
@@ -1927,12 +1939,23 @@ fn renderToken(tree: &ast.Tree, stream: var, token_index: ast.TokenIndex, indent
}
}
+fn renderToken(
+ tree: *ast.Tree,
+ stream: var,
+ token_index: ast.TokenIndex,
+ indent: usize,
+ start_col: *usize,
+ space: Space,
+) (@typeOf(stream).Child.Error || Error)!void {
+ return renderTokenOffset(tree, stream, token_index, indent, start_col, space, 0);
+}
+
fn renderDocComments(
- tree: &ast.Tree,
+ tree: *ast.Tree,
stream: var,
node: var,
indent: usize,
- start_col: &usize,
+ start_col: *usize,
) (@typeOf(stream).Child.Error || Error)!void {
const comment = node.doc_comments ?? return;
var it = comment.lines.iterator(0);
@@ -1949,7 +1972,7 @@ fn renderDocComments(
}
}
-fn nodeIsBlock(base: &const ast.Node) bool {
+fn nodeIsBlock(base: *const ast.Node) bool {
return switch (base.id) {
ast.Node.Id.Block,
ast.Node.Id.If,
@@ -1961,7 +1984,7 @@ fn nodeIsBlock(base: &const ast.Node) bool {
};
}
-fn nodeCausesSliceOpSpace(base: &ast.Node) bool {
+fn nodeCausesSliceOpSpace(base: *ast.Node) bool {
const infix_op = base.cast(ast.Node.InfixOp) ?? return false;
return switch (infix_op.op) {
ast.Node.InfixOp.Op.Period => false,
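The renderTokenOffset helper added above exists because a single '**' token now corresponds to two nested pointer-type nodes: each node prints the token slice with star_offset bytes skipped, so exactly one '*' is written per node. A toy illustration of the slicing idea (not the real renderer, which also handles trailing comments and spacing):

    const std = @import("std");
    const assert = std.debug.assert;

    test "one star per node from a '**' token" {
        const token_slice = "**";      // slice of an AsteriskAsterisk token
        const star_offset: usize = 1;  // skip one byte when the op token is '**'
        // each of the two nested PtrType nodes emits token_slice[star_offset..]
        assert(std.mem.eql(u8, token_slice[star_offset..], "*"));
    }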
diff --git a/std/zig/tokenizer.zig b/std/zig/tokenizer.zig
index 7c3b3210fb89..8378a9011d5a 100644
--- a/std/zig/tokenizer.zig
+++ b/std/zig/tokenizer.zig
@@ -200,7 +200,7 @@ pub const Tokenizer = struct {
pending_invalid_token: ?Token,
/// For debugging purposes
- pub fn dump(self: &Tokenizer, token: &const Token) void {
+ pub fn dump(self: *Tokenizer, token: *const Token) void {
std.debug.warn("{} \"{}\"\n", @tagName(token.id), self.buffer[token.start..token.end]);
}
@@ -265,7 +265,7 @@ pub const Tokenizer = struct {
SawAtSign,
};
- pub fn next(self: &Tokenizer) Token {
+ pub fn next(self: *Tokenizer) Token {
if (self.pending_invalid_token) |token| {
self.pending_invalid_token = null;
return token;
@@ -1089,7 +1089,7 @@ pub const Tokenizer = struct {
return result;
}
- fn checkLiteralCharacter(self: &Tokenizer) void {
+ fn checkLiteralCharacter(self: *Tokenizer) void {
if (self.pending_invalid_token != null) return;
const invalid_length = self.getInvalidCharacterLength();
if (invalid_length == 0) return;
@@ -1100,7 +1100,7 @@ pub const Tokenizer = struct {
};
}
- fn getInvalidCharacterLength(self: &Tokenizer) u3 {
+ fn getInvalidCharacterLength(self: *Tokenizer) u3 {
const c0 = self.buffer[self.index];
if (c0 < 0x80) {
if (c0 < 0x20 or c0 == 0x7f) {
diff --git a/test/assemble_and_link.zig b/test/assemble_and_link.zig
index 2593f3306a54..8c727e87b56e 100644
--- a/test/assemble_and_link.zig
+++ b/test/assemble_and_link.zig
@@ -1,7 +1,7 @@
const builtin = @import("builtin");
const tests = @import("tests.zig");
-pub fn addCases(cases: &tests.CompareOutputContext) void {
+pub fn addCases(cases: *tests.CompareOutputContext) void {
if (builtin.os == builtin.Os.linux and builtin.arch == builtin.Arch.x86_64) {
cases.addAsm("hello world linux x86_64",
\\.text
diff --git a/test/build_examples.zig b/test/build_examples.zig
index 7a4c0f35d97a..1ba0ca46cf8f 100644
--- a/test/build_examples.zig
+++ b/test/build_examples.zig
@@ -2,7 +2,7 @@ const tests = @import("tests.zig");
const builtin = @import("builtin");
const is_windows = builtin.os == builtin.Os.windows;
-pub fn addCases(cases: &tests.BuildExamplesContext) void {
+pub fn addCases(cases: *tests.BuildExamplesContext) void {
cases.add("example/hello_world/hello.zig");
cases.addC("example/hello_world/hello_libc.zig");
cases.add("example/cat/main.zig");
diff --git a/test/cases/align.zig b/test/cases/align.zig
index 582063766f97..99bdcdf940ee 100644
--- a/test/cases/align.zig
+++ b/test/cases/align.zig
@@ -5,7 +5,7 @@ var foo: u8 align(4) = 100;
test "global variable alignment" {
assert(@typeOf(&foo).alignment == 4);
- assert(@typeOf(&foo) == &align(4) u8);
+ assert(@typeOf(&foo) == *align(4) u8);
const slice = (&foo)[0..1];
assert(@typeOf(slice) == []align(4) u8);
}
@@ -30,7 +30,7 @@ var baz: packed struct {
} = undefined;
test "packed struct alignment" {
- assert(@typeOf(&baz.b) == &align(1) u32);
+ assert(@typeOf(&baz.b) == *align(1) u32);
}
const blah: packed struct {
@@ -40,11 +40,11 @@ const blah: packed struct {
} = undefined;
test "bit field alignment" {
- assert(@typeOf(&blah.b) == &align(1:3:6) const u3);
+ assert(@typeOf(&blah.b) == *align(1:3:6) const u3);
}
test "default alignment allows unspecified in type syntax" {
- assert(&u32 == &align(@alignOf(u32)) u32);
+ assert(*u32 == *align(@alignOf(u32)) u32);
}
test "implicitly decreasing pointer alignment" {
@@ -53,7 +53,7 @@ test "implicitly decreasing pointer alignment" {
assert(addUnaligned(&a, &b) == 7);
}
-fn addUnaligned(a: &align(1) const u32, b: &align(1) const u32) u32 {
+fn addUnaligned(a: *align(1) const u32, b: *align(1) const u32) u32 {
return a.* + b.*;
}
@@ -76,7 +76,7 @@ fn testBytesAlign(b: u8) void {
b,
b,
};
- const ptr = @ptrCast(&u32, &bytes[0]);
+ const ptr = @ptrCast(*u32, &bytes[0]);
assert(ptr.* == 0x33333333);
}
@@ -99,10 +99,10 @@ test "@alignCast pointers" {
expectsOnly1(&x);
assert(x == 2);
}
-fn expectsOnly1(x: &align(1) u32) void {
+fn expectsOnly1(x: *align(1) u32) void {
expects4(@alignCast(4, x));
}
-fn expects4(x: &align(4) u32) void {
+fn expects4(x: *align(4) u32) void {
x.* += 1;
}
@@ -163,8 +163,8 @@ fn whyWouldYouEverDoThis(comptime align_bytes: u8) align(align_bytes) u8 {
test "@ptrCast preserves alignment of bigger source" {
var x: u32 align(16) = 1234;
- const ptr = @ptrCast(&u8, &x);
- assert(@typeOf(ptr) == &align(16) u8);
+ const ptr = @ptrCast(*u8, &x);
+ assert(@typeOf(ptr) == *align(16) u8);
}
test "compile-time known array index has best alignment possible" {
@@ -175,10 +175,10 @@ test "compile-time known array index has best alignment possible" {
3,
4,
};
- assert(@typeOf(&array[0]) == &align(4) u8);
- assert(@typeOf(&array[1]) == &u8);
- assert(@typeOf(&array[2]) == &align(2) u8);
- assert(@typeOf(&array[3]) == &u8);
+ assert(@typeOf(&array[0]) == *align(4) u8);
+ assert(@typeOf(&array[1]) == *u8);
+ assert(@typeOf(&array[2]) == *align(2) u8);
+ assert(@typeOf(&array[3]) == *u8);
// because align is too small but we still figure out to use 2
var bigger align(2) = []u64{
@@ -187,10 +187,10 @@ test "compile-time known array index has best alignment possible" {
3,
4,
};
- assert(@typeOf(&bigger[0]) == &align(2) u64);
- assert(@typeOf(&bigger[1]) == &align(2) u64);
- assert(@typeOf(&bigger[2]) == &align(2) u64);
- assert(@typeOf(&bigger[3]) == &align(2) u64);
+ assert(@typeOf(&bigger[0]) == *align(2) u64);
+ assert(@typeOf(&bigger[1]) == *align(2) u64);
+ assert(@typeOf(&bigger[2]) == *align(2) u64);
+ assert(@typeOf(&bigger[3]) == *align(2) u64);
// because pointer is align 2 and u32 align % 2 == 0 we can assume align 2
var smaller align(2) = []u32{
@@ -199,21 +199,21 @@ test "compile-time known array index has best alignment possible" {
3,
4,
};
- testIndex(&smaller[0], 0, &align(2) u32);
- testIndex(&smaller[0], 1, &align(2) u32);
- testIndex(&smaller[0], 2, &align(2) u32);
- testIndex(&smaller[0], 3, &align(2) u32);
+ testIndex(&smaller[0], 0, *align(2) u32);
+ testIndex(&smaller[0], 1, *align(2) u32);
+ testIndex(&smaller[0], 2, *align(2) u32);
+ testIndex(&smaller[0], 3, *align(2) u32);
// has to use ABI alignment because index known at runtime only
- testIndex2(&array[0], 0, &u8);
- testIndex2(&array[0], 1, &u8);
- testIndex2(&array[0], 2, &u8);
- testIndex2(&array[0], 3, &u8);
+ testIndex2(&array[0], 0, *u8);
+ testIndex2(&array[0], 1, *u8);
+ testIndex2(&array[0], 2, *u8);
+ testIndex2(&array[0], 3, *u8);
}
-fn testIndex(smaller: &align(2) u32, index: usize, comptime T: type) void {
+fn testIndex(smaller: *align(2) u32, index: usize, comptime T: type) void {
assert(@typeOf(&smaller[index]) == T);
}
-fn testIndex2(ptr: &align(4) u8, index: usize, comptime T: type) void {
+fn testIndex2(ptr: *align(4) u8, index: usize, comptime T: type) void {
assert(@typeOf(&ptr[index]) == T);
}
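The align.zig updates above are mostly mechanical, but they document the new spelling for aligned pointers: *align(N) T, with the packed-struct bit-range form *align(byte:bit_offset:host_size) const T seen in the bit field test. A condensed sketch of the same idea:

    const assert = @import("std").debug.assert;

    var byte_aligned: u8 align(4) = 100;

    test "aligned pointer types are written *align(N) T" {
        assert(@typeOf(&byte_aligned) == *align(4) u8);
        // a bare *T defaults to the ABI alignment of the child type
        assert(*u32 == *align(@alignOf(u32)) u32);
    }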
diff --git a/test/cases/atomics.zig b/test/cases/atomics.zig
index d406285d29e7..67c9ab3dd117 100644
--- a/test/cases/atomics.zig
+++ b/test/cases/atomics.zig
@@ -34,7 +34,7 @@ test "atomicrmw and atomicload" {
testAtomicLoad(&data);
}
-fn testAtomicRmw(ptr: &u8) void {
+fn testAtomicRmw(ptr: *u8) void {
const prev_value = @atomicRmw(u8, ptr, AtomicRmwOp.Xchg, 42, AtomicOrder.SeqCst);
assert(prev_value == 200);
comptime {
@@ -45,7 +45,7 @@ fn testAtomicRmw(ptr: &u8) void {
}
}
-fn testAtomicLoad(ptr: &u8) void {
+fn testAtomicLoad(ptr: *u8) void {
const x = @atomicLoad(u8, ptr, AtomicOrder.SeqCst);
assert(x == 42);
}
@@ -54,18 +54,18 @@ test "cmpxchg with ptr" {
var data1: i32 = 1234;
var data2: i32 = 5678;
var data3: i32 = 9101;
- var x: &i32 = &data1;
- if (@cmpxchgWeak(&i32, &x, &data2, &data3, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |x1| {
+ var x: *i32 = &data1;
+ if (@cmpxchgWeak(*i32, &x, &data2, &data3, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |x1| {
assert(x1 == &data1);
} else {
@panic("cmpxchg should have failed");
}
- while (@cmpxchgWeak(&i32, &x, &data1, &data3, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |x1| {
+ while (@cmpxchgWeak(*i32, &x, &data1, &data3, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |x1| {
assert(x1 == &data1);
}
assert(x == &data3);
- assert(@cmpxchgStrong(&i32, &x, &data3, &data2, AtomicOrder.SeqCst, AtomicOrder.SeqCst) == null);
+ assert(@cmpxchgStrong(*i32, &x, &data3, &data2, AtomicOrder.SeqCst, AtomicOrder.SeqCst) == null);
assert(x == &data2);
}
diff --git a/test/cases/bugs/655.zig b/test/cases/bugs/655.zig
index 4431767d5ca7..50374d4e6d65 100644
--- a/test/cases/bugs/655.zig
+++ b/test/cases/bugs/655.zig
@@ -3,10 +3,10 @@ const other_file = @import("655_other_file.zig");
test "function with &const parameter with type dereferenced by namespace" {
const x: other_file.Integer = 1234;
- comptime std.debug.assert(@typeOf(&x) == &const other_file.Integer);
+ comptime std.debug.assert(@typeOf(&x) == *const other_file.Integer);
foo(x);
}
-fn foo(x: &const other_file.Integer) void {
+fn foo(x: *const other_file.Integer) void {
std.debug.assert(x.* == 1234);
}
diff --git a/test/cases/bugs/828.zig b/test/cases/bugs/828.zig
index 10d7370b903b..50ae0fd279fd 100644
--- a/test/cases/bugs/828.zig
+++ b/test/cases/bugs/828.zig
@@ -3,7 +3,7 @@ const CountBy = struct {
const One = CountBy{ .a = 1 };
- pub fn counter(self: &const CountBy) Counter {
+ pub fn counter(self: *const CountBy) Counter {
return Counter{ .i = 0 };
}
};
@@ -11,13 +11,13 @@ const CountBy = struct {
const Counter = struct {
i: usize,
- pub fn count(self: &Counter) bool {
+ pub fn count(self: *Counter) bool {
self.i += 1;
return self.i <= 10;
}
};
-fn constCount(comptime cb: &const CountBy, comptime unused: u32) void {
+fn constCount(comptime cb: *const CountBy, comptime unused: u32) void {
comptime {
var cnt = cb.counter();
if (cnt.i != 0) @compileError("Counter instance reused!");
diff --git a/test/cases/bugs/920.zig b/test/cases/bugs/920.zig
index c3152060720e..2903f05a29ab 100644
--- a/test/cases/bugs/920.zig
+++ b/test/cases/bugs/920.zig
@@ -9,10 +9,10 @@ const ZigTable = struct {
pdf: fn (f64) f64,
is_symmetric: bool,
- zero_case: fn (&Random, f64) f64,
+ zero_case: fn (*Random, f64) f64,
};
-fn ZigTableGen(comptime is_symmetric: bool, comptime r: f64, comptime v: f64, comptime f: fn (f64) f64, comptime f_inv: fn (f64) f64, comptime zero_case: fn (&Random, f64) f64) ZigTable {
+fn ZigTableGen(comptime is_symmetric: bool, comptime r: f64, comptime v: f64, comptime f: fn (f64) f64, comptime f_inv: fn (f64) f64, comptime zero_case: fn (*Random, f64) f64) ZigTable {
var tables: ZigTable = undefined;
tables.is_symmetric = is_symmetric;
@@ -45,7 +45,7 @@ fn norm_f(x: f64) f64 {
fn norm_f_inv(y: f64) f64 {
return math.sqrt(-2.0 * math.ln(y));
}
-fn norm_zero_case(random: &Random, u: f64) f64 {
+fn norm_zero_case(random: *Random, u: f64) f64 {
return 0.0;
}
diff --git a/test/cases/cast.zig b/test/cases/cast.zig
index e37451ea93e3..7358a4ffd851 100644
--- a/test/cases/cast.zig
+++ b/test/cases/cast.zig
@@ -3,20 +3,20 @@ const mem = @import("std").mem;
test "int to ptr cast" {
const x = usize(13);
- const y = @intToPtr(&u8, x);
+ const y = @intToPtr(*u8, x);
const z = @ptrToInt(y);
assert(z == 13);
}
test "integer literal to pointer cast" {
- const vga_mem = @intToPtr(&u16, 0xB8000);
+ const vga_mem = @intToPtr(*u16, 0xB8000);
assert(@ptrToInt(vga_mem) == 0xB8000);
}
test "pointer reinterpret const float to int" {
const float: f64 = 5.99999999999994648725e-01;
const float_ptr = &float;
- const int_ptr = @ptrCast(&const i32, float_ptr);
+ const int_ptr = @ptrCast(*const i32, float_ptr);
const int_val = int_ptr.*;
assert(int_val == 858993411);
}
@@ -28,7 +28,7 @@ test "implicitly cast a pointer to a const pointer of it" {
assert(x == 2);
}
-fn funcWithConstPtrPtr(x: &const &i32) void {
+fn funcWithConstPtrPtr(x: *const *i32) void {
x.*.* += 1;
}
@@ -66,11 +66,11 @@ fn Struct(comptime T: type) type {
const Self = this;
x: T,
- fn pointer(self: &const Self) Self {
+ fn pointer(self: *const Self) Self {
return self.*;
}
- fn maybePointer(self: ?&const Self) Self {
+ fn maybePointer(self: ?*const Self) Self {
const none = Self{ .x = if (T == void) void{} else 0 };
return (self ?? &none).*;
}
@@ -80,11 +80,11 @@ fn Struct(comptime T: type) type {
const Union = union {
x: u8,
- fn pointer(self: &const Union) Union {
+ fn pointer(self: *const Union) Union {
return self.*;
}
- fn maybePointer(self: ?&const Union) Union {
+ fn maybePointer(self: ?*const Union) Union {
const none = Union{ .x = 0 };
return (self ?? &none).*;
}
@@ -94,11 +94,11 @@ const Enum = enum {
None,
Some,
- fn pointer(self: &const Enum) Enum {
+ fn pointer(self: *const Enum) Enum {
return self.*;
}
- fn maybePointer(self: ?&const Enum) Enum {
+ fn maybePointer(self: ?*const Enum) Enum {
return (self ?? &Enum.None).*;
}
};
@@ -107,16 +107,16 @@ test "implicitly cast indirect pointer to maybe-indirect pointer" {
const S = struct {
const Self = this;
x: u8,
- fn constConst(p: &const &const Self) u8 {
+ fn constConst(p: *const *const Self) u8 {
return (p.*).x;
}
- fn maybeConstConst(p: ?&const &const Self) u8 {
+ fn maybeConstConst(p: ?*const *const Self) u8 {
return ((??p).*).x;
}
- fn constConstConst(p: &const &const &const Self) u8 {
+ fn constConstConst(p: *const *const *const Self) u8 {
return (p.*.*).x;
}
- fn maybeConstConstConst(p: ?&const &const &const Self) u8 {
+ fn maybeConstConstConst(p: ?*const *const *const Self) u8 {
return ((??p).*.*).x;
}
};
@@ -166,12 +166,12 @@ fn testPeerResolveArrayConstSlice(b: bool) void {
}
test "integer literal to &const int" {
- const x: &const i32 = 3;
+ const x: *const i32 = 3;
assert(x.* == 3);
}
test "string literal to &const []const u8" {
- const x: &const []const u8 = "hello";
+ const x: *const []const u8 = "hello";
assert(mem.eql(u8, x.*, "hello"));
}
@@ -209,11 +209,11 @@ test "return null from fn() error!?&T" {
const b = returnNullLitFromMaybeTypeErrorRef();
assert((try a) == null and (try b) == null);
}
-fn returnNullFromMaybeTypeErrorRef() error!?&A {
- const a: ?&A = null;
+fn returnNullFromMaybeTypeErrorRef() error!?*A {
+ const a: ?*A = null;
return a;
}
-fn returnNullLitFromMaybeTypeErrorRef() error!?&A {
+fn returnNullLitFromMaybeTypeErrorRef() error!?*A {
return null;
}
@@ -312,7 +312,7 @@ test "implicit cast from &const [N]T to []const T" {
fn testCastConstArrayRefToConstSlice() void {
const blah = "aoeu";
const const_array_ref = &blah;
- assert(@typeOf(const_array_ref) == &const [4]u8);
+ assert(@typeOf(const_array_ref) == *const [4]u8);
const slice: []const u8 = const_array_ref;
assert(mem.eql(u8, slice, "aoeu"));
}
@@ -322,7 +322,7 @@ test "var args implicitly casts by value arg to const ref" {
}
fn foo(args: ...) void {
- assert(@typeOf(args[0]) == &const [5]u8);
+ assert(@typeOf(args[0]) == *const [5]u8);
}
test "peer type resolution: error and [N]T" {
diff --git a/test/cases/const_slice_child.zig b/test/cases/const_slice_child.zig
index a92c5891866f..e012c729a03d 100644
--- a/test/cases/const_slice_child.zig
+++ b/test/cases/const_slice_child.zig
@@ -1,10 +1,10 @@
const debug = @import("std").debug;
const assert = debug.assert;
-var argv: &const &const u8 = undefined;
+var argv: *const *const u8 = undefined;
test "const slice child" {
- const strs = ([]&const u8){
+ const strs = ([]*const u8){
c"one",
c"two",
c"three",
@@ -29,7 +29,7 @@ fn bar(argc: usize) void {
foo(args);
}
-fn strlen(ptr: &const u8) usize {
+fn strlen(ptr: *const u8) usize {
var count: usize = 0;
while (ptr[count] != 0) : (count += 1) {}
return count;
diff --git a/test/cases/coroutines.zig b/test/cases/coroutines.zig
index 8a071c6aad67..4d2aa54a6993 100644
--- a/test/cases/coroutines.zig
+++ b/test/cases/coroutines.zig
@@ -154,7 +154,7 @@ test "async function with dot syntax" {
test "async fn pointer in a struct field" {
var data: i32 = 1;
const Foo = struct {
- bar: async<&std.mem.Allocator> fn (&i32) void,
+ bar: async<*std.mem.Allocator> fn (*i32) void,
};
var foo = Foo{ .bar = simpleAsyncFn2 };
const p = (async foo.bar(&data)) catch unreachable;
@@ -162,7 +162,7 @@ test "async fn pointer in a struct field" {
cancel p;
assert(data == 4);
}
-async<&std.mem.Allocator> fn simpleAsyncFn2(y: &i32) void {
+async<*std.mem.Allocator> fn simpleAsyncFn2(y: *i32) void {
defer y.* += 2;
y.* += 1;
suspend;
@@ -220,7 +220,7 @@ test "break from suspend" {
cancel p;
std.debug.assert(my_result == 2);
}
-async fn testBreakFromSuspend(my_result: &i32) void {
+async fn testBreakFromSuspend(my_result: *i32) void {
s: suspend |p| {
break :s;
}
diff --git a/test/cases/enum.zig b/test/cases/enum.zig
index cbcbc5e3064c..ae9f04869b0a 100644
--- a/test/cases/enum.zig
+++ b/test/cases/enum.zig
@@ -56,14 +56,14 @@ test "constant enum with payload" {
shouldBeNotEmpty(full);
}
-fn shouldBeEmpty(x: &const AnEnumWithPayload) void {
+fn shouldBeEmpty(x: *const AnEnumWithPayload) void {
switch (x.*) {
AnEnumWithPayload.Empty => {},
else => unreachable,
}
}
-fn shouldBeNotEmpty(x: &const AnEnumWithPayload) void {
+fn shouldBeNotEmpty(x: *const AnEnumWithPayload) void {
switch (x.*) {
AnEnumWithPayload.Empty => unreachable,
else => {},
@@ -750,15 +750,15 @@ test "bit field access with enum fields" {
assert(data.b == B.Four3);
}
-fn getA(data: &const BitFieldOfEnums) A {
+fn getA(data: *const BitFieldOfEnums) A {
return data.a;
}
-fn getB(data: &const BitFieldOfEnums) B {
+fn getB(data: *const BitFieldOfEnums) B {
return data.b;
}
-fn getC(data: &const BitFieldOfEnums) C {
+fn getC(data: *const BitFieldOfEnums) C {
return data.c;
}
diff --git a/test/cases/enum_with_members.zig b/test/cases/enum_with_members.zig
index 8fafa70b0277..18174186a988 100644
--- a/test/cases/enum_with_members.zig
+++ b/test/cases/enum_with_members.zig
@@ -6,7 +6,7 @@ const ET = union(enum) {
SINT: i32,
UINT: u32,
- pub fn print(a: &const ET, buf: []u8) error!usize {
+ pub fn print(a: *const ET, buf: []u8) error!usize {
return switch (a.*) {
ET.SINT => |x| fmt.formatIntBuf(buf, x, 10, false, 0),
ET.UINT => |x| fmt.formatIntBuf(buf, x, 10, false, 0),
diff --git a/test/cases/eval.zig b/test/cases/eval.zig
index 8a6dc25bd8e7..b6d6a4f37bbb 100644
--- a/test/cases/eval.zig
+++ b/test/cases/eval.zig
@@ -282,7 +282,7 @@ fn fnWithFloatMode() f32 {
const SimpleStruct = struct {
field: i32,
- fn method(self: &const SimpleStruct) i32 {
+ fn method(self: *const SimpleStruct) i32 {
return self.field + 3;
}
};
@@ -367,7 +367,7 @@ test "const global shares pointer with other same one" {
assertEqualPtrs(&hi1[0], &hi2[0]);
comptime assert(&hi1[0] == &hi2[0]);
}
-fn assertEqualPtrs(ptr1: &const u8, ptr2: &const u8) void {
+fn assertEqualPtrs(ptr1: *const u8, ptr2: *const u8) void {
assert(ptr1 == ptr2);
}
@@ -418,9 +418,9 @@ test "string literal used as comptime slice is memoized" {
}
test "comptime slice of undefined pointer of length 0" {
- const slice1 = (&i32)(undefined)[0..0];
+ const slice1 = (*i32)(undefined)[0..0];
assert(slice1.len == 0);
- const slice2 = (&i32)(undefined)[100..100];
+ const slice2 = (*i32)(undefined)[100..100];
assert(slice2.len == 0);
}
@@ -472,7 +472,7 @@ test "comptime function with mutable pointer is not memoized" {
}
}
-fn increment(value: &i32) void {
+fn increment(value: *i32) void {
value.* += 1;
}
@@ -517,7 +517,7 @@ test "comptime slice of pointer preserves comptime var" {
const SingleFieldStruct = struct {
x: i32,
- fn read_x(self: &const SingleFieldStruct) i32 {
+ fn read_x(self: *const SingleFieldStruct) i32 {
return self.x;
}
};
@@ -576,3 +576,37 @@ test "comptime modification of const struct field" {
assert(res.version == 1);
}
}
+
+test "pointer to type" {
+ comptime {
+ var T: type = i32;
+ assert(T == i32);
+ var ptr = &T;
+ assert(@typeOf(ptr) == *type);
+ ptr.* = f32;
+ assert(T == f32);
+ assert(*T == *f32);
+ }
+}
+
+test "slice of type" {
+ comptime {
+ var types_array = []type{ i32, f64, type };
+ for (types_array) |T, i| {
+ switch (i) {
+ 0 => assert(T == i32),
+ 1 => assert(T == f64),
+ 2 => assert(T == type),
+ else => unreachable,
+ }
+ }
+ for (types_array[0..]) |T, i| {
+ switch (i) {
+ 0 => assert(T == i32),
+ 1 => assert(T == f64),
+ 2 => assert(T == type),
+ else => unreachable,
+ }
+ }
+ }
+}
diff --git a/test/cases/field_parent_ptr.zig b/test/cases/field_parent_ptr.zig
index 1a7de9ce3530..00d4e0f36721 100644
--- a/test/cases/field_parent_ptr.zig
+++ b/test/cases/field_parent_ptr.zig
@@ -24,7 +24,7 @@ const foo = Foo{
.d = -10,
};
-fn testParentFieldPtr(c: &const i32) void {
+fn testParentFieldPtr(c: *const i32) void {
assert(c == &foo.c);
const base = @fieldParentPtr(Foo, "c", c);
@@ -32,7 +32,7 @@ fn testParentFieldPtr(c: &const i32) void {
assert(&base.c == c);
}
-fn testParentFieldPtrFirst(a: &const bool) void {
+fn testParentFieldPtrFirst(a: *const bool) void {
assert(a == &foo.a);
const base = @fieldParentPtr(Foo, "a", a);
diff --git a/test/cases/fn_in_struct_in_comptime.zig b/test/cases/fn_in_struct_in_comptime.zig
index c22da71940c8..fabb57e9cbcb 100644
--- a/test/cases/fn_in_struct_in_comptime.zig
+++ b/test/cases/fn_in_struct_in_comptime.zig
@@ -1,9 +1,9 @@
const assert = @import("std").debug.assert;
-fn get_foo() fn (&u8) usize {
+fn get_foo() fn (*u8) usize {
comptime {
return struct {
- fn func(ptr: &u8) usize {
+ fn func(ptr: *u8) usize {
var u = @ptrToInt(ptr);
return u;
}
@@ -13,5 +13,5 @@ fn get_foo() fn (&u8) usize {
test "define a function in an anonymous struct in comptime" {
const foo = get_foo();
- assert(foo(@intToPtr(&u8, 12345)) == 12345);
+ assert(foo(@intToPtr(*u8, 12345)) == 12345);
}
diff --git a/test/cases/generics.zig b/test/cases/generics.zig
index 37cd1b89e485..a76990e2a1fc 100644
--- a/test/cases/generics.zig
+++ b/test/cases/generics.zig
@@ -96,8 +96,8 @@ test "generic struct" {
fn GenNode(comptime T: type) type {
return struct {
value: T,
- next: ?&GenNode(T),
- fn getVal(n: &const GenNode(T)) T {
+ next: ?*GenNode(T),
+ fn getVal(n: *const GenNode(T)) T {
return n.value;
}
};
@@ -126,11 +126,11 @@ test "generic fn with implicit cast" {
13,
}) == 0);
}
-fn getByte(ptr: ?&const u8) u8 {
+fn getByte(ptr: ?*const u8) u8 {
return (??ptr).*;
}
fn getFirstByte(comptime T: type, mem: []const T) u8 {
- return getByte(@ptrCast(&const u8, &mem[0]));
+ return getByte(@ptrCast(*const u8, &mem[0]));
}
const foos = []fn (var) bool{
diff --git a/test/cases/incomplete_struct_param_tld.zig b/test/cases/incomplete_struct_param_tld.zig
index a2f57743d027..552d6ef18514 100644
--- a/test/cases/incomplete_struct_param_tld.zig
+++ b/test/cases/incomplete_struct_param_tld.zig
@@ -11,12 +11,12 @@ const B = struct {
const C = struct {
x: i32,
- fn d(c: &const C) i32 {
+ fn d(c: *const C) i32 {
return c.x;
}
};
-fn foo(a: &const A) i32 {
+fn foo(a: *const A) i32 {
return a.b.c.d();
}
diff --git a/test/cases/math.zig b/test/cases/math.zig
index 0b4622702fd1..0c18293dd59b 100644
--- a/test/cases/math.zig
+++ b/test/cases/math.zig
@@ -28,13 +28,27 @@ fn testDivision() void {
assert(divTrunc(f32, -5.0, 3.0) == -1.0);
comptime {
- assert(1194735857077236777412821811143690633098347576 % 508740759824825164163191790951174292733114988 == 177254337427586449086438229241342047632117600);
- assert(@rem(-1194735857077236777412821811143690633098347576, 508740759824825164163191790951174292733114988) == -177254337427586449086438229241342047632117600);
- assert(1194735857077236777412821811143690633098347576 / 508740759824825164163191790951174292733114988 == 2);
- assert(@divTrunc(-1194735857077236777412821811143690633098347576, 508740759824825164163191790951174292733114988) == -2);
- assert(@divTrunc(1194735857077236777412821811143690633098347576, -508740759824825164163191790951174292733114988) == -2);
- assert(@divTrunc(-1194735857077236777412821811143690633098347576, -508740759824825164163191790951174292733114988) == 2);
- assert(4126227191251978491697987544882340798050766755606969681711 % 10 == 1);
+ assert(
+ 1194735857077236777412821811143690633098347576 % 508740759824825164163191790951174292733114988 == 177254337427586449086438229241342047632117600,
+ );
+ assert(
+ @rem(-1194735857077236777412821811143690633098347576, 508740759824825164163191790951174292733114988) == -177254337427586449086438229241342047632117600,
+ );
+ assert(
+ 1194735857077236777412821811143690633098347576 / 508740759824825164163191790951174292733114988 == 2,
+ );
+ assert(
+ @divTrunc(-1194735857077236777412821811143690633098347576, 508740759824825164163191790951174292733114988) == -2,
+ );
+ assert(
+ @divTrunc(1194735857077236777412821811143690633098347576, -508740759824825164163191790951174292733114988) == -2,
+ );
+ assert(
+ @divTrunc(-1194735857077236777412821811143690633098347576, -508740759824825164163191790951174292733114988) == 2,
+ );
+ assert(
+ 4126227191251978491697987544882340798050766755606969681711 % 10 == 1,
+ );
}
}
fn div(comptime T: type, a: T, b: T) T {
@@ -324,8 +338,12 @@ test "big number addition" {
test "big number multiplication" {
comptime {
- assert(45960427431263824329884196484953148229 * 128339149605334697009938835852565949723 == 5898522172026096622534201617172456926982464453350084962781392314016180490567);
- assert(594491908217841670578297176641415611445982232488944558774612 * 390603545391089362063884922208143568023166603618446395589768 == 232210647056203049913662402532976186578842425262306016094292237500303028346593132411865381225871291702600263463125370016);
+ assert(
+ 45960427431263824329884196484953148229 * 128339149605334697009938835852565949723 == 5898522172026096622534201617172456926982464453350084962781392314016180490567,
+ );
+ assert(
+ 594491908217841670578297176641415611445982232488944558774612 * 390603545391089362063884922208143568023166603618446395589768 == 232210647056203049913662402532976186578842425262306016094292237500303028346593132411865381225871291702600263463125370016,
+ );
}
}
diff --git a/test/cases/misc.zig b/test/cases/misc.zig
index b6b2da8de5f8..919b978f9f38 100644
--- a/test/cases/misc.zig
+++ b/test/cases/misc.zig
@@ -252,20 +252,20 @@ test "multiline C string" {
}
test "type equality" {
- assert(&const u8 != &u8);
+ assert(*const u8 != *u8);
}
const global_a: i32 = 1234;
-const global_b: &const i32 = &global_a;
-const global_c: &const f32 = @ptrCast(&const f32, global_b);
+const global_b: *const i32 = &global_a;
+const global_c: *const f32 = @ptrCast(*const f32, global_b);
test "compile time global reinterpret" {
- const d = @ptrCast(&const i32, global_c);
+ const d = @ptrCast(*const i32, global_c);
assert(d.* == 1234);
}
test "explicit cast maybe pointers" {
- const a: ?&i32 = undefined;
- const b: ?&f32 = @ptrCast(?&f32, a);
+ const a: ?*i32 = undefined;
+ const b: ?*f32 = @ptrCast(?*f32, a);
}
test "generic malloc free" {
@@ -274,7 +274,7 @@ test "generic malloc free" {
}
var some_mem: [100]u8 = undefined;
fn memAlloc(comptime T: type, n: usize) error![]T {
- return @ptrCast(&T, &some_mem[0])[0..n];
+ return @ptrCast(*T, &some_mem[0])[0..n];
}
fn memFree(comptime T: type, memory: []T) void {}
@@ -357,7 +357,7 @@ const test3_foo = Test3Foo{
},
};
const test3_bar = Test3Foo{ .Two = 13 };
-fn test3_1(f: &const Test3Foo) void {
+fn test3_1(f: *const Test3Foo) void {
switch (f.*) {
Test3Foo.Three => |pt| {
assert(pt.x == 3);
@@ -366,7 +366,7 @@ fn test3_1(f: &const Test3Foo) void {
else => unreachable,
}
}
-fn test3_2(f: &const Test3Foo) void {
+fn test3_2(f: *const Test3Foo) void {
switch (f.*) {
Test3Foo.Two => |x| {
assert(x == 13);
@@ -393,7 +393,7 @@ test "pointer comparison" {
const b = &a;
assert(ptrEql(b, b));
}
-fn ptrEql(a: &const []const u8, b: &const []const u8) bool {
+fn ptrEql(a: *const []const u8, b: *const []const u8) bool {
return a == b;
}
@@ -446,13 +446,13 @@ fn testPointerToVoidReturnType() error!void {
return a.*;
}
const test_pointer_to_void_return_type_x = void{};
-fn testPointerToVoidReturnType2() &const void {
+fn testPointerToVoidReturnType2() *const void {
return &test_pointer_to_void_return_type_x;
}
test "non const ptr to aliased type" {
const int = i32;
- assert(?&int == ?&i32);
+ assert(?*int == ?*i32);
}
test "array 2D const double ptr" {
@@ -463,7 +463,7 @@ test "array 2D const double ptr" {
testArray2DConstDoublePtr(&rect_2d_vertexes[0][0]);
}
-fn testArray2DConstDoublePtr(ptr: &const f32) void {
+fn testArray2DConstDoublePtr(ptr: *const f32) void {
assert(ptr[0] == 1.0);
assert(ptr[1] == 2.0);
}
@@ -497,7 +497,7 @@ test "@typeId" {
assert(@typeId(u64) == Tid.Int);
assert(@typeId(f32) == Tid.Float);
assert(@typeId(f64) == Tid.Float);
- assert(@typeId(&f32) == Tid.Pointer);
+ assert(@typeId(*f32) == Tid.Pointer);
assert(@typeId([2]u8) == Tid.Array);
assert(@typeId(AStruct) == Tid.Struct);
assert(@typeId(@typeOf(1)) == Tid.IntLiteral);
@@ -540,7 +540,7 @@ test "@typeName" {
};
comptime {
assert(mem.eql(u8, @typeName(i64), "i64"));
- assert(mem.eql(u8, @typeName(&usize), "&usize"));
+ assert(mem.eql(u8, @typeName(*usize), "*usize"));
// https://github.com/ziglang/zig/issues/675
assert(mem.eql(u8, @typeName(TypeFromFn(u8)), "TypeFromFn(u8)"));
assert(mem.eql(u8, @typeName(Struct), "Struct"));
@@ -555,7 +555,7 @@ fn TypeFromFn(comptime T: type) type {
test "volatile load and store" {
var number: i32 = 1234;
- const ptr = (&volatile i32)(&number);
+ const ptr = (*volatile i32)(&number);
ptr.* += 1;
assert(ptr.* == 1235);
}
@@ -587,28 +587,28 @@ var global_ptr = &gdt[0];
// can't really run this test but we can make sure it has no compile error
// and generates code
-const vram = @intToPtr(&volatile u8, 0x20000000)[0..0x8000];
+const vram = @intToPtr(*volatile u8, 0x20000000)[0..0x8000];
export fn writeToVRam() void {
vram[0] = 'X';
}
test "pointer child field" {
- assert((&u32).Child == u32);
+ assert((*u32).Child == u32);
}
const OpaqueA = @OpaqueType();
const OpaqueB = @OpaqueType();
test "@OpaqueType" {
- assert(&OpaqueA != &OpaqueB);
+ assert(*OpaqueA != *OpaqueB);
assert(mem.eql(u8, @typeName(OpaqueA), "OpaqueA"));
assert(mem.eql(u8, @typeName(OpaqueB), "OpaqueB"));
}
test "variable is allowed to be a pointer to an opaque type" {
var x: i32 = 1234;
- _ = hereIsAnOpaqueType(@ptrCast(&OpaqueA, &x));
+ _ = hereIsAnOpaqueType(@ptrCast(*OpaqueA, &x));
}
-fn hereIsAnOpaqueType(ptr: &OpaqueA) &OpaqueA {
+fn hereIsAnOpaqueType(ptr: *OpaqueA) *OpaqueA {
var a = ptr;
return a;
}
@@ -692,7 +692,7 @@ test "packed struct, enum, union parameters in extern function" {
}, PackedUnion{ .a = 1 }, PackedEnum.A);
}
-export fn testPackedStuff(a: &const PackedStruct, b: &const PackedUnion, c: PackedEnum) void {}
+export fn testPackedStuff(a: *const PackedStruct, b: *const PackedUnion, c: PackedEnum) void {}
test "slicing zero length array" {
const s1 = ""[0..];
@@ -703,8 +703,8 @@ test "slicing zero length array" {
assert(mem.eql(u32, s2, []u32{}));
}
-const addr1 = @ptrCast(&const u8, emptyFn);
+const addr1 = @ptrCast(*const u8, emptyFn);
test "comptime cast fn to ptr" {
- const addr2 = @ptrCast(&const u8, emptyFn);
+ const addr2 = @ptrCast(*const u8, emptyFn);
comptime assert(addr1 == addr2);
}
diff --git a/test/cases/null.zig b/test/cases/null.zig
index 936e5fafbd45..bd78990ff416 100644
--- a/test/cases/null.zig
+++ b/test/cases/null.zig
@@ -65,7 +65,7 @@ test "if var maybe pointer" {
.d = 1,
}) == 15);
}
-fn shouldBeAPlus1(p: &const Particle) u64 {
+fn shouldBeAPlus1(p: *const Particle) u64 {
var maybe_particle: ?Particle = p.*;
if (maybe_particle) |*particle| {
particle.a += 1;
diff --git a/test/cases/reflection.zig b/test/cases/reflection.zig
index b82ce6340f68..48fcc9ef030f 100644
--- a/test/cases/reflection.zig
+++ b/test/cases/reflection.zig
@@ -5,7 +5,7 @@ const reflection = this;
test "reflection: array, pointer, nullable, error union type child" {
comptime {
assert(([10]u8).Child == u8);
- assert((&u8).Child == u8);
+ assert((*u8).Child == u8);
assert((error!u8).Payload == u8);
assert((?u8).Child == u8);
}
diff --git a/test/cases/slice.zig b/test/cases/slice.zig
index eae6fa895e85..24e5239e2de3 100644
--- a/test/cases/slice.zig
+++ b/test/cases/slice.zig
@@ -1,7 +1,7 @@
const assert = @import("std").debug.assert;
const mem = @import("std").mem;
-const x = @intToPtr(&i32, 0x1000)[0..0x500];
+const x = @intToPtr(*i32, 0x1000)[0..0x500];
const y = x[0x100..];
test "compile time slice of pointer to hard coded address" {
assert(@ptrToInt(x.ptr) == 0x1000);
diff --git a/test/cases/struct.zig b/test/cases/struct.zig
index d4a1c7fbe3a2..0712e508de81 100644
--- a/test/cases/struct.zig
+++ b/test/cases/struct.zig
@@ -43,7 +43,7 @@ const VoidStructFieldsFoo = struct {
test "structs" {
var foo: StructFoo = undefined;
- @memset(@ptrCast(&u8, &foo), 0, @sizeOf(StructFoo));
+ @memset(@ptrCast(*u8, &foo), 0, @sizeOf(StructFoo));
foo.a += 1;
foo.b = foo.a == 1;
testFoo(foo);
@@ -55,16 +55,16 @@ const StructFoo = struct {
b: bool,
c: f32,
};
-fn testFoo(foo: &const StructFoo) void {
+fn testFoo(foo: *const StructFoo) void {
assert(foo.b);
}
-fn testMutation(foo: &StructFoo) void {
+fn testMutation(foo: *StructFoo) void {
foo.c = 100;
}
const Node = struct {
val: Val,
- next: &Node,
+ next: *Node,
};
const Val = struct {
@@ -112,7 +112,7 @@ fn aFunc() i32 {
return 13;
}
-fn callStructField(foo: &const Foo) i32 {
+fn callStructField(foo: *const Foo) i32 {
return foo.ptr();
}
@@ -124,7 +124,7 @@ test "store member function in variable" {
}
const MemberFnTestFoo = struct {
x: i32,
- fn member(foo: &const MemberFnTestFoo) i32 {
+ fn member(foo: *const MemberFnTestFoo) i32 {
return foo.x;
}
};
@@ -141,7 +141,7 @@ test "member functions" {
}
const MemberFnRand = struct {
seed: u32,
- pub fn getSeed(r: &const MemberFnRand) u32 {
+ pub fn getSeed(r: *const MemberFnRand) u32 {
return r.seed;
}
};
@@ -166,7 +166,7 @@ test "empty struct method call" {
assert(es.method() == 1234);
}
const EmptyStruct = struct {
- fn method(es: &const EmptyStruct) i32 {
+ fn method(es: *const EmptyStruct) i32 {
return 1234;
}
};
@@ -228,15 +228,15 @@ test "bit field access" {
assert(data.b == 3);
}
-fn getA(data: &const BitField1) u3 {
+fn getA(data: *const BitField1) u3 {
return data.a;
}
-fn getB(data: &const BitField1) u3 {
+fn getB(data: *const BitField1) u3 {
return data.b;
}
-fn getC(data: &const BitField1) u2 {
+fn getC(data: *const BitField1) u2 {
return data.c;
}
@@ -396,8 +396,8 @@ const Bitfields = packed struct {
test "native bit field understands endianness" {
var all: u64 = 0x7765443322221111;
var bytes: [8]u8 = undefined;
- @memcpy(&bytes[0], @ptrCast(&u8, &all), 8);
- var bitfields = @ptrCast(&Bitfields, &bytes[0]).*;
+ @memcpy(&bytes[0], @ptrCast(*u8, &all), 8);
+ var bitfields = @ptrCast(*Bitfields, &bytes[0]).*;
assert(bitfields.f1 == 0x1111);
assert(bitfields.f2 == 0x2222);
@@ -415,7 +415,7 @@ test "align 1 field before self referential align 8 field as slice return type"
const Expr = union(enum) {
Literal: u8,
- Question: &Expr,
+ Question: *Expr,
};
fn alloc(comptime T: type) []T {
diff --git a/test/cases/struct_contains_null_ptr_itself.zig b/test/cases/struct_contains_null_ptr_itself.zig
index b6cb1a94cce3..21175974b3d8 100644
--- a/test/cases/struct_contains_null_ptr_itself.zig
+++ b/test/cases/struct_contains_null_ptr_itself.zig
@@ -2,13 +2,13 @@ const std = @import("std");
const assert = std.debug.assert;
test "struct contains null pointer which contains original struct" {
- var x: ?&NodeLineComment = null;
+ var x: ?*NodeLineComment = null;
assert(x == null);
}
pub const Node = struct {
id: Id,
- comment: ?&NodeLineComment,
+ comment: ?*NodeLineComment,
pub const Id = enum {
Root,
diff --git a/test/cases/switch.zig b/test/cases/switch.zig
index 495fa9f3ed20..c6a4b60f09ce 100644
--- a/test/cases/switch.zig
+++ b/test/cases/switch.zig
@@ -90,7 +90,7 @@ const SwitchProngWithVarEnum = union(enum) {
Two: f32,
Meh: void,
};
-fn switchProngWithVarFn(a: &const SwitchProngWithVarEnum) void {
+fn switchProngWithVarFn(a: *const SwitchProngWithVarEnum) void {
switch (a.*) {
SwitchProngWithVarEnum.One => |x| {
assert(x == 13);
diff --git a/test/cases/this.zig b/test/cases/this.zig
index 5e433b503748..ba51d0ac9097 100644
--- a/test/cases/this.zig
+++ b/test/cases/this.zig
@@ -8,7 +8,7 @@ fn Point(comptime T: type) type {
x: T,
y: T,
- fn addOne(self: &Self) void {
+ fn addOne(self: *Self) void {
self.x += 1;
self.y += 1;
}
diff --git a/test/cases/type_info.zig b/test/cases/type_info.zig
index 2561d70865a8..921ff785a75d 100644
--- a/test/cases/type_info.zig
+++ b/test/cases/type_info.zig
@@ -37,7 +37,7 @@ test "type info: pointer type info" {
}
fn testPointer() void {
- const u32_ptr_info = @typeInfo(&u32);
+ const u32_ptr_info = @typeInfo(*u32);
assert(TypeId(u32_ptr_info) == TypeId.Pointer);
assert(u32_ptr_info.Pointer.is_const == false);
assert(u32_ptr_info.Pointer.is_volatile == false);
@@ -169,14 +169,14 @@ fn testUnion() void {
assert(notag_union_info.Union.fields[1].field_type == u32);
const TestExternUnion = extern union {
- foo: &c_void,
+ foo: *c_void,
};
const extern_union_info = @typeInfo(TestExternUnion);
assert(extern_union_info.Union.layout == TypeInfo.ContainerLayout.Extern);
assert(extern_union_info.Union.tag_type == @typeOf(undefined));
assert(extern_union_info.Union.fields[0].enum_field == null);
- assert(extern_union_info.Union.fields[0].field_type == &c_void);
+ assert(extern_union_info.Union.fields[0].field_type == *c_void);
}
test "type info: struct info" {
@@ -190,13 +190,13 @@ fn testStruct() void {
assert(struct_info.Struct.layout == TypeInfo.ContainerLayout.Packed);
assert(struct_info.Struct.fields.len == 3);
assert(struct_info.Struct.fields[1].offset == null);
- assert(struct_info.Struct.fields[2].field_type == &TestStruct);
+ assert(struct_info.Struct.fields[2].field_type == *TestStruct);
assert(struct_info.Struct.defs.len == 2);
assert(struct_info.Struct.defs[0].is_pub);
assert(!struct_info.Struct.defs[0].data.Fn.is_extern);
assert(struct_info.Struct.defs[0].data.Fn.lib_name == null);
assert(struct_info.Struct.defs[0].data.Fn.return_type == void);
- assert(struct_info.Struct.defs[0].data.Fn.fn_type == fn (&const TestStruct) void);
+ assert(struct_info.Struct.defs[0].data.Fn.fn_type == fn (*const TestStruct) void);
}
const TestStruct = packed struct {
@@ -204,9 +204,9 @@ const TestStruct = packed struct {
fieldA: usize,
fieldB: void,
- fieldC: &Self,
+ fieldC: *Self,
- pub fn foo(self: &const Self) void {}
+ pub fn foo(self: *const Self) void {}
};
test "type info: function type info" {
@@ -227,7 +227,7 @@ fn testFunction() void {
const test_instance: TestStruct = undefined;
const bound_fn_info = @typeInfo(@typeOf(test_instance.foo));
assert(TypeId(bound_fn_info) == TypeId.BoundFn);
- assert(bound_fn_info.BoundFn.args[0].arg_type == &const TestStruct);
+ assert(bound_fn_info.BoundFn.args[0].arg_type == *const TestStruct);
}
fn foo(comptime a: usize, b: bool, args: ...) usize {
diff --git a/test/cases/undefined.zig b/test/cases/undefined.zig
index f1af10e532f1..83c620d21159 100644
--- a/test/cases/undefined.zig
+++ b/test/cases/undefined.zig
@@ -27,12 +27,12 @@ test "init static array to undefined" {
const Foo = struct {
x: i32,
- fn setFooXMethod(foo: &Foo) void {
+ fn setFooXMethod(foo: *Foo) void {
foo.x = 3;
}
};
-fn setFooX(foo: &Foo) void {
+fn setFooX(foo: *Foo) void {
foo.x = 2;
}
diff --git a/test/cases/union.zig b/test/cases/union.zig
index 005ad08e6ab7..bdcbbdb452b7 100644
--- a/test/cases/union.zig
+++ b/test/cases/union.zig
@@ -68,11 +68,11 @@ test "init union with runtime value" {
assert(foo.int == 42);
}
-fn setFloat(foo: &Foo, x: f64) void {
+fn setFloat(foo: *Foo, x: f64) void {
foo.* = Foo{ .float = x };
}
-fn setInt(foo: &Foo, x: i32) void {
+fn setInt(foo: *Foo, x: i32) void {
foo.* = Foo{ .int = x };
}
@@ -108,7 +108,7 @@ fn doTest() void {
assert(bar(Payload{ .A = 1234 }) == -10);
}
-fn bar(value: &const Payload) i32 {
+fn bar(value: *const Payload) i32 {
assert(Letter(value.*) == Letter.A);
return switch (value.*) {
Payload.A => |x| return x - 1244,
@@ -147,7 +147,7 @@ test "union(enum(u32)) with specified and unspecified tag values" {
comptime testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2{ .C = 123 });
}
-fn testEnumWithSpecifiedAndUnspecifiedTagValues(x: &const MultipleChoice2) void {
+fn testEnumWithSpecifiedAndUnspecifiedTagValues(x: *const MultipleChoice2) void {
assert(u32(@TagType(MultipleChoice2)(x.*)) == 60);
assert(1123 == switch (x.*) {
MultipleChoice2.A => 1,
@@ -163,7 +163,7 @@ fn testEnumWithSpecifiedAndUnspecifiedTagValues(x: &const MultipleChoice2) void
}
const ExternPtrOrInt = extern union {
- ptr: &u8,
+ ptr: *u8,
int: u64,
};
test "extern union size" {
@@ -171,7 +171,7 @@ test "extern union size" {
}
const PackedPtrOrInt = packed union {
- ptr: &u8,
+ ptr: *u8,
int: u64,
};
test "extern union size" {
@@ -206,7 +206,7 @@ test "cast union to tag type of union" {
comptime testCastUnionToTagType(TheUnion{ .B = 1234 });
}
-fn testCastUnionToTagType(x: &const TheUnion) void {
+fn testCastUnionToTagType(x: *const TheUnion) void {
assert(TheTag(x.*) == TheTag.B);
}
@@ -243,7 +243,7 @@ const TheUnion2 = union(enum) {
Item2: i32,
};
-fn assertIsTheUnion2Item1(value: &const TheUnion2) void {
+fn assertIsTheUnion2Item1(value: *const TheUnion2) void {
assert(value.* == TheUnion2.Item1);
}
diff --git a/test/compare_output.zig b/test/compare_output.zig
index 0170477b8b14..00ad4a709b81 100644
--- a/test/compare_output.zig
+++ b/test/compare_output.zig
@@ -3,10 +3,10 @@ const std = @import("std");
const os = std.os;
const tests = @import("tests.zig");
-pub fn addCases(cases: &tests.CompareOutputContext) void {
+pub fn addCases(cases: *tests.CompareOutputContext) void {
cases.addC("hello world with libc",
\\const c = @cImport(@cInclude("stdio.h"));
- \\export fn main(argc: c_int, argv: &&u8) c_int {
+ \\export fn main(argc: c_int, argv: **u8) c_int {
\\ _ = c.puts(c"Hello, world!");
\\ return 0;
\\}
@@ -139,7 +139,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
\\ @cInclude("stdio.h");
\\});
\\
- \\export fn main(argc: c_int, argv: &&u8) c_int {
+ \\export fn main(argc: c_int, argv: **u8) c_int {
\\ if (is_windows) {
\\ // we want actual \n, not \r\n
\\ _ = c._setmode(1, c._O_BINARY);
@@ -284,9 +284,9 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
cases.addC("expose function pointer to C land",
\\const c = @cImport(@cInclude("stdlib.h"));
\\
- \\export fn compare_fn(a: ?&const c_void, b: ?&const c_void) c_int {
- \\ const a_int = @ptrCast(&align(1) const i32, a ?? unreachable);
- \\ const b_int = @ptrCast(&align(1) const i32, b ?? unreachable);
+ \\export fn compare_fn(a: ?*const c_void, b: ?*const c_void) c_int {
+ \\ const a_int = @ptrCast(*align(1) const i32, a ?? unreachable);
+ \\ const b_int = @ptrCast(*align(1) const i32, b ?? unreachable);
\\ if (a_int.* < b_int.*) {
\\ return -1;
\\ } else if (a_int.* > b_int.*) {
@@ -299,7 +299,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
\\export fn main() c_int {
\\ var array = []u32 { 1, 7, 3, 2, 0, 9, 4, 8, 6, 5 };
\\
- \\ c.qsort(@ptrCast(&c_void, &array[0]), c_ulong(array.len), @sizeOf(i32), compare_fn);
+ \\ c.qsort(@ptrCast(*c_void, &array[0]), c_ulong(array.len), @sizeOf(i32), compare_fn);
\\
\\ for (array) |item, i| {
\\ if (item != i) {
@@ -324,7 +324,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
\\ @cInclude("stdio.h");
\\});
\\
- \\export fn main(argc: c_int, argv: &&u8) c_int {
+ \\export fn main(argc: c_int, argv: **u8) c_int {
\\ if (is_windows) {
\\ // we want actual \n, not \r\n
\\ _ = c._setmode(1, c._O_BINARY);
@@ -344,13 +344,13 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
\\const Foo = struct {
\\ field1: Bar,
\\
- \\ fn method(a: &const Foo) bool { return true; }
+ \\ fn method(a: *const Foo) bool { return true; }
\\};
\\
\\const Bar = struct {
\\ field2: i32,
\\
- \\ fn method(b: &const Bar) bool { return true; }
+ \\ fn method(b: *const Bar) bool { return true; }
\\};
\\
\\pub fn main() void {
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 5215953d0af9..132dc8cd80c7 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -1,6 +1,6 @@
const tests = @import("tests.zig");
-pub fn addCases(cases: &tests.CompileErrorContext) void {
+pub fn addCases(cases: *tests.CompileErrorContext) void {
cases.add(
"invalid deref on switch target",
\\comptime {
@@ -109,7 +109,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
"@ptrCast discards const qualifier",
\\export fn entry() void {
\\ const x: i32 = 1234;
- \\ const y = @ptrCast(&i32, &x);
+ \\ const y = @ptrCast(*i32, &x);
\\}
,
".tmp_source.zig:3:15: error: cast discards const qualifier",
@@ -118,7 +118,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
cases.add(
"comptime slice of undefined pointer non-zero len",
\\export fn entry() void {
- \\ const slice = (&i32)(undefined)[0..1];
+ \\ const slice = (*i32)(undefined)[0..1];
\\}
,
".tmp_source.zig:2:36: error: non-zero length slice of undefined pointer",
@@ -126,7 +126,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
cases.add(
"type checking function pointers",
- \\fn a(b: fn (&const u8) void) void {
+ \\fn a(b: fn (*const u8) void) void {
\\ b('a');
\\}
\\fn c(d: u8) void {
@@ -136,7 +136,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ a(c);
\\}
,
- ".tmp_source.zig:8:7: error: expected type 'fn(&const u8) void', found 'fn(u8) void'",
+ ".tmp_source.zig:8:7: error: expected type 'fn(*const u8) void', found 'fn(u8) void'",
);
cases.add(
@@ -594,15 +594,15 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
cases.add(
"attempt to use 0 bit type in extern fn",
- \\extern fn foo(ptr: extern fn(&void) void) void;
+ \\extern fn foo(ptr: extern fn(*void) void) void;
\\
\\export fn entry() void {
\\ foo(bar);
\\}
\\
- \\extern fn bar(x: &void) void { }
+ \\extern fn bar(x: *void) void { }
,
- ".tmp_source.zig:7:18: error: parameter of type '&void' has 0 bits; not allowed in function with calling convention 'ccc'",
+ ".tmp_source.zig:7:18: error: parameter of type '*void' has 0 bits; not allowed in function with calling convention 'ccc'",
);
cases.add(
@@ -911,10 +911,10 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
cases.add(
"pointer to noreturn",
- \\fn a() &noreturn {}
+ \\fn a() *noreturn {}
\\export fn entry() void { _ = a(); }
,
- ".tmp_source.zig:1:9: error: pointer to noreturn not allowed",
+ ".tmp_source.zig:1:8: error: pointer to noreturn not allowed",
);
cases.add(
@@ -985,7 +985,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ return a;
\\}
,
- ".tmp_source.zig:3:12: error: expected type 'i32', found '&const u8'",
+ ".tmp_source.zig:3:12: error: expected type 'i32', found '*const u8'",
);
cases.add(
@@ -1446,7 +1446,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
cases.add(
"switch expression - switch on pointer type with no else",
- \\fn foo(x: &u8) void {
+ \\fn foo(x: *u8) void {
\\ switch (x) {
\\ &y => {},
\\ }
@@ -1454,7 +1454,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\const y: u8 = 100;
\\export fn entry() usize { return @sizeOf(@typeOf(foo)); }
,
- ".tmp_source.zig:2:5: error: else prong required when switching on type '&u8'",
+ ".tmp_source.zig:2:5: error: else prong required when switching on type '*u8'",
);
cases.add(
@@ -1501,10 +1501,10 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
"address of number literal",
\\const x = 3;
\\const y = &x;
- \\fn foo() &const i32 { return y; }
+ \\fn foo() *const i32 { return y; }
\\export fn entry() usize { return @sizeOf(@typeOf(foo)); }
,
- ".tmp_source.zig:3:30: error: expected type '&const i32', found '&const (integer literal)'",
+ ".tmp_source.zig:3:30: error: expected type '*const i32', found '*const (integer literal)'",
);
cases.add(
@@ -1529,10 +1529,10 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ a: i32,
\\ b: i32,
\\
- \\ fn member_a(foo: &const Foo) i32 {
+ \\ fn member_a(foo: *const Foo) i32 {
\\ return foo.a;
\\ }
- \\ fn member_b(foo: &const Foo) i32 {
+ \\ fn member_b(foo: *const Foo) i32 {
\\ return foo.b;
\\ }
\\};
@@ -1543,7 +1543,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ Foo.member_b,
\\};
\\
- \\fn f(foo: &const Foo, index: usize) void {
+ \\fn f(foo: *const Foo, index: usize) void {
\\ const result = members[index]();
\\}
\\
@@ -1692,11 +1692,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
cases.add(
"assign null to non-nullable pointer",
- \\const a: &u8 = null;
+ \\const a: *u8 = null;
\\
\\export fn entry() usize { return @sizeOf(@typeOf(a)); }
,
- ".tmp_source.zig:1:16: error: expected type '&u8', found '(null)'",
+ ".tmp_source.zig:1:16: error: expected type '*u8', found '(null)'",
);
cases.add(
@@ -1806,7 +1806,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ One: void,
\\ Two: i32,
\\};
- \\fn bad_eql_2(a: &const EnumWithData, b: &const EnumWithData) bool {
+ \\fn bad_eql_2(a: *const EnumWithData, b: *const EnumWithData) bool {
\\ return a.* == b.*;
\\}
\\
@@ -2011,9 +2011,9 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
cases.add(
"wrong number of arguments for method fn call",
\\const Foo = struct {
- \\ fn method(self: &const Foo, a: i32) void {}
+ \\ fn method(self: *const Foo, a: i32) void {}
\\};
- \\fn f(foo: &const Foo) void {
+ \\fn f(foo: *const Foo) void {
\\
\\ foo.method(1, 2);
\\}
@@ -2062,7 +2062,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
cases.add(
"misspelled type with pointer only reference",
\\const JasonHM = u8;
- \\const JasonList = &JsonNode;
+ \\const JasonList = *JsonNode;
\\
\\const JsonOA = union(enum) {
\\ JSONArray: JsonList,
@@ -2113,16 +2113,16 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ derp.init();
\\}
,
- ".tmp_source.zig:14:5: error: expected type 'i32', found '&const Foo'",
+ ".tmp_source.zig:14:5: error: expected type 'i32', found '*const Foo'",
);
cases.add(
"method call with first arg type wrong container",
\\pub const List = struct {
\\ len: usize,
- \\ allocator: &Allocator,
+ \\ allocator: *Allocator,
\\
- \\ pub fn init(allocator: &Allocator) List {
+ \\ pub fn init(allocator: *Allocator) List {
\\ return List {
\\ .len = 0,
\\ .allocator = allocator,
@@ -2143,7 +2143,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ x.init();
\\}
,
- ".tmp_source.zig:23:5: error: expected type '&Allocator', found '&List'",
+ ".tmp_source.zig:23:5: error: expected type '*Allocator', found '*List'",
);
cases.add(
@@ -2308,17 +2308,17 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ c: u2,
\\};
\\
- \\fn foo(bit_field: &const BitField) u3 {
+ \\fn foo(bit_field: *const BitField) u3 {
\\ return bar(&bit_field.b);
\\}
\\
- \\fn bar(x: &const u3) u3 {
+ \\fn bar(x: *const u3) u3 {
\\ return x.*;
\\}
\\
\\export fn entry() usize { return @sizeOf(@typeOf(foo)); }
,
- ".tmp_source.zig:8:26: error: expected type '&const u3', found '&align(1:3:6) const u3'",
+ ".tmp_source.zig:8:26: error: expected type '*const u3', found '*align(1:3:6) const u3'",
);
cases.add(
@@ -2441,13 +2441,13 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ const b = &a;
\\ return ptrEql(b, b);
\\}
- \\fn ptrEql(a: &[]const u8, b: &[]const u8) bool {
+ \\fn ptrEql(a: *[]const u8, b: *[]const u8) bool {
\\ return true;
\\}
\\
\\export fn entry() usize { return @sizeOf(@typeOf(foo)); }
,
- ".tmp_source.zig:4:19: error: expected type '&[]const u8', found '&const []const u8'",
+ ".tmp_source.zig:4:19: error: expected type '*[]const u8', found '*const []const u8'",
);
cases.addCase(x: {
@@ -2493,7 +2493,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
cases.add(
"ptrcast to non-pointer",
- \\export fn entry(a: &i32) usize {
+ \\export fn entry(a: *i32) usize {
\\ return @ptrCast(usize, a);
\\}
,
@@ -2542,16 +2542,16 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
"int to ptr of 0 bits",
\\export fn foo() void {
\\ var x: usize = 0x1000;
- \\ var y: &void = @intToPtr(&void, x);
+ \\ var y: *void = @intToPtr(*void, x);
\\}
,
- ".tmp_source.zig:3:31: error: type '&void' has 0 bits and cannot store information",
+ ".tmp_source.zig:3:30: error: type '*void' has 0 bits and cannot store information",
);
cases.add(
"@fieldParentPtr - non struct",
\\const Foo = i32;
- \\export fn foo(a: &i32) &Foo {
+ \\export fn foo(a: *i32) *Foo {
\\ return @fieldParentPtr(Foo, "a", a);
\\}
,
@@ -2563,7 +2563,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\const Foo = extern struct {
\\ derp: i32,
\\};
- \\export fn foo(a: &i32) &Foo {
+ \\export fn foo(a: *i32) *Foo {
\\ return @fieldParentPtr(Foo, "a", a);
\\}
,
@@ -2575,7 +2575,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\const Foo = extern struct {
\\ a: i32,
\\};
- \\export fn foo(a: i32) &Foo {
+ \\export fn foo(a: i32) *Foo {
\\ return @fieldParentPtr(Foo, "a", a);
\\}
,
@@ -2591,7 +2591,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\const foo = Foo { .a = 1, .b = 2, };
\\
\\comptime {
- \\ const field_ptr = @intToPtr(&i32, 0x1234);
+ \\ const field_ptr = @intToPtr(*i32, 0x1234);
\\ const another_foo_ptr = @fieldParentPtr(Foo, "b", field_ptr);
\\}
,
@@ -2682,7 +2682,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
cases.add(
"returning address of local variable - simple",
- \\export fn foo() &i32 {
+ \\export fn foo() *i32 {
\\ var a: i32 = undefined;
\\ return &a;
\\}
@@ -2692,7 +2692,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
cases.add(
"returning address of local variable - phi",
- \\export fn foo(c: bool) &i32 {
+ \\export fn foo(c: bool) *i32 {
\\ var a: i32 = undefined;
\\ var b: i32 = undefined;
\\ return if (c) &a else &b;
@@ -3086,11 +3086,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ bar(&foo.b);
\\}
\\
- \\fn bar(x: &u32) void {
+ \\fn bar(x: *u32) void {
\\ x.* += 1;
\\}
,
- ".tmp_source.zig:8:13: error: expected type '&u32', found '&align(1) u32'",
+ ".tmp_source.zig:8:13: error: expected type '*u32', found '*align(1) u32'",
);
cases.add(
@@ -3117,13 +3117,13 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
"increase pointer alignment in @ptrCast",
\\export fn entry() u32 {
\\ var bytes: [4]u8 = []u8{0x01, 0x02, 0x03, 0x04};
- \\ const ptr = @ptrCast(&u32, &bytes[0]);
+ \\ const ptr = @ptrCast(*u32, &bytes[0]);
\\ return ptr.*;
\\}
,
".tmp_source.zig:3:17: error: cast increases pointer alignment",
- ".tmp_source.zig:3:38: note: '&u8' has alignment 1",
- ".tmp_source.zig:3:27: note: '&u32' has alignment 4",
+ ".tmp_source.zig:3:38: note: '*u8' has alignment 1",
+ ".tmp_source.zig:3:26: note: '*u32' has alignment 4",
);
cases.add(
@@ -3169,7 +3169,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ return x == 5678;
\\}
,
- ".tmp_source.zig:4:32: error: expected type '&i32', found '&align(1) i32'",
+ ".tmp_source.zig:4:32: error: expected type '*i32', found '*align(1) i32'",
);
cases.add(
@@ -3198,20 +3198,20 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
cases.add(
"wrong pointer implicitly casted to pointer to @OpaqueType()",
\\const Derp = @OpaqueType();
- \\extern fn bar(d: &Derp) void;
+ \\extern fn bar(d: *Derp) void;
\\export fn foo() void {
\\ var x = u8(1);
- \\ bar(@ptrCast(&c_void, &x));
+ \\ bar(@ptrCast(*c_void, &x));
\\}
,
- ".tmp_source.zig:5:9: error: expected type '&Derp', found '&c_void'",
+ ".tmp_source.zig:5:9: error: expected type '*Derp', found '*c_void'",
);
cases.add(
"non-const variables of things that require const variables",
\\const Opaque = @OpaqueType();
\\
- \\export fn entry(opaque: &Opaque) void {
+ \\export fn entry(opaque: *Opaque) void {
\\ var m2 = &2;
\\ const y: u32 = m2.*;
\\
@@ -3229,10 +3229,10 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\}
\\
\\const Foo = struct {
- \\ fn bar(self: &const Foo) void {}
+ \\ fn bar(self: *const Foo) void {}
\\};
,
- ".tmp_source.zig:4:4: error: variable of type '&const (integer literal)' must be const or comptime",
+ ".tmp_source.zig:4:4: error: variable of type '*(integer literal)' must be const or comptime",
".tmp_source.zig:7:4: error: variable of type '(undefined)' must be const or comptime",
".tmp_source.zig:8:4: error: variable of type '(integer literal)' must be const or comptime",
".tmp_source.zig:9:4: error: variable of type '(float literal)' must be const or comptime",
@@ -3241,7 +3241,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
".tmp_source.zig:12:4: error: variable of type 'Opaque' must be const or comptime",
".tmp_source.zig:13:4: error: variable of type 'type' must be const or comptime",
".tmp_source.zig:14:4: error: variable of type '(namespace)' must be const or comptime",
- ".tmp_source.zig:15:4: error: variable of type '(bound fn(&const Foo) void)' must be const or comptime",
+ ".tmp_source.zig:15:4: error: variable of type '(bound fn(*const Foo) void)' must be const or comptime",
".tmp_source.zig:17:4: error: unreachable code",
);
@@ -3397,14 +3397,14 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\
\\export fn entry() bool {
\\ var x: i32 = 1;
- \\ return bar(@ptrCast(&MyType, &x));
+ \\ return bar(@ptrCast(*MyType, &x));
\\}
\\
- \\fn bar(x: &MyType) bool {
+ \\fn bar(x: *MyType) bool {
\\ return x.blah;
\\}
,
- ".tmp_source.zig:9:13: error: type '&MyType' does not support field access",
+ ".tmp_source.zig:9:13: error: type '*MyType' does not support field access",
);
cases.add(
@@ -3535,9 +3535,9 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\export fn entry() void {
\\ foo("hello",);
\\}
- \\pub extern fn foo(format: &const u8, ...) void;
+ \\pub extern fn foo(format: *const u8, ...) void;
,
- ".tmp_source.zig:2:9: error: expected type '&const u8', found '[5]u8'",
+ ".tmp_source.zig:2:9: error: expected type '*const u8', found '[5]u8'",
);
cases.add(
@@ -3902,7 +3902,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
\\ const a = Payload { .A = 1234 };
\\ foo(a);
\\}
- \\fn foo(a: &const Payload) void {
+ \\fn foo(a: *const Payload) void {
\\ switch (a.*) {
\\ Payload.A => {},
\\ else => unreachable,
diff --git a/test/gen_h.zig b/test/gen_h.zig
index 2def39bed738..e6a757ea6da8 100644
--- a/test/gen_h.zig
+++ b/test/gen_h.zig
@@ -1,6 +1,6 @@
const tests = @import("tests.zig");
-pub fn addCases(cases: &tests.GenHContext) void {
+pub fn addCases(cases: *tests.GenHContext) void {
cases.add("declare enum",
\\const Foo = extern enum { A, B, C };
\\export fn entry(foo: Foo) void { }
@@ -54,7 +54,7 @@ pub fn addCases(cases: &tests.GenHContext) void {
cases.add("declare opaque type",
\\export const Foo = @OpaqueType();
\\
- \\export fn entry(foo: ?&Foo) void { }
+ \\export fn entry(foo: ?*Foo) void { }
,
\\struct Foo;
\\
@@ -64,7 +64,7 @@ pub fn addCases(cases: &tests.GenHContext) void {
cases.add("array field-type",
\\const Foo = extern struct {
\\ A: [2]i32,
- \\ B: [4]&u32,
+ \\ B: [4]*u32,
\\};
\\export fn entry(foo: Foo, bar: [3]u8) void { }
,
diff --git a/test/runtime_safety.zig b/test/runtime_safety.zig
index 1fea6347ab6b..61eba9458ebf 100644
--- a/test/runtime_safety.zig
+++ b/test/runtime_safety.zig
@@ -1,8 +1,8 @@
const tests = @import("tests.zig");
-pub fn addCases(cases: &tests.CompareOutputContext) void {
+pub fn addCases(cases: *tests.CompareOutputContext) void {
cases.addRuntimeSafety("calling panic",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() void {
@@ -11,7 +11,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("out of bounds slice access",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() void {
@@ -25,7 +25,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("integer addition overflow",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() !void {
@@ -38,7 +38,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("integer subtraction overflow",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() !void {
@@ -51,7 +51,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("integer multiplication overflow",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() !void {
@@ -64,7 +64,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("integer negation overflow",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() !void {
@@ -77,7 +77,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("signed integer division overflow",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() !void {
@@ -90,7 +90,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("signed shift left overflow",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() !void {
@@ -103,7 +103,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("unsigned shift left overflow",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() !void {
@@ -116,7 +116,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("signed shift right overflow",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() !void {
@@ -129,7 +129,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("unsigned shift right overflow",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() !void {
@@ -142,7 +142,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("integer division by zero",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() void {
@@ -154,7 +154,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("exact division failure",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() !void {
@@ -167,7 +167,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("cast []u8 to bigger slice of wrong size",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() !void {
@@ -180,7 +180,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("value does not fit in shortening cast",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() !void {
@@ -193,7 +193,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("signed integer not fitting in cast to unsigned integer",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() !void {
@@ -206,7 +206,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("unwrap error",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ if (@import("std").mem.eql(u8, message, "attempt to unwrap error: Whatever")) {
\\ @import("std").os.exit(126); // good
\\ }
@@ -221,7 +221,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("cast integer to global error and no code matches",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() void {
@@ -233,7 +233,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("cast integer to non-global error set and no match",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\const Set1 = error{A, B};
@@ -247,7 +247,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("@alignCast misaligned",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() !void {
@@ -263,7 +263,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
);
cases.addRuntimeSafety("bad union field access",
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\
@@ -277,7 +277,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
\\ bar(&f);
\\}
\\
- \\fn bar(f: &Foo) void {
+ \\fn bar(f: *Foo) void {
\\ f.float = 12.34;
\\}
);
@@ -287,7 +287,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
cases.addRuntimeSafety("error return trace across suspend points",
\\const std = @import("std");
\\
- \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn {
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ std.os.exit(126);
\\}
\\
diff --git a/test/standalone/brace_expansion/build.zig b/test/standalone/brace_expansion/build.zig
index 7752f599df3d..64f3c08583ce 100644
--- a/test/standalone/brace_expansion/build.zig
+++ b/test/standalone/brace_expansion/build.zig
@@ -1,6 +1,6 @@
const Builder = @import("std").build.Builder;
-pub fn build(b: &Builder) void {
+pub fn build(b: *Builder) void {
const main = b.addTest("main.zig");
main.setBuildMode(b.standardReleaseOptions());
diff --git a/test/standalone/brace_expansion/main.zig b/test/standalone/brace_expansion/main.zig
index c96cc2cbb956..ccb4f6dd4543 100644
--- a/test/standalone/brace_expansion/main.zig
+++ b/test/standalone/brace_expansion/main.zig
@@ -14,7 +14,7 @@ const Token = union(enum) {
Eof,
};
-var global_allocator: &mem.Allocator = undefined;
+var global_allocator: *mem.Allocator = undefined;
fn tokenize(input: []const u8) !ArrayList(Token) {
const State = enum {
@@ -73,7 +73,7 @@ const ParseError = error{
OutOfMemory,
};
-fn parse(tokens: &const ArrayList(Token), token_index: &usize) ParseError!Node {
+fn parse(tokens: *const ArrayList(Token), token_index: *usize) ParseError!Node {
const first_token = tokens.items[token_index.*];
token_index.* += 1;
@@ -109,7 +109,7 @@ fn parse(tokens: &const ArrayList(Token), token_index: &usize) ParseError!Node {
}
}
-fn expandString(input: []const u8, output: &Buffer) !void {
+fn expandString(input: []const u8, output: *Buffer) !void {
const tokens = try tokenize(input);
if (tokens.len == 1) {
return output.resize(0);
@@ -139,7 +139,7 @@ fn expandString(input: []const u8, output: &Buffer) !void {
const ExpandNodeError = error{OutOfMemory};
-fn expandNode(node: &const Node, output: &ArrayList(Buffer)) ExpandNodeError!void {
+fn expandNode(node: *const Node, output: *ArrayList(Buffer)) ExpandNodeError!void {
assert(output.len == 0);
switch (node.*) {
Node.Scalar => |scalar| {
diff --git a/test/standalone/issue_339/build.zig b/test/standalone/issue_339/build.zig
index f3ab327006a1..733b3729c14a 100644
--- a/test/standalone/issue_339/build.zig
+++ b/test/standalone/issue_339/build.zig
@@ -1,6 +1,6 @@
const Builder = @import("std").build.Builder;
-pub fn build(b: &Builder) void {
+pub fn build(b: *Builder) void {
const obj = b.addObject("test", "test.zig");
const test_step = b.step("test", "Test the program");
diff --git a/test/standalone/issue_339/test.zig b/test/standalone/issue_339/test.zig
index da0747b8e675..f4068dcfac48 100644
--- a/test/standalone/issue_339/test.zig
+++ b/test/standalone/issue_339/test.zig
@@ -1,5 +1,5 @@
const StackTrace = @import("builtin").StackTrace;
-pub fn panic(msg: []const u8, stack_trace: ?&StackTrace) noreturn {
+pub fn panic(msg: []const u8, stack_trace: ?*StackTrace) noreturn {
@breakpoint();
while (true) {}
}
diff --git a/test/standalone/issue_794/build.zig b/test/standalone/issue_794/build.zig
index 4f5dcd7ff430..06c37a83a361 100644
--- a/test/standalone/issue_794/build.zig
+++ b/test/standalone/issue_794/build.zig
@@ -1,6 +1,6 @@
const Builder = @import("std").build.Builder;
-pub fn build(b: &Builder) void {
+pub fn build(b: *Builder) void {
const test_artifact = b.addTest("main.zig");
test_artifact.addIncludeDir("a_directory");
diff --git a/test/standalone/pkg_import/build.zig b/test/standalone/pkg_import/build.zig
index bb9416d3c435..e0b3885dc33f 100644
--- a/test/standalone/pkg_import/build.zig
+++ b/test/standalone/pkg_import/build.zig
@@ -1,6 +1,6 @@
const Builder = @import("std").build.Builder;
-pub fn build(b: &Builder) void {
+pub fn build(b: *Builder) void {
const exe = b.addExecutable("test", "test.zig");
exe.addPackagePath("my_pkg", "pkg.zig");
diff --git a/test/standalone/use_alias/build.zig b/test/standalone/use_alias/build.zig
index ecbba297d81b..c700d43db982 100644
--- a/test/standalone/use_alias/build.zig
+++ b/test/standalone/use_alias/build.zig
@@ -1,6 +1,6 @@
const Builder = @import("std").build.Builder;
-pub fn build(b: &Builder) void {
+pub fn build(b: *Builder) void {
b.addCIncludePath(".");
const main = b.addTest("main.zig");
diff --git a/test/tests.zig b/test/tests.zig
index b59b954122af..cc562331feb3 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -47,7 +47,7 @@ const test_targets = []TestTarget{
const max_stdout_size = 1 * 1024 * 1024; // 1 MB
-pub fn addCompareOutputTests(b: &build.Builder, test_filter: ?[]const u8) &build.Step {
+pub fn addCompareOutputTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
const cases = b.allocator.create(CompareOutputContext) catch unreachable;
cases.* = CompareOutputContext{
.b = b,
@@ -61,7 +61,7 @@ pub fn addCompareOutputTests(b: &build.Builder, test_filter: ?[]const u8) &build
return cases.step;
}
-pub fn addRuntimeSafetyTests(b: &build.Builder, test_filter: ?[]const u8) &build.Step {
+pub fn addRuntimeSafetyTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
const cases = b.allocator.create(CompareOutputContext) catch unreachable;
cases.* = CompareOutputContext{
.b = b,
@@ -75,7 +75,7 @@ pub fn addRuntimeSafetyTests(b: &build.Builder, test_filter: ?[]const u8) &build
return cases.step;
}
-pub fn addCompileErrorTests(b: &build.Builder, test_filter: ?[]const u8) &build.Step {
+pub fn addCompileErrorTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
const cases = b.allocator.create(CompileErrorContext) catch unreachable;
cases.* = CompileErrorContext{
.b = b,
@@ -89,7 +89,7 @@ pub fn addCompileErrorTests(b: &build.Builder, test_filter: ?[]const u8) &build.
return cases.step;
}
-pub fn addBuildExampleTests(b: &build.Builder, test_filter: ?[]const u8) &build.Step {
+pub fn addBuildExampleTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
const cases = b.allocator.create(BuildExamplesContext) catch unreachable;
cases.* = BuildExamplesContext{
.b = b,
@@ -103,7 +103,7 @@ pub fn addBuildExampleTests(b: &build.Builder, test_filter: ?[]const u8) &build.
return cases.step;
}
-pub fn addAssembleAndLinkTests(b: &build.Builder, test_filter: ?[]const u8) &build.Step {
+pub fn addAssembleAndLinkTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
const cases = b.allocator.create(CompareOutputContext) catch unreachable;
cases.* = CompareOutputContext{
.b = b,
@@ -117,7 +117,7 @@ pub fn addAssembleAndLinkTests(b: &build.Builder, test_filter: ?[]const u8) &bui
return cases.step;
}
-pub fn addTranslateCTests(b: &build.Builder, test_filter: ?[]const u8) &build.Step {
+pub fn addTranslateCTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
const cases = b.allocator.create(TranslateCContext) catch unreachable;
cases.* = TranslateCContext{
.b = b,
@@ -131,7 +131,7 @@ pub fn addTranslateCTests(b: &build.Builder, test_filter: ?[]const u8) &build.St
return cases.step;
}
-pub fn addGenHTests(b: &build.Builder, test_filter: ?[]const u8) &build.Step {
+pub fn addGenHTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
const cases = b.allocator.create(GenHContext) catch unreachable;
cases.* = GenHContext{
.b = b,
@@ -145,7 +145,7 @@ pub fn addGenHTests(b: &build.Builder, test_filter: ?[]const u8) &build.Step {
return cases.step;
}
-pub fn addPkgTests(b: &build.Builder, test_filter: ?[]const u8, root_src: []const u8, name: []const u8, desc: []const u8, with_lldb: bool) &build.Step {
+pub fn addPkgTests(b: *build.Builder, test_filter: ?[]const u8, root_src: []const u8, name: []const u8, desc: []const u8, with_lldb: bool) *build.Step {
const step = b.step(b.fmt("test-{}", name), desc);
for (test_targets) |test_target| {
const is_native = (test_target.os == builtin.os and test_target.arch == builtin.arch);
@@ -193,8 +193,8 @@ pub fn addPkgTests(b: &build.Builder, test_filter: ?[]const u8, root_src: []cons
}
pub const CompareOutputContext = struct {
- b: &build.Builder,
- step: &build.Step,
+ b: *build.Builder,
+ step: *build.Step,
test_index: usize,
test_filter: ?[]const u8,
@@ -217,28 +217,28 @@ pub const CompareOutputContext = struct {
source: []const u8,
};
- pub fn addSourceFile(self: &TestCase, filename: []const u8, source: []const u8) void {
+ pub fn addSourceFile(self: *TestCase, filename: []const u8, source: []const u8) void {
self.sources.append(SourceFile{
.filename = filename,
.source = source,
}) catch unreachable;
}
- pub fn setCommandLineArgs(self: &TestCase, args: []const []const u8) void {
+ pub fn setCommandLineArgs(self: *TestCase, args: []const []const u8) void {
self.cli_args = args;
}
};
const RunCompareOutputStep = struct {
step: build.Step,
- context: &CompareOutputContext,
+ context: *CompareOutputContext,
exe_path: []const u8,
name: []const u8,
expected_output: []const u8,
test_index: usize,
cli_args: []const []const u8,
- pub fn create(context: &CompareOutputContext, exe_path: []const u8, name: []const u8, expected_output: []const u8, cli_args: []const []const u8) &RunCompareOutputStep {
+ pub fn create(context: *CompareOutputContext, exe_path: []const u8, name: []const u8, expected_output: []const u8, cli_args: []const []const u8) *RunCompareOutputStep {
const allocator = context.b.allocator;
const ptr = allocator.create(RunCompareOutputStep) catch unreachable;
ptr.* = RunCompareOutputStep{
@@ -254,7 +254,7 @@ pub const CompareOutputContext = struct {
return ptr;
}
- fn make(step: &build.Step) !void {
+ fn make(step: *build.Step) !void {
const self = @fieldParentPtr(RunCompareOutputStep, "step", step);
const b = self.context.b;
@@ -321,12 +321,12 @@ pub const CompareOutputContext = struct {
const RuntimeSafetyRunStep = struct {
step: build.Step,
- context: &CompareOutputContext,
+ context: *CompareOutputContext,
exe_path: []const u8,
name: []const u8,
test_index: usize,
- pub fn create(context: &CompareOutputContext, exe_path: []const u8, name: []const u8) &RuntimeSafetyRunStep {
+ pub fn create(context: *CompareOutputContext, exe_path: []const u8, name: []const u8) *RuntimeSafetyRunStep {
const allocator = context.b.allocator;
const ptr = allocator.create(RuntimeSafetyRunStep) catch unreachable;
ptr.* = RuntimeSafetyRunStep{
@@ -340,7 +340,7 @@ pub const CompareOutputContext = struct {
return ptr;
}
- fn make(step: &build.Step) !void {
+ fn make(step: *build.Step) !void {
const self = @fieldParentPtr(RuntimeSafetyRunStep, "step", step);
const b = self.context.b;
@@ -382,7 +382,7 @@ pub const CompareOutputContext = struct {
}
};
- pub fn createExtra(self: &CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8, special: Special) TestCase {
+ pub fn createExtra(self: *CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8, special: Special) TestCase {
var tc = TestCase{
.name = name,
.sources = ArrayList(TestCase.SourceFile).init(self.b.allocator),
@@ -396,32 +396,32 @@ pub const CompareOutputContext = struct {
return tc;
}
- pub fn create(self: &CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) TestCase {
+ pub fn create(self: *CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) TestCase {
return createExtra(self, name, source, expected_output, Special.None);
}
- pub fn addC(self: &CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) void {
+ pub fn addC(self: *CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) void {
var tc = self.create(name, source, expected_output);
tc.link_libc = true;
self.addCase(tc);
}
- pub fn add(self: &CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) void {
+ pub fn add(self: *CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) void {
const tc = self.create(name, source, expected_output);
self.addCase(tc);
}
- pub fn addAsm(self: &CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) void {
+ pub fn addAsm(self: *CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) void {
const tc = self.createExtra(name, source, expected_output, Special.Asm);
self.addCase(tc);
}
- pub fn addRuntimeSafety(self: &CompareOutputContext, name: []const u8, source: []const u8) void {
+ pub fn addRuntimeSafety(self: *CompareOutputContext, name: []const u8, source: []const u8) void {
const tc = self.createExtra(name, source, undefined, Special.RuntimeSafety);
self.addCase(tc);
}
- pub fn addCase(self: &CompareOutputContext, case: &const TestCase) void {
+ pub fn addCase(self: *CompareOutputContext, case: *const TestCase) void {
const b = self.b;
const root_src = os.path.join(b.allocator, b.cache_root, case.sources.items[0].filename) catch unreachable;
@@ -504,8 +504,8 @@ pub const CompareOutputContext = struct {
};
pub const CompileErrorContext = struct {
- b: &build.Builder,
- step: &build.Step,
+ b: *build.Builder,
+ step: *build.Step,
test_index: usize,
test_filter: ?[]const u8,
@@ -521,27 +521,27 @@ pub const CompileErrorContext = struct {
source: []const u8,
};
- pub fn addSourceFile(self: &TestCase, filename: []const u8, source: []const u8) void {
+ pub fn addSourceFile(self: *TestCase, filename: []const u8, source: []const u8) void {
self.sources.append(SourceFile{
.filename = filename,
.source = source,
}) catch unreachable;
}
- pub fn addExpectedError(self: &TestCase, text: []const u8) void {
+ pub fn addExpectedError(self: *TestCase, text: []const u8) void {
self.expected_errors.append(text) catch unreachable;
}
};
const CompileCmpOutputStep = struct {
step: build.Step,
- context: &CompileErrorContext,
+ context: *CompileErrorContext,
name: []const u8,
test_index: usize,
- case: &const TestCase,
+ case: *const TestCase,
build_mode: Mode,
- pub fn create(context: &CompileErrorContext, name: []const u8, case: &const TestCase, build_mode: Mode) &CompileCmpOutputStep {
+ pub fn create(context: *CompileErrorContext, name: []const u8, case: *const TestCase, build_mode: Mode) *CompileCmpOutputStep {
const allocator = context.b.allocator;
const ptr = allocator.create(CompileCmpOutputStep) catch unreachable;
ptr.* = CompileCmpOutputStep{
@@ -556,7 +556,7 @@ pub const CompileErrorContext = struct {
return ptr;
}
- fn make(step: &build.Step) !void {
+ fn make(step: *build.Step) !void {
const self = @fieldParentPtr(CompileCmpOutputStep, "step", step);
const b = self.context.b;
@@ -661,7 +661,7 @@ pub const CompileErrorContext = struct {
warn("\n");
}
- pub fn create(self: &CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) &TestCase {
+ pub fn create(self: *CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) *TestCase {
const tc = self.b.allocator.create(TestCase) catch unreachable;
tc.* = TestCase{
.name = name,
@@ -678,24 +678,24 @@ pub const CompileErrorContext = struct {
return tc;
}
- pub fn addC(self: &CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) void {
+ pub fn addC(self: *CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) void {
var tc = self.create(name, source, expected_lines);
tc.link_libc = true;
self.addCase(tc);
}
- pub fn addExe(self: &CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) void {
+ pub fn addExe(self: *CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) void {
var tc = self.create(name, source, expected_lines);
tc.is_exe = true;
self.addCase(tc);
}
- pub fn add(self: &CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) void {
+ pub fn add(self: *CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) void {
const tc = self.create(name, source, expected_lines);
self.addCase(tc);
}
- pub fn addCase(self: &CompileErrorContext, case: &const TestCase) void {
+ pub fn addCase(self: *CompileErrorContext, case: *const TestCase) void {
const b = self.b;
for ([]Mode{
@@ -720,20 +720,20 @@ pub const CompileErrorContext = struct {
};
pub const BuildExamplesContext = struct {
- b: &build.Builder,
- step: &build.Step,
+ b: *build.Builder,
+ step: *build.Step,
test_index: usize,
test_filter: ?[]const u8,
- pub fn addC(self: &BuildExamplesContext, root_src: []const u8) void {
+ pub fn addC(self: *BuildExamplesContext, root_src: []const u8) void {
self.addAllArgs(root_src, true);
}
- pub fn add(self: &BuildExamplesContext, root_src: []const u8) void {
+ pub fn add(self: *BuildExamplesContext, root_src: []const u8) void {
self.addAllArgs(root_src, false);
}
- pub fn addBuildFile(self: &BuildExamplesContext, build_file: []const u8) void {
+ pub fn addBuildFile(self: *BuildExamplesContext, build_file: []const u8) void {
const b = self.b;
const annotated_case_name = b.fmt("build {} (Debug)", build_file);
@@ -763,7 +763,7 @@ pub const BuildExamplesContext = struct {
self.step.dependOn(&log_step.step);
}
- pub fn addAllArgs(self: &BuildExamplesContext, root_src: []const u8, link_libc: bool) void {
+ pub fn addAllArgs(self: *BuildExamplesContext, root_src: []const u8, link_libc: bool) void {
const b = self.b;
for ([]Mode{
@@ -792,8 +792,8 @@ pub const BuildExamplesContext = struct {
};
pub const TranslateCContext = struct {
- b: &build.Builder,
- step: &build.Step,
+ b: *build.Builder,
+ step: *build.Step,
test_index: usize,
test_filter: ?[]const u8,
@@ -808,26 +808,26 @@ pub const TranslateCContext = struct {
source: []const u8,
};
- pub fn addSourceFile(self: &TestCase, filename: []const u8, source: []const u8) void {
+ pub fn addSourceFile(self: *TestCase, filename: []const u8, source: []const u8) void {
self.sources.append(SourceFile{
.filename = filename,
.source = source,
}) catch unreachable;
}
- pub fn addExpectedLine(self: &TestCase, text: []const u8) void {
+ pub fn addExpectedLine(self: *TestCase, text: []const u8) void {
self.expected_lines.append(text) catch unreachable;
}
};
const TranslateCCmpOutputStep = struct {
step: build.Step,
- context: &TranslateCContext,
+ context: *TranslateCContext,
name: []const u8,
test_index: usize,
- case: &const TestCase,
+ case: *const TestCase,
- pub fn create(context: &TranslateCContext, name: []const u8, case: &const TestCase) &TranslateCCmpOutputStep {
+ pub fn create(context: *TranslateCContext, name: []const u8, case: *const TestCase) *TranslateCCmpOutputStep {
const allocator = context.b.allocator;
const ptr = allocator.create(TranslateCCmpOutputStep) catch unreachable;
ptr.* = TranslateCCmpOutputStep{
@@ -841,7 +841,7 @@ pub const TranslateCContext = struct {
return ptr;
}
- fn make(step: &build.Step) !void {
+ fn make(step: *build.Step) !void {
const self = @fieldParentPtr(TranslateCCmpOutputStep, "step", step);
const b = self.context.b;
@@ -935,7 +935,7 @@ pub const TranslateCContext = struct {
warn("\n");
}
- pub fn create(self: &TranslateCContext, allow_warnings: bool, filename: []const u8, name: []const u8, source: []const u8, expected_lines: ...) &TestCase {
+ pub fn create(self: *TranslateCContext, allow_warnings: bool, filename: []const u8, name: []const u8, source: []const u8, expected_lines: ...) *TestCase {
const tc = self.b.allocator.create(TestCase) catch unreachable;
tc.* = TestCase{
.name = name,
@@ -951,22 +951,22 @@ pub const TranslateCContext = struct {
return tc;
}
- pub fn add(self: &TranslateCContext, name: []const u8, source: []const u8, expected_lines: ...) void {
+ pub fn add(self: *TranslateCContext, name: []const u8, source: []const u8, expected_lines: ...) void {
const tc = self.create(false, "source.h", name, source, expected_lines);
self.addCase(tc);
}
- pub fn addC(self: &TranslateCContext, name: []const u8, source: []const u8, expected_lines: ...) void {
+ pub fn addC(self: *TranslateCContext, name: []const u8, source: []const u8, expected_lines: ...) void {
const tc = self.create(false, "source.c", name, source, expected_lines);
self.addCase(tc);
}
- pub fn addAllowWarnings(self: &TranslateCContext, name: []const u8, source: []const u8, expected_lines: ...) void {
+ pub fn addAllowWarnings(self: *TranslateCContext, name: []const u8, source: []const u8, expected_lines: ...) void {
const tc = self.create(true, "source.h", name, source, expected_lines);
self.addCase(tc);
}
- pub fn addCase(self: &TranslateCContext, case: &const TestCase) void {
+ pub fn addCase(self: *TranslateCContext, case: *const TestCase) void {
const b = self.b;
const annotated_case_name = fmt.allocPrint(self.b.allocator, "translate-c {}", case.name) catch unreachable;
@@ -986,8 +986,8 @@ pub const TranslateCContext = struct {
};
pub const GenHContext = struct {
- b: &build.Builder,
- step: &build.Step,
+ b: *build.Builder,
+ step: *build.Step,
test_index: usize,
test_filter: ?[]const u8,
@@ -1001,27 +1001,27 @@ pub const GenHContext = struct {
source: []const u8,
};
- pub fn addSourceFile(self: &TestCase, filename: []const u8, source: []const u8) void {
+ pub fn addSourceFile(self: *TestCase, filename: []const u8, source: []const u8) void {
self.sources.append(SourceFile{
.filename = filename,
.source = source,
}) catch unreachable;
}
- pub fn addExpectedLine(self: &TestCase, text: []const u8) void {
+ pub fn addExpectedLine(self: *TestCase, text: []const u8) void {
self.expected_lines.append(text) catch unreachable;
}
};
const GenHCmpOutputStep = struct {
step: build.Step,
- context: &GenHContext,
+ context: *GenHContext,
h_path: []const u8,
name: []const u8,
test_index: usize,
- case: &const TestCase,
+ case: *const TestCase,
- pub fn create(context: &GenHContext, h_path: []const u8, name: []const u8, case: &const TestCase) &GenHCmpOutputStep {
+ pub fn create(context: *GenHContext, h_path: []const u8, name: []const u8, case: *const TestCase) *GenHCmpOutputStep {
const allocator = context.b.allocator;
const ptr = allocator.create(GenHCmpOutputStep) catch unreachable;
ptr.* = GenHCmpOutputStep{
@@ -1036,7 +1036,7 @@ pub const GenHContext = struct {
return ptr;
}
- fn make(step: &build.Step) !void {
+ fn make(step: *build.Step) !void {
const self = @fieldParentPtr(GenHCmpOutputStep, "step", step);
const b = self.context.b;
@@ -1069,7 +1069,7 @@ pub const GenHContext = struct {
warn("\n");
}
- pub fn create(self: &GenHContext, filename: []const u8, name: []const u8, source: []const u8, expected_lines: ...) &TestCase {
+ pub fn create(self: *GenHContext, filename: []const u8, name: []const u8, source: []const u8, expected_lines: ...) *TestCase {
const tc = self.b.allocator.create(TestCase) catch unreachable;
tc.* = TestCase{
.name = name,
@@ -1084,12 +1084,12 @@ pub const GenHContext = struct {
return tc;
}
- pub fn add(self: &GenHContext, name: []const u8, source: []const u8, expected_lines: ...) void {
+ pub fn add(self: *GenHContext, name: []const u8, source: []const u8, expected_lines: ...) void {
const tc = self.create("test.zig", name, source, expected_lines);
self.addCase(tc);
}
- pub fn addCase(self: &GenHContext, case: &const TestCase) void {
+ pub fn addCase(self: *GenHContext, case: *const TestCase) void {
const b = self.b;
const root_src = os.path.join(b.allocator, b.cache_root, case.sources.items[0].filename) catch unreachable;
diff --git a/test/translate_c.zig b/test/translate_c.zig
index 4cf1e047fa22..9a07bc343d26 100644
--- a/test/translate_c.zig
+++ b/test/translate_c.zig
@@ -1,6 +1,6 @@
const tests = @import("tests.zig");
-pub fn addCases(cases: &tests.TranslateCContext) void {
+pub fn addCases(cases: *tests.TranslateCContext) void {
cases.add("double define struct",
\\typedef struct Bar Bar;
\\typedef struct Foo Foo;
@@ -14,11 +14,11 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\};
,
\\pub const struct_Foo = extern struct {
- \\ a: ?&Foo,
+ \\ a: ?*Foo,
\\};
\\pub const Foo = struct_Foo;
\\pub const struct_Bar = extern struct {
- \\ a: ?&Foo,
+ \\ a: ?*Foo,
\\};
);
@@ -99,7 +99,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
cases.add("restrict -> noalias",
\\void foo(void *restrict bar, void *restrict);
,
- \\pub extern fn foo(noalias bar: ?&c_void, noalias arg1: ?&c_void) void;
+ \\pub extern fn foo(noalias bar: ?*c_void, noalias arg1: ?*c_void) void;
);
cases.add("simple struct",
@@ -110,7 +110,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
,
\\const struct_Foo = extern struct {
\\ x: c_int,
- \\ y: ?&u8,
+ \\ y: ?*u8,
\\};
,
\\pub const Foo = struct_Foo;
@@ -141,7 +141,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
,
\\pub const BarB = enum_Bar.B;
,
- \\pub extern fn func(a: ?&struct_Foo, b: ?&(?&enum_Bar)) void;
+ \\pub extern fn func(a: ?*struct_Foo, b: ?*(?*enum_Bar)) void;
,
\\pub const Foo = struct_Foo;
,
@@ -151,7 +151,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
cases.add("constant size array",
\\void func(int array[20]);
,
- \\pub extern fn func(array: ?&c_int) void;
+ \\pub extern fn func(array: ?*c_int) void;
);
cases.add("self referential struct with function pointer",
@@ -160,7 +160,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\};
,
\\pub const struct_Foo = extern struct {
- \\ derp: ?extern fn(?&struct_Foo) void,
+ \\ derp: ?extern fn(?*struct_Foo) void,
\\};
,
\\pub const Foo = struct_Foo;
@@ -172,7 +172,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
,
\\pub const struct_Foo = @OpaqueType();
,
- \\pub extern fn some_func(foo: ?&struct_Foo, x: c_int) ?&struct_Foo;
+ \\pub extern fn some_func(foo: ?*struct_Foo, x: c_int) ?*struct_Foo;
,
\\pub const Foo = struct_Foo;
);
@@ -219,11 +219,11 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\};
,
\\pub const struct_Bar = extern struct {
- \\ next: ?&struct_Foo,
+ \\ next: ?*struct_Foo,
\\};
,
\\pub const struct_Foo = extern struct {
- \\ next: ?&struct_Bar,
+ \\ next: ?*struct_Bar,
\\};
);
@@ -233,7 +233,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
,
\\pub const Foo = c_void;
,
- \\pub extern fn fun(a: ?&Foo) Foo;
+ \\pub extern fn fun(a: ?*Foo) Foo;
);
cases.add("generate inline func for #define global extern fn",
@@ -505,7 +505,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ return 6;
\\}
,
- \\pub export fn and_or_none_bool(a: c_int, b: f32, c: ?&c_void) c_int {
+ \\pub export fn and_or_none_bool(a: c_int, b: f32, c: ?*c_void) c_int {
\\ if ((a != 0) and (b != 0)) return 0;
\\ if ((b != 0) and (c != null)) return 1;
\\ if ((a != 0) and (c != null)) return 2;
@@ -607,7 +607,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\pub const struct_Foo = extern struct {
\\ field: c_int,
\\};
- \\pub export fn read_field(foo: ?&struct_Foo) c_int {
+ \\pub export fn read_field(foo: ?*struct_Foo) c_int {
\\ return (??foo).field;
\\}
);
@@ -653,8 +653,8 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ return x;
\\}
,
- \\pub export fn foo(x: ?&c_ushort) ?&c_void {
- \\ return @ptrCast(?&c_void, x);
+ \\pub export fn foo(x: ?*c_ushort) ?*c_void {
+ \\ return @ptrCast(?*c_void, x);
\\}
);
@@ -674,7 +674,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ return 0;
\\}
,
- \\pub export fn foo() ?&c_int {
+ \\pub export fn foo() ?*c_int {
\\ return null;
\\}
);
@@ -983,7 +983,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ *x = 1;
\\}
,
- \\pub export fn foo(x: ?&c_int) void {
+ \\pub export fn foo(x: ?*c_int) void {
\\ (??x).* = 1;
\\}
);
@@ -1011,7 +1011,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
,
\\pub fn foo() c_int {
\\ var x: c_int = 1234;
- \\ var ptr: ?&c_int = &x;
+ \\ var ptr: ?*c_int = &x;
\\ return (??ptr).*;
\\}
);
@@ -1021,7 +1021,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ return "bar";
\\}
,
- \\pub fn foo() ?&const u8 {
+ \\pub fn foo() ?*const u8 {
\\ return c"bar";
\\}
);
@@ -1150,8 +1150,8 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ return (float *)a;
\\}
,
- \\fn ptrcast(a: ?&c_int) ?&f32 {
- \\ return @ptrCast(?&f32, a);
+ \\fn ptrcast(a: ?*c_int) ?*f32 {
+ \\ return @ptrCast(?*f32, a);
\\}
);
@@ -1173,7 +1173,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ return !c;
\\}
,
- \\pub fn foo(a: c_int, b: f32, c: ?&c_void) c_int {
+ \\pub fn foo(a: c_int, b: f32, c: ?*c_void) c_int {
\\ return !(a == 0);
\\ return !(a != 0);
\\ return !(b != 0);
@@ -1194,7 +1194,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
cases.add("const ptr initializer",
\\static const char *v0 = "0.0.0";
,
- \\pub var v0: ?&const u8 = c"0.0.0";
+ \\pub var v0: ?*const u8 = c"0.0.0";
);
cases.add("static incomplete array inside function",
@@ -1203,14 +1203,14 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\}
,
\\pub fn foo() void {
- \\ const v2: &const u8 = c"2.2.2";
+ \\ const v2: *const u8 = c"2.2.2";
\\}
);
cases.add("macro pointer cast",
\\#define NRF_GPIO ((NRF_GPIO_Type *) NRF_GPIO_BASE)
,
- \\pub const NRF_GPIO = if (@typeId(@typeOf(NRF_GPIO_BASE)) == @import("builtin").TypeId.Pointer) @ptrCast(&NRF_GPIO_Type, NRF_GPIO_BASE) else if (@typeId(@typeOf(NRF_GPIO_BASE)) == @import("builtin").TypeId.Int) @intToPtr(&NRF_GPIO_Type, NRF_GPIO_BASE) else (&NRF_GPIO_Type)(NRF_GPIO_BASE);
+ \\pub const NRF_GPIO = if (@typeId(@typeOf(NRF_GPIO_BASE)) == @import("builtin").TypeId.Pointer) @ptrCast(*NRF_GPIO_Type, NRF_GPIO_BASE) else if (@typeId(@typeOf(NRF_GPIO_BASE)) == @import("builtin").TypeId.Int) @intToPtr(*NRF_GPIO_Type, NRF_GPIO_BASE) else (*NRF_GPIO_Type)(NRF_GPIO_BASE);
);
cases.add("if on none bool",
@@ -1231,7 +1231,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ B,
\\ C,
\\};
- \\pub fn if_none_bool(a: c_int, b: f32, c: ?&c_void, d: enum_SomeEnum) c_int {
+ \\pub fn if_none_bool(a: c_int, b: f32, c: ?*c_void, d: enum_SomeEnum) c_int {
\\ if (a != 0) return 0;
\\ if (b != 0) return 1;
\\ if (c != null) return 2;
@@ -1248,7 +1248,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ return 3;
\\}
,
- \\pub fn while_none_bool(a: c_int, b: f32, c: ?&c_void) c_int {
+ \\pub fn while_none_bool(a: c_int, b: f32, c: ?*c_void) c_int {
\\ while (a != 0) return 0;
\\ while (b != 0) return 1;
\\ while (c != null) return 2;
@@ -1264,7 +1264,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void {
\\ return 3;
\\}
,
- \\pub fn for_none_bool(a: c_int, b: f32, c: ?&c_void) c_int {
+ \\pub fn for_none_bool(a: c_int, b: f32, c: ?*c_void) c_int {
\\ while (a != 0) return 0;
\\ while (b != 0) return 1;
\\ while (c != null) return 2;