
Commit f887b02

Merge pull request #16359 from g-w1/plan9-more-std
Plan 9: more standard library support
2 parents 31979b1 + d0fbfd3 commit f887b02

9 files changed: +471 −119 lines

doc/langref.html.in

Lines changed: 2 additions & 1 deletion
@@ -8467,9 +8467,10 @@ export fn @"A function name that is a complete sentence."() void {}
 {#header_close#}
 
 {#header_open|@extern#}
-<pre>{#syntax#}@extern(T: type, comptime options: std.builtin.ExternOptions) *T{#endsyntax#}</pre>
+<pre>{#syntax#}@extern(T: type, comptime options: std.builtin.ExternOptions) T{#endsyntax#}</pre>
 <p>
 Creates a reference to an external symbol in the output object file.
+T must be a pointer type.
 </p>
 {#see_also|@export#}
 {#header_close#}
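
For context, a minimal sketch of how the corrected signature reads at a call site; the symbol name some_external_counter is hypothetical and not part of this commit:

    fn readCounter() i32 {
        // @extern returns a value of type T directly, and T must be a pointer type.
        const counter_ptr = @extern(*i32, .{ .name = "some_external_counter" });
        return counter_ptr.*;
    }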

lib/std/fs.zig

Lines changed: 6 additions & 4 deletions
@@ -39,7 +39,7 @@ pub const Watch = @import("fs/watch.zig").Watch;
 /// fit into a UTF-8 encoded array of this length.
 /// The byte count includes room for a null sentinel byte.
 pub const MAX_PATH_BYTES = switch (builtin.os.tag) {
-    .linux, .macos, .ios, .freebsd, .openbsd, .netbsd, .dragonfly, .haiku, .solaris => os.PATH_MAX,
+    .linux, .macos, .ios, .freebsd, .openbsd, .netbsd, .dragonfly, .haiku, .solaris, .plan9 => os.PATH_MAX,
     // Each UTF-16LE character may be expanded to 3 UTF-8 bytes.
     // If it would require 4 UTF-8 bytes, then there would be a surrogate
     // pair in the UTF-16LE, and we (over)account 3 bytes for it that way.
@@ -1160,7 +1160,9 @@ pub const Dir = struct {
             return self.openFileW(path_w.span(), flags);
         }
 
-        var os_flags: u32 = os.O.CLOEXEC;
+        var os_flags: u32 = 0;
+        if (@hasDecl(os.O, "CLOEXEC")) os_flags = os.O.CLOEXEC;
+
         // Use the O locking flags if the os supports them to acquire the lock
         // atomically.
         const has_flock_open_flags = @hasDecl(os.O, "EXLOCK");
@@ -1180,7 +1182,7 @@
         if (@hasDecl(os.O, "LARGEFILE")) {
             os_flags |= os.O.LARGEFILE;
         }
-        if (!flags.allow_ctty) {
+        if (@hasDecl(os.O, "NOCTTY") and !flags.allow_ctty) {
             os_flags |= os.O.NOCTTY;
         }
         os_flags |= switch (flags.mode) {
@@ -1196,7 +1198,7 @@
 
         // WASI doesn't have os.flock so we intentionally check OS prior to the inner if block
         // since it is not compile-time-known and we need to avoid undefined symbol in Wasm.
-        if (builtin.target.os.tag != .wasi) {
+        if (@hasDecl(os.system, "LOCK") and builtin.target.os.tag != .wasi) {
             if (!has_flock_open_flags and flags.lock != .none) {
                 // TODO: integrate async I/O
                 const lock_nonblocking = if (flags.lock_nonblocking) os.LOCK.NB else @as(i32, 0);
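
The fs.zig changes all follow one pattern: gate each POSIX-only flag or symbol behind @hasDecl so that targets such as Plan 9, whose os.O namespace lacks those declarations, still compile. A minimal sketch of the same pattern outside fs.zig (baseOpenFlags is illustrative, not part of the commit):

    const std = @import("std");
    const os = std.os;

    fn baseOpenFlags(allow_ctty: bool) u32 {
        var flags: u32 = 0;
        // Only reference os.O.CLOEXEC / os.O.NOCTTY when the target declares them.
        if (@hasDecl(os.O, "CLOEXEC")) flags |= os.O.CLOEXEC;
        if (@hasDecl(os.O, "NOCTTY") and !allow_ctty) flags |= os.O.NOCTTY;
        return flags;
    }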

lib/std/heap.zig

Lines changed: 6 additions & 0 deletions
@@ -21,6 +21,7 @@ pub const WasmAllocator = @import("heap/WasmAllocator.zig");
 pub const WasmPageAllocator = @import("heap/WasmPageAllocator.zig");
 pub const PageAllocator = @import("heap/PageAllocator.zig");
 pub const ThreadSafeAllocator = @import("heap/ThreadSafeAllocator.zig");
+pub const SbrkAllocator = @import("heap/sbrk_allocator.zig").SbrkAllocator;
 
 const memory_pool = @import("heap/memory_pool.zig");
 pub const MemoryPool = memory_pool.MemoryPool;
@@ -228,6 +229,11 @@ pub const page_allocator = if (builtin.target.isWasm())
         .ptr = undefined,
         .vtable = &WasmPageAllocator.vtable,
     }
+else if (builtin.target.os.tag == .plan9)
+    Allocator{
+        .ptr = undefined,
+        .vtable = &SbrkAllocator(std.os.plan9.sbrk).vtable,
+    }
 else if (builtin.target.os.tag == .freestanding)
     root.os.heap.page_allocator
 else
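
With this branch in place, ordinary page_allocator usage needs no Plan 9-specific code at the call site. A minimal sketch (the main function is illustrative, not part of the commit):

    const std = @import("std");

    pub fn main() !void {
        // On Plan 9 this now resolves to the sbrk-backed allocator selected above;
        // on other targets it keeps its existing backing allocator.
        const gpa = std.heap.page_allocator;
        const buf = try gpa.alloc(u8, 1024);
        defer gpa.free(buf);
    }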

lib/std/heap/sbrk_allocator.zig

Lines changed: 161 additions & 0 deletions
@@ -0,0 +1,161 @@
const std = @import("../std.zig");
const builtin = @import("builtin");
const math = std.math;
const Allocator = std.mem.Allocator;
const mem = std.mem;
const assert = std.debug.assert;

pub fn SbrkAllocator(comptime sbrk: *const fn (n: usize) usize) type {
    return struct {
        pub const vtable = Allocator.VTable{
            .alloc = alloc,
            .resize = resize,
            .free = free,
        };

        pub const Error = Allocator.Error;

        lock: std.Thread.Mutex = .{},

        const max_usize = math.maxInt(usize);
        const ushift = math.Log2Int(usize);
        const bigpage_size = 64 * 1024;
        const pages_per_bigpage = bigpage_size / mem.page_size;
        const bigpage_count = max_usize / bigpage_size;

        /// Because of storing free list pointers, the minimum size class is 3.
        const min_class = math.log2(math.ceilPowerOfTwoAssert(usize, 1 + @sizeOf(usize)));
        const size_class_count = math.log2(bigpage_size) - min_class;
        /// 0 - 1 bigpage
        /// 1 - 2 bigpages
        /// 2 - 4 bigpages
        /// etc.
        const big_size_class_count = math.log2(bigpage_count);

        var next_addrs = [1]usize{0} ** size_class_count;
        /// For each size class, points to the freed pointer.
        var frees = [1]usize{0} ** size_class_count;
        /// For each big size class, points to the freed pointer.
        var big_frees = [1]usize{0} ** big_size_class_count;

        // TODO don't do the naive locking strategy
        var lock: std.Thread.Mutex = .{};
        fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, return_address: usize) ?[*]u8 {
            _ = ctx;
            _ = return_address;
            lock.lock();
            defer lock.unlock();
            // Make room for the freelist next pointer.
            const alignment = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_align));
            const actual_len = @max(len +| @sizeOf(usize), alignment);
            const slot_size = math.ceilPowerOfTwo(usize, actual_len) catch return null;
            const class = math.log2(slot_size) - min_class;
            if (class < size_class_count) {
                const addr = a: {
                    const top_free_ptr = frees[class];
                    if (top_free_ptr != 0) {
                        const node = @as(*usize, @ptrFromInt(top_free_ptr + (slot_size - @sizeOf(usize))));
                        frees[class] = node.*;
                        break :a top_free_ptr;
                    }

                    const next_addr = next_addrs[class];
                    if (next_addr % mem.page_size == 0) {
                        const addr = allocBigPages(1);
                        if (addr == 0) return null;
                        //std.debug.print("allocated fresh slot_size={d} class={d} addr=0x{x}\n", .{
                        //    slot_size, class, addr,
                        //});
                        next_addrs[class] = addr + slot_size;
                        break :a addr;
                    } else {
                        next_addrs[class] = next_addr + slot_size;
                        break :a next_addr;
                    }
                };
                return @as([*]u8, @ptrFromInt(addr));
            }
            const bigpages_needed = bigPagesNeeded(actual_len);
            const addr = allocBigPages(bigpages_needed);
            return @as([*]u8, @ptrFromInt(addr));
        }

        fn resize(
            ctx: *anyopaque,
            buf: []u8,
            log2_buf_align: u8,
            new_len: usize,
            return_address: usize,
        ) bool {
            _ = ctx;
            _ = return_address;
            lock.lock();
            defer lock.unlock();
            // We don't want to move anything from one size class to another, but we
            // can recover bytes in between powers of two.
            const buf_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_buf_align));
            const old_actual_len = @max(buf.len + @sizeOf(usize), buf_align);
            const new_actual_len = @max(new_len +| @sizeOf(usize), buf_align);
            const old_small_slot_size = math.ceilPowerOfTwoAssert(usize, old_actual_len);
            const old_small_class = math.log2(old_small_slot_size) - min_class;
            if (old_small_class < size_class_count) {
                const new_small_slot_size = math.ceilPowerOfTwo(usize, new_actual_len) catch return false;
                return old_small_slot_size == new_small_slot_size;
            } else {
                const old_bigpages_needed = bigPagesNeeded(old_actual_len);
                const old_big_slot_pages = math.ceilPowerOfTwoAssert(usize, old_bigpages_needed);
                const new_bigpages_needed = bigPagesNeeded(new_actual_len);
                const new_big_slot_pages = math.ceilPowerOfTwo(usize, new_bigpages_needed) catch return false;
                return old_big_slot_pages == new_big_slot_pages;
            }
        }

        fn free(
            ctx: *anyopaque,
            buf: []u8,
            log2_buf_align: u8,
            return_address: usize,
        ) void {
            _ = ctx;
            _ = return_address;
            lock.lock();
            defer lock.unlock();
            const buf_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_buf_align));
            const actual_len = @max(buf.len + @sizeOf(usize), buf_align);
            const slot_size = math.ceilPowerOfTwoAssert(usize, actual_len);
            const class = math.log2(slot_size) - min_class;
            const addr = @intFromPtr(buf.ptr);
            if (class < size_class_count) {
                const node = @as(*usize, @ptrFromInt(addr + (slot_size - @sizeOf(usize))));
                node.* = frees[class];
                frees[class] = addr;
            } else {
                const bigpages_needed = bigPagesNeeded(actual_len);
                const pow2_pages = math.ceilPowerOfTwoAssert(usize, bigpages_needed);
                const big_slot_size_bytes = pow2_pages * bigpage_size;
                const node = @as(*usize, @ptrFromInt(addr + (big_slot_size_bytes - @sizeOf(usize))));
                const big_class = math.log2(pow2_pages);
                node.* = big_frees[big_class];
                big_frees[big_class] = addr;
            }
        }

        inline fn bigPagesNeeded(byte_count: usize) usize {
            return (byte_count + (bigpage_size + (@sizeOf(usize) - 1))) / bigpage_size;
        }

        fn allocBigPages(n: usize) usize {
            const pow2_pages = math.ceilPowerOfTwoAssert(usize, n);
            const slot_size_bytes = pow2_pages * bigpage_size;
            const class = math.log2(pow2_pages);

            const top_free_ptr = big_frees[class];
            if (top_free_ptr != 0) {
                const node = @as(*usize, @ptrFromInt(top_free_ptr + (slot_size_bytes - @sizeOf(usize))));
                big_frees[class] = node.*;
                return top_free_ptr;
            }
            return sbrk(pow2_pages * pages_per_bigpage * mem.page_size);
        }
    };
}
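
SbrkAllocator is generic over any function with the signature fn (n: usize) usize that returns a new region's base address or 0 on failure; the Plan 9 build passes std.os.plan9.sbrk (see lib/std/heap.zig above). A minimal sketch with a hypothetical bump-style sbrk backed by a static buffer; heap_buf, heap_end, and bufSbrk are illustrative, not part of the commit:

    const std = @import("std");
    const mem = std.mem;

    // Page-aligned backing storage for the fake sbrk.
    var heap_buf: [8 * 64 * 1024]u8 align(mem.page_size) = undefined;
    var heap_end: usize = 0;

    fn bufSbrk(n: usize) usize {
        // Return 0 on exhaustion, as the allocator expects.
        if (heap_end + n > heap_buf.len) return 0;
        const addr = @intFromPtr(&heap_buf) + heap_end;
        heap_end += n;
        return addr;
    }

    const BufSbrkAllocator = std.heap.SbrkAllocator(bufSbrk);

    test "allocate through a custom sbrk" {
        const gpa = std.mem.Allocator{
            .ptr = undefined,
            .vtable = &BufSbrkAllocator.vtable,
        };
        const slice = try gpa.alloc(u32, 16);
        defer gpa.free(slice);
    }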
