From 5055cea15b060bfaa10ad87a6c2db002c11c94db Mon Sep 17 00:00:00 2001 From: DraagrenKirneh Date: Fri, 14 Apr 2023 14:11:00 +0200 Subject: [PATCH 01/13] save wip --- src/Manifest.zig | 54 +++++++++++++ src/main.zig | 196 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 250 insertions(+) diff --git a/src/Manifest.zig b/src/Manifest.zig index 068a14942f50..a9e09a6728c2 100644 --- a/src/Manifest.zig +++ b/src/Manifest.zig @@ -1,6 +1,60 @@ pub const basename = "build.zig.zon"; pub const Hash = std.crypto.hash.sha2.Sha256; +const Rule = union(enum) { + text: []const u8, + wildcard: void, +}; + +const Pattern = struct { + min_size: u32, + rules: []const Rule, + + const Self = @This(); + + pub fn matches(self: Self, input: []const u8) bool { + if (input.len < self.min_size) return false; + var offset: usize = 0; + var wildcard = false; + for (self.rules) |rule| { + switch (rule) { + .text => |text| { + if (wildcard) { + if (std.mem.indexOf(u8, input[offset..], text)) |index| { + offset += index + text.len; + wildcard = false; + } else { + return false; + } + } else { + if (!std.mem.startsWith(u8, input[offset..], text)) { + return false; + } + offset += text.len; + } + }, + .wildcard => wildcard = true, + } + } + return wildcard or offset == input.len; + } +}; + +test "patterns" { + const pattern = Pattern{ + .min_size = 6, + .rules = &.{ + .{ .text = "abc" }, + .{ .wildcard = {} }, + .{ .text = "def" }, + }, + }; + + try testing.expect(pattern.matches("abc_def")); + try testing.expect(!pattern.matches("abdef")); + try testing.expect(!pattern.matches("abcdef_")); +} + pub const Dependency = struct { url: []const u8, url_tok: Ast.TokenIndex, diff --git a/src/main.zig b/src/main.zig index d996972e8972..2fa28ff0b6fa 100644 --- a/src/main.zig +++ b/src/main.zig @@ -298,6 +298,8 @@ pub fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi return cmdBuild(gpa, arena, cmd_args); } else if (mem.eql(u8, cmd, "fmt")) { return cmdFmt(gpa, 
arena, cmd_args); + } else if (mem.eql(u8, cmd, "pkg")) { + return cmdPkg(gpa, arena, cmd_args); } else if (mem.eql(u8, cmd, "objcopy")) { return @import("objcopy.zig").cmdObjCopy(gpa, arena, cmd_args); } else if (mem.eql(u8, cmd, "libc")) { @@ -4118,6 +4120,200 @@ pub fn cmdInit( } } +const PackageCommand = enum { + add, + remove, + check, + update, + fetch, +}; + +const PackageOptions = struct { + @"--cache-dir": []const u8, + @"--global-cache-dir": []const u8, +}; + +pub fn cmdPkg(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { + if (args.len == 0) fatal("expected sub argument after pkg", .{}); + + const command = blk: { + const command_arg = args[0]; + inline for (std.meta.fieldNames(PackageCommand)) |name| { + if (mem.eql(u8, command_arg, name)) { + break :blk std.meta.stringToEnum(PackageCommand, name).?; + } + } + fatal("expected valid sub type argument found: {s}", + .{command_arg}); + }; + + return switch (command) { + .add => @panic("Not implemented"), + .remove => @panic("not implemented"), + .update => @panic("not implemented"), + .check => @panic("not implemented"), + .fetch => sub_cmd_pkg_fetch(gpa, arena), + }; +} + +pub fn sub_cmd_pkg_fetch(gpa: Allocator, arena: Allocator) !void { + var color: Color = .auto; + const self_exe_path = try introspect.findZigExePath(arena); + + var http_client: std.http.Client = .{ .allocator = gpa }; + defer http_client.deinit(); + try http_client.rescanRootCertificates(); + + // Here we provide an import to the build runner that allows using reflection to find + // all of the dependencies. Without this, there would be no way to use `@import` to + // access dependencies by name, since `@import` requires string literals. + var dependencies_source = std.ArrayList(u8).init(gpa); + defer dependencies_source.deinit(); + try dependencies_source.appendSlice("pub const imports = struct {\n"); + + // This will go into the same package. It contains the file system paths + // to all the build.zig files. 
+ var build_roots_source = std.ArrayList(u8).init(gpa); + defer build_roots_source.deinit(); + + var thread_pool: ThreadPool = undefined; + try thread_pool.init(.{ .allocator = gpa }); + defer thread_pool.deinit(); + + var cleanup_build_runner_dir: ?fs.Dir = null; + defer if (cleanup_build_runner_dir) |*dir| dir.close(); + const cwd_path = try process.getCwdAlloc(arena); + var zig_lib_directory: Compilation.Directory = if (false) |lib_dir| .{ + .path = lib_dir, + .handle = fs.cwd().openDir(lib_dir, .{}) catch |err| { + fatal("unable to open zig lib directory from 'zig-lib-dir' argument: '{s}': {s}", .{ lib_dir, @errorName(err) }); + }, + } else introspect.findZigLibDirFromSelfExe(arena, self_exe_path) catch |err| { + fatal("unable to find zig installation directory '{s}': {s}", .{ self_exe_path, @errorName(err) }); + }; + defer zig_lib_directory.handle.close(); + + var main_pkg: Package = .{ + .root_src_directory = zig_lib_directory, + .root_src_path = "build_runner.zig", + }; + + var build_pkg: Package = .{ + .root_src_directory = "build_directory", + .root_src_path = "build_zig_basename", + }; + var build_file: ?[]const u8 = null; + var cleanup_build_dir: ?fs.Dir = null; + defer if (cleanup_build_dir) |*dir| dir.close(); + const build_zig_basename = if (build_file) |bf| fs.path.basename(bf) else "build.zig"; + const build_directory: Compilation.Directory = blk: { + if (build_file) |bf| { + if (fs.path.dirname(bf)) |dirname| { + const dir = fs.cwd().openDir(dirname, .{}) catch |err| { + fatal("unable to open directory to build file from argument 'build-file', '{s}': {s}", .{ dirname, @errorName(err) }); + }; + cleanup_build_dir = dir; + break :blk .{ .path = dirname, .handle = dir }; + } + + break :blk .{ .path = null, .handle = fs.cwd() }; + } + // Search up parent directories until we find build.zig. 
+ var dirname: []const u8 = cwd_path; + while (true) { + const joined_path = try fs.path.join(arena, &[_][]const u8{ dirname, build_zig_basename }); + if (fs.cwd().access(joined_path, .{})) |_| { + const dir = fs.cwd().openDir(dirname, .{}) catch |err| { + fatal("unable to open directory while searching for build.zig file, '{s}': {s}", .{ dirname, @errorName(err) }); + }; + break :blk .{ .path = dirname, .handle = dir }; + } else |err| switch (err) { + error.FileNotFound => { + dirname = fs.path.dirname(dirname) orelse { + std.log.info("{s}", .{ + \\Initialize a 'build.zig' template file with `zig init-lib` or `zig init-exe`, + \\or see `zig --help` for more options. + }); + fatal("No 'build.zig' file found, in the current directory or any parent directories.", .{}); + }; + continue; + }, + else => |e| return e, + } + } + }; + var override_global_cache_dir: ?[]const u8 = try optionalStringEnvVar(arena, "ZIG_GLOBAL_CACHE_DIR"); + var global_cache_directory: Compilation.Directory = l: { + const p = override_global_cache_dir orelse try introspect.resolveGlobalCacheDir(arena); + break :l .{ + .handle = try fs.cwd().makeOpenPath(p, .{}), + .path = p, + }; + }; + var override_local_cache_dir: ?[]const u8 = try optionalStringEnvVar(arena, "ZIG_LOCAL_CACHE_DIR"); + defer global_cache_directory.handle.close(); + var local_cache_directory: Compilation.Directory = l: { + if (override_local_cache_dir) |local_cache_dir_path| { + break :l .{ + .handle = try fs.cwd().makeOpenPath(local_cache_dir_path, .{}), + .path = local_cache_dir_path, + }; + } + const cache_dir_path = try build_directory.join(arena, &[_][]const u8{"zig-cache"}); + break :l .{ + .handle = try build_directory.handle.makeOpenPath("zig-cache", .{}), + .path = cache_dir_path, + }; + }; + defer local_cache_directory.handle.close(); + // Here we borrow main package's table and will replace it with a fresh + // one after this process completes. 
+ + var wip_errors: std.zig.ErrorBundle.Wip = undefined; + try wip_errors.init(gpa); + defer wip_errors.deinit(); + + var all_modules: Package.AllModules = .{}; + defer all_modules.deinit(gpa); + + const fetch_result = build_pkg.fetchAndAddDependencies( + &main_pkg, + arena, + &thread_pool, + &http_client, + build_directory, + global_cache_directory, + local_cache_directory, + &dependencies_source, + &build_roots_source, + "", + &wip_errors, + &all_modules, + ); + + if (wip_errors.root_list.items.len > 0) { + var errors = try wip_errors.toOwnedBundle(""); + defer errors.deinit(gpa); + errors.renderToStdErr(renderOptions(color)); + process.exit(1); + } + try fetch_result; + + try dependencies_source.appendSlice("};\npub const build_root = struct {\n"); + try dependencies_source.appendSlice(build_roots_source.items); + try dependencies_source.appendSlice("};\n"); + + const deps_pkg = try Package.createFilePkg( + gpa, + local_cache_directory, + "dependencies.zig", + dependencies_source.items, + ); + + mem.swap(Package.Table, &main_pkg.table, &deps_pkg.table); + try main_pkg.addAndAdopt(gpa, "@dependencies", deps_pkg); +} + pub const usage_build = \\Usage: zig build [steps] [options] \\ From aa4b71609e46fc6ac91ad78b95ab2c9869abf0bb Mon Sep 17 00:00:00 2001 From: DraagrenKirneh Date: Sat, 15 Apr 2023 23:44:51 +0200 Subject: [PATCH 02/13] save --- .gitignore | 1 + lib/fetch_runner.zig | 1044 ++++++++++++++++++++++++++++++++++++++++++ src/Manifest.zig | 54 --- src/Package.zig | 274 +++++++++++ src/main.zig | 45 +- 5 files changed, 1335 insertions(+), 83 deletions(-) create mode 100644 lib/fetch_runner.zig diff --git a/.gitignore b/.gitignore index feda423c10de..63d5ea718078 100644 --- a/.gitignore +++ b/.gitignore @@ -16,3 +16,4 @@ zig-out/ /build/ /build-*/ /docgen_tmp/ +stage3/ \ No newline at end of file diff --git a/lib/fetch_runner.zig b/lib/fetch_runner.zig new file mode 100644 index 000000000000..7004ede67e07 --- /dev/null +++ b/lib/fetch_runner.zig @@ -0,0 +1,1044 
@@ +const root = @import("@build"); +const std = @import("std"); +const builtin = @import("builtin"); +const assert = std.debug.assert; +const io = std.io; +const fmt = std.fmt; +const mem = std.mem; +const process = std.process; +const ArrayList = std.ArrayList; +const File = std.fs.File; +const Step = std.Build.Step; + +pub const dependencies = @import("@dependencies"); + +pub fn main() !void { + // Here we use an ArenaAllocator backed by a DirectAllocator because a fetch is a short-lived, + // one shot program. We don't need to waste time freeing memory and finding places to squish + // bytes into. So we free everything all at once at the very end. + var single_threaded_arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); + defer single_threaded_arena.deinit(); + + var thread_safe_arena: std.heap.ThreadSafeAllocator = .{ + .child_allocator = single_threaded_arena.allocator(), + }; + const arena = thread_safe_arena.allocator(); + + var args = try process.argsAlloc(arena); + + // skip my own exe name + var arg_idx: usize = 1; + + const zig_exe = nextArg(args, &arg_idx) orelse { + std.debug.print("Expected path to zig compiler\n", .{}); + return error.InvalidArgs; + }; + const build_root = nextArg(args, &arg_idx) orelse { + std.debug.print("Expected build root directory path\n", .{}); + return error.InvalidArgs; + }; + const cache_root = nextArg(args, &arg_idx) orelse { + std.debug.print("Expected cache root directory path\n", .{}); + return error.InvalidArgs; + }; + const global_cache_root = nextArg(args, &arg_idx) orelse { + std.debug.print("Expected global cache root directory path\n", .{}); + return error.InvalidArgs; + }; + + const host = try std.zig.system.NativeTargetInfo.detect(.{}); + + const build_root_directory: std.Build.Cache.Directory = .{ + .path = build_root, + .handle = try std.fs.cwd().openDir(build_root, .{}), + }; + + const local_cache_directory: std.Build.Cache.Directory = .{ + .path = cache_root, + .handle = try 
std.fs.cwd().makeOpenPath(cache_root, .{}), + }; + + const global_cache_directory: std.Build.Cache.Directory = .{ + .path = global_cache_root, + .handle = try std.fs.cwd().makeOpenPath(global_cache_root, .{}), + }; + + var cache: std.Build.Cache = .{ + .gpa = arena, + .manifest_dir = try local_cache_directory.handle.makeOpenPath("h", .{}), + }; + cache.addPrefix(.{ .path = null, .handle = std.fs.cwd() }); + cache.addPrefix(build_root_directory); + cache.addPrefix(local_cache_directory); + cache.addPrefix(global_cache_directory); + cache.hash.addBytes(builtin.zig_version_string); + + const builder = try std.Build.create( + arena, + zig_exe, + build_root_directory, + local_cache_directory, + global_cache_directory, + host, + &cache, + ); + defer builder.destroy(); + + var targets = ArrayList([]const u8).init(arena); + var debug_log_scopes = ArrayList([]const u8).init(arena); + var thread_pool_options: std.Thread.Pool.Options = .{ .allocator = arena }; + + var install_prefix: ?[]const u8 = null; + var dir_list = std.Build.DirList{}; + var enable_summary: ?bool = null; + var max_rss: usize = 0; + var color: Color = .auto; + + const stderr_stream = io.getStdErr().writer(); + const stdout_stream = io.getStdOut().writer(); + + while (nextArg(args, &arg_idx)) |arg| { + if (mem.startsWith(u8, arg, "-D")) { + const option_contents = arg[2..]; + if (option_contents.len == 0) { + std.debug.print("Expected option name after '-D'\n\n", .{}); + usageAndErr(builder, false, stderr_stream); + } + if (mem.indexOfScalar(u8, option_contents, '=')) |name_end| { + const option_name = option_contents[0..name_end]; + const option_value = option_contents[name_end + 1 ..]; + if (try builder.addUserInputOption(option_name, option_value)) + usageAndErr(builder, false, stderr_stream); + } else { + if (try builder.addUserInputFlag(option_contents)) + usageAndErr(builder, false, stderr_stream); + } + } else if (mem.startsWith(u8, arg, "-")) { + if (mem.eql(u8, arg, "--verbose")) { + 
builder.verbose = true; + } else if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) { + return usage(builder, false, stdout_stream); + } else if (mem.eql(u8, arg, "-p") or mem.eql(u8, arg, "--prefix")) { + install_prefix = nextArg(args, &arg_idx) orelse { + std.debug.print("Expected argument after {s}\n\n", .{arg}); + usageAndErr(builder, false, stderr_stream); + }; + } else if (mem.eql(u8, arg, "-l") or mem.eql(u8, arg, "--list-steps")) { + return steps(builder, false, stdout_stream); + } else if (mem.eql(u8, arg, "--prefix-lib-dir")) { + dir_list.lib_dir = nextArg(args, &arg_idx) orelse { + std.debug.print("Expected argument after {s}\n\n", .{arg}); + usageAndErr(builder, false, stderr_stream); + }; + } else if (mem.eql(u8, arg, "--prefix-exe-dir")) { + dir_list.exe_dir = nextArg(args, &arg_idx) orelse { + std.debug.print("Expected argument after {s}\n\n", .{arg}); + usageAndErr(builder, false, stderr_stream); + }; + } else if (mem.eql(u8, arg, "--prefix-include-dir")) { + dir_list.include_dir = nextArg(args, &arg_idx) orelse { + std.debug.print("Expected argument after {s}\n\n", .{arg}); + usageAndErr(builder, false, stderr_stream); + }; + } else if (mem.eql(u8, arg, "--sysroot")) { + const sysroot = nextArg(args, &arg_idx) orelse { + std.debug.print("Expected argument after {s}\n\n", .{arg}); + usageAndErr(builder, false, stderr_stream); + }; + builder.sysroot = sysroot; + } else if (mem.eql(u8, arg, "--maxrss")) { + const max_rss_text = nextArg(args, &arg_idx) orelse { + std.debug.print("Expected argument after {s}\n\n", .{arg}); + usageAndErr(builder, false, stderr_stream); + }; + // TODO: support shorthand such as "2GiB", "2GB", or "2G" + max_rss = std.fmt.parseInt(usize, max_rss_text, 10) catch |err| { + std.debug.print("invalid byte size: '{s}': {s}\n", .{ + max_rss_text, @errorName(err), + }); + process.exit(1); + }; + } else if (mem.eql(u8, arg, "--search-prefix")) { + const search_prefix = nextArg(args, &arg_idx) orelse { + 
std.debug.print("Expected argument after {s}\n\n", .{arg}); + usageAndErr(builder, false, stderr_stream); + }; + builder.addSearchPrefix(search_prefix); + } else if (mem.eql(u8, arg, "--libc")) { + const libc_file = nextArg(args, &arg_idx) orelse { + std.debug.print("Expected argument after {s}\n\n", .{arg}); + usageAndErr(builder, false, stderr_stream); + }; + builder.libc_file = libc_file; + } else if (mem.eql(u8, arg, "--color")) { + const next_arg = nextArg(args, &arg_idx) orelse { + std.debug.print("Expected [auto|on|off] after {s}\n\n", .{arg}); + usageAndErr(builder, false, stderr_stream); + }; + color = std.meta.stringToEnum(Color, next_arg) orelse { + std.debug.print("Expected [auto|on|off] after {s}, found '{s}'\n\n", .{ arg, next_arg }); + usageAndErr(builder, false, stderr_stream); + }; + } else if (mem.eql(u8, arg, "--zig-lib-dir")) { + builder.zig_lib_dir = nextArg(args, &arg_idx) orelse { + std.debug.print("Expected argument after {s}\n\n", .{arg}); + usageAndErr(builder, false, stderr_stream); + }; + } else if (mem.eql(u8, arg, "--debug-log")) { + const next_arg = nextArg(args, &arg_idx) orelse { + std.debug.print("Expected argument after {s}\n\n", .{arg}); + usageAndErr(builder, false, stderr_stream); + }; + try debug_log_scopes.append(next_arg); + } else if (mem.eql(u8, arg, "--debug-pkg-config")) { + builder.debug_pkg_config = true; + } else if (mem.eql(u8, arg, "--debug-compile-errors")) { + builder.debug_compile_errors = true; + } else if (mem.eql(u8, arg, "--glibc-runtimes")) { + builder.glibc_runtimes_dir = nextArg(args, &arg_idx) orelse { + std.debug.print("Expected argument after {s}\n\n", .{arg}); + usageAndErr(builder, false, stderr_stream); + }; + } else if (mem.eql(u8, arg, "--verbose-link")) { + builder.verbose_link = true; + } else if (mem.eql(u8, arg, "--verbose-air")) { + builder.verbose_air = true; + } else if (mem.eql(u8, arg, "--verbose-llvm-ir")) { + builder.verbose_llvm_ir = "-"; + } else if (mem.startsWith(u8, arg, 
"--verbose-llvm-ir=")) { + builder.verbose_llvm_ir = arg["--verbose-llvm-ir=".len..]; + } else if (mem.startsWith(u8, arg, "--verbose-llvm-bc=")) { + builder.verbose_llvm_bc = arg["--verbose-llvm-bc=".len..]; + } else if (mem.eql(u8, arg, "--verbose-cimport")) { + builder.verbose_cimport = true; + } else if (mem.eql(u8, arg, "--verbose-cc")) { + builder.verbose_cc = true; + } else if (mem.eql(u8, arg, "--verbose-llvm-cpu-features")) { + builder.verbose_llvm_cpu_features = true; + } else if (mem.eql(u8, arg, "-fwine")) { + builder.enable_wine = true; + } else if (mem.eql(u8, arg, "-fno-wine")) { + builder.enable_wine = false; + } else if (mem.eql(u8, arg, "-fqemu")) { + builder.enable_qemu = true; + } else if (mem.eql(u8, arg, "-fno-qemu")) { + builder.enable_qemu = false; + } else if (mem.eql(u8, arg, "-fwasmtime")) { + builder.enable_wasmtime = true; + } else if (mem.eql(u8, arg, "-fno-wasmtime")) { + builder.enable_wasmtime = false; + } else if (mem.eql(u8, arg, "-frosetta")) { + builder.enable_rosetta = true; + } else if (mem.eql(u8, arg, "-fno-rosetta")) { + builder.enable_rosetta = false; + } else if (mem.eql(u8, arg, "-fdarling")) { + builder.enable_darling = true; + } else if (mem.eql(u8, arg, "-fno-darling")) { + builder.enable_darling = false; + } else if (mem.eql(u8, arg, "-fsummary")) { + enable_summary = true; + } else if (mem.eql(u8, arg, "-fno-summary")) { + enable_summary = false; + } else if (mem.eql(u8, arg, "-freference-trace")) { + builder.reference_trace = 256; + } else if (mem.startsWith(u8, arg, "-freference-trace=")) { + const num = arg["-freference-trace=".len..]; + builder.reference_trace = std.fmt.parseUnsigned(u32, num, 10) catch |err| { + std.debug.print("unable to parse reference_trace count '{s}': {s}", .{ num, @errorName(err) }); + process.exit(1); + }; + } else if (mem.eql(u8, arg, "-fno-reference-trace")) { + builder.reference_trace = null; + } else if (mem.startsWith(u8, arg, "-j")) { + const num = arg["-j".len..]; + const n_jobs = 
std.fmt.parseUnsigned(u32, num, 10) catch |err| { + std.debug.print("unable to parse jobs count '{s}': {s}", .{ + num, @errorName(err), + }); + process.exit(1); + }; + if (n_jobs < 1) { + std.debug.print("number of jobs must be at least 1\n", .{}); + process.exit(1); + } + thread_pool_options.n_jobs = n_jobs; + } else if (mem.eql(u8, arg, "--")) { + builder.args = argsRest(args, arg_idx); + break; + } else { + std.debug.print("Unrecognized argument: {s}\n\n", .{arg}); + usageAndErr(builder, false, stderr_stream); + } + } else { + try targets.append(arg); + } + } + + const stderr = std.io.getStdErr(); + const ttyconf = get_tty_conf(color, stderr); + switch (ttyconf) { + .no_color => try builder.env_map.put("NO_COLOR", "1"), + .escape_codes => try builder.env_map.put("ZIG_DEBUG_COLOR", "1"), + .windows_api => {}, + } + + var progress: std.Progress = .{ .dont_print_on_dumb = true }; + const main_progress_node = progress.start("", 0); + + builder.debug_log_scopes = debug_log_scopes.items; + builder.resolveInstallPrefix(install_prefix, dir_list); + { + var prog_node = main_progress_node.start("user build.zig logic", 0); + defer prog_node.end(); + try builder.runBuild(root); + } + + if (builder.validateUserInputDidItFail()) + usageAndErr(builder, true, stderr_stream); + + var run: Run = .{ + .max_rss = max_rss, + .max_rss_is_default = false, + .max_rss_mutex = .{}, + .memory_blocked_steps = std.ArrayList(*Step).init(arena), + + .claimed_rss = 0, + .enable_summary = enable_summary, + .ttyconf = ttyconf, + .stderr = stderr, + }; + + if (run.max_rss == 0) { + run.max_rss = process.totalSystemMemory() catch std.math.maxInt(usize); + run.max_rss_is_default = true; + } + + runStepNames( + arena, + builder, + targets.items, + main_progress_node, + thread_pool_options, + &run, + ) catch |err| switch (err) { + error.UncleanExit => process.exit(1), + else => return err, + }; +} + +const Run = struct { + max_rss: usize, + max_rss_is_default: bool, + max_rss_mutex: std.Thread.Mutex, 
+ memory_blocked_steps: std.ArrayList(*Step), + + claimed_rss: usize, + enable_summary: ?bool, + ttyconf: std.debug.TTY.Config, + stderr: std.fs.File, +}; + +fn runStepNames( + arena: std.mem.Allocator, + b: *std.Build, + step_names: []const []const u8, + parent_prog_node: *std.Progress.Node, + thread_pool_options: std.Thread.Pool.Options, + run: *Run, +) !void { + const gpa = b.allocator; + var step_stack: std.AutoArrayHashMapUnmanaged(*Step, void) = .{}; + defer step_stack.deinit(gpa); + + if (step_names.len == 0) { + try step_stack.put(gpa, b.default_step, {}); + } else { + try step_stack.ensureUnusedCapacity(gpa, step_names.len); + for (0..step_names.len) |i| { + const step_name = step_names[step_names.len - i - 1]; + const s = b.top_level_steps.get(step_name) orelse { + std.debug.print("no step named '{s}'. Access the help menu with 'zig build -h'\n", .{step_name}); + process.exit(1); + }; + step_stack.putAssumeCapacity(&s.step, {}); + } + } + + const starting_steps = try arena.dupe(*Step, step_stack.keys()); + for (starting_steps) |s| { + checkForDependencyLoop(b, s, &step_stack) catch |err| switch (err) { + error.DependencyLoopDetected => return error.UncleanExit, + else => |e| return e, + }; + } + + { + // Check that we have enough memory to complete the build. 
+ var any_problems = false; + for (step_stack.keys()) |s| { + if (s.max_rss == 0) continue; + if (s.max_rss > run.max_rss) { + std.debug.print("{s}{s}: this step declares an upper bound of {d} bytes of memory, exceeding the available {d} bytes of memory\n", .{ + s.owner.dep_prefix, s.name, s.max_rss, run.max_rss, + }); + any_problems = true; + } + } + if (any_problems) { + if (run.max_rss_is_default) { + std.debug.print("note: use --maxrss to override the default", .{}); + } + return error.UncleanExit; + } + } + + var thread_pool: std.Thread.Pool = undefined; + try thread_pool.init(thread_pool_options); + defer thread_pool.deinit(); + + { + defer parent_prog_node.end(); + + var step_prog = parent_prog_node.start("steps", step_stack.count()); + defer step_prog.end(); + + var wait_group: std.Thread.WaitGroup = .{}; + defer wait_group.wait(); + + // Here we spawn the initial set of tasks with a nice heuristic - + // dependency order. Each worker when it finishes a step will then + // check whether it should run any dependants. 
+ const steps_slice = step_stack.keys(); + for (0..steps_slice.len) |i| { + const step = steps_slice[steps_slice.len - i - 1]; + + wait_group.start(); + thread_pool.spawn(workerMakeOneStep, .{ + &wait_group, &thread_pool, b, step, &step_prog, run, + }) catch @panic("OOM"); + } + } + assert(run.memory_blocked_steps.items.len == 0); + + var test_skip_count: usize = 0; + var test_fail_count: usize = 0; + var test_pass_count: usize = 0; + var test_leak_count: usize = 0; + var test_count: usize = 0; + + var success_count: usize = 0; + var skipped_count: usize = 0; + var failure_count: usize = 0; + var pending_count: usize = 0; + var total_compile_errors: usize = 0; + var compile_error_steps: std.ArrayListUnmanaged(*Step) = .{}; + defer compile_error_steps.deinit(gpa); + + for (step_stack.keys()) |s| { + test_fail_count += s.test_results.fail_count; + test_skip_count += s.test_results.skip_count; + test_leak_count += s.test_results.leak_count; + test_pass_count += s.test_results.passCount(); + test_count += s.test_results.test_count; + + switch (s.state) { + .precheck_unstarted => unreachable, + .precheck_started => unreachable, + .running => unreachable, + .precheck_done => { + // precheck_done is equivalent to dependency_failure in the case of + // transitive dependencies. For example: + // A -> B -> C (failure) + // B will be marked as dependency_failure, while A may never be queued, and thus + // remain in the initial state of precheck_done. + s.state = .dependency_failure; + pending_count += 1; + }, + .dependency_failure => pending_count += 1, + .success => success_count += 1, + .skipped => skipped_count += 1, + .failure => { + failure_count += 1; + const compile_errors_len = s.result_error_bundle.errorMessageCount(); + if (compile_errors_len > 0) { + total_compile_errors += compile_errors_len; + try compile_error_steps.append(gpa, s); + } + }, + } + } + + // A proper command line application defaults to silently succeeding. 
+ // The user may request verbose mode if they have a different preference. + if (failure_count == 0 and run.enable_summary != true) return cleanExit(); + + const ttyconf = run.ttyconf; + const stderr = run.stderr; + + if (run.enable_summary != false) { + const total_count = success_count + failure_count + pending_count + skipped_count; + ttyconf.setColor(stderr, .Cyan) catch {}; + stderr.writeAll("Build Summary:") catch {}; + ttyconf.setColor(stderr, .Reset) catch {}; + stderr.writer().print(" {d}/{d} steps succeeded", .{ success_count, total_count }) catch {}; + if (skipped_count > 0) stderr.writer().print("; {d} skipped", .{skipped_count}) catch {}; + if (failure_count > 0) stderr.writer().print("; {d} failed", .{failure_count}) catch {}; + + if (test_count > 0) stderr.writer().print("; {d}/{d} tests passed", .{ test_pass_count, test_count }) catch {}; + if (test_skip_count > 0) stderr.writer().print("; {d} skipped", .{test_skip_count}) catch {}; + if (test_fail_count > 0) stderr.writer().print("; {d} failed", .{test_fail_count}) catch {}; + if (test_leak_count > 0) stderr.writer().print("; {d} leaked", .{test_leak_count}) catch {}; + + if (run.enable_summary == null) { + ttyconf.setColor(stderr, .Dim) catch {}; + stderr.writeAll(" (disable with -fno-summary)") catch {}; + ttyconf.setColor(stderr, .Reset) catch {}; + } + stderr.writeAll("\n") catch {}; + + // Print a fancy tree with build results. + var print_node: PrintNode = .{ .parent = null }; + if (step_names.len == 0) { + print_node.last = true; + printTreeStep(b, b.default_step, stderr, ttyconf, &print_node, &step_stack) catch {}; + } else { + for (step_names, 0..) |step_name, i| { + const tls = b.top_level_steps.get(step_name).?; + print_node.last = i + 1 == b.top_level_steps.count(); + printTreeStep(b, &tls.step, stderr, ttyconf, &print_node, &step_stack) catch {}; + } + } + } + + if (failure_count == 0) return cleanExit(); + + // Finally, render compile errors at the bottom of the terminal. 
+ // We use a separate compile_error_steps array list because step_stack is destructively + // mutated in printTreeStep above. + if (total_compile_errors > 0) { + for (compile_error_steps.items) |s| { + if (s.result_error_bundle.errorMessageCount() > 0) { + s.result_error_bundle.renderToStdErr(renderOptions(ttyconf)); + } + } + + // Signal to parent process that we have printed compile errors. The + // parent process may choose to omit the "following command failed" + // line in this case. + process.exit(2); + } + + process.exit(1); +} + +const PrintNode = struct { + parent: ?*PrintNode, + last: bool = false, +}; + +fn printPrefix(node: *PrintNode, stderr: std.fs.File, ttyconf: std.debug.TTY.Config) !void { + const parent = node.parent orelse return; + if (parent.parent == null) return; + try printPrefix(parent, stderr, ttyconf); + if (parent.last) { + try stderr.writeAll(" "); + } else { + try stderr.writeAll(switch (ttyconf) { + .no_color, .windows_api => "| ", + .escape_codes => "\x1B\x28\x30\x78\x1B\x28\x42 ", // │ + }); + } +} + +fn printTreeStep( + b: *std.Build, + s: *Step, + stderr: std.fs.File, + ttyconf: std.debug.TTY.Config, + parent_node: *PrintNode, + step_stack: *std.AutoArrayHashMapUnmanaged(*Step, void), +) !void { + const first = step_stack.swapRemove(s); + try printPrefix(parent_node, stderr, ttyconf); + + if (!first) try ttyconf.setColor(stderr, .Dim); + if (parent_node.parent != null) { + if (parent_node.last) { + try stderr.writeAll(switch (ttyconf) { + .no_color, .windows_api => "+- ", + .escape_codes => "\x1B\x28\x30\x6d\x71\x1B\x28\x42 ", // └─ + }); + } else { + try stderr.writeAll(switch (ttyconf) { + .no_color, .windows_api => "+- ", + .escape_codes => "\x1B\x28\x30\x74\x71\x1B\x28\x42 ", // ├─ + }); + } + } + + // dep_prefix omitted here because it is redundant with the tree. 
+ try stderr.writeAll(s.name); + + if (first) { + switch (s.state) { + .precheck_unstarted => unreachable, + .precheck_started => unreachable, + .precheck_done => unreachable, + .running => unreachable, + + .dependency_failure => { + try ttyconf.setColor(stderr, .Dim); + try stderr.writeAll(" transitive failure\n"); + try ttyconf.setColor(stderr, .Reset); + }, + + .success => { + try ttyconf.setColor(stderr, .Green); + if (s.result_cached) { + try stderr.writeAll(" cached"); + } else if (s.test_results.test_count > 0) { + const pass_count = s.test_results.passCount(); + try stderr.writer().print(" {d} passed", .{pass_count}); + if (s.test_results.skip_count > 0) { + try ttyconf.setColor(stderr, .Yellow); + try stderr.writer().print(" {d} skipped", .{s.test_results.skip_count}); + } + } else { + try stderr.writeAll(" success"); + } + try ttyconf.setColor(stderr, .Reset); + if (s.result_duration_ns) |ns| { + try ttyconf.setColor(stderr, .Dim); + if (ns >= std.time.ns_per_min) { + try stderr.writer().print(" {d}m", .{ns / std.time.ns_per_min}); + } else if (ns >= std.time.ns_per_s) { + try stderr.writer().print(" {d}s", .{ns / std.time.ns_per_s}); + } else if (ns >= std.time.ns_per_ms) { + try stderr.writer().print(" {d}ms", .{ns / std.time.ns_per_ms}); + } else if (ns >= std.time.ns_per_us) { + try stderr.writer().print(" {d}us", .{ns / std.time.ns_per_us}); + } else { + try stderr.writer().print(" {d}ns", .{ns}); + } + try ttyconf.setColor(stderr, .Reset); + } + if (s.result_peak_rss != 0) { + const rss = s.result_peak_rss; + try ttyconf.setColor(stderr, .Dim); + if (rss >= 1000_000_000) { + try stderr.writer().print(" MaxRSS:{d}G", .{rss / 1000_000_000}); + } else if (rss >= 1000_000) { + try stderr.writer().print(" MaxRSS:{d}M", .{rss / 1000_000}); + } else if (rss >= 1000) { + try stderr.writer().print(" MaxRSS:{d}K", .{rss / 1000}); + } else { + try stderr.writer().print(" MaxRSS:{d}B", .{rss}); + } + try ttyconf.setColor(stderr, .Reset); + } + try 
stderr.writeAll("\n"); + }, + + .skipped => { + try ttyconf.setColor(stderr, .Yellow); + try stderr.writeAll(" skipped\n"); + try ttyconf.setColor(stderr, .Reset); + }, + + .failure => { + if (s.result_error_bundle.errorMessageCount() > 0) { + try ttyconf.setColor(stderr, .Red); + try stderr.writer().print(" {d} errors\n", .{ + s.result_error_bundle.errorMessageCount(), + }); + try ttyconf.setColor(stderr, .Reset); + } else if (!s.test_results.isSuccess()) { + try stderr.writer().print(" {d}/{d} passed", .{ + s.test_results.passCount(), s.test_results.test_count, + }); + if (s.test_results.fail_count > 0) { + try stderr.writeAll(", "); + try ttyconf.setColor(stderr, .Red); + try stderr.writer().print("{d} failed", .{ + s.test_results.fail_count, + }); + try ttyconf.setColor(stderr, .Reset); + } + if (s.test_results.skip_count > 0) { + try stderr.writeAll(", "); + try ttyconf.setColor(stderr, .Yellow); + try stderr.writer().print("{d} skipped", .{ + s.test_results.skip_count, + }); + try ttyconf.setColor(stderr, .Reset); + } + if (s.test_results.leak_count > 0) { + try stderr.writeAll(", "); + try ttyconf.setColor(stderr, .Red); + try stderr.writer().print("{d} leaked", .{ + s.test_results.leak_count, + }); + try ttyconf.setColor(stderr, .Reset); + } + try stderr.writeAll("\n"); + } else { + try ttyconf.setColor(stderr, .Red); + try stderr.writeAll(" failure\n"); + try ttyconf.setColor(stderr, .Reset); + } + }, + } + + for (s.dependencies.items, 0..) 
|dep, i| { + var print_node: PrintNode = .{ + .parent = parent_node, + .last = i == s.dependencies.items.len - 1, + }; + try printTreeStep(b, dep, stderr, ttyconf, &print_node, step_stack); + } + } else { + if (s.dependencies.items.len == 0) { + try stderr.writeAll(" (reused)\n"); + } else { + try stderr.writer().print(" (+{d} more reused dependencies)\n", .{ + s.dependencies.items.len, + }); + } + try ttyconf.setColor(stderr, .Reset); + } +} + +fn checkForDependencyLoop( + b: *std.Build, + s: *Step, + step_stack: *std.AutoArrayHashMapUnmanaged(*Step, void), +) !void { + switch (s.state) { + .precheck_started => { + std.debug.print("dependency loop detected:\n {s}\n", .{s.name}); + return error.DependencyLoopDetected; + }, + .precheck_unstarted => { + s.state = .precheck_started; + + try step_stack.ensureUnusedCapacity(b.allocator, s.dependencies.items.len); + for (s.dependencies.items) |dep| { + try step_stack.put(b.allocator, dep, {}); + try dep.dependants.append(b.allocator, s); + checkForDependencyLoop(b, dep, step_stack) catch |err| { + if (err == error.DependencyLoopDetected) { + std.debug.print(" {s}\n", .{s.name}); + } + return err; + }; + } + + s.state = .precheck_done; + }, + .precheck_done => {}, + + // These don't happen until we actually run the step graph. + .dependency_failure => unreachable, + .running => unreachable, + .success => unreachable, + .failure => unreachable, + .skipped => unreachable, + } +} + +fn workerMakeOneStep( + wg: *std.Thread.WaitGroup, + thread_pool: *std.Thread.Pool, + b: *std.Build, + s: *Step, + prog_node: *std.Progress.Node, + run: *Run, +) void { + defer wg.finish(); + + // First, check the conditions for running this step. If they are not met, + // then we return without doing the step, relying on another worker to + // queue this step up again when dependencies are met. 
+ for (s.dependencies.items) |dep| { + switch (@atomicLoad(Step.State, &dep.state, .SeqCst)) { + .success, .skipped => continue, + .failure, .dependency_failure => { + @atomicStore(Step.State, &s.state, .dependency_failure, .SeqCst); + return; + }, + .precheck_done, .running => { + // dependency is not finished yet. + return; + }, + .precheck_unstarted => unreachable, + .precheck_started => unreachable, + } + } + + if (s.max_rss != 0) { + run.max_rss_mutex.lock(); + defer run.max_rss_mutex.unlock(); + + // Avoid running steps twice. + if (s.state != .precheck_done) { + // Another worker got the job. + return; + } + + const new_claimed_rss = run.claimed_rss + s.max_rss; + if (new_claimed_rss > run.max_rss) { + // Running this step right now could possibly exceed the allotted RSS. + // Add this step to the queue of memory-blocked steps. + run.memory_blocked_steps.append(s) catch @panic("OOM"); + return; + } + + run.claimed_rss = new_claimed_rss; + s.state = .running; + } else { + // Avoid running steps twice. + if (@cmpxchgStrong(Step.State, &s.state, .precheck_done, .running, .SeqCst, .SeqCst) != null) { + // Another worker got the job. + return; + } + } + + var sub_prog_node = prog_node.start(s.name, 0); + sub_prog_node.activate(); + defer sub_prog_node.end(); + + const make_result = s.make(&sub_prog_node); + + // No matter the result, we want to display error/warning messages. + if (s.result_error_msgs.items.len > 0) { + sub_prog_node.context.lock_stderr(); + defer sub_prog_node.context.unlock_stderr(); + + const stderr = run.stderr; + const ttyconf = run.ttyconf; + + for (s.result_error_msgs.items) |msg| { + // Sometimes it feels like you just can't catch a break. Finally, + // with Zig, you can. 
+ ttyconf.setColor(stderr, .Bold) catch break; + stderr.writeAll(s.owner.dep_prefix) catch break; + stderr.writeAll(s.name) catch break; + stderr.writeAll(": ") catch break; + ttyconf.setColor(stderr, .Red) catch break; + stderr.writeAll("error: ") catch break; + ttyconf.setColor(stderr, .Reset) catch break; + stderr.writeAll(msg) catch break; + stderr.writeAll("\n") catch break; + } + } + + handle_result: { + if (make_result) |_| { + @atomicStore(Step.State, &s.state, .success, .SeqCst); + } else |err| switch (err) { + error.MakeFailed => { + @atomicStore(Step.State, &s.state, .failure, .SeqCst); + break :handle_result; + }, + error.MakeSkipped => @atomicStore(Step.State, &s.state, .skipped, .SeqCst), + } + + // Successful completion of a step, so we queue up its dependants as well. + for (s.dependants.items) |dep| { + wg.start(); + thread_pool.spawn(workerMakeOneStep, .{ + wg, thread_pool, b, dep, prog_node, run, + }) catch @panic("OOM"); + } + } + + // If this is a step that claims resources, we must now queue up other + // steps that are waiting for resources. + if (s.max_rss != 0) { + run.max_rss_mutex.lock(); + defer run.max_rss_mutex.unlock(); + + // Give the memory back to the scheduler. + run.claimed_rss -= s.max_rss; + // Avoid kicking off too many tasks that we already know will not have + // enough resources. 
+ var remaining = run.max_rss - run.claimed_rss; + var i: usize = 0; + var j: usize = 0; + while (j < run.memory_blocked_steps.items.len) : (j += 1) { + const dep = run.memory_blocked_steps.items[j]; + assert(dep.max_rss != 0); + if (dep.max_rss <= remaining) { + remaining -= dep.max_rss; + + wg.start(); + thread_pool.spawn(workerMakeOneStep, .{ + wg, thread_pool, b, dep, prog_node, run, + }) catch @panic("OOM"); + } else { + run.memory_blocked_steps.items[i] = dep; + i += 1; + } + } + run.memory_blocked_steps.shrinkRetainingCapacity(i); + } +} + +fn steps(builder: *std.Build, already_ran_build: bool, out_stream: anytype) !void { + // run the build script to collect the options + if (!already_ran_build) { + builder.resolveInstallPrefix(null, .{}); + try builder.runBuild(root); + } + + const allocator = builder.allocator; + for (builder.top_level_steps.values()) |top_level_step| { + const name = if (&top_level_step.step == builder.default_step) + try fmt.allocPrint(allocator, "{s} (default)", .{top_level_step.step.name}) + else + top_level_step.step.name; + try out_stream.print(" {s:<28} {s}\n", .{ name, top_level_step.description }); + } +} + +fn usage(builder: *std.Build, already_ran_build: bool, out_stream: anytype) !void { + // run the build script to collect the options + if (!already_ran_build) { + builder.resolveInstallPrefix(null, .{}); + try builder.runBuild(root); + } + + try out_stream.print( + \\ + \\Usage: {s} build [steps] [options] + \\ + \\Steps: + \\ + , .{builder.zig_exe}); + try steps(builder, true, out_stream); + + try out_stream.writeAll( + \\ + \\General Options: + \\ -p, --prefix [path] Override default install prefix + \\ --prefix-lib-dir [path] Override default library directory path + \\ --prefix-exe-dir [path] Override default executable directory path + \\ --prefix-include-dir [path] Override default include directory path + \\ + \\ --sysroot [path] Set the system root directory (usually /) + \\ --search-prefix [path] Add a path to look 
for binaries, libraries, headers + \\ --libc [file] Provide a file which specifies libc paths + \\ + \\ -fdarling, -fno-darling Integration with system-installed Darling to + \\ execute macOS programs on Linux hosts + \\ (default: no) + \\ -fqemu, -fno-qemu Integration with system-installed QEMU to execute + \\ foreign-architecture programs on Linux hosts + \\ (default: no) + \\ --glibc-runtimes [path] Enhances QEMU integration by providing glibc built + \\ for multiple foreign architectures, allowing + \\ execution of non-native programs that link with glibc. + \\ -frosetta, -fno-rosetta Rely on Rosetta to execute x86_64 programs on + \\ ARM64 macOS hosts. (default: no) + \\ -fwasmtime, -fno-wasmtime Integration with system-installed wasmtime to + \\ execute WASI binaries. (default: no) + \\ -fwine, -fno-wine Integration with system-installed Wine to execute + \\ Windows programs on Linux hosts. (default: no) + \\ + \\ -h, --help Print this help and exit + \\ -l, --list-steps Print available steps + \\ --verbose Print commands before executing them + \\ --color [auto|off|on] Enable or disable colored error messages + \\ -fsummary Print the build summary, even on success + \\ -fno-summary Omit the build summary, even on failure + \\ -j Limit concurrent jobs (default is to use all CPU cores) + \\ --maxrss Limit memory usage (default is to use available memory) + \\ + \\Project-Specific Options: + \\ + ); + + const allocator = builder.allocator; + if (builder.available_options_list.items.len == 0) { + try out_stream.print(" (none)\n", .{}); + } else { + for (builder.available_options_list.items) |option| { + const name = try fmt.allocPrint(allocator, " -D{s}=[{s}]", .{ + option.name, + @tagName(option.type_id), + }); + defer allocator.free(name); + try out_stream.print("{s:<30} {s}\n", .{ name, option.description }); + if (option.enum_options) |enum_options| { + const padding = " " ** 33; + try out_stream.writeAll(padding ++ "Supported Values:\n"); + for 
(enum_options) |enum_option| { + try out_stream.print(padding ++ " {s}\n", .{enum_option}); + } + } + } + } + + try out_stream.writeAll( + \\ + \\Advanced Options: + \\ -freference-trace[=num] How many lines of reference trace should be shown per compile error + \\ -fno-reference-trace Disable reference trace + \\ --build-file [file] Override path to build.zig + \\ --cache-dir [path] Override path to local Zig cache directory + \\ --global-cache-dir [path] Override path to global Zig cache directory + \\ --zig-lib-dir [arg] Override path to Zig lib directory + \\ --build-runner [file] Override path to build runner + \\ --debug-log [scope] Enable debugging the compiler + \\ --debug-pkg-config Fail if unknown pkg-config flags encountered + \\ --verbose-link Enable compiler debug output for linking + \\ --verbose-air Enable compiler debug output for Zig AIR + \\ --verbose-llvm-ir[=file] Enable compiler debug output for LLVM IR + \\ --verbose-llvm-bc=[file] Enable compiler debug output for LLVM BC + \\ --verbose-cimport Enable compiler debug output for C imports + \\ --verbose-cc Enable compiler debug output for C compilation + \\ --verbose-llvm-cpu-features Enable compiler debug output for LLVM CPU features + \\ + ); +} + +fn usageAndErr(builder: *std.Build, already_ran_build: bool, out_stream: anytype) noreturn { + usage(builder, already_ran_build, out_stream) catch {}; + process.exit(1); +} + +fn nextArg(args: [][:0]const u8, idx: *usize) ?[:0]const u8 { + if (idx.* >= args.len) return null; + defer idx.* += 1; + return args[idx.*]; +} + +fn argsRest(args: [][:0]const u8, idx: usize) ?[][:0]const u8 { + if (idx >= args.len) return null; + return args[idx..]; +} + +fn cleanExit() void { + // Perhaps in the future there could be an Advanced Options flag such as + // --debug-build-runner-leaks which would make this function return instead + // of calling exit. 
+ process.exit(0); +} + +const Color = enum { auto, off, on }; + +fn get_tty_conf(color: Color, stderr: std.fs.File) std.debug.TTY.Config { + return switch (color) { + .auto => std.debug.detectTTYConfig(stderr), + .on => .escape_codes, + .off => .no_color, + }; +} + +fn renderOptions(ttyconf: std.debug.TTY.Config) std.zig.ErrorBundle.RenderOptions { + return .{ + .ttyconf = ttyconf, + .include_source_line = ttyconf != .no_color, + .include_reference_trace = ttyconf != .no_color, + }; +} diff --git a/src/Manifest.zig b/src/Manifest.zig index a9e09a6728c2..068a14942f50 100644 --- a/src/Manifest.zig +++ b/src/Manifest.zig @@ -1,60 +1,6 @@ pub const basename = "build.zig.zon"; pub const Hash = std.crypto.hash.sha2.Sha256; -const Rule = union(enum) { - text: []const u8, - wildcard: void, -}; - -const Pattern = struct { - min_size: u32, - rules: []const Rule, - - const Self = @This(); - - pub fn matches(self: Self, input: []const u8) bool { - if (input.len < self.min_size) return false; - var offset: usize = 0; - var wildcard = false; - for (self.rules) |rule| { - switch (rule) { - .text => |text| { - if (wildcard) { - if (std.mem.indexOf(u8, input.len[offset..], text)) |index| { - offset = index + text.len; - wildcard = false; - } else { - return false; - } - } else { - if (!std.mem.startsWith(u8, input.len[offset], text)) { - return false; - } - offset += text.len; - } - }, - .wildcard => wildcard = true, - } - } - return wildcard or offset == input.len; - } -}; - -test "patterns" { - const pattern = Pattern{ - .min_size = 6, - .rules = &.{ - .{ .text = "abc" }, - .{ .wildcard = {} }, - .{ .text = "def" }, - }, - }; - - try testing.expect(pattern.matches("abc_def")); - try testing.expect(!pattern.matches("abdef")); - try testing.expect(!pattern.matches("abcdef_")); -} - pub const Dependency = struct { url: []const u8, url_tok: Ast.TokenIndex, diff --git a/src/Package.zig b/src/Package.zig index dba00c2c0839..2344b9038a6d 100644 --- a/src/Package.zig +++ 
b/src/Package.zig @@ -213,6 +213,93 @@ pub fn getName(target: *const Package, gpa: Allocator, mod: Module) ![]const u8 pub const build_zig_basename = "build.zig"; +pub fn fetchDependencies( + arena: Allocator, + thread_pool: *ThreadPool, + http_client: *std.http.Client, + directory: Compilation.Directory, + global_cache_directory: Compilation.Directory, + local_cache_directory: Compilation.Directory, + build_roots_source: *std.ArrayList(u8), + name_prefix: []const u8, + error_bundle: *std.zig.ErrorBundle.Wip, +) !void { + const max_bytes = 10 * 1024 * 1024; + const gpa = thread_pool.allocator; + const build_zig_zon_bytes = directory.handle.readFileAllocOptions( + arena, + Manifest.basename, + max_bytes, + null, + 1, + 0, + ) catch |err| switch (err) { + error.FileNotFound => { + // Handle the same as no dependencies. + return; + }, + else => |e| return e, + }; + + var ast = try std.zig.Ast.parse(gpa, build_zig_zon_bytes, .zon); + defer ast.deinit(gpa); + + if (ast.errors.len > 0) { + const file_path = try directory.join(arena, &.{Manifest.basename}); + try main.putAstErrorsIntoBundle(gpa, ast, file_path, error_bundle); + return error.PackageFetchFailed; + } + + var manifest = try Manifest.parse(gpa, ast); + defer manifest.deinit(gpa); + + if (manifest.errors.len > 0) { + const file_path = try directory.join(arena, &.{Manifest.basename}); + for (manifest.errors) |msg| { + try Report.addErrorMessage(ast, file_path, error_bundle, 0, msg); + } + return error.PackageFetchFailed; + } + + const report: Report = .{ + .ast = &ast, + .directory = directory, + .error_bundle = error_bundle, + }; + + var any_error = false; + const deps_list = manifest.dependencies.values(); + for (manifest.dependencies.keys(), 0..) |name, i| { + const dep = deps_list[i]; + + const sub_prefix = try std.fmt.allocPrint(arena, "{s}{s}.", .{ name_prefix, name }); + const fqn = sub_prefix[0 .. 
sub_prefix.len - 1]; + _ = fqn; + + const sub_pkg = try fetchAndUnpack_wip( + thread_pool, + http_client, + global_cache_directory, + dep, + report, + ); + + try fetchDependencies( + arena, + thread_pool, + http_client, + sub_pkg.root_src_directory, + global_cache_directory, + local_cache_directory, + build_roots_source, + sub_prefix, + error_bundle, + ); + } + + if (any_error) return error.InvalidBuildManifestFile; +} + pub fn fetchAndAddDependencies( pkg: *Package, root_pkg: *Package, @@ -400,6 +487,193 @@ const MultiHashHexDigest = [hex_multihash_len]u8; /// This is to avoid creating multiple modules for the same build.zig file. pub const AllModules = std.AutoHashMapUnmanaged(MultiHashHexDigest, *Package); +fn importCachedPackage( + gpa: Allocator, + global_cache_directory: Compilation.Directory, + dep: Manifest.Dependency, + build_roots_source: *std.ArrayList(u8), + fqn: []const u8, + all_modules: *AllModules, +) !*Package { + const s = fs.path.sep_str; + + // Check if the expected_hash is already present in the global package + // cache, and thereby avoid both fetching and unpacking. + if (dep.hash) |h| { + const hex_digest = h[0..hex_multihash_len]; + const pkg_dir_sub_path = "p" ++ s ++ hex_digest; + + const build_root = try global_cache_directory.join(gpa, &.{pkg_dir_sub_path}); + errdefer gpa.free(build_root); + + var pkg_dir = global_cache_directory.handle.openDir(pkg_dir_sub_path, .{}) catch |err| switch (err) { + error.FileNotFound => return error.MissingFile, //@todo + else => |e| return e, + }; + errdefer pkg_dir.close(); + + try build_roots_source.writer().print(" pub const {s} = \"{}\";\n", .{ + std.zig.fmtId(fqn), std.zig.fmtEscapes(build_root), + }); + + // The compiler has a rule that a file must not be included in multiple modules, + // so we must detect if a module has been created for this package and reuse it. 
+ const gop = try all_modules.getOrPut(gpa, hex_digest.*); + if (gop.found_existing) { + gpa.free(build_root); + return gop.value_ptr.*; + } + + const ptr = try gpa.create(Package); + errdefer gpa.destroy(ptr); + + const owned_src_path = try gpa.dupe(u8, build_zig_basename); + errdefer gpa.free(owned_src_path); + + ptr.* = .{ + .root_src_directory = .{ + .path = build_root, + .handle = pkg_dir, + }, + .root_src_directory_owned = true, + .root_src_path = owned_src_path, + }; + + gop.value_ptr.* = ptr; + return ptr; + } + return error.MissingDependencyHash; +} + +fn fetchAndUnpack_wip( + thread_pool: *ThreadPool, + http_client: *std.http.Client, + global_cache_directory: Compilation.Directory, + dep: Manifest.Dependency, + report: Report, +) !*Package { + const gpa = http_client.allocator; + const s = fs.path.sep_str; + + // Check if the expected_hash is already present in the global package + // cache, and thereby avoid both fetching and unpacking. + if (dep.hash) |h| cached: { + const hex_digest = h[0..hex_multihash_len]; + const pkg_dir_sub_path = "p" ++ s ++ hex_digest; + + const build_root = try global_cache_directory.join(gpa, &.{pkg_dir_sub_path}); + errdefer gpa.free(build_root); + + var pkg_dir = global_cache_directory.handle.openDir(pkg_dir_sub_path, .{}) catch |err| switch (err) { + error.FileNotFound => break :cached, + else => |e| return e, + }; + errdefer pkg_dir.close(); + + const ptr = try gpa.create(Package); + errdefer gpa.destroy(ptr); + + const owned_src_path = try gpa.dupe(u8, build_zig_basename); + errdefer gpa.free(owned_src_path); + + ptr.* = .{ + .root_src_directory = .{ + .path = build_root, + .handle = pkg_dir, + }, + .root_src_directory_owned = true, + .root_src_path = owned_src_path, + }; + + return ptr; + } + + const uri = try std.Uri.parse(dep.url); + + const rand_int = std.crypto.random.int(u64); + const tmp_dir_sub_path = "tmp" ++ s ++ Manifest.hex64(rand_int); + + const actual_hash = a: { + var tmp_directory: Compilation.Directory = 
d: { + const path = try global_cache_directory.join(gpa, &.{tmp_dir_sub_path}); + errdefer gpa.free(path); + + const iterable_dir = try global_cache_directory.handle.makeOpenPathIterable(tmp_dir_sub_path, .{}); + errdefer iterable_dir.close(); + + break :d .{ + .path = path, + .handle = iterable_dir.dir, + }; + }; + defer tmp_directory.closeAndFree(gpa); + + var req = try http_client.request(uri, .{}, .{}); + defer req.deinit(); + + try req.do(); + + if (mem.endsWith(u8, uri.path, ".tar.gz")) { + // I observed the gzip stream to read 1 byte at a time, so I am using a + // buffered reader on the front of it. + try unpackTarball(gpa, &req, tmp_directory.handle, std.compress.gzip); + } else if (mem.endsWith(u8, uri.path, ".tar.xz")) { + // I have not checked what buffer sizes the xz decompression implementation uses + // by default, so the same logic applies for buffering the reader as for gzip. + try unpackTarball(gpa, &req, tmp_directory.handle, std.compress.xz); + } else { + return report.fail(dep.url_tok, "unknown file extension for path '{s}'", .{uri.path}); + } + + // TODO: delete files not included in the package prior to computing the package hash. + // for example, if the ini file has directives to include/not include certain files, + // apply those rules directly to the filesystem right here. This ensures that files + // not protected by the hash are not present on the file system. + + // TODO: raise an error for files that have illegal paths on some operating systems. + // For example, on Linux a path with a backslash should raise an error here. + // Of course, if the ignore rules above omit the file from the package, then everything + // is fine and no error should be raised. 
+ + break :a try computePackageHash(thread_pool, .{ .dir = tmp_directory.handle }); + }; + + const pkg_dir_sub_path = "p" ++ s ++ Manifest.hexDigest(actual_hash); + try renameTmpIntoCache(global_cache_directory.handle, tmp_dir_sub_path, pkg_dir_sub_path); + + const actual_hex = Manifest.hexDigest(actual_hash); + if (dep.hash) |h| { + if (!mem.eql(u8, h, &actual_hex)) { + return report.fail(dep.hash_tok, "hash mismatch: expected: {s}, found: {s}", .{ + h, actual_hex, + }); + } + } else { + const file_path = try report.directory.join(gpa, &.{Manifest.basename}); + defer gpa.free(file_path); + + const eb = report.error_bundle; + const notes_len = 1; + try Report.addErrorMessage(report.ast.*, file_path, eb, notes_len, .{ + .tok = dep.url_tok, + .off = 0, + .msg = "url field is missing corresponding hash field", + }); + const notes_start = try eb.reserveNotes(notes_len); + eb.extra.items[notes_start] = @enumToInt(try eb.addErrorMessage(.{ + .msg = try eb.printString("expected .hash = \"{s}\",", .{&actual_hex}), + })); + return error.PackageFetchFailed; + } + + return createWithDir( + gpa, + global_cache_directory, + pkg_dir_sub_path, + build_zig_basename, + ); +} + fn fetchAndUnpack( thread_pool: *ThreadPool, http_client: *std.http.Client, diff --git a/src/main.zig b/src/main.zig index 2fa28ff0b6fa..9de7f756f6ce 100644 --- a/src/main.zig +++ b/src/main.zig @@ -4139,16 +4139,15 @@ pub fn cmdPkg(gpa: Allocator, arena: Allocator, args: []const []const u8) !void const command = blk: { const command_arg = args[0]; inline for (std.meta.fieldNames(PackageCommand)) |name| { - if (mem.eql(u9, command_arg, name)) { + if (mem.eql(u8, command_arg, name)) { break :blk std.meta.stringToEnum(PackageCommand, name); } } fatal("expected valid sub type argument found: {s}", .{command_arg}); - return; }; - return switch (command) { - .add => @panic("Not implemented"), + return switch (command.?) 
{ + .add => std.log.info("pkg add \n --- \n", .{}), .remove => @panic("not implemented"), .update => @panic("not implemented"), .check => @panic("not implemented"), @@ -4160,9 +4159,11 @@ pub fn sub_cmd_pkg_fetch(gpa: Allocator, arena: Allocator) !void { var color: Color = .auto; const self_exe_path = try introspect.findZigExePath(arena); + var override_lib_dir: ?[]const u8 = try optionalStringEnvVar(arena, "ZIG_LIB_DIR"); + var http_client: std.http.Client = .{ .allocator = gpa }; defer http_client.deinit(); - try http_client.rescanRootCertificates(); + //try http_client.rescanRootCertificates(); // Here we provide an import to the build runner that allows using reflection to find // all of the dependencies. Without this, there would be no way to use `@import` to @@ -4183,7 +4184,7 @@ pub fn sub_cmd_pkg_fetch(gpa: Allocator, arena: Allocator) !void { var cleanup_build_runner_dir: ?fs.Dir = null; defer if (cleanup_build_runner_dir) |*dir| dir.close(); const cwd_path = try process.getCwdAlloc(arena); - var zig_lib_directory: Compilation.Directory = if (false) |lib_dir| .{ + var zig_lib_directory: Compilation.Directory = if (override_lib_dir) |lib_dir| .{ .path = lib_dir, .handle = fs.cwd().openDir(lib_dir, .{}) catch |err| { fatal("unable to open zig lib directory from 'zig-lib-dir' argument: '{s}': {s}", .{ lib_dir, @errorName(err) }); @@ -4197,11 +4198,7 @@ pub fn sub_cmd_pkg_fetch(gpa: Allocator, arena: Allocator) !void { .root_src_directory = zig_lib_directory, .root_src_path = "build_runner.zig", }; - - var build_pkg: Package = .{ - .root_src_directory = "build_directory", - .root_src_path = "build_zig_basename", - }; + _ = main_pkg; var build_file: ?[]const u8 = null; var cleanup_build_dir: ?fs.Dir = null; defer if (cleanup_build_dir) |*dir| dir.close(); @@ -4242,6 +4239,13 @@ pub fn sub_cmd_pkg_fetch(gpa: Allocator, arena: Allocator) !void { } } }; + + var build_pkg: Package = .{ + .root_src_directory = build_directory, + .root_src_path = build_zig_basename, 
+ }; + _ = build_pkg; + var override_global_cache_dir: ?[]const u8 = try optionalStringEnvVar(arena, "ZIG_GLOBAL_CACHE_DIR"); var global_cache_directory: Compilation.Directory = l: { const p = override_global_cache_dir orelse try introspect.resolveGlobalCacheDir(arena); @@ -4276,19 +4280,16 @@ pub fn sub_cmd_pkg_fetch(gpa: Allocator, arena: Allocator) !void { var all_modules: Package.AllModules = .{}; defer all_modules.deinit(gpa); - const fetch_result = build_pkg.fetchAndAddDependencies( - &main_pkg, + const fetch_result = Package.fetchDependencies( arena, &thread_pool, &http_client, build_directory, global_cache_directory, local_cache_directory, - &dependencies_source, &build_roots_source, "", &wip_errors, - &all_modules, ); if (wip_errors.root_list.items.len > 0) { @@ -4298,20 +4299,6 @@ pub fn sub_cmd_pkg_fetch(gpa: Allocator, arena: Allocator) !void { process.exit(1); } try fetch_result; - - try dependencies_source.appendSlice("};\npub const build_root = struct {\n"); - try dependencies_source.appendSlice(build_roots_source.items); - try dependencies_source.appendSlice("};\n"); - - const deps_pkg = try Package.createFilePkg( - gpa, - local_cache_directory, - "dependencies.zig", - dependencies_source.items, - ); - - mem.swap(Package.Table, &main_pkg.table, &deps_pkg.table); - try main_pkg.addAndAdopt(gpa, "@dependencies", deps_pkg); } pub const usage_build = From a500333bb1e8c23c46a26457be7b59177550f6a4 Mon Sep 17 00:00:00 2001 From: DraagrenKirneh Date: Mon, 17 Apr 2023 00:57:14 +0200 Subject: [PATCH 03/13] revert change to .gitignore --- .gitignore | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index 63d5ea718078..0b4957c56a13 100644 --- a/.gitignore +++ b/.gitignore @@ -15,5 +15,4 @@ zig-out/ /debug/ /build/ /build-*/ -/docgen_tmp/ -stage3/ \ No newline at end of file +/docgen_tmp/ \ No newline at end of file From 774db77a86f1fb5d4ba43aec72d95966b5479509 Mon Sep 17 00:00:00 2001 From: DraagrenKirneh Date: Mon, 17 
Apr 2023 01:31:05 +0200 Subject: [PATCH 04/13] cleanup code, add usage_pkg string --- src/Package.zig | 12 +++---- src/main.zig | 93 +++++++++++++++++++------------------------------ 2 files changed, 39 insertions(+), 66 deletions(-) diff --git a/src/Package.zig b/src/Package.zig index 2344b9038a6d..aa99d7a6e639 100644 --- a/src/Package.zig +++ b/src/Package.zig @@ -220,7 +220,6 @@ pub fn fetchDependencies( directory: Compilation.Directory, global_cache_directory: Compilation.Directory, local_cache_directory: Compilation.Directory, - build_roots_source: *std.ArrayList(u8), name_prefix: []const u8, error_bundle: *std.zig.ErrorBundle.Wip, ) !void { @@ -273,10 +272,8 @@ pub fn fetchDependencies( const dep = deps_list[i]; const sub_prefix = try std.fmt.allocPrint(arena, "{s}{s}.", .{ name_prefix, name }); - const fqn = sub_prefix[0 .. sub_prefix.len - 1]; - _ = fqn; - const sub_pkg = try fetchAndUnpack_wip( + const sub_pkg = try fetchAndUnpack( thread_pool, http_client, global_cache_directory, @@ -291,7 +288,6 @@ pub fn fetchDependencies( sub_pkg.root_src_directory, global_cache_directory, local_cache_directory, - build_roots_source, sub_prefix, error_bundle, ); @@ -366,7 +362,7 @@ pub fn fetchAndAddDependencies( const sub_prefix = try std.fmt.allocPrint(arena, "{s}{s}.", .{ name_prefix, name }); const fqn = sub_prefix[0 .. 
sub_prefix.len - 1]; - const sub_pkg = try fetchAndUnpack( + const sub_pkg = try fetchUnpackImport( thread_pool, http_client, global_cache_directory, @@ -545,7 +541,7 @@ fn importCachedPackage( return error.MissingDependencyHash; } -fn fetchAndUnpack_wip( +fn fetchAndUnpack( thread_pool: *ThreadPool, http_client: *std.http.Client, global_cache_directory: Compilation.Directory, @@ -674,7 +670,7 @@ fn fetchAndUnpack_wip( ); } -fn fetchAndUnpack( +fn fetchUnpackImport( thread_pool: *ThreadPool, http_client: *std.http.Client, global_cache_directory: Compilation.Directory, diff --git a/src/main.zig b/src/main.zig index 8b70c9b52217..29efaf519b50 100644 --- a/src/main.zig +++ b/src/main.zig @@ -4127,67 +4127,47 @@ pub fn cmdInit( } } -const PackageCommand = enum { - add, - remove, - check, - update, - fetch, -}; - const PackageOptions = struct { @"--cache-dir": []const u8, @"--global-cache-dir": []const u8, }; +pub const usage_pkg = + \\Usage: zig pkg [command] [options] + \\ + \\  Runs a package command + \\ + \\Commands: + \\  fetch Calculates the package hash of the current directory. + \\ + \\Options: + \\ -h --help Print this help and exit. + \\ +; + pub fn cmdPkg(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { - if (args.len == 0) fatal("expected sub argument after pkg", .{}); + if (args.len == 0) fatal("Expected at least one argument.\n", .{}); - const command = blk: { - const command_arg = args[0]; - inline for (std.meta.fieldNames(PackageCommand)) |name| { - if (mem.eql(u8, command_arg, name)) { - break :blk std.meta.stringToEnum(PackageCommand, name); - } + for (args) |arg| { + if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) { + const stdout = io.getStdOut().writer(); + try stdout.writeAll(usage_pkg); + return cleanExit(); } - fatal("expected valid sub type argument found: {s}", .{command_arg}); - }; + } - return switch (command.?)
{ - .add => std.log.info("pkg add \n --- \n", .{}), - .remove => @panic("not implemented"), - .update => @panic("not implemented"), - .check => @panic("not implemented"), - .fetch => sub_cmd_pkg_fetch(gpa, arena), - }; + const command_arg = args[0]; + if (!mem.eql(u8, command_arg, "fetch")) fatal("Invalid command: {s}\n", .{command_arg}); + + return cmdPkgFetch(gpa, arena); } -pub fn sub_cmd_pkg_fetch(gpa: Allocator, arena: Allocator) !void { +pub fn cmdPkgFetch(gpa: Allocator, arena: Allocator) !void { var color: Color = .auto; const self_exe_path = try introspect.findZigExePath(arena); var override_lib_dir: ?[]const u8 = try optionalStringEnvVar(arena, "ZIG_LIB_DIR"); - var http_client: std.http.Client = .{ .allocator = gpa }; - defer http_client.deinit(); - //try http_client.rescanRootCertificates(); - - // Here we provide an import to the build runner that allows using reflection to find - // all of the dependencies. Without this, there would be no way to use `@import` to - // access dependencies by name, since `@import` requires string literals. - var dependencies_source = std.ArrayList(u8).init(gpa); - defer dependencies_source.deinit(); - try dependencies_source.appendSlice("pub const imports = struct {\n"); - - // This will go into the same package. It contains the file system paths - // to all the build.zig files. 
- var build_roots_source = std.ArrayList(u8).init(gpa); - defer build_roots_source.deinit(); - - var thread_pool: ThreadPool = undefined; - try thread_pool.init(.{ .allocator = gpa }); - defer thread_pool.deinit(); - var cleanup_build_runner_dir: ?fs.Dir = null; defer if (cleanup_build_runner_dir) |*dir| dir.close(); const cwd_path = try process.getCwdAlloc(arena); @@ -4201,11 +4181,6 @@ pub fn sub_cmd_pkg_fetch(gpa: Allocator, arena: Allocator) !void { }; defer zig_lib_directory.handle.close(); - var main_pkg: Package = .{ - .root_src_directory = zig_lib_directory, - .root_src_path = "build_runner.zig", - }; - _ = main_pkg; var build_file: ?[]const u8 = null; var cleanup_build_dir: ?fs.Dir = null; defer if (cleanup_build_dir) |*dir| dir.close(); @@ -4247,12 +4222,6 @@ pub fn sub_cmd_pkg_fetch(gpa: Allocator, arena: Allocator) !void { } }; - var build_pkg: Package = .{ - .root_src_directory = build_directory, - .root_src_path = build_zig_basename, - }; - _ = build_pkg; - var override_global_cache_dir: ?[]const u8 = try optionalStringEnvVar(arena, "ZIG_GLOBAL_CACHE_DIR"); var global_cache_directory: Compilation.Directory = l: { const p = override_global_cache_dir orelse try introspect.resolveGlobalCacheDir(arena); @@ -4261,8 +4230,9 @@ pub fn sub_cmd_pkg_fetch(gpa: Allocator, arena: Allocator) !void { .path = p, }; }; - var override_local_cache_dir: ?[]const u8 = try optionalStringEnvVar(arena, "ZIG_LOCAL_CACHE_DIR"); defer global_cache_directory.handle.close(); + + var override_local_cache_dir: ?[]const u8 = try optionalStringEnvVar(arena, "ZIG_LOCAL_CACHE_DIR"); var local_cache_directory: Compilation.Directory = l: { if (override_local_cache_dir) |local_cache_dir_path| { break :l .{ @@ -4277,6 +4247,7 @@ pub fn sub_cmd_pkg_fetch(gpa: Allocator, arena: Allocator) !void { }; }; defer local_cache_directory.handle.close(); + // Here we borrow main package's table and will replace it with a fresh // one after this process completes. 
@@ -4287,6 +4258,13 @@ pub fn sub_cmd_pkg_fetch(gpa: Allocator, arena: Allocator) !void { var all_modules: Package.AllModules = .{}; defer all_modules.deinit(gpa); + var thread_pool: ThreadPool = undefined; + try thread_pool.init(.{ .allocator = gpa }); + defer thread_pool.deinit(); + + var http_client: std.http.Client = .{ .allocator = gpa }; + defer http_client.deinit(); + const fetch_result = Package.fetchDependencies( arena, &thread_pool, @@ -4294,7 +4272,6 @@ pub fn sub_cmd_pkg_fetch(gpa: Allocator, arena: Allocator) !void { build_directory, global_cache_directory, local_cache_directory, - &build_roots_source, "", &wip_errors, ); From f06d28738a45baba6cb59d5cf3bb21391924e0a3 Mon Sep 17 00:00:00 2001 From: DraagrenKirneh Date: Mon, 17 Apr 2023 01:53:02 +0200 Subject: [PATCH 05/13] cleanup. handle build dir cleanup when found in a parent directory --- src/main.zig | 24 ++++++------------------ 1 file changed, 6 insertions(+), 18 deletions(-) diff --git a/src/main.zig b/src/main.zig index 29efaf519b50..d43e5426da89 100644 --- a/src/main.zig +++ b/src/main.zig @@ -4127,11 +4127,6 @@ pub fn cmdInit( } } -const PackageOptions = struct { - @"--cache-dir": []const u8, - @"--global-cache-dir": []const u8, -}; - pub const usage_pkg = \\Usage: zig pkg [command] [options] \\ @@ -4164,22 +4159,10 @@ pub fn cmdPkg(gpa: Allocator, arena: Allocator, args: []const []const u8) !void pub fn cmdPkgFetch(gpa: Allocator, arena: Allocator) !void { var color: Color = .auto; - const self_exe_path = try introspect.findZigExePath(arena); - - var override_lib_dir: ?[]const u8 = try optionalStringEnvVar(arena, "ZIG_LIB_DIR"); var cleanup_build_runner_dir: ?fs.Dir = null; defer if (cleanup_build_runner_dir) |*dir| dir.close(); const cwd_path = try process.getCwdAlloc(arena); - var zig_lib_directory: Compilation.Directory = if (override_lib_dir) |lib_dir| .{ - .path = lib_dir, - .handle = fs.cwd().openDir(lib_dir, .{}) catch |err| { - fatal("unable to open zig lib directory from 
'zig-lib-dir' argument: '{s}': {s}", .{ lib_dir, @errorName(err) }); - }, - } else introspect.findZigLibDirFromSelfExe(arena, self_exe_path) catch |err| { - fatal("unable to find zig installation directory '{s}': {s}", .{ self_exe_path, @errorName(err) }); - }; - defer zig_lib_directory.handle.close(); var build_file: ?[]const u8 = null; var cleanup_build_dir: ?fs.Dir = null; @@ -4194,7 +4177,6 @@ pub fn cmdPkgFetch(gpa: Allocator, arena: Allocator) !void { cleanup_build_dir = dir; break :blk .{ .path = dirname, .handle = dir }; } - break :blk .{ .path = null, .handle = fs.cwd() }; } // Search up parent directories until we find build.zig. @@ -4205,6 +4187,9 @@ pub fn cmdPkgFetch(gpa: Allocator, arena: Allocator) !void { const dir = fs.cwd().openDir(dirname, .{}) catch |err| { fatal("unable to open directory while searching for build.zig file, '{s}': {s}", .{ dirname, @errorName(err) }); }; + if (!mem.eql(u8, dirname, cwd_path)) { + cleanup_build_dir = dir; + } break :blk .{ .path = dirname, .handle = dir }; } else |err| switch (err) { error.FileNotFound => { @@ -4422,6 +4407,9 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi const dir = fs.cwd().openDir(dirname, .{}) catch |err| { fatal("unable to open directory while searching for build.zig file, '{s}': {s}", .{ dirname, @errorName(err) }); }; + if (!mem.eql(u8, dirname, cwd_path)) { + cleanup_build_dir = dir; + } break :blk .{ .path = dirname, .handle = dir }; } else |err| switch (err) { error.FileNotFound => { From 5d19bc897333d76909046fab60919d9c56a64294 Mon Sep 17 00:00:00 2001 From: DraagrenKirneh Date: Mon, 17 Apr 2023 02:30:41 +0200 Subject: [PATCH 06/13] add back newline in .gitignore --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 0b4957c56a13..feda423c10de 100644 --- a/.gitignore +++ b/.gitignore @@ -15,4 +15,4 @@ zig-out/ /debug/ /build/ /build-*/ -/docgen_tmp/ \ No newline at end of file +/docgen_tmp/ 
From 7eef638e4546f4a926e53b7b3b1afebff7f37859 Mon Sep 17 00:00:00 2001 From: DraagrenKirneh Date: Mon, 17 Apr 2023 02:31:21 +0200 Subject: [PATCH 07/13] remove wip file --- lib/fetch_runner.zig | 1044 ------------------------------------------ 1 file changed, 1044 deletions(-) delete mode 100644 lib/fetch_runner.zig diff --git a/lib/fetch_runner.zig b/lib/fetch_runner.zig deleted file mode 100644 index 7004ede67e07..000000000000 --- a/lib/fetch_runner.zig +++ /dev/null @@ -1,1044 +0,0 @@ -const root = @import("@build"); -const std = @import("std"); -const builtin = @import("builtin"); -const assert = std.debug.assert; -const io = std.io; -const fmt = std.fmt; -const mem = std.mem; -const process = std.process; -const ArrayList = std.ArrayList; -const File = std.fs.File; -const Step = std.Build.Step; - -pub const dependencies = @import("@dependencies"); - -pub fn main() !void { - // Here we use an ArenaAllocator backed by a DirectAllocator because a fetch is a short-lived, - // one shot program. We don't need to waste time freeing memory and finding places to squish - // bytes into. So we free everything all at once at the very end. 
- var single_threaded_arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); - defer single_threaded_arena.deinit(); - - var thread_safe_arena: std.heap.ThreadSafeAllocator = .{ - .child_allocator = single_threaded_arena.allocator(), - }; - const arena = thread_safe_arena.allocator(); - - var args = try process.argsAlloc(arena); - - // skip my own exe name - var arg_idx: usize = 1; - - const zig_exe = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected path to zig compiler\n", .{}); - return error.InvalidArgs; - }; - const build_root = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected build root directory path\n", .{}); - return error.InvalidArgs; - }; - const cache_root = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected cache root directory path\n", .{}); - return error.InvalidArgs; - }; - const global_cache_root = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected global cache root directory path\n", .{}); - return error.InvalidArgs; - }; - - const host = try std.zig.system.NativeTargetInfo.detect(.{}); - - const build_root_directory: std.Build.Cache.Directory = .{ - .path = build_root, - .handle = try std.fs.cwd().openDir(build_root, .{}), - }; - - const local_cache_directory: std.Build.Cache.Directory = .{ - .path = cache_root, - .handle = try std.fs.cwd().makeOpenPath(cache_root, .{}), - }; - - const global_cache_directory: std.Build.Cache.Directory = .{ - .path = global_cache_root, - .handle = try std.fs.cwd().makeOpenPath(global_cache_root, .{}), - }; - - var cache: std.Build.Cache = .{ - .gpa = arena, - .manifest_dir = try local_cache_directory.handle.makeOpenPath("h", .{}), - }; - cache.addPrefix(.{ .path = null, .handle = std.fs.cwd() }); - cache.addPrefix(build_root_directory); - cache.addPrefix(local_cache_directory); - cache.addPrefix(global_cache_directory); - cache.hash.addBytes(builtin.zig_version_string); - - const builder = try std.Build.create( - arena, - zig_exe, - build_root_directory, - 
local_cache_directory, - global_cache_directory, - host, - &cache, - ); - defer builder.destroy(); - - var targets = ArrayList([]const u8).init(arena); - var debug_log_scopes = ArrayList([]const u8).init(arena); - var thread_pool_options: std.Thread.Pool.Options = .{ .allocator = arena }; - - var install_prefix: ?[]const u8 = null; - var dir_list = std.Build.DirList{}; - var enable_summary: ?bool = null; - var max_rss: usize = 0; - var color: Color = .auto; - - const stderr_stream = io.getStdErr().writer(); - const stdout_stream = io.getStdOut().writer(); - - while (nextArg(args, &arg_idx)) |arg| { - if (mem.startsWith(u8, arg, "-D")) { - const option_contents = arg[2..]; - if (option_contents.len == 0) { - std.debug.print("Expected option name after '-D'\n\n", .{}); - usageAndErr(builder, false, stderr_stream); - } - if (mem.indexOfScalar(u8, option_contents, '=')) |name_end| { - const option_name = option_contents[0..name_end]; - const option_value = option_contents[name_end + 1 ..]; - if (try builder.addUserInputOption(option_name, option_value)) - usageAndErr(builder, false, stderr_stream); - } else { - if (try builder.addUserInputFlag(option_contents)) - usageAndErr(builder, false, stderr_stream); - } - } else if (mem.startsWith(u8, arg, "-")) { - if (mem.eql(u8, arg, "--verbose")) { - builder.verbose = true; - } else if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) { - return usage(builder, false, stdout_stream); - } else if (mem.eql(u8, arg, "-p") or mem.eql(u8, arg, "--prefix")) { - install_prefix = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected argument after {s}\n\n", .{arg}); - usageAndErr(builder, false, stderr_stream); - }; - } else if (mem.eql(u8, arg, "-l") or mem.eql(u8, arg, "--list-steps")) { - return steps(builder, false, stdout_stream); - } else if (mem.eql(u8, arg, "--prefix-lib-dir")) { - dir_list.lib_dir = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected argument after {s}\n\n", .{arg}); - 
usageAndErr(builder, false, stderr_stream); - }; - } else if (mem.eql(u8, arg, "--prefix-exe-dir")) { - dir_list.exe_dir = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected argument after {s}\n\n", .{arg}); - usageAndErr(builder, false, stderr_stream); - }; - } else if (mem.eql(u8, arg, "--prefix-include-dir")) { - dir_list.include_dir = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected argument after {s}\n\n", .{arg}); - usageAndErr(builder, false, stderr_stream); - }; - } else if (mem.eql(u8, arg, "--sysroot")) { - const sysroot = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected argument after {s}\n\n", .{arg}); - usageAndErr(builder, false, stderr_stream); - }; - builder.sysroot = sysroot; - } else if (mem.eql(u8, arg, "--maxrss")) { - const max_rss_text = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected argument after {s}\n\n", .{arg}); - usageAndErr(builder, false, stderr_stream); - }; - // TODO: support shorthand such as "2GiB", "2GB", or "2G" - max_rss = std.fmt.parseInt(usize, max_rss_text, 10) catch |err| { - std.debug.print("invalid byte size: '{s}': {s}\n", .{ - max_rss_text, @errorName(err), - }); - process.exit(1); - }; - } else if (mem.eql(u8, arg, "--search-prefix")) { - const search_prefix = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected argument after {s}\n\n", .{arg}); - usageAndErr(builder, false, stderr_stream); - }; - builder.addSearchPrefix(search_prefix); - } else if (mem.eql(u8, arg, "--libc")) { - const libc_file = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected argument after {s}\n\n", .{arg}); - usageAndErr(builder, false, stderr_stream); - }; - builder.libc_file = libc_file; - } else if (mem.eql(u8, arg, "--color")) { - const next_arg = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected [auto|on|off] after {s}\n\n", .{arg}); - usageAndErr(builder, false, stderr_stream); - }; - color = std.meta.stringToEnum(Color, next_arg) orelse { - 
std.debug.print("Expected [auto|on|off] after {s}, found '{s}'\n\n", .{ arg, next_arg }); - usageAndErr(builder, false, stderr_stream); - }; - } else if (mem.eql(u8, arg, "--zig-lib-dir")) { - builder.zig_lib_dir = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected argument after {s}\n\n", .{arg}); - usageAndErr(builder, false, stderr_stream); - }; - } else if (mem.eql(u8, arg, "--debug-log")) { - const next_arg = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected argument after {s}\n\n", .{arg}); - usageAndErr(builder, false, stderr_stream); - }; - try debug_log_scopes.append(next_arg); - } else if (mem.eql(u8, arg, "--debug-pkg-config")) { - builder.debug_pkg_config = true; - } else if (mem.eql(u8, arg, "--debug-compile-errors")) { - builder.debug_compile_errors = true; - } else if (mem.eql(u8, arg, "--glibc-runtimes")) { - builder.glibc_runtimes_dir = nextArg(args, &arg_idx) orelse { - std.debug.print("Expected argument after {s}\n\n", .{arg}); - usageAndErr(builder, false, stderr_stream); - }; - } else if (mem.eql(u8, arg, "--verbose-link")) { - builder.verbose_link = true; - } else if (mem.eql(u8, arg, "--verbose-air")) { - builder.verbose_air = true; - } else if (mem.eql(u8, arg, "--verbose-llvm-ir")) { - builder.verbose_llvm_ir = "-"; - } else if (mem.startsWith(u8, arg, "--verbose-llvm-ir=")) { - builder.verbose_llvm_ir = arg["--verbose-llvm-ir=".len..]; - } else if (mem.eql(u8, arg, "--verbose-llvm-bc=")) { - builder.verbose_llvm_bc = arg["--verbose-llvm-bc=".len..]; - } else if (mem.eql(u8, arg, "--verbose-cimport")) { - builder.verbose_cimport = true; - } else if (mem.eql(u8, arg, "--verbose-cc")) { - builder.verbose_cc = true; - } else if (mem.eql(u8, arg, "--verbose-llvm-cpu-features")) { - builder.verbose_llvm_cpu_features = true; - } else if (mem.eql(u8, arg, "-fwine")) { - builder.enable_wine = true; - } else if (mem.eql(u8, arg, "-fno-wine")) { - builder.enable_wine = false; - } else if (mem.eql(u8, arg, "-fqemu")) { - 
builder.enable_qemu = true; - } else if (mem.eql(u8, arg, "-fno-qemu")) { - builder.enable_qemu = false; - } else if (mem.eql(u8, arg, "-fwasmtime")) { - builder.enable_wasmtime = true; - } else if (mem.eql(u8, arg, "-fno-wasmtime")) { - builder.enable_wasmtime = false; - } else if (mem.eql(u8, arg, "-frosetta")) { - builder.enable_rosetta = true; - } else if (mem.eql(u8, arg, "-fno-rosetta")) { - builder.enable_rosetta = false; - } else if (mem.eql(u8, arg, "-fdarling")) { - builder.enable_darling = true; - } else if (mem.eql(u8, arg, "-fno-darling")) { - builder.enable_darling = false; - } else if (mem.eql(u8, arg, "-fsummary")) { - enable_summary = true; - } else if (mem.eql(u8, arg, "-fno-summary")) { - enable_summary = false; - } else if (mem.eql(u8, arg, "-freference-trace")) { - builder.reference_trace = 256; - } else if (mem.startsWith(u8, arg, "-freference-trace=")) { - const num = arg["-freference-trace=".len..]; - builder.reference_trace = std.fmt.parseUnsigned(u32, num, 10) catch |err| { - std.debug.print("unable to parse reference_trace count '{s}': {s}", .{ num, @errorName(err) }); - process.exit(1); - }; - } else if (mem.eql(u8, arg, "-fno-reference-trace")) { - builder.reference_trace = null; - } else if (mem.startsWith(u8, arg, "-j")) { - const num = arg["-j".len..]; - const n_jobs = std.fmt.parseUnsigned(u32, num, 10) catch |err| { - std.debug.print("unable to parse jobs count '{s}': {s}", .{ - num, @errorName(err), - }); - process.exit(1); - }; - if (n_jobs < 1) { - std.debug.print("number of jobs must be at least 1\n", .{}); - process.exit(1); - } - thread_pool_options.n_jobs = n_jobs; - } else if (mem.eql(u8, arg, "--")) { - builder.args = argsRest(args, arg_idx); - break; - } else { - std.debug.print("Unrecognized argument: {s}\n\n", .{arg}); - usageAndErr(builder, false, stderr_stream); - } - } else { - try targets.append(arg); - } - } - - const stderr = std.io.getStdErr(); - const ttyconf = get_tty_conf(color, stderr); - switch (ttyconf) { - 
.no_color => try builder.env_map.put("NO_COLOR", "1"), - .escape_codes => try builder.env_map.put("ZIG_DEBUG_COLOR", "1"), - .windows_api => {}, - } - - var progress: std.Progress = .{ .dont_print_on_dumb = true }; - const main_progress_node = progress.start("", 0); - - builder.debug_log_scopes = debug_log_scopes.items; - builder.resolveInstallPrefix(install_prefix, dir_list); - { - var prog_node = main_progress_node.start("user build.zig logic", 0); - defer prog_node.end(); - try builder.runBuild(root); - } - - if (builder.validateUserInputDidItFail()) - usageAndErr(builder, true, stderr_stream); - - var run: Run = .{ - .max_rss = max_rss, - .max_rss_is_default = false, - .max_rss_mutex = .{}, - .memory_blocked_steps = std.ArrayList(*Step).init(arena), - - .claimed_rss = 0, - .enable_summary = enable_summary, - .ttyconf = ttyconf, - .stderr = stderr, - }; - - if (run.max_rss == 0) { - run.max_rss = process.totalSystemMemory() catch std.math.maxInt(usize); - run.max_rss_is_default = true; - } - - runStepNames( - arena, - builder, - targets.items, - main_progress_node, - thread_pool_options, - &run, - ) catch |err| switch (err) { - error.UncleanExit => process.exit(1), - else => return err, - }; -} - -const Run = struct { - max_rss: usize, - max_rss_is_default: bool, - max_rss_mutex: std.Thread.Mutex, - memory_blocked_steps: std.ArrayList(*Step), - - claimed_rss: usize, - enable_summary: ?bool, - ttyconf: std.debug.TTY.Config, - stderr: std.fs.File, -}; - -fn runStepNames( - arena: std.mem.Allocator, - b: *std.Build, - step_names: []const []const u8, - parent_prog_node: *std.Progress.Node, - thread_pool_options: std.Thread.Pool.Options, - run: *Run, -) !void { - const gpa = b.allocator; - var step_stack: std.AutoArrayHashMapUnmanaged(*Step, void) = .{}; - defer step_stack.deinit(gpa); - - if (step_names.len == 0) { - try step_stack.put(gpa, b.default_step, {}); - } else { - try step_stack.ensureUnusedCapacity(gpa, step_names.len); - for (0..step_names.len) |i| { - 
const step_name = step_names[step_names.len - i - 1]; - const s = b.top_level_steps.get(step_name) orelse { - std.debug.print("no step named '{s}'. Access the help menu with 'zig build -h'\n", .{step_name}); - process.exit(1); - }; - step_stack.putAssumeCapacity(&s.step, {}); - } - } - - const starting_steps = try arena.dupe(*Step, step_stack.keys()); - for (starting_steps) |s| { - checkForDependencyLoop(b, s, &step_stack) catch |err| switch (err) { - error.DependencyLoopDetected => return error.UncleanExit, - else => |e| return e, - }; - } - - { - // Check that we have enough memory to complete the build. - var any_problems = false; - for (step_stack.keys()) |s| { - if (s.max_rss == 0) continue; - if (s.max_rss > run.max_rss) { - std.debug.print("{s}{s}: this step declares an upper bound of {d} bytes of memory, exceeding the available {d} bytes of memory\n", .{ - s.owner.dep_prefix, s.name, s.max_rss, run.max_rss, - }); - any_problems = true; - } - } - if (any_problems) { - if (run.max_rss_is_default) { - std.debug.print("note: use --maxrss to override the default", .{}); - } - return error.UncleanExit; - } - } - - var thread_pool: std.Thread.Pool = undefined; - try thread_pool.init(thread_pool_options); - defer thread_pool.deinit(); - - { - defer parent_prog_node.end(); - - var step_prog = parent_prog_node.start("steps", step_stack.count()); - defer step_prog.end(); - - var wait_group: std.Thread.WaitGroup = .{}; - defer wait_group.wait(); - - // Here we spawn the initial set of tasks with a nice heuristic - - // dependency order. Each worker when it finishes a step will then - // check whether it should run any dependants. 
- const steps_slice = step_stack.keys(); - for (0..steps_slice.len) |i| { - const step = steps_slice[steps_slice.len - i - 1]; - - wait_group.start(); - thread_pool.spawn(workerMakeOneStep, .{ - &wait_group, &thread_pool, b, step, &step_prog, run, - }) catch @panic("OOM"); - } - } - assert(run.memory_blocked_steps.items.len == 0); - - var test_skip_count: usize = 0; - var test_fail_count: usize = 0; - var test_pass_count: usize = 0; - var test_leak_count: usize = 0; - var test_count: usize = 0; - - var success_count: usize = 0; - var skipped_count: usize = 0; - var failure_count: usize = 0; - var pending_count: usize = 0; - var total_compile_errors: usize = 0; - var compile_error_steps: std.ArrayListUnmanaged(*Step) = .{}; - defer compile_error_steps.deinit(gpa); - - for (step_stack.keys()) |s| { - test_fail_count += s.test_results.fail_count; - test_skip_count += s.test_results.skip_count; - test_leak_count += s.test_results.leak_count; - test_pass_count += s.test_results.passCount(); - test_count += s.test_results.test_count; - - switch (s.state) { - .precheck_unstarted => unreachable, - .precheck_started => unreachable, - .running => unreachable, - .precheck_done => { - // precheck_done is equivalent to dependency_failure in the case of - // transitive dependencies. For example: - // A -> B -> C (failure) - // B will be marked as dependency_failure, while A may never be queued, and thus - // remain in the initial state of precheck_done. - s.state = .dependency_failure; - pending_count += 1; - }, - .dependency_failure => pending_count += 1, - .success => success_count += 1, - .skipped => skipped_count += 1, - .failure => { - failure_count += 1; - const compile_errors_len = s.result_error_bundle.errorMessageCount(); - if (compile_errors_len > 0) { - total_compile_errors += compile_errors_len; - try compile_error_steps.append(gpa, s); - } - }, - } - } - - // A proper command line application defaults to silently succeeding. 
- // The user may request verbose mode if they have a different preference. - if (failure_count == 0 and run.enable_summary != true) return cleanExit(); - - const ttyconf = run.ttyconf; - const stderr = run.stderr; - - if (run.enable_summary != false) { - const total_count = success_count + failure_count + pending_count + skipped_count; - ttyconf.setColor(stderr, .Cyan) catch {}; - stderr.writeAll("Build Summary:") catch {}; - ttyconf.setColor(stderr, .Reset) catch {}; - stderr.writer().print(" {d}/{d} steps succeeded", .{ success_count, total_count }) catch {}; - if (skipped_count > 0) stderr.writer().print("; {d} skipped", .{skipped_count}) catch {}; - if (failure_count > 0) stderr.writer().print("; {d} failed", .{failure_count}) catch {}; - - if (test_count > 0) stderr.writer().print("; {d}/{d} tests passed", .{ test_pass_count, test_count }) catch {}; - if (test_skip_count > 0) stderr.writer().print("; {d} skipped", .{test_skip_count}) catch {}; - if (test_fail_count > 0) stderr.writer().print("; {d} failed", .{test_fail_count}) catch {}; - if (test_leak_count > 0) stderr.writer().print("; {d} leaked", .{test_leak_count}) catch {}; - - if (run.enable_summary == null) { - ttyconf.setColor(stderr, .Dim) catch {}; - stderr.writeAll(" (disable with -fno-summary)") catch {}; - ttyconf.setColor(stderr, .Reset) catch {}; - } - stderr.writeAll("\n") catch {}; - - // Print a fancy tree with build results. - var print_node: PrintNode = .{ .parent = null }; - if (step_names.len == 0) { - print_node.last = true; - printTreeStep(b, b.default_step, stderr, ttyconf, &print_node, &step_stack) catch {}; - } else { - for (step_names, 0..) |step_name, i| { - const tls = b.top_level_steps.get(step_name).?; - print_node.last = i + 1 == b.top_level_steps.count(); - printTreeStep(b, &tls.step, stderr, ttyconf, &print_node, &step_stack) catch {}; - } - } - } - - if (failure_count == 0) return cleanExit(); - - // Finally, render compile errors at the bottom of the terminal. 
- // We use a separate compile_error_steps array list because step_stack is destructively - // mutated in printTreeStep above. - if (total_compile_errors > 0) { - for (compile_error_steps.items) |s| { - if (s.result_error_bundle.errorMessageCount() > 0) { - s.result_error_bundle.renderToStdErr(renderOptions(ttyconf)); - } - } - - // Signal to parent process that we have printed compile errors. The - // parent process may choose to omit the "following command failed" - // line in this case. - process.exit(2); - } - - process.exit(1); -} - -const PrintNode = struct { - parent: ?*PrintNode, - last: bool = false, -}; - -fn printPrefix(node: *PrintNode, stderr: std.fs.File, ttyconf: std.debug.TTY.Config) !void { - const parent = node.parent orelse return; - if (parent.parent == null) return; - try printPrefix(parent, stderr, ttyconf); - if (parent.last) { - try stderr.writeAll(" "); - } else { - try stderr.writeAll(switch (ttyconf) { - .no_color, .windows_api => "| ", - .escape_codes => "\x1B\x28\x30\x78\x1B\x28\x42 ", // │ - }); - } -} - -fn printTreeStep( - b: *std.Build, - s: *Step, - stderr: std.fs.File, - ttyconf: std.debug.TTY.Config, - parent_node: *PrintNode, - step_stack: *std.AutoArrayHashMapUnmanaged(*Step, void), -) !void { - const first = step_stack.swapRemove(s); - try printPrefix(parent_node, stderr, ttyconf); - - if (!first) try ttyconf.setColor(stderr, .Dim); - if (parent_node.parent != null) { - if (parent_node.last) { - try stderr.writeAll(switch (ttyconf) { - .no_color, .windows_api => "+- ", - .escape_codes => "\x1B\x28\x30\x6d\x71\x1B\x28\x42 ", // └─ - }); - } else { - try stderr.writeAll(switch (ttyconf) { - .no_color, .windows_api => "+- ", - .escape_codes => "\x1B\x28\x30\x74\x71\x1B\x28\x42 ", // ├─ - }); - } - } - - // dep_prefix omitted here because it is redundant with the tree. 
- try stderr.writeAll(s.name); - - if (first) { - switch (s.state) { - .precheck_unstarted => unreachable, - .precheck_started => unreachable, - .precheck_done => unreachable, - .running => unreachable, - - .dependency_failure => { - try ttyconf.setColor(stderr, .Dim); - try stderr.writeAll(" transitive failure\n"); - try ttyconf.setColor(stderr, .Reset); - }, - - .success => { - try ttyconf.setColor(stderr, .Green); - if (s.result_cached) { - try stderr.writeAll(" cached"); - } else if (s.test_results.test_count > 0) { - const pass_count = s.test_results.passCount(); - try stderr.writer().print(" {d} passed", .{pass_count}); - if (s.test_results.skip_count > 0) { - try ttyconf.setColor(stderr, .Yellow); - try stderr.writer().print(" {d} skipped", .{s.test_results.skip_count}); - } - } else { - try stderr.writeAll(" success"); - } - try ttyconf.setColor(stderr, .Reset); - if (s.result_duration_ns) |ns| { - try ttyconf.setColor(stderr, .Dim); - if (ns >= std.time.ns_per_min) { - try stderr.writer().print(" {d}m", .{ns / std.time.ns_per_min}); - } else if (ns >= std.time.ns_per_s) { - try stderr.writer().print(" {d}s", .{ns / std.time.ns_per_s}); - } else if (ns >= std.time.ns_per_ms) { - try stderr.writer().print(" {d}ms", .{ns / std.time.ns_per_ms}); - } else if (ns >= std.time.ns_per_us) { - try stderr.writer().print(" {d}us", .{ns / std.time.ns_per_us}); - } else { - try stderr.writer().print(" {d}ns", .{ns}); - } - try ttyconf.setColor(stderr, .Reset); - } - if (s.result_peak_rss != 0) { - const rss = s.result_peak_rss; - try ttyconf.setColor(stderr, .Dim); - if (rss >= 1000_000_000) { - try stderr.writer().print(" MaxRSS:{d}G", .{rss / 1000_000_000}); - } else if (rss >= 1000_000) { - try stderr.writer().print(" MaxRSS:{d}M", .{rss / 1000_000}); - } else if (rss >= 1000) { - try stderr.writer().print(" MaxRSS:{d}K", .{rss / 1000}); - } else { - try stderr.writer().print(" MaxRSS:{d}B", .{rss}); - } - try ttyconf.setColor(stderr, .Reset); - } - try 
stderr.writeAll("\n"); - }, - - .skipped => { - try ttyconf.setColor(stderr, .Yellow); - try stderr.writeAll(" skipped\n"); - try ttyconf.setColor(stderr, .Reset); - }, - - .failure => { - if (s.result_error_bundle.errorMessageCount() > 0) { - try ttyconf.setColor(stderr, .Red); - try stderr.writer().print(" {d} errors\n", .{ - s.result_error_bundle.errorMessageCount(), - }); - try ttyconf.setColor(stderr, .Reset); - } else if (!s.test_results.isSuccess()) { - try stderr.writer().print(" {d}/{d} passed", .{ - s.test_results.passCount(), s.test_results.test_count, - }); - if (s.test_results.fail_count > 0) { - try stderr.writeAll(", "); - try ttyconf.setColor(stderr, .Red); - try stderr.writer().print("{d} failed", .{ - s.test_results.fail_count, - }); - try ttyconf.setColor(stderr, .Reset); - } - if (s.test_results.skip_count > 0) { - try stderr.writeAll(", "); - try ttyconf.setColor(stderr, .Yellow); - try stderr.writer().print("{d} skipped", .{ - s.test_results.skip_count, - }); - try ttyconf.setColor(stderr, .Reset); - } - if (s.test_results.leak_count > 0) { - try stderr.writeAll(", "); - try ttyconf.setColor(stderr, .Red); - try stderr.writer().print("{d} leaked", .{ - s.test_results.leak_count, - }); - try ttyconf.setColor(stderr, .Reset); - } - try stderr.writeAll("\n"); - } else { - try ttyconf.setColor(stderr, .Red); - try stderr.writeAll(" failure\n"); - try ttyconf.setColor(stderr, .Reset); - } - }, - } - - for (s.dependencies.items, 0..) 
|dep, i| { - var print_node: PrintNode = .{ - .parent = parent_node, - .last = i == s.dependencies.items.len - 1, - }; - try printTreeStep(b, dep, stderr, ttyconf, &print_node, step_stack); - } - } else { - if (s.dependencies.items.len == 0) { - try stderr.writeAll(" (reused)\n"); - } else { - try stderr.writer().print(" (+{d} more reused dependencies)\n", .{ - s.dependencies.items.len, - }); - } - try ttyconf.setColor(stderr, .Reset); - } -} - -fn checkForDependencyLoop( - b: *std.Build, - s: *Step, - step_stack: *std.AutoArrayHashMapUnmanaged(*Step, void), -) !void { - switch (s.state) { - .precheck_started => { - std.debug.print("dependency loop detected:\n {s}\n", .{s.name}); - return error.DependencyLoopDetected; - }, - .precheck_unstarted => { - s.state = .precheck_started; - - try step_stack.ensureUnusedCapacity(b.allocator, s.dependencies.items.len); - for (s.dependencies.items) |dep| { - try step_stack.put(b.allocator, dep, {}); - try dep.dependants.append(b.allocator, s); - checkForDependencyLoop(b, dep, step_stack) catch |err| { - if (err == error.DependencyLoopDetected) { - std.debug.print(" {s}\n", .{s.name}); - } - return err; - }; - } - - s.state = .precheck_done; - }, - .precheck_done => {}, - - // These don't happen until we actually run the step graph. - .dependency_failure => unreachable, - .running => unreachable, - .success => unreachable, - .failure => unreachable, - .skipped => unreachable, - } -} - -fn workerMakeOneStep( - wg: *std.Thread.WaitGroup, - thread_pool: *std.Thread.Pool, - b: *std.Build, - s: *Step, - prog_node: *std.Progress.Node, - run: *Run, -) void { - defer wg.finish(); - - // First, check the conditions for running this step. If they are not met, - // then we return without doing the step, relying on another worker to - // queue this step up again when dependencies are met. 
- for (s.dependencies.items) |dep| { - switch (@atomicLoad(Step.State, &dep.state, .SeqCst)) { - .success, .skipped => continue, - .failure, .dependency_failure => { - @atomicStore(Step.State, &s.state, .dependency_failure, .SeqCst); - return; - }, - .precheck_done, .running => { - // dependency is not finished yet. - return; - }, - .precheck_unstarted => unreachable, - .precheck_started => unreachable, - } - } - - if (s.max_rss != 0) { - run.max_rss_mutex.lock(); - defer run.max_rss_mutex.unlock(); - - // Avoid running steps twice. - if (s.state != .precheck_done) { - // Another worker got the job. - return; - } - - const new_claimed_rss = run.claimed_rss + s.max_rss; - if (new_claimed_rss > run.max_rss) { - // Running this step right now could possibly exceed the allotted RSS. - // Add this step to the queue of memory-blocked steps. - run.memory_blocked_steps.append(s) catch @panic("OOM"); - return; - } - - run.claimed_rss = new_claimed_rss; - s.state = .running; - } else { - // Avoid running steps twice. - if (@cmpxchgStrong(Step.State, &s.state, .precheck_done, .running, .SeqCst, .SeqCst) != null) { - // Another worker got the job. - return; - } - } - - var sub_prog_node = prog_node.start(s.name, 0); - sub_prog_node.activate(); - defer sub_prog_node.end(); - - const make_result = s.make(&sub_prog_node); - - // No matter the result, we want to display error/warning messages. - if (s.result_error_msgs.items.len > 0) { - sub_prog_node.context.lock_stderr(); - defer sub_prog_node.context.unlock_stderr(); - - const stderr = run.stderr; - const ttyconf = run.ttyconf; - - for (s.result_error_msgs.items) |msg| { - // Sometimes it feels like you just can't catch a break. Finally, - // with Zig, you can. 
- ttyconf.setColor(stderr, .Bold) catch break; - stderr.writeAll(s.owner.dep_prefix) catch break; - stderr.writeAll(s.name) catch break; - stderr.writeAll(": ") catch break; - ttyconf.setColor(stderr, .Red) catch break; - stderr.writeAll("error: ") catch break; - ttyconf.setColor(stderr, .Reset) catch break; - stderr.writeAll(msg) catch break; - stderr.writeAll("\n") catch break; - } - } - - handle_result: { - if (make_result) |_| { - @atomicStore(Step.State, &s.state, .success, .SeqCst); - } else |err| switch (err) { - error.MakeFailed => { - @atomicStore(Step.State, &s.state, .failure, .SeqCst); - break :handle_result; - }, - error.MakeSkipped => @atomicStore(Step.State, &s.state, .skipped, .SeqCst), - } - - // Successful completion of a step, so we queue up its dependants as well. - for (s.dependants.items) |dep| { - wg.start(); - thread_pool.spawn(workerMakeOneStep, .{ - wg, thread_pool, b, dep, prog_node, run, - }) catch @panic("OOM"); - } - } - - // If this is a step that claims resources, we must now queue up other - // steps that are waiting for resources. - if (s.max_rss != 0) { - run.max_rss_mutex.lock(); - defer run.max_rss_mutex.unlock(); - - // Give the memory back to the scheduler. - run.claimed_rss -= s.max_rss; - // Avoid kicking off too many tasks that we already know will not have - // enough resources. 
- var remaining = run.max_rss - run.claimed_rss; - var i: usize = 0; - var j: usize = 0; - while (j < run.memory_blocked_steps.items.len) : (j += 1) { - const dep = run.memory_blocked_steps.items[j]; - assert(dep.max_rss != 0); - if (dep.max_rss <= remaining) { - remaining -= dep.max_rss; - - wg.start(); - thread_pool.spawn(workerMakeOneStep, .{ - wg, thread_pool, b, dep, prog_node, run, - }) catch @panic("OOM"); - } else { - run.memory_blocked_steps.items[i] = dep; - i += 1; - } - } - run.memory_blocked_steps.shrinkRetainingCapacity(i); - } -} - -fn steps(builder: *std.Build, already_ran_build: bool, out_stream: anytype) !void { - // run the build script to collect the options - if (!already_ran_build) { - builder.resolveInstallPrefix(null, .{}); - try builder.runBuild(root); - } - - const allocator = builder.allocator; - for (builder.top_level_steps.values()) |top_level_step| { - const name = if (&top_level_step.step == builder.default_step) - try fmt.allocPrint(allocator, "{s} (default)", .{top_level_step.step.name}) - else - top_level_step.step.name; - try out_stream.print(" {s:<28} {s}\n", .{ name, top_level_step.description }); - } -} - -fn usage(builder: *std.Build, already_ran_build: bool, out_stream: anytype) !void { - // run the build script to collect the options - if (!already_ran_build) { - builder.resolveInstallPrefix(null, .{}); - try builder.runBuild(root); - } - - try out_stream.print( - \\ - \\Usage: {s} build [steps] [options] - \\ - \\Steps: - \\ - , .{builder.zig_exe}); - try steps(builder, true, out_stream); - - try out_stream.writeAll( - \\ - \\General Options: - \\ -p, --prefix [path] Override default install prefix - \\ --prefix-lib-dir [path] Override default library directory path - \\ --prefix-exe-dir [path] Override default executable directory path - \\ --prefix-include-dir [path] Override default include directory path - \\ - \\ --sysroot [path] Set the system root directory (usually /) - \\ --search-prefix [path] Add a path to look 
for binaries, libraries, headers - \\ --libc [file] Provide a file which specifies libc paths - \\ - \\ -fdarling, -fno-darling Integration with system-installed Darling to - \\ execute macOS programs on Linux hosts - \\ (default: no) - \\ -fqemu, -fno-qemu Integration with system-installed QEMU to execute - \\ foreign-architecture programs on Linux hosts - \\ (default: no) - \\ --glibc-runtimes [path] Enhances QEMU integration by providing glibc built - \\ for multiple foreign architectures, allowing - \\ execution of non-native programs that link with glibc. - \\ -frosetta, -fno-rosetta Rely on Rosetta to execute x86_64 programs on - \\ ARM64 macOS hosts. (default: no) - \\ -fwasmtime, -fno-wasmtime Integration with system-installed wasmtime to - \\ execute WASI binaries. (default: no) - \\ -fwine, -fno-wine Integration with system-installed Wine to execute - \\ Windows programs on Linux hosts. (default: no) - \\ - \\ -h, --help Print this help and exit - \\ -l, --list-steps Print available steps - \\ --verbose Print commands before executing them - \\ --color [auto|off|on] Enable or disable colored error messages - \\ -fsummary Print the build summary, even on success - \\ -fno-summary Omit the build summary, even on failure - \\ -j Limit concurrent jobs (default is to use all CPU cores) - \\ --maxrss Limit memory usage (default is to use available memory) - \\ - \\Project-Specific Options: - \\ - ); - - const allocator = builder.allocator; - if (builder.available_options_list.items.len == 0) { - try out_stream.print(" (none)\n", .{}); - } else { - for (builder.available_options_list.items) |option| { - const name = try fmt.allocPrint(allocator, " -D{s}=[{s}]", .{ - option.name, - @tagName(option.type_id), - }); - defer allocator.free(name); - try out_stream.print("{s:<30} {s}\n", .{ name, option.description }); - if (option.enum_options) |enum_options| { - const padding = " " ** 33; - try out_stream.writeAll(padding ++ "Supported Values:\n"); - for 
(enum_options) |enum_option| { - try out_stream.print(padding ++ " {s}\n", .{enum_option}); - } - } - } - } - - try out_stream.writeAll( - \\ - \\Advanced Options: - \\ -freference-trace[=num] How many lines of reference trace should be shown per compile error - \\ -fno-reference-trace Disable reference trace - \\ --build-file [file] Override path to build.zig - \\ --cache-dir [path] Override path to local Zig cache directory - \\ --global-cache-dir [path] Override path to global Zig cache directory - \\ --zig-lib-dir [arg] Override path to Zig lib directory - \\ --build-runner [file] Override path to build runner - \\ --debug-log [scope] Enable debugging the compiler - \\ --debug-pkg-config Fail if unknown pkg-config flags encountered - \\ --verbose-link Enable compiler debug output for linking - \\ --verbose-air Enable compiler debug output for Zig AIR - \\ --verbose-llvm-ir[=file] Enable compiler debug output for LLVM IR - \\ --verbose-llvm-bc=[file] Enable compiler debug output for LLVM BC - \\ --verbose-cimport Enable compiler debug output for C imports - \\ --verbose-cc Enable compiler debug output for C compilation - \\ --verbose-llvm-cpu-features Enable compiler debug output for LLVM CPU features - \\ - ); -} - -fn usageAndErr(builder: *std.Build, already_ran_build: bool, out_stream: anytype) noreturn { - usage(builder, already_ran_build, out_stream) catch {}; - process.exit(1); -} - -fn nextArg(args: [][:0]const u8, idx: *usize) ?[:0]const u8 { - if (idx.* >= args.len) return null; - defer idx.* += 1; - return args[idx.*]; -} - -fn argsRest(args: [][:0]const u8, idx: usize) ?[][:0]const u8 { - if (idx >= args.len) return null; - return args[idx..]; -} - -fn cleanExit() void { - // Perhaps in the future there could be an Advanced Options flag such as - // --debug-build-runner-leaks which would make this function return instead - // of calling exit. 
- process.exit(0); -} - -const Color = enum { auto, off, on }; - -fn get_tty_conf(color: Color, stderr: std.fs.File) std.debug.TTY.Config { - return switch (color) { - .auto => std.debug.detectTTYConfig(stderr), - .on => .escape_codes, - .off => .no_color, - }; -} - -fn renderOptions(ttyconf: std.debug.TTY.Config) std.zig.ErrorBundle.RenderOptions { - return .{ - .ttyconf = ttyconf, - .include_source_line = ttyconf != .no_color, - .include_reference_trace = ttyconf != .no_color, - }; -} From b51765ff07e51b648962e55282abc30ccc67d1ec Mon Sep 17 00:00:00 2001 From: DraagrenKirneh Date: Sat, 22 Apr 2023 00:14:33 +0200 Subject: [PATCH 08/13] fix typo in help option, update http client to latest changes --- src/Package.zig | 7 ++++++- src/main.zig | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/Package.zig b/src/Package.zig index 19b7a8746928..b5789be29c44 100644 --- a/src/Package.zig +++ b/src/Package.zig @@ -604,9 +604,14 @@ fn fetchAndUnpack( }; defer tmp_directory.closeAndFree(gpa); - var req = try http_client.request(uri, .{}, .{}); + var h = std.http.Headers{ .allocator = gpa }; + defer h.deinit(); + + var req = try http_client.request(.GET, uri, h, .{}); defer req.deinit(); + try req.start(); + try req.do(); if (mem.endsWith(u8, uri.path, ".tar.gz")) { diff --git a/src/main.zig b/src/main.zig index 21f1310d2d17..647bf7f05034 100644 --- a/src/main.zig +++ b/src/main.zig @@ -4147,7 +4147,7 @@ pub fn cmdPkg(gpa: Allocator, arena: Allocator, args: []const []const u8) !void for (args) |arg| { if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) { const stdout = io.getStdOut().writer(); - try stdout.writeAll(usage_fmt); + try stdout.writeAll(usage_pkg); return cleanExit(); } } From 9218c824c61696602b40a483f5f45fcce62fe35f Mon Sep 17 00:00:00 2001 From: DraagrenKirneh Date: Mon, 8 May 2023 15:02:39 +0200 Subject: [PATCH 09/13] Update to work with latest changes in package.zig --- src/Package.zig | 10 +++++++--- 1 file changed, 7 
insertions(+), 3 deletions(-) diff --git a/src/Package.zig b/src/Package.zig index 0f0322018c57..0908ef4e8d7a 100644 --- a/src/Package.zig +++ b/src/Package.zig @@ -612,14 +612,18 @@ fn fetchAndUnpack( defer req.deinit(); try req.start(); + try req.wait(); - try req.do(); + const content_type = req.response.headers.getFirstValue("Content-Type") orelse + return report.fail(dep.url_tok, "missing Content-Type for '{s}'", .{uri.path}); - if (mem.endsWith(u8, uri.path, ".tar.gz")) { + if (ascii.eqlIgnoreCase(content_type, "application/gzip") or + ascii.eqlIgnoreCase(content_type, "application/x-gzip")) + { // I observed the gzip stream to read 1 byte at a time, so I am using a // buffered reader on the front of it. try unpackTarball(gpa, &req, tmp_directory.handle, std.compress.gzip); - } else if (mem.endsWith(u8, uri.path, ".tar.xz")) { + } else if (ascii.eqlIgnoreCase(content_type, "application/x-xz")) { // I have not checked what buffer sizes the xz decompression implementation uses // by default, so the same logic applies for buffering the reader as for gzip. 
try unpackTarball(gpa, &req, tmp_directory.handle, std.compress.xz); From 8cd3ed29e69d57d6a44d57de0c08b3b4ee934a60 Mon Sep 17 00:00:00 2001 From: DraagrenKirneh Date: Wed, 10 May 2023 20:38:35 +0200 Subject: [PATCH 10/13] debug build error windows - adding ws2_32 directly --- build.zig | 1 + 1 file changed, 1 insertion(+) diff --git a/build.zig b/build.zig index 208d06fe1dd7..e23e4df7944f 100644 --- a/build.zig +++ b/build.zig @@ -673,6 +673,7 @@ fn addStaticLlvmOptionsToExe(exe: *std.Build.Step.Compile) !void { exe.linkSystemLibrary("version"); exe.linkSystemLibrary("uuid"); exe.linkSystemLibrary("ole32"); + exe.linkSystemLibrary("ws2_32"); } } From db1b4e921976e338941862af6b3d5998699637c2 Mon Sep 17 00:00:00 2001 From: DraagrenKirneh Date: Wed, 10 May 2023 21:11:46 +0200 Subject: [PATCH 11/13] add ws2_32 for enable_llvm as well --- build.zig | 1 + 1 file changed, 1 insertion(+) diff --git a/build.zig b/build.zig index e23e4df7944f..1fa686b986d0 100644 --- a/build.zig +++ b/build.zig @@ -302,6 +302,7 @@ pub fn build(b: *std.Build) !void { artifact.linkSystemLibrary("version"); artifact.linkSystemLibrary("uuid"); artifact.linkSystemLibrary("ole32"); + artifact.linkSystemLibrary("ws2_32"); } } } From 9b51c60051e66689e8b000a1639aed907ea39ad7 Mon Sep 17 00:00:00 2001 From: DraagrenKirneh Date: Sun, 14 May 2023 00:10:31 +0200 Subject: [PATCH 12/13] dbg: try with omit fetch code --- build.zig | 2 -- src/main.zig | 5 +++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/build.zig b/build.zig index 1fa686b986d0..208d06fe1dd7 100644 --- a/build.zig +++ b/build.zig @@ -302,7 +302,6 @@ pub fn build(b: *std.Build) !void { artifact.linkSystemLibrary("version"); artifact.linkSystemLibrary("uuid"); artifact.linkSystemLibrary("ole32"); - artifact.linkSystemLibrary("ws2_32"); } } } @@ -674,7 +673,6 @@ fn addStaticLlvmOptionsToExe(exe: *std.Build.Step.Compile) !void { exe.linkSystemLibrary("version"); exe.linkSystemLibrary("uuid"); exe.linkSystemLibrary("ole32"); - 
exe.linkSystemLibrary("ws2_32"); } } diff --git a/src/main.zig b/src/main.zig index 2d56f3e91ec9..5ebb643b8d31 100644 --- a/src/main.zig +++ b/src/main.zig @@ -4173,8 +4173,9 @@ pub fn cmdPkg(gpa: Allocator, arena: Allocator, args: []const []const u8) !void const command_arg = args[0]; if (!mem.eql(u8, command_arg, "fetch")) fatal("Invalid command: {s}\n", .{command_arg}); - - return cmdPkgFetch(gpa, arena); + if (!build_options.omit_pkg_fetching_code) { + try cmdPkgFetch(gpa, arena); + } } pub fn cmdPkgFetch(gpa: Allocator, arena: Allocator) !void { From cab6227b706d1be10cb233d5e96fed90c993a328 Mon Sep 17 00:00:00 2001 From: DraagrenKirneh Date: Sun, 14 May 2023 14:23:32 +0200 Subject: [PATCH 13/13] update latest fetching changes in Package.zig. cleanup package command --- src/Package.zig | 14 +++++++++++--- src/main.zig | 17 +++++++---------- 2 files changed, 18 insertions(+), 13 deletions(-) diff --git a/src/Package.zig b/src/Package.zig index 2f93bd04b189..545656a47d16 100644 --- a/src/Package.zig +++ b/src/Package.zig @@ -614,11 +614,19 @@ fn fetchAndUnpack( try req.start(); try req.wait(); + if (req.response.status != .ok) { + return report.fail(dep.url_tok, "Expected response status '200 OK' got '{} {s}'", .{ + @enumToInt(req.response.status), + req.response.status.phrase() orelse "", + }); + } + const content_type = req.response.headers.getFirstValue("Content-Type") orelse - return report.fail(dep.url_tok, "missing Content-Type for '{s}'", .{uri.path}); + return report.fail(dep.url_tok, "Missing 'Content-Type' header", .{}); if (ascii.eqlIgnoreCase(content_type, "application/gzip") or - ascii.eqlIgnoreCase(content_type, "application/x-gzip")) + ascii.eqlIgnoreCase(content_type, "application/x-gzip") or + ascii.eqlIgnoreCase(content_type, "application/tar+gzip")) { // I observed the gzip stream to read 1 byte at a time, so I am using a // buffered reader on the front of it. 
@@ -628,7 +636,7 @@ fn fetchAndUnpack( // by default, so the same logic applies for buffering the reader as for gzip. try unpackTarball(gpa, &req, tmp_directory.handle, std.compress.xz); } else { - return report.fail(dep.url_tok, "unknown file extension for path '{s}'", .{uri.path}); + return report.fail(dep.url_tok, "Unsupported 'Content-Type' header value: '{s}'", .{content_type}); } // TODO: delete files not included in the package prior to computing the package hash. diff --git a/src/main.zig b/src/main.zig index 5ebb643b8d31..85f458fc76b5 100644 --- a/src/main.zig +++ b/src/main.zig @@ -4153,7 +4153,7 @@ pub const usage_pkg \\ Runs a package command \\ \\Commands: - \\  fetch     Calculates the package hash of the current directory. + \\  fetch     fetch all dependencies found in build.zig.zon. \\ \\Options: \\ -h --help Print this help and exit. @@ -4172,13 +4172,16 @@ pub fn cmdPkg(gpa: Allocator, arena: Allocator, args: []const []const u8) !void } const command_arg = args[0]; - if (!mem.eql(u8, command_arg, "fetch")) fatal("Invalid command: {s}\n", .{command_arg}); - if (!build_options.omit_pkg_fetching_code) { - try cmdPkgFetch(gpa, arena); + if (!mem.eql(u8, command_arg, "fetch")) { + fatal("Invalid package command: {s}\n", .{command_arg}); } + + try cmdPkgFetch(gpa, arena); } pub fn cmdPkgFetch(gpa: Allocator, arena: Allocator) !void { + if (build_options.omit_pkg_fetching_code) unreachable; + var color: Color = .auto; var cleanup_build_runner_dir: ?fs.Dir = null; @@ -4254,16 +4257,10 @@ pub fn cmdPkgFetch(gpa: Allocator, arena: Allocator) !void { }; defer local_cache_directory.handle.close(); - // Here we borrow main package's table and will replace it with a fresh - // one after this process completes. 
- var wip_errors: std.zig.ErrorBundle.Wip = undefined; try wip_errors.init(gpa); defer wip_errors.deinit(); - var all_modules: Package.AllModules = .{}; - defer all_modules.deinit(gpa); - var thread_pool: ThreadPool = undefined; try thread_pool.init(.{ .allocator = gpa }); defer thread_pool.deinit();