Skip to content

Find system-installed root SSL certificates on macOS #14325

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
Jan 16, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
48 changes: 26 additions & 22 deletions lib/std/crypto/Certificate/Bundle.zig
Original file line number Diff line number Diff line change
Expand Up @@ -60,13 +60,13 @@ pub fn rescan(cb: *Bundle, gpa: Allocator) !void {
.windows => {
// TODO
},
.macos => {
// TODO
},
.macos => return rescanMac(cb, gpa),
else => {},
}
}

/// Scans the macOS system root certificate keychain into the bundle.
/// Implementation lives in Bundle/macos.zig.
pub const rescanMac = @import("Bundle/macos.zig").rescanMac;

pub fn rescanLinux(cb: *Bundle, gpa: Allocator) !void {
// Possible certificate files; stop after finding one.
const cert_file_paths = [_][]const u8{
Expand Down Expand Up @@ -195,25 +195,29 @@ pub fn addCertsFromFile(cb: *Bundle, gpa: Allocator, file: fs.File) !void {
const decoded_start = @intCast(u32, cb.bytes.items.len);
const dest_buf = cb.bytes.allocatedSlice()[decoded_start..];
cb.bytes.items.len += try base64.decode(dest_buf, encoded_cert);
// Even though we could only partially parse the certificate to find
// the subject name, we pre-parse all of them to make sure and only
// include in the bundle ones that we know will parse. This way we can
// use `catch unreachable` later.
const parsed_cert = try Certificate.parse(.{
.buffer = cb.bytes.items,
.index = decoded_start,
});
if (now_sec > parsed_cert.validity.not_after) {
// Ignore expired cert.
cb.bytes.items.len = decoded_start;
continue;
}
const gop = try cb.map.getOrPutContext(gpa, parsed_cert.subject_slice, .{ .cb = cb });
if (gop.found_existing) {
cb.bytes.items.len = decoded_start;
} else {
gop.value_ptr.* = decoded_start;
}
try cb.parseCert(gpa, decoded_start, now_sec);
}
}

/// Validates and registers the certificate whose DER bytes begin at
/// `decoded_start` inside `cb.bytes`. Certificates that are expired
/// (relative to `now_sec`) or whose subject already exists in the map are
/// discarded by truncating `cb.bytes` back to `decoded_start`.
pub fn parseCert(cb: *Bundle, gpa: Allocator, decoded_start: u32, now_sec: i64) !void {
    // Fully pre-parse every certificate before admitting it to the bundle,
    // even though only the subject name is needed at this point. This
    // guarantees that certificates stored in the bundle always parse, so
    // later code may use `catch unreachable`.
    const cert = try Certificate.parse(.{
        .buffer = cb.bytes.items,
        .index = decoded_start,
    });

    // Skip certificates that have already expired.
    if (cert.validity.not_after < now_sec) {
        cb.bytes.items.len = decoded_start;
        return;
    }

    const gop = try cb.map.getOrPutContext(gpa, cert.subject_slice, .{ .cb = cb });
    if (!gop.found_existing) {
        gop.value_ptr.* = decoded_start;
    } else {
        // Duplicate subject: keep the first occurrence, reclaim these bytes.
        cb.bytes.items.len = decoded_start;
    }
}

Expand Down
136 changes: 136 additions & 0 deletions lib/std/crypto/Certificate/Bundle/macos.zig
Original file line number Diff line number Diff line change
@@ -0,0 +1,136 @@
const std = @import("std");
const assert = std.debug.assert;
const mem = std.mem;
const fs = std.fs;
const Allocator = std.mem.Allocator;
const Bundle = @import("../Bundle.zig");

/// Reads the system root certificates from the macOS system keychain file
/// and adds them to the bundle via `Bundle.parseCert`. The keychain is an
/// Apple "kych" database in which multi-byte fields are stored big-endian.
pub fn rescanMac(cb: *Bundle, gpa: Allocator) !void {
    const file = try fs.openFileAbsolute("/System/Library/Keychains/SystemRootCertificates.keychain", .{});
    defer file.close();

    const bytes = try file.readToEndAlloc(gpa, std.math.maxInt(u32));
    defer gpa.free(bytes);

    var stream = std.io.fixedBufferStream(bytes);
    const reader = stream.reader();

    const db_header = try reader.readStructBig(ApplDbHeader);
    assert(mem.eql(u8, "kych", &@bitCast([4]u8, db_header.signature)));

    try stream.seekTo(db_header.schema_offset);

    const db_schema = try reader.readStructBig(ApplDbSchema);

    // The schema header is followed by `table_count` table offsets, each
    // relative to the schema's own offset in the file.
    const table_list = try gpa.alloc(u32, db_schema.table_count);
    defer gpa.free(table_list);

    var table_idx: u32 = 0;
    while (table_idx < table_list.len) : (table_idx += 1) {
        table_list[table_idx] = try reader.readIntBig(u32);
    }

    const now_sec = std.time.timestamp();

    for (table_list) |table_offset| {
        try stream.seekTo(db_header.schema_offset + table_offset);

        const table_header = try reader.readStructBig(TableHeader);

        // Only the X.509 certificate table is relevant to the bundle.
        if (@intToEnum(TableId, table_header.table_id) != TableId.CSSM_DL_DB_RECORD_X509_CERTIFICATE) {
            continue;
        }

        // The table header is followed by `record_count` record offsets,
        // each relative to the start of the table.
        const record_list = try gpa.alloc(u32, table_header.record_count);
        defer gpa.free(record_list);

        var record_idx: u32 = 0;
        while (record_idx < record_list.len) : (record_idx += 1) {
            record_list[record_idx] = try reader.readIntBig(u32);
        }

        for (record_list) |record_offset| {
            try stream.seekTo(db_header.schema_offset + table_offset + record_offset);

            const cert_header = try reader.readStructBig(X509CertHeader);

            try cb.bytes.ensureUnusedCapacity(gpa, cert_header.cert_size);

            const cert_start = @intCast(u32, cb.bytes.items.len);
            const dest_buf = cb.bytes.allocatedSlice()[cert_start..];
            // Read exactly `cert_size` bytes. Passing the entire spare
            // capacity to `readAtLeast` could consume bytes past the end of
            // this certificate in a single read, inflating the bundle with
            // unrelated keychain data. `readNoEof` also errors out on a
            // truncated file instead of silently short-reading.
            try reader.readNoEof(dest_buf[0..cert_header.cert_size]);
            cb.bytes.items.len += cert_header.cert_size;

            try cb.parseCert(gpa, cert_start, now_sec);
        }
    }
}

/// On-disk header of an Apple keychain ("kych") database file.
/// All multi-byte fields are stored big-endian; read via `readStructBig`.
const ApplDbHeader = extern struct {
    /// File magic; expected to be "kych".
    signature: @Vector(4, u8),
    version: u32,
    header_size: u32,
    /// Offset of the schema section from the start of the file.
    schema_offset: u32,
    auth_offset: u32,
};

/// Schema section of the keychain database. On disk it is followed by
/// `table_count` big-endian u32 table offsets (relative to the schema).
const ApplDbSchema = extern struct {
    schema_size: u32,
    table_count: u32,
};

/// Header of one table in the keychain database. On disk it is followed by
/// `record_count` big-endian u32 record offsets (relative to the table).
const TableHeader = extern struct {
    table_size: u32,
    /// Identifies the table's record type; compare against `TableId`.
    table_id: u32,
    record_count: u32,
    // NOTE(review): meaning of the remaining fields is not exercised by this
    // code; names follow the keychain layout and are unverified here.
    records: u32,
    indexes_offset: u32,
    free_list_head: u32,
    record_numbers_count: u32,
};

/// Keychain table/record type identifiers. The names and values mirror
/// Apple's CSSM data-layer record type constants (CSSM_DL_DB_RECORD_*).
/// Non-exhaustive, since a keychain file may contain table ids not listed
/// here; only `CSSM_DL_DB_RECORD_X509_CERTIFICATE` is used by this scanner.
const TableId = enum(u32) {
    CSSM_DL_DB_SCHEMA_INFO = 0x00000000,
    CSSM_DL_DB_SCHEMA_INDEXES = 0x00000001,
    CSSM_DL_DB_SCHEMA_ATTRIBUTES = 0x00000002,
    CSSM_DL_DB_SCHEMA_PARSING_MODULE = 0x00000003,

    CSSM_DL_DB_RECORD_ANY = 0x0000000a,
    CSSM_DL_DB_RECORD_CERT = 0x0000000b,
    CSSM_DL_DB_RECORD_CRL = 0x0000000c,
    CSSM_DL_DB_RECORD_POLICY = 0x0000000d,
    CSSM_DL_DB_RECORD_GENERIC = 0x0000000e,
    CSSM_DL_DB_RECORD_PUBLIC_KEY = 0x0000000f,
    CSSM_DL_DB_RECORD_PRIVATE_KEY = 0x00000010,
    CSSM_DL_DB_RECORD_SYMMETRIC_KEY = 0x00000011,
    CSSM_DL_DB_RECORD_ALL_KEYS = 0x00000012,

    CSSM_DL_DB_RECORD_GENERIC_PASSWORD = 0x80000000,
    CSSM_DL_DB_RECORD_INTERNET_PASSWORD = 0x80000001,
    CSSM_DL_DB_RECORD_APPLESHARE_PASSWORD = 0x80000002,
    CSSM_DL_DB_RECORD_USER_TRUST = 0x80000003,
    CSSM_DL_DB_RECORD_X509_CRL = 0x80000004,
    CSSM_DL_DB_RECORD_UNLOCK_REFERRAL = 0x80000005,
    CSSM_DL_DB_RECORD_EXTENDED_ATTRIBUTE = 0x80000006,
    CSSM_DL_DB_RECORD_X509_CERTIFICATE = 0x80001000,
    CSSM_DL_DB_RECORD_METADATA = 0x80008000,

    _,
};

/// Header preceding an X.509 certificate record in the keychain database.
/// Only `cert_size` is consumed by this scanner: the DER-encoded certificate
/// bytes immediately follow this header on disk.
const X509CertHeader = extern struct {
    record_size: u32,
    record_number: u32,
    unknown1: u32,
    unknown2: u32,
    /// Size in bytes of the DER certificate data that follows the header.
    cert_size: u32,
    unknown3: u32,
    cert_type: u32,
    cert_encoding: u32,
    // NOTE(review): the fields below are presumably attribute offsets/values
    // of the record — unverified; none are read by this code.
    print_name: u32,
    alias: u32,
    subject: u32,
    issuer: u32,
    serial_number: u32,
    subject_key_identifier: u32,
    public_key_hash: u32,
};
9 changes: 9 additions & 0 deletions lib/std/io/reader.zig
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ const math = std.math;
const assert = std.debug.assert;
const mem = std.mem;
const testing = std.testing;
const native_endian = @import("builtin").target.cpu.arch.endian();

pub fn Reader(
comptime Context: type,
Expand Down Expand Up @@ -351,6 +352,14 @@ pub fn Reader(
return res[0];
}

/// Reads a struct of type `T`, interpreting its on-disk representation as
/// big-endian: when the host is little-endian, every field is byte-swapped
/// after reading.
pub fn readStructBig(self: Self, comptime T: type) !T {
    var value = try self.readStruct(T);
    switch (native_endian) {
        .Big => {},
        .Little => mem.byteSwapAllFields(T, &value),
    }
    return value;
}

/// Reads an integer with the same size as the given enum's tag type. If the integer matches
/// an enum tag, casts the integer to the enum tag and returns it. Otherwise, returns an error.
/// TODO optimization taking advantage of most fields being in order
Expand Down
4 changes: 2 additions & 2 deletions src/Autodoc.zig
Original file line number Diff line number Diff line change
Expand Up @@ -2514,12 +2514,12 @@ fn walkInstruction(
if (small.has_align) extra_index += 1;

const var_type = try self.walkRef(file, parent_scope, parent_src, extra.data.var_type, need_type);

var value: DocData.WalkResult = .{
.typeRef = var_type.expr,
.expr = .{ .undefined = .{} },
};

if (small.has_init) {
const var_init_ref = @intToEnum(Ref, file.zir.extra[extra_index]);
const var_init = try self.walkRef(file, parent_scope, parent_src, var_init_ref, need_type);
Expand Down
16 changes: 2 additions & 14 deletions src/link/MachO/fat.zig
Original file line number Diff line number Diff line change
@@ -1,9 +1,7 @@
const std = @import("std");
const builtin = @import("builtin");
const log = std.log.scoped(.archive);
const macho = std.macho;
const mem = std.mem;
const native_endian = builtin.target.cpu.arch.endian();

pub fn decodeArch(cputype: macho.cpu_type_t, comptime logError: bool) !std.Target.Cpu.Arch {
const cpu_arch: std.Target.Cpu.Arch = switch (cputype) {
Expand All @@ -19,23 +17,13 @@ pub fn decodeArch(cputype: macho.cpu_type_t, comptime logError: bool) !std.Targe
return cpu_arch;
}

fn readFatStruct(reader: anytype, comptime T: type) !T {
// Fat structures (fat_header & fat_arch) are always written and read to/from
// disk in big endian order.
var res = try reader.readStruct(T);
if (native_endian != std.builtin.Endian.Big) {
mem.byteSwapAllFields(T, &res);
}
return res;
}

pub fn getLibraryOffset(reader: anytype, cpu_arch: std.Target.Cpu.Arch) !u64 {
const fat_header = try readFatStruct(reader, macho.fat_header);
const fat_header = try reader.readStructBig(macho.fat_header);
if (fat_header.magic != macho.FAT_MAGIC) return 0;

var fat_arch_index: u32 = 0;
while (fat_arch_index < fat_header.nfat_arch) : (fat_arch_index += 1) {
const fat_arch = try readFatStruct(reader, macho.fat_arch);
const fat_arch = try reader.readStructBig(macho.fat_arch);
// If we come across an architecture that we do not know how to handle, that's
// fine because we can keep looking for one that might match.
const lib_arch = decodeArch(fat_arch.cputype, false) catch |err| switch (err) {
Expand Down