diff --git a/head.S b/head.S
index 4c57eed..d60dc0b 100644
--- a/head.S
+++ b/head.S
@@ -190,16 +190,23 @@ GLOBAL(_entry)
     call    skl_main
 
     /*
-     * skl_main() is magic. It returns two pointers by register:
+     * skl_main() returns a pointer to the protected mode kernel entry in
+     * %eax. It could also return the argument for the kernel (depending on
+     * the kernel type, either Linux boot parameters or the MBI for
+     * Multiboot2), but for parity with what Intel TXT does, it doesn't.
      *
-     * %eax - protected mode kernel entry
-     * %edx - argument for kernel entry point, depends on type of kernel
+     * Intel TXT provides the physical address of the kernel entry point in
+     * %ebx because %ss is undefined there, so the kernel can't use the
+     * call/pop pair to obtain its load address. Even though that is possible
+     * on AMD with this implementation of SKL, keep things consistent and
+     * move the entry point address to %ebx as well.
      *
-     * We stash the entry point in %edi and the argument in %esi to protect
-     * them from clobbering during teardown.
+     * %ebp points to the base of the SLB; it was set by the first instruction
+     * on SKL entry and is preserved across the call to C. This is how the
+     * kernel can obtain the offset to the SLRT and, through it, the bootloader
+     * context and the payload argument saved within.
      */
-    mov     %eax, %edi
-    mov     %edx, %esi
+    mov     %eax, %ebx
 
 #ifdef __x86_64__
 
@@ -232,28 +239,8 @@ GLOBAL(_entry)
     push    $0
     popf
 
-    /*
-     * Various kernels use different boot protocols, SKL supports some of
-     * the common ones. Because of that, we are saving the same argument in
-     * every possible place that any of the supported kernel types may look
-     * for it. As of now, supported protocols include:
-     *
-     * - Linux x86 protected mode entry, not UEFI
-     * - Multiboot2, also not UEFI
-     * - simple payload started as 'entry(u32 arg)' function call. As we
-     *   don't expect it to return, __cdecl, __stdcall and __pascal calling
-     *   conventions work the same.
-     */
-    /* Linux expects Zero Page address in %esi, it is already there */
-    /* Multiboot2 expects MBI address in %ebx and magic number in %eax */
-    mov     %esi, %ebx
-    mov     $MULTIBOOT2_BOOTLOADER_MAGIC, %eax
-    /* Simple payload expects argument on stack followed by return address */
-    push    %esi
-    push    $0
-    /* All set, jump to the kernel */
-    jmp     *%edi
+    jmp     *%ebx
 
 ENDFUNC(_entry)
 
 .section .rodata, "a", @progbits
diff --git a/main.c b/main.c
index 1b6bf04..3f74360 100644
--- a/main.c
+++ b/main.c
@@ -231,27 +231,15 @@ static void dma_protection_setup(void)
 #endif
 }
 
-/*
- * Function return ABI magic:
- *
- * By returning a simple object of two pointers, the SYSV ABI splits it across
- * %rax and %rdx rather than spilling it to the stack. This is far more
- * convenient for our asm caller to deal with.
- */
-typedef struct {
-    void *dlme_entry;    /* %eax */
-    void *dlme_arg;      /* %edx */
-} asm_return_t;
-
-asm_return_t skl_main(void)
+void *skl_main(void)
 {
     struct tpm *tpm;
     struct slr_entry_dl_info *dl_info;
-    asm_return_t ret;
+    void *dlme_entry;
     u32 entry_offset;
 
     /*
-     * Now in 64b mode, paging is setup. This is the launching point. We can
+     * Now in 64b mode, paging is set up. This is the launching point. We can
      * now do what we want. At the end, trampoline to the PM entry point which
      * will include the Secure Launch stub.
     */
@@ -298,14 +286,13 @@ asm_return_t skl_main(void)
     tpm_relinquish_locality(tpm);
     free_tpm(tpm);
 
-    ret.dlme_entry = _p(dl_info->dlme_base + dl_info->dlme_entry);
-    ret.dlme_arg = _p(dl_info->bl_context.context);
+    dlme_entry = _p(dl_info->dlme_base + dl_info->dlme_entry);
 
     /* End of the line, off to the protected mode entry into the kernel */
     print("dlme_entry:\n");
-    hexdump(ret.dlme_entry, 0x100);
-    print("dlme_arg:\n");
-    hexdump(ret.dlme_arg, 0x280);
+    hexdump(dlme_entry, 0x100);
+    print("bl_context:\n");
+    hexdump(_p(dl_info->bl_context.context), 0x280);
     print("skl_base:\n");
     hexdump(_start, 0x100);
     print("bootloader_data:\n");
@@ -313,5 +300,5 @@ asm_return_t skl_main(void)
 
     print("skl_main() is about to exit\n");
 
-    return ret;
+    return dlme_entry;
 }
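
Note (reviewer sketch, not part of the patch): the new head.S comment describes the hand-off contract as %ebx = kernel entry point and %ebp = SLB base, with the DLME expected to go SLB base -> SLRT -> bl_context to recover its argument instead of receiving it in a register. The C sketch below illustrates that lookup from the kernel side under stated assumptions: the skl_header shape, the slrt_offset field, the SLR_ENTRY_* tag values and the entry layouts are invented for the example and are not SKL's or the kernel's real definitions.

/*
 * Illustrative only: how a DLME might use the SLB base left in %ebp to reach
 * the SLRT and pull out the bootloader context. All structures and tag values
 * below are assumptions made for this sketch.
 */
#include <stdint.h>

struct skl_header {                /* assumed SLB header layout */
    uint16_t skl_entry_point;
    uint16_t length;
    uint32_t slrt_offset;          /* assumed: offset from SLB base to the SLRT */
};

struct slr_table {                 /* assumed SLRT table header */
    uint32_t magic;
    uint16_t revision;
    uint16_t architecture;
    uint32_t size;                 /* total size of the table, entries included */
    uint32_t max_size;
};

struct slr_entry_hdr {
    uint32_t tag;
    uint32_t size;                 /* size of this entry, header included */
};

struct slr_bl_context {
    uint16_t bootloader;
    uint16_t reserved;
    uint64_t context;              /* payload argument saved by the bootloader */
};

struct slr_entry_dl_info {
    struct slr_entry_hdr hdr;
    struct slr_bl_context bl_context;
    uint64_t dlme_size;
    uint64_t dlme_base;
    uint64_t dlme_entry;
};

#define SLR_ENTRY_END     0x0000u  /* assumed terminator tag */
#define SLR_ENTRY_DL_INFO 0x0003u  /* assumed tag of the DL info entry */

/* slb_base is the value SKL leaves in %ebp at the hand-off. */
uint64_t find_payload_arg(const uint8_t *slb_base)
{
    const struct skl_header *hdr = (const struct skl_header *)slb_base;
    const uint8_t *slrt = slb_base + hdr->slrt_offset;
    const struct slr_table *table = (const struct slr_table *)slrt;
    const uint8_t *cur = slrt + sizeof(*table);
    const uint8_t *end = slrt + table->size;

    /* Walk the variable-sized entries until the terminator or the DL info. */
    while (cur + sizeof(struct slr_entry_hdr) <= end) {
        const struct slr_entry_hdr *e = (const struct slr_entry_hdr *)cur;

        if (e->tag == SLR_ENTRY_END || e->size == 0)
            break;
        if (e->tag == SLR_ENTRY_DL_INFO)
            return ((const struct slr_entry_dl_info *)e)->bl_context.context;

        cur += e->size;
    }

    return 0;  /* no DL info entry found */
}

With this scheme the asm stub only needs the mov %eax, %ebx and jmp *%ebx kept by the patch; everything else the payload needs is reachable through %ebp.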