From 9e4a5730d931aa4a0b53b8a9548fa2e1003a3ecf Mon Sep 17 00:00:00 2001
From: Quey-Liang Kao
Date: Sun, 29 Mar 2020 10:24:22 +0800
Subject: [PATCH 1/3] cmd/internal/obj/riscv: mark unsafe points

For async preemption, the injected call on riscv64 uses REG_TMP as a
temporary register and therefore clobbers it. Mark every instruction
that uses REG_TMP as an unsafe point.

Change-Id: I4e32377427cb0f7360b3a91ed9e8b0224599a872
---
 src/cmd/internal/obj/riscv/obj.go | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/src/cmd/internal/obj/riscv/obj.go b/src/cmd/internal/obj/riscv/obj.go
index 73fe8c284f1653..ea16a522e1bbbc 100644
--- a/src/cmd/internal/obj/riscv/obj.go
+++ b/src/cmd/internal/obj/riscv/obj.go
@@ -1998,6 +1998,12 @@ func assemble(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
 	for p, i := cursym.P, 0; i < len(symcode); p, i = p[4:], i+1 {
 		ctxt.Arch.ByteOrder.PutUint32(p, symcode[i])
 	}
+
+	obj.MarkUnsafePoints(ctxt, cursym.Func.Text, newprog, isUnsafePoint)
+}
+
+func isUnsafePoint(p *obj.Prog) bool {
+	return p.From.Reg == REG_TMP || p.To.Reg == REG_TMP || p.Reg == REG_TMP
 }
 
 var LinkRISCV64 = obj.LinkArch{

From f8eb7a6001edaf56b84a8231cf69a5feca00ff3c Mon Sep 17 00:00:00 2001
From: Quey-Liang Kao
Date: Sun, 29 Mar 2020 12:01:48 +0800
Subject: [PATCH 2/3] cmd/internal/obj/riscv: refine Spadj calculation

The preprocess function calculates Spadj for each Prog purely from the
function's stack size, which ignores frameless functions. If a
frameless assembly function moves SP itself, the adjustment is not
recorded in the pcln table, and anything that needs a backtrace through
such a function fails.

This is essential for async preemption support, because the injected
asyncPreempt call saves and restores its context on the stack; without
correct SP information it cannot survive a runtime.GC().

Change-Id: If5e3d9d57810b4fcdb538bda26848e088654ff80
---
 src/cmd/internal/obj/riscv/obj.go | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/src/cmd/internal/obj/riscv/obj.go b/src/cmd/internal/obj/riscv/obj.go
index ea16a522e1bbbc..6fcde2d67eb83f 100644
--- a/src/cmd/internal/obj/riscv/obj.go
+++ b/src/cmd/internal/obj/riscv/obj.go
@@ -745,6 +745,12 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
 			// count adjustments from earlier epilogues, since they
 			// won't affect later PCs.
 			p.Spadj = int32(stacksize)
+
+		case AADDI:
+			// Refine Spadj to account for SP adjustments made via ADDI.
+			if p.To.Type == obj.TYPE_REG && p.To.Reg == REG_SP && p.From.Type == obj.TYPE_CONST {
+				p.Spadj = int32(-p.From.Offset)
+			}
 		}
 	}
 
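To make the Spadj change concrete: the case it covers is a NOFRAME assembly function that moves SP itself. The routine below is a hypothetical sketch (its name, registers and sizes are made up for illustration; only the pattern comes from the commit message). The two ADD instructions are rewritten by the assembler to ADDI with REG_SP as the destination, so the new case records Spadj = 32 on the first and Spadj = -32 on the second, and a traceback taken from any PC between them now uses the correct caller SP.

	#include "textflag.h"

	// Hypothetical frameless routine that spills two registers by hand.
	TEXT ·spillPair(SB),NOSPLIT|NOFRAME,$0-0
		ADD $-32, X2    // Spadj = 32: the unwinder knows SP moved down
		MOV X10, 8(X2)
		MOV X11, 16(X2)
		// ... work that a signal or profiler may interrupt ...
		MOV 16(X2), X11
		MOV 8(X2), X10
		ADD $32, X2     // Spadj = -32: back to the caller's SP
		RET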
From f6110d470713be70d960fd3e984fea89bb371719 Mon Sep 17 00:00:00 2001
From: Quey-Liang Kao
Date: Sat, 7 Mar 2020 09:15:53 +0800
Subject: [PATCH 3/3] runtime: add async preemption support on riscv64

This CL adds support for call injection and async preemption on
riscv64. The injected call clobbers REG_TMP; instructions that access
REG_TMP were marked as unsafe points in the previous commits.

Fixes #36711.

Change-Id: I69d7f2ac5fde2279f62046725adbac69f98dd9ab
---
 src/runtime/mkpreempt.go      |  29 +++-
 src/runtime/preempt_riscv64.s | 127 +++++++++++++++++++++++++++++++++-
 src/runtime/signal_riscv64.go |  14 +++-
 3 files changed, 164 insertions(+), 6 deletions(-)

diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go
index 35ed42871f11df..1fe77663b9ca56 100644
--- a/src/runtime/mkpreempt.go
+++ b/src/runtime/mkpreempt.go
@@ -502,8 +502,33 @@ func genPPC64() {
 }
 
 func genRISCV64() {
-	p("// No async preemption on riscv64 - see issue 36711")
-	p("UNDEF")
+	// X0 (zero), X1 (LR), X2 (SP), X4 (g), X31 (TMP) are special.
+	var l = layout{sp: "X2", stack: 8}
+
+	// Add integer registers (X3, X5-X30).
+	for i := 3; i < 31; i++ {
+		if i == 4 {
+			continue
+		}
+		reg := fmt.Sprintf("X%d", i)
+		l.add("MOV", reg, 8)
+	}
+
+	// Add floating point registers (F0-F31).
+	for i := 0; i <= 31; i++ {
+		reg := fmt.Sprintf("F%d", i)
+		l.add("MOVD", reg, 8)
+	}
+
+	p("MOV X1, -%d(X2)", l.stack)
+	p("ADD $-%d, X2", l.stack)
+	l.save()
+	p("CALL ·asyncPreempt2(SB)")
+	l.restore()
+	p("MOV %d(X2), X1", l.stack)
+	p("MOV (X2), X31")
+	p("ADD $%d, X2", l.stack+8)
+	p("JMP (X31)")
 }
 
 func genS390X() {
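The constants that appear in the generated preempt_riscv64.s below follow directly from the layout built above: one 8-byte slot at 0(X2) for the resume PC, then 27 integer registers (X3, X5-X30) and 32 floating point registers. As a worked check (this snippet is not part of the CL):

	package main

	import "fmt"

	func main() {
		const (
			slot    = 8  // stack slot size on riscv64
			pcSlot  = 8  // 0(X2) holds the resume PC saved by the prologue
			intRegs = 27 // X3 and X5-X30; X4 is g, X31 is TMP
			fpRegs  = 32 // F0-F31
		)
		frame := pcSlot + (intRegs+fpRegs)*slot
		fmt.Println(frame)     // 480: subtracted from X2 on entry
		fmt.Println(frame + 8) // 488: the exit also pops the slot pushed by sigctxt.pushCall
	}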
diff --git a/src/runtime/preempt_riscv64.s b/src/runtime/preempt_riscv64.s
index 80c0636c7ae8c2..0338c22a941133 100644
--- a/src/runtime/preempt_riscv64.s
+++ b/src/runtime/preempt_riscv64.s
@@ -4,5 +4,128 @@
 #include "textflag.h"
 
 TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
-	// No async preemption on riscv64 - see issue 36711
-	UNDEF
+	MOV X1, -480(X2)
+	ADD $-480, X2
+	MOV X3, 8(X2)
+	MOV X5, 16(X2)
+	MOV X6, 24(X2)
+	MOV X7, 32(X2)
+	MOV X8, 40(X2)
+	MOV X9, 48(X2)
+	MOV X10, 56(X2)
+	MOV X11, 64(X2)
+	MOV X12, 72(X2)
+	MOV X13, 80(X2)
+	MOV X14, 88(X2)
+	MOV X15, 96(X2)
+	MOV X16, 104(X2)
+	MOV X17, 112(X2)
+	MOV X18, 120(X2)
+	MOV X19, 128(X2)
+	MOV X20, 136(X2)
+	MOV X21, 144(X2)
+	MOV X22, 152(X2)
+	MOV X23, 160(X2)
+	MOV X24, 168(X2)
+	MOV X25, 176(X2)
+	MOV X26, 184(X2)
+	MOV X27, 192(X2)
+	MOV X28, 200(X2)
+	MOV X29, 208(X2)
+	MOV X30, 216(X2)
+	MOVD F0, 224(X2)
+	MOVD F1, 232(X2)
+	MOVD F2, 240(X2)
+	MOVD F3, 248(X2)
+	MOVD F4, 256(X2)
+	MOVD F5, 264(X2)
+	MOVD F6, 272(X2)
+	MOVD F7, 280(X2)
+	MOVD F8, 288(X2)
+	MOVD F9, 296(X2)
+	MOVD F10, 304(X2)
+	MOVD F11, 312(X2)
+	MOVD F12, 320(X2)
+	MOVD F13, 328(X2)
+	MOVD F14, 336(X2)
+	MOVD F15, 344(X2)
+	MOVD F16, 352(X2)
+	MOVD F17, 360(X2)
+	MOVD F18, 368(X2)
+	MOVD F19, 376(X2)
+	MOVD F20, 384(X2)
+	MOVD F21, 392(X2)
+	MOVD F22, 400(X2)
+	MOVD F23, 408(X2)
+	MOVD F24, 416(X2)
+	MOVD F25, 424(X2)
+	MOVD F26, 432(X2)
+	MOVD F27, 440(X2)
+	MOVD F28, 448(X2)
+	MOVD F29, 456(X2)
+	MOVD F30, 464(X2)
+	MOVD F31, 472(X2)
+	CALL ·asyncPreempt2(SB)
+	MOVD 472(X2), F31
+	MOVD 464(X2), F30
+	MOVD 456(X2), F29
+	MOVD 448(X2), F28
+	MOVD 440(X2), F27
+	MOVD 432(X2), F26
+	MOVD 424(X2), F25
+	MOVD 416(X2), F24
+	MOVD 408(X2), F23
+	MOVD 400(X2), F22
+	MOVD 392(X2), F21
+	MOVD 384(X2), F20
+	MOVD 376(X2), F19
+	MOVD 368(X2), F18
+	MOVD 360(X2), F17
+	MOVD 352(X2), F16
+	MOVD 344(X2), F15
+	MOVD 336(X2), F14
+	MOVD 328(X2), F13
+	MOVD 320(X2), F12
+	MOVD 312(X2), F11
+	MOVD 304(X2), F10
+	MOVD 296(X2), F9
+	MOVD 288(X2), F8
+	MOVD 280(X2), F7
+	MOVD 272(X2), F6
+	MOVD 264(X2), F5
+	MOVD 256(X2), F4
+	MOVD 248(X2), F3
+	MOVD 240(X2), F2
+	MOVD 232(X2), F1
+	MOVD 224(X2), F0
+	MOV 216(X2), X30
+	MOV 208(X2), X29
+	MOV 200(X2), X28
+	MOV 192(X2), X27
+	MOV 184(X2), X26
+	MOV 176(X2), X25
+	MOV 168(X2), X24
+	MOV 160(X2), X23
+	MOV 152(X2), X22
+	MOV 144(X2), X21
+	MOV 136(X2), X20
+	MOV 128(X2), X19
+	MOV 120(X2), X18
+	MOV 112(X2), X17
+	MOV 104(X2), X16
+	MOV 96(X2), X15
+	MOV 88(X2), X14
+	MOV 80(X2), X13
+	MOV 72(X2), X12
+	MOV 64(X2), X11
+	MOV 56(X2), X10
+	MOV 48(X2), X9
+	MOV 40(X2), X8
+	MOV 32(X2), X7
+	MOV 24(X2), X6
+	MOV 16(X2), X5
+	MOV 8(X2), X3
+	MOV 480(X2), X1
+	MOV (X2), X31
+	ADD $488, X2
+	JMP (X31)

diff --git a/src/runtime/signal_riscv64.go b/src/runtime/signal_riscv64.go
index cd0c3938869a40..7da69b287bd1a5 100644
--- a/src/runtime/signal_riscv64.go
+++ b/src/runtime/signal_riscv64.go
@@ -78,8 +78,18 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
 	c.set_pc(uint64(funcPC(sigpanic)))
 }
 
-const pushCallSupported = false
+const pushCallSupported = true
 
 func (c *sigctxt) pushCall(targetPC uintptr) {
-	throw("unimplemented")
+	// Push the LR to stack, as we'll clobber it in order to
+	// push the call. The function being pushed is responsible
+	// for restoring the LR and setting the SP back.
+	// This extra slot is known to gentraceback.
+	sp := c.sp() - sys.PtrSize
+	c.set_sp(sp)
+	*(*uint64)(unsafe.Pointer(uintptr(sp))) = c.ra()
+	// Set up PC and LR to pretend the function being signaled
+	// calls targetPC at the faulting PC.
+	c.set_ra(c.pc())
+	c.set_pc(uint64(targetPC))
 }
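As an end-to-end illustration of what this series enables (the program below is a sketch, not part of the patch): a goroutine spinning in a call-free loop has no synchronous preemption points, so before this change the runtime could not stop it on riscv64 and a concurrent runtime.GC() could block indefinitely. With call injection, the signal handler pushes a frame via pushCall, asyncPreempt spills and restores the registers shown above, and the loop resumes at the interrupted PC.

	package main

	import (
		"runtime"
		"sync/atomic"
	)

	var stop int32

	// spin performs no calls, so its loop can only be stopped by an
	// asynchronously injected asyncPreempt.
	func spin() {
		for atomic.LoadInt32(&stop) == 0 {
		}
	}

	func main() {
		runtime.GOMAXPROCS(2)
		go spin()
		runtime.GC() // must preempt the spinning goroutine to stop the world
		atomic.StoreInt32(&stop, 1)
	}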