Skip to content

Commit 2393fae

Browse files
targos authored and BethGriggs committed
deps: V8: cherry-pick 2b77ca200c56
Original commit message: [wasm][liftoff] Always zero-extend 32 bit offsets The upper 32 bits of the 64 bit offset register are not guaranteed to be cleared, so a zero-extension is needed. We already do the zero-extension in the case of explicit bounds checking, but this should also be done if the trap handler is enabled. [email protected] [email protected] Bug: v8:11809 Change-Id: I21e2535c701041d11fa06c176fa683d82db0a3f1 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2917612 Commit-Queue: Thibaud Michaud <[email protected]> Reviewed-by: Clemens Backes <[email protected]> Cr-Commit-Position: refs/heads/master@{#74881} Refs: v8/v8@2b77ca2 PR-URL: #39337 Reviewed-By: Matteo Collina <[email protected]> Reviewed-By: James M Snell <[email protected]>
1 parent c8e7d80 commit 2393fae

File tree

8 files changed

+84
-13
lines changed

8 files changed

+84
-13
lines changed

common.gypi

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@
3636

3737
# Reset this number to 0 on major V8 upgrades.
3838
# Increment by one for each non-official patch applied to deps/v8.
39-
'v8_embedder_string': '-node.13',
39+
'v8_embedder_string': '-node.14',
4040

4141
##### V8 defaults for Node.js #####
4242

deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -768,7 +768,8 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
768768
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
769769
Register offset_reg, uint32_t offset_imm,
770770
LoadType type, LiftoffRegList pinned,
771-
uint32_t* protected_load_pc, bool is_load_mem) {
771+
uint32_t* protected_load_pc, bool is_load_mem,
772+
bool i64_offset) {
772773
// Offsets >=2GB are statically OOB on 32-bit systems.
773774
DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
774775
liftoff::LoadInternal(this, dst, src_addr, offset_reg,

deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h

Lines changed: 10 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -126,9 +126,13 @@ inline CPURegister AcquireByType(UseScratchRegisterScope* temps,
126126
template <typename T>
127127
inline MemOperand GetMemOp(LiftoffAssembler* assm,
128128
UseScratchRegisterScope* temps, Register addr,
129-
Register offset, T offset_imm) {
129+
Register offset, T offset_imm,
130+
bool i64_offset = false) {
130131
if (offset.is_valid()) {
131-
if (offset_imm == 0) return MemOperand(addr.X(), offset.X());
132+
if (offset_imm == 0) {
133+
return i64_offset ? MemOperand(addr.X(), offset.X())
134+
: MemOperand(addr.X(), offset.W(), UXTW);
135+
}
132136
Register tmp = temps->AcquireX();
133137
DCHECK_GE(kMaxUInt32, offset_imm);
134138
assm->Add(tmp, offset.X(), offset_imm);
@@ -493,10 +497,11 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
493497
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
494498
Register offset_reg, uintptr_t offset_imm,
495499
LoadType type, LiftoffRegList pinned,
496-
uint32_t* protected_load_pc, bool is_load_mem) {
500+
uint32_t* protected_load_pc, bool is_load_mem,
501+
bool i64_offset) {
497502
UseScratchRegisterScope temps(this);
498-
MemOperand src_op =
499-
liftoff::GetMemOp(this, &temps, src_addr, offset_reg, offset_imm);
503+
MemOperand src_op = liftoff::GetMemOp(this, &temps, src_addr, offset_reg,
504+
offset_imm, i64_offset);
500505
if (protected_load_pc) *protected_load_pc = pc_offset();
501506
switch (type.value()) {
502507
case LoadType::kI32Load8U:

deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -390,7 +390,8 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
390390
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
391391
Register offset_reg, uint32_t offset_imm,
392392
LoadType type, LiftoffRegList pinned,
393-
uint32_t* protected_load_pc, bool is_load_mem) {
393+
uint32_t* protected_load_pc, bool is_load_mem,
394+
bool i64_offset) {
394395
// Offsets >=2GB are statically OOB on 32-bit systems.
395396
DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
396397
DCHECK_EQ(type.value_type() == kWasmI64, dst.is_gp_pair());

deps/v8/src/wasm/baseline/liftoff-assembler.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -675,7 +675,7 @@ class LiftoffAssembler : public TurboAssembler {
675675
inline void Load(LiftoffRegister dst, Register src_addr, Register offset_reg,
676676
uintptr_t offset_imm, LoadType type, LiftoffRegList pinned,
677677
uint32_t* protected_load_pc = nullptr,
678-
bool is_load_mem = false);
678+
bool is_load_mem = false, bool i64_offset = false);
679679
inline void Store(Register dst_addr, Register offset_reg,
680680
uintptr_t offset_imm, LiftoffRegister src, StoreType type,
681681
LiftoffRegList pinned,

deps/v8/src/wasm/baseline/liftoff-compiler.cc

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2792,14 +2792,16 @@ class LiftoffCompiler {
27922792
// Only look at the slot, do not pop it yet (will happen in PopToRegister
27932793
// below, if this is not a statically-in-bounds index).
27942794
auto& index_slot = __ cache_state()->stack_state.back();
2795+
bool i64_offset = index_val.type == kWasmI64;
27952796
if (IndexStaticallyInBounds(index_slot, type.size(), &offset)) {
27962797
__ cache_state()->stack_state.pop_back();
27972798
DEBUG_CODE_COMMENT("load from memory (constant offset)");
27982799
LiftoffRegList pinned;
27992800
Register mem = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
28002801
LOAD_INSTANCE_FIELD(mem, MemoryStart, kSystemPointerSize, pinned);
28012802
LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
2802-
__ Load(value, mem, no_reg, offset, type, pinned, nullptr, true);
2803+
__ Load(value, mem, no_reg, offset, type, pinned, nullptr, true,
2804+
i64_offset);
28032805
__ PushRegister(kind, value);
28042806
} else {
28052807
LiftoffRegister full_index = __ PopToRegister();
@@ -2818,8 +2820,8 @@ class LiftoffCompiler {
28182820
LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
28192821

28202822
uint32_t protected_load_pc = 0;
2821-
__ Load(value, mem, index, offset, type, pinned, &protected_load_pc,
2822-
true);
2823+
__ Load(value, mem, index, offset, type, pinned, &protected_load_pc, true,
2824+
i64_offset);
28232825
if (env_->use_trap_handler) {
28242826
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
28252827
protected_load_pc);

deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -395,7 +395,11 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
395395
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
396396
Register offset_reg, uintptr_t offset_imm,
397397
LoadType type, LiftoffRegList pinned,
398-
uint32_t* protected_load_pc, bool is_load_mem) {
398+
uint32_t* protected_load_pc, bool is_load_mem,
399+
bool i64_offset) {
400+
if (offset_reg != no_reg && !i64_offset) {
401+
AssertZeroExtended(offset_reg);
402+
}
399403
Operand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
400404
if (protected_load_pc) *protected_load_pc = pc_offset();
401405
switch (type.value()) {
Lines changed: 58 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,58 @@
1+
// Copyright 2021 the V8 project authors. All rights reserved.
2+
// Use of this source code is governed by a BSD-style license that can be
3+
// found in the LICENSE file.
4+
//
5+
// Flags: --enable-testing-opcode-in-wasm --nowasm-tier-up --wasm-tier-mask-for-testing=2
6+
7+
load("test/mjsunit/wasm/wasm-module-builder.js");
8+
9+
var instance = (function () {
10+
var builder = new WasmModuleBuilder();
11+
builder.addMemory(1, 1, false /* exported */);
12+
13+
var sig_index = builder.addType(makeSig(
14+
[kWasmI32, kWasmI32, kWasmI32, kWasmI32, kWasmI32, kWasmI32, kWasmI32,
15+
kWasmI32],
16+
[kWasmI32]));
17+
var sig_three = builder.addType(makeSig(
18+
[kWasmI64, kWasmI64, kWasmI64, kWasmI64, kWasmI64, kWasmI64, kWasmI64,
19+
kWasmI64],
20+
[]));
21+
22+
var zero = builder.addFunction("zero", kSig_i_i);
23+
var one = builder.addFunction("one", sig_index);
24+
var two = builder.addFunction("two", kSig_v_i);
25+
var three = builder.addFunction("three", sig_three).addBody([]);
26+
27+
zero.addBody([kExprLocalGet, 0, kExprI32LoadMem, 0, 0]);
28+
29+
one.addBody([
30+
kExprLocalGet, 7,
31+
kExprCallFunction, zero.index]);
32+
33+
two.addBody([
34+
kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
35+
kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
36+
kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
37+
kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
38+
kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
39+
kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
40+
kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
41+
kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
42+
kExprCallFunction, three.index,
43+
kExprI32Const, 0,
44+
kExprI32Const, 0,
45+
kExprI32Const, 0,
46+
kExprI32Const, 0,
47+
kExprI32Const, 0,
48+
kExprI32Const, 0,
49+
kExprI32Const, 0,
50+
kExprI32Const, 0,
51+
kExprCallFunction, one.index,
52+
kExprDrop,
53+
]).exportFunc();
54+
55+
return builder.instantiate({});
56+
})();
57+
58+
instance.exports.two()

0 commit comments

Comments
 (0)