diff --git a/scripts/gen-s-parser.py b/scripts/gen-s-parser.py index 4a900ac0e51..3d493f1de77 100755 --- a/scripts/gen-s-parser.py +++ b/scripts/gen-s-parser.py @@ -44,44 +44,44 @@ ("memory.copy", "makeMemoryCopy(s)"), ("memory.fill", "makeMemoryFill(s)"), ("push", "makePush(s)"), - ("i32.pop", "makePop(i32)"), - ("i64.pop", "makePop(i64)"), - ("f32.pop", "makePop(f32)"), - ("f64.pop", "makePop(f64)"), - ("v128.pop", "makePop(v128)"), - ("funcref.pop", "makePop(funcref)"), - ("anyref.pop", "makePop(anyref)"), - ("nullref.pop", "makePop(nullref)"), - ("exnref.pop", "makePop(exnref)"), - ("i32.load", "makeLoad(s, i32, /*isAtomic=*/false)"), - ("i64.load", "makeLoad(s, i64, /*isAtomic=*/false)"), - ("f32.load", "makeLoad(s, f32, /*isAtomic=*/false)"), - ("f64.load", "makeLoad(s, f64, /*isAtomic=*/false)"), - ("i32.load8_s", "makeLoad(s, i32, /*isAtomic=*/false)"), - ("i32.load8_u", "makeLoad(s, i32, /*isAtomic=*/false)"), - ("i32.load16_s", "makeLoad(s, i32, /*isAtomic=*/false)"), - ("i32.load16_u", "makeLoad(s, i32, /*isAtomic=*/false)"), - ("i64.load8_s", "makeLoad(s, i64, /*isAtomic=*/false)"), - ("i64.load8_u", "makeLoad(s, i64, /*isAtomic=*/false)"), - ("i64.load16_s", "makeLoad(s, i64, /*isAtomic=*/false)"), - ("i64.load16_u", "makeLoad(s, i64, /*isAtomic=*/false)"), - ("i64.load32_s", "makeLoad(s, i64, /*isAtomic=*/false)"), - ("i64.load32_u", "makeLoad(s, i64, /*isAtomic=*/false)"), - ("i32.store", "makeStore(s, i32, /*isAtomic=*/false)"), - ("i64.store", "makeStore(s, i64, /*isAtomic=*/false)"), - ("f32.store", "makeStore(s, f32, /*isAtomic=*/false)"), - ("f64.store", "makeStore(s, f64, /*isAtomic=*/false)"), - ("i32.store8", "makeStore(s, i32, /*isAtomic=*/false)"), - ("i32.store16", "makeStore(s, i32, /*isAtomic=*/false)"), - ("i64.store8", "makeStore(s, i64, /*isAtomic=*/false)"), - ("i64.store16", "makeStore(s, i64, /*isAtomic=*/false)"), - ("i64.store32", "makeStore(s, i64, /*isAtomic=*/false)"), + ("i32.pop", "makePop(Type::i32)"), + ("i64.pop", 
"makePop(Type::i64)"), + ("f32.pop", "makePop(Type::f32)"), + ("f64.pop", "makePop(Type::f64)"), + ("v128.pop", "makePop(Type::v128)"), + ("funcref.pop", "makePop(Type::funcref)"), + ("anyref.pop", "makePop(Type::anyref)"), + ("nullref.pop", "makePop(Type::nullref)"), + ("exnref.pop", "makePop(Type::exnref)"), + ("i32.load", "makeLoad(s, Type::i32, /*isAtomic=*/false)"), + ("i64.load", "makeLoad(s, Type::i64, /*isAtomic=*/false)"), + ("f32.load", "makeLoad(s, Type::f32, /*isAtomic=*/false)"), + ("f64.load", "makeLoad(s, Type::f64, /*isAtomic=*/false)"), + ("i32.load8_s", "makeLoad(s, Type::i32, /*isAtomic=*/false)"), + ("i32.load8_u", "makeLoad(s, Type::i32, /*isAtomic=*/false)"), + ("i32.load16_s", "makeLoad(s, Type::i32, /*isAtomic=*/false)"), + ("i32.load16_u", "makeLoad(s, Type::i32, /*isAtomic=*/false)"), + ("i64.load8_s", "makeLoad(s, Type::i64, /*isAtomic=*/false)"), + ("i64.load8_u", "makeLoad(s, Type::i64, /*isAtomic=*/false)"), + ("i64.load16_s", "makeLoad(s, Type::i64, /*isAtomic=*/false)"), + ("i64.load16_u", "makeLoad(s, Type::i64, /*isAtomic=*/false)"), + ("i64.load32_s", "makeLoad(s, Type::i64, /*isAtomic=*/false)"), + ("i64.load32_u", "makeLoad(s, Type::i64, /*isAtomic=*/false)"), + ("i32.store", "makeStore(s, Type::i32, /*isAtomic=*/false)"), + ("i64.store", "makeStore(s, Type::i64, /*isAtomic=*/false)"), + ("f32.store", "makeStore(s, Type::f32, /*isAtomic=*/false)"), + ("f64.store", "makeStore(s, Type::f64, /*isAtomic=*/false)"), + ("i32.store8", "makeStore(s, Type::i32, /*isAtomic=*/false)"), + ("i32.store16", "makeStore(s, Type::i32, /*isAtomic=*/false)"), + ("i64.store8", "makeStore(s, Type::i64, /*isAtomic=*/false)"), + ("i64.store16", "makeStore(s, Type::i64, /*isAtomic=*/false)"), + ("i64.store32", "makeStore(s, Type::i64, /*isAtomic=*/false)"), ("memory.size", "makeHost(s, HostOp::MemorySize)"), ("memory.grow", "makeHost(s, HostOp::MemoryGrow)"), - ("i32.const", "makeConst(s, i32)"), - ("i64.const", "makeConst(s, i64)"), - ("f32.const", 
"makeConst(s, f32)"), - ("f64.const", "makeConst(s, f64)"), + ("i32.const", "makeConst(s, Type::i32)"), + ("i64.const", "makeConst(s, Type::i64)"), + ("f32.const", "makeConst(s, Type::f32)"), + ("f64.const", "makeConst(s, Type::f64)"), ("i32.eqz", "makeUnary(s, UnaryOp::EqZInt32)"), ("i32.eq", "makeBinary(s, BinaryOp::EqInt32)"), ("i32.ne", "makeBinary(s, BinaryOp::NeInt32)"), @@ -212,72 +212,72 @@ ("i64.extend32_s", "makeUnary(s, UnaryOp::ExtendS32Int64)"), # atomic instructions ("atomic.notify", "makeAtomicNotify(s)"), - ("i32.atomic.wait", "makeAtomicWait(s, i32)"), - ("i64.atomic.wait", "makeAtomicWait(s, i64)"), + ("i32.atomic.wait", "makeAtomicWait(s, Type::i32)"), + ("i64.atomic.wait", "makeAtomicWait(s, Type::i64)"), ("atomic.fence", "makeAtomicFence(s)"), - ("i32.atomic.load8_u", "makeLoad(s, i32, /*isAtomic=*/true)"), - ("i32.atomic.load16_u", "makeLoad(s, i32, /*isAtomic=*/true)"), - ("i32.atomic.load", "makeLoad(s, i32, /*isAtomic=*/true)"), - ("i64.atomic.load8_u", "makeLoad(s, i64, /*isAtomic=*/true)"), - ("i64.atomic.load16_u", "makeLoad(s, i64, /*isAtomic=*/true)"), - ("i64.atomic.load32_u", "makeLoad(s, i64, /*isAtomic=*/true)"), - ("i64.atomic.load", "makeLoad(s, i64, /*isAtomic=*/true)"), - ("i32.atomic.store8", "makeStore(s, i32, /*isAtomic=*/true)"), - ("i32.atomic.store16", "makeStore(s, i32, /*isAtomic=*/true)"), - ("i32.atomic.store", "makeStore(s, i32, /*isAtomic=*/true)"), - ("i64.atomic.store8", "makeStore(s, i64, /*isAtomic=*/true)"), - ("i64.atomic.store16", "makeStore(s, i64, /*isAtomic=*/true)"), - ("i64.atomic.store32", "makeStore(s, i64, /*isAtomic=*/true)"), - ("i64.atomic.store", "makeStore(s, i64, /*isAtomic=*/true)"), - ("i32.atomic.rmw8.add_u", "makeAtomicRMWOrCmpxchg(s, i32)"), - ("i32.atomic.rmw16.add_u", "makeAtomicRMWOrCmpxchg(s, i32)"), - ("i32.atomic.rmw.add", "makeAtomicRMWOrCmpxchg(s, i32)"), - ("i64.atomic.rmw8.add_u", "makeAtomicRMWOrCmpxchg(s, i64)"), - ("i64.atomic.rmw16.add_u", "makeAtomicRMWOrCmpxchg(s, i64)"), - 
("i64.atomic.rmw32.add_u", "makeAtomicRMWOrCmpxchg(s, i64)"), - ("i64.atomic.rmw.add", "makeAtomicRMWOrCmpxchg(s, i64)"), - ("i32.atomic.rmw8.sub_u", "makeAtomicRMWOrCmpxchg(s, i32)"), - ("i32.atomic.rmw16.sub_u", "makeAtomicRMWOrCmpxchg(s, i32)"), - ("i32.atomic.rmw.sub", "makeAtomicRMWOrCmpxchg(s, i32)"), - ("i64.atomic.rmw8.sub_u", "makeAtomicRMWOrCmpxchg(s, i64)"), - ("i64.atomic.rmw16.sub_u", "makeAtomicRMWOrCmpxchg(s, i64)"), - ("i64.atomic.rmw32.sub_u", "makeAtomicRMWOrCmpxchg(s, i64)"), - ("i64.atomic.rmw.sub", "makeAtomicRMWOrCmpxchg(s, i64)"), - ("i32.atomic.rmw8.and_u", "makeAtomicRMWOrCmpxchg(s, i32)"), - ("i32.atomic.rmw16.and_u", "makeAtomicRMWOrCmpxchg(s, i32)"), - ("i32.atomic.rmw.and", "makeAtomicRMWOrCmpxchg(s, i32)"), - ("i64.atomic.rmw8.and_u", "makeAtomicRMWOrCmpxchg(s, i64)"), - ("i64.atomic.rmw16.and_u", "makeAtomicRMWOrCmpxchg(s, i64)"), - ("i64.atomic.rmw32.and_u", "makeAtomicRMWOrCmpxchg(s, i64)"), - ("i64.atomic.rmw.and", "makeAtomicRMWOrCmpxchg(s, i64)"), - ("i32.atomic.rmw8.or_u", "makeAtomicRMWOrCmpxchg(s, i32)"), - ("i32.atomic.rmw16.or_u", "makeAtomicRMWOrCmpxchg(s, i32)"), - ("i32.atomic.rmw.or", "makeAtomicRMWOrCmpxchg(s, i32)"), - ("i64.atomic.rmw8.or_u", "makeAtomicRMWOrCmpxchg(s, i64)"), - ("i64.atomic.rmw16.or_u", "makeAtomicRMWOrCmpxchg(s, i64)"), - ("i64.atomic.rmw32.or_u", "makeAtomicRMWOrCmpxchg(s, i64)"), - ("i64.atomic.rmw.or", "makeAtomicRMWOrCmpxchg(s, i64)"), - ("i32.atomic.rmw8.xor_u", "makeAtomicRMWOrCmpxchg(s, i32)"), - ("i32.atomic.rmw16.xor_u", "makeAtomicRMWOrCmpxchg(s, i32)"), - ("i32.atomic.rmw.xor", "makeAtomicRMWOrCmpxchg(s, i32)"), - ("i64.atomic.rmw8.xor_u", "makeAtomicRMWOrCmpxchg(s, i64)"), - ("i64.atomic.rmw16.xor_u", "makeAtomicRMWOrCmpxchg(s, i64)"), - ("i64.atomic.rmw32.xor_u", "makeAtomicRMWOrCmpxchg(s, i64)"), - ("i64.atomic.rmw.xor", "makeAtomicRMWOrCmpxchg(s, i64)"), - ("i32.atomic.rmw8.xchg_u", "makeAtomicRMWOrCmpxchg(s, i32)"), - ("i32.atomic.rmw16.xchg_u", "makeAtomicRMWOrCmpxchg(s, i32)"), - 
("i32.atomic.rmw.xchg", "makeAtomicRMWOrCmpxchg(s, i32)"), - ("i64.atomic.rmw8.xchg_u", "makeAtomicRMWOrCmpxchg(s, i64)"), - ("i64.atomic.rmw16.xchg_u", "makeAtomicRMWOrCmpxchg(s, i64)"), - ("i64.atomic.rmw32.xchg_u", "makeAtomicRMWOrCmpxchg(s, i64)"), - ("i64.atomic.rmw.xchg", "makeAtomicRMWOrCmpxchg(s, i64)"), - ("i32.atomic.rmw8.cmpxchg_u", "makeAtomicRMWOrCmpxchg(s, i32)"), - ("i32.atomic.rmw16.cmpxchg_u", "makeAtomicRMWOrCmpxchg(s, i32)"), - ("i32.atomic.rmw.cmpxchg", "makeAtomicRMWOrCmpxchg(s, i32)"), - ("i64.atomic.rmw8.cmpxchg_u", "makeAtomicRMWOrCmpxchg(s, i64)"), - ("i64.atomic.rmw16.cmpxchg_u", "makeAtomicRMWOrCmpxchg(s, i64)"), - ("i64.atomic.rmw32.cmpxchg_u", "makeAtomicRMWOrCmpxchg(s, i64)"), - ("i64.atomic.rmw.cmpxchg", "makeAtomicRMWOrCmpxchg(s, i64)"), + ("i32.atomic.load8_u", "makeLoad(s, Type::i32, /*isAtomic=*/true)"), + ("i32.atomic.load16_u", "makeLoad(s, Type::i32, /*isAtomic=*/true)"), + ("i32.atomic.load", "makeLoad(s, Type::i32, /*isAtomic=*/true)"), + ("i64.atomic.load8_u", "makeLoad(s, Type::i64, /*isAtomic=*/true)"), + ("i64.atomic.load16_u", "makeLoad(s, Type::i64, /*isAtomic=*/true)"), + ("i64.atomic.load32_u", "makeLoad(s, Type::i64, /*isAtomic=*/true)"), + ("i64.atomic.load", "makeLoad(s, Type::i64, /*isAtomic=*/true)"), + ("i32.atomic.store8", "makeStore(s, Type::i32, /*isAtomic=*/true)"), + ("i32.atomic.store16", "makeStore(s, Type::i32, /*isAtomic=*/true)"), + ("i32.atomic.store", "makeStore(s, Type::i32, /*isAtomic=*/true)"), + ("i64.atomic.store8", "makeStore(s, Type::i64, /*isAtomic=*/true)"), + ("i64.atomic.store16", "makeStore(s, Type::i64, /*isAtomic=*/true)"), + ("i64.atomic.store32", "makeStore(s, Type::i64, /*isAtomic=*/true)"), + ("i64.atomic.store", "makeStore(s, Type::i64, /*isAtomic=*/true)"), + ("i32.atomic.rmw8.add_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"), + ("i32.atomic.rmw16.add_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"), + ("i32.atomic.rmw.add", "makeAtomicRMWOrCmpxchg(s, Type::i32)"), + 
("i64.atomic.rmw8.add_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"), + ("i64.atomic.rmw16.add_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"), + ("i64.atomic.rmw32.add_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"), + ("i64.atomic.rmw.add", "makeAtomicRMWOrCmpxchg(s, Type::i64)"), + ("i32.atomic.rmw8.sub_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"), + ("i32.atomic.rmw16.sub_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"), + ("i32.atomic.rmw.sub", "makeAtomicRMWOrCmpxchg(s, Type::i32)"), + ("i64.atomic.rmw8.sub_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"), + ("i64.atomic.rmw16.sub_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"), + ("i64.atomic.rmw32.sub_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"), + ("i64.atomic.rmw.sub", "makeAtomicRMWOrCmpxchg(s, Type::i64)"), + ("i32.atomic.rmw8.and_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"), + ("i32.atomic.rmw16.and_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"), + ("i32.atomic.rmw.and", "makeAtomicRMWOrCmpxchg(s, Type::i32)"), + ("i64.atomic.rmw8.and_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"), + ("i64.atomic.rmw16.and_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"), + ("i64.atomic.rmw32.and_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"), + ("i64.atomic.rmw.and", "makeAtomicRMWOrCmpxchg(s, Type::i64)"), + ("i32.atomic.rmw8.or_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"), + ("i32.atomic.rmw16.or_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"), + ("i32.atomic.rmw.or", "makeAtomicRMWOrCmpxchg(s, Type::i32)"), + ("i64.atomic.rmw8.or_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"), + ("i64.atomic.rmw16.or_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"), + ("i64.atomic.rmw32.or_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"), + ("i64.atomic.rmw.or", "makeAtomicRMWOrCmpxchg(s, Type::i64)"), + ("i32.atomic.rmw8.xor_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"), + ("i32.atomic.rmw16.xor_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"), + ("i32.atomic.rmw.xor", "makeAtomicRMWOrCmpxchg(s, Type::i32)"), + ("i64.atomic.rmw8.xor_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"), + 
("i64.atomic.rmw16.xor_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"), + ("i64.atomic.rmw32.xor_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"), + ("i64.atomic.rmw.xor", "makeAtomicRMWOrCmpxchg(s, Type::i64)"), + ("i32.atomic.rmw8.xchg_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"), + ("i32.atomic.rmw16.xchg_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"), + ("i32.atomic.rmw.xchg", "makeAtomicRMWOrCmpxchg(s, Type::i32)"), + ("i64.atomic.rmw8.xchg_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"), + ("i64.atomic.rmw16.xchg_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"), + ("i64.atomic.rmw32.xchg_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"), + ("i64.atomic.rmw.xchg", "makeAtomicRMWOrCmpxchg(s, Type::i64)"), + ("i32.atomic.rmw8.cmpxchg_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"), + ("i32.atomic.rmw16.cmpxchg_u", "makeAtomicRMWOrCmpxchg(s, Type::i32)"), + ("i32.atomic.rmw.cmpxchg", "makeAtomicRMWOrCmpxchg(s, Type::i32)"), + ("i64.atomic.rmw8.cmpxchg_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"), + ("i64.atomic.rmw16.cmpxchg_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"), + ("i64.atomic.rmw32.cmpxchg_u", "makeAtomicRMWOrCmpxchg(s, Type::i64)"), + ("i64.atomic.rmw.cmpxchg", "makeAtomicRMWOrCmpxchg(s, Type::i64)"), # nontrapping float-to-int instructions ("i32.trunc_sat_f32_s", "makeUnary(s, UnaryOp::TruncSatSFloat32ToInt32)"), ("i32.trunc_sat_f32_u", "makeUnary(s, UnaryOp::TruncSatUFloat32ToInt32)"), @@ -288,9 +288,9 @@ ("i64.trunc_sat_f64_s", "makeUnary(s, UnaryOp::TruncSatSFloat64ToInt64)"), ("i64.trunc_sat_f64_u", "makeUnary(s, UnaryOp::TruncSatUFloat64ToInt64)"), # SIMD ops - ("v128.load", "makeLoad(s, v128, /*isAtomic=*/false)"), - ("v128.store", "makeStore(s, v128, /*isAtomic=*/false)"), - ("v128.const", "makeConst(s, v128)"), + ("v128.load", "makeLoad(s, Type::v128, /*isAtomic=*/false)"), + ("v128.store", "makeStore(s, Type::v128, /*isAtomic=*/false)"), + ("v128.const", "makeConst(s, Type::v128)"), ("v8x16.shuffle", "makeSIMDShuffle(s)"), ("i8x16.splat", "makeUnary(s, 
UnaryOp::SplatVecI8x16)"), ("i8x16.extract_lane_s", "makeSIMDExtract(s, SIMDExtractOp::ExtractLaneSVecI8x16, 16)"), diff --git a/src/abi/js.h b/src/abi/js.h index 89e3f0087a1..de74899b6a8 100644 --- a/src/abi/js.h +++ b/src/abi/js.h @@ -67,14 +67,14 @@ ensureScratchMemoryHelpers(Module* wasm, wasm->addFunction(std::move(func)); }; - ensureImport(SCRATCH_LOAD_I32, {i32}, i32); - ensureImport(SCRATCH_STORE_I32, {i32, i32}, none); - ensureImport(SCRATCH_LOAD_I64, {}, i64); - ensureImport(SCRATCH_STORE_I64, {i64}, none); - ensureImport(SCRATCH_LOAD_F32, {}, f32); - ensureImport(SCRATCH_STORE_F32, {f32}, none); - ensureImport(SCRATCH_LOAD_F64, {}, f64); - ensureImport(SCRATCH_STORE_F64, {f64}, none); + ensureImport(SCRATCH_LOAD_I32, {Type::i32}, Type::i32); + ensureImport(SCRATCH_STORE_I32, {Type::i32, Type::i32}, Type::none); + ensureImport(SCRATCH_LOAD_I64, {}, Type::i64); + ensureImport(SCRATCH_STORE_I64, {Type::i64}, Type::none); + ensureImport(SCRATCH_LOAD_F32, {}, Type::f32); + ensureImport(SCRATCH_STORE_F32, {Type::f32}, Type::none); + ensureImport(SCRATCH_LOAD_F64, {}, Type::f64); + ensureImport(SCRATCH_STORE_F64, {Type::f64}, Type::none); } inline bool isScratchMemoryHelper(cashew::IString name) { diff --git a/src/abi/stack.h b/src/abi/stack.h index 265a7af6ec0..c06caa2d098 100644 --- a/src/abi/stack.h +++ b/src/abi/stack.h @@ -88,7 +88,7 @@ getStackSpace(Index local, Function* func, Index size, Module& wasm) { local, builder.makeGlobalGet(stackPointer->name, PointerType))); // TODO: add stack max check Expression* added; - if (PointerType == i32) { + if (PointerType == Type::i32) { added = builder.makeBinary(AddInt32, builder.makeLocalGet(local, PointerType), builder.makeConst(Literal(int32_t(size)))); @@ -104,7 +104,7 @@ getStackSpace(Index local, Function* func, Index size, Module& wasm) { FindAllPointers finder(func->body); for (auto** ptr : finder.list) { auto* ret = (*ptr)->cast(); - if (ret->value && ret->value->type != unreachable) { + if (ret->value && 
ret->value->type != Type::unreachable) { // handle the returned value auto* block = builder.makeBlock(); auto temp = builder.addVar(func, ret->value->type); @@ -120,10 +120,10 @@ getStackSpace(Index local, Function* func, Index size, Module& wasm) { } } // add stack restores to the body - if (func->body->type == none) { + if (func->body->type == Type::none) { block->list.push_back(func->body); block->list.push_back(makeStackRestore()); - } else if (func->body->type == unreachable) { + } else if (func->body->type == Type::unreachable) { block->list.push_back(func->body); // no need to restore the old stack value, we're gone anyhow } else { diff --git a/src/asm2wasm.h b/src/asm2wasm.h index b30a1f6daa8..1f1805379b0 100644 --- a/src/asm2wasm.h +++ b/src/asm2wasm.h @@ -430,7 +430,7 @@ class Asm2WasmBuilder { // zero bool import; IString module, base; - MappedGlobal() : type(none), import(false) {} + MappedGlobal() : type(Type::none), import(false) {} MappedGlobal(Type type) : type(type), import(false) {} MappedGlobal(Type type, bool import, IString module, IString base) : type(type), import(import), module(module), base(base) {} @@ -456,7 +456,7 @@ class Asm2WasmBuilder { private: void allocateGlobal(IString name, Type type, Literal value = Literal()) { assert(mappedGlobals.find(name) == mappedGlobals.end()); - if (value.type == none) { + if (value.type == Type::none) { value = Literal::makeZero(type); } mappedGlobals.emplace(name, MappedGlobal(type)); @@ -529,12 +529,8 @@ class Asm2WasmBuilder { // ok since in JS, double can contain everything i32 and f32 can). for (size_t i = 0; i < params.size(); i++) { if (mergedParams.size() > i) { - // TODO: Is this dead? 
- // if (mergedParams[i] == Type::none) { - // mergedParams[i] = params[i]; // use a more concrete type - // } else if (mergedParams[i] != params[i]) { - mergedParams[i] = f64; // overloaded type, make it a double + mergedParams[i] = Type::f64; // overloaded type, make it a double } } else { mergedParams.push_back(params[i]); // add a new param @@ -557,7 +553,7 @@ class Asm2WasmBuilder { } Type getResultTypeOfCallUsingParent(Ref parent, AsmData* data) { - auto result = none; + Type result = Type::none; if (!!parent) { // if the parent is a seq, we cannot be the last element in it (we would // have a coercion, which would be the parent), so we must be (us, @@ -642,18 +638,18 @@ class Asm2WasmBuilder { if (op == PLUS) { return isInteger ? BinaryOp::AddInt32 - : (leftType == f32 ? BinaryOp::AddFloat32 - : BinaryOp::AddFloat64); + : (leftType == Type::f32 ? BinaryOp::AddFloat32 + : BinaryOp::AddFloat64); } if (op == MINUS) { return isInteger ? BinaryOp::SubInt32 - : (leftType == f32 ? BinaryOp::SubFloat32 - : BinaryOp::SubFloat64); + : (leftType == Type::f32 ? BinaryOp::SubFloat32 + : BinaryOp::SubFloat64); } if (op == MUL) { return isInteger ? BinaryOp::MulInt32 - : (leftType == f32 ? BinaryOp::MulFloat32 - : BinaryOp::MulFloat64); + : (leftType == Type::f32 ? BinaryOp::MulFloat32 + : BinaryOp::MulFloat64); } if (op == AND) { return BinaryOp::AndInt32; @@ -674,14 +670,14 @@ class Asm2WasmBuilder { return BinaryOp::ShrUInt32; } if (op == EQ) { - return isInteger - ? BinaryOp::EqInt32 - : (leftType == f32 ? BinaryOp::EqFloat32 : BinaryOp::EqFloat64); + return isInteger ? BinaryOp::EqInt32 + : (leftType == Type::f32 ? BinaryOp::EqFloat32 + : BinaryOp::EqFloat64); } if (op == NE) { - return isInteger - ? BinaryOp::NeInt32 - : (leftType == f32 ? BinaryOp::NeFloat32 : BinaryOp::NeFloat64); + return isInteger ? BinaryOp::NeInt32 + : (leftType == Type::f32 ? 
BinaryOp::NeFloat32 + : BinaryOp::NeFloat64); } bool isUnsigned = isUnsignedCoercion(left) || isUnsignedCoercion(right); @@ -690,7 +686,8 @@ class Asm2WasmBuilder { if (isInteger) { return isUnsigned ? BinaryOp::DivUInt32 : BinaryOp::DivSInt32; } - return leftType == f32 ? BinaryOp::DivFloat32 : BinaryOp::DivFloat64; + return leftType == Type::f32 ? BinaryOp::DivFloat32 + : BinaryOp::DivFloat64; } if (op == MOD) { if (isInteger) { @@ -703,25 +700,25 @@ class Asm2WasmBuilder { if (isInteger) { return isUnsigned ? BinaryOp::GeUInt32 : BinaryOp::GeSInt32; } - return leftType == f32 ? BinaryOp::GeFloat32 : BinaryOp::GeFloat64; + return leftType == Type::f32 ? BinaryOp::GeFloat32 : BinaryOp::GeFloat64; } if (op == GT) { if (isInteger) { return isUnsigned ? BinaryOp::GtUInt32 : BinaryOp::GtSInt32; } - return leftType == f32 ? BinaryOp::GtFloat32 : BinaryOp::GtFloat64; + return leftType == Type::f32 ? BinaryOp::GtFloat32 : BinaryOp::GtFloat64; } if (op == LE) { if (isInteger) { return isUnsigned ? BinaryOp::LeUInt32 : BinaryOp::LeSInt32; } - return leftType == f32 ? BinaryOp::LeFloat32 : BinaryOp::LeFloat64; + return leftType == Type::f32 ? BinaryOp::LeFloat32 : BinaryOp::LeFloat64; } if (op == LT) { if (isInteger) { return isUnsigned ? BinaryOp::LtUInt32 : BinaryOp::LtSInt32; } - return leftType == f32 ? BinaryOp::LtFloat32 : BinaryOp::LtFloat64; + return leftType == Type::f32 ? 
BinaryOp::LtFloat32 : BinaryOp::LtFloat64; } abort_on("bad wasm binary op", op); abort(); // avoid warning @@ -785,7 +782,7 @@ class Asm2WasmBuilder { Literal getLiteral(Ref ast) { Literal ret = checkLiteral(ast); - assert(ret.type != none); + assert(ret.type != Type::none); return ret; } @@ -805,15 +802,15 @@ class Asm2WasmBuilder { if (base == ABS) { assert(operands && operands->size() == 1); Type type = (*operands)[0]->type; - if (type == i32) { + if (type == Type::i32) { sig = Signature(Type::i32, Type::i32); return true; } - if (type == f32) { + if (type == Type::f32) { sig = Signature(Type::f32, Type::f32); return true; } - if (type == f64) { + if (type == Type::f64) { sig = Signature(Type::f64, Type::f64); return true; } @@ -838,7 +835,7 @@ class Asm2WasmBuilder { } Expression* truncateToInt32(Expression* value) { - if (value->type == i64) { + if (value->type == Type::i64) { return builder.makeUnary(UnaryOp::WrapInt64, value); } // either i32, or a call_import whose type we don't know yet (but would be @@ -895,7 +892,7 @@ void Asm2WasmBuilder::processAsm(Ref ast) { import->name = MEMORY_BASE; import->module = "env"; import->base = MEMORY_BASE; - import->type = i32; + import->type = Type::i32; wasm.addGlobal(import); } @@ -905,7 +902,7 @@ void Asm2WasmBuilder::processAsm(Ref ast) { import->name = TABLE_BASE; import->module = "env"; import->base = TABLE_BASE; - import->type = i32; + import->type = Type::i32; wasm.addGlobal(import); } @@ -1277,7 +1274,7 @@ void Asm2WasmBuilder::processAsm(Ref ast) { // when function pointer casts are emulated. 
if (wasm.table.segments.size() == 0) { wasm.table.segments.emplace_back( - builder.makeGlobalGet(Name(TABLE_BASE), i32)); + builder.makeGlobalGet(Name(TABLE_BASE), Type::i32)); } auto& segment = wasm.table.segments[0]; functionTableStarts[name] = @@ -1332,7 +1329,7 @@ void Asm2WasmBuilder::processAsm(Ref ast) { auto value = pair[1]->getInteger(); auto* global = builder.makeGlobal(key, - i32, + Type::i32, builder.makeConst(Literal(int32_t(value))), Builder::Immutable); wasm.addGlobal(global); @@ -1512,11 +1509,11 @@ void Asm2WasmBuilder::processAsm(Ref ast) { curr->operands[i]->type == Type::unreachable); // overloaded, upgrade to f64 switch (curr->operands[i]->type) { - case i32: + case Type::i32: curr->operands[i] = parent->builder.makeUnary( ConvertSInt32ToFloat64, curr->operands[i]); break; - case f32: + case Type::f32: curr->operands[i] = parent->builder.makeUnary(PromoteFloat32, curr->operands[i]); break; @@ -1533,18 +1530,18 @@ void Asm2WasmBuilder::processAsm(Ref ast) { // we use a JS f64 value which is the most general, and convert to // it switch (old) { - case i32: { + case Type::i32: { Unary* trunc = parent->builder.makeUnary(TruncSFloat64ToInt32, curr); replaceCurrent( makeTrappingUnary(trunc, parent->trappingFunctions)); break; } - case f32: { + case Type::f32: { replaceCurrent(parent->builder.makeUnary(DemoteFloat64, curr)); break; } - case none: { + case Type::none: { // this function returns a value, but we are not using it, so it // must be dropped. autodrop will do that for us. break; @@ -1553,7 +1550,7 @@ void Asm2WasmBuilder::processAsm(Ref ast) { WASM_UNREACHABLE("unexpected type"); } } else { - assert(old == none); + assert(old == Type::none); // we don't want a return value here, but the import does provide // one autodrop will do that for us. 
} @@ -1651,8 +1648,8 @@ void Asm2WasmBuilder::processAsm(Ref ast) { i > 0 && (expressionStack[i - 1]->is() || expressionStack[i - 1]->is() || expressionStack[i - 1]->is()); - if (i == 0 || parentIsStructure || exp->type == none || - exp->type == unreachable) { + if (i == 0 || parentIsStructure || exp->type == Type::none || + exp->type == Type::unreachable) { if (debugLocations.count(exp) > 0) { // already present, so look back up i++; @@ -1746,33 +1743,34 @@ void Asm2WasmBuilder::processAsm(Ref ast) { // returns x / y auto* func = wasm.getFunction(udivmoddi4); Builder::clearLocals(func); - Index xl = Builder::addParam(func, "xl", i32), - xh = Builder::addParam(func, "xh", i32), - yl = Builder::addParam(func, "yl", i32), - yh = Builder::addParam(func, "yh", i32), - r = Builder::addParam(func, "r", i32), - x64 = Builder::addVar(func, "x64", i64), - y64 = Builder::addVar(func, "y64", i64); + Index xl = Builder::addParam(func, "xl", Type::i32), + xh = Builder::addParam(func, "xh", Type::i32), + yl = Builder::addParam(func, "yl", Type::i32), + yh = Builder::addParam(func, "yh", Type::i32), + r = Builder::addParam(func, "r", Type::i32), + x64 = Builder::addVar(func, "x64", Type::i64), + y64 = Builder::addVar(func, "y64", Type::i64); auto* body = allocator.alloc(); body->list.push_back( builder.makeLocalSet(x64, I64Utilities::recreateI64(builder, xl, xh))); body->list.push_back( builder.makeLocalSet(y64, I64Utilities::recreateI64(builder, yl, yh))); - body->list.push_back(builder.makeIf( - builder.makeLocalGet(r, i32), - builder.makeStore(8, - 0, - 8, - builder.makeLocalGet(r, i32), - builder.makeBinary(RemUInt64, - builder.makeLocalGet(x64, i64), - builder.makeLocalGet(y64, i64)), - i64))); body->list.push_back( - builder.makeLocalSet(x64, - builder.makeBinary(DivUInt64, - builder.makeLocalGet(x64, i64), - builder.makeLocalGet(y64, i64)))); + builder.makeIf(builder.makeLocalGet(r, Type::i32), + builder.makeStore( + 8, + 0, + 8, + builder.makeLocalGet(r, Type::i32), + 
builder.makeBinary(RemUInt64, + builder.makeLocalGet(x64, Type::i64), + builder.makeLocalGet(y64, Type::i64)), + Type::i64))); + body->list.push_back(builder.makeLocalSet( + x64, + builder.makeBinary(DivUInt64, + builder.makeLocalGet(x64, Type::i64), + builder.makeLocalGet(y64, Type::i64)))); body->list.push_back( builder.makeGlobalSet(tempRet0, I64Utilities::getI64High(builder, x64))); body->list.push_back(I64Utilities::getI64Low(builder, x64)); @@ -1841,7 +1839,7 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { return; } addedI32Temp = true; - Builder::addVar(function, I32_TEMP, i32); + Builder::addVar(function, I32_TEMP, Type::i32); functionVariables.insert(I32_TEMP); asmData.addVar(I32_TEMP, ASM_INT); }; @@ -1867,7 +1865,7 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { if (name == DEBUGGER) { Call* call = allocator.alloc(); call->target = DEBUGGER; - call->type = none; + call->type = Type::none; static bool addedImport = false; if (!addedImport) { addedImport = true; @@ -1945,13 +1943,14 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { if (ret->valueType != ret->value->type) { // in asm.js we have some implicit coercions that we must do explicitly // here - if (ret->valueType == f32 && ret->value->type == f64) { + if (ret->valueType == Type::f32 && ret->value->type == Type::f64) { auto conv = allocator.alloc(); conv->op = DemoteFloat64; conv->value = ret->value; conv->type = Type::f32; ret->value = conv; - } else if (ret->valueType == f64 && ret->value->type == f32) { + } else if (ret->valueType == Type::f64 && + ret->value->type == Type::f32) { ret->value = ensureDouble(ret->value); } else { abort_on("bad sub[] types", ast); @@ -1963,9 +1962,9 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { if (what == BINARY) { if ((ast[1] == OR || ast[1] == TRSHIFT) && ast[3]->isNumber() && ast[3]->getNumber() == 0) { - auto ret = - process(ast[2]); // just look through the ()|0 or ()>>>0 coercion - fixCallType(ret, i32); + // just look 
through the ()|0 or ()>>>0 coercion + auto ret = process(ast[2]); + fixCallType(ret, Type::i32); return ret; } auto ret = allocator.alloc(); @@ -1981,7 +1980,7 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { call->target = F64_REM; call->operands.push_back(ensureDouble(ret->left)); call->operands.push_back(ensureDouble(ret->right)); - call->type = f64; + call->type = Type::f64; static bool addedImport = false; if (!addedImport) { addedImport = true; @@ -2013,22 +2012,22 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { } else if (what == UNARY_PREFIX) { if (ast[1] == PLUS) { Literal literal = checkLiteral(ast); - if (literal.type != none) { + if (literal.type != Type::none) { return builder.makeConst(literal); } auto ret = process(ast[2]); // we are a +() coercion - if (ret->type == i32) { + if (ret->type == Type::i32) { auto conv = allocator.alloc(); conv->op = isUnsignedCoercion(ast[2]) ? ConvertUInt32ToFloat64 : ConvertSInt32ToFloat64; conv->value = ret; - conv->type = f64; + conv->type = Type::f64; return conv; } - if (ret->type == f32) { + if (ret->type == Type::f32) { return ensureDouble(ret); } - fixCallType(ret, f64); + fixCallType(ret, Type::f64); return ret; } else if (ast[1] == MINUS) { if (ast[2]->isNumber() || @@ -2067,7 +2066,7 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { // if we have an unsigned coercion on us, it is an unsigned op Expression* expr = process(ast[2][2]); bool isSigned = !isParentUnsignedCoercion(astStackHelper.getParent()); - bool isF64 = expr->type == f64; + bool isF64 = expr->type == Type::f64; UnaryOp op; if (isSigned && isF64) { op = UnaryOp::TruncSFloat64ToInt32; @@ -2092,7 +2091,7 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { auto ret = allocator.alloc(); ret->op = EqZInt32; ret->value = process(ast[2]); - ret->type = i32; + ret->type = Type::i32; return ret; } abort_on("bad unary", ast); @@ -2125,34 +2124,34 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { if (name == 
Math_fround) { assert(ast[2]->size() == 1); Literal lit = checkLiteral(ast[2][0], false /* raw is float */); - if (lit.type == f64) { + if (lit.type == Type::f64) { return builder.makeConst(Literal((float)lit.getf64())); } auto ret = allocator.alloc(); ret->value = process(ast[2][0]); - if (ret->value->type == f64) { + if (ret->value->type == Type::f64) { ret->op = DemoteFloat64; - } else if (ret->value->type == i32) { + } else if (ret->value->type == Type::i32) { if (isUnsignedCoercion(ast[2][0])) { ret->op = ConvertUInt32ToFloat32; } else { ret->op = ConvertSInt32ToFloat32; } - } else if (ret->value->type == f32) { + } else if (ret->value->type == Type::f32) { return ret->value; - } else if (ret->value->type == none) { // call, etc. - ret->value->type = f32; + } else if (ret->value->type == Type::none) { // call, etc. + ret->value->type = Type::f32; return ret->value; } else { abort_on("confusing fround target", ast[2][0]); } - ret->type = f32; + ret->type = Type::f32; return ret; } if (name == Math_abs) { // overloaded on type: i32, f32 or f64 Expression* value = process(ast[2][0]); - if (value->type == i32) { + if (value->type == Type::i32) { // No wasm support, so use a temp local ensureI32Temp(); auto set = allocator.alloc(); @@ -2163,7 +2162,7 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { auto get = [&]() { auto ret = allocator.alloc(); ret->index = function->getLocalIndex(I32_TEMP); - ret->type = i32; + ret->type = Type::i32; return ret; }; auto isNegative = allocator.alloc(); @@ -2177,18 +2176,18 @@ Function* Asm2WasmBuilder::processFunction(Ref ast) { flip->op = SubInt32; flip->left = builder.makeConst(Literal(0)); flip->right = get(); - flip->type = i32; + flip->type = Type::i32; auto select = allocator.alloc