1 change: 1 addition & 0 deletions lib/Backend/GlobOpt.cpp
@@ -15637,6 +15637,7 @@ GlobOpt::PreLowerCanonicalize(IR::Instr *instr, Value **pSrc1Val, Value **pSrc2V
case Js::OpCode::TrapIfMinIntOverNegOne:
case Js::OpCode::TrapIfTruncOverflow:
case Js::OpCode::TrapIfZero:
case Js::OpCode::TrapIfUnalignedAccess:
case Js::OpCode::FromVar:
case Js::OpCode::Conv_Prim:
case Js::OpCode::LdC_A_I4:
22 changes: 22 additions & 0 deletions lib/Backend/GlobOptExpr.cpp
@@ -435,6 +435,28 @@ GlobOpt::OptimizeChecks(IR::Instr * const instr)
}
break;
}
case Js::OpCode::TrapIfUnalignedAccess:
if (src1 && src1->IsImmediateOpnd())
{
int64 val = src1->GetImmediateValue(func);
Assert(src2->IsImmediateOpnd());
uint32 cmpValue = (uint32)src2->GetImmediateValue(func);
uint32 mask = src2->GetSize() - 1;
Assert((cmpValue & ~mask) == 0);

if (((uint32)val & mask) == cmpValue)
{
instr->FreeSrc2();
instr->m_opcode = Js::OpCode::Ld_I4;
}
else
{
TransformIntoUnreachable(WASMERR_UnalignedAtomicAccess, instr);
InsertByteCodeUses(instr);
RemoveCodeAfterNoFallthroughInstr(instr); //remove dead code
}
}
break;
default:
return;
}
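The new TrapIfUnalignedAccess case above folds the check when the index (src1) is a compile-time immediate: if (index & mask) already equals the required residue, the trap can never fire and degenerates into a plain Ld_I4 copy; otherwise the access is statically misaligned and the instruction is turned into an unconditional WASMERR_UnalignedAtomicAccess trap, with the unreachable code after it removed. A standalone sketch of that decision (the function and enum names here are illustrative, not ChakraCore's):

#include <cstdint>

enum class FoldResult { AlwaysAligned, AlwaysTraps };

// val: the immediate index; accessSize: access size in bytes carried by src2 (a power of two);
// cmpValue: the residue that (index & mask) must equal for the access to be aligned.
FoldResult FoldUnalignedTrap(int64_t val, uint32_t accessSize, uint32_t cmpValue)
{
    const uint32_t mask = accessSize - 1;
    return (((uint32_t)val & mask) == cmpValue)
        ? FoldResult::AlwaysAligned   // trap becomes Ld_I4 (a plain copy)
        : FoldResult::AlwaysTraps;    // trap becomes an unconditional throw
}
// e.g. FoldUnalignedTrap(6, 4, 2) -> AlwaysAligned, FoldUnalignedTrap(5, 4, 2) -> AlwaysTraps
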
122 changes: 37 additions & 85 deletions lib/Backend/IRBuilderAsmJs.cpp
@@ -1420,64 +1420,20 @@ IRBuilderAsmJs::BuildStartCall(Js::OpCodeAsmJs newOpcode, uint32 offset)
void
IRBuilderAsmJs::InitializeMemAccessTypeInfo(Js::ArrayBufferView::ViewType viewType, _Out_ MemAccessTypeInfo * typeInfo)
{
typeInfo->type = TyInt32;
typeInfo->valueRegType = WAsmJs::INT32;

AssertOrFailFast(typeInfo);

switch (viewType)
{
case Js::ArrayBufferView::TYPE_INT8_TO_INT64:
typeInfo->valueRegType = WAsmJs::INT64;
case Js::ArrayBufferView::TYPE_INT8:
typeInfo->arrayType = ValueType::GetObject(ObjectType::Int8Array);
typeInfo->type = TyInt8;
break;
case Js::ArrayBufferView::TYPE_UINT8_TO_INT64:
typeInfo->valueRegType = WAsmJs::INT64;
case Js::ArrayBufferView::TYPE_UINT8:
typeInfo->arrayType = ValueType::GetObject(ObjectType::Uint8Array);
typeInfo->type = TyUint8;
break;
case Js::ArrayBufferView::TYPE_INT16_TO_INT64:
typeInfo->valueRegType = WAsmJs::INT64;
case Js::ArrayBufferView::TYPE_INT16:
typeInfo->arrayType = ValueType::GetObject(ObjectType::Int16Array);
typeInfo->type = TyInt16;
break;
case Js::ArrayBufferView::TYPE_UINT16_TO_INT64:
typeInfo->valueRegType = WAsmJs::INT64;
case Js::ArrayBufferView::TYPE_UINT16:
typeInfo->arrayType = ValueType::GetObject(ObjectType::Uint16Array);
typeInfo->type = TyUint16;
break;
case Js::ArrayBufferView::TYPE_INT32_TO_INT64:
typeInfo->valueRegType = WAsmJs::INT64;
case Js::ArrayBufferView::TYPE_INT32:
typeInfo->arrayType = ValueType::GetObject(ObjectType::Int32Array);
typeInfo->type = TyInt32;
break;
case Js::ArrayBufferView::TYPE_UINT32_TO_INT64:
typeInfo->valueRegType = WAsmJs::INT64;
case Js::ArrayBufferView::TYPE_UINT32:
typeInfo->arrayType = ValueType::GetObject(ObjectType::Uint32Array);
typeInfo->type = TyUint32;
break;
case Js::ArrayBufferView::TYPE_FLOAT32:
typeInfo->valueRegType = WAsmJs::FLOAT32;
typeInfo->arrayType = ValueType::GetObject(ObjectType::Float32Array);
typeInfo->type = TyFloat32;
break;
case Js::ArrayBufferView::TYPE_FLOAT64:
typeInfo->valueRegType = WAsmJs::FLOAT64;
typeInfo->arrayType = ValueType::GetObject(ObjectType::Float64Array);
typeInfo->type = TyFloat64;
break;
case Js::ArrayBufferView::TYPE_INT64:
typeInfo->valueRegType = WAsmJs::INT64;
typeInfo->arrayType = ValueType::GetObject(ObjectType::Int64Array);
typeInfo->type = TyInt64;
#define ARRAYBUFFER_VIEW(name, align, RegType, MemType, irSuffix) \
case Js::ArrayBufferView::TYPE_##name: \
typeInfo->valueRegType = WAsmJs::FromPrimitiveType<RegType>(); \
typeInfo->type = Ty##irSuffix;\
typeInfo->arrayType = ValueType::GetObject(ObjectType::##irSuffix##Array); \
Assert(TySize[Ty##irSuffix] == (1<<align)); \
break;
#include "Language/AsmJsArrayBufferViews.h"
default:
Assume(UNREACHED);
AssertOrFailFast(UNREACHED);
}
}
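The hand-written per-view cases are replaced by an X-macro include: Language/AsmJsArrayBufferViews.h supplies one ARRAYBUFFER_VIEW(name, align, RegType, MemType, irSuffix) entry per view type, and the local macro definition above expands each entry into a switch case. As an illustration only (the actual table entries are not part of this diff), an assumed entry such as ARRAYBUFFER_VIEW(INT32, 2, int32, int32, Int32) would expand to:

case Js::ArrayBufferView::TYPE_INT32:
    typeInfo->valueRegType = WAsmJs::FromPrimitiveType<int32>();
    typeInfo->type = TyInt32;
    typeInfo->arrayType = ValueType::GetObject(ObjectType::Int32Array);
    Assert(TySize[TyInt32] == (1 << 2)); // the declared alignment must match the IR type size
    break;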

@@ -1493,11 +1449,15 @@ IRBuilderAsmJs::BuildWasmMemAccess(Js::OpCodeAsmJs newOpcode, uint32 offset)
void
IRBuilderAsmJs::BuildWasmMemAccess(Js::OpCodeAsmJs newOpcode, uint32 offset, uint32 slotIndex, Js::RegSlot value, uint32 constOffset, Js::ArrayBufferView::ViewType viewType)
{
bool isLd = newOpcode == Js::OpCodeAsmJs::LdArrWasm;
Js::OpCode op = isLd ? Js::OpCode::LdArrViewElemWasm : Js::OpCode::StArrViewElem;
bool isAtomic = newOpcode == Js::OpCodeAsmJs::StArrAtomic || newOpcode == Js::OpCodeAsmJs::LdArrAtomic;
bool isLd = newOpcode == Js::OpCodeAsmJs::LdArrWasm || newOpcode == Js::OpCodeAsmJs::LdArrAtomic;
Js::OpCode op = isAtomic ?
isLd ? Js::OpCode::LdAtomicWasm : Js::OpCode::StAtomicWasm
: isLd ? Js::OpCode::LdArrViewElemWasm : Js::OpCode::StArrViewElem;

MemAccessTypeInfo typeInfo;
InitializeMemAccessTypeInfo(viewType, &typeInfo);
const uint32 memAccessSize = TySize[typeInfo.type];

Js::RegSlot valueRegSlot = GetRegSlotFromTypedReg(value, typeInfo.valueRegType);
IR::Instr * instr = nullptr;
@@ -1506,6 +1466,22 @@ IRBuilderAsmJs::BuildWasmMemAccess(Js::OpCodeAsmJs newOpcode, uint32 offset, uin

Js::RegSlot indexRegSlot = GetRegSlotFromIntReg(slotIndex);
IR::RegOpnd * indexOpnd = BuildSrcOpnd(indexRegSlot, TyUint32);
if (isAtomic && memAccessSize > 1)
{
const uint32 mask = memAccessSize - 1;
// We need (constOffset + index) & mask == 0.
// Since constOffset is known ahead of time, it is enough to check
// index & mask == (memAccessSize - (constOffset & mask)) & mask
const uint32 offseted = constOffset & mask;
// In this IntConstOpnd, the value is what (index & mask) must equal; the type carries the size of the access
IR::Opnd* offsetedOpnd = IR::IntConstOpnd::NewFromType((memAccessSize - offseted) & mask, typeInfo.type, m_func);
IR::RegOpnd* intermediateIndex = IR::RegOpnd::New(TyUint32, m_func);
instr = IR::Instr::New(Js::OpCode::TrapIfUnalignedAccess, intermediateIndex, indexOpnd, offsetedOpnd, m_func);
AddInstr(instr, offset);

// Create dependency between load/store and trap through the index
indexOpnd = intermediateIndex;
}
indirOpnd = IR::IndirOpnd::New(BuildSrcOpnd(AsmJsRegSlots::BufferReg, TyVar), constOffset, typeInfo.type, m_func);
indirOpnd->SetIndexOpnd(indexOpnd);
indirOpnd->GetBaseOpnd()->SetValueType(typeInfo.arrayType);
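
The comment above relies on a small modular-arithmetic identity: with mask = memAccessSize - 1 and a power-of-two access size, (constOffset + index) & mask == 0 exactly when index & mask == (memAccessSize - (constOffset & mask)) & mask, so the only part that must be checked at run time is index & mask. A throwaway check of the identity (not part of the patch):

#include <cassert>
#include <cstdint>

void CheckAlignmentIdentity(uint32_t constOffset, uint32_t index, uint32_t size /* power of two */)
{
    const uint32_t mask = size - 1;
    const bool sumAligned = ((constOffset + index) & mask) == 0;
    const bool residueOk  = (index & mask) == ((size - (constOffset & mask)) & mask);
    assert(sumAligned == residueOk); // holds for all inputs
}
// Example with size = 4, constOffset = 6: the trap requires index & 3 == 2;
// 6 + 2 == 8 is 4-byte aligned, while 6 + 1 == 7 would trap.
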
@@ -6877,36 +6853,12 @@ IRBuilderAsmJs::BuildAsmSimdTypedArr(Js::OpCodeAsmJs newOpcode, uint32 offset, u

switch (viewType)
{
case Js::ArrayBufferView::TYPE_INT8:
arrayType = ValueType::GetObject(ObjectType::Int8Array);
break;
case Js::ArrayBufferView::TYPE_UINT8:
arrayType = ValueType::GetObject(ObjectType::Uint8Array);
break;
case Js::ArrayBufferView::TYPE_INT16:
arrayType = ValueType::GetObject(ObjectType::Int16Array);
mask = (uint32)~1;
break;
case Js::ArrayBufferView::TYPE_UINT16:
arrayType = ValueType::GetObject(ObjectType::Uint16Array);
mask = (uint32)~1;
break;
case Js::ArrayBufferView::TYPE_INT32:
arrayType = ValueType::GetObject(ObjectType::Int32Array);
mask = (uint32)~3;
break;
case Js::ArrayBufferView::TYPE_UINT32:
arrayType = ValueType::GetObject(ObjectType::Uint32Array);
mask = (uint32)~3;
break;
case Js::ArrayBufferView::TYPE_FLOAT32:
arrayType = ValueType::GetObject(ObjectType::Float32Array);
mask = (uint32)~3;
break;
case Js::ArrayBufferView::TYPE_FLOAT64:
arrayType = ValueType::GetObject(ObjectType::Float64Array);
mask = (uint32)~7;
#define ARRAYBUFFER_VIEW(name, align, RegType, MemType, irSuffix) \
case Js::ArrayBufferView::TYPE_##name: \
mask = ARRAYBUFFER_VIEW_MASK(align); \
arrayType = ValueType::GetObject(ObjectType::##irSuffix##Array); \
break;
#include "Language/AsmJsArrayBufferViews.h"
default:
Assert(UNREACHED);
}
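
ARRAYBUFFER_VIEW_MASK is not defined in this diff; to reproduce the hand-written masks it replaces ((uint32)~1, ~3, ~7 for 2-, 4- and 8-byte views), it presumably clears the low align bits, along the lines of the sketch below (presumed shape only):

#include <cstdint>
typedef uint32_t uint32;

// Presumed definition -- the real one lives outside this diff.
#define ARRAYBUFFER_VIEW_MASK(align) ((uint32)~((1u << (align)) - 1))

static_assert(ARRAYBUFFER_VIEW_MASK(1) == (uint32)~1, "2-byte views");
static_assert(ARRAYBUFFER_VIEW_MASK(2) == (uint32)~3, "4-byte views");
static_assert(ARRAYBUFFER_VIEW_MASK(3) == (uint32)~7, "8-byte views");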
6 changes: 6 additions & 0 deletions lib/Backend/JnHelperMethod.cpp
@@ -176,6 +176,12 @@ DECLSPEC_GUARDIGNORE _NOINLINE intptr_t GetNonTableMethodAddress(ThreadContextI

case HelperDirectMath_Tan:
return ShiftAddr(context, (double(*)(double))__libm_sse2_tan);

case HelperAtomicStore64:
return ShiftAddr(context, (double(*)(double))InterlockedExchange64);

case HelperMemoryBarrier:
return ShiftAddr(context, (void(*)())MemoryBarrier);
#endif

case HelperDirectMath_FloorDb:
2 changes: 2 additions & 0 deletions lib/Backend/JnHelperMethodList.h
@@ -582,6 +582,8 @@ HELPERCALL(DirectMath_Int64Rol , (int64(*)(int64,int64)) Wasm::WasmMath::Rol<int
HELPERCALL(DirectMath_Int64Ror , (int64(*)(int64,int64)) Wasm::WasmMath::Ror<int64>, 0)
HELPERCALL(DirectMath_Int64Clz , (int64(*)(int64)) Wasm::WasmMath::Clz<int64>, 0)
HELPERCALL(DirectMath_Int64Ctz , (int64(*)(int64)) Wasm::WasmMath::Ctz<int64>, 0)
HELPERCALL(AtomicStore64, nullptr, 0)
HELPERCALL(MemoryBarrier, nullptr, 0)
#elif defined(_M_X64)
// AMD64 regular CRT calls -- on AMD64 calling convention is already what we want -- args in XMM0, XMM1 rather than on stack which is slower.
HELPERCALL(DirectMath_Acos, nullptr, 0)
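The two new helpers are registered with null table entries here and resolved in JnHelperMethod.cpp above, where they map straight onto the Win32 InterlockedExchange64 and MemoryBarrier primitives. A minimal sketch of the semantics a call through them provides, assuming the standard Win32 declarations (the wrapper functions below are illustrative, not part of the patch):

#include <windows.h>

void AtomicStore64Helper(volatile LONG64* target, LONG64 value)
{
    // HelperAtomicStore64: a full-barrier 64-bit store (the exchanged-out value is discarded)
    InterlockedExchange64(target, value);
}

void MemoryBarrierHelper()
{
    // HelperMemoryBarrier: full hardware memory barrier
    MemoryBarrier();
}
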
103 changes: 99 additions & 4 deletions lib/Backend/Lower.cpp
@@ -1128,6 +1128,9 @@ Lowerer::LowerRange(IR::Instr *instrStart, IR::Instr *instrEnd, bool defaultDoFa
case Js::OpCode::TrapIfZero:
LowerTrapIfZero(instr);
break;
case Js::OpCode::TrapIfUnalignedAccess:
instrPrev = LowerTrapIfUnalignedAccess(instr);
break;
case Js::OpCode::DivU_I4:
case Js::OpCode::Div_I4:
this->LowerDivI4(instr);
@@ -1541,10 +1544,18 @@ Lowerer::LowerRange(IR::Instr *instrStart, IR::Instr *instrEnd, bool defaultDoFa
instrPrev = LowerLdArrViewElem(instr);
break;

case Js::OpCode::StAtomicWasm:
instrPrev = LowerStAtomicsWasm(instr);
break;

case Js::OpCode::StArrViewElem:
instrPrev = LowerStArrViewElem(instr);
break;

case Js::OpCode::LdAtomicWasm:
instrPrev = LowerLdAtomicsWasm(instr);
break;

case Js::OpCode::LdArrViewElemWasm:
instrPrev = LowerLdArrViewElemWasm(instr);
break;
@@ -9168,7 +9179,7 @@ Lowerer::LowerLdArrViewElem(IR::Instr * instr)
}

IR::Instr *
Contributor commented:
nit: Without knowing that LowerWasmArrayBoundsCheck returns a pointer to an instruction that is after the check, this looks like we're putting the store before the bounds check. Not really a problem if you know what's going on, but maybe could be a little clearer?

Contributor Author replied:
I think you're right. There is something off about that API as a whole.
I created issue #4751 to track cleaning this up.

Lowerer::LowerWasmMemOp(IR::Instr * instr, IR::Opnd *addrOpnd)
Lowerer::LowerWasmArrayBoundsCheck(IR::Instr * instr, IR::Opnd *addrOpnd)
{
uint32 offset = addrOpnd->AsIndirOpnd()->GetOffset();

@@ -9184,7 +9195,7 @@ Lowerer::LowerWasmMemOp(IR::Instr * instr, IR::Opnd *addrOpnd)
}
else
{
return m_lowererMD.LowerWasmMemOp(instr, addrOpnd);
return m_lowererMD.LowerWasmArrayBoundsCheck(instr, addrOpnd);
}
}

Expand All @@ -9204,7 +9215,7 @@ Lowerer::LowerLdArrViewElemWasm(IR::Instr * instr)
Assert(!dst->IsFloat32() || src1->IsFloat32());
Assert(!dst->IsFloat64() || src1->IsFloat64());

IR::Instr * done = LowerWasmMemOp(instr, src1);
IR::Instr * done = LowerWasmArrayBoundsCheck(instr, src1);
IR::Instr* newMove = InsertMove(dst, src1, done);

#if ENABLE_FAST_ARRAYBUFFER
@@ -9214,6 +9225,7 @@ Lowerer::LowerLdArrViewElemWasm(IR::Instr * instr)
#else
Unused(newMove);
#endif

instr->Remove();
return instrPrev;
#else
@@ -9375,6 +9387,57 @@ Lowerer::LowerMemOp(IR::Instr * instr)
return instrPrev;
}

IR::Instr*
Lowerer::LowerStAtomicsWasm(IR::Instr* instr)
{
#ifdef ENABLE_WASM
Assert(m_func->GetJITFunctionBody()->IsWasmFunction());
Assert(instr);
Assert(instr->m_opcode == Js::OpCode::StAtomicWasm);

IR::Instr * instrPrev = instr->m_prev;

IR::Opnd * dst = instr->GetDst();
IR::Opnd * src1 = instr->GetSrc1();

Assert(IRType_IsNativeInt(dst->GetType()));

IR::Instr * done = LowerWasmArrayBoundsCheck(instr, dst);
m_lowererMD.LowerAtomicStore(dst, src1, done);

instr->Remove();
return instrPrev;
#else
Assert(UNREACHED);
return instr;
#endif
}

IR::Instr * Lowerer::LowerLdAtomicsWasm(IR::Instr * instr)
{
#ifdef ENABLE_WASM
Assert(m_func->GetJITFunctionBody()->IsWasmFunction());
Assert(instr);
Assert(instr->m_opcode == Js::OpCode::LdAtomicWasm);

IR::Instr * instrPrev = instr->m_prev;

IR::Opnd * dst = instr->GetDst();
IR::Opnd * src1 = instr->GetSrc1();

Assert(IRType_IsNativeInt(dst->GetType()));

IR::Instr * done = LowerWasmArrayBoundsCheck(instr, src1);
m_lowererMD.LowerAtomicLoad(dst, src1, done);

instr->Remove();
return instrPrev;
#else
Assert(UNREACHED);
return instr;
#endif
}

IR::Instr *
Lowerer::LowerStArrViewElem(IR::Instr * instr)
{
@@ -9401,7 +9464,7 @@ Lowerer::LowerStArrViewElem(IR::Instr * instr)

if (m_func->GetJITFunctionBody()->IsWasmFunction())
{
done = LowerWasmMemOp(instr, dst);
done = LowerWasmArrayBoundsCheck(instr, dst);
}
else if (offset < 0)
{
@@ -25382,6 +25445,38 @@ Lowerer::LowerTrapIfZero(IR::Instr * const instr)
LowererMD::ChangeToAssign(instr);
}

IR::Instr*
Lowerer::LowerTrapIfUnalignedAccess(IR::Instr * const instr)
{
IR::Opnd* src1 = instr->GetSrc1();
IR::Opnd* src2 = instr->UnlinkSrc2();
Assert(instr);
Assert(instr->m_opcode == Js::OpCode::TrapIfUnalignedAccess);
Assert(src1 && !src1->IsVar());
Assert(src2 && src2->IsImmediateOpnd());
Assert(src2->GetSize() > 1);

uint32 mask = src2->GetSize() - 1;
uint32 cmpValue = (uint32)src2->GetImmediateValue(m_func);
src2->Free(m_func);

IR::IntConstOpnd* maskOpnd = IR::IntConstOpnd::New(mask, src1->GetType(), m_func);
IR::RegOpnd* maskedOpnd = IR::RegOpnd::New(src1->GetType(), m_func);
IR::Instr* newInstr = IR::Instr::New(Js::OpCode::And_I4, maskedOpnd, src1, maskOpnd, m_func);
instr->InsertBefore(newInstr);

IR::IntConstOpnd* cmpOpnd = IR::IntConstOpnd::New(cmpValue, maskedOpnd->GetType(), m_func, true);
IR::LabelInstr* alignedLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func);
newInstr = IR::BranchInstr::New(Js::OpCode::BrEq_I4, alignedLabel, maskedOpnd, cmpOpnd, m_func);
instr->InsertBefore(newInstr);
InsertLabel(true, instr);
GenerateThrow(IR::IntConstOpnd::NewFromType(SCODE_CODE(WASMERR_UnalignedAtomicAccess), TyInt32, m_func), instr);
instr->InsertBefore(alignedLabel);

instr->m_opcode = Js::OpCode::Ld_I4;
return instr;
}
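
LowerTrapIfUnalignedAccess expands the trap into an And_I4 of the index with (accessSize - 1), a BrEq_I4 against the required residue, and a throw block for the misaligned path, after which the original instruction degenerates into a Ld_I4 copy of the index. A rough standalone C++ equivalent of that control flow (ThrowWasmError below is a stand-in for the GenerateThrow path, not a ChakraCore API):

#include <cstdint>
#include <stdexcept>

[[noreturn]] static void ThrowWasmError(const char* code)
{
    throw std::runtime_error(code); // stands in for GenerateThrow(WASMERR_UnalignedAtomicAccess, ...)
}

static uint32_t TrapIfUnalignedAccessEquivalent(uint32_t index, uint32_t mask, uint32_t cmpValue)
{
    if ((index & mask) != cmpValue)                      // And_I4 + BrEq_I4 to alignedLabel
        ThrowWasmError("WASMERR_UnalignedAtomicAccess"); // misaligned path falls into the throw block
    return index;                                        // the trap instruction itself becomes Ld_I4
}
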

void
Lowerer::LowerTrapIfMinIntOverNegOne(IR::Instr * const instr)
{