
Commit c5c578e

rmacnak-google authored and Commit Queue committed
[vm, ffi] Don't read out-of-bounds when marshalling structs by value.
Switch the Windows ARM64 builds to use MSVC. Clang disagrees with itself about the handling of small structs in variadic functions: it allows a struct to be split between the last argument register and the stack when compiling the callee, but not when compiling the caller.

TEST=ci
Cq-Include-Trybots: luci.dart.try:vm-ffi-android-debug-arm-try,vm-ffi-android-debug-arm64c-try,vm-ffi-android-release-arm-try,vm-ffi-android-release-arm64c-try,vm-ffi-qemu-linux-release-arm-try,vm-linux-release-arm64-try,vm-mac-debug-arm64-try,vm-mac-release-arm64-try,vm-win-debug-arm64-try,vm-win-release-arm64-try,vm-ffi-qemu-linux-release-riscv64-try,vm-linux-debug-ia32-try,vm-linux-release-ia32-try,vm-win-release-ia32-try,vm-linux-debug-x64-try,vm-linux-release-x64-try,vm-mac-debug-x64-try,vm-mac-release-x64-try,vm-win-debug-x64-try,vm-win-release-x64-try
Bug: #52644
Bug: #53829
Change-Id: I2fd6c40620a885479f11bb8528ca1e9df3948a2f
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/331209
Commit-Queue: Ryan Macnak <[email protected]>
Reviewed-by: Daco Harkes <[email protected]>
Reviewed-by: Siva Annamalai <[email protected]>
1 parent 9186eb1 commit c5c578e
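The failure mode is easiest to see at a page boundary. Below is a minimal, self-contained C++ sketch (illustrative only, not from this commit: the 4 KiB page size, the guard-page setup, and the POSIX mmap/mprotect calls are assumptions) that places a 9-byte struct flush against an inaccessible page. Copying exactly 9 bytes is safe; a marshaller that rounds the tail load up to 8 bytes would read past the struct and fault. The test library below uses the same guard-page idea.

// Hypothetical repro of the out-of-bounds read this commit prevents.
#include <cstdint>
#include <cstring>
#include <sys/mman.h>

struct Struct9Uint8 {
  uint8_t a0, a1, a2, a3, a4, a5, a6, a7, a8;  // sizeof == 9
};

int main() {
  const size_t kPage = 4096;  // assumed page size for this sketch
  // Map two pages and make the second one inaccessible (a guard page).
  char* mem = static_cast<char*>(mmap(nullptr, 2 * kPage,
                                      PROT_READ | PROT_WRITE,
                                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
  mprotect(mem + kPage, kPage, PROT_NONE);
  // Place the struct so its last byte is the last byte of the first page.
  auto* s = reinterpret_cast<Struct9Uint8*>(mem + kPage - sizeof(Struct9Uint8));
  *s = Struct9Uint8{1, 2, 3, 4, 5, 6, 7, 8, 9};

  Struct9Uint8 copy;
  std::memcpy(&copy, s, sizeof copy);  // fine: touches exactly 9 bytes

  // A marshaller that picks up a8 with an 8-byte load at offset 8 would
  // touch bytes 9..15, land on the guard page, and crash:
  //   uint64_t tail = *reinterpret_cast<uint64_t*>(
  //       reinterpret_cast<char*>(s) + 8);
  return copy.a8;  // 9
}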

136 files changed: +1381 -875 lines

runtime/bin/ffi_test/ffi_test_functions.cc

Lines changed: 17 additions & 0 deletions
@@ -734,6 +734,23 @@ DART_EXPORT int64_t SumStruct9Uint8(Struct9Uint8 s9) {
   return s9.a0 + s9.a1 + s9.a2 + s9.a3 + s9.a4 + s9.a5 + s9.a6 + s9.a7 + s9.a8;
 }
 
+DART_EXPORT int64_t
+SumReturnStruct9Uint8(Struct9Uint8 (*callback)(Struct9Uint8*),
+                      Struct9Uint8* in) {
+  std::cout << "SumReturnStruct9Uint8 in (" << in->a0 << ", " << in->a1 << ", "
+            << in->a2 << ", " << in->a3 << ", " << in->a4 << ", " << in->a5
+            << ", " << in->a6 << ", " << in->a7 << ", " << in->a8 << ")\n";
+
+  Struct9Uint8 out = callback(in);
+
+  std::cout << "SumReturnStruct9Uint8 out (" << out.a0 << ", " << out.a1 << ", "
+            << out.a2 << ", " << out.a3 << ", " << out.a4 << ", " << out.a5
+            << ", " << out.a6 << ", " << out.a7 << ", " << out.a8 << ")\n";
+
+  return out.a0 + out.a1 + out.a2 + out.a3 + out.a4 + out.a5 + out.a6 + out.a7 +
+         out.a8;
+}
+
 // Allocates a multiple of the largest page size, so the last element of the
 // array is right at a page boundary. Explicitly allocate and make inaccessible
 // the next page to avoid flaky false-successes if the next page happens to be
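For orientation, a host-only driver for the new hook might look like the hypothetical sketch below (EchoStruct9Uint8 and DemoSumReturnStruct9Uint8 are stand-ins; in the SDK the callback is a Dart function reached through dart:ffi, and Struct9Uint8 comes from this test library):

// Hypothetical C++-only exercise of SumReturnStruct9Uint8.
static Struct9Uint8 EchoStruct9Uint8(Struct9Uint8* in) {
  return *in;  // 9 bytes returned by value: an awkward size for most ABIs
}

int64_t DemoSumReturnStruct9Uint8() {
  Struct9Uint8 in = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  return SumReturnStruct9Uint8(&EchoStruct9Uint8, &in);  // 45
}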

runtime/vm/compiler/backend/flow_graph_compiler.cc

Lines changed: 29 additions & 29 deletions
@@ -3292,6 +3292,13 @@ void FlowGraphCompiler::EmitNativeMove(
   if (src_payload_type.IsInt() && dst_payload_type.IsInt() &&
       (src_payload_size != src_container_size ||
        dst_payload_size != dst_container_size)) {
+    if (source.IsStack() && src_container_size > src_payload_size) {
+      // Shrink loads since all loads are extending.
+      return EmitNativeMove(
+          destination,
+          source.WithOtherNativeType(zone_, src_payload_type, src_payload_type),
+          temp);
+    }
     if (src_payload_size <= dst_payload_size &&
         src_container_size >= dst_container_size) {
       // The upper bits of the source are already properly sign or zero
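The added early-out is the core idea of the fix: when the source is on the stack and its container is wider than its payload, re-type the source so only the payload bytes are loaded. Since every load emitted from here on extends to full register width, the register ends up holding the payload correctly extended, exactly as the wider move would have produced, without touching bytes past the payload. A plain-C++ model of that equivalence (helper name hypothetical):

#include <cstdint>
#include <cstring>

// Loading 2 payload bytes of an int16 and sign-extending yields the value
// a 4-byte container load of a properly extended container would -- but
// bytes 2..3 are never read, which matters when the payload ends a
// stack-passed struct.
int32_t LoadInt16Extended(const uint8_t* p) {
  int16_t payload;
  std::memcpy(&payload, p, sizeof payload);  // 2-byte load, like ldrsh
  return payload;                            // extends to register width
}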
@@ -3361,32 +3368,6 @@ void FlowGraphCompiler::EmitNativeMove(
     return;
   }
 
-#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
-  // Arm does not support sign extending from a memory location, x86 does.
-  if (sign_or_zero_extend && source.IsStack()) {
-    ASSERT(destination.IsRegisters());
-    const auto& intermediate = destination.WithOtherNativeType(
-        zone_, src_payload_type, src_container_type);
-    EmitNativeMove(intermediate, source, temp);
-    EmitNativeMove(destination, intermediate, temp);
-    return;
-  }
-#endif
-
-  // If we're not sign extending, and we're moving 8 or 16 bits into a
-  // register, upgrade the move to take upper bits of garbage from the
-  // source location. This is the same as leaving the previous garbage in
-  // there.
-  //
-  // TODO(40210): If our assemblers would support moving 1 and 2 bytes into
-  // registers, this code can be removed.
-  if (!sign_or_zero_extend && destination.IsRegisters() &&
-      destination.container_type().SizeInBytes() <= 2) {
-    ASSERT(source.payload_type().IsInt());
-    return EmitNativeMove(destination.WidenTo4Bytes(zone_),
-                          source.WidenTo4Bytes(zone_), temp);
-  }
-
   // Do the simple architecture specific moves.
   EmitNativeMoveArchitecture(destination, source);
 }

@@ -3405,7 +3386,17 @@ void FlowGraphCompiler::EmitMoveToNative(
   } else {
     const auto& src =
         compiler::ffi::NativeLocation::FromLocation(zone_, src_loc, src_type);
-    EmitNativeMove(dst, src, temp);
+    // Deal with sign mismatch caused by lack of kUnboxedUint64 representation.
+    if (src_type == kUnboxedInt64 &&
+        dst.container_type().AsPrimitive().representation() ==
+            compiler::ffi::kUint64) {
+      EmitNativeMove(dst,
+                     src.WithOtherNativeType(zone_, dst.container_type(),
+                                             dst.container_type()),
+                     temp);
+    } else {
+      EmitNativeMove(dst, src, temp);
+    }
   }
 }

@@ -3421,9 +3412,18 @@ void FlowGraphCompiler::EmitMoveFromNative(
       EmitNativeMove(dest_split, src.Split(zone_, 2, i), temp);
     }
   } else {
-    const auto& dest =
+    const auto& dst =
         compiler::ffi::NativeLocation::FromLocation(zone_, dst_loc, dst_type);
-    EmitNativeMove(dest, src, temp);
+    // Deal with sign mismatch caused by lack of kUnboxedUint64 representation.
+    if (dst_type == kUnboxedInt64 &&
+        src.container_type().AsPrimitive().representation() ==
+            compiler::ffi::kUint64) {
+      EmitNativeMove(dst.WithOtherNativeType(zone_, src.container_type(),
+                                             src.container_type()),
+                     src, temp);
+    } else {
+      EmitNativeMove(dst, src, temp);
+    }
   }
 }
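Both EmitMoveToNative and EmitMoveFromNative need the same special case because the VM's unboxed integers are always signed 64-bit; there is no kUnboxedUint64 representation. Re-typing one side to the other's container type turns the move into a plain 64-bit copy, which is sound because int64 and uint64 of the same width share a bit pattern. A trivial sketch of the underlying fact (helper name hypothetical):

#include <cstdint>

// Reinterpreting, not converting: the same 64 bits serve as int64_t on
// the Dart side and uint64_t on the native side, so no extension or
// truncation code is needed for a same-width move.
uint64_t AsNativeUint64(int64_t unboxed) {
  return static_cast<uint64_t>(unboxed);  // bit-identical
}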

runtime/vm/compiler/backend/flow_graph_compiler.h

Lines changed: 4 additions & 0 deletions
@@ -945,6 +945,10 @@ class FlowGraphCompiler : public ValueObject {
   // Architecture specific implementation of simple native moves.
   void EmitNativeMoveArchitecture(const compiler::ffi::NativeLocation& dst,
                                   const compiler::ffi::NativeLocation& src);
+  void EmitNativeLoad(Register dst,
+                      Register base,
+                      intptr_t offset,
+                      compiler::ffi::PrimitiveType type);
 
   void EmitFrameEntry();

runtime/vm/compiler/backend/flow_graph_compiler_arm.cc

Lines changed: 47 additions & 10 deletions
@@ -956,9 +956,9 @@ void FlowGraphCompiler::EmitNativeMoveArchitecture(
     const auto& dst = destination.AsRegisters();
     ASSERT(dst.num_regs() == 1);
     const auto dst_reg = dst.reg_at(0);
+    ASSERT(destination.container_type().SizeInBytes() <= 4);
     if (!sign_or_zero_extend) {
-      ASSERT(dst_size == 4);
-      __ mov(dst_reg, compiler::Operand(src_reg));
+      __ MoveRegister(dst_reg, src_reg);
     } else {
       if (src_payload_type.IsSigned()) {
         __ sbfx(dst_reg, src_reg, 0, src_size * kBitsPerByte);

@@ -977,8 +977,8 @@ void FlowGraphCompiler::EmitNativeMoveArchitecture(
     ASSERT(destination.IsStack());
     const auto& dst = destination.AsStack();
     ASSERT(!sign_or_zero_extend);
-    ASSERT(dst_size <= 4);
-    auto const op_size = BytesToOperandSize(dst_size);
+    auto const op_size =
+        BytesToOperandSize(destination.container_type().SizeInBytes());
     __ StoreToOffset(src.reg_at(0), dst.base_register(),
                      dst.offset_in_bytes(), op_size);
   }

@@ -1036,12 +1036,8 @@ void FlowGraphCompiler::EmitNativeMoveArchitecture(
     const auto& dst = destination.AsRegisters();
     ASSERT(dst.num_regs() == 1);
     const auto dst_reg = dst.reg_at(0);
-    ASSERT(!sign_or_zero_extend);
-    ASSERT(dst_size <= 4);
-    auto const op_size = BytesToOperandSize(dst_size);
-    __ LoadFromOffset(dst_reg, src.base_register(), src.offset_in_bytes(),
-                      op_size);
-
+    EmitNativeLoad(dst_reg, src.base_register(), src.offset_in_bytes(),
+                   src_payload_type.AsPrimitive().representation());
   } else if (destination.IsFpuRegisters()) {
     ASSERT(src_payload_type.Equals(dst_payload_type));
     ASSERT(src_payload_type.IsFloat());

@@ -1066,6 +1062,47 @@ void FlowGraphCompiler::EmitNativeMoveArchitecture(
   }
 }
 
+void FlowGraphCompiler::EmitNativeLoad(Register dst,
+                                       Register base,
+                                       intptr_t offset,
+                                       compiler::ffi::PrimitiveType type) {
+  switch (type) {
+    case compiler::ffi::kInt8:
+      __ LoadFromOffset(dst, base, offset, compiler::kByte);
+      break;
+    case compiler::ffi::kUint8:
+      __ LoadFromOffset(dst, base, offset, compiler::kUnsignedByte);
+      break;
+    case compiler::ffi::kInt16:
+      __ LoadFromOffset(dst, base, offset, compiler::kTwoBytes);
+      break;
+    case compiler::ffi::kUint16:
+      __ LoadFromOffset(dst, base, offset, compiler::kUnsignedTwoBytes);
+      break;
+    case compiler::ffi::kInt32:
+      __ LoadFromOffset(dst, base, offset, compiler::kFourBytes);
+      break;
+    case compiler::ffi::kUint32:
+    case compiler::ffi::kFloat:
+    case compiler::ffi::kHalfDouble:
+      __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
+      break;
+
+    case compiler::ffi::kInt24:
+      __ LoadFromOffset(dst, base, offset, compiler::kUnsignedTwoBytes);
+      __ LoadFromOffset(TMP, base, offset + 2, compiler::kByte);
+      __ orr(dst, dst, compiler::Operand(TMP, LSL, 16));
+      break;
+    case compiler::ffi::kUint24:
+      __ LoadFromOffset(dst, base, offset, compiler::kUnsignedTwoBytes);
+      __ LoadFromOffset(TMP, base, offset + 2, compiler::kUnsignedByte);
+      __ orr(dst, dst, compiler::Operand(TMP, LSL, 16));
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
 void FlowGraphCompiler::LoadBSSEntry(BSS::Relocation relocation,
                                      Register dst,
                                      Register tmp) {
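The composed cases are the reason EmitNativeLoad exists: a 3-byte payload is assembled from a 2-byte load plus a 1-byte load OR-ed in at bit 16, so no 4-byte access can stray past the payload. A C-level model of the two ARM cases (little-endian, as here; helper names hypothetical):

#include <cstdint>

int32_t LoadInt24(const uint8_t* p) {
  uint32_t lo = p[0] | (uint32_t{p[1]} << 8);  // kUnsignedTwoBytes
  int32_t hi = static_cast<int8_t>(p[2]);      // kByte, sign-extending
  return static_cast<int32_t>(lo | (static_cast<uint32_t>(hi) << 16));
}

uint32_t LoadUint24(const uint8_t* p) {
  uint32_t lo = p[0] | (uint32_t{p[1]} << 8);  // kUnsignedTwoBytes
  uint32_t hi = p[2];                          // kUnsignedByte
  return lo | (hi << 16);                      // orr dst, dst, TMP lsl #16
}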

runtime/vm/compiler/backend/flow_graph_compiler_arm64.cc

Lines changed: 89 additions & 32 deletions
@@ -944,34 +944,14 @@ void FlowGraphCompiler::EmitNativeMoveArchitecture(
     const auto& dst = destination.AsRegisters();
     ASSERT(dst.num_regs() == 1);
     const auto dst_reg = dst.reg_at(0);
+    ASSERT(destination.container_type().SizeInBytes() <= 8);
     if (!sign_or_zero_extend) {
-      switch (dst_size) {
-        case 8:
-          __ mov(dst_reg, src_reg);
-          return;
-        case 4:
-          __ movw(dst_reg, src_reg);
-          return;
-        default:
-          UNIMPLEMENTED();
-      }
+      __ MoveRegister(dst_reg, src_reg);
     } else {
-      switch (src_payload_type.AsPrimitive().representation()) {
-        case compiler::ffi::kInt8:  // Sign extend operand.
-          __ sxtb(dst_reg, src_reg);
-          return;
-        case compiler::ffi::kInt16:
-          __ sxth(dst_reg, src_reg);
-          return;
-        case compiler::ffi::kUint8:  // Zero extend operand.
-          __ uxtb(dst_reg, src_reg);
-          return;
-        case compiler::ffi::kUint16:
-          __ uxth(dst_reg, src_reg);
-          return;
-        default:
-          // 32 to 64 bit is covered in IL by Representation conversions.
-          UNIMPLEMENTED();
+      if (src_payload_type.IsSigned()) {
+        __ sbfx(dst_reg, src_reg, 0, src_size * kBitsPerByte);
+      } else {
+        __ ubfx(dst_reg, src_reg, 0, src_size * kBitsPerByte);
       }
     }
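Collapsing the per-width sxtb/sxth/uxtb/uxth cases into sbfx/ubfx also covers the odd payload widths the struct path can now produce. A C-level model of what the two bitfield extracts compute (helper names hypothetical; assumes arithmetic right shift of signed values, as arm64 provides):

#include <cstdint>

int64_t SignExtendLowBytes(uint64_t src, int bytes) {   // sbfx dst, src, 0, 8*bytes
  const int shift = 64 - 8 * bytes;
  return static_cast<int64_t>(src << shift) >> shift;   // arithmetic shift
}

uint64_t ZeroExtendLowBytes(uint64_t src, int bytes) {  // ubfx dst, src, 0, 8*bytes
  const int shift = 64 - 8 * bytes;
  return (src << shift) >> shift;                       // logical shift
}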

@@ -983,7 +963,8 @@ void FlowGraphCompiler::EmitNativeMoveArchitecture(
     ASSERT(destination.IsStack());
     const auto& dst = destination.AsStack();
     ASSERT(!sign_or_zero_extend);
-    auto const op_size = BytesToOperandSize(dst_size);
+    auto const op_size =
+        BytesToOperandSize(destination.container_type().SizeInBytes());
     __ StoreToOffset(src.reg_at(0), dst.base_register(),
                      dst.offset_in_bytes(), op_size);
   }
@@ -1026,11 +1007,8 @@ void FlowGraphCompiler::EmitNativeMoveArchitecture(
     const auto& dst = destination.AsRegisters();
     ASSERT(dst.num_regs() == 1);
    const auto dst_reg = dst.reg_at(0);
-    ASSERT(!sign_or_zero_extend);
-    auto const op_size = BytesToOperandSize(dst_size);
-    __ LoadFromOffset(dst_reg, src.base_register(), src.offset_in_bytes(),
-                      op_size);
-
+    EmitNativeLoad(dst_reg, src.base_register(), src.offset_in_bytes(),
+                   src_payload_type.AsPrimitive().representation());
   } else if (destination.IsFpuRegisters()) {
     ASSERT(src_payload_type.Equals(dst_payload_type));
     ASSERT(src_payload_type.IsFloat());
@@ -1055,6 +1033,85 @@ void FlowGraphCompiler::EmitNativeMoveArchitecture(
   }
 }
 
+void FlowGraphCompiler::EmitNativeLoad(Register dst,
+                                       Register base,
+                                       intptr_t offset,
+                                       compiler::ffi::PrimitiveType type) {
+  switch (type) {
+    case compiler::ffi::kInt8:
+      __ LoadFromOffset(dst, base, offset, compiler::kByte);
+      break;
+    case compiler::ffi::kUint8:
+      __ LoadFromOffset(dst, base, offset, compiler::kUnsignedByte);
+      break;
+    case compiler::ffi::kInt16:
+      __ LoadFromOffset(dst, base, offset, compiler::kTwoBytes);
+      break;
+    case compiler::ffi::kUint16:
+      __ LoadFromOffset(dst, base, offset, compiler::kUnsignedTwoBytes);
+      break;
+    case compiler::ffi::kInt32:
+      __ LoadFromOffset(dst, base, offset, compiler::kFourBytes);
+      break;
+    case compiler::ffi::kUint32:
+    case compiler::ffi::kFloat:
+      __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
+      break;
+    case compiler::ffi::kInt64:
+    case compiler::ffi::kUint64:
+    case compiler::ffi::kDouble:
+      __ LoadFromOffset(dst, base, offset, compiler::kEightBytes);
+      break;
+
+    case compiler::ffi::kInt24:
+      __ LoadFromOffset(dst, base, offset, compiler::kUnsignedTwoBytes);
+      __ LoadFromOffset(TMP, base, offset + 2, compiler::kByte);
+      __ orr(dst, dst, compiler::Operand(TMP, LSL, 16));
+      break;
+    case compiler::ffi::kUint24:
+      __ LoadFromOffset(dst, base, offset, compiler::kUnsignedTwoBytes);
+      __ LoadFromOffset(TMP, base, offset + 2, compiler::kUnsignedByte);
+      __ orr(dst, dst, compiler::Operand(TMP, LSL, 16));
+      break;
+    case compiler::ffi::kInt40:
+      __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
+      __ LoadFromOffset(TMP, base, offset + 4, compiler::kByte);
+      __ orr(dst, dst, compiler::Operand(TMP, LSL, 32));
+      break;
+    case compiler::ffi::kUint40:
+      __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
+      __ LoadFromOffset(TMP, base, offset + 4, compiler::kUnsignedByte);
+      __ orr(dst, dst, compiler::Operand(TMP, LSL, 32));
+      break;
+    case compiler::ffi::kInt48:
+      __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
+      __ LoadFromOffset(TMP, base, offset + 4, compiler::kTwoBytes);
+      __ orr(dst, dst, compiler::Operand(TMP, LSL, 32));
+      break;
+    case compiler::ffi::kUint48:
+      __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
+      __ LoadFromOffset(TMP, base, offset + 4, compiler::kUnsignedTwoBytes);
+      __ orr(dst, dst, compiler::Operand(TMP, LSL, 32));
+      break;
+    case compiler::ffi::kInt56:
+      __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
+      __ LoadFromOffset(TMP, base, offset + 4, compiler::kUnsignedTwoBytes);
+      __ orr(dst, dst, compiler::Operand(TMP, LSL, 32));
+      __ LoadFromOffset(TMP, base, offset + 6, compiler::kByte);
+      __ orr(dst, dst, compiler::Operand(TMP, LSL, 48));
+      break;
+    case compiler::ffi::kUint56:
+      __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
+      __ LoadFromOffset(TMP, base, offset + 4, compiler::kUnsignedTwoBytes);
+      __ orr(dst, dst, compiler::Operand(TMP, LSL, 32));
+      __ LoadFromOffset(TMP, base, offset + 6, compiler::kUnsignedByte);
+      __ orr(dst, dst, compiler::Operand(TMP, LSL, 48));
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
 void FlowGraphCompiler::LoadBSSEntry(BSS::Relocation relocation,
                                      Register dst,
                                      Register tmp) {
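As a cross-check of the trickiest case, here is a C-level model (helper name hypothetical; little-endian, as on arm64) of what the kInt56 sequence computes: three in-bounds loads of 4, 2, and 1 bytes OR-ed together at bit offsets 0, 32, and 48, with the top byte's sign extension supplying bits 56..63, and never an 8-byte access that could cross into an unmapped page:

#include <cstdint>

int64_t LoadInt56(const uint8_t* p) {
  uint64_t v = p[0] | (uint64_t{p[1]} << 8) | (uint64_t{p[2]} << 16) |
               (uint64_t{p[3]} << 24);                  // kUnsignedFourBytes
  v |= (uint64_t{p[4]} | (uint64_t{p[5]} << 8)) << 32;  // kUnsignedTwoBytes
  const int64_t top = static_cast<int8_t>(p[6]);        // kByte, sign-extends
  v |= static_cast<uint64_t>(top) << 48;                // orr ..., lsl #48
  return static_cast<int64_t>(v);
}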
