diff --git a/lib/IRGen/GenClass.cpp b/lib/IRGen/GenClass.cpp
index 6e371750f7465..26de51e966f37 100644
--- a/lib/IRGen/GenClass.cpp
+++ b/lib/IRGen/GenClass.cpp
@@ -102,6 +102,10 @@ namespace {
     StructLayout *createLayoutWithTailElems(IRGenModule &IGM,
                                             SILType classType,
                                             ArrayRef<SILType> tailTypes) const;
+
+    using HeapTypeInfo::initialize;
+    void initialize(IRGenFunction &IGF, Explosion &e, Address addr,
+                    bool isOutlined) const override;
   };
 } // end anonymous namespace
 
@@ -483,6 +487,29 @@ ClassTypeInfo::getClassLayout(IRGenModule &IGM, SILType classType,
   return *Layout;
 }
 
+void ClassTypeInfo::initialize(IRGenFunction &IGF, Explosion &src, Address addr,
+                               bool isOutlined) const {
+  // If the address is a pointer to the exploded type, we can simply emit a
+  // store and be done.
+  auto *exploded = src.getAll().front();
+  if (exploded->getType() == addr->getType()->getPointerElementType()) {
+    IGF.Builder.CreateStore(exploded, addr);
+    (void)src.claimNext();
+    return;
+  }
+
+  // If both have the same (address) type, just emit a memcpy.
+  if (exploded->getType() == addr->getType()) {
+    IGF.emitMemCpy(addr.getAddress(), exploded, getFixedSize(),
+                   addr.getAlignment());
+    (void)src.claimNext();
+    return;
+  }
+
+  // Otherwise, fall back to the default implementation.
+  HeapTypeInfo::initialize(IGF, src, addr, isOutlined);
+}
+
 /// Cast the base to i8*, apply the given inbounds offset (in bytes,
 /// as a size_t), and cast to a pointer to the given type.
 llvm::Value *IRGenFunction::emitByteOffsetGEP(llvm::Value *base,
diff --git a/lib/IRGen/IRGenSIL.cpp b/lib/IRGen/IRGenSIL.cpp
index c3b2fd60cc206..27c392c80a370 100644
--- a/lib/IRGen/IRGenSIL.cpp
+++ b/lib/IRGen/IRGenSIL.cpp
@@ -877,9 +877,7 @@ class IRGenSILFunction :
   void visitDestroyValueInst(DestroyValueInst *i);
   void visitAutoreleaseValueInst(AutoreleaseValueInst *i);
   void visitSetDeallocatingInst(SetDeallocatingInst *i);
-  void visitObjectInst(ObjectInst *i) {
-    llvm_unreachable("object instruction cannot appear in a function");
-  }
+  void visitObjectInst(ObjectInst *i);
   void visitStructInst(StructInst *i);
   void visitTupleInst(TupleInst *i);
   void visitEnumInst(EnumInst *i);
@@ -3450,6 +3448,51 @@ void IRGenSILFunction::visitDestroyValueInst(swift::DestroyValueInst *i) {
       .consume(*this, in, getDefaultAtomicity());
 }
 
+void IRGenSILFunction::visitObjectInst(swift::ObjectInst *i) {
+  SILType objType = i->getType().getObjectType();
+  // TODO: Generic classes aren't allowed yet, but we could support them in
+  // the future.
+  assert((!objType.getClassOrBoundGenericClass()->getAsGenericContext() ||
+          !objType.getClassOrBoundGenericClass()
+               ->getAsGenericContext()
+               ->isGeneric()) &&
+         "Generics are not yet supported");
+
+  const auto &typeInfo = cast<FixedTypeInfo>(getTypeInfo(objType));
+  llvm::Value *metadata = emitClassHeapMetadataRef(
+      *this, objType.getASTType(), MetadataValueType::TypeMetadata,
+      MetadataState::Complete);
+  // TODO: This shouldn't be a stack allocation, but to stay compatible with
+  // alloc_ref the result has to be a pointer.
+  Address alloca =
+      createAlloca(typeInfo.getStorageType()->getPointerElementType(),
+                   typeInfo.getFixedAlignment());
+  auto classAddr = Builder.CreateBitCast(alloca, IGM.RefCountedPtrTy);
+  auto classVal = emitInitStackObjectCall(metadata, classAddr.getAddress(),
+                                          "reference.new");
+  classVal = Builder.CreateBitCast(classVal, typeInfo.getStorageType());
+  // Match each element of the object instruction to the corresponding
+  // stored property of the class and initialize it in place.
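+  // The leading operands of an object instruction correspond to the class's
+  // stored properties in declaration order.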
+  auto props =
+      i->getType().getClassOrBoundGenericClass()->getStoredProperties();
+  auto propIter = props.begin();
+  for (SILValue elt : i->getAllElements()) {
+    VarDecl *prop = *propIter++;
+    auto elementExplosion = getLoweredExplosion(elt);
+    auto propType = IGM.getLoweredType(prop->getType());
+    const auto &propTypeInfo = cast<LoadableTypeInfo>(getTypeInfo(propType));
+    auto propAddr = projectPhysicalClassMemberAddress(*this, classVal, objType,
+                                                      propType, prop);
+    propTypeInfo.initialize(*this, elementExplosion, propAddr, false);
+  }
+
+  Explosion e;
+  e.add(classVal);
+  setLoweredExplosion(i, e);
+}
+
 void IRGenSILFunction::visitStructInst(swift::StructInst *i) {
   Explosion out;
   for (SILValue elt : i->getElements())
diff --git a/lib/SIL/SILVerifier.cpp b/lib/SIL/SILVerifier.cpp
index d7660d79bf4c8..05f1ddee5fdaf 100644
--- a/lib/SIL/SILVerifier.cpp
+++ b/lib/SIL/SILVerifier.cpp
@@ -1809,8 +1809,12 @@ class SILVerifier : public SILVerifierBase<SILVerifier> {
     checkGlobalAccessInst(GVI);
   }
 
-  void checkObjectInst(ObjectInst *) {
-    require(false, "object instruction is only allowed in a static initializer");
+  void checkObjectInst(ObjectInst *objectInst) {
+    auto *classDecl = objectInst->getType().getClassOrBoundGenericClass();
+    require(classDecl, "Type of object instruction must be a class");
+    require(!classDecl->getAsGenericContext() ||
+                !classDecl->getAsGenericContext()->isGeneric(),
+            "Generics are not yet supported in object instructions");
   }
 
   void checkIntegerLiteralInst(IntegerLiteralInst *ILI) {
diff --git a/lib/SILOptimizer/Transforms/DeadStoreElimination.cpp b/lib/SILOptimizer/Transforms/DeadStoreElimination.cpp
index 266859152516a..e371e55ae7a3f 100644
--- a/lib/SILOptimizer/Transforms/DeadStoreElimination.cpp
+++ b/lib/SILOptimizer/Transforms/DeadStoreElimination.cpp
@@ -1038,6 +1038,13 @@ void DSEContext::processLoadInst(SILInstruction *I, DSEKind Kind) {
 
 void DSEContext::processStoreInst(SILInstruction *I, DSEKind Kind) {
   auto *SI = cast<StoreInst>(I);
+  // TODO: Stores whose source is an object instruction are currently treated
+  // as dead and removed when they shouldn't be; bail out until the underlying
+  // issue is understood.
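+  // Note that this checks the store's source, not its destination: stores
+  // into the object's fields are unaffected by this early exit.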
+  if (isa<ObjectInst>(SI->getSrc()))
+    return;
   processWrite(I, SI->getSrc(), SI->getDest(), Kind);
 }
diff --git a/lib/SILOptimizer/Utils/SILInliner.cpp b/lib/SILOptimizer/Utils/SILInliner.cpp
index cf760936987e8..8fe65bcbcf448 100644
--- a/lib/SILOptimizer/Utils/SILInliner.cpp
+++ b/lib/SILOptimizer/Utils/SILInliner.cpp
@@ -911,7 +911,7 @@ InlineCost swift::instructionInlineCost(SILInstruction &I) {
   case SILInstructionKind::MarkUninitializedInst:
     llvm_unreachable("not valid in canonical sil");
   case SILInstructionKind::ObjectInst:
-    llvm_unreachable("not valid in a function");
+    return InlineCost::Free;
   }
 
   llvm_unreachable("Unhandled ValueKind in switch.");
diff --git a/test/IRGen/object.sil b/test/IRGen/object.sil
new file mode 100644
index 0000000000000..b25edadfe9be8
--- /dev/null
+++ b/test/IRGen/object.sil
@@ -0,0 +1,105 @@
+// RUN: %swift -disable-legacy-type-info -module-name run %s -emit-ir -o - | %FileCheck %s
+
+// REQUIRES: CODEGENERATOR=X86
+
+sil_stage canonical
+
+import Builtin
+import Swift
+import SwiftShims
+
+class Foo {
+  @_hasStorage @_hasInitialValue var x: Int { get set }
+  @objc deinit
+  init()
+}
+
+class Bar {
+  @_hasStorage @_hasInitialValue var x: Int { get set }
+  @_hasStorage @_hasInitialValue var y: Int { get set }
+  @_hasStorage @_hasInitialValue var z: Int { get set }
+  @objc deinit
+  init()
+}
+
+// CHECK-LABEL: define swiftcc i64 @foouser
+// CHECK: entry
+// CHECK: [[ALLOC:%.*]] = alloca %T3run3FooC
+// CHECK: [[META:%.*]] = call swiftcc %swift.metadata_response @"$s3run3FooCMa"
+// CHECK: [[META_E:%.*]] = extractvalue %swift.metadata_response [[META]]
+// CHECK: [[REF:%.*]] = bitcast %T3run3FooC* [[ALLOC]] to %swift.refcounted*
+// CHECK: [[OBJ_R:%.*]] = call %swift.refcounted* @swift_initStackObject({{.*}}[[META_E]], {{.*}}[[REF]])
+// CHECK: [[OBJ:%.*]] = bitcast %swift.refcounted* [[OBJ_R]] to %T3run3FooC*
+
+// CHECK: [[X_PTR:%.*]] = getelementptr inbounds %T3run3FooC, %T3run3FooC* [[OBJ]], i32 0, i32 1
+// CHECK: [[X:%.*]] = getelementptr inbounds %TSi, %TSi* [[X_PTR]], i32 0, i32 0
+// CHECK: store i64 0, i64* [[X]]
+
+// CHECK: [[X_PTR1:%.*]] = getelementptr inbounds %T3run3FooC, %T3run3FooC* [[OBJ]], i32 0, i32 1
+// CHECK: [[X1:%.*]] = getelementptr inbounds %TSi, %TSi* [[X_PTR1]], i32 0, i32 0
+// CHECK: [[X_VAL:%.*]] = load i64, i64* [[X1]]
+// CHECK: [[ADDED:%.*]] = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 [[X_VAL]], i64 1)
+// CHECK: [[SUM:%.*]] = extractvalue { i64, i1 } [[ADDED]], 0
+
+// CHECK: [[X2:%.*]] = getelementptr inbounds %TSi, %TSi* [[X_PTR1]], i32 0, i32 0
+// CHECK: store i64 [[SUM]], i64* [[X2]]
+// CHECK: [[X3:%.*]] = getelementptr inbounds %TSi, %TSi* [[X_PTR1]], i32 0, i32 0
+// CHECK: [[OUT:%.*]] = load i64, i64* [[X3]]
+// CHECK: ret i64 [[OUT]]
+sil @foouser : $@convention(thin) () -> Int {
+bb0:
+  %1 = integer_literal $Builtin.Int64, 0          // user: %2
+  %2 = struct $Int (%1 : $Builtin.Int64)          // user: %3
+  %3 = object $Foo (%2 : $Int)                    // user: %4
+  %4 = ref_element_addr %3 : $Foo, #Foo.x         // users: %17, %5
+  %5 = begin_access [modify] [dynamic] [no_nested_conflict] %4 : $*Int // users: %7, %15, %16
+  %6 = integer_literal $Builtin.Int64, 1          // user: %10
+  %7 = struct_element_addr %5 : $*Int, #Int._value // user: %8
+  %8 = load %7 : $*Builtin.Int64                  // user: %10
+  %9 = integer_literal $Builtin.Int1, -1          // user: %10
+  %10 = builtin "sadd_with_overflow_Int64"(%8 : $Builtin.Int64, %6 : $Builtin.Int64, %9 : $Builtin.Int1) : $(Builtin.Int64, Builtin.Int1) // users: %12, %11
+  %11 = tuple_extract %10 : $(Builtin.Int64, Builtin.Int1), 0 // user: %14
+  %12 = tuple_extract %10 : $(Builtin.Int64, Builtin.Int1), 1 // user: %13
+  cond_fail %12 : $Builtin.Int1, "arithmetic overflow"        // id: %13
+  %14 = struct $Int (%11 : $Builtin.Int64)                    // user: %15
+  store %14 to %5 : $*Int                                     // id: %15
+  end_access %5 : $*Int                                       // id: %16
+  %17 = begin_access [read] [static] [no_nested_conflict] %4 : $*Int // users: %19, %18
+  %18 = load %17 : $*Int                                      // user: %20
+  end_access %17 : $*Int                                      // id: %19
+  return %18 : $Int                                           // id: %20
+} // end sil function 'foouser'
+
+sil_vtable Foo {
+
+}
+
+// CHECK-LABEL: define swiftcc i64 @baruser
+// CHECK: [[ALLOC:%.*]] = alloca %T3run3BarC
+// CHECK: [[META:%.*]] = call swiftcc %swift.metadata_response @"$s3run3BarCMa"
+// CHECK: [[META_E:%.*]] = extractvalue %swift.metadata_response [[META]]
+// CHECK: [[REF:%.*]] = bitcast %T3run3BarC* [[ALLOC]] to %swift.refcounted*
+// CHECK: [[OBJ_R:%.*]] = call %swift.refcounted* @swift_initStackObject({{.*}}[[META_E]], {{.*}}[[REF]])
+// CHECK: [[OBJ:%.*]] = bitcast %swift.refcounted* [[OBJ_R]] to %T3run3BarC*
+// CHECK: [[X_PTR:%.*]] = getelementptr inbounds %T3run3BarC, %T3run3BarC* [[OBJ]], i32 0, i32 1
+// CHECK: [[X:%.*]] = getelementptr inbounds %TSi, %TSi* [[X_PTR]], i32 0, i32 0
+// CHECK: store i64 0, i64* [[X]]
+// CHECK: [[Y_PTR:%.*]] = getelementptr inbounds %T3run3BarC, %T3run3BarC* [[OBJ]], i32 0, i32 2
+// CHECK: [[Y:%.*]] = getelementptr inbounds %TSi, %TSi* [[Y_PTR]], i32 0, i32 0
+// CHECK: store i64 0, i64* [[Y]]
+// CHECK: [[Z_PTR:%.*]] = getelementptr inbounds %T3run3BarC, %T3run3BarC* [[OBJ]], i32 0, i32 3
+// CHECK: [[Z:%.*]] = getelementptr inbounds %TSi, %TSi* [[Z_PTR]], i32 0, i32 0
+// CHECK: store i64 0, i64* [[Z]]
+sil @baruser : $@convention(thin) () -> Int {
+bb0:
+  %1 = integer_literal $Builtin.Int64, 0
+  %2 = struct $Int (%1 : $Builtin.Int64)
+  %3 = object $Bar (%2 : $Int, %2 : $Int, %2 : $Int)
+
+  return %2 : $Int
+}
+
+
+sil_vtable Bar {
+
+}
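
Illustrative sketch (not part of the patch): the rewritten checkObjectInst still
rejects an object instruction whose type is a generic class. The Box class and
the @boxuser function below are hypothetical names, shown only to demonstrate
the new "Generics are not yet supported in object instructions" verifier rule.

class Box<T> {
  @_hasStorage var t: T
  init(t: T)
}

sil @boxuser : $@convention(thin) () -> () {
bb0:
  %0 = integer_literal $Builtin.Int64, 0
  %1 = struct $Int (%0 : $Builtin.Int64)
  // Rejected by the verifier: $Box<Int> is a bound generic class type.
  %2 = object $Box<Int> (%1 : $Int)
  %3 = tuple ()
  return %3 : $()
}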