diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
index ba65c8205a500..1bcb891a2504a 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
@@ -22,11 +22,11 @@ use rustc_codegen_ssa::traits::{
 };
 use rustc_middle::bug;
 #[cfg(feature = "master")]
-use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
+use rustc_middle::ty::layout::FnAbiOf;
 use rustc_middle::ty::layout::{HasTypingEnv, LayoutOf};
 use rustc_middle::ty::{self, Instance, Ty};
 use rustc_span::{Span, Symbol, sym};
-use rustc_target::callconv::{ArgAbi, FnAbi, PassMode};
+use rustc_target::callconv::{ArgAbi, PassMode};
 use rustc_target::spec::PanicStrategy;
 
 #[cfg(feature = "master")]
@@ -200,9 +200,8 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
     fn codegen_intrinsic_call(
         &mut self,
         instance: Instance<'tcx>,
-        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
         args: &[OperandRef<'tcx, RValue<'gcc>>],
-        llresult: RValue<'gcc>,
+        result: PlaceRef<'tcx, RValue<'gcc>>,
         span: Span,
     ) -> Result<(), Instance<'tcx>> {
         let tcx = self.tcx;
@@ -221,7 +220,6 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
         let name_str = name.as_str();
 
         let llret_ty = self.layout_of(ret_ty).gcc_type(self);
-        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
 
         let simple = get_simple_intrinsic(self, name);
         let simple_func = get_simple_function(self, name);
@@ -271,7 +269,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
                     args[0].immediate(),
                     args[1].immediate(),
                     args[2].immediate(),
-                    llresult,
+                    result,
                 );
                 return Ok(());
             }
@@ -286,17 +284,10 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
             }
 
             sym::volatile_load | sym::unaligned_volatile_load => {
-                let tp_ty = fn_args.type_at(0);
                 let ptr = args[0].immediate();
-                let layout = self.layout_of(tp_ty);
-                let load = if let PassMode::Cast { cast: ref ty, pad_i32: _ } = fn_abi.ret.mode {
-                    let gcc_ty = ty.gcc_type(self);
-                    self.volatile_load(gcc_ty, ptr)
-                } else {
-                    self.volatile_load(layout.gcc_type(self), ptr)
-                };
+                let load = self.volatile_load(result.layout.gcc_type(self), ptr);
                 // TODO(antoyo): set alignment.
-                if let BackendRepr::Scalar(scalar) = layout.backend_repr {
+                if let BackendRepr::Scalar(scalar) = result.layout.backend_repr {
                     self.to_immediate_scalar(load, scalar)
                 } else {
                     load
@@ -511,16 +502,14 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
             _ => return Err(Instance::new_raw(instance.def_id(), instance.args)),
         };
 
-        if !fn_abi.ret.is_ignore() {
-            if let PassMode::Cast { cast: ref ty, .. } = fn_abi.ret.mode {
-                let ptr_llty = self.type_ptr_to(ty.gcc_type(self));
-                let ptr = self.pointercast(result.val.llval, ptr_llty);
-                self.store(value, ptr, result.val.align);
-            } else {
-                OperandRef::from_immediate_or_packed_pair(self, value, result.layout)
-                    .val
-                    .store(self, result);
-            }
+        if result.layout.ty.is_bool() {
+            OperandRef::from_immediate_or_packed_pair(self, value, result.layout)
+                .val
+                .store(self, result);
+        } else if !result.layout.ty.is_unit() {
+            let ptr_llty = self.type_ptr_to(result.layout.gcc_type(self));
+            let ptr = self.pointercast(result.val.llval, ptr_llty);
+            self.store(value, ptr, result.val.align);
         }
         Ok(())
     }
@@ -1230,14 +1219,13 @@ fn try_intrinsic<'a, 'b, 'gcc, 'tcx>(
     try_func: RValue<'gcc>,
     data: RValue<'gcc>,
     _catch_func: RValue<'gcc>,
-    dest: RValue<'gcc>,
+    dest: PlaceRef<'tcx, RValue<'gcc>>,
 ) {
     if bx.sess().panic_strategy() == PanicStrategy::Abort {
         bx.call(bx.type_void(), None, None, try_func, &[data], None, None);
         // Return 0 unconditionally from the intrinsic call;
         // we can never unwind.
-        let ret_align = bx.tcx.data_layout.i32_align.abi;
-        bx.store(bx.const_i32(0), dest, ret_align);
+        OperandValue::Immediate(bx.const_i32(0)).store(bx, dest);
     } else {
         if wants_msvc_seh(bx.sess()) {
            unimplemented!();
@@ -1261,12 +1249,12 @@ fn try_intrinsic<'a, 'b, 'gcc, 'tcx>(
 // functions in play. By calling a shim we're guaranteed that our shim will have
 // the right personality function.
 #[cfg(feature = "master")]
-fn codegen_gnu_try<'gcc>(
-    bx: &mut Builder<'_, 'gcc, '_>,
+fn codegen_gnu_try<'gcc, 'tcx>(
+    bx: &mut Builder<'_, 'gcc, 'tcx>,
     try_func: RValue<'gcc>,
     data: RValue<'gcc>,
     catch_func: RValue<'gcc>,
-    dest: RValue<'gcc>,
+    dest: PlaceRef<'tcx, RValue<'gcc>>,
 ) {
     let cx: &CodegenCx<'gcc, '_> = bx.cx;
     let (llty, func) = get_rust_try_fn(cx, &mut |mut bx| {
@@ -1322,8 +1310,7 @@ fn codegen_gnu_try<'gcc>(
     // Note that no invoke is used here because by definition this function
     // can't panic (that's what it's catching).
     let ret = bx.call(llty, None, None, func, &[try_func, data, catch_func], None, None);
-    let i32_align = bx.tcx().data_layout.i32_align.abi;
-    bx.store(ret, dest, i32_align);
+    OperandValue::Immediate(ret).store(bx, dest);
 }
 
 // Helper function used to get a handle to the `__rust_try` function used to
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 5ca5737529229..e8629aeebb95a 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -15,11 +15,10 @@ use rustc_middle::ty::{self, GenericArgsRef, Ty};
 use rustc_middle::{bug, span_bug};
 use rustc_span::{Span, Symbol, sym};
 use rustc_symbol_mangling::mangle_internal_symbol;
-use rustc_target::callconv::{FnAbi, PassMode};
 use rustc_target::spec::{HasTargetSpec, PanicStrategy};
 use tracing::debug;
 
-use crate::abi::{FnAbiLlvmExt, LlvmType};
+use crate::abi::FnAbiLlvmExt;
 use crate::builder::Builder;
 use crate::context::CodegenCx;
 use crate::llvm::{self, Metadata};
@@ -165,9 +164,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
     fn codegen_intrinsic_call(
         &mut self,
         instance: ty::Instance<'tcx>,
-        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
         args: &[OperandRef<'tcx, &'ll Value>],
-        llresult: &'ll Value,
+        result: PlaceRef<'tcx, &'ll Value>,
         span: Span,
     ) -> Result<(), ty::Instance<'tcx>> {
         let tcx = self.tcx;
@@ -184,7 +182,6 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
         let name = tcx.item_name(def_id);
 
         let llret_ty = self.layout_of(ret_ty).llvm_type(self);
-        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
 
         let simple = get_simple_intrinsic(self, name);
         let llval = match name {
@@ -255,7 +252,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     args[0].immediate(),
                     args[1].immediate(),
                     args[2].immediate(),
-                    llresult,
+                    result,
                 );
                 return Ok(());
             }
@@ -264,7 +261,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 self.call_intrinsic("llvm.va_copy", &[args[0].immediate(), args[1].immediate()])
             }
             sym::va_arg => {
-                match fn_abi.ret.layout.backend_repr {
+                match result.layout.backend_repr {
                     BackendRepr::Scalar(scalar) => {
                         match scalar.primitive() {
                             Primitive::Int(..) => {
@@ -299,18 +296,12 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             }
 
             sym::volatile_load | sym::unaligned_volatile_load => {
-                let tp_ty = fn_args.type_at(0);
                 let ptr = args[0].immediate();
-                let load = if let PassMode::Cast { cast: ty, pad_i32: _ } = &fn_abi.ret.mode {
-                    let llty = ty.llvm_type(self);
-                    self.volatile_load(llty, ptr)
-                } else {
-                    self.volatile_load(self.layout_of(tp_ty).llvm_type(self), ptr)
-                };
+                let load = self.volatile_load(result.layout.llvm_type(self), ptr);
                 let align = if name == sym::unaligned_volatile_load {
                     1
                 } else {
-                    self.align_of(tp_ty).bytes() as u32
+                    result.layout.align.abi.bytes() as u32
                 };
                 unsafe {
                     llvm::LLVMSetAlignment(load, align);
@@ -629,14 +620,12 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             }
         };
 
-        if !fn_abi.ret.is_ignore() {
-            if let PassMode::Cast { .. } = &fn_abi.ret.mode {
-                self.store(llval, result.val.llval, result.val.align);
-            } else {
-                OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
-                    .val
-                    .store(self, result);
-            }
+        if result.layout.ty.is_bool() {
+            OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
+                .val
+                .store(self, result);
+        } else if !result.layout.ty.is_unit() {
+            self.store_to_place(llval, result.val);
         }
         Ok(())
     }
@@ -688,20 +677,19 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
     }
 }
 
-fn catch_unwind_intrinsic<'ll>(
-    bx: &mut Builder<'_, 'll, '_>,
+fn catch_unwind_intrinsic<'ll, 'tcx>(
+    bx: &mut Builder<'_, 'll, 'tcx>,
     try_func: &'ll Value,
     data: &'ll Value,
     catch_func: &'ll Value,
-    dest: &'ll Value,
+    dest: PlaceRef<'tcx, &'ll Value>,
 ) {
     if bx.sess().panic_strategy() == PanicStrategy::Abort {
         let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
         bx.call(try_func_ty, None, None, try_func, &[data], None, None);
         // Return 0 unconditionally from the intrinsic call;
         // we can never unwind.
-        let ret_align = bx.tcx().data_layout.i32_align.abi;
-        bx.store(bx.const_i32(0), dest, ret_align);
+        OperandValue::Immediate(bx.const_i32(0)).store(bx, dest);
     } else if wants_msvc_seh(bx.sess()) {
         codegen_msvc_try(bx, try_func, data, catch_func, dest);
     } else if wants_wasm_eh(bx.sess()) {
@@ -720,12 +708,12 @@ fn catch_unwind_intrinsic<'ll>(
 // instructions are meant to work for all targets, as of the time of this
 // writing, however, LLVM does not recommend the usage of these new instructions
 // as the old ones are still more optimized.
-fn codegen_msvc_try<'ll>(
-    bx: &mut Builder<'_, 'll, '_>,
+fn codegen_msvc_try<'ll, 'tcx>(
+    bx: &mut Builder<'_, 'll, 'tcx>,
     try_func: &'ll Value,
     data: &'ll Value,
     catch_func: &'ll Value,
-    dest: &'ll Value,
+    dest: PlaceRef<'tcx, &'ll Value>,
 ) {
     let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
         bx.set_personality_fn(bx.eh_personality());
@@ -865,17 +853,16 @@ fn codegen_msvc_try<'ll>(
     // Note that no invoke is used here because by definition this function
     // can't panic (that's what it's catching).
     let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
-    let i32_align = bx.tcx().data_layout.i32_align.abi;
-    bx.store(ret, dest, i32_align);
+    OperandValue::Immediate(ret).store(bx, dest);
 }
 
 // WASM's definition of the `rust_try` function.
-fn codegen_wasm_try<'ll>(
-    bx: &mut Builder<'_, 'll, '_>,
+fn codegen_wasm_try<'ll, 'tcx>(
+    bx: &mut Builder<'_, 'll, 'tcx>,
     try_func: &'ll Value,
     data: &'ll Value,
     catch_func: &'ll Value,
-    dest: &'ll Value,
+    dest: PlaceRef<'tcx, &'ll Value>,
 ) {
     let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
         bx.set_personality_fn(bx.eh_personality());
@@ -939,8 +926,7 @@ fn codegen_wasm_try<'ll>(
     // Note that no invoke is used here because by definition this function
     // can't panic (that's what it's catching).
     let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
-    let i32_align = bx.tcx().data_layout.i32_align.abi;
-    bx.store(ret, dest, i32_align);
+    OperandValue::Immediate(ret).store(bx, dest);
 }
 
 // Definition of the standard `try` function for Rust using the GNU-like model
@@ -954,12 +940,12 @@ fn codegen_wasm_try<'ll>(
 // function calling it, and that function may already have other personality
 // functions in play. By calling a shim we're guaranteed that our shim will have
 // the right personality function.
-fn codegen_gnu_try<'ll>(
-    bx: &mut Builder<'_, 'll, '_>,
+fn codegen_gnu_try<'ll, 'tcx>(
+    bx: &mut Builder<'_, 'll, 'tcx>,
     try_func: &'ll Value,
     data: &'ll Value,
     catch_func: &'ll Value,
-    dest: &'ll Value,
+    dest: PlaceRef<'tcx, &'ll Value>,
 ) {
     let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
         // Codegens the shims described above:
@@ -1006,19 +992,18 @@ fn codegen_gnu_try<'ll>(
     // Note that no invoke is used here because by definition this function
     // can't panic (that's what it's catching).
     let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
-    let i32_align = bx.tcx().data_layout.i32_align.abi;
-    bx.store(ret, dest, i32_align);
+    OperandValue::Immediate(ret).store(bx, dest);
 }
 
 // Variant of codegen_gnu_try used for emscripten where Rust panics are
 // implemented using C++ exceptions. Here we use exceptions of a specific type
 // (`struct rust_panic`) to represent Rust panics.
-fn codegen_emcc_try<'ll>(
-    bx: &mut Builder<'_, 'll, '_>,
+fn codegen_emcc_try<'ll, 'tcx>(
+    bx: &mut Builder<'_, 'll, 'tcx>,
     try_func: &'ll Value,
     data: &'ll Value,
     catch_func: &'ll Value,
-    dest: &'ll Value,
+    dest: PlaceRef<'tcx, &'ll Value>,
 ) {
     let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
         // Codegens the shims described above:
@@ -1089,8 +1074,7 @@ fn codegen_emcc_try<'ll>(
     // Note that no invoke is used here because by definition this function
     // can't panic (that's what it's catching).
     let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
-    let i32_align = bx.tcx().data_layout.i32_align.abi;
-    bx.store(ret, dest, i32_align);
+    OperandValue::Immediate(ret).store(bx, dest);
 }
 
 // Helper function to give a Block to a closure to codegen a shim function.
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index 922b8a5824bec..1baab62ae43ac 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -11,8 +11,8 @@ use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
 use rustc_middle::ty::{self, Instance, Ty};
 use rustc_middle::{bug, span_bug};
 use rustc_session::config::OptLevel;
+use rustc_span::Span;
 use rustc_span::source_map::Spanned;
-use rustc_span::{Span, sym};
 use rustc_target::callconv::{ArgAbi, FnAbi, PassMode};
 use tracing::{debug, info};
 
@@ -827,7 +827,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         helper: &TerminatorCodegenHelper<'tcx>,
         bx: &mut Bx,
         intrinsic: ty::IntrinsicDef,
-        instance: Option<Instance<'tcx>>,
+        instance: Instance<'tcx>,
         source_info: mir::SourceInfo,
         target: Option<mir::BasicBlock>,
         unwind: mir::UnwindAction,
@@ -836,58 +836,56 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         // Emit a panic or a no-op for `assert_*` intrinsics.
         // These are intrinsics that compile to panics so that we can get a message
         // which mentions the offending type, even from a const context.
-        if let Some(requirement) = ValidityRequirement::from_intrinsic(intrinsic.name) {
-            let ty = instance.unwrap().args.type_at(0);
-
-            let do_panic = !bx
-                .tcx()
-                .check_validity_requirement((requirement, bx.typing_env().as_query_input(ty)))
-                .expect("expect to have layout during codegen");
-
-            let layout = bx.layout_of(ty);
-
-            Some(if do_panic {
-                let msg_str = with_no_visible_paths!({
-                    with_no_trimmed_paths!({
-                        if layout.is_uninhabited() {
-                            // Use this error even for the other intrinsics as it is more precise.
- format!("attempted to instantiate uninhabited type `{ty}`") - } else if requirement == ValidityRequirement::Zero { - format!("attempted to zero-initialize type `{ty}`, which is invalid") - } else { - format!( - "attempted to leave type `{ty}` uninitialized, which is invalid" - ) - } - }) - }); - let msg = bx.const_str(&msg_str); + let Some(requirement) = ValidityRequirement::from_intrinsic(intrinsic.name) else { + return None; + }; - // Obtain the panic entry point. - let (fn_abi, llfn, instance) = - common::build_langcall(bx, Some(source_info.span), LangItem::PanicNounwind); + let ty = instance.args.type_at(0); - // Codegen the actual panic invoke/call. - helper.do_call( - self, - bx, - fn_abi, - llfn, - &[msg.0, msg.1], - target.as_ref().map(|bb| (ReturnDest::Nothing, *bb)), - unwind, - &[], - Some(instance), - mergeable_succ, - ) - } else { - // a NOP - let target = target.unwrap(); - helper.funclet_br(self, bx, target, mergeable_succ) - }) - } else { - None + let is_valid = bx + .tcx() + .check_validity_requirement((requirement, bx.typing_env().as_query_input(ty))) + .expect("expect to have layout during codegen"); + + if is_valid { + // a NOP + let target = target.unwrap(); + return Some(helper.funclet_br(self, bx, target, mergeable_succ)); } + + let layout = bx.layout_of(ty); + + let msg_str = with_no_visible_paths!({ + with_no_trimmed_paths!({ + if layout.is_uninhabited() { + // Use this error even for the other intrinsics as it is more precise. + format!("attempted to instantiate uninhabited type `{ty}`") + } else if requirement == ValidityRequirement::Zero { + format!("attempted to zero-initialize type `{ty}`, which is invalid") + } else { + format!("attempted to leave type `{ty}` uninitialized, which is invalid") + } + }) + }); + let msg = bx.const_str(&msg_str); + + // Obtain the panic entry point. + let (fn_abi, llfn, instance) = + common::build_langcall(bx, Some(source_info.span), LangItem::PanicNounwind); + + // Codegen the actual panic invoke/call. + Some(helper.do_call( + self, + bx, + fn_abi, + llfn, + &[msg.0, msg.1], + target.as_ref().map(|bb| (ReturnDest::Nothing, *bb)), + unwind, + &[], + Some(instance), + mergeable_succ, + )) } fn codegen_call_terminator( @@ -903,42 +901,127 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { fn_span: Span, mergeable_succ: bool, ) -> MergingSucc { - let source_info = terminator.source_info; - let span = source_info.span; + let source_info = mir::SourceInfo { span: fn_span, ..terminator.source_info }; // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar. let callee = self.codegen_operand(bx, func); let (instance, mut llfn) = match *callee.layout.ty.kind() { - ty::FnDef(def_id, args) => ( - Some(ty::Instance::expect_resolve( + ty::FnDef(def_id, generic_args) => { + let instance = ty::Instance::expect_resolve( bx.tcx(), bx.typing_env(), def_id, - args, + generic_args, fn_span, - )), - None, - ), + ); + + let instance = match instance.def { + // We don't need AsyncDropGlueCtorShim here because it is not `noop func`, + // it is `func returning noop future` + ty::InstanceKind::DropGlue(_, None) => { + // Empty drop glue; a no-op. 
+                        let target = target.unwrap();
+                        return helper.funclet_br(self, bx, target, mergeable_succ);
+                    }
+                    ty::InstanceKind::Intrinsic(def_id) => {
+                        let intrinsic = bx.tcx().intrinsic(def_id).unwrap();
+                        if let Some(merging_succ) = self.codegen_panic_intrinsic(
+                            &helper,
+                            bx,
+                            intrinsic,
+                            instance,
+                            source_info,
+                            target,
+                            unwind,
+                            mergeable_succ,
+                        ) {
+                            return merging_succ;
+                        }
+
+                        let result_layout =
+                            self.cx.layout_of(self.monomorphized_place_ty(destination.as_ref()));
+
+                        let (result, store_in_local) = if result_layout.is_zst() {
+                            (
+                                PlaceRef::new_sized(bx.const_undef(bx.type_ptr()), result_layout),
+                                None,
+                            )
+                        } else if let Some(local) = destination.as_local() {
+                            match self.locals[local] {
+                                LocalRef::Place(dest) => (dest, None),
+                                LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
+                                LocalRef::PendingOperand => {
+                                    // Currently, intrinsics always need a location to store
+                                    // the result, so we create a temporary `alloca` for the
+                                    // result.
+                                    let tmp = PlaceRef::alloca(bx, result_layout);
+                                    tmp.storage_live(bx);
+                                    (tmp, Some(local))
+                                }
+                                LocalRef::Operand(_) => {
+                                    bug!("place local already assigned to");
+                                }
+                            }
+                        } else {
+                            (self.codegen_place(bx, destination.as_ref()), None)
+                        };
+
+                        if result.val.align < result.layout.align.abi {
+                            // Currently, MIR code generation does not create calls
+                            // that store directly to fields of packed structs (in
+                            // fact, the calls it creates write only to temps).
+                            //
+                            // If someone changes that, please update this code path
+                            // to create a temporary.
+                            span_bug!(self.mir.span, "can't directly store to unaligned value");
+                        }
+
+                        let args: Vec<_> =
+                            args.iter().map(|arg| self.codegen_operand(bx, &arg.node)).collect();
+
+                        match self.codegen_intrinsic_call(bx, instance, &args, result, source_info)
+                        {
+                            Ok(()) => {
+                                if let Some(local) = store_in_local {
+                                    let op = bx.load_operand(result);
+                                    result.storage_dead(bx);
+                                    self.overwrite_local(local, LocalRef::Operand(op));
+                                    self.debug_introduce_local(bx, local);
+                                }
+
+                                return if let Some(target) = target {
+                                    helper.funclet_br(self, bx, target, mergeable_succ)
+                                } else {
+                                    bx.unreachable();
+                                    MergingSucc::False
+                                };
+                            }
+                            Err(instance) => {
+                                if intrinsic.must_be_overridden {
+                                    span_bug!(
+                                        fn_span,
+                                        "intrinsic {} must be overridden by codegen backend, but isn't",
+                                        intrinsic.name,
+                                    );
+                                }
+                                instance
+                            }
+                        }
+                    }
+                    _ => instance,
+                };
+
+                (Some(instance), None)
+            }
             ty::FnPtr(..) => (None, Some(callee.immediate())),
             _ => bug!("{} is not callable", callee.layout.ty),
         };
 
-        let def = instance.map(|i| i.def);
-
-        // We don't need AsyncDropGlueCtorShim here because it is not `noop func`,
-        // it is `func returning noop future`
-        if let Some(ty::InstanceKind::DropGlue(_, None)) = def {
-            // Empty drop glue; a no-op.
-            let target = target.unwrap();
-            return helper.funclet_br(self, bx, target, mergeable_succ);
-        }
-
         // FIXME(eddyb) avoid computing this if possible, when `instance` is
         // available - right now `sig` is only needed for getting the `abi`
         // and figuring out how many extra args were passed to a C-variadic `fn`.
         let sig = callee.layout.ty.fn_sig(bx.tcx());
-        let abi = sig.abi();
 
         let extra_args = &args[sig.inputs().skip_binder().len()..];
         let extra_args = bx.tcx().mk_type_list_from_iter(extra_args.iter().map(|op_arg| {
@@ -954,93 +1037,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         // The arguments we'll be passing. Plus one to account for outptr, if used.
         let arg_count = fn_abi.args.len() + fn_abi.ret.is_indirect() as usize;
 
-        let instance = match def {
-            Some(ty::InstanceKind::Intrinsic(def_id)) => {
-                let intrinsic = bx.tcx().intrinsic(def_id).unwrap();
-                if let Some(merging_succ) = self.codegen_panic_intrinsic(
-                    &helper,
-                    bx,
-                    intrinsic,
-                    instance,
-                    source_info,
-                    target,
-                    unwind,
-                    mergeable_succ,
-                ) {
-                    return merging_succ;
-                }
-
-                let mut llargs = Vec::with_capacity(1);
-                let ret_dest = self.make_return_dest(
-                    bx,
-                    destination,
-                    &fn_abi.ret,
-                    &mut llargs,
-                    Some(intrinsic),
-                );
-                let dest = match ret_dest {
-                    _ if fn_abi.ret.is_indirect() => llargs[0],
-                    ReturnDest::Nothing => bx.const_undef(bx.type_ptr()),
-                    ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.val.llval,
-                    ReturnDest::DirectOperand(_) => {
-                        bug!("Cannot use direct operand with an intrinsic call")
-                    }
-                };
-
-                let args: Vec<_> =
-                    args.iter().map(|arg| self.codegen_operand(bx, &arg.node)).collect();
-
-                if matches!(intrinsic, ty::IntrinsicDef { name: sym::caller_location, .. }) {
-                    let location = self
-                        .get_caller_location(bx, mir::SourceInfo { span: fn_span, ..source_info });
-
-                    assert_eq!(llargs, []);
-                    if let ReturnDest::IndirectOperand(tmp, _) = ret_dest {
-                        location.val.store(bx, tmp);
-                    }
-                    self.store_return(bx, ret_dest, &fn_abi.ret, location.immediate());
-                    return helper.funclet_br(self, bx, target.unwrap(), mergeable_succ);
-                }
-
-                let instance = *instance.as_ref().unwrap();
-                match Self::codegen_intrinsic_call(bx, instance, fn_abi, &args, dest, span) {
-                    Ok(()) => {
-                        if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
-                            self.store_return(bx, ret_dest, &fn_abi.ret, dst.val.llval);
-                        }
-
-                        return if let Some(target) = target {
-                            helper.funclet_br(self, bx, target, mergeable_succ)
-                        } else {
-                            bx.unreachable();
-                            MergingSucc::False
-                        };
-                    }
-                    Err(instance) => {
-                        if intrinsic.must_be_overridden {
-                            span_bug!(
-                                span,
-                                "intrinsic {} must be overridden by codegen backend, but isn't",
-                                intrinsic.name,
-                            );
-                        }
-                        Some(instance)
-                    }
-                }
-            }
-            _ => instance,
-        };
-
         let mut llargs = Vec::with_capacity(arg_count);
 
         // We still need to call `make_return_dest` even if there's no `target`, since
         // `fn_abi.ret` could be `PassMode::Indirect`, even if it is uninhabited,
         // and `make_return_dest` adds the return-place indirect pointer to `llargs`.
-        let return_dest = self.make_return_dest(bx, destination, &fn_abi.ret, &mut llargs, None);
+        let return_dest = self.make_return_dest(bx, destination, &fn_abi.ret, &mut llargs);
         let destination = target.map(|target| (return_dest, target));
 
         // Split the rust-call tupled arguments off.
-        let (first_args, untuple) = if abi == ExternAbi::RustCall
+        let (first_args, untuple) = if sig.abi() == ExternAbi::RustCall
             && let Some((tup, args)) = args.split_last()
         {
             (args, Some(tup))
@@ -1055,7 +1061,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         'make_args: for (i, arg) in first_args.iter().enumerate() {
             let mut op = self.codegen_operand(bx, &arg.node);
 
-            if let (0, Some(ty::InstanceKind::Virtual(_, idx))) = (i, def) {
+            if let (0, Some(ty::InstanceKind::Virtual(_, idx))) = (i, instance.map(|i| i.def)) {
                 match op.val {
                     Pair(data_ptr, meta) => {
                         // In the case of Rc<Self>, we need to explicitly pass a
@@ -1109,7 +1115,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         // Make sure that we've actually unwrapped the rcvr down
                         // to a pointer or ref to `dyn* Trait`.
                         if !op.layout.ty.builtin_deref(true).unwrap().is_dyn_star() {
-                            span_bug!(span, "can't codegen a virtual call on {:#?}", op);
+                            span_bug!(fn_span, "can't codegen a virtual call on {:#?}", op);
                         }
                         let place = op.deref(bx.cx());
                         let data_place = place.project_field(bx, 0);
@@ -1125,7 +1131,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         continue;
                     }
                     _ => {
-                        span_bug!(span, "can't codegen a virtual call on {:#?}", op);
+                        span_bug!(fn_span, "can't codegen a virtual call on {:#?}", op);
                     }
                 }
             }
@@ -1175,8 +1181,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             mir_args + 1,
             "#[track_caller] fn's must have 1 more argument in their ABI than in their MIR: {instance:?} {fn_span:?} {fn_abi:?}",
         );
-        let location =
-            self.get_caller_location(bx, mir::SourceInfo { span: fn_span, ..source_info });
+        let location = self.get_caller_location(bx, source_info);
         debug!(
             "codegen_call_terminator({:?}): location={:?} (fn_span {:?})",
             terminator, location, fn_span
@@ -1195,9 +1200,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         let fn_ptr = match (instance, llfn) {
             (Some(instance), None) => bx.get_fn_addr(instance),
             (_, Some(llfn)) => llfn,
-            _ => span_bug!(span, "no instance or llfn for call"),
+            _ => span_bug!(fn_span, "no instance or llfn for call"),
         };
-        self.set_debug_loc(bx, mir::SourceInfo { span: fn_span, ..source_info });
+        self.set_debug_loc(bx, source_info);
         helper.do_call(
             self,
             bx,
@@ -1667,7 +1672,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         tuple.layout.fields.count()
     }
 
-    fn get_caller_location(
+    pub(super) fn get_caller_location(
         &mut self,
         bx: &mut Bx,
         source_info: mir::SourceInfo,
@@ -1868,7 +1873,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         dest: mir::Place<'tcx>,
         fn_ret: &ArgAbi<'tcx, Ty<'tcx>>,
         llargs: &mut Vec<Bx::Value>,
-        intrinsic: Option<ty::IntrinsicDef>,
     ) -> ReturnDest<'tcx, Bx::Value> {
         // If the return is ignored, we can just return a do-nothing `ReturnDest`.
         if fn_ret.is_ignore() {
@@ -1888,13 +1892,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     tmp.storage_live(bx);
                     llargs.push(tmp.val.llval);
                     ReturnDest::IndirectOperand(tmp, index)
-                } else if intrinsic.is_some() {
-                    // Currently, intrinsics always need a location to store
-                    // the result, so we create a temporary `alloca` for the
-                    // result.
-                    let tmp = PlaceRef::alloca(bx, fn_ret.layout);
-                    tmp.storage_live(bx);
-                    ReturnDest::IndirectOperand(tmp, index)
                 } else {
                     ReturnDest::DirectOperand(index)
                 };
@@ -1904,7 +1901,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 }
             }
         } else {
-            self.codegen_place(bx, mir::PlaceRef { local: dest.local, projection: dest.projection })
+            self.codegen_place(bx, dest.as_ref())
         };
         if fn_ret.is_indirect() {
             if dest.val.align < dest.layout.align.abi {
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index b0fcfee2adf5f..a6d159c51e132 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -1,9 +1,9 @@
 use rustc_abi::WrappingRange;
+use rustc_middle::mir::SourceInfo;
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_middle::{bug, span_bug};
 use rustc_session::config::OptLevel;
-use rustc_span::{Span, sym};
-use rustc_target::callconv::{FnAbi, PassMode};
+use rustc_span::sym;
 
 use super::FunctionCx;
 use super::operand::OperandRef;
@@ -52,13 +52,14 @@ fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
 impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     /// In the `Err` case, returns the instance that should be called instead.
     pub fn codegen_intrinsic_call(
+        &mut self,
         bx: &mut Bx,
         instance: ty::Instance<'tcx>,
-        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
         args: &[OperandRef<'tcx, Bx::Value>],
-        llresult: Bx::Value,
-        span: Span,
+        result: PlaceRef<'tcx, Bx::Value>,
+        source_info: SourceInfo,
     ) -> Result<(), ty::Instance<'tcx>> {
+        let span = source_info.span;
         let callee_ty = instance.ty(bx.tcx(), bx.typing_env());
 
         let ty::FnDef(def_id, fn_args) = *callee_ty.kind() else {
@@ -97,7 +98,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         }
 
         let llret_ty = bx.backend_type(bx.layout_of(ret_ty));
-        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
 
         let llval = match name {
             sym::abort => {
@@ -105,6 +105,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 return Ok(());
             }
 
+            sym::caller_location => {
+                let location = self.get_caller_location(bx, source_info);
+                location.val.store(bx, result);
+                return Ok(());
+            }
+
             sym::va_start => bx.va_start(args[0].immediate()),
             sym::va_end => bx.va_end(args[0].immediate()),
             sym::size_of_val => {
@@ -528,18 +534,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
             _ => {
                 // Need to use backend-specific things in the implementation.
-                return bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
+                return bx.codegen_intrinsic_call(instance, args, result, span);
             }
         };
 
-        if !fn_abi.ret.is_ignore() {
-            if let PassMode::Cast { .. } = &fn_abi.ret.mode {
-                bx.store_to_place(llval, result.val);
-            } else {
-                OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
-                    .val
-                    .store(bx, result);
-            }
+        if result.layout.ty.is_bool() {
+            OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
+                .val
+                .store(bx, result);
+        } else if !result.layout.ty.is_unit() {
+            bx.store_to_place(llval, result.val);
         }
         Ok(())
     }
diff --git a/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs b/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs
index 88cf8dbf0c5c8..a07c569a03237 100644
--- a/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs
@@ -1,9 +1,9 @@
-use rustc_middle::ty::{self, Ty};
+use rustc_middle::ty;
 use rustc_span::Span;
-use rustc_target::callconv::FnAbi;
 
 use super::BackendTypes;
 use crate::mir::operand::OperandRef;
+use crate::mir::place::PlaceRef;
 
 pub trait IntrinsicCallBuilderMethods<'tcx>: BackendTypes {
     /// Remember to add all intrinsics here, in `compiler/rustc_hir_analysis/src/check/mod.rs`,
@@ -14,9 +14,8 @@ pub trait IntrinsicCallBuilderMethods<'tcx>: BackendTypes {
     fn codegen_intrinsic_call(
         &mut self,
         instance: ty::Instance<'tcx>,
-        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
         args: &[OperandRef<'tcx, Self::Value>],
-        llresult: Self::Value,
+        result: PlaceRef<'tcx, Self::Value>,
         span: Span,
     ) -> Result<(), ty::Instance<'tcx>>;
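
For readers tracing the refactor: the common thread across all five files is that `codegen_intrinsic_call` now receives its return destination as a `PlaceRef` that carries its own layout, so backends no longer consult `fn_abi.ret` to decide how a result is stored. Below is a minimal standalone sketch of that store dispatch; `Ty`, `Layout`, and `PlaceRef` here are simplified stand-ins for illustration, not rustc's real types.

```rust
// Simplified stand-ins that mirror the shapes used in the patch.
#[derive(Clone, Copy, PartialEq)]
enum Ty {
    Bool,
    Unit,
    I32,
}

#[derive(Clone, Copy)]
struct Layout {
    ty: Ty,
}

// Mirrors `PlaceRef<'tcx, V>`: a destination handle plus the layout of the
// value it holds, which is what lets the `fn_abi` parameter be dropped.
#[derive(Clone, Copy)]
struct PlaceRef {
    llval: u64, // stand-in for a backend value handle
    layout: Layout,
}

// The dispatch now shared (in spirit) by the gcc, llvm, and ssa copies:
// bools take the immediate/operand path, unit results are not stored at
// all, and everything else is written directly into the place.
fn store_intrinsic_result(result: PlaceRef, value: u64) {
    if result.layout.ty == Ty::Bool {
        println!("store {value} as an immediate bool at {:#x}", result.llval);
    } else if result.layout.ty != Ty::Unit {
        println!("store {value} directly into the place at {:#x}", result.llval);
    }
}

fn main() {
    store_intrinsic_result(PlaceRef { llval: 0x1000, layout: Layout { ty: Ty::Bool } }, 1);
    store_intrinsic_result(PlaceRef { llval: 0x2000, layout: Layout { ty: Ty::I32 } }, 42);
    store_intrinsic_result(PlaceRef { llval: 0x3000, layout: Layout { ty: Ty::Unit } }, 0);
}
```

The `is_bool()` arm mirrors the patch's special case: `bool` is the one scalar whose immediate form (`i1` in LLVM) differs from its in-memory form (`i8`), so it round-trips through `from_immediate_or_packed_pair`, while unit (ZST) results need no store at all.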