Commit ade2871

Auto merge of #115915 - cjgillot:const-pair, r=<try>

WIP Replace ConstValue::Slice by ConstValue::ScalarPair

Both the interpreter and codegen have dedicated paths for scalar pairs. Having one in ConstValue makes it able to represent all "immediate" values. To mitigate the increase in the size of the ConstValue struct, a first commit interns it.

2 parents 8ed1d4a + b07c1b4 commit ade2871
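For orientation, here is a minimal self-contained model of the representation change, inferred from the diffs below. The stub types and exact field names are assumptions, not rustc's actual definitions (in particular, the real `Slice` variant held a `ConstAllocation` plus a start..end byte range):

// Sketch only: stub types stand in for rustc's Scalar/AllocId/Size.
#[derive(Clone, Copy, Debug)]
struct Scalar(u128);
#[derive(Clone, Copy, Debug)]
struct AllocId(u64);
#[derive(Clone, Copy, Debug)]
struct Size(u64);

// Before: a by-value enum with a dedicated slice representation.
#[allow(dead_code)]
enum OldConstValue {
    Scalar(Scalar),
    ZeroSized,
    Slice { alloc_id: AllocId, start: usize, end: usize },
    Indirect { alloc_id: AllocId, offset: Size },
}

// After: the payload lives in `ConstValueKind`, where `ScalarPair`
// subsumes `Slice` (a slice is a pointer scalar plus a length scalar),
// and callers hold an interned, pointer-sized handle matched via `kind()`.
#[allow(dead_code)]
enum ConstValueKind {
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
    ZeroSized,
    Indirect { alloc_id: AllocId, offset: Size },
}

struct ConstValue<'tcx>(&'tcx ConstValueKind);

impl<'tcx> ConstValue<'tcx> {
    fn kind(&self) -> &'tcx ConstValueKind {
        self.0
    }
}

fn main() {
    // A `&str` constant becomes (data pointer, length) under the new scheme.
    let kind = ConstValueKind::ScalarPair(Scalar(0x1000), Scalar(5));
    let val = ConstValue(&kind);
    match *val.kind() {
        ConstValueKind::ScalarPair(ptr, len) => println!("ptr={ptr:?}, len={len:?}"),
        _ => unreachable!(),
    }
}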

File tree

84 files changed: +621 -550 lines changed


compiler/rustc_codegen_cranelift/src/constant.rs (+95 -97)

@@ -3,7 +3,7 @@
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 use rustc_middle::mir::interpret::{
-    read_target_uint, AllocId, ConstValue, ErrorHandled, GlobalAlloc, Scalar,
+    read_target_uint, AllocId, ConstValue, ConstValueKind, ErrorHandled, GlobalAlloc, Scalar,
 };
 
 use cranelift_module::*;
@@ -103,6 +103,91 @@ pub(crate) fn codegen_constant_operand<'tcx>(
     codegen_const_value(fx, const_val, ty)
 }
 
+pub(crate) fn codegen_const_scalar<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    scalar: Scalar,
+    layout: TyAndLayout<'tcx>,
+) -> CValue<'tcx> {
+    match scalar {
+        Scalar::Int(int) => {
+            if fx.clif_type(layout.ty).is_some() {
+                return CValue::const_val(fx, layout, int);
+            } else {
+                let raw_val = int.to_bits(int.size()).unwrap();
+                let val = match int.size().bytes() {
+                    1 => fx.bcx.ins().iconst(types::I8, raw_val as i64),
+                    2 => fx.bcx.ins().iconst(types::I16, raw_val as i64),
+                    4 => fx.bcx.ins().iconst(types::I32, raw_val as i64),
+                    8 => fx.bcx.ins().iconst(types::I64, raw_val as i64),
+                    16 => {
+                        let lsb = fx.bcx.ins().iconst(types::I64, raw_val as u64 as i64);
+                        let msb = fx.bcx.ins().iconst(types::I64, (raw_val >> 64) as u64 as i64);
+                        fx.bcx.ins().iconcat(lsb, msb)
+                    }
+                    _ => unreachable!(),
+                };
+
+                // FIXME avoid this extra copy to the stack and directly write to the final
+                // destination
+                let place = CPlace::new_stack_slot(fx, layout);
+                place.to_ptr().store(fx, val, MemFlags::trusted());
+                place.to_cvalue(fx)
+            }
+        }
+        Scalar::Ptr(ptr, _size) => {
+            let (alloc_id, offset) = ptr.into_parts(); // we know the `offset` is relative
+            let base_addr = match fx.tcx.global_alloc(alloc_id) {
+                GlobalAlloc::Memory(alloc) => {
+                    let data_id = data_id_for_alloc_id(
+                        &mut fx.constants_cx,
+                        fx.module,
+                        alloc_id,
+                        alloc.inner().mutability,
+                    );
+                    let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+                    if fx.clif_comments.enabled() {
+                        fx.add_comment(local_data_id, format!("{:?}", alloc_id));
+                    }
+                    fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
+                }
+                GlobalAlloc::Function(instance) => {
+                    let func_id = crate::abi::import_function(fx.tcx, fx.module, instance);
+                    let local_func_id = fx.module.declare_func_in_func(func_id, &mut fx.bcx.func);
+                    fx.bcx.ins().func_addr(fx.pointer_type, local_func_id)
+                }
+                GlobalAlloc::VTable(ty, trait_ref) => {
+                    let alloc_id = fx.tcx.vtable_allocation((ty, trait_ref));
+                    let alloc = fx.tcx.global_alloc(alloc_id).unwrap_memory();
+                    // FIXME: factor this common code with the `Memory` arm into a function?
+                    let data_id = data_id_for_alloc_id(
+                        &mut fx.constants_cx,
+                        fx.module,
+                        alloc_id,
+                        alloc.inner().mutability,
+                    );
+                    let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+                    fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
+                }
+                GlobalAlloc::Static(def_id) => {
+                    assert!(fx.tcx.is_static(def_id));
+                    let data_id = data_id_for_static(fx.tcx, fx.module, def_id, false);
+                    let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+                    if fx.clif_comments.enabled() {
+                        fx.add_comment(local_data_id, format!("{:?}", def_id));
+                    }
+                    fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
+                }
+            };
+            let val = if offset.bytes() != 0 {
+                fx.bcx.ins().iadd_imm(base_addr, i64::try_from(offset.bytes()).unwrap())
+            } else {
+                base_addr
+            };
+            CValue::by_val(val, layout)
+        }
+    }
+}
+
 pub(crate) fn codegen_const_value<'tcx>(
     fx: &mut FunctionCx<'_, '_, 'tcx>,
     const_val: ConstValue<'tcx>,
@@ -115,106 +200,19 @@ pub(crate) fn codegen_const_value<'tcx>(
         return CValue::by_ref(crate::Pointer::dangling(layout.align.pref), layout);
     }
 
-    match const_val {
-        ConstValue::ZeroSized => unreachable!(), // we already handled ZST above
-        ConstValue::Scalar(x) => match x {
-            Scalar::Int(int) => {
-                if fx.clif_type(layout.ty).is_some() {
-                    return CValue::const_val(fx, layout, int);
-                } else {
-                    let raw_val = int.to_bits(int.size()).unwrap();
-                    let val = match int.size().bytes() {
-                        1 => fx.bcx.ins().iconst(types::I8, raw_val as i64),
-                        2 => fx.bcx.ins().iconst(types::I16, raw_val as i64),
-                        4 => fx.bcx.ins().iconst(types::I32, raw_val as i64),
-                        8 => fx.bcx.ins().iconst(types::I64, raw_val as i64),
-                        16 => {
-                            let lsb = fx.bcx.ins().iconst(types::I64, raw_val as u64 as i64);
-                            let msb =
-                                fx.bcx.ins().iconst(types::I64, (raw_val >> 64) as u64 as i64);
-                            fx.bcx.ins().iconcat(lsb, msb)
-                        }
-                        _ => unreachable!(),
-                    };
-
-                    // FIXME avoid this extra copy to the stack and directly write to the final
-                    // destination
-                    let place = CPlace::new_stack_slot(fx, layout);
-                    place.to_ptr().store(fx, val, MemFlags::trusted());
-                    place.to_cvalue(fx)
-                }
-            }
-            Scalar::Ptr(ptr, _size) => {
-                let (alloc_id, offset) = ptr.into_parts(); // we know the `offset` is relative
-                let base_addr = match fx.tcx.global_alloc(alloc_id) {
-                    GlobalAlloc::Memory(alloc) => {
-                        let data_id = data_id_for_alloc_id(
-                            &mut fx.constants_cx,
-                            fx.module,
-                            alloc_id,
-                            alloc.inner().mutability,
-                        );
-                        let local_data_id =
-                            fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
-                        if fx.clif_comments.enabled() {
-                            fx.add_comment(local_data_id, format!("{:?}", alloc_id));
-                        }
-                        fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
-                    }
-                    GlobalAlloc::Function(instance) => {
-                        let func_id = crate::abi::import_function(fx.tcx, fx.module, instance);
-                        let local_func_id =
-                            fx.module.declare_func_in_func(func_id, &mut fx.bcx.func);
-                        fx.bcx.ins().func_addr(fx.pointer_type, local_func_id)
-                    }
-                    GlobalAlloc::VTable(ty, trait_ref) => {
-                        let alloc_id = fx.tcx.vtable_allocation((ty, trait_ref));
-                        let alloc = fx.tcx.global_alloc(alloc_id).unwrap_memory();
-                        // FIXME: factor this common code with the `Memory` arm into a function?
-                        let data_id = data_id_for_alloc_id(
-                            &mut fx.constants_cx,
-                            fx.module,
-                            alloc_id,
-                            alloc.inner().mutability,
-                        );
-                        let local_data_id =
-                            fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
-                        fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
-                    }
-                    GlobalAlloc::Static(def_id) => {
-                        assert!(fx.tcx.is_static(def_id));
-                        let data_id = data_id_for_static(fx.tcx, fx.module, def_id, false);
-                        let local_data_id =
-                            fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
-                        if fx.clif_comments.enabled() {
-                            fx.add_comment(local_data_id, format!("{:?}", def_id));
-                        }
-                        fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
-                    }
-                };
-                let val = if offset.bytes() != 0 {
-                    fx.bcx.ins().iadd_imm(base_addr, i64::try_from(offset.bytes()).unwrap())
-                } else {
-                    base_addr
-                };
-                CValue::by_val(val, layout)
-            }
-        },
-        ConstValue::Indirect { alloc_id, offset } => CValue::by_ref(
+    match *const_val.kind() {
+        ConstValueKind::ZeroSized => unreachable!(), // we already handled ZST above
+        ConstValueKind::Scalar(x) => codegen_const_scalar(fx, x, layout),
+        ConstValueKind::Indirect { alloc_id, offset } => CValue::by_ref(
             pointer_for_allocation(fx, alloc_id)
                 .offset_i64(fx, i64::try_from(offset.bytes()).unwrap()),
             layout,
        ),
-        ConstValue::Slice { data, start, end } => {
-            let alloc_id = fx.tcx.reserve_and_set_memory_alloc(data);
-            let ptr = pointer_for_allocation(fx, alloc_id)
-                .offset_i64(fx, i64::try_from(start).unwrap())
-                .get_addr(fx);
-            let len = fx
-                .bcx
-                .ins()
-                .iconst(fx.pointer_type, i64::try_from(end.checked_sub(start).unwrap()).unwrap());
-            CValue::by_val_pair(ptr, len, layout)
+        ConstValueKind::ScalarPair(a, b) => {
+            // FIXME wrong layout.
+            let a = codegen_const_scalar(fx, a, layout);
+            let b = codegen_const_scalar(fx, b, layout);
+            CValue::by_val_pair(a, b, layout)
         }
     }
 }
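A note on the `FIXME wrong layout.` in the new `ScalarPair` arm: each half of a scalar pair has its own element layout, but this WIP arm lowers both halves with the pair's combined layout (the rustc_codegen_ssa change further down handles this per element via `scalar_pair_element_backend_type`). A small self-contained illustration of the distinction; the names here are illustrative, not cranelift's or rustc's API:

// Model of a scalar-pair ABI: for `&str` on a 64-bit target the pair is
// (pointer, 8-byte integer); lowering each half needs the *element*
// layout, not the pair's layout.
#[derive(Clone, Copy, Debug, PartialEq)]
enum ScalarKind {
    Ptr,
    Int { bytes: u8 },
}

#[allow(dead_code)]
#[derive(Clone, Copy, Debug, PartialEq)]
enum Abi {
    Scalar(ScalarKind),
    ScalarPair(ScalarKind, ScalarKind),
}

fn scalar_pair_element(pair: Abi, i: usize) -> ScalarKind {
    match (pair, i) {
        (Abi::ScalarPair(a, _), 0) => a,
        (Abi::ScalarPair(_, b), 1) => b,
        _ => panic!("not a scalar pair element"),
    }
}

fn main() {
    let str_ref = Abi::ScalarPair(ScalarKind::Ptr, ScalarKind::Int { bytes: 8 });
    assert_eq!(scalar_pair_element(str_ref, 0), ScalarKind::Ptr);
    assert_eq!(scalar_pair_element(str_ref, 1), ScalarKind::Int { bytes: 8 });
    println!("each half lowers with its own element layout");
}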

compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs (+2 -2)

@@ -171,8 +171,8 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
             let idx_const = crate::constant::mir_operand_get_const_val(fx, idx)
                 .expect("simd_shuffle idx not const");
 
-            let idx_bytes = match idx_const {
-                ConstValue::Indirect { alloc_id, offset } => {
+            let idx_bytes = match *idx_const.kind() {
+                ConstValueKind::Indirect { alloc_id, offset } => {
                     let alloc = fx.tcx.global_alloc(alloc_id).unwrap_memory();
                     let size = Size::from_bytes(
                         4 * ret_lane_count, /* size_of([u32; ret_lane_count]) */

compiler/rustc_codegen_ssa/src/common.rs (+1 -1)

@@ -197,7 +197,7 @@ pub fn asm_const_to_str<'tcx>(
     const_value: ConstValue<'tcx>,
     ty_and_layout: TyAndLayout<'tcx>,
 ) -> String {
-    let ConstValue::Scalar(scalar) = const_value else {
+    let Some(scalar) = const_value.try_to_scalar() else {
         span_bug!(sp, "expected Scalar for promoted asm const, but got {:#?}", const_value)
     };
     let value = scalar.assert_bits(ty_and_layout.size);
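Now that the representation is interned, accessors like `try_to_scalar` replace direct pattern matches on the enum. A plausible shape for it, continuing the sketch from the top of the page (the actual rustc signature may differ):

#[derive(Clone, Copy, Debug)]
struct Scalar(u128);

#[allow(dead_code)]
enum ConstValueKind {
    ZeroSized,
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
}

struct ConstValue<'tcx>(&'tcx ConstValueKind);

impl<'tcx> ConstValue<'tcx> {
    // Callers like `asm_const_to_str` no longer name the variant; they ask
    // for a scalar and get `None` for any other kind.
    fn try_to_scalar(&self) -> Option<Scalar> {
        match *self.0 {
            ConstValueKind::Scalar(s) => Some(s),
            _ => None,
        }
    }
}

fn main() {
    let kind = ConstValueKind::Scalar(Scalar(7));
    assert!(ConstValue(&kind).try_to_scalar().is_some());
    let pair = ConstValueKind::ScalarPair(Scalar(1), Scalar(2));
    assert!(ConstValue(&pair).try_to_scalar().is_none());
}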

compiler/rustc_codegen_ssa/src/mir/operand.rs (+13 -16)

@@ -7,7 +7,7 @@ use crate::traits::*;
 use crate::MemFlags;
 
 use rustc_middle::mir;
-use rustc_middle::mir::interpret::{alloc_range, ConstValue, Pointer, Scalar};
+use rustc_middle::mir::interpret::{alloc_range, ConstValue, ConstValueKind};
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::ty::Ty;
 use rustc_target::abi::{self, Abi, Align, Size};
@@ -91,35 +91,32 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
     ) -> Self {
         let layout = bx.layout_of(ty);
 
-        let val = match val {
-            ConstValue::Scalar(x) => {
+        let val = match *val.kind() {
+            ConstValueKind::Scalar(x) => {
                 let Abi::Scalar(scalar) = layout.abi else {
                     bug!("from_const: invalid ByVal layout: {:#?}", layout);
                 };
                 let llval = bx.scalar_to_backend(x, scalar, bx.immediate_backend_type(layout));
                 OperandValue::Immediate(llval)
             }
-            ConstValue::ZeroSized => return OperandRef::zero_sized(layout),
-            ConstValue::Slice { data, start, end } => {
-                let Abi::ScalarPair(a_scalar, _) = layout.abi else {
-                    bug!("from_const: invalid ScalarPair layout: {:#?}", layout);
+            ConstValueKind::ZeroSized => return OperandRef::zero_sized(layout),
+            ConstValueKind::ScalarPair(a, b) => {
+                let Abi::ScalarPair(a_scalar, b_scalar) = layout.abi else {
+                    bug!("from_const: invalid ByVal layout: {:#?}", layout);
                 };
-                let a = Scalar::from_pointer(
-                    Pointer::new(
-                        bx.tcx().reserve_and_set_memory_alloc(data),
-                        Size::from_bytes(start),
-                    ),
-                    &bx.tcx(),
-                );
                 let a_llval = bx.scalar_to_backend(
                     a,
                     a_scalar,
                     bx.scalar_pair_element_backend_type(layout, 0, true),
                 );
-                let b_llval = bx.const_usize((end - start) as u64);
+                let b_llval = bx.scalar_to_backend(
+                    b,
+                    b_scalar,
+                    bx.scalar_pair_element_backend_type(layout, 1, true),
+                );
                 OperandValue::Pair(a_llval, b_llval)
             }
-            ConstValue::Indirect { alloc_id, offset } => {
+            ConstValueKind::Indirect { alloc_id, offset } => {
                 let alloc = bx.tcx().global_alloc(alloc_id).unwrap_memory();
                 return Self::from_const_alloc(bx, layout, alloc, offset);
            }
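Before this change the backend reconstructed a slice's two immediates itself, building a pointer from `data`/`start` and computing the length as `end - start`; after it, const-eval has already produced both scalars and codegen only translates each half with its own element type. An illustrative model of that data flow (simplified types, not the backend traits):

#[derive(Clone, Copy, Debug)]
enum Scalar {
    Ptr { alloc_id: u64, offset: u64 },
    Int(u128),
}

#[allow(dead_code)]
#[derive(Debug)]
enum OperandValue {
    Immediate(Scalar),
    Pair(Scalar, Scalar),
}

// const-eval side: a string constant is (pointer into its allocation, length).
fn str_const_pair(alloc_id: u64, s: &str) -> (Scalar, Scalar) {
    (Scalar::Ptr { alloc_id, offset: 0 }, Scalar::Int(s.len() as u128))
}

// codegen side: each half is translated as-is; no `end - start`
// arithmetic remains here.
fn from_const_pair(a: Scalar, b: Scalar) -> OperandValue {
    OperandValue::Pair(a, b)
}

fn main() {
    let (ptr, len) = str_const_pair(0, "hello");
    println!("{:?}", from_const_pair(ptr, len));
}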

compiler/rustc_const_eval/src/const_eval/eval_queries.rs (+10 -24)

@@ -111,23 +111,24 @@ pub(super) fn op_to_const<'tcx>(
     ecx: &CompileTimeEvalContext<'_, 'tcx>,
     op: &OpTy<'tcx>,
 ) -> ConstValue<'tcx> {
+    let tcx = *ecx.tcx;
     // Handle ZST consistently and early.
     if op.layout.is_zst() {
-        return ConstValue::ZeroSized;
+        return ConstValue::zero_sized(tcx);
     }
 
-    // All scalar types should be stored as `ConstValue::Scalar`. This is needed to make
-    // `ConstValue::try_to_scalar` efficient; we want that to work for *all* constants of scalar
+    // All scalar types should be stored as `ConstValueKind::Scalar`. This is needed to make
+    // `ConstValueKind::try_to_scalar` efficient; we want that to work for *all* constants of scalar
     // type (it's used throughout the compiler and having it work just on literals is not enough)
     // and we want it to be fast (i.e., don't go to an `Allocation` and reconstruct the `Scalar`
     // from its byte-serialized form).
     let force_as_immediate = match op.layout.abi {
         Abi::Scalar(abi::Scalar::Initialized { .. }) => true,
-        // We don't *force* `ConstValue::Slice` for `ScalarPair`. This has the advantage that if the
+        // We don't *force* `ConstValueKind::Slice` for `ScalarPair`. This has the advantage that if the
         // input `op` is a place, then turning it into a `ConstValue` and back into a `OpTy` will
         // not have to generate any duplicate allocations (we preserve the original `AllocId` in
-        // `ConstValue::Indirect`). It means accessing the contents of a slice can be slow (since
-        // they can be stored as `ConstValue::Indirect`), but that's not relevant since we barely
+        // `ConstValueKind::Indirect`). It means accessing the contents of a slice can be slow (since
+        // they can be stored as `ConstValueKind::Indirect`), but that's not relevant since we barely
         // ever have to do this. (`try_get_slice_bytes_for_diagnostics` exists to provide this
         // functionality.)
         _ => false,
@@ -145,27 +146,12 @@ pub(super) fn op_to_const<'tcx>(
             // We know `offset` is relative to the allocation, so we can use `into_parts`.
             let (alloc_id, offset) = mplace.ptr().into_parts();
             let alloc_id = alloc_id.expect("cannot have `fake` place fot non-ZST type");
-            ConstValue::Indirect { alloc_id, offset }
+            ConstValue::from_memory(tcx, alloc_id, offset)
         }
         // see comment on `let force_as_immediate` above
         Right(imm) => match *imm {
-            Immediate::Scalar(x) => ConstValue::Scalar(x),
-            Immediate::ScalarPair(a, b) => {
-                debug!("ScalarPair(a: {:?}, b: {:?})", a, b);
-                // FIXME: assert that this has an appropriate type.
-                // Currently we actually get here for non-[u8] slices during valtree construction!
-                let msg = "`op_to_const` on an immediate scalar pair must only be used on slice references to actually allocated memory";
-                // We know `offset` is relative to the allocation, so we can use `into_parts`.
-                // We use `ConstValue::Slice` so that we don't have to generate an allocation for
-                // `ConstValue::Indirect` here.
-                let (alloc_id, offset) = a.to_pointer(ecx).expect(msg).into_parts();
-                let alloc_id = alloc_id.expect(msg);
-                let data = ecx.tcx.global_alloc(alloc_id).unwrap_memory();
-                let start = offset.bytes_usize();
-                let len = b.to_target_usize(ecx).expect(msg);
-                let len: usize = len.try_into().unwrap();
-                ConstValue::Slice { data, start, end: start + len }
-            }
+            Immediate::Scalar(x) => ConstValue::from_scalar(tcx, x),
+            Immediate::ScalarPair(a, b) => ConstValue::from_pair(tcx, a, b),
             Immediate::Uninit => bug!("`Uninit` is not a valid value for {}", op.layout.ty),
         },
     }
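The new `tcx`-taking constructors (`zero_sized`, `from_scalar`, `from_pair`, `from_memory`) imply interning, which is how, per the commit message, the size increase from the extra variant is paid for: every handle stays one pointer wide. A minimal stand-in interner as an assumption-level sketch (rustc's real interner is arena-based, not `'static`/leak-based):

use std::collections::HashSet;

#[allow(dead_code)]
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum ConstValueKind {
    ZeroSized,
    Scalar(u128),
    ScalarPair(u128, u128),
    Indirect { alloc_id: u64, offset: u64 },
}

// Deduplicating interner: a ConstValue handle is a single reference no
// matter how many variants (or how large a variant) the kind enum has.
struct Interner(HashSet<&'static ConstValueKind>);

impl Interner {
    fn intern(&mut self, kind: ConstValueKind) -> &'static ConstValueKind {
        if let Some(&k) = self.0.get(&kind) {
            return k;
        }
        let k: &'static ConstValueKind = Box::leak(Box::new(kind));
        self.0.insert(k);
        k
    }

    // Counterparts of the constructors used above.
    fn zero_sized(&mut self) -> &'static ConstValueKind {
        self.intern(ConstValueKind::ZeroSized)
    }
    fn from_pair(&mut self, a: u128, b: u128) -> &'static ConstValueKind {
        self.intern(ConstValueKind::ScalarPair(a, b))
    }
}

fn main() {
    let mut tcx = Interner(HashSet::new());
    let a = tcx.from_pair(1, 2);
    let b = tcx.from_pair(1, 2);
    assert!(std::ptr::eq(a, b)); // interning dedups structurally equal kinds
    let _ = tcx.zero_sized();
    println!("{a:?}");
}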

compiler/rustc_const_eval/src/const_eval/mod.rs (+1 -1)

@@ -30,7 +30,7 @@ pub(crate) fn const_caller_location(
     if intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &loc_place).is_err() {
         bug!("intern_const_alloc_recursive should not error in this case")
     }
-    ConstValue::Scalar(Scalar::from_maybe_pointer(loc_place.ptr(), &tcx))
+    ConstValue::from_scalar(tcx, Scalar::from_maybe_pointer(loc_place.ptr(), &tcx))
 }
 
 // We forbid type-level constants that contain more than `VALTREE_MAX_NODES` nodes.

compiler/rustc_const_eval/src/const_eval/valtrees.rs (+2 -2)

@@ -219,10 +219,10 @@ pub fn valtree_to_const_value<'tcx>(
     match ty.kind() {
         ty::FnDef(..) => {
             assert!(valtree.unwrap_branch().is_empty());
-            ConstValue::ZeroSized
+            ConstValue::zero_sized(tcx)
         }
         ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => match valtree {
-            ty::ValTree::Leaf(scalar_int) => ConstValue::Scalar(Scalar::Int(scalar_int)),
+            ty::ValTree::Leaf(scalar_int) => ConstValue::from_scalar(tcx, Scalar::Int(scalar_int)),
             ty::ValTree::Branch(_) => bug!(
                 "ValTrees for Bool, Int, Uint, Float or Char should have the form ValTree::Leaf"
             ),
