compiler: Use size_of from the prelude instead of imported #138040

Merged · 1 commit · Mar 9, 2025
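
The change itself is mechanical: drop the `mem::` or `std::mem::` qualifier (and any `use std::mem;` left unused) and call the functions directly. A minimal sketch of the pattern, assuming Rust 1.80 or later, where `size_of`, `size_of_val`, `align_of`, and `align_of_val` were added to the standard prelude (the snippet is illustrative, not taken from the diff):

```rust
// Before: reaching size_of through std::mem, via an import or a full path.
//
//     use std::mem;
//     let len = mem::size_of::<u32>() / mem::size_of::<u16>();

// After: with Rust 1.80+ these functions are in the prelude, so no import is needed.
fn main() {
    // size_of / align_of are free functions available from the prelude.
    let len = size_of::<u32>() / size_of::<u16>();
    assert_eq!(len, 2);

    // size_of_val works on values, including unsized slices.
    let bytes = [0u8; 8];
    assert_eq!(size_of_val(&bytes[..]), 8);

    // align_of is likewise available unqualified.
    assert!(align_of::<u64>() >= align_of::<u32>());
}
```

Note that `use std::mem;` stays wherever other items are still reached through it, as with the `mem::needs_drop` and `mem::zeroed` call sites left untouched in the diff below.
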
26 changes: 13 additions & 13 deletions compiler/rustc_arena/src/lib.rs
@@ -93,7 +93,7 @@ impl<T> ArenaChunk<T> {
#[inline]
fn end(&mut self) -> *mut T {
unsafe {
- if mem::size_of::<T>() == 0 {
+ if size_of::<T>() == 0 {
// A pointer as large as possible for zero-sized elements.
ptr::without_provenance_mut(!0)
} else {
@@ -151,7 +151,7 @@ impl<T> TypedArena<T> {
}

unsafe {
- if mem::size_of::<T>() == 0 {
+ if size_of::<T>() == 0 {
self.ptr.set(self.ptr.get().wrapping_byte_add(1));
let ptr = ptr::NonNull::<T>::dangling().as_ptr();
// Don't drop the object. This `write` is equivalent to `forget`.
@@ -173,13 +173,13 @@ impl<T> TypedArena<T> {
// FIXME: this should *likely* use `offset_from`, but more
// investigation is needed (including running tests in miri).
let available_bytes = self.end.get().addr() - self.ptr.get().addr();
- let additional_bytes = additional.checked_mul(mem::size_of::<T>()).unwrap();
+ let additional_bytes = additional.checked_mul(size_of::<T>()).unwrap();
available_bytes >= additional_bytes
}

#[inline]
fn alloc_raw_slice(&self, len: usize) -> *mut T {
- assert!(mem::size_of::<T>() != 0);
+ assert!(size_of::<T>() != 0);
assert!(len != 0);

// Ensure the current chunk can fit `len` objects.
@@ -213,7 +213,7 @@ impl<T> TypedArena<T> {
// So we collect all the elements beforehand, which takes care of reentrancy and panic
// safety. This function is much less hot than `DroplessArena::alloc_from_iter`, so it
// doesn't need to be hyper-optimized.
- assert!(mem::size_of::<T>() != 0);
+ assert!(size_of::<T>() != 0);

let mut vec: SmallVec<[_; 8]> = iter.into_iter().collect();
if vec.is_empty() {
@@ -236,7 +236,7 @@ impl<T> TypedArena<T> {
unsafe {
// We need the element size to convert chunk sizes (ranging from
// PAGE to HUGE_PAGE bytes) to element counts.
- let elem_size = cmp::max(1, mem::size_of::<T>());
+ let elem_size = cmp::max(1, size_of::<T>());
let mut chunks = self.chunks.borrow_mut();
let mut new_cap;
if let Some(last_chunk) = chunks.last_mut() {
@@ -246,7 +246,7 @@ impl<T> TypedArena<T> {
// FIXME: this should *likely* use `offset_from`, but more
// investigation is needed (including running tests in miri).
let used_bytes = self.ptr.get().addr() - last_chunk.start().addr();
- last_chunk.entries = used_bytes / mem::size_of::<T>();
+ last_chunk.entries = used_bytes / size_of::<T>();
}

// If the previous chunk's len is less than HUGE_PAGE
@@ -276,15 +276,15 @@ impl<T> TypedArena<T> {
let end = self.ptr.get().addr();
// We then calculate the number of elements to be dropped in the last chunk,
// which is the filled area's length.
- let diff = if mem::size_of::<T>() == 0 {
+ let diff = if size_of::<T>() == 0 {
// `T` is ZST. It can't have a drop flag, so the value here doesn't matter. We get
// the number of zero-sized values in the last and only chunk, just out of caution.
// Recall that `end` was incremented for each allocated value.
end - start
} else {
// FIXME: this should *likely* use `offset_from`, but more
// investigation is needed (including running tests in miri).
- (end - start) / mem::size_of::<T>()
+ (end - start) / size_of::<T>()
};
// Pass that to the `destroy` method.
unsafe {
@@ -329,7 +329,7 @@ fn align_up(val: usize, align: usize) -> usize {

// Pointer alignment is common in compiler types, so keep `DroplessArena` aligned to them
// to optimize away alignment code.
- const DROPLESS_ALIGNMENT: usize = mem::align_of::<usize>();
+ const DROPLESS_ALIGNMENT: usize = align_of::<usize>();

/// An arena that can hold objects of multiple different types that impl `Copy`
/// and/or satisfy `!mem::needs_drop`.
@@ -447,7 +447,7 @@ impl DroplessArena {
#[inline]
pub fn alloc<T>(&self, object: T) -> &mut T {
assert!(!mem::needs_drop::<T>());
- assert!(mem::size_of::<T>() != 0);
+ assert!(size_of::<T>() != 0);

let mem = self.alloc_raw(Layout::new::<T>()) as *mut T;

@@ -471,7 +471,7 @@ impl DroplessArena {
T: Copy,
{
assert!(!mem::needs_drop::<T>());
- assert!(mem::size_of::<T>() != 0);
+ assert!(size_of::<T>() != 0);
assert!(!slice.is_empty());

let mem = self.alloc_raw(Layout::for_value::<[T]>(slice)) as *mut T;
@@ -546,7 +546,7 @@ impl DroplessArena {
// Warning: this function is reentrant: `iter` could hold a reference to `&self` and
// allocate additional elements while we're iterating.
let iter = iter.into_iter();
- assert!(mem::size_of::<T>() != 0);
+ assert!(size_of::<T>() != 0);
assert!(!mem::needs_drop::<T>());

let size_hint = iter.size_hint();
6 changes: 1 addition & 5 deletions compiler/rustc_codegen_gcc/src/builder.rs
@@ -2439,9 +2439,5 @@ fn get_maybe_pointer_size(value: RValue<'_>) -> u32 {
#[cfg(not(feature = "master"))]
fn get_maybe_pointer_size(value: RValue<'_>) -> u32 {
let type_ = value.get_type();
- if type_.get_pointee().is_some() {
-     std::mem::size_of::<*const ()>() as _
- } else {
-     type_.get_size()
- }
+ if type_.get_pointee().is_some() { size_of::<*const ()>() as _ } else { type_.get_size() }
}
2 changes: 1 addition & 1 deletion compiler/rustc_codegen_ssa/src/back/link.rs
@@ -1177,7 +1177,7 @@ mod win {
let mut cp: u32 = 0;
// We're using the `LOCALE_RETURN_NUMBER` flag to return a u32.
// But the API requires us to pass the data as though it's a [u16] string.
- let len = std::mem::size_of::<u32>() / std::mem::size_of::<u16>();
+ let len = size_of::<u32>() / size_of::<u16>();
let data = std::slice::from_raw_parts_mut(&mut cp as *mut u32 as *mut u16, len);
let len_written = GetLocaleInfoEx(
LOCALE_NAME_SYSTEM_DEFAULT,
8 changes: 3 additions & 5 deletions compiler/rustc_data_structures/src/aligned.rs
@@ -2,10 +2,8 @@ use std::ptr::Alignment;

/// Returns the ABI-required minimum alignment of a type in bytes.
///
- /// This is equivalent to [`mem::align_of`], but also works for some unsized
+ /// This is equivalent to [`align_of`], but also works for some unsized
/// types (e.g. slices or rustc's `List`s).
- ///
- /// [`mem::align_of`]: std::mem::align_of
pub const fn align_of<T: ?Sized + Aligned>() -> Alignment {
T::ALIGN
}
@@ -15,10 +13,10 @@ pub const fn align_of<T: ?Sized + Aligned>() -> Alignment {
/// # Safety
///
/// `Self::ALIGN` must be equal to the alignment of `Self`. For sized types it
- /// is [`mem::align_of<Self>()`], for unsized types it depends on the type, for
+ /// is [`align_of::<Self>()`], for unsized types it depends on the type, for
/// example `[T]` has alignment of `T`.
///
- /// [`mem::align_of<Self>()`]: std::mem::align_of
+ /// [`align_of::<Self>()`]: align_of
pub unsafe trait Aligned {
/// Alignment of `Self`.
const ALIGN: Alignment;

Review thread (on the deleted `mem::align_of<Self>()` doc line):
  Member: put turbofish here
  Contributor Author: I did add a turbofish. It's now align_of::<Self>(). You replied on the deleted line, so maybe you missed that?
  Member: whoops
6 changes: 2 additions & 4 deletions compiler/rustc_data_structures/src/profiling.rs
@@ -863,15 +863,13 @@ fn get_thread_id() -> u32 {
cfg_match! {
windows => {
pub fn get_resident_set_size() -> Option<usize> {
- use std::mem;
-
use windows::{
Win32::System::ProcessStatus::{K32GetProcessMemoryInfo, PROCESS_MEMORY_COUNTERS},
Win32::System::Threading::GetCurrentProcess,
};

let mut pmc = PROCESS_MEMORY_COUNTERS::default();
- let pmc_size = mem::size_of_val(&pmc);
+ let pmc_size = size_of_val(&pmc);
unsafe {
K32GetProcessMemoryInfo(
GetCurrentProcess(),
@@ -889,7 +887,7 @@ cfg_match! {
pub fn get_resident_set_size() -> Option<usize> {
use libc::{c_int, c_void, getpid, proc_pidinfo, proc_taskinfo, PROC_PIDTASKINFO};
use std::mem;
- const PROC_TASKINFO_SIZE: c_int = mem::size_of::<proc_taskinfo>() as c_int;
+ const PROC_TASKINFO_SIZE: c_int = size_of::<proc_taskinfo>() as c_int;

unsafe {
let mut info: proc_taskinfo = mem::zeroed();
4 changes: 2 additions & 2 deletions compiler/rustc_data_structures/src/sharded.rs
@@ -1,7 +1,7 @@
use std::borrow::Borrow;
use std::collections::hash_map::RawEntryMut;
use std::hash::{Hash, Hasher};
- use std::{iter, mem};
+ use std::iter;

use either::Either;

@@ -221,7 +221,7 @@ pub fn make_hash<K: Hash + ?Sized>(val: &K) -> u64 {
/// consistently for each `Sharded` instance.
#[inline]
fn get_shard_hash(hash: u64) -> usize {
- let hash_len = mem::size_of::<usize>();
+ let hash_len = size_of::<usize>();
// Ignore the top 7 bits as hashbrown uses these and get the next SHARD_BITS highest bits.
// hashbrown also uses the lowest bits, so we can't use those
(hash >> (hash_len * 8 - 7 - SHARD_BITS)) as usize
2 changes: 1 addition & 1 deletion compiler/rustc_errors/src/diagnostic.rs
@@ -490,7 +490,7 @@ pub struct Diag<'a, G: EmissionGuarantee = ErrorGuaranteed> {
// would be bad.
impl<G> !Clone for Diag<'_, G> {}

- rustc_data_structures::static_assert_size!(Diag<'_, ()>, 3 * std::mem::size_of::<usize>());
+ rustc_data_structures::static_assert_size!(Diag<'_, ()>, 3 * size_of::<usize>());

impl<G: EmissionGuarantee> Deref for Diag<'_, G> {
type Target = DiagInner;
4 changes: 2 additions & 2 deletions compiler/rustc_hir/src/def.rs
@@ -429,7 +429,7 @@ pub enum Res<Id = hir::HirId> {
/// mention any generic parameters to allow the following with `min_const_generics`:
/// ```
/// # struct Foo;
- /// impl Foo { fn test() -> [u8; std::mem::size_of::<Self>()] { todo!() } }
+ /// impl Foo { fn test() -> [u8; size_of::<Self>()] { todo!() } }
///
/// struct Bar([u8; baz::<Self>()]);
/// const fn baz<T>() -> usize { 10 }
@@ -439,7 +439,7 @@ pub enum Res<Id = hir::HirId> {
/// compat lint:
/// ```
/// fn foo<T>() {
- /// let _bar = [1_u8; std::mem::size_of::<*mut T>()];
+ /// let _bar = [1_u8; size_of::<*mut T>()];
/// }
/// ```
// FIXME(generic_const_exprs): Remove this bodge once that feature is stable.
2 changes: 1 addition & 1 deletion compiler/rustc_incremental/src/persist/file_format.rs
@@ -123,7 +123,7 @@ pub(crate) fn read_file(

// Check HEADER_FORMAT_VERSION
{
- debug_assert!(::std::mem::size_of_val(&HEADER_FORMAT_VERSION) == 2);
+ debug_assert!(size_of_val(&HEADER_FORMAT_VERSION) == 2);
let mut header_format_version = [0u8; 2];
file.read_exact(&mut header_format_version)?;
let header_format_version =
6 changes: 4 additions & 2 deletions compiler/rustc_index/src/bit_set.rs
@@ -1,7 +1,9 @@
use std::marker::PhantomData;
+ #[cfg(not(feature = "nightly"))]
+ use std::mem;
use std::ops::{BitAnd, BitAndAssign, BitOrAssign, Bound, Not, Range, RangeBounds, Shl};
use std::rc::Rc;
- use std::{fmt, iter, mem, slice};
+ use std::{fmt, iter, slice};

use Chunk::*;
#[cfg(feature = "nightly")]
@@ -14,7 +16,7 @@ use crate::{Idx, IndexVec};
mod tests;

type Word = u64;
- const WORD_BYTES: usize = mem::size_of::<Word>();
+ const WORD_BYTES: usize = size_of::<Word>();
const WORD_BITS: usize = WORD_BYTES * 8;

// The choice of chunk size has some trade-offs.
2 changes: 0 additions & 2 deletions compiler/rustc_index/src/vec/tests.rs
@@ -9,8 +9,6 @@ crate::newtype_index! {

#[test]
fn index_size_is_optimized() {
- use std::mem::size_of;
-
assert_eq!(size_of::<MyIdx>(), 4);
// Uses 0xFFFF_FFFB
assert_eq!(size_of::<Option<MyIdx>>(), 4);
2 changes: 1 addition & 1 deletion compiler/rustc_lint_defs/src/builtin.rs
@@ -2711,7 +2711,7 @@ declare_lint! {
///
/// ```rust
/// const fn foo<T>() -> usize {
- /// if std::mem::size_of::<*mut T>() < 8 { // size of *mut T does not depend on T
+ /// if size_of::<*mut T>() < 8 { // size of *mut T does not depend on T
/// 4
/// } else {
/// 8
@@ -223,8 +223,8 @@ impl<D: TyDecoder> Decodable<D> for InitMaskMaterialized {
// large.
impl hash::Hash for InitMaskMaterialized {
fn hash<H: hash::Hasher>(&self, state: &mut H) {
- const MAX_BLOCKS_TO_HASH: usize = super::MAX_BYTES_TO_HASH / std::mem::size_of::<Block>();
- const MAX_BLOCKS_LEN: usize = super::MAX_HASHED_BUFFER_LEN / std::mem::size_of::<Block>();
+ const MAX_BLOCKS_TO_HASH: usize = super::MAX_BYTES_TO_HASH / size_of::<Block>();
+ const MAX_BLOCKS_LEN: usize = super::MAX_HASHED_BUFFER_LEN / size_of::<Block>();

// Partially hash the `blocks` buffer when it is large. To limit collisions with common
// prefixes and suffixes, we hash the length and some slices of the buffer.
2 changes: 1 addition & 1 deletion compiler/rustc_middle/src/mir/interpret/mod.rs
@@ -573,7 +573,7 @@ pub fn write_target_uint(
#[inline]
pub fn read_target_uint(endianness: Endian, mut source: &[u8]) -> Result<u128, io::Error> {
// This u128 holds an "any-size uint" (since smaller uints can fits in it)
- let mut buf = [0u8; std::mem::size_of::<u128>()];
+ let mut buf = [0u8; size_of::<u128>()];
// So we do not read exactly 16 bytes into the u128, just the "payload".
let uint = match endianness {
Endian::Little => {
4 changes: 2 additions & 2 deletions compiler/rustc_middle/src/mir/mod.rs
@@ -332,13 +332,13 @@ pub struct Body<'tcx> {
///
/// ```rust
/// fn test<T>() {
- /// let _ = [0; std::mem::size_of::<*mut T>()];
+ /// let _ = [0; size_of::<*mut T>()];
/// }
/// ```
///
/// **WARNING**: Do not change this flags after the MIR was originally created, even if an optimization
/// removed the last mention of all generic params. We do not want to rely on optimizations and
- /// potentially allow things like `[u8; std::mem::size_of::<T>() * 0]` due to this.
+ /// potentially allow things like `[u8; size_of::<T>() * 0]` due to this.
pub is_polymorphic: bool,

/// The phase at which this MIR should be "injected" into the compilation process.
2 changes: 1 addition & 1 deletion compiler/rustc_middle/src/query/erase.rs
@@ -27,7 +27,7 @@ pub type Erase<T: EraseType> = Erased<impl Copy>;
pub fn erase<T: EraseType>(src: T) -> Erase<T> {
// Ensure the sizes match
const {
- if std::mem::size_of::<T>() != std::mem::size_of::<T::Result>() {
+ if size_of::<T>() != size_of::<T::Result>() {
panic!("size of T must match erased type T::Result")
}
};
4 changes: 2 additions & 2 deletions compiler/rustc_middle/src/query/plumbing.rs
@@ -370,7 +370,7 @@ macro_rules! define_callbacks {
// Increase this limit if necessary, but do try to keep the size low if possible
#[cfg(target_pointer_width = "64")]
const _: () = {
- if mem::size_of::<Key<'static>>() > 88 {
+ if size_of::<Key<'static>>() > 88 {
panic!("{}", concat!(
"the query `",
stringify!($name),
@@ -386,7 +386,7 @@ macro_rules! define_callbacks {
#[cfg(target_pointer_width = "64")]
#[cfg(not(feature = "rustc_randomized_layouts"))]
const _: () = {
- if mem::size_of::<Value<'static>>() > 64 {
+ if size_of::<Value<'static>>() > 64 {
panic!("{}", concat!(
"the query `",
stringify!($name),
4 changes: 2 additions & 2 deletions compiler/rustc_middle/src/ty/consts/int.rs
@@ -408,7 +408,7 @@ macro_rules! from_x_for_scalar_int {
fn from(u: $ty) -> Self {
Self {
data: u128::from(u),
- size: NonZero::new(std::mem::size_of::<$ty>() as u8).unwrap(),
+ size: NonZero::new(size_of::<$ty>() as u8).unwrap(),
}
}
}
@@ -424,7 +424,7 @@ macro_rules! from_scalar_int_for_x {
fn from(int: ScalarInt) -> Self {
// The `unwrap` cannot fail because to_bits (if it succeeds)
// is guaranteed to return a value that fits into the size.
- int.to_bits(Size::from_bytes(std::mem::size_of::<$ty>()))
+ int.to_bits(Size::from_bytes(size_of::<$ty>()))
.try_into().unwrap()
}
}
7 changes: 3 additions & 4 deletions compiler/rustc_middle/src/ty/generic_args.rs
@@ -2,7 +2,6 @@

use core::intrinsics;
use std::marker::PhantomData;
- use std::mem;
use std::num::NonZero;
use std::ptr::NonNull;

@@ -176,17 +175,17 @@ impl<'tcx> GenericArgKind<'tcx> {
let (tag, ptr) = match self {
GenericArgKind::Lifetime(lt) => {
// Ensure we can use the tag bits.
- assert_eq!(mem::align_of_val(&*lt.0.0) & TAG_MASK, 0);
+ assert_eq!(align_of_val(&*lt.0.0) & TAG_MASK, 0);
(REGION_TAG, NonNull::from(lt.0.0).cast())
}
GenericArgKind::Type(ty) => {
// Ensure we can use the tag bits.
- assert_eq!(mem::align_of_val(&*ty.0.0) & TAG_MASK, 0);
+ assert_eq!(align_of_val(&*ty.0.0) & TAG_MASK, 0);
(TYPE_TAG, NonNull::from(ty.0.0).cast())
}
GenericArgKind::Const(ct) => {
// Ensure we can use the tag bits.
- assert_eq!(mem::align_of_val(&*ct.0.0) & TAG_MASK, 0);
+ assert_eq!(align_of_val(&*ct.0.0) & TAG_MASK, 0);
(CONST_TAG, NonNull::from(ct.0.0).cast())
}
};