diff --git a/src/bootstrap/flags.rs b/src/bootstrap/flags.rs
index fb380af0a47e7..646b9e05d99c3 100644
--- a/src/bootstrap/flags.rs
+++ b/src/bootstrap/flags.rs
@@ -503,6 +503,20 @@ Arguments:
             }
         };
 
+        if let Subcommand::Check { .. } = &cmd {
+            if matches.opt_str("stage").is_some() {
+                println!("{}", "--stage not supported for x.py check, always treated as stage 0");
+                process::exit(1);
+            }
+            if matches.opt_str("keep-stage").is_some() {
+                println!("--keep-stage not supported for x.py check, only one stage available");
+                process::exit(1);
+            }
+        }
+
         Flags {
             verbose: matches.opt_count("verbose"),
             stage: matches.opt_str("stage").map(|j| j.parse().expect("`stage` should be a number")),
diff --git a/src/libarena/lib.rs b/src/libarena/lib.rs
index 0f0bd617f439c..bbe80c26dcbf9 100644
--- a/src/libarena/lib.rs
+++ b/src/libarena/lib.rs
@@ -5,8 +5,7 @@
 //! of individual objects while the arena itself is still alive. The benefit
 //! of an arena is very fast allocation; just a pointer bump.
 //!
-//! This crate implements `TypedArena`, a simple arena that can only hold
-//! objects of a single type.
+//! This crate implements several kinds of arena.
 
 #![doc(
     html_root_url = "https://doc.rust-lang.org/nightly/",
@@ -98,7 +97,13 @@ impl<T> TypedArenaChunk<T> {
     }
 }
 
+// The arenas start with PAGE-sized chunks, and then each new chunk is twice as
+// big as its predecessor, up until we reach HUGE_PAGE-sized chunks, whereupon
+// we stop growing. This scales well, from arenas that are barely used up to
+// arenas that are used for 100s of MiBs. Note also that the chosen sizes match
+// the usual sizes of pages and huge pages on Linux.
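+//
+// For example (illustrative, assuming 8-byte elements such as u64): chunk
+// capacities grow 512 -> 1024 -> ... -> 262144 elements, i.e. 4 KiB ->
+// 8 KiB -> ... -> 2 MiB, and then stay at 2 MiB.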
 const PAGE: usize = 4096;
+const HUGE_PAGE: usize = 2 * 1024 * 1024;
 
 impl<T> Default for TypedArena<T> {
     /// Creates a new `TypedArena`.
@@ -211,6 +216,9 @@ impl<T> TypedArena<T> {
     #[cold]
     fn grow(&self, n: usize) {
         unsafe {
+            // We need the element size to convert chunk sizes (ranging from
+            // PAGE to HUGE_PAGE bytes) into element counts.
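+            // (The max with 1 avoids a division by zero for zero-sized types.)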
+            let elem_size = cmp::max(1, mem::size_of::<T>());
             let mut chunks = self.chunks.borrow_mut();
             let (chunk, mut new_capacity);
             if let Some(last_chunk) = chunks.last_mut() {
@@ -221,18 +229,20 @@ impl<T> TypedArena<T> {
                     self.end.set(last_chunk.end());
                     return;
                 } else {
+                    // If the previous chunk's capacity is less than HUGE_PAGE
+                    // bytes, then this chunk will be at least double the
+                    // previous chunk's size.
                     new_capacity = last_chunk.storage.capacity();
-                    loop {
+                    if new_capacity < HUGE_PAGE / elem_size {
                         new_capacity = new_capacity.checked_mul(2).unwrap();
-                        if new_capacity >= currently_used_cap + n {
-                            break;
-                        }
                     }
                 }
             } else {
-                let elem_size = cmp::max(1, mem::size_of::<T>());
-                new_capacity = cmp::max(n, PAGE / elem_size);
+                new_capacity = PAGE / elem_size;
             }
+            // Also ensure that this chunk can fit `n`.
+            new_capacity = cmp::max(n, new_capacity);
+
             chunk = TypedArenaChunk::<T>::new(new_capacity);
             self.ptr.set(chunk.start());
             self.end.set(chunk.end());
@@ -347,17 +357,20 @@ impl DroplessArena {
                     self.end.set(last_chunk.end());
                     return;
                 } else {
+                    // If the previous chunk's capacity is less than HUGE_PAGE
+                    // bytes, then this chunk will be at least double the
+                    // previous chunk's size.
                     new_capacity = last_chunk.storage.capacity();
-                    loop {
+                    if new_capacity < HUGE_PAGE {
                         new_capacity = new_capacity.checked_mul(2).unwrap();
-                        if new_capacity >= used_bytes + needed_bytes {
-                            break;
-                        }
                     }
                 }
             } else {
-                new_capacity = cmp::max(needed_bytes, PAGE);
+                new_capacity = PAGE;
             }
+            // Also ensure that this chunk can fit `needed_bytes`.
+            new_capacity = cmp::max(needed_bytes, new_capacity);
+
             chunk = TypedArenaChunk::<u8>::new(new_capacity);
             self.ptr.set(chunk.start());
             self.end.set(chunk.end());
diff --git a/src/libcore/iter/range.rs b/src/libcore/iter/range.rs
index bb68184c8dd77..d74df82bddd9d 100644
--- a/src/libcore/iter/range.rs
+++ b/src/libcore/iter/range.rs
@@ -4,47 +4,182 @@ use crate::ops::{self, Add, Sub, Try};
 
 use super::{FusedIterator, TrustedLen};
 
-/// Objects that can be stepped over in both directions.
+/// Objects that have a notion of *successor* and *predecessor* operations.
 ///
-/// The `steps_between` function provides a way to efficiently compare
-/// two `Step` objects.
-#[unstable(
-    feature = "step_trait",
-    reason = "likely to be replaced by finer-grained traits",
-    issue = "42168"
-)]
-pub trait Step: Clone + PartialOrd + Sized {
-    /// Returns the number of steps between two step objects. The count is
-    /// inclusive of `start` and exclusive of `end`.
-    ///
-    /// Returns `None` if it is not possible to calculate `steps_between`
-    /// without overflow.
+/// The *successor* operation moves towards values that compare greater.
+/// The *predecessor* operation moves towards values that compare lesser.
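+///
+/// For example, for the primitive integer types the *successor* of `n` is
+/// `n + 1` and the *predecessor* of `n` is `n - 1`.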
+///
+/// # Safety
+///
+/// This trait is `unsafe` because its implementation must be correct for
+/// the safety of `unsafe trait TrustedLen` implementations, and the results
+/// of using this trait can otherwise be trusted by `unsafe` code to be correct
+/// and fulfill the listed obligations.
+#[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
+pub unsafe trait Step: Clone + PartialOrd + Sized {
+    /// Returns the number of *successor* steps required to get from `start` to `end`.
+    ///
+    /// Returns `None` if the number of steps would overflow `usize`
+    /// (or is infinite, or if `end` would never be reached).
+    ///
+    /// # Invariants
+    ///
+    /// For any `a`, `b`, and `n`:
+    ///
+    /// * `steps_between(&a, &b) == Some(n)` if and only if `Step::forward_checked(&a, n) == Some(b)`
+    /// * `steps_between(&a, &b) == Some(n)` if and only if `Step::backward_checked(&a, n) == Some(a)`
+    /// * `steps_between(&a, &b) == Some(n)` only if `a <= b`
+    ///   * Corollary: `steps_between(&a, &b) == Some(0)` if and only if `a == b`
+    ///   * Note that `a <= b` does _not_ imply `steps_between(&a, &b) != None`;
+    ///     this is the case when it would require more than `usize::MAX` steps to get to `b`
+    /// * `steps_between(&a, &b) == None` if `a > b`
     fn steps_between(start: &Self, end: &Self) -> Option<usize>;
 
-    /// Replaces this step with `1`, returning a clone of itself.
+    /// Returns the value that would be obtained by taking the *successor*
+    /// of `self` `count` times.
+    ///
+    /// If this would overflow the range of values supported by `Self`, returns `None`.
     ///
-    /// The output of this method should always be greater than the output of replace_zero.
-    fn replace_one(&mut self) -> Self;
+    /// # Invariants
+    ///
+    /// For any `a`, `n`, and `m`:
+    ///
+    /// * `Step::forward_checked(a, n).and_then(|x| Step::forward_checked(x, m)) == Step::forward_checked(a, m).and_then(|x| Step::forward_checked(x, n))`
+    ///
+    /// For any `a`, `n`, and `m` where `n + m` does not overflow:
+    ///
+    /// * `Step::forward_checked(a, n).and_then(|x| Step::forward_checked(x, m)) == Step::forward_checked(a, n + m)`
+    ///
+    /// For any `a` and `n`:
+    ///
+    /// * `Step::forward_checked(a, n) == (0..n).try_fold(a, |x, _| Step::forward_checked(&x, 1))`
+    ///   * Corollary: `Step::forward_checked(&a, 0) == Some(a)`
+    #[unstable(feature = "step_trait_ext", reason = "recently added", issue = "42168")]
+    fn forward_checked(start: Self, count: usize) -> Option<Self>;
 
-    /// Replaces this step with `0`, returning a clone of itself.
+    /// Returns the value that would be obtained by taking the *successor*
+    /// of `self` `count` times.
+    ///
+    /// If this would overflow the range of values supported by `Self`,
+    /// this function is allowed to panic, wrap, or saturate.
+    /// The suggested behavior is to panic when debug assertions are enabled,
+    /// and to wrap or saturate otherwise.
     ///
-    /// The output of this method should always be less than the output of replace_one.
-    fn replace_zero(&mut self) -> Self;
+    /// Unsafe code should not rely on the correctness of behavior after overflow.
+    ///
+    /// # Invariants
+    ///
+    /// For any `a`, `n`, and `m`, where no overflow occurs:
+    ///
+    /// * `Step::forward(Step::forward(a, n), m) == Step::forward(a, n + m)`
+    ///
+    /// For any `a` and `n`, where no overflow occurs:
+    ///
+    /// * `Step::forward_checked(a, n) == Some(Step::forward(a, n))`
+    /// * `Step::forward(a, n) == (0..n).fold(a, |x, _| Step::forward(x, 1))`
+    ///   * Corollary: `Step::forward(a, 0) == a`
+    /// * `Step::forward(a, n) >= a`
+    /// * `Step::backward(Step::forward(a, n), n) == a`
+    #[unstable(feature = "step_trait_ext", reason = "recently added", issue = "42168")]
+    fn forward(start: Self, count: usize) -> Self {
+        Step::forward_checked(start, count).expect("overflow in `Step::forward`")
+    }
 
-    /// Adds one to this step, returning the result.
-    fn add_one(&self) -> Self;
+    /// Returns the value that would be obtained by taking the *successor*
+    /// of `self` `count` times.
+    ///
+    /// # Safety
+    ///
+    /// It is undefined behavior for this operation to overflow the
+    /// range of values supported by `Self`. If you cannot guarantee that this
+    /// will not overflow, use `forward` or `forward_checked` instead.
+    ///
+    /// # Invariants
+    ///
+    /// For any `a`:
+    ///
+    /// * if there exists `b` such that `b > a`, it is safe to call `Step::forward_unchecked(a, 1)`
+    /// * if there exists `b`, `n` such that `steps_between(&a, &b) == Some(n)`,
+    ///   it is safe to call `Step::forward_unchecked(a, m)` for any `m <= n`.
+    ///
+    /// For any `a` and `n`, where no overflow occurs:
+    ///
+    /// * `Step::forward_unchecked(a, n)` is equivalent to `Step::forward(a, n)`
+    #[unstable(feature = "unchecked_math", reason = "niche optimization path", issue = "none")]
+    unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
+        Step::forward(start, count)
+    }
 
-    /// Subtracts one to this step, returning the result.
-    fn sub_one(&self) -> Self;
+    /// Returns the value that would be obtained by taking the *predecessor*
+    /// of `self` `count` times.
+    ///
+    /// If this would overflow the range of values supported by `Self`, returns `None`.
+    ///
+    /// # Invariants
+    ///
+    /// For any `a`, `n`, and `m`:
+    ///
+    /// * `Step::backward_checked(a, n).and_then(|x| Step::backward_checked(x, m)) == n.checked_add(m).and_then(|x| Step::backward_checked(a, x))`
+    /// * `Step::backward_checked(a, n).and_then(|x| Step::backward_checked(x, m)) == try { Step::backward_checked(a, n.checked_add(m)?) }`
+    ///
+    /// For any `a` and `n`:
+    ///
+    /// * `Step::backward_checked(a, n) == (0..n).try_fold(a, |x, _| Step::backward_checked(&x, 1))`
+    ///   * Corollary: `Step::backward_checked(&a, 0) == Some(a)`
+    #[unstable(feature = "step_trait_ext", reason = "recently added", issue = "42168")]
+    fn backward_checked(start: Self, count: usize) -> Option<Self>;
 
-    /// Adds a `usize`, returning `None` on overflow.
-    fn add_usize(&self, n: usize) -> Option<Self>;
+    /// Returns the value that would be obtained by taking the *predecessor*
+    /// of `self` `count` times.
+    ///
+    /// If this would overflow the range of values supported by `Self`,
+    /// this function is allowed to panic, wrap, or saturate.
+    /// The suggested behavior is to panic when debug assertions are enabled,
+    /// and to wrap or saturate otherwise.
+    ///
+    /// Unsafe code should not rely on the correctness of behavior after overflow.
+    ///
+    /// # Invariants
+    ///
+    /// For any `a`, `n`, and `m`, where no overflow occurs:
+    ///
+    /// * `Step::backward(Step::backward(a, n), m) == Step::backward(a, n + m)`
+    ///
+    /// For any `a` and `n`, where no overflow occurs:
+    ///
+    /// * `Step::backward_checked(a, n) == Some(Step::backward(a, n))`
+    /// * `Step::backward(a, n) == (0..n).fold(a, |x, _| Step::backward(x, 1))`
+    ///   * Corollary: `Step::backward(a, 0) == a`
+    /// * `Step::backward(a, n) <= a`
+    /// * `Step::forward(Step::backward(a, n), n) == a`
+    #[unstable(feature = "step_trait_ext", reason = "recently added", issue = "42168")]
+    fn backward(start: Self, count: usize) -> Self {
+        Step::backward_checked(start, count).expect("overflow in `Step::backward`")
+    }
 
-    /// Subtracts a `usize`, returning `None` on underflow.
-    fn sub_usize(&self, n: usize) -> Option<Self> {
-        // this default implementation makes the addition of `sub_usize` a non-breaking change
-        let _ = n;
-        unimplemented!()
+    /// Returns the value that would be obtained by taking the *predecessor*
+    /// of `self` `count` times.
+    ///
+    /// # Safety
+    ///
+    /// It is undefined behavior for this operation to overflow the
+    /// range of values supported by `Self`. If you cannot guarantee that this
+    /// will not overflow, use `backward` or `backward_checked` instead.
+    ///
+    /// # Invariants
+    ///
+    /// For any `a`:
+    ///
+    /// * if there exists `b` such that `b < a`, it is safe to call `Step::backward_unchecked(a, 1)`
+    /// * if there exists `b`, `n` such that `steps_between(&b, &a) == Some(n)`,
+    ///   it is safe to call `Step::backward_unchecked(a, m)` for any `m <= n`.
+    ///
+    /// For any `a` and `n`, where no overflow occurs:
+    ///
+    /// * `Step::backward_unchecked(a, n)` is equivalent to `Step::backward(a, n)`
+    #[unstable(feature = "unchecked_math", reason = "niche optimization path", issue = "none")]
+    unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
+        Step::backward(start, count)
     }
 }
 
@@ -52,127 +187,218 @@ pub trait Step: Clone + PartialOrd + Sized {
 macro_rules! step_identical_methods {
     () => {
         #[inline]
-        fn replace_one(&mut self) -> Self {
-            mem::replace(self, 1)
+        unsafe fn forward_unchecked(start: Self, n: usize) -> Self {
+            start.unchecked_add(n as Self)
         }
 
         #[inline]
-        fn replace_zero(&mut self) -> Self {
-            mem::replace(self, 0)
+        unsafe fn backward_unchecked(start: Self, n: usize) -> Self {
+            start.unchecked_sub(n as Self)
         }
 
         #[inline]
-        fn add_one(&self) -> Self {
-            Add::add(*self, 1)
+        fn forward(start: Self, n: usize) -> Self {
+            // In debug builds, trigger a panic on overflow.
+            // This should optimize completely out in release builds.
+            if Self::forward_checked(start, n).is_none() {
+                let _ = Add::add(Self::MAX, 1);
+            }
+            // Do wrapping math to allow e.g. `Step::forward(-128i8, 255)`.
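+            // (Illustrative: -128i8 wrapping_add (255 as i8 == -1i8) gives 127,
+            // the correct result of 255 successor steps from -128.)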
+            start.wrapping_add(n as Self)
         }
 
         #[inline]
-        fn sub_one(&self) -> Self {
-            Sub::sub(*self, 1)
+        fn backward(start: Self, n: usize) -> Self {
+            // In debug builds, trigger a panic on overflow.
+            // This should optimize completely out in release builds.
+            if Self::backward_checked(start, n).is_none() {
+                let _ = Sub::sub(Self::MIN, 1);
+            }
+            // Do wrapping math to allow e.g. `Step::backward(127i8, 255)`.
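+            // (Illustrative: 127i8 wrapping_sub (255 as i8 == -1i8) gives -128,
+            // the correct result of 255 predecessor steps from 127.)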
+            start.wrapping_sub(n as Self)
         }
     };
 }
 
-macro_rules! step_impl_unsigned {
-    ($($t:ty)*) => ($(
-        #[unstable(feature = "step_trait",
-                   reason = "likely to be replaced by finer-grained traits",
-                   issue = "42168")]
-        impl Step for $t {
-            #[inline]
-            fn steps_between(start: &$t, end: &$t) -> Option<usize> {
-                if *start < *end {
-                    usize::try_from(*end - *start).ok()
-                } else {
-                    Some(0)
+macro_rules! step_integer_impls {
+    {
+        narrower than or same width as usize:
+            $( [ $u_narrower:ident $i_narrower:ident ] ),+;
+        wider than usize:
+            $( [ $u_wider:ident $i_wider:ident ] ),+;
+    } => {
+        $(
+            #[allow(unreachable_patterns)]
+            #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
+            unsafe impl Step for $u_narrower {
+                step_identical_methods!();
+
+                #[inline]
+                fn steps_between(start: &Self, end: &Self) -> Option<usize> {
+                    if *start <= *end {
+                        // This relies on $u_narrower <= usize
+                        Some((*end - *start) as usize)
+                    } else {
+                        None
+                    }
                 }
-            }
 
-            #[inline]
-            #[allow(unreachable_patterns)]
-            fn add_usize(&self, n: usize) -> Option<Self> {
-                match <$t>::try_from(n) {
-                    Ok(n_as_t) => self.checked_add(n_as_t),
-                    Err(_) => None,
+                #[inline]
+                fn forward_checked(start: Self, n: usize) -> Option<Self> {
+                    match Self::try_from(n) {
+                        Ok(n) => start.checked_add(n),
+                        Err(_) => None, // if n is out of range, `unsigned_start + n` is too
+                    }
+                }
+
+                #[inline]
+                fn backward_checked(start: Self, n: usize) -> Option<Self> {
+                    match Self::try_from(n) {
+                        Ok(n) => start.checked_sub(n),
+                        Err(_) => None, // if n is out of range, `unsigned_start - n` is too
+                    }
                 }
             }
 
-            #[inline]
             #[allow(unreachable_patterns)]
-            fn sub_usize(&self, n: usize) -> Option<Self> {
-                match <$t>::try_from(n) {
-                    Ok(n_as_t) => self.checked_sub(n_as_t),
-                    Err(_) => None,
+            #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
+            unsafe impl Step for $i_narrower {
+                step_identical_methods!();
+
+                #[inline]
+                fn steps_between(start: &Self, end: &Self) -> Option<usize> {
+                    if *start <= *end {
+                        // This relies on $i_narrower <= usize
+                        //
+                        // Casting to isize extends the width but preserves the sign.
+                        // Use wrapping_sub in isize space and cast to usize to compute
+                        // the difference that may not fit inside the range of isize.
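+                        //
+                        // For example (illustrative): for i8, steps_between(&-128, &127)
+                        // computes 127 - (-128) = 255 in isize space, which fits in
+                        // usize even though it overflows i8.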
+                        Some((*end as isize).wrapping_sub(*start as isize) as usize)
+                    } else {
+                        None
+                    }
                 }
-            }
 
-            step_identical_methods!();
-        }
-    )*)
-}
-macro_rules! step_impl_signed {
-    ($( [$t:ty : $unsigned:ty] )*) => ($(
-        #[unstable(feature = "step_trait",
-                   reason = "likely to be replaced by finer-grained traits",
-                   issue = "42168")]
-        impl Step for $t {
-            #[inline]
-            fn steps_between(start: &$t, end: &$t) -> Option<usize> {
-                if *start < *end {
-                    // Use .wrapping_sub and cast to unsigned to compute the
-                    // difference that may not fit inside the range of $t.
-                    usize::try_from(end.wrapping_sub(*start) as $unsigned).ok()
-                } else {
-                    Some(0)
+                #[inline]
+                fn forward_checked(start: Self, n: usize) -> Option<Self> {
+                    match $u_narrower::try_from(n) {
+                        Ok(n) => {
+                            // Wrapping handles cases like
+                            // `Step::forward(-120_i8, 200) == Some(80_i8)`,
+                            // even though 200 is out of range for i8.
+                            let wrapped = start.wrapping_add(n as Self);
+                            if wrapped >= start {
+                                Some(wrapped)
+                            } else {
+                                None // Addition overflowed
+                            }
+                        }
+                        // If n is out of range of e.g. u8,
+                        // then it is larger than the entire range of i8,
+                        // so `any_i8 + n` necessarily overflows i8.
+                        Err(_) => None,
+                    }
+                }
+
+                #[inline]
+                fn backward_checked(start: Self, n: usize) -> Option<Self> {
+                    match $u_narrower::try_from(n) {
+                        Ok(n) => {
+                            // Wrapping handles cases like
+                            // `Step::forward(-120_i8, 200) == Some(80_i8)`,
+                            // even though 200 is out of range for i8.
+                            let wrapped = start.wrapping_sub(n as Self);
+                            if wrapped <= start {
+                                Some(wrapped)
+                            } else {
+                                None // Subtraction overflowed
+                            }
+                        }
+                        // If n is out of range of e.g. u8,
+                        // then it is larger than the entire range of i8,
+                        // so `any_i8 - n` necessarily overflows i8.
+                        Err(_) => None,
+                    }
                 }
             }
+        )+
 
-            #[inline]
+        $(
             #[allow(unreachable_patterns)]
-            fn add_usize(&self, n: usize) -> Option<Self> {
-                match <$unsigned>::try_from(n) {
-                    Ok(n_as_unsigned) => {
-                        // Wrapping in unsigned space handles cases like
-                        // `-120_i8.add_usize(200) == Some(80_i8)`,
-                        // even though 200_usize is out of range for i8.
-                        let wrapped = (*self as $unsigned).wrapping_add(n_as_unsigned) as $t;
-                        if wrapped >= *self {
-                            Some(wrapped)
-                        } else {
-                            None  // Addition overflowed
-                        }
+            #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
+            unsafe impl Step for $u_wider {
+                step_identical_methods!();
+
+                #[inline]
+                fn steps_between(start: &Self, end: &Self) -> Option<usize> {
+                    if *start <= *end {
+                        usize::try_from(*end - *start).ok()
+                    } else {
+                        None
                     }
-                    Err(_) => None,
+                }
+
+                #[inline]
+                fn forward_checked(start: Self, n: usize) -> Option<Self> {
+                    start.checked_add(n as Self)
+                }
+
+                #[inline]
+                fn backward_checked(start: Self, n: usize) -> Option<Self> {
+                    start.checked_sub(n as Self)
                 }
             }
 
-            #[inline]
             #[allow(unreachable_patterns)]
-            fn sub_usize(&self, n: usize) -> Option<Self> {
-                match <$unsigned>::try_from(n) {
-                    Ok(n_as_unsigned) => {
-                        // Wrapping in unsigned space handles cases like
-                        // `80_i8.sub_usize(200) == Some(-120_i8)`,
-                        // even though 200_usize is out of range for i8.
-                        let wrapped = (*self as $unsigned).wrapping_sub(n_as_unsigned) as $t;
-                        if wrapped <= *self {
-                            Some(wrapped)
-                        } else {
-                            None  // Subtraction underflowed
+            #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
+            unsafe impl Step for $i_wider {
+                step_identical_methods!();
+
+                #[inline]
+                fn steps_between(start: &Self, end: &Self) -> Option<usize> {
+                    if *start <= *end {
+                        match end.checked_sub(*start) {
+                            Some(result) => usize::try_from(result).ok(),
+                            // If the difference is too big for e.g. i128,
+                            // it's also too big for usize, which has fewer bits.
+                            None => None,
                         }
+                    } else {
+                        None
                     }
-                    Err(_) => None,
+                }
+
+                #[inline]
+                fn forward_checked(start: Self, n: usize) -> Option<Self> {
+                    start.checked_add(n as Self)
+                }
+
+                #[inline]
+                fn backward_checked(start: Self, n: usize) -> Option<Self> {
+                    start.checked_sub(n as Self)
                 }
             }
+        )+
+    };
+}
 
-            step_identical_methods!();
-        }
-    )*)
+#[cfg(target_pointer_width = "64")]
+step_integer_impls! {
+    narrower than or same width as usize: [u8 i8], [u16 i16], [u32 i32], [u64 i64], [usize isize];
+    wider than usize: [u128 i128];
 }
 
-step_impl_unsigned!(usize u8 u16 u32 u64 u128);
-step_impl_signed!([isize: usize][i8: u8][i16: u16]);
-step_impl_signed!([i32: u32][i64: u64][i128: u128]);
+#[cfg(target_pointer_width = "32")]
+step_integer_impls! {
+    narrower than or same width as usize: [u8 i8], [u16 i16], [u32 i32], [usize isize];
+    wider than usize: [u64 i64], [u128 i128];
+}
+
+#[cfg(target_pointer_width = "16")]
+step_integer_impls! {
+    narrower than or same width as usize: [u8 i8], [u16 i16], [usize isize];
+    wider than usize: [u32 i32], [u64 i64], [u128 i128];
+}
 
 macro_rules! range_exact_iter_impl {
     ($($t:ty)*) => ($(
@@ -188,20 +414,6 @@ macro_rules! range_incl_exact_iter_impl {
     )*)
 }
 
-macro_rules! range_trusted_len_impl {
-    ($($t:ty)*) => ($(
-        #[unstable(feature = "trusted_len", issue = "37572")]
-        unsafe impl TrustedLen for ops::Range<$t> { }
-    )*)
-}
-
-macro_rules! range_incl_trusted_len_impl {
-    ($($t:ty)*) => ($(
-        #[unstable(feature = "trusted_len", issue = "37572")]
-        unsafe impl TrustedLen for ops::RangeInclusive<$t> { }
-    )*)
-}
-
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<A: Step> Iterator for ops::Range<A> {
     type Item = A;
@@ -209,16 +421,12 @@ impl<A: Step> Iterator for ops::Range<A> {
     #[inline]
     fn next(&mut self) -> Option<A> {
         if self.start < self.end {
-            // We check for overflow here, even though it can't actually
-            // happen. Adding this check does however help llvm vectorize loops
-            // for some ranges that don't get vectorized otherwise,
-            // and this won't actually result in an extra check in an optimized build.
-            if let Some(mut n) = self.start.add_usize(1) {
-                mem::swap(&mut n, &mut self.start);
-                Some(n)
-            } else {
-                None
-            }
+            // SAFETY: just checked precondition
+            // We use the unchecked version here, because
+            // this helps LLVM vectorize loops for some ranges
+            // that don't get vectorized otherwise.
+            let n = unsafe { Step::forward_unchecked(self.start.clone(), 1) };
+            Some(mem::replace(&mut self.start, n))
         } else {
             None
         }
@@ -226,17 +434,19 @@ impl<A: Step> Iterator for ops::Range<A> {
 
     #[inline]
     fn size_hint(&self) -> (usize, Option<usize>) {
-        match Step::steps_between(&self.start, &self.end) {
-            Some(hint) => (hint, Some(hint)),
-            None => (usize::MAX, None),
+        if self.start < self.end {
+            let hint = Step::steps_between(&self.start, &self.end);
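+            // steps_between returns None when the length overflows usize;
+            // in that case report usize::MAX as the lower bound and no upper bound.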
+            (hint.unwrap_or(usize::MAX), hint)
+        } else {
+            (0, Some(0))
         }
     }
 
     #[inline]
     fn nth(&mut self, n: usize) -> Option<A> {
-        if let Some(plus_n) = self.start.add_usize(n) {
+        if let Some(plus_n) = Step::forward_checked(self.start.clone(), n) {
             if plus_n < self.end {
-                self.start = plus_n.add_one();
+                self.start = Step::forward(plus_n.clone(), 1);
                 return Some(plus_n);
             }
         }
@@ -262,25 +472,42 @@ impl<A: Step> Iterator for ops::Range<A> {
 }
 
 // These macros generate `ExactSizeIterator` impls for various range types.
-// Range<{u,i}64> and RangeInclusive<{u,i}{32,64,size}> are excluded
-// because they cannot guarantee having a length <= usize::MAX, which is
-// required by ExactSizeIterator.
-range_exact_iter_impl!(usize u8 u16 u32 isize i8 i16 i32);
-range_incl_exact_iter_impl!(u8 u16 i8 i16);
-
-// These macros generate `TrustedLen` impls.
 //
-// They need to guarantee that .size_hint() is either exact, or that
-// the upper bound is None when it does not fit the type limits.
-range_trusted_len_impl!(usize isize u8 i8 u16 i16 u32 i32 u64 i64 u128 i128);
-range_incl_trusted_len_impl!(usize isize u8 i8 u16 i16 u32 i32 u64 i64 u128 i128);
+// * `ExactSizeIterator::len` is required to always return an exact `usize`,
+//   so no range can be longer than `usize::MAX`.
+// * For integer types in `Range<_>` this is the case for types narrower than or as wide as `usize`.
+//   For integer types in `RangeInclusive<_>`
+//   this is the case for types *strictly narrower* than `usize`
+//   since e.g. `(0..=u64::MAX).len()` would be `u64::MAX + 1`.
+range_exact_iter_impl! {
+    usize u8 u16
+    isize i8 i16
+
+    // These are incorrect per the reasoning above,
+    // but removing them would be a breaking change as they were stabilized in Rust 1.0.0.
+    // So e.g. `(0..66_000_u32).len()` will compile without errors or warnings
+    // on 16-bit platforms, but continue to give a wrong result.
+    u32
+    i32
+}
+range_incl_exact_iter_impl! {
+    u8
+    i8
+
+    // These are incorrect per the reasoning above,
+    // but removing them would be a breaking change as they were stabilized in Rust 1.26.0.
+    // So e.g. `(0..=u16::MAX).len()` will compile without errors or warnings
+    // on 16-bit platforms, but continue to give a wrong result.
+    u16
+    i16
+}
 
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<A: Step> DoubleEndedIterator for ops::Range<A> {
     #[inline]
     fn next_back(&mut self) -> Option<A> {
         if self.start < self.end {
-            self.end = self.end.sub_one();
+            self.end = Step::backward(self.end.clone(), 1);
             Some(self.end.clone())
         } else {
             None
@@ -289,9 +516,9 @@ impl<A: Step> DoubleEndedIterator for ops::Range<A> {
 
     #[inline]
     fn nth_back(&mut self, n: usize) -> Option<A> {
-        if let Some(minus_n) = self.end.sub_usize(n) {
+        if let Some(minus_n) = Step::backward_checked(self.end.clone(), n) {
             if minus_n > self.start {
-                self.end = minus_n.sub_one();
+                self.end = Step::backward(minus_n, 1);
                 return Some(self.end.clone());
             }
         }
@@ -301,6 +528,9 @@ impl<A: Step> DoubleEndedIterator for ops::Range<A> {
     }
 }
 
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<A: Step> TrustedLen for ops::Range<A> {}
+
 #[stable(feature = "fused", since = "1.26.0")]
 impl<A: Step> FusedIterator for ops::Range<A> {}
 
@@ -310,9 +540,8 @@ impl<A: Step> Iterator for ops::RangeFrom<A> {
 
     #[inline]
     fn next(&mut self) -> Option<A> {
-        let mut n = self.start.add_one();
-        mem::swap(&mut n, &mut self.start);
-        Some(n)
+        let n = Step::forward(self.start.clone(), 1);
+        Some(mem::replace(&mut self.start, n))
     }
 
     #[inline]
@@ -322,8 +551,16 @@ impl<A: Step> Iterator for ops::RangeFrom<A> {
 
     #[inline]
     fn nth(&mut self, n: usize) -> Option<A> {
-        let plus_n = self.start.add_usize(n).expect("overflow in RangeFrom::nth");
-        self.start = plus_n.add_one();
+        // If we would jump over the maximum value, panic immediately.
+        // This is consistent with behavior before the Step redesign,
+        // even though it's inconsistent with n `next` calls.
+        // To get consistent behavior, change it to use `forward` instead.
+        // That change should go through FCP separately from this redesign,
+        // so for now it is left as a FIXME: make this consistent.
+        let plus_n =
+            Step::forward_checked(self.start.clone(), n).expect("overflow in RangeFrom::nth");
+        // The final step should always be debug-checked.
+        self.start = Step::forward(plus_n.clone(), 1);
         Some(plus_n)
     }
 }
@@ -345,7 +582,7 @@ impl<A: Step> Iterator for ops::RangeInclusive<A> {
         }
         let is_iterating = self.start < self.end;
         Some(if is_iterating {
-            let n = self.start.add_one();
+            let n = Step::forward(self.start.clone(), 1);
             mem::replace(&mut self.start, n)
         } else {
             self.exhausted = true;
@@ -371,12 +608,12 @@ impl<A: Step> Iterator for ops::RangeInclusive<A> {
             return None;
         }
 
-        if let Some(plus_n) = self.start.add_usize(n) {
+        if let Some(plus_n) = Step::forward_checked(self.start.clone(), n) {
             use crate::cmp::Ordering::*;
 
             match plus_n.partial_cmp(&self.end) {
                 Some(Less) => {
-                    self.start = plus_n.add_one();
+                    self.start = Step::forward(plus_n.clone(), 1);
                     return Some(plus_n);
                 }
                 Some(Equal) => {
@@ -407,7 +644,7 @@ impl<A: Step> Iterator for ops::RangeInclusive<A> {
         let mut accum = init;
 
         while self.start < self.end {
-            let n = self.start.add_one();
+            let n = Step::forward(self.start.clone(), 1);
             let n = mem::replace(&mut self.start, n);
             accum = f(accum, n)?;
         }
@@ -446,7 +683,7 @@ impl<A: Step> DoubleEndedIterator for ops::RangeInclusive<A> {
         }
         let is_iterating = self.start < self.end;
         Some(if is_iterating {
-            let n = self.end.sub_one();
+            let n = Step::backward(self.end.clone(), 1);
             mem::replace(&mut self.end, n)
         } else {
             self.exhausted = true;
@@ -460,12 +697,12 @@ impl<A: Step> DoubleEndedIterator for ops::RangeInclusive<A> {
             return None;
         }
 
-        if let Some(minus_n) = self.end.sub_usize(n) {
+        if let Some(minus_n) = Step::backward_checked(self.end.clone(), n) {
             use crate::cmp::Ordering::*;
 
             match minus_n.partial_cmp(&self.start) {
                 Some(Greater) => {
-                    self.end = minus_n.sub_one();
+                    self.end = Step::backward(minus_n.clone(), 1);
                     return Some(minus_n);
                 }
                 Some(Equal) => {
@@ -496,7 +733,7 @@ impl<A: Step> DoubleEndedIterator for ops::RangeInclusive<A> {
         let mut accum = init;
 
         while self.start < self.end {
-            let n = self.end.sub_one();
+            let n = Step::backward(self.end.clone(), 1);
             let n = mem::replace(&mut self.end, n);
             accum = f(accum, n)?;
         }
@@ -511,5 +748,8 @@ impl<A: Step> DoubleEndedIterator for ops::RangeInclusive<A> {
     }
 }
 
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<A: Step> TrustedLen for ops::RangeInclusive<A> {}
+
 #[stable(feature = "fused", since = "1.26.0")]
 impl<A: Step> FusedIterator for ops::RangeInclusive<A> {}
diff --git a/src/libcore/num/mod.rs b/src/libcore/num/mod.rs
index a259e293b0c1a..9b4872f5d9f7a 100644
--- a/src/libcore/num/mod.rs
+++ b/src/libcore/num/mod.rs
@@ -749,6 +749,23 @@ $EndFeature, "
             }
         }
 
+        doc_comment! {
+            concat!("Unchecked integer addition. Computes `self + rhs, assuming overflow
+cannot occur. This results in undefined behavior when `self + rhs > ", stringify!($SelfT),
+"::max_value()` or `self + rhs < ", stringify!($SelfT), "::min_value()`."),
+            #[unstable(
+                feature = "unchecked_math",
+                reason = "niche optimization path",
+                issue = "none",
+            )]
+            #[must_use = "this returns the result of the operation, \
+                          without modifying the original"]
+            #[inline]
+            pub unsafe fn unchecked_add(self, rhs: Self) -> Self {
+                intrinsics::unchecked_add(self, rhs)
+            }
+        }
+
         doc_comment! {
             concat!("Checked integer subtraction. Computes `self - rhs`, returning `None` if
 overflow occurred.
@@ -774,6 +791,23 @@ $EndFeature, "
             }
         }
 
+        doc_comment! {
+            concat!("Unchecked integer subtraction. Computes `self - rhs, assuming overflow
+cannot occur. This results in undefined behavior when `self - rhs > ", stringify!($SelfT),
+"::max_value()` or `self - rhs < ", stringify!($SelfT), "::min_value()`."),
+            #[unstable(
+                feature = "unchecked_math",
+                reason = "niche optimization path",
+                issue = "none",
+            )]
+            #[must_use = "this returns the result of the operation, \
+                          without modifying the original"]
+            #[inline]
+            pub unsafe fn unchecked_sub(self, rhs: Self) -> Self {
+                intrinsics::unchecked_sub(self, rhs)
+            }
+        }
+
         doc_comment! {
             concat!("Checked integer multiplication. Computes `self * rhs`, returning `None` if
 overflow occurred.
@@ -799,6 +833,23 @@ $EndFeature, "
             }
         }
 
+        doc_comment! {
+            concat!("Unchecked integer multiplication. Computes `self * rhs, assuming overflow
+cannot occur. This results in undefined behavior when `self * rhs > ", stringify!($SelfT),
+"::max_value()` or `self * rhs < ", stringify!($SelfT), "::min_value()`."),
+            #[unstable(
+                feature = "unchecked_math",
+                reason = "niche optimization path",
+                issue = "none",
+            )]
+            #[must_use = "this returns the result of the operation, \
+                          without modifying the original"]
+            #[inline]
+            pub unsafe fn unchecked_mul(self, rhs: Self) -> Self {
+                intrinsics::unchecked_mul(self, rhs)
+            }
+        }
+
         doc_comment! {
             concat!("Checked integer division. Computes `self / rhs`, returning `None` if `rhs == 0`
 or the division results in overflow.
@@ -2936,6 +2987,23 @@ assert_eq!((", stringify!($SelfT), "::MAX - 2).checked_add(3), None);", $EndFeat
             }
         }
 
+        doc_comment! {
+            concat!("Unchecked integer addition. Computes `self + rhs, assuming overflow
+cannot occur. This results in undefined behavior when `self + rhs > ", stringify!($SelfT),
+"::max_value()` or `self + rhs < ", stringify!($SelfT), "::min_value()`."),
+            #[unstable(
+                feature = "unchecked_math",
+                reason = "niche optimization path",
+                issue = "none",
+            )]
+            #[must_use = "this returns the result of the operation, \
+                          without modifying the original"]
+            #[inline]
+            pub unsafe fn unchecked_add(self, rhs: Self) -> Self {
+                intrinsics::unchecked_add(self, rhs)
+            }
+        }
+
         doc_comment! {
             concat!("Checked integer subtraction. Computes `self - rhs`, returning
 `None` if overflow occurred.
@@ -2959,6 +3027,23 @@ assert_eq!(0", stringify!($SelfT), ".checked_sub(1), None);", $EndFeature, "
             }
         }
 
+        doc_comment! {
+            concat!("Unchecked integer subtraction. Computes `self - rhs, assuming overflow
+cannot occur. This results in undefined behavior when `self - rhs > ", stringify!($SelfT),
+"::max_value()` or `self - rhs < ", stringify!($SelfT), "::min_value()`."),
+            #[unstable(
+                feature = "unchecked_math",
+                reason = "niche optimization path",
+                issue = "none",
+            )]
+            #[must_use = "this returns the result of the operation, \
+                          without modifying the original"]
+            #[inline]
+            pub unsafe fn unchecked_sub(self, rhs: Self) -> Self {
+                intrinsics::unchecked_sub(self, rhs)
+            }
+        }
+
         doc_comment! {
             concat!("Checked integer multiplication. Computes `self * rhs`, returning
 `None` if overflow occurred.
@@ -2982,6 +3067,23 @@ assert_eq!(", stringify!($SelfT), "::MAX.checked_mul(2), None);", $EndFeature, "
             }
         }
 
+        doc_comment! {
+            concat!("Unchecked integer multiplication. Computes `self * rhs, assuming overflow
+cannot occur. This results in undefined behavior when `self * rhs > ", stringify!($SelfT),
+"::max_value()` or `self * rhs < ", stringify!($SelfT), "::min_value()`."),
+            #[unstable(
+                feature = "unchecked_math",
+                reason = "niche optimization path",
+                issue = "none",
+            )]
+            #[must_use = "this returns the result of the operation, \
+                          without modifying the original"]
+            #[inline]
+            pub unsafe fn unchecked_mul(self, rhs: Self) -> Self {
+                intrinsics::unchecked_mul(self, rhs)
+            }
+        }
+
         doc_comment! {
             concat!("Checked integer division. Computes `self / rhs`, returning `None`
 if `rhs == 0`.
diff --git a/src/libcore/tests/iter.rs b/src/libcore/tests/iter.rs
index 7da02b11676ab..52cf068f0a567 100644
--- a/src/libcore/tests/iter.rs
+++ b/src/libcore/tests/iter.rs
@@ -2139,6 +2139,24 @@ fn test_range_inclusive_nth_back() {
     assert_eq!(ExactSizeIterator::is_empty(&r), true);
 }
 
+#[test]
+fn test_range_len() {
+    assert_eq!((0..10_u8).len(), 10);
+    assert_eq!((9..10_u8).len(), 1);
+    assert_eq!((10..10_u8).len(), 0);
+    assert_eq!((11..10_u8).len(), 0);
+    assert_eq!((100..10_u8).len(), 0);
+}
+
+#[test]
+fn test_range_inclusive_len() {
+    assert_eq!((0..=10_u8).len(), 11);
+    assert_eq!((9..=10_u8).len(), 2);
+    assert_eq!((10..=10_u8).len(), 1);
+    assert_eq!((11..=10_u8).len(), 0);
+    assert_eq!((100..=10_u8).len(), 0);
+}
+
 #[test]
 fn test_range_step() {
     #![allow(deprecated)]
@@ -2509,42 +2527,91 @@ fn test_chain_fold() {
 }
 
 #[test]
-fn test_step_replace_unsigned() {
-    let mut x = 4u32;
-    let y = x.replace_zero();
-    assert_eq!(x, 0);
-    assert_eq!(y, 4);
+fn test_steps_between() {
+    assert_eq!(Step::steps_between(&20_u8, &200_u8), Some(180_usize));
+    assert_eq!(Step::steps_between(&-20_i8, &80_i8), Some(100_usize));
+    assert_eq!(Step::steps_between(&-120_i8, &80_i8), Some(200_usize));
+    assert_eq!(Step::steps_between(&20_u32, &4_000_100_u32), Some(4_000_080_usize));
+    assert_eq!(Step::steps_between(&-20_i32, &80_i32), Some(100_usize));
+    assert_eq!(Step::steps_between(&-2_000_030_i32, &2_000_050_i32), Some(4_000_080_usize));
 
-    x = 5;
-    let y = x.replace_one();
-    assert_eq!(x, 1);
-    assert_eq!(y, 5);
+    // Skip u64/i64 to avoid differences between 32-bit and 64-bit platforms
+
+    assert_eq!(Step::steps_between(&20_u128, &200_u128), Some(180_usize));
+    assert_eq!(Step::steps_between(&-20_i128, &80_i128), Some(100_usize));
+    if cfg!(target_pointer_width = "64") {
+        assert_eq!(Step::steps_between(&10_u128, &0x1_0000_0000_0000_0009_u128), Some(usize::MAX));
+    }
+    assert_eq!(Step::steps_between(&10_u128, &0x1_0000_0000_0000_000a_u128), None);
+    assert_eq!(Step::steps_between(&10_i128, &0x1_0000_0000_0000_000a_i128), None);
+    assert_eq!(
+        Step::steps_between(&-0x1_0000_0000_0000_0000_i128, &0x1_0000_0000_0000_0000_i128,),
+        None,
+    );
 }
 
 #[test]
-fn test_step_replace_signed() {
-    let mut x = 4i32;
-    let y = x.replace_zero();
-    assert_eq!(x, 0);
-    assert_eq!(y, 4);
+fn test_step_forward() {
+    assert_eq!(Step::forward_checked(55_u8, 200_usize), Some(255_u8));
+    assert_eq!(Step::forward_checked(252_u8, 200_usize), None);
+    assert_eq!(Step::forward_checked(0_u8, 256_usize), None);
+    assert_eq!(Step::forward_checked(-110_i8, 200_usize), Some(90_i8));
+    assert_eq!(Step::forward_checked(-110_i8, 248_usize), None);
+    assert_eq!(Step::forward_checked(-126_i8, 256_usize), None);
+
+    assert_eq!(Step::forward_checked(35_u16, 100_usize), Some(135_u16));
+    assert_eq!(Step::forward_checked(35_u16, 65500_usize), Some(u16::MAX));
+    assert_eq!(Step::forward_checked(36_u16, 65500_usize), None);
+    assert_eq!(Step::forward_checked(-110_i16, 200_usize), Some(90_i16));
+    assert_eq!(Step::forward_checked(-20_030_i16, 50_050_usize), Some(30_020_i16));
+    assert_eq!(Step::forward_checked(-10_i16, 40_000_usize), None);
+    assert_eq!(Step::forward_checked(-10_i16, 70_000_usize), None);
 
-    x = 5;
-    let y = x.replace_one();
-    assert_eq!(x, 1);
-    assert_eq!(y, 5);
+    assert_eq!(Step::forward_checked(10_u128, 70_000_usize), Some(70_010_u128));
+    assert_eq!(Step::forward_checked(10_i128, 70_030_usize), Some(70_040_i128));
+    assert_eq!(
+        Step::forward_checked(0xffff_ffff_ffff_ffff__ffff_ffff_ffff_ff00_u128, 0xff_usize),
+        Some(u128::MAX),
+    );
+    assert_eq!(
+        Step::forward_checked(0xffff_ffff_ffff_ffff__ffff_ffff_ffff_ff00_u128, 0x100_usize),
+        None
+    );
+    assert_eq!(
+        Step::forward_checked(0x7fff_ffff_ffff_ffff__ffff_ffff_ffff_ff00_i128, 0xff_usize),
+        Some(i128::MAX),
+    );
+    assert_eq!(
+        Step::forward_checked(0x7fff_ffff_ffff_ffff__ffff_ffff_ffff_ff00_i128, 0x100_usize),
+        None
+    );
 }
 
 #[test]
-fn test_step_replace_no_between() {
-    let mut x = 4u128;
-    let y = x.replace_zero();
-    assert_eq!(x, 0);
-    assert_eq!(y, 4);
+fn test_step_backward() {
+    assert_eq!(Step::backward_checked(255_u8, 200_usize), Some(55_u8));
+    assert_eq!(Step::backward_checked(100_u8, 200_usize), None);
+    assert_eq!(Step::backward_checked(255_u8, 256_usize), None);
+    assert_eq!(Step::backward_checked(90_i8, 200_usize), Some(-110_i8));
+    assert_eq!(Step::backward_checked(110_i8, 248_usize), None);
+    assert_eq!(Step::backward_checked(127_i8, 256_usize), None);
+
+    assert_eq!(Step::backward_checked(135_u16, 100_usize), Some(35_u16));
+    assert_eq!(Step::backward_checked(u16::MAX, 65500_usize), Some(35_u16));
+    assert_eq!(Step::backward_checked(10_u16, 11_usize), None);
+    assert_eq!(Step::backward_checked(90_i16, 200_usize), Some(-110_i16));
+    assert_eq!(Step::backward_checked(30_020_i16, 50_050_usize), Some(-20_030_i16));
+    assert_eq!(Step::backward_checked(-10_i16, 40_000_usize), None);
+    assert_eq!(Step::backward_checked(-10_i16, 70_000_usize), None);
 
-    x = 5;
-    let y = x.replace_one();
-    assert_eq!(x, 1);
-    assert_eq!(y, 5);
+    assert_eq!(Step::backward_checked(70_010_u128, 70_000_usize), Some(10_u128));
+    assert_eq!(Step::backward_checked(70_020_i128, 70_030_usize), Some(-10_i128));
+    assert_eq!(Step::backward_checked(10_u128, 7_usize), Some(3_u128));
+    assert_eq!(Step::backward_checked(10_u128, 11_usize), None);
+    assert_eq!(
+        Step::backward_checked(-0x7fff_ffff_ffff_ffff__ffff_ffff_ffff_ff00_i128, 0x100_usize),
+        Some(i128::MIN)
+    );
 }
 
 #[test]
diff --git a/src/libcore/tests/lib.rs b/src/libcore/tests/lib.rs
index e7d36d327cd89..d636542d699f9 100644
--- a/src/libcore/tests/lib.rs
+++ b/src/libcore/tests/lib.rs
@@ -22,6 +22,7 @@
 #![feature(slice_partition_at_index)]
 #![feature(specialization)]
 #![feature(step_trait)]
+#![feature(step_trait_ext)]
 #![feature(str_internals)]
 #![feature(test)]
 #![feature(trusted_len)]
diff --git a/src/librustc_ast_passes/node_count.rs b/src/librustc_ast_passes/node_count.rs
index 3cf562b927e2e..34db59b1b458d 100644
--- a/src/librustc_ast_passes/node_count.rs
+++ b/src/librustc_ast_passes/node_count.rs
@@ -1,4 +1,4 @@
-// Simply gives a rought count of the number of nodes in an AST.
+// Simply gives a rough count of the number of nodes in an AST.
 
 use rustc_ast::ast::*;
 use rustc_ast::visit::*;
diff --git a/src/librustc_index/vec.rs b/src/librustc_index/vec.rs
index a84f89c7cd950..67dcea58cf82b 100644
--- a/src/librustc_index/vec.rs
+++ b/src/librustc_index/vec.rs
@@ -65,7 +65,7 @@ impl Idx for u32 {
 /// `u32::MAX`. You can also customize things like the `Debug` impl,
 /// what traits are derived, and so forth via the macro.
 #[macro_export]
-#[allow_internal_unstable(step_trait, rustc_attrs)]
+#[allow_internal_unstable(step_trait, step_trait_ext, rustc_attrs)]
 macro_rules! newtype_index {
     // ---- public rules ----
 
@@ -181,7 +181,7 @@ macro_rules! newtype_index {
             }
         }
 
-        impl ::std::iter::Step for $type {
+        unsafe impl ::std::iter::Step for $type {
             #[inline]
             fn steps_between(start: &Self, end: &Self) -> Option<usize> {
                 <usize as ::std::iter::Step>::steps_between(
@@ -191,33 +191,13 @@ macro_rules! newtype_index {
             }
 
             #[inline]
-            fn replace_one(&mut self) -> Self {
-                ::std::mem::replace(self, Self::from_u32(1))
+            fn forward_checked(start: Self, u: usize) -> Option<Self> {
+                Self::index(start).checked_add(u).map(Self::from_usize)
             }
 
             #[inline]
-            fn replace_zero(&mut self) -> Self {
-                ::std::mem::replace(self, Self::from_u32(0))
-            }
-
-            #[inline]
-            fn add_one(&self) -> Self {
-                Self::from_usize(Self::index(*self) + 1)
-            }
-
-            #[inline]
-            fn sub_one(&self) -> Self {
-                Self::from_usize(Self::index(*self) - 1)
-            }
-
-            #[inline]
-            fn add_usize(&self, u: usize) -> Option<Self> {
-                Self::index(*self).checked_add(u).map(Self::from_usize)
-            }
-
-            #[inline]
-            fn sub_usize(&self, u: usize) -> Option<Self> {
-                Self::index(*self).checked_sub(u).map(Self::from_usize)
+            fn backward_checked(start: Self, u: usize) -> Option<Self> {
+                Self::index(start).checked_sub(u).map(Self::from_usize)
             }
         }
 
diff --git a/src/librustc_middle/middle/region.rs b/src/librustc_middle/middle/region.rs
index c3eeea7662ba9..f02d8fe8ad601 100644
--- a/src/librustc_middle/middle/region.rs
+++ b/src/librustc_middle/middle/region.rs
@@ -4,7 +4,7 @@
 //! For more information about how MIR-based region-checking works,
 //! see the [rustc dev guide].
 //!
-//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/mir/borrowck.html
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/borrow_check.html
 
 use crate::ich::{NodeIdHashingMode, StableHashingContext};
 use crate::ty::{self, DefIdTree, TyCtxt};
@@ -181,7 +181,7 @@ impl Scope {
                 // `blk`; reuse span of `blk` and shift `lo`
                 // forward to end of indexed statement.
                 //
-                // (This is the special case aluded to in the
+                // (This is the special case alluded to in the
                 // doc-comment for this method)
 
                 let stmt_span = blk.stmts[first_statement_index.index()].span;
diff --git a/src/librustc_save_analysis/dump_visitor.rs b/src/librustc_save_analysis/dump_visitor.rs
index 534fe172bef9c..3dd715f9e3df6 100644
--- a/src/librustc_save_analysis/dump_visitor.rs
+++ b/src/librustc_save_analysis/dump_visitor.rs
@@ -21,7 +21,7 @@ use rustc_ast::walk_list;
 use rustc_ast_pretty::pprust::{bounds_to_string, generic_params_to_string, ty_to_string};
 use rustc_data_structures::fx::FxHashSet;
 use rustc_hir::def::{DefKind as HirDefKind, Res};
-use rustc_hir::def_id::DefId;
+use rustc_hir::def_id::{DefId, LocalDefId};
 use rustc_middle::span_bug;
 use rustc_middle::ty::{self, DefIdTree, TyCtxt};
 use rustc_session::config::Input;
@@ -104,12 +104,10 @@ impl<'l, 'tcx> DumpVisitor<'l, 'tcx> {
         self.dumper.analysis()
     }
 
-    fn nest_tables<F>(&mut self, item_id: NodeId, f: F)
+    fn nest_tables<F>(&mut self, item_def_id: LocalDefId, f: F)
     where
         F: FnOnce(&mut Self),
     {
-        let item_def_id = self.tcx.hir().local_def_id_from_node_id(item_id);
-
         let tables = if self.tcx.has_typeck_tables(item_def_id) {
             self.tcx.typeck_tables_of(item_def_id)
         } else {
@@ -272,8 +270,9 @@ impl<'l, 'tcx> DumpVisitor<'l, 'tcx> {
     ) {
         debug!("process_method: {}:{}", id, ident);
 
-        let hir_id = self.tcx.hir().node_id_to_hir_id(id);
-        self.nest_tables(id, |v| {
+        let map = &self.tcx.hir();
+        let hir_id = map.node_id_to_hir_id(id);
+        self.nest_tables(map.local_def_id(hir_id), |v| {
             if let Some(mut method_data) = v.save_ctxt.get_method_data(id, ident, span) {
                 v.process_formals(&sig.decl.inputs, &method_data.qualname);
                 v.process_generic_params(&generics, &method_data.qualname, id);
@@ -296,7 +295,8 @@ impl<'l, 'tcx> DumpVisitor<'l, 'tcx> {
                 // start walking from the newly-created definition.
                 match sig.header.asyncness {
                     ast::Async::Yes { return_impl_trait_id, .. } => {
-                        v.nest_tables(return_impl_trait_id, |v| v.visit_ty(ret_ty))
+                        let hir_id = map.node_id_to_hir_id(return_impl_trait_id);
+                        v.nest_tables(map.local_def_id(hir_id), |v| v.visit_ty(ret_ty))
                     }
                     _ => v.visit_ty(ret_ty),
                 }
@@ -364,8 +364,9 @@ impl<'l, 'tcx> DumpVisitor<'l, 'tcx> {
         ty_params: &'l ast::Generics,
         body: Option<&'l ast::Block>,
     ) {
-        let hir_id = self.tcx.hir().node_id_to_hir_id(item.id);
-        self.nest_tables(item.id, |v| {
+        let map = &self.tcx.hir();
+        let hir_id = map.node_id_to_hir_id(item.id);
+        self.nest_tables(map.local_def_id(hir_id), |v| {
             if let Some(fn_data) = v.save_ctxt.get_item_data(item) {
                 down_cast_data!(fn_data, DefData, item.span);
                 v.process_formals(&decl.inputs, &fn_data.qualname);
@@ -389,7 +390,8 @@ impl<'l, 'tcx> DumpVisitor<'l, 'tcx> {
                     // start walking from the newly-created definition.
                     match header.asyncness {
                         ast::Async::Yes { return_impl_trait_id, .. } => {
-                            v.nest_tables(return_impl_trait_id, |v| v.visit_ty(ret_ty))
+                            let hir_id = map.node_id_to_hir_id(return_impl_trait_id);
+                            v.nest_tables(map.local_def_id(hir_id), |v| v.visit_ty(ret_ty))
                         }
                         _ => v.visit_ty(ret_ty),
                     }
@@ -407,7 +409,7 @@ impl<'l, 'tcx> DumpVisitor<'l, 'tcx> {
         expr: Option<&'l ast::Expr>,
     ) {
         let hir_id = self.tcx.hir().node_id_to_hir_id(item.id);
-        self.nest_tables(item.id, |v| {
+        self.nest_tables(self.tcx.hir().local_def_id(hir_id), |v| {
             if let Some(var_data) = v.save_ctxt.get_item_data(item) {
                 down_cast_data!(var_data, DefData, item.span);
                 v.dumper.dump_def(&access_from!(v.save_ctxt, item, hir_id), var_data);
@@ -427,15 +429,13 @@ impl<'l, 'tcx> DumpVisitor<'l, 'tcx> {
         vis: ast::Visibility,
         attrs: &'l [Attribute],
     ) {
-        let qualname = format!(
-            "::{}",
-            self.tcx.def_path_str(self.tcx.hir().local_def_id_from_node_id(id).to_def_id())
-        );
+        let hir_id = self.tcx.hir().node_id_to_hir_id(id);
+        let qualname =
+            format!("::{}", self.tcx.def_path_str(self.tcx.hir().local_def_id(hir_id).to_def_id()));
 
         if !self.span.filter_generated(ident.span) {
             let sig = sig::assoc_const_signature(id, ident.name, typ, expr, &self.save_ctxt);
             let span = self.span_from_span(ident.span);
-            let hir_id = self.tcx.hir().node_id_to_hir_id(id);
 
             self.dumper.dump_def(
                 &access_from_vis!(self.save_ctxt, vis, hir_id),
@@ -457,7 +457,7 @@ impl<'l, 'tcx> DumpVisitor<'l, 'tcx> {
         }
 
         // walk type and init value
-        self.nest_tables(id, |v| {
+        self.nest_tables(self.tcx.hir().local_def_id(hir_id), |v| {
             v.visit_ty(typ);
             if let Some(expr) = expr {
                 v.visit_expr(expr);
@@ -474,10 +474,9 @@ impl<'l, 'tcx> DumpVisitor<'l, 'tcx> {
     ) {
         debug!("process_struct {:?} {:?}", item, item.span);
         let name = item.ident.to_string();
-        let qualname = format!(
-            "::{}",
-            self.tcx.def_path_str(self.tcx.hir().local_def_id_from_node_id(item.id).to_def_id())
-        );
+        let hir_id = self.tcx.hir().node_id_to_hir_id(item.id);
+        let qualname =
+            format!("::{}", self.tcx.def_path_str(self.tcx.hir().local_def_id(hir_id).to_def_id()));
 
         let kind = match item.kind {
             ast::ItemKind::Struct(_, _) => DefKind::Struct,
@@ -509,7 +508,6 @@ impl<'l, 'tcx> DumpVisitor<'l, 'tcx> {
 
         if !self.span.filter_generated(item.ident.span) {
             let span = self.span_from_span(item.ident.span);
-            let hir_id = self.tcx.hir().node_id_to_hir_id(item.id);
             self.dumper.dump_def(
                 &access_from!(self.save_ctxt, item, hir_id),
                 Def {
@@ -529,7 +527,7 @@ impl<'l, 'tcx> DumpVisitor<'l, 'tcx> {
             );
         }
 
-        self.nest_tables(item.id, |v| {
+        self.nest_tables(self.tcx.hir().local_def_id(hir_id), |v| {
             for field in def.fields() {
                 v.process_struct_field_def(field, item.id);
                 v.visit_ty(&field.ty);
@@ -669,14 +667,15 @@ impl<'l, 'tcx> DumpVisitor<'l, 'tcx> {
         }
 
         let map = &self.tcx.hir();
-        self.nest_tables(item.id, |v| {
+        let hir_id = map.node_id_to_hir_id(item.id);
+        self.nest_tables(map.local_def_id(hir_id), |v| {
             v.visit_ty(&typ);
             if let &Some(ref trait_ref) = trait_ref {
                 v.process_path(trait_ref.ref_id, &trait_ref.path);
             }
             v.process_generic_params(generics, "", item.id);
             for impl_item in impl_items {
-                v.process_impl_item(impl_item, map.local_def_id_from_node_id(item.id).to_def_id());
+                v.process_impl_item(impl_item, map.local_def_id(hir_id).to_def_id());
             }
         });
     }
@@ -1411,7 +1410,11 @@ impl<'l, 'tcx> Visitor<'l> for DumpVisitor<'l, 'tcx> {
             }
             ast::TyKind::Array(ref element, ref length) => {
                 self.visit_ty(element);
-                self.nest_tables(length.id, |v| v.visit_expr(&length.value));
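+                // Array lengths are anon consts with their own typeck tables.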
+                let hir_id = self.tcx.hir().node_id_to_hir_id(length.id);
+                self.nest_tables(self.tcx.hir().local_def_id(hir_id), |v| {
+                    v.visit_expr(&length.value)
+                });
             }
             ast::TyKind::ImplTrait(id, ref bounds) => {
                 // FIXME: As of writing, the opaque type lowering introduces
@@ -1423,7 +1425,13 @@ impl<'l, 'tcx> Visitor<'l> for DumpVisitor<'l, 'tcx> {
                 // bounds...
                 // This will panic if called on return type `impl Trait`, which
                 // we guard against in `process_fn`.
-                self.nest_tables(id, |v| v.process_bounds(bounds));
+                // FIXME(#71104): we should be using just `node_id_to_hir_id`, but
+                // some `NodeId`s do not seem to have a corresponding `HirId`.
+                if let Some(hir_id) = self.tcx.hir().opt_node_id_to_hir_id(id) {
+                    self.nest_tables(self.tcx.hir().local_def_id(hir_id), |v| {
+                        v.process_bounds(bounds)
+                    });
+                }
             }
             _ => visit::walk_ty(self, t),
         }
@@ -1471,7 +1479,8 @@ impl<'l, 'tcx> Visitor<'l> for DumpVisitor<'l, 'tcx> {
                 }
 
                 // walk the body
-                self.nest_tables(ex.id, |v| {
+                let hir_id = self.tcx.hir().node_id_to_hir_id(ex.id);
+                self.nest_tables(self.tcx.hir().local_def_id(hir_id), |v| {
                     v.process_formals(&decl.inputs, &id);
                     v.visit_expr(body)
                 });
@@ -1488,7 +1497,11 @@ impl<'l, 'tcx> Visitor<'l> for DumpVisitor<'l, 'tcx> {
             }
             ast::ExprKind::Repeat(ref element, ref count) => {
                 self.visit_expr(element);
-                self.nest_tables(count.id, |v| v.visit_expr(&count.value));
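+                // Repeat counts, like array lengths, have their own typeck tables.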
+                let hir_id = self.tcx.hir().node_id_to_hir_id(count.id);
+                self.nest_tables(self.tcx.hir().local_def_id(hir_id), |v| {
+                    v.visit_expr(&count.value)
+                });
             }
             // In particular, we take this branch for call and path expressions,
             // where we'll index the idents involved just by continuing to walk.
diff --git a/src/librustc_typeck/check/mod.rs b/src/librustc_typeck/check/mod.rs
index 956e09ec52b4a..79dde84b8b1bc 100644
--- a/src/librustc_typeck/check/mod.rs
+++ b/src/librustc_typeck/check/mod.rs
@@ -831,13 +831,8 @@ fn primary_body_of(
 }
 
 fn has_typeck_tables(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
-    // FIXME(#71104) some `LocalDefId` do not seem to have a corresponding `HirId`.
-    if let Some(def_id) = def_id.as_local() {
-        if tcx.hir().opt_local_def_id_to_hir_id(def_id).is_none() {
-            return false;
-        }
-    }
-
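+    // The FIXME(#71104) workaround that lived here has moved to the callers:
+    // save-analysis now skips `NodeId`s with no corresponding `HirId`.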
     // Closures' tables come from their outermost function,
     // as they are part of the same "inference environment".
     let outer_def_id = tcx.closure_base_def_id(def_id);
diff --git a/src/test/ui/impl-trait/example-calendar.rs b/src/test/ui/impl-trait/example-calendar.rs
index f1b1656745e7c..fafab8a102a90 100644
--- a/src/test/ui/impl-trait/example-calendar.rs
+++ b/src/test/ui/impl-trait/example-calendar.rs
@@ -2,6 +2,7 @@
 
 #![feature(fn_traits,
            step_trait,
+           step_trait_ext,
            unboxed_closures,
 )]
 
@@ -10,7 +11,6 @@
 //! Originally converted to Rust by [Daniel Keep](https://github.com/DanielKeep).
 
 use std::fmt::Write;
-use std::mem;
 
 /// Date representation.
 #[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
@@ -156,32 +156,17 @@ impl<'a, 'b> std::ops::Add<&'b NaiveDate> for &'a NaiveDate {
     }
 }
 
-impl std::iter::Step for NaiveDate {
+unsafe impl std::iter::Step for NaiveDate {
     fn steps_between(_: &Self, _: &Self) -> Option<usize> {
         unimplemented!()
     }
 
-    fn replace_one(&mut self) -> Self {
-        mem::replace(self, NaiveDate(0, 0, 1))
+    fn forward_checked(start: Self, n: usize) -> Option<Self> {
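+        // Walk forward one day at a time; `succ()` returns the next date.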
+        Some((0..n).fold(start, |x, _| x.succ()))
     }
 
-    fn replace_zero(&mut self) -> Self {
-        mem::replace(self, NaiveDate(0, 0, 0))
-    }
-
-    fn add_one(&self) -> Self {
-        self.succ()
-    }
-
-    fn sub_one(&self) -> Self {
-        unimplemented!()
-    }
-
-    fn add_usize(&self, _: usize) -> Option<Self> {
-        unimplemented!()
-    }
-
-    fn sub_usize(&self, _: usize) -> Option<Self> {
+    fn backward_checked(_: Self, _: usize) -> Option<Self> {
         unimplemented!()
     }
 }
diff --git a/triagebot.toml b/triagebot.toml
index 56d29994a8df4..2210a8ff8e656 100644
--- a/triagebot.toml
+++ b/triagebot.toml
@@ -36,4 +36,8 @@ label = "ICEBreaker-Cleanup-Crew"
 
 [prioritize]
 label = "I-prioritize"
+prioritize_on = ["regression-from-stable-to-stable", "regression-from-stable-to-beta", "regression-from-stable-to-nightly"]
+priority_labels = "P-*"
 zulip_stream = 227806