diff --git a/src/bootstrap/doc.rs b/src/bootstrap/doc.rs
index d02c19467ee68..c0abf4c5da3b0 100644
--- a/src/bootstrap/doc.rs
+++ b/src/bootstrap/doc.rs
@@ -461,8 +461,8 @@ impl Step for Std {
 
             builder.run(&mut cargo.into());
         };
-        let krates = ["alloc", "core", "std", "proc_macro", "test"];
-        for krate in &krates {
+        static KRATES: &[&str] = &["alloc", "core", "std", "proc_macro", "test"];
+        for krate in KRATES {
             run_cargo_rustdoc_for(krate);
         }
         builder.cp_r(&my_out, &out);
@@ -470,13 +470,12 @@ impl Step for Std {
         // Look for src/libstd, src/libcore etc in the `x.py doc` arguments and
         // open the corresponding rendered docs.
         for path in builder.paths.iter().map(components_simplified) {
-            if path.get(0) == Some(&"src")
-                && path.get(1).map_or(false, |dir| dir.starts_with("lib"))
-            {
-                let requested_crate = &path[1][3..];
-                if krates.contains(&requested_crate) {
-                    let index = out.join(requested_crate).join("index.html");
-                    open(builder, &index);
+            if let ["src", krate_dir, ..] = path.as_slice() {
+                if let Some(krate) = krate_dir.strip_prefix("lib") {
+                    if KRATES.contains(&krate) {
+                        let index = out.join(krate).join("index.html");
+                        open(builder, &index);
+                    }
                 }
             }
         }
diff --git a/src/librustc_ast/util/comments.rs b/src/librustc_ast/util/comments.rs
index 9874754fcd2f7..2894a3855ef31 100644
--- a/src/librustc_ast/util/comments.rs
+++ b/src/librustc_ast/util/comments.rs
@@ -29,27 +29,31 @@ pub struct Comment {
 }
 
 pub fn is_line_doc_comment(s: &str) -> bool {
-    let res = (s.starts_with("///") && *s.as_bytes().get(3).unwrap_or(&b' ') != b'/')
-        || s.starts_with("//!");
-    debug!("is {:?} a doc comment? {}", s, res);
-    res
+    let yes = match s.as_bytes() {
+        [b'/', b'/', b'/', c, ..] => *c != b'/',
+        [b'/', b'/', b'/', ..] => true,
+        [b'/', b'/', b'!', ..] => true,
+        _ => false,
+    };
+    debug!("is {:?} a line doc comment? {}", s, yes);
+    yes
 }
 
 pub fn is_block_doc_comment(s: &str) -> bool {
-    // Prevent `/**/` from being parsed as a doc comment
-    let res = ((s.starts_with("/**") && *s.as_bytes().get(3).unwrap_or(&b' ') != b'*')
-        || s.starts_with("/*!"))
-        && s.len() >= 5;
-    debug!("is {:?} a doc comment? {}", s, res);
-    res
+    // Previously, `/**/` was incorrectly regarded as a doc comment because it
+    // starts with `/**` and ends with `*/`. However, this caused an ICE
+    // because some code assumed that the length of a doc comment is at least 5.
+    let yes = match s.as_bytes() {
+        [b'/', b'*', b'*', c, _, ..] => *c != b'*',
+        [b'/', b'*', b'!', _, _, ..] => true,
+        _ => false,
+    };
+    debug!("is {:?} a block doc comment? {}", s, yes);
+    yes
 }
 
-// FIXME(#64197): Try to privatize this again.
-pub fn is_doc_comment(s: &str) -> bool {
-    (s.starts_with("///") && is_line_doc_comment(s))
-        || s.starts_with("//!")
-        || (s.starts_with("/**") && is_block_doc_comment(s))
-        || s.starts_with("/*!")
+fn is_doc_comment(s: &str) -> bool {
+    is_line_doc_comment(s) || is_block_doc_comment(s)
 }
 
 pub fn doc_comment_style(comment: &str) -> ast::AttrStyle {
@@ -127,22 +131,26 @@ pub fn strip_doc_comment_decoration(comment: &str) -> String {
     const ONELINERS: &[&str] = &["///!", "///", "//!", "//"];
 
     for prefix in ONELINERS {
-        if comment.starts_with(*prefix) {
-            return (&comment[prefix.len()..]).to_string();
+        if let Some(tail) = comment.strip_prefix(*prefix) {
+            return tail.to_owned();
         }
     }
 
-    if comment.starts_with("/*") {
-        let lines =
-            comment[3..comment.len() - 2].lines().map(|s| s.to_string()).collect::<Vec<String>>();
+    match comment
+        .strip_prefix("/**")
+        .or_else(|| comment.strip_prefix("/*!"))
+        .and_then(|s| s.strip_suffix("*/"))
+    {
+        Some(doc) => {
+            let lines = doc.lines().map(|s| s.to_string()).collect::<Vec<String>>();
 
-        let lines = vertical_trim(lines);
-        let lines = horizontal_trim(lines);
+            let lines = vertical_trim(lines);
+            let lines = horizontal_trim(lines);
 
-        return lines.join("\n");
+            lines.join("\n")
+        }
+        _ => panic!("not a doc-comment: {}", comment),
     }
-
-    panic!("not a doc-comment: {}", comment);
 }
 
 /// Returns `None` if the first `col` chars of `s` contain a non-whitespace char.
diff --git a/src/librustc_expand/parse/lexer/tests.rs b/src/librustc_expand/parse/lexer/tests.rs
index 2932475430bbf..f56b9cfaadd99 100644
--- a/src/librustc_expand/parse/lexer/tests.rs
+++ b/src/librustc_expand/parse/lexer/tests.rs
@@ -1,5 +1,5 @@
 use rustc_ast::token::{self, Token, TokenKind};
-use rustc_ast::util::comments::is_doc_comment;
+use rustc_ast::util::comments::is_line_doc_comment;
 use rustc_ast::with_default_globals;
 use rustc_data_structures::sync::Lrc;
 use rustc_errors::{emitter::EmitterWriter, Handler};
@@ -225,9 +225,9 @@ fn literal_suffixes() {
 
 #[test]
 fn line_doc_comments() {
-    assert!(is_doc_comment("///"));
-    assert!(is_doc_comment("/// blah"));
-    assert!(!is_doc_comment("////"));
+    assert!(is_line_doc_comment("///"));
+    assert!(is_line_doc_comment("/// blah"));
+    assert!(!is_line_doc_comment("////"));
 }
 
 #[test]
diff --git a/src/librustdoc/clean/utils.rs b/src/librustdoc/clean/utils.rs
index 832b2420c2389..1868310abc125 100644
--- a/src/librustdoc/clean/utils.rs
+++ b/src/librustdoc/clean/utils.rs
@@ -481,12 +481,14 @@ pub fn print_const(cx: &DocContext<'_>, n: &'tcx ty::Const<'_>) -> String {
         _ => {
             let mut s = n.to_string();
             // array lengths are obviously usize
-            if s.ends_with("_usize") {
-                let n = s.len() - "_usize".len();
-                s.truncate(n);
-                if s.ends_with(": ") {
-                    let n = s.len() - ": ".len();
-                    s.truncate(n);
-                }
+            if let Some(head) = s.strip_suffix("_usize") {
+                // Drop the "_usize" suffix and, when present, the ": " that
+                // immediately precedes it.
+                let new_len = match head.strip_suffix(": ") {
+                    None => head.len(),
+                    Some(hhead) => hhead.len(),
+                };
+                // `new_len` lies on a char boundary (only ASCII was removed),
+                // so the safe `truncate` cannot panic; no `unsafe` needed.
+                s.truncate(new_len);
             }
             s
diff --git a/src/librustdoc/html/markdown.rs b/src/librustdoc/html/markdown.rs
index 7a6626766d388..a4fd5d54cf3d5 100644
--- a/src/librustdoc/html/markdown.rs
+++ b/src/librustdoc/html/markdown.rs
@@ -133,16 +133,24 @@ impl<'a> Line<'a> {
 // then reallocate to remove it; which would make us return a String.
 fn map_line(s: &str) -> Line<'_> {
     let trimmed = s.trim();
-    if trimmed.starts_with("##") {
-        Line::Shown(Cow::Owned(s.replacen("##", "#", 1)))
-    } else if trimmed.starts_with("# ") {
-        // # text
-        Line::Hidden(&trimmed[2..])
-    } else if trimmed == "#" {
-        // We cannot handle '#text' because it could be #[attr].
-        Line::Hidden("")
-    } else {
-        Line::Shown(Cow::Borrowed(s))
+    match trimmed.strip_prefix("#") {
+        Some(tail) => match tail.strip_prefix("#") {
+            // `##text` is rendered as `#text`; do the replacement on `s`,
+            // not `trimmed`, so the line's surrounding whitespace is kept.
+            Some(_) => Line::Shown(Cow::Owned(s.replacen("##", "#", 1))),
+            // A bare `#` is hidden entirely.
+            None if tail.is_empty() => Line::Hidden(""),
+            None => {
+                if let Some(text) = tail.strip_prefix(' ') {
+                    // `# text` will be hidden.
+                    Line::Hidden(text)
+                } else {
+                    // We cannot hide `#text` because it could be `#[attr]`.
+                    Line::Shown(s.into())
+                }
+            }
+        },
+        None => Line::Shown(s.into()),
     }
 }
 
@@ -754,7 +762,7 @@ impl LangString {
                 }
                 x if x.starts_with("ignore-") => {
                     if enable_per_target_ignores {
-                        ignores.push(x.trim_start_matches("ignore-").to_owned());
+                        ignores.push(x.strip_prefix("ignore-").unwrap().to_owned());
                         seen_rust_tags = !seen_other_tags;
                     }
                 }
@@ -776,10 +784,10 @@ impl LangString {
                     data.no_run = true;
                 }
                 x if x.starts_with("edition") => {
-                    data.edition = x[7..].parse::<Edition>().ok();
+                    data.edition = x.strip_prefix("edition").unwrap().parse::<Edition>().ok();
                 }
-                x if allow_error_code_check && x.starts_with('E') && x.len() == 5 => {
-                    if x[1..].parse::<u32>().is_ok() {
+                x if allow_error_code_check && x.len() == 5 && x.starts_with('E') => {
+                    if x.strip_prefix('E').unwrap().parse::<u32>().is_ok() {
                         data.error_codes.push(x.to_owned());
                         seen_rust_tags = !seen_other_tags || seen_rust_tags;
                     } else {
diff --git a/src/librustdoc/html/render.rs b/src/librustdoc/html/render.rs
index 04c4685213b2e..b4c2d924f6410 100644
--- a/src/librustdoc/html/render.rs
+++ b/src/librustdoc/html/render.rs
@@ -810,11 +810,13 @@ themePicker.onblur = handleThemeButtonsBlur;
                 if line.starts_with(&format!("\"{}\"", krate)) {
                     continue;
                 }
-                if line.ends_with(",\\") {
-                    ret.push(line[..line.len() - 2].to_string());
-                } else {
-                    // Ends with "\\" (it's the case for the last added crate line)
-                    ret.push(line[..line.len() - 1].to_string());
+                // Every crate line ends with "\\"; all but the last also have
+                // a "," before it. Keep the original fallback (strip one char)
+                // so an unexpected line is not silently dropped.
+                if let Some(head) = line.strip_suffix(",\\").or_else(|| line.strip_suffix("\\")) {
+                    ret.push(head.to_string());
+                } else {
+                    ret.push(line[..line.len() - 1].to_string());
                 }
                 krates.push(
                     line.split('"')
diff --git a/src/librustdoc/html/sources.rs b/src/librustdoc/html/sources.rs
index f0900c34a4ba3..abd5b761f144c 100644
--- a/src/librustdoc/html/sources.rs
+++ b/src/librustdoc/html/sources.rs
@@ -83,8 +83,7 @@ impl<'a> SourceCollector<'a> {
         };
 
         // Remove the utf-8 BOM if any
-        let contents =
-            if contents.starts_with("\u{feff}") { &contents[3..] } else { &contents[..] };
+        let contents = contents.strip_prefix("\u{feff}").unwrap_or(&contents[..]);
 
         // Create the intermediate directories
         let mut cur = self.dst.clone();
diff --git a/src/librustdoc/markdown.rs b/src/librustdoc/markdown.rs
index e0753bcd70f29..89a194e2f2c40 100644
--- a/src/librustdoc/markdown.rs
+++ b/src/librustdoc/markdown.rs
@@ -18,9 +18,9 @@ fn extract_leading_metadata(s: &str) -> (Vec<&str>, &str) {
     let mut count = 0;
 
     for line in s.lines() {
-        if line.starts_with("# ") || line.starts_with('%') {
+        if let Some(tail) = line.strip_prefix("# ").or_else(|| line.strip_prefix('%')) {
             // trim the whitespace after the symbol
-            metadata.push(line[1..].trim_start());
+            metadata.push(tail.trim_start());
             count += line.len() + 1;
         } else {
             return (metadata, &s[count..]);
diff --git a/src/librustdoc/passes/collect_intra_doc_links.rs b/src/librustdoc/passes/collect_intra_doc_links.rs
index 8da74f375d9ce..a4d5e55cc23b0 100644
--- a/src/librustdoc/passes/collect_intra_doc_links.rs
+++ b/src/librustdoc/passes/collect_intra_doc_links.rs
@@ -541,38 +541,31 @@ impl<'a, 'tcx> DocFolder for LinkCollector<'a, 'tcx> {
 
             let link = ori_link.replace("`", "");
             let parts = link.split('#').collect::<Vec<_>>();
-            let (link, extra_fragment) = if parts.len() > 2 {
-                build_diagnostic(
-                    cx,
-                    &item,
-                    &link,
-                    &dox,
-                    link_range,
-                    "has an issue with the link anchor.",
-                    "only one `#` is allowed in a link",
-                    None,
-                );
-                continue;
-            } else if parts.len() == 2 {
-                if parts[0].trim().is_empty() {
-                    // This is an anchor to an element of the current page, nothing to do in here!
+            let (link, extra_fragment) = match *parts {
+                [] => unreachable!("`str::split` always returns a non-empty list"),
+                [a] => (a.to_owned(), None),
+                // This is an anchor to an element of the current page, nothing to do in here!
+                [a, _] if a.trim().is_empty() => continue,
+                [a, b] => (a.to_owned(), Some(b.to_owned())),
+                [_, _, _, ..] => {
+                    build_diagnostic(
+                        cx,
+                        &item,
+                        &link,
+                        &dox,
+                        link_range,
+                        "has an issue with the link anchor.",
+                        "only one `#` is allowed in a link",
+                        None,
+                    );
                     continue;
                 }
-                (parts[0].to_owned(), Some(parts[1].to_owned()))
-            } else {
-                (parts[0].to_owned(), None)
             };
             let resolved_self;
             let mut path_str;
             let (res, fragment) = {
-                let mut kind = None;
-                path_str = if let Some(prefix) = ["struct@", "enum@", "type@", "trait@", "union@"]
-                    .iter()
-                    .find(|p| link.starts_with(**p))
-                {
-                    kind = Some(TypeNS);
-                    link.trim_start_matches(prefix)
-                } else if let Some(prefix) = [
+                static TYPES: &[&str] = &["struct@", "enum@", "type@", "trait@", "union@"];
+                static TY_KINDS: &[&str] = &[
                     "const@",
                     "static@",
                     "value@",
@@ -581,28 +574,27 @@ impl<'a, 'tcx> DocFolder for LinkCollector<'a, 'tcx> {
                     "fn@",
                     "module@",
                     "method@",
-                ]
-                .iter()
-                .find(|p| link.starts_with(**p))
-                {
-                    kind = Some(ValueNS);
-                    link.trim_start_matches(prefix)
-                } else if link.ends_with("()") {
-                    kind = Some(ValueNS);
-                    link.trim_end_matches("()")
-                } else if link.starts_with("macro@") {
-                    kind = Some(MacroNS);
-                    link.trim_start_matches("macro@")
-                } else if link.starts_with("derive@") {
-                    kind = Some(MacroNS);
-                    link.trim_start_matches("derive@")
-                } else if link.ends_with('!') {
-                    kind = Some(MacroNS);
-                    link.trim_end_matches('!')
-                } else {
-                    &link[..]
-                }
-                .trim();
+                ];
+                let (kind, path) =
+                    if let Some(tail) = TYPES.iter().find_map(|&p| link.strip_prefix(p)) {
+                        (Some(TypeNS), tail)
+                    } else if let Some(tail) =
+                        TY_KINDS.iter().find_map(|&p| link.strip_prefix(p))
+                    {
+                        (Some(ValueNS), tail)
+                    } else if let Some(head) = link.strip_suffix("()") {
+                        (Some(ValueNS), head)
+                    } else if let Some(tail) =
+                        link.strip_prefix("macro@").or_else(|| link.strip_prefix("derive@"))
+                    {
+                        (Some(MacroNS), tail)
+                    } else if let Some(head) = link.strip_suffix('!') {
+                        (Some(MacroNS), head)
+                    } else {
+                        (None, &link[..])
+                    };
+
+                path_str = path.trim();
 
                 if path_str.contains(|ch: char| !(ch.is_alphanumeric() || ch == ':' || ch == '_')) {
                     continue;
@@ -623,9 +615,9 @@ impl<'a, 'tcx> DocFolder for LinkCollector<'a, 'tcx> {
                     if item.is_mod() && item.attrs.inner_docs { None } else { parent_node };
 
                 // replace `Self` with suitable item's parent name
-                if path_str.starts_with("Self::") {
+                if let Some(item) = path_str.strip_prefix("Self::") {
                     if let Some(ref name) = parent_name {
-                        resolved_self = format!("{}::{}", name, &path_str[6..]);
+                        resolved_self = format!("{}::{}", name, item);
                         path_str = &resolved_self;
                     }
                 }
diff --git a/src/tools/clippy/clippy_lints/src/doc.rs b/src/tools/clippy/clippy_lints/src/doc.rs
index d52bb8961fae7..87dceb858449a 100644
--- a/src/tools/clippy/clippy_lints/src/doc.rs
+++ b/src/tools/clippy/clippy_lints/src/doc.rs
@@ -257,54 +257,57 @@ fn lint_for_missing_headers<'tcx>(
 /// the spans but this function is inspired from the later.
 #[allow(clippy::cast_possible_truncation)]
 #[must_use]
-pub fn strip_doc_comment_decoration(comment: &str, span: Span) -> (String, Vec<(usize, Span)>) {
+fn strip_doc_comment_decoration(comment: &str, span: Span) -> (String, Vec<(usize, Span)>) {
     // one-line comments lose their prefix
     const ONELINERS: &[&str] = &["///!", "///", "//!", "//"];
     for prefix in ONELINERS {
-        if comment.starts_with(*prefix) {
-            let doc = &comment[prefix.len()..];
+        if let Some(doc) = comment.strip_prefix(*prefix) {
             let mut doc = doc.to_owned();
             doc.push('\n');
+            let len = doc.len();
             return (
-                doc.to_owned(),
-                vec![(doc.len(), span.with_lo(span.lo() + BytePos(prefix.len() as u32)))],
+                doc,
+                vec![(len, span.with_lo(span.lo() + BytePos(prefix.len() as u32)))],
             );
         }
     }
 
-    if comment.starts_with("/*") {
-        let doc = &comment[3..comment.len() - 2];
-        let mut sizes = vec![];
-        let mut contains_initial_stars = false;
-        for line in doc.lines() {
-            let offset = line.as_ptr() as usize - comment.as_ptr() as usize;
-            debug_assert_eq!(offset as u32 as usize, offset);
-            contains_initial_stars |= line.trim_start().starts_with('*');
-            // +1 for the newline
-            sizes.push((line.len() + 1, span.with_lo(span.lo() + BytePos(offset as u32))));
-        }
-        if !contains_initial_stars {
-            return (doc.to_string(), sizes);
-        }
-        // remove the initial '*'s if any
-        let mut no_stars = String::with_capacity(doc.len());
-        for line in doc.lines() {
-            let mut chars = line.chars();
-            while let Some(c) = chars.next() {
-                if c.is_whitespace() {
-                    no_stars.push(c);
-                } else {
-                    no_stars.push(if c == '*' { ' ' } else { c });
-                    break;
-                }
+    let doc = comment.strip_prefix("/**")
+        .or_else(|| comment.strip_prefix("/*!"))
+        .and_then(|s| s.strip_suffix("*/"));
+    let doc = match doc {
+        Some(doc) => doc,
+        _ => panic!("not a doc-comment: {}", comment),
+    };
+
+    let mut sizes = vec![];
+    let mut contains_initial_stars = false;
+    for line in doc.lines() {
+        let offset = line.as_ptr() as usize - comment.as_ptr() as usize;
+        debug_assert_eq!(offset as u32 as usize, offset);
+        contains_initial_stars |= line.trim_start().starts_with('*');
+        // +1 for the newline
+        sizes.push((line.len() + 1, span.with_lo(span.lo() + BytePos(offset as u32))));
+    }
+    if !contains_initial_stars {
+        return (doc.to_owned(), sizes);
+    }
+    // remove the initial '*'s if any
+    let mut no_stars = String::with_capacity(doc.len());
+    for line in doc.lines() {
+        let mut chars = line.chars();
+        while let Some(c) = chars.next() {
+            if c.is_whitespace() {
+                no_stars.push(c);
+            } else {
+                no_stars.push(if c == '*' { ' ' } else { c });
+                break;
             }
-            no_stars.push_str(chars.as_str());
-            no_stars.push('\n');
         }
-        return (no_stars, sizes);
+        no_stars.push_str(chars.as_str());
+        no_stars.push('\n');
     }
-
-    panic!("not a doc-comment: {}", comment);
+    (no_stars, sizes)
 }
 
 #[derive(Copy, Clone)]
@@ -319,7 +322,7 @@ fn check_attrs<'a>(cx: &LateContext<'_>, valid_idents: &FxHashSet<String>, attrs
 
     for attr in attrs {
         if let AttrKind::DocComment(ref comment) = attr.kind {
-            let comment = comment.to_string();
+            let comment: &str = &*comment.as_str();
             let (comment, current_spans) = strip_doc_comment_decoration(&comment, attr.span);
             spans.extend_from_slice(&current_spans);
             doc.push_str(&comment);
@@ -480,7 +483,7 @@ fn check_word(cx: &LateContext<'_>, word: &str, span: Span) {
             return false;
         }
 
-        let s = if s.ends_with('s') { &s[..s.len() - 1] } else { s };
+        let s = s.strip_suffix('s').unwrap_or(s);
 
         s.chars().all(char::is_alphanumeric)
             && s.chars().filter(|&c| c.is_uppercase()).take(2).count() > 1