From 865c2dba30cee0d61ca0073aaf5fd32587887674 Mon Sep 17 00:00:00 2001
From: Alexis Beingessner <a.beingessner@gmail.com>
Date: Sat, 22 Nov 2014 21:34:11 -0500
Subject: [PATCH 01/40] add MoveItems to RingBuf, fixes #19085

---
 src/libcollections/ring_buf.rs | 110 ++++++++++++++++++++++++++++++---
 1 file changed, 102 insertions(+), 8 deletions(-)

diff --git a/src/libcollections/ring_buf.rs b/src/libcollections/ring_buf.rs
index 643b500ec3e2b..78e0bb19b4cd4 100644
--- a/src/libcollections/ring_buf.rs
+++ b/src/libcollections/ring_buf.rs
@@ -34,8 +34,6 @@ static MINIMUM_CAPACITY: uint = 2u;
 
 // FIXME(conventions): implement shrink_to_fit. Awkward with the current design, but it should
 // be scrapped anyway. Defer to rewrite?
-// FIXME(conventions): implement into_iter
-
 
 /// `RingBuf` is a circular buffer that implements `Deque`.
 pub struct RingBuf<T> {
@@ -394,6 +392,14 @@ impl<T> RingBuf<T> {
         }
     }
 
+    /// Consumes the `RingBuf` into an iterator yielding elements by value.
+    #[unstable = "matches collection reform specification, waiting for dust to settle"]
+    pub fn into_iter(self) -> MoveItems<T> {
+        MoveItems {
+            inner: self,
+        }
+    }
+
     /// Returns the number of elements in the `RingBuf`.
     ///
     /// # Example
@@ -737,11 +743,9 @@ impl<'a, T> Iterator<&'a mut T> for MutItems<'a, T> {
         }
         let tail = self.tail;
         self.tail = wrap_index(self.tail + 1, self.cap);
-        if mem::size_of::<T>() != 0 {
-            unsafe { Some(&mut *self.ptr.offset(tail as int)) }
-        } else {
-            // use a non-zero pointer
-            Some(unsafe { mem::transmute(1u) })
+
+        unsafe {
+            Some(&mut *self.ptr.offset(tail as int))
         }
     }
 
@@ -759,12 +763,43 @@ impl<'a, T> DoubleEndedIterator<&'a mut T> for MutItems<'a, T> {
             return None;
         }
         self.head = wrap_index(self.head - 1, self.cap);
-        unsafe { Some(&mut *self.ptr.offset(self.head as int)) }
+
+        unsafe {
+            Some(&mut *self.ptr.offset(self.head as int))
+        }
     }
 }
 
 impl<'a, T> ExactSize<&'a mut T> for MutItems<'a, T> {}
 
+/// A by-value `RingBuf` iterator.
+pub struct MoveItems<T> {
+    inner: RingBuf<T>,
+}
+
+impl<T> Iterator<T> for MoveItems<T> {
+    #[inline]
+    fn next(&mut self) -> Option<T> {
+        self.inner.pop_front()
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (uint, Option<uint>) {
+        let len = self.inner.len();
+        (len, Some(len))
+    }
+}
+
+impl<T> DoubleEndedIterator<T> for MoveItems<T> {
+    #[inline]
+    fn next_back(&mut self) -> Option<T> {
+        self.inner.pop_back()
+    }
+}
+
+
+impl<T> ExactSize<T> for MoveItems<T> {}
+
 impl<A: PartialEq> PartialEq for RingBuf<A> {
     fn eq(&self, other: &RingBuf<A>) -> bool {
         self.len() == other.len() &&
@@ -1314,6 +1349,65 @@ mod tests {
         }
     }
 
+    #[test]
+    fn test_into_iter() {
+
+        // Empty iter
+        {
+            let d: RingBuf<int> = RingBuf::new();
+            let mut iter = d.into_iter();
+
+            assert_eq!(iter.size_hint(), (0, Some(0)));
+            assert_eq!(iter.next(), None);
+            assert_eq!(iter.size_hint(), (0, Some(0)));
+        }
+
+        // simple iter
+        {
+            let mut d = RingBuf::new();
+            for i in range(0i, 5) {
+                d.push_back(i);
+            }
+
+            let b = vec![0,1,2,3,4];
+            assert_eq!(d.into_iter().collect::<Vec<int>>(), b);
+        }
+
+        // wrapped iter
+        {
+            let mut d = RingBuf::new();
+            for i in range(0i, 5) {
+                d.push_back(i);
+            }
+            for i in range(6, 9) {
+                d.push_front(i);
+            }
+
+            let b = vec![8,7,6,0,1,2,3,4];
+            assert_eq!(d.into_iter().collect::<Vec<int>>(), b);
+        }
+
+        // partially used
+        {
+            let mut d = RingBuf::new();
+            for i in range(0i, 5) {
+                d.push_back(i);
+            }
+            for i in range(6, 9) {
+                d.push_front(i);
+            }
+
+            let mut it = d.into_iter();
+            assert_eq!(it.size_hint(), (8, Some(8)));
+            assert_eq!(it.next(), Some(8));
+            assert_eq!(it.size_hint(), (7, Some(7)));
+            assert_eq!(it.next_back(), Some(4));
+            assert_eq!(it.size_hint(), (6, Some(6)));
+            assert_eq!(it.next(), Some(7));
+            assert_eq!(it.size_hint(), (5, Some(5)));
+        }
+    }
+
     #[test]
     fn test_from_iter() {
         use std::iter;

From 59d13820c45ba86da1b45cfb6e41dadaeed2f07d Mon Sep 17 00:00:00 2001
From: Tom Jakubowski <tom@crystae.net>
Date: Mon, 24 Nov 2014 10:14:46 -0800
Subject: [PATCH 02/40] rustdoc: Render Sized? on traits and generics

Both `trait Foo for Sized?` and `<Sized? T>` are handled correctly.

Fix #18515
---
 src/librustdoc/clean/inline.rs |  3 ++-
 src/librustdoc/clean/mod.rs    | 37 ++++++++++++++++++++++++++--------
 src/librustdoc/doctree.rs      |  1 +
 src/librustdoc/html/format.rs  |  3 +++
 src/librustdoc/html/render.rs  |  6 ++++++
 src/librustdoc/visit_ast.rs    |  3 ++-
 6 files changed, 43 insertions(+), 10 deletions(-)

diff --git a/src/librustdoc/clean/inline.rs b/src/librustdoc/clean/inline.rs
index 16edccd154302..c671e8dcaf808 100644
--- a/src/librustdoc/clean/inline.rs
+++ b/src/librustdoc/clean/inline.rs
@@ -159,11 +159,12 @@ pub fn build_external_trait(cx: &DocContext, tcx: &ty::ctxt,
         }
     });
     let trait_def = ty::lookup_trait_def(tcx, did);
-    let bounds = trait_def.bounds.clean(cx);
+    let (bounds, default_unbound) = trait_def.bounds.clean(cx);
     clean::Trait {
         generics: (&def.generics, subst::TypeSpace).clean(cx),
         items: items.collect(),
         bounds: bounds,
+        default_unbound: default_unbound
     }
 }
 
diff --git a/src/librustdoc/clean/mod.rs b/src/librustdoc/clean/mod.rs
index 5985516a559f3..db23ec07a84ea 100644
--- a/src/librustdoc/clean/mod.rs
+++ b/src/librustdoc/clean/mod.rs
@@ -464,7 +464,9 @@ pub struct TyParam {
     pub name: String,
     pub did: ast::DefId,
     pub bounds: Vec<TyParamBound>,
-    pub default: Option<Type>
+    pub default: Option<Type>,
+    /// An optional default bound on the parameter which is unbound, like `Sized?`
+    pub default_unbound: Option<Type>
 }
 
 impl Clean<TyParam> for ast::TyParam {
@@ -473,7 +475,8 @@ impl Clean<TyParam> for ast::TyParam {
             name: self.ident.clean(cx),
             did: ast::DefId { krate: ast::LOCAL_CRATE, node: self.id },
             bounds: self.bounds.clean(cx),
-            default: self.default.clean(cx)
+            default: self.default.clean(cx),
+            default_unbound: self.unbound.clean(cx)
         }
     }
 }
@@ -482,11 +485,13 @@ impl<'tcx> Clean<TyParam> for ty::TypeParameterDef<'tcx> {
     fn clean(&self, cx: &DocContext) -> TyParam {
         cx.external_typarams.borrow_mut().as_mut().unwrap()
           .insert(self.def_id, self.name.clean(cx));
+        let (bounds, default_unbound) = self.bounds.clean(cx);
         TyParam {
             name: self.name.clean(cx),
             did: self.def_id,
-            bounds: self.bounds.clean(cx),
-            default: self.default.clean(cx)
+            bounds: bounds,
+            default: self.default.clean(cx),
+            default_unbound: default_unbound
         }
     }
 }
@@ -588,12 +593,16 @@ impl<'tcx> Clean<TyParamBound> for ty::TraitRef<'tcx> {
     }
 }
 
-impl<'tcx> Clean<Vec<TyParamBound>> for ty::ParamBounds<'tcx> {
-    fn clean(&self, cx: &DocContext) -> Vec<TyParamBound> {
+// Returns (bounds, default_unbound)
+impl<'tcx> Clean<(Vec<TyParamBound>, Option<Type>)> for ty::ParamBounds<'tcx> {
+    fn clean(&self, cx: &DocContext) -> (Vec<TyParamBound>, Option<Type>) {
         let mut v = Vec::new();
+        let mut has_sized_bound = false;
         for b in self.builtin_bounds.iter() {
             if b != ty::BoundSized {
                 v.push(b.clean(cx));
+            } else {
+                has_sized_bound = true;
             }
         }
         for t in self.trait_bounds.iter() {
@@ -602,7 +611,15 @@ impl<'tcx> Clean<Vec<TyParamBound>> for ty::ParamBounds<'tcx> {
         for r in self.region_bounds.iter().filter_map(|r| r.clean(cx)) {
             v.push(RegionBound(r));
         }
-        return v;
+        if has_sized_bound {
+            (v, None)
+        } else {
+            let ty = match ty::BoundSized.clean(cx) {
+                TraitBound(ty) => ty,
+                _ => unreachable!()
+            };
+            (v, Some(ty))
+        }
     }
 }
 
@@ -950,6 +967,8 @@ pub struct Trait {
     pub items: Vec<TraitMethod>,
     pub generics: Generics,
     pub bounds: Vec<TyParamBound>,
+    /// An optional default bound not required for `Self`, like `Sized?`
+    pub default_unbound: Option<Type>
 }
 
 impl Clean<Item> for doctree::Trait {
@@ -965,6 +984,7 @@ impl Clean<Item> for doctree::Trait {
                 items: self.items.clean(cx),
                 generics: self.generics.clean(cx),
                 bounds: self.bounds.clean(cx),
+                default_unbound: self.default_unbound.clean(cx)
             }),
         }
     }
@@ -2258,7 +2278,8 @@ impl Clean<Item> for ty::AssociatedType {
                     node: ast::DUMMY_NODE_ID
                 },
                 bounds: vec![],
-                default: None
+                default: None,
+                default_unbound: None
             }),
             visibility: None,
             def_id: self.def_id,
diff --git a/src/librustdoc/doctree.rs b/src/librustdoc/doctree.rs
index b78ce21eb06e3..adfd9aa821328 100644
--- a/src/librustdoc/doctree.rs
+++ b/src/librustdoc/doctree.rs
@@ -177,6 +177,7 @@ pub struct Trait {
     pub whence: Span,
     pub vis: ast::Visibility,
     pub stab: Option<attr::Stability>,
+    pub default_unbound: Option<ast::TraitRef> // FIXME(tomjakubowski)
 }
 
 pub struct Impl {
diff --git a/src/librustdoc/html/format.rs b/src/librustdoc/html/format.rs
index 43aef11ce5c2f..4d12701379002 100644
--- a/src/librustdoc/html/format.rs
+++ b/src/librustdoc/html/format.rs
@@ -94,6 +94,9 @@ impl fmt::Show for clean::Generics {
                 if i > 0 {
                     try!(f.write(", ".as_bytes()))
                 }
+                if let Some(ref unbound) = tp.default_unbound {
+                    try!(write!(f, "{}? ", unbound));
+                };
                 try!(f.write(tp.name.as_bytes()));
 
                 if tp.bounds.len() > 0 {
diff --git a/src/librustdoc/html/render.rs b/src/librustdoc/html/render.rs
index 24b5904b6d3b0..3fbb2a8749f90 100644
--- a/src/librustdoc/html/render.rs
+++ b/src/librustdoc/html/render.rs
@@ -1670,7 +1670,13 @@ fn item_function(w: &mut fmt::Formatter, it: &clean::Item,
 fn item_trait(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
               t: &clean::Trait) -> fmt::Result {
     let mut bounds = String::new();
+    if let Some(ref ty) = t.default_unbound {
+        bounds.push_str(format!(" for {}?", ty).as_slice());
+    }
     if t.bounds.len() > 0 {
+        if bounds.len() > 0 {
+            bounds.push(' ');
+        }
         bounds.push_str(": ");
         for (i, p) in t.bounds.iter().enumerate() {
             if i > 0 { bounds.push_str(" + "); }
diff --git a/src/librustdoc/visit_ast.rs b/src/librustdoc/visit_ast.rs
index 8f0f19fe16d0c..b5b34ef6efe6a 100644
--- a/src/librustdoc/visit_ast.rs
+++ b/src/librustdoc/visit_ast.rs
@@ -322,7 +322,7 @@ impl<'a, 'tcx> RustdocVisitor<'a, 'tcx> {
                 };
                 om.constants.push(s);
             },
-            ast::ItemTrait(ref gen, _, ref b, ref items) => {
+            ast::ItemTrait(ref gen, ref def_ub, ref b, ref items) => {
                 let t = Trait {
                     name: name,
                     items: items.clone(),
@@ -333,6 +333,7 @@ impl<'a, 'tcx> RustdocVisitor<'a, 'tcx> {
                     whence: item.span,
                     vis: item.vis,
                     stab: self.stability(item.id),
+                    default_unbound: def_ub.clone()
                 };
                 om.traits.push(t);
             },

From ba9e02f862b0540b809ea7c064eb7b4f04fd7512 Mon Sep 17 00:00:00 2001
From: Steve Klabnik <steve@steveklabnik.com>
Date: Mon, 24 Nov 2014 17:23:55 -0500
Subject: [PATCH 03/40] remove the generation of grammar from the reference

---
 configure  |  1 -
 mk/docs.mk | 20 --------------------
 2 files changed, 21 deletions(-)

diff --git a/configure b/configure
index 5ac398220592d..613f62db9e49a 100755
--- a/configure
+++ b/configure
@@ -624,7 +624,6 @@ probe CFG_LD               ld
 probe CFG_VALGRIND         valgrind
 probe CFG_PERF             perf
 probe CFG_ISCC             iscc
-probe CFG_LLNEXTGEN        LLnextgen
 probe CFG_JAVAC            javac
 probe CFG_ANTLR4           antlr4
 probe CFG_GRUN             grun
diff --git a/mk/docs.mk b/mk/docs.mk
index 48eb9e81c20a4..59b17ff16de33 100644
--- a/mk/docs.mk
+++ b/mk/docs.mk
@@ -246,26 +246,6 @@ endef
 $(foreach lang,$(L10N_LANGS),$(eval $(call DEF_L10N_DOC,$(lang),guide)))
 
 
-######################################################################
-# LLnextgen (grammar analysis from refman)
-######################################################################
-
-ifeq ($(CFG_LLNEXTGEN),)
-  $(info cfg: no llnextgen found, omitting grammar-verification)
-else
-.PHONY: verify-grammar
-
-doc/rust.g: $(D)/rust.md $(S)src/etc/extract_grammar.py
-	@$(call E, extract_grammar: $@)
-	$(Q)$(CFG_PYTHON) $(S)src/etc/extract_grammar.py $< >$@
-
-verify-grammar: doc/rust.g
-	@$(call E, LLnextgen: $<)
-	$(Q)$(CFG_LLNEXTGEN) --generate-lexer-wrapper=no $< >$@
-	$(Q)rm -f doc/rust.c doc/rust.h
-endif
-
-
 ######################################################################
 # Rustdoc (libstd/extra)
 ######################################################################

From 3b9dfd6af04ca008a4c2ef13b7fd2e8433dc473f Mon Sep 17 00:00:00 2001
From: Ben S <ogham@bsago.me>
Date: Mon, 24 Nov 2014 19:04:54 +0000
Subject: [PATCH 04/40] Clean up FileType enum following enum namespacing

All of the enum components had a redundant 'Type' specifier: TypeSymlink, TypeDirectory, TypeFile. This change removes them, replacing them with a namespace: FileType::Symlink, FileType::Directory, and FileType::RegularFile.

RegularFile is used instead of just File, as File by itself could be mistakenly thought of as referring to the struct.

[breaking-change]
---
 src/librustc_back/fs.rs          |  2 +-
 src/libstd/io/fs.rs              | 30 +++++++++++++++---------------
 src/libstd/io/mod.rs             | 13 ++++++-------
 src/libstd/sys/unix/fs.rs        | 12 ++++++------
 src/libstd/sys/windows/fs.rs     | 12 ++++++------
 src/test/run-pass/issue-18619.rs |  2 +-
 6 files changed, 35 insertions(+), 36 deletions(-)

diff --git a/src/librustc_back/fs.rs b/src/librustc_back/fs.rs
index d9bf804403955..d7deb09985f78 100644
--- a/src/librustc_back/fs.rs
+++ b/src/librustc_back/fs.rs
@@ -37,7 +37,7 @@ pub fn realpath(original: &Path) -> io::IoResult<Path> {
 
             match fs::lstat(&result) {
                 Err(..) => break,
-                Ok(ref stat) if stat.kind != io::TypeSymlink => break,
+                Ok(ref stat) if stat.kind != io::FileType::Symlink => break,
                 Ok(..) => {
                     followed += 1;
                     let path = try!(fs::readlink(&result));
diff --git a/src/libstd/io/fs.rs b/src/libstd/io/fs.rs
index cd4141e045cb5..fd411099fbc98 100644
--- a/src/libstd/io/fs.rs
+++ b/src/libstd/io/fs.rs
@@ -54,7 +54,7 @@ fs::unlink(&path);
 
 use clone::Clone;
 use io::standard_error;
-use io::{FilePermission, Write, Open, FileAccess, FileMode};
+use io::{FilePermission, Write, Open, FileAccess, FileMode, FileType};
 use io::{IoResult, IoError, FileStat, SeekStyle, Seek, Writer, Reader};
 use io::{Read, Truncate, ReadWrite, Append};
 use io::UpdateIoError;
@@ -592,7 +592,7 @@ pub fn mkdir_recursive(path: &Path, mode: FilePermission) -> IoResult<()> {
         match result {
             Err(mkdir_err) => {
                 // already exists ?
-                if try!(stat(&curpath)).kind != io::TypeDirectory {
+                if try!(stat(&curpath)).kind != FileType::Directory {
                     return Err(mkdir_err);
                 }
             }
@@ -638,7 +638,7 @@ pub fn rmdir_recursive(path: &Path) -> IoResult<()> {
                 false => try!(update_err(lstat(&child), path))
             };
 
-            if child_type.kind == io::TypeDirectory {
+            if child_type.kind == FileType::Directory {
                 rm_stack.push(child);
                 has_child_dir = true;
             } else {
@@ -772,13 +772,13 @@ impl PathExtensions for path::Path {
     }
     fn is_file(&self) -> bool {
         match self.stat() {
-            Ok(s) => s.kind == io::TypeFile,
+            Ok(s) => s.kind == FileType::RegularFile,
             Err(..) => false
         }
     }
     fn is_dir(&self) -> bool {
         match self.stat() {
-            Ok(s) => s.kind == io::TypeDirectory,
+            Ok(s) => s.kind == FileType::Directory,
             Err(..) => false
         }
     }
@@ -806,7 +806,7 @@ fn access_string(access: FileAccess) -> &'static str {
 #[allow(unused_mut)]
 mod test {
     use prelude::*;
-    use io::{SeekSet, SeekCur, SeekEnd, Read, Open, ReadWrite};
+    use io::{SeekSet, SeekCur, SeekEnd, Read, Open, ReadWrite, FileType};
     use io;
     use str;
     use io::fs::*;
@@ -1028,12 +1028,12 @@ mod test {
             fs.write(msg.as_bytes()).unwrap();
 
             let fstat_res = check!(fs.stat());
-            assert_eq!(fstat_res.kind, io::TypeFile);
+            assert_eq!(fstat_res.kind, FileType::RegularFile);
         }
         let stat_res_fn = check!(stat(filename));
-        assert_eq!(stat_res_fn.kind, io::TypeFile);
+        assert_eq!(stat_res_fn.kind, FileType::RegularFile);
         let stat_res_meth = check!(filename.stat());
-        assert_eq!(stat_res_meth.kind, io::TypeFile);
+        assert_eq!(stat_res_meth.kind, FileType::RegularFile);
         check!(unlink(filename));
     }
 
@@ -1043,9 +1043,9 @@ mod test {
         let filename = &tmpdir.join("file_stat_correct_on_is_dir");
         check!(mkdir(filename, io::USER_RWX));
         let stat_res_fn = check!(stat(filename));
-        assert!(stat_res_fn.kind == io::TypeDirectory);
+        assert!(stat_res_fn.kind == FileType::Directory);
         let stat_res_meth = check!(filename.stat());
-        assert!(stat_res_meth.kind == io::TypeDirectory);
+        assert!(stat_res_meth.kind == FileType::Directory);
         check!(rmdir(filename));
     }
 
@@ -1315,8 +1315,8 @@ mod test {
         check!(File::create(&input).write("foobar".as_bytes()));
         check!(symlink(&input, &out));
         if cfg!(not(windows)) {
-            assert_eq!(check!(lstat(&out)).kind, io::TypeSymlink);
-            assert_eq!(check!(out.lstat()).kind, io::TypeSymlink);
+            assert_eq!(check!(lstat(&out)).kind, FileType::Symlink);
+            assert_eq!(check!(out.lstat()).kind, FileType::Symlink);
         }
         assert_eq!(check!(stat(&out)).size, check!(stat(&input)).size);
         assert_eq!(check!(File::open(&out).read_to_end()),
@@ -1350,8 +1350,8 @@ mod test {
         check!(File::create(&input).write("foobar".as_bytes()));
         check!(link(&input, &out));
         if cfg!(not(windows)) {
-            assert_eq!(check!(lstat(&out)).kind, io::TypeFile);
-            assert_eq!(check!(out.lstat()).kind, io::TypeFile);
+            assert_eq!(check!(lstat(&out)).kind, FileType::RegularFile);
+            assert_eq!(check!(out.lstat()).kind, FileType::RegularFile);
             assert_eq!(check!(stat(&out)).unstable.nlink, 2);
             assert_eq!(check!(out.stat()).unstable.nlink, 2);
         }
diff --git a/src/libstd/io/mod.rs b/src/libstd/io/mod.rs
index 681400e9db581..85d9bc484994d 100644
--- a/src/libstd/io/mod.rs
+++ b/src/libstd/io/mod.rs
@@ -224,7 +224,6 @@ responding to errors that may occur while attempting to read the numbers.
 pub use self::SeekStyle::*;
 pub use self::FileMode::*;
 pub use self::FileAccess::*;
-pub use self::FileType::*;
 pub use self::IoErrorKind::*;
 
 use char::Char;
@@ -1698,22 +1697,22 @@ pub enum FileAccess {
 #[deriving(PartialEq, Show, Hash, Clone)]
 pub enum FileType {
     /// This is a normal file, corresponding to `S_IFREG`
-    TypeFile,
+    RegularFile,
 
     /// This file is a directory, corresponding to `S_IFDIR`
-    TypeDirectory,
+    Directory,
 
     /// This file is a named pipe, corresponding to `S_IFIFO`
-    TypeNamedPipe,
+    NamedPipe,
 
     /// This file is a block device, corresponding to `S_IFBLK`
-    TypeBlockSpecial,
+    BlockSpecial,
 
     /// This file is a symbolic link to another file, corresponding to `S_IFLNK`
-    TypeSymlink,
+    Symlink,
 
     /// The type of this file is not recognized as one of the other categories
-    TypeUnknown,
+    Unknown,
 }
 
 /// A structure used to describe metadata information about a file. This
diff --git a/src/libstd/sys/unix/fs.rs b/src/libstd/sys/unix/fs.rs
index 816876b5e4ad9..4b47b768d600c 100644
--- a/src/libstd/sys/unix/fs.rs
+++ b/src/libstd/sys/unix/fs.rs
@@ -305,12 +305,12 @@ fn mkstat(stat: &libc::stat) -> FileStat {
     FileStat {
         size: stat.st_size as u64,
         kind: match (stat.st_mode as libc::mode_t) & libc::S_IFMT {
-            libc::S_IFREG => io::TypeFile,
-            libc::S_IFDIR => io::TypeDirectory,
-            libc::S_IFIFO => io::TypeNamedPipe,
-            libc::S_IFBLK => io::TypeBlockSpecial,
-            libc::S_IFLNK => io::TypeSymlink,
-            _ => io::TypeUnknown,
+            libc::S_IFREG => io::FileType::RegularFile,
+            libc::S_IFDIR => io::FileType::Directory,
+            libc::S_IFIFO => io::FileType::NamedPipe,
+            libc::S_IFBLK => io::FileType::BlockSpecial,
+            libc::S_IFLNK => io::FileType::Symlink,
+            _ => io::FileType::Unknown,
         },
         perm: FilePermission::from_bits_truncate(stat.st_mode as u32),
         created: mktime(stat.st_ctime as u64, stat.st_ctime_nsec as u64),
diff --git a/src/libstd/sys/windows/fs.rs b/src/libstd/sys/windows/fs.rs
index b881eb2d4955c..9c4ffb926b5ae 100644
--- a/src/libstd/sys/windows/fs.rs
+++ b/src/libstd/sys/windows/fs.rs
@@ -407,12 +407,12 @@ fn mkstat(stat: &libc::stat) -> FileStat {
     FileStat {
         size: stat.st_size as u64,
         kind: match (stat.st_mode as libc::c_int) & libc::S_IFMT {
-            libc::S_IFREG => io::TypeFile,
-            libc::S_IFDIR => io::TypeDirectory,
-            libc::S_IFIFO => io::TypeNamedPipe,
-            libc::S_IFBLK => io::TypeBlockSpecial,
-            libc::S_IFLNK => io::TypeSymlink,
-            _ => io::TypeUnknown,
+            libc::S_IFREG => io::FileType::RegularFile,
+            libc::S_IFDIR => io::FileType::Directory,
+            libc::S_IFIFO => io::FileType::NamedPipe,
+            libc::S_IFBLK => io::FileType::BlockSpecial,
+            libc::S_IFLNK => io::FileType::Symlink,
+            _ => io::FileType::Unknown,
         },
         perm: FilePermission::from_bits_truncate(stat.st_mode as u32),
         created: stat.st_ctime as u64,
diff --git a/src/test/run-pass/issue-18619.rs b/src/test/run-pass/issue-18619.rs
index 70ccc20e01a16..a885513611d3b 100644
--- a/src/test/run-pass/issue-18619.rs
+++ b/src/test/run-pass/issue-18619.rs
@@ -11,5 +11,5 @@
 use std::io::FileType;
 
 pub fn main() {
-    let _ = FileType::TypeFile.clone();
+    let _ = FileType::RegularFile.clone();
 }

From 36372b929e9d44c7421827b160505854ceeb9a83 Mon Sep 17 00:00:00 2001
From: Alex Crichton <alex@alexcrichton.com>
Date: Mon, 24 Nov 2014 15:54:14 -0800
Subject: [PATCH 05/40] std: Export BinarySearchResult

At the same time remove the `pub use` of the variants in favor of accessing
through the enum type itself. This is a breaking change as the `Found` and
`NotFound` variants must now be imported through `BinarySearchResult` instead of
just `std::slice`.

[breaking-change]
Closes #19272
---
 src/libcollections/slice.rs |  2 +-
 src/libcore/slice.rs        | 18 ++++++++----------
 src/libregex/parse.rs       |  6 +++---
 3 files changed, 12 insertions(+), 14 deletions(-)

diff --git a/src/libcollections/slice.rs b/src/libcollections/slice.rs
index ab5ac5bf9e146..63f6e1ce470cf 100644
--- a/src/libcollections/slice.rs
+++ b/src/libcollections/slice.rs
@@ -106,7 +106,7 @@ pub use core::slice::{OrdSlicePrelude, SlicePrelude, Items, MutItems};
 pub use core::slice::{ImmutableIntSlice, MutableIntSlice};
 pub use core::slice::{MutSplits, MutChunks, Splits};
 pub use core::slice::{bytes, mut_ref_slice, ref_slice, CloneSlicePrelude};
-pub use core::slice::{Found, NotFound, from_raw_buf, from_raw_mut_buf};
+pub use core::slice::{from_raw_buf, from_raw_mut_buf, BinarySearchResult};
 
 // Functional utilities
 
diff --git a/src/libcore/slice.rs b/src/libcore/slice.rs
index 754da272c2481..40d56f230dce7 100644
--- a/src/libcore/slice.rs
+++ b/src/libcore/slice.rs
@@ -34,8 +34,6 @@
 // * The `raw` and `bytes` submodules.
 // * Boilerplate trait implementations.
 
-pub use self::BinarySearchResult::*;
-
 use mem::transmute;
 use clone::Clone;
 use cmp::{PartialEq, PartialOrd, Eq, Ord, Ordering, Less, Equal, Greater, Equiv};
@@ -219,7 +217,7 @@ pub trait SlicePrelude<T> for Sized? {
     /// found; the fourth could match any position in `[1,4]`.
     ///
     /// ```rust
-    /// use std::slice::{Found, NotFound};
+    /// use std::slice::BinarySearchResult::{Found, NotFound};
     /// let s = [0i, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
     /// let s = s.as_slice();
     ///
@@ -548,7 +546,7 @@ impl<T> SlicePrelude<T> for [T] {
         while lim != 0 {
             let ix = base + (lim >> 1);
             match f(&self[ix]) {
-                Equal => return Found(ix),
+                Equal => return BinarySearchResult::Found(ix),
                 Less => {
                     base = ix + 1;
                     lim -= 1;
@@ -557,7 +555,7 @@ impl<T> SlicePrelude<T> for [T] {
             }
             lim >>= 1;
         }
-        return NotFound(base);
+        return BinarySearchResult::NotFound(base);
     }
 
     #[inline]
@@ -838,7 +836,7 @@ pub trait OrdSlicePrelude<T: Ord> for Sized? {
     /// found; the fourth could match any position in `[1,4]`.
     ///
     /// ```rust
-    /// use std::slice::{Found, NotFound};
+    /// use std::slice::BinarySearchResult::{Found, NotFound};
     /// let s = [0i, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
     /// let s = s.as_slice();
     ///
@@ -1517,8 +1515,8 @@ impl BinarySearchResult {
     /// Similar to `Result::ok`.
     pub fn found(&self) -> Option<uint> {
         match *self {
-            Found(i) => Some(i),
-            NotFound(_) => None
+            BinarySearchResult::Found(i) => Some(i),
+            BinarySearchResult::NotFound(_) => None
         }
     }
 
@@ -1526,8 +1524,8 @@ impl BinarySearchResult {
     /// Similar to `Result::err`.
     pub fn not_found(&self) -> Option<uint> {
         match *self {
-            Found(_) => None,
-            NotFound(i) => Some(i)
+            BinarySearchResult::Found(_) => None,
+            BinarySearchResult::NotFound(i) => Some(i)
         }
     }
 }
diff --git a/src/libregex/parse.rs b/src/libregex/parse.rs
index c6f09e4697182..2bf3fa992cd66 100644
--- a/src/libregex/parse.rs
+++ b/src/libregex/parse.rs
@@ -18,7 +18,7 @@ use std::cmp;
 use std::fmt;
 use std::iter;
 use std::num;
-use std::slice;
+use std::slice::BinarySearchResult;
 
 /// Static data containing Unicode ranges for general categories and scripts.
 use unicode::regex::{UNICODE_CLASSES, PERLD, PERLS, PERLW};
@@ -1027,8 +1027,8 @@ fn is_valid_cap(c: char) -> bool {
 
 fn find_class(classes: NamedClasses, name: &str) -> Option<Vec<(char, char)>> {
     match classes.binary_search(|&(s, _)| s.cmp(name)) {
-        slice::Found(i) => Some(classes[i].val1().to_vec()),
-        slice::NotFound(_) => None,
+        BinarySearchResult::Found(i) => Some(classes[i].val1().to_vec()),
+        BinarySearchResult::NotFound(_) => None,
     }
 }
 

From 2a6f197bf4358f6ed9211777e59553128caa459b Mon Sep 17 00:00:00 2001
From: Chase Southwood <chase.southwood@gmail.com>
Date: Tue, 25 Nov 2014 02:15:28 -0600
Subject: [PATCH 06/40] Implement union, intersection, and difference functions
 for TrieSet.

---
 src/libcollections/trie/set.rs | 269 ++++++++++++++++++++++++++++++++-
 1 file changed, 268 insertions(+), 1 deletion(-)

diff --git a/src/libcollections/trie/set.rs b/src/libcollections/trie/set.rs
index f40c0db5edf99..dd884b6ee41d8 100644
--- a/src/libcollections/trie/set.rs
+++ b/src/libcollections/trie/set.rs
@@ -9,7 +9,6 @@
 // except according to those terms.
 
 // FIXME(conventions): implement bounded iterators
-// FIXME(conventions): implement union family of fns
 // FIXME(conventions): implement BitOr, BitAnd, BitXor, and Sub
 // FIXME(conventions): replace each_reverse by making iter DoubleEnded
 // FIXME(conventions): implement iter_mut and into_iter
@@ -19,6 +18,7 @@ use core::prelude::*;
 use core::default::Default;
 use core::fmt;
 use core::fmt::Show;
+use core::iter::Peekable;
 use std::hash::Hash;
 
 use trie_map::{TrieMap, Entries};
@@ -172,6 +172,106 @@ impl TrieSet {
         SetItems{iter: self.map.upper_bound(val)}
     }
 
+    /// Visits the values representing the difference, in ascending order.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use std::collections::TrieSet;
+    ///
+    /// let a: TrieSet = [1, 2, 3].iter().map(|&x| x).collect();
+    /// let b: TrieSet = [3, 4, 5].iter().map(|&x| x).collect();
+    ///
+    /// // Can be seen as `a - b`.
+    /// for x in a.difference(&b) {
+    ///     println!("{}", x); // Print 1 then 2
+    /// }
+    ///
+    /// let diff1: TrieSet = a.difference(&b).collect();
+    /// assert_eq!(diff1, [1, 2].iter().map(|&x| x).collect());
+    ///
+    /// // Note that difference is not symmetric,
+    /// // and `b - a` means something else:
+    /// let diff2: TrieSet = b.difference(&a).collect();
+    /// assert_eq!(diff2, [4, 5].iter().map(|&x| x).collect());
+    /// ```
+    #[unstable = "matches collection reform specification, waiting for dust to settle"]
+    pub fn difference<'a>(&'a self, other: &'a TrieSet) -> DifferenceItems<'a> {
+        DifferenceItems{a: self.iter().peekable(), b: other.iter().peekable()}
+    }
+
+    /// Visits the values representing the symmetric difference, in ascending order.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use std::collections::TrieSet;
+    ///
+    /// let a: TrieSet = [1, 2, 3].iter().map(|&x| x).collect();
+    /// let b: TrieSet = [3, 4, 5].iter().map(|&x| x).collect();
+    ///
+    /// // Print 1, 2, 4, 5 in ascending order.
+    /// for x in a.symmetric_difference(&b) {
+    ///     println!("{}", x);
+    /// }
+    ///
+    /// let diff1: TrieSet = a.symmetric_difference(&b).collect();
+    /// let diff2: TrieSet = b.symmetric_difference(&a).collect();
+    ///
+    /// assert_eq!(diff1, diff2);
+    /// assert_eq!(diff1, [1, 2, 4, 5].iter().map(|&x| x).collect());
+    /// ```
+    #[unstable = "matches collection reform specification, waiting for dust to settle"]
+    pub fn symmetric_difference<'a>(&'a self, other: &'a TrieSet) -> SymDifferenceItems<'a> {
+        SymDifferenceItems{a: self.iter().peekable(), b: other.iter().peekable()}
+    }
+
+    /// Visits the values representing the intersection, in ascending order.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use std::collections::TrieSet;
+    ///
+    /// let a: TrieSet = [1, 2, 3].iter().map(|&x| x).collect();
+    /// let b: TrieSet = [2, 3, 4].iter().map(|&x| x).collect();
+    ///
+    /// // Print 2, 3 in ascending order.
+    /// for x in a.intersection(&b) {
+    ///     println!("{}", x);
+    /// }
+    ///
+    /// let diff: TrieSet = a.intersection(&b).collect();
+    /// assert_eq!(diff, [2, 3].iter().map(|&x| x).collect());
+    /// ```
+    #[unstable = "matches collection reform specification, waiting for dust to settle"]
+    pub fn intersection<'a>(&'a self, other: &'a TrieSet) -> IntersectionItems<'a> {
+        IntersectionItems{a: self.iter().peekable(), b: other.iter().peekable()}
+    }
+
+    /// Visits the values representing the union, in ascending order.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use std::collections::TrieSet;
+    ///
+    /// let a: TrieSet = [1, 2, 3].iter().map(|&x| x).collect();
+    /// let b: TrieSet = [3, 4, 5].iter().map(|&x| x).collect();
+    ///
+    /// // Print 1, 2, 3, 4, 5 in ascending order.
+    /// for x in a.union(&b) {
+    ///     println!("{}", x);
+    /// }
+    ///
+    /// let diff: TrieSet = a.union(&b).collect();
+    /// assert_eq!(diff, [1, 2, 3, 4, 5].iter().map(|&x| x).collect());
+    /// ```
+    #[unstable = "matches collection reform specification, waiting for dust to settle"]
+    pub fn union<'a>(&'a self, other: &'a TrieSet) -> UnionItems<'a> {
+        UnionItems{a: self.iter().peekable(), b: other.iter().peekable()}
+    }
+
     /// Return the number of elements in the set
     ///
     /// # Example
@@ -368,6 +468,39 @@ pub struct SetItems<'a> {
     iter: Entries<'a, ()>
 }
 
+/// An iterator producing elements in the set difference (in-order).
+pub struct DifferenceItems<'a> {
+    a: Peekable<uint, SetItems<'a>>,
+    b: Peekable<uint, SetItems<'a>>,
+}
+
+/// An iterator producing elements in the set symmetric difference (in-order).
+pub struct SymDifferenceItems<'a> {
+    a: Peekable<uint, SetItems<'a>>,
+    b: Peekable<uint, SetItems<'a>>,
+}
+
+/// An iterator producing elements in the set intersection (in-order).
+pub struct IntersectionItems<'a> {
+    a: Peekable<uint, SetItems<'a>>,
+    b: Peekable<uint, SetItems<'a>>,
+}
+
+/// An iterator producing elements in the set union (in-order).
+pub struct UnionItems<'a> {
+    a: Peekable<uint, SetItems<'a>>,
+    b: Peekable<uint, SetItems<'a>>,
+}
+
+/// Compare `x` and `y`, but return `short` if x is None and `long` if y is None
+fn cmp_opt(x: Option<&uint>, y: Option<&uint>, short: Ordering, long: Ordering) -> Ordering {
+    match (x, y) {
+        (None    , _       ) => short,
+        (_       , None    ) => long,
+        (Some(x1), Some(y1)) => x1.cmp(y1),
+    }
+}
+
 impl<'a> Iterator<uint> for SetItems<'a> {
     fn next(&mut self) -> Option<uint> {
         self.iter.next().map(|(key, _)| key)
@@ -378,6 +511,60 @@ impl<'a> Iterator<uint> for SetItems<'a> {
     }
 }
 
+impl<'a> Iterator<uint> for DifferenceItems<'a> {
+    fn next(&mut self) -> Option<uint> {
+        loop {
+            match cmp_opt(self.a.peek(), self.b.peek(), Less, Less) {
+                Less    => return self.a.next(),
+                Equal   => { self.a.next(); self.b.next(); }
+                Greater => { self.b.next(); }
+            }
+        }
+    }
+}
+
+impl<'a> Iterator<uint> for SymDifferenceItems<'a> {
+    fn next(&mut self) -> Option<uint> {
+        loop {
+            match cmp_opt(self.a.peek(), self.b.peek(), Greater, Less) {
+                Less => return self.a.next(),
+                Equal => { self.a.next(); self.b.next(); }
+                Greater => return self.b.next(),
+            }
+        }
+    }
+}
+
+impl<'a> Iterator<uint> for IntersectionItems<'a> {
+    fn next(&mut self) -> Option<uint> {
+        loop {
+            let o_cmp = match (self.a.peek(), self.b.peek()) {
+                (None    , _       ) => None,
+                (_       , None    ) => None,
+                (Some(a1), Some(b1)) => Some(a1.cmp(b1)),
+            };
+            match o_cmp {
+                None          => return None,
+                Some(Less)    => { self.a.next(); }
+                Some(Equal)   => { self.b.next(); return self.a.next() }
+                Some(Greater) => { self.b.next(); }
+            }
+        }
+    }
+}
+
+impl<'a> Iterator<uint> for UnionItems<'a> {
+    fn next(&mut self) -> Option<uint> {
+        loop {
+            match cmp_opt(self.a.peek(), self.b.peek(), Greater, Less) {
+                Less    => return self.a.next(),
+                Equal   => { self.b.next(); return self.a.next() }
+                Greater => return self.b.next(),
+            }
+        }
+    }
+}
+
 #[cfg(test)]
 mod test {
     use std::prelude::*;
@@ -471,4 +658,84 @@ mod test {
         assert!(b > a && b >= a);
         assert!(a < b && a <= b);
     }
+
+    fn check(a: &[uint],
+             b: &[uint],
+             expected: &[uint],
+             f: |&TrieSet, &TrieSet, f: |uint| -> bool| -> bool) {
+        let mut set_a = TrieSet::new();
+        let mut set_b = TrieSet::new();
+
+        for x in a.iter() { assert!(set_a.insert(*x)) }
+        for y in b.iter() { assert!(set_b.insert(*y)) }
+
+        let mut i = 0;
+        f(&set_a, &set_b, |x| {
+            assert_eq!(x, expected[i]);
+            i += 1;
+            true
+        });
+        assert_eq!(i, expected.len());
+    }
+
+    #[test]
+    fn test_intersection() {
+        fn check_intersection(a: &[uint], b: &[uint], expected: &[uint]) {
+            check(a, b, expected, |x, y, f| x.intersection(y).all(f))
+        }
+
+        check_intersection(&[], &[], &[]);
+        check_intersection(&[1, 2, 3], &[], &[]);
+        check_intersection(&[], &[1, 2, 3], &[]);
+        check_intersection(&[2], &[1, 2, 3], &[2]);
+        check_intersection(&[1, 2, 3], &[2], &[2]);
+        check_intersection(&[11, 1, 3, 77, 103, 5],
+                           &[2, 11, 77, 5, 3],
+                           &[3, 5, 11, 77]);
+    }
+
+    #[test]
+    fn test_difference() {
+        fn check_difference(a: &[uint], b: &[uint], expected: &[uint]) {
+            check(a, b, expected, |x, y, f| x.difference(y).all(f))
+        }
+
+        check_difference(&[], &[], &[]);
+        check_difference(&[1, 12], &[], &[1, 12]);
+        check_difference(&[], &[1, 2, 3, 9], &[]);
+        check_difference(&[1, 3, 5, 9, 11],
+                         &[3, 9],
+                         &[1, 5, 11]);
+        check_difference(&[11, 22, 33, 40, 42],
+                         &[14, 23, 34, 38, 39, 50],
+                         &[11, 22, 33, 40, 42]);
+    }
+
+    #[test]
+    fn test_symmetric_difference() {
+        fn check_symmetric_difference(a: &[uint], b: &[uint], expected: &[uint]) {
+            check(a, b, expected, |x, y, f| x.symmetric_difference(y).all(f))
+        }
+
+        check_symmetric_difference(&[], &[], &[]);
+        check_symmetric_difference(&[1, 2, 3], &[2], &[1, 3]);
+        check_symmetric_difference(&[2], &[1, 2, 3], &[1, 3]);
+        check_symmetric_difference(&[1, 3, 5, 9, 11],
+                                   &[3, 9, 14, 22],
+                                   &[1, 5, 11, 14, 22]);
+    }
+
+    #[test]
+    fn test_union() {
+        fn check_union(a: &[uint], b: &[uint], expected: &[uint]) {
+            check(a, b, expected, |x, y, f| x.union(y).all(f))
+        }
+
+        check_union(&[], &[], &[]);
+        check_union(&[1, 2, 3], &[2], &[1, 2, 3]);
+        check_union(&[2], &[1, 2, 3], &[1, 2, 3]);
+        check_union(&[1, 3, 5, 9, 11, 16, 19, 24],
+                    &[1, 5, 9, 13, 19],
+                    &[1, 3, 5, 9, 11, 13, 16, 19, 24]);
+    }
 }

From fbde11297fc06efa8e1e63527467ef7e65d072fa Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Adolfo=20Ochagav=C3=ADa?= <aochagavia92@gmail.com>
Date: Mon, 24 Nov 2014 12:46:02 +0100
Subject: [PATCH 07/40] Allow constant struct fields and tuple indexing

---
 src/librustc/middle/const_eval.rs | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)

diff --git a/src/librustc/middle/const_eval.rs b/src/librustc/middle/const_eval.rs
index 98ac7e413ca7c..7afbf92235147 100644
--- a/src/librustc/middle/const_eval.rs
+++ b/src/librustc/middle/const_eval.rs
@@ -567,6 +567,34 @@ pub fn eval_const_expr_partial(tcx: &ty::ctxt, e: &Expr) -> Result<const_val, St
             None => Ok(const_int(0i64))
         }
       }
+      ast::ExprTupField(ref base, index) => {
+        // Get the base tuple if it is constant
+        if let Some(&ast::ExprTup(ref fields)) = lookup_const(tcx, &**base).map(|s| &s.node) {
+            // Check that the given index is within bounds and evaluate its value
+            if fields.len() > index.node {
+                return eval_const_expr_partial(tcx, &*fields[index.node])
+            } else {
+                return Err("tuple index out of bounds".to_string())
+            }
+        }
+
+        Err("non-constant struct in constant expr".to_string())
+      }
+      ast::ExprField(ref base, field_name) => {
+        // Get the base expression if it is a struct and it is constant
+        if let Some(&ast::ExprStruct(_, ref fields, _)) = lookup_const(tcx, &**base)
+                                                            .map(|s| &s.node) {
+            // Check that the given field exists and evaluate it
+            if let Some(f) = fields.iter().find(|f|
+                                           f.ident.node.as_str() == field_name.node.as_str()) {
+                return eval_const_expr_partial(tcx, &*f.expr)
+            } else {
+                return Err("nonexistent struct field".to_string())
+            }
+        }
+
+        Err("non-constant struct in constant expr".to_string())
+      }
       _ => Err("unsupported constant expr".to_string())
     }
 }

From 080e625dae129aad7db4e69d74fa0f767f700325 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Adolfo=20Ochagav=C3=ADa?= <aochagavia92@gmail.com>
Date: Tue, 25 Nov 2014 10:19:58 +0100
Subject: [PATCH 08/40] Add tests for issue 19244

---
 src/test/compile-fail/issue-19244-1.rs | 18 ++++++++++++++++++
 src/test/compile-fail/issue-19244-2.rs | 17 +++++++++++++++++
 src/test/run-pass/issue-19244.rs       | 23 +++++++++++++++++++++++
 3 files changed, 58 insertions(+)
 create mode 100644 src/test/compile-fail/issue-19244-1.rs
 create mode 100644 src/test/compile-fail/issue-19244-2.rs
 create mode 100644 src/test/run-pass/issue-19244.rs

diff --git a/src/test/compile-fail/issue-19244-1.rs b/src/test/compile-fail/issue-19244-1.rs
new file mode 100644
index 0000000000000..4fcbb87889054
--- /dev/null
+++ b/src/test/compile-fail/issue-19244-1.rs
@@ -0,0 +1,18 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(tuple_indexing)]
+
+const TUP: (uint,) = (42,);
+
+fn main() {
+    let a: [int, ..TUP.1];
+    //~^ ERROR expected constant expr for array length: tuple index out of bounds
+}
diff --git a/src/test/compile-fail/issue-19244-2.rs b/src/test/compile-fail/issue-19244-2.rs
new file mode 100644
index 0000000000000..d9aeecc02222c
--- /dev/null
+++ b/src/test/compile-fail/issue-19244-2.rs
@@ -0,0 +1,17 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct MyStruct { field: uint }
+const STRUCT: MyStruct = MyStruct { field: 42 };
+
+fn main() {
+    let a: [int, ..STRUCT.nonexistent_field];
+    //~^ ERROR expected constant expr for array length: nonexistent struct field
+}
diff --git a/src/test/run-pass/issue-19244.rs b/src/test/run-pass/issue-19244.rs
new file mode 100644
index 0000000000000..fecddea13e0f8
--- /dev/null
+++ b/src/test/run-pass/issue-19244.rs
@@ -0,0 +1,23 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(tuple_indexing)]
+
+struct MyStruct { field: uint }
+const STRUCT: MyStruct = MyStruct { field: 42 };
+const TUP: (uint,) = (43,);
+
+fn main() {
+    let a = [0i, ..STRUCT.field];
+    let b = [0i, ..TUP.0];
+
+    assert!(a.len() == 42);
+    assert!(b.len() == 43);
+}

From 4671f7f07ca13cfc134af07e0d4556631be53104 Mon Sep 17 00:00:00 2001
From: Niko Matsakis <niko@alum.mit.edu>
Date: Tue, 25 Nov 2014 06:13:45 -0500
Subject: [PATCH 09/40] Stop indenting error messages. It throws off M-x
 next-error in emacs and seems to serve little purpose.

---
 src/libtest/lib.rs | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/src/libtest/lib.rs b/src/libtest/lib.rs
index 0ea8ca84ef8bd..6b3e7caa9769f 100644
--- a/src/libtest/lib.rs
+++ b/src/libtest/lib.rs
@@ -643,9 +643,7 @@ impl<T: Writer> ConsoleTestState<T> {
                 fail_out.push_str(format!("---- {} stdout ----\n\t",
                                           f.name.as_slice()).as_slice());
                 let output = String::from_utf8_lossy(stdout.as_slice());
-                fail_out.push_str(output.as_slice()
-                                        .replace("\n", "\n\t")
-                                        .as_slice());
+                fail_out.push_str(output.as_slice());
                 fail_out.push_str("\n");
             }
         }

From 96880be0e3c4b054aa056d293d5cec02db4d37e4 Mon Sep 17 00:00:00 2001
From: Pascal Hertleif <killercup@gmail.com>
Date: Tue, 25 Nov 2014 13:20:47 +0100
Subject: [PATCH 10/40] Change 'Failure' to 'Panic' in Bug Report Docs

---
 src/doc/complement-bugreport.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/doc/complement-bugreport.md b/src/doc/complement-bugreport.md
index 940a4ca6db7b3..918c087e66bf2 100644
--- a/src/doc/complement-bugreport.md
+++ b/src/doc/complement-bugreport.md
@@ -2,7 +2,7 @@
 
 # I think I found a bug in the compiler!
 
-If you see this message: `error: internal compiler error: unexpected failure`,
+If you see this message: `error: internal compiler error: unexpected panic`,
 then you have definitely found a bug in the compiler. It's also possible that
 your code is not well-typed, but if you saw this message, it's still a bug in
 error reporting.

From b1e720fb7e462351b24f1d3eaa8ac21044c41d28 Mon Sep 17 00:00:00 2001
From: Alexis Beingessner <a.beingessner@gmail.com>
Date: Tue, 25 Nov 2014 01:16:50 -0500
Subject: [PATCH 11/40] Make HashMap::take not corrupt the map. Fixes #19292

---
 src/libstd/collections/hash/map.rs | 36 +++++++++++++++++++++++++++++-
 1 file changed, 35 insertions(+), 1 deletion(-)

diff --git a/src/libstd/collections/hash/map.rs b/src/libstd/collections/hash/map.rs
index 69375e8d4f84e..d34e99187c236 100644
--- a/src/libstd/collections/hash/map.rs
+++ b/src/libstd/collections/hash/map.rs
@@ -1376,7 +1376,7 @@ impl<'a, K, V> OccupiedEntry<'a, K, V> {
 
     /// Takes the value out of the entry, and returns it
     pub fn take(self) -> V {
-        let (_, _, v) = self.elem.take();
+        let (_, v) = pop_internal(self.elem);
         v
     }
 }
@@ -1433,6 +1433,7 @@ mod test_map {
     use hash;
     use iter::{Iterator,range_inclusive,range_step_inclusive};
     use cell::RefCell;
+    use rand::{weak_rng, Rng};
 
     struct KindaIntLike(int);
 
@@ -2062,4 +2063,37 @@ mod test_map {
         assert_eq!(map.get(&10).unwrap(), &1000);
         assert_eq!(map.len(), 6);
     }
+
+    #[test]
+    fn test_entry_take_doesnt_corrupt() {
+        // Test for #19292
+        fn check(m: &HashMap<int, ()>) {
+            for k in m.keys() {
+                assert!(m.contains_key(k),
+                        "{} is in keys() but not in the map?", k);
+            }
+        }
+
+        let mut m = HashMap::new();
+        let mut rng = weak_rng();
+
+        // Populate the map with some items.
+        for _ in range(0u, 50) {
+            let x = rng.gen_range(-10, 10);
+            m.insert(x, ());
+        }
+
+        for i in range(0u, 1000) {
+            let x = rng.gen_range(-10, 10);
+            match m.entry(x) {
+                Vacant(_) => {},
+                Occupied(e) => {
+                    println!("{}: remove {}", i, x);
+                    e.take();
+                },
+            }
+
+            check(&m);
+        }
+    }
 }

From 71b8b04f48a3c596a50d78405eb5b0eea80ad8e6 Mon Sep 17 00:00:00 2001
From: Steve Klabnik <steve@steveklabnik.com>
Date: Tue, 25 Nov 2014 10:57:17 -0500
Subject: [PATCH 12/40] Make note about cross-borrowing.

Fixes #19302.
---
 src/doc/guide-pointers.md | 23 ++++++++++++++++++++++-
 1 file changed, 22 insertions(+), 1 deletion(-)

diff --git a/src/doc/guide-pointers.md b/src/doc/guide-pointers.md
index cf7ecd7e51ff7..64b440259cf2c 100644
--- a/src/doc/guide-pointers.md
+++ b/src/doc/guide-pointers.md
@@ -445,11 +445,32 @@ fn succ(x: &int) -> int { *x + 1 }
 to
 
 ```{rust}
+use std::rc::Rc;
+
 fn box_succ(x: Box<int>) -> int { *x + 1 }
 
-fn rc_succ(x: std::rc::Rc<int>) -> int { *x + 1 }
+fn rc_succ(x: Rc<int>) -> int { *x + 1 }
+```
+
+Note that the caller of your function will have to modify their calls slightly:
+
+```{rust}
+use std::rc::Rc;
+
+fn succ(x: &int) -> int { *x + 1 }
+
+let ref_x = &5i;
+let box_x = box 5i;
+let rc_x  = Rc::new(5i);
+
+succ(ref_x);
+succ(&*box_x);
+succ(&*rc_x);
 ```
 
+The initial `*` dereferences the pointer, and then `&` takes a reference to
+those contents.
+
 # Boxes
 
 `Box<T>` is Rust's 'boxed pointer' type. Boxes provide the simplest form of

From d7b29a6ccd0d3190070eee564b080015951b7c46 Mon Sep 17 00:00:00 2001
From: Steve Klabnik <steve@steveklabnik.com>
Date: Tue, 25 Nov 2014 11:18:39 -0500
Subject: [PATCH 13/40] Make note that examples need a main()

Fixes #19199
---
 src/doc/guide.md | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/src/doc/guide.md b/src/doc/guide.md
index 418f82c996957..c3fdb7c262536 100644
--- a/src/doc/guide.md
+++ b/src/doc/guide.md
@@ -378,9 +378,15 @@ of your time with Rust.
 The first thing we'll learn about are 'variable bindings.' They look like this:
 
 ```{rust}
-let x = 5i;
+fn main() {
+    let x = 5i;
+}
 ```
 
+Putting `fn main() {` in each example is a bit tedious, so we'll leave that out
+in the future. If you're following along, make sure to edit your `main()`
+function, rather than leaving it off. Otherwise, you'll get an error.
+
 In many languages, this is called a 'variable.' But Rust's variable bindings
 have a few tricks up their sleeves. Rust has a very powerful feature called
 'pattern matching' that we'll get into detail with later, but the left

From c9816be35a1bcdef915498939a1f62c18dee1c04 Mon Sep 17 00:00:00 2001
From: Daniel Micay <danielmicay@gmail.com>
Date: Tue, 25 Nov 2014 11:18:31 -0500
Subject: [PATCH 14/40] vec: add missing out-of-memory check

Closes #19305
---
 src/libcollections/vec.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/libcollections/vec.rs b/src/libcollections/vec.rs
index a3291e01942f1..e239125244f17 100644
--- a/src/libcollections/vec.rs
+++ b/src/libcollections/vec.rs
@@ -165,6 +165,7 @@ impl<T> Vec<T> {
             let size = capacity.checked_mul(mem::size_of::<T>())
                                .expect("capacity overflow");
             let ptr = unsafe { allocate(size, mem::min_align_of::<T>()) };
+            if ptr.is_null() { ::alloc::oom() }
             Vec { ptr: ptr as *mut T, len: 0, cap: capacity }
         }
     }

From 72beb1f88501c6324d6f337e28541f1e84e40833 Mon Sep 17 00:00:00 2001
From: Steve Klabnik <steve@steveklabnik.com>
Date: Tue, 25 Nov 2014 11:31:49 -0500
Subject: [PATCH 15/40] Extra note about struct matching order

Fixes #19178
---
 src/doc/guide.md | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/src/doc/guide.md b/src/doc/guide.md
index 418f82c996957..8f2aea0de54d7 100644
--- a/src/doc/guide.md
+++ b/src/doc/guide.md
@@ -3991,6 +3991,22 @@ match origin {
 }
 ```
 
+You can do this kind of match on any member, not just the first:
+
+```{rust}
+# #![allow(non_shorthand_field_patterns)]
+struct Point {
+    x: int,
+    y: int,
+}
+
+let origin = Point { x: 0i, y: 0i };
+
+match origin {
+    Point { y: y, .. } => println!("y is {}", y),
+}
+```
+
 Whew! That's a lot of different ways to match things, and they can all be
 mixed and matched, depending on what you're doing:
 

From 8369a607ed86a31614758839b861e0017b5f86be Mon Sep 17 00:00:00 2001
From: Steve Klabnik <steve@steveklabnik.com>
Date: Tue, 25 Nov 2014 11:37:20 -0500
Subject: [PATCH 16/40] add slice patterns to the guide

Fixes #19177.
---
 src/doc/guide.md | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/src/doc/guide.md b/src/doc/guide.md
index 418f82c996957..92b616d9c55e3 100644
--- a/src/doc/guide.md
+++ b/src/doc/guide.md
@@ -3991,6 +3991,19 @@ match origin {
 }
 ```
 
+If you want to match against a slice or array, you can use `[]`:
+
+```{rust}
+fn main() {
+    let v = vec!["match_this", "1"];
+
+    match v.as_slice() {
+        ["match_this", second] => println!("The second element is {}", second),
+        _ => {},
+    }
+}
+```
+
 Whew! That's a lot of different ways to match things, and they can all be
 mixed and matched, depending on what you're doing:
 

From 55853c532f6bede515c2161dd45c114e51b7a4cb Mon Sep 17 00:00:00 2001
From: Steve Klabnik <steve@steveklabnik.com>
Date: Tue, 25 Nov 2014 11:43:03 -0500
Subject: [PATCH 17/40] We now support 64 bit Windows.

Fixes #18844
---
 src/doc/guide.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/doc/guide.md b/src/doc/guide.md
index 418f82c996957..4fdb68a2fb8df 100644
--- a/src/doc/guide.md
+++ b/src/doc/guide.md
@@ -62,7 +62,7 @@ the easiest way to keep people updated while Rust is in its alpha state.
 
 Oh, we should also mention the officially supported platforms:
 
-* Windows (7, 8, Server 2008 R2), x86 only
+* Windows (7, 8, Server 2008 R2)
 * Linux (2.6.18 or later, various distributions), x86 and x86-64
 * OSX 10.7 (Lion) or greater, x86 and x86-64
 

From 79d9bebf49ec56078439816f4856dc4e47aa6523 Mon Sep 17 00:00:00 2001
From: Steven Fackler <sfackler@gmail.com>
Date: Tue, 25 Nov 2014 10:00:46 -0800
Subject: [PATCH 18/40] Fix xcrate enum namespacing

Closes #19293
---
 src/librustc/metadata/encoder.rs               | 10 ----------
 src/librustc_llvm/lib.rs                       |  1 +
 src/test/auxiliary/issue-13872-2.rs            |  2 +-
 src/test/auxiliary/issue_19293.rs              | 14 ++++++++++++++
 .../compile-fail/enums-are-namespaced-xc.rs    | 18 ++++++++++++++++++
 src/test/compile-fail/unreachable-variant.rs   |  2 +-
 src/test/compile-fail/xc-private-method2.rs    |  2 +-
 src/test/run-pass/issue-19293.rs               | 17 +++++++++++++++++
 src/test/run-pass/issue-2316-c.rs              |  2 +-
 src/test/run-pass/issue-8259.rs                |  2 +-
 src/test/run-pass/struct_variant_xc.rs         |  2 +-
 src/test/run-pass/struct_variant_xc_match.rs   |  2 +-
 src/test/run-pass/xcrate-unit-struct.rs        | 10 +++++-----
 13 files changed, 62 insertions(+), 22 deletions(-)
 create mode 100644 src/test/auxiliary/issue_19293.rs
 create mode 100644 src/test/compile-fail/enums-are-namespaced-xc.rs
 create mode 100644 src/test/run-pass/issue-19293.rs

diff --git a/src/librustc/metadata/encoder.rs b/src/librustc/metadata/encoder.rs
index 7e4d2621f1837..7f4e811f514a1 100644
--- a/src/librustc/metadata/encoder.rs
+++ b/src/librustc/metadata/encoder.rs
@@ -500,20 +500,10 @@ fn encode_reexported_static_methods(ecx: &EncodeContext,
 /// Iterates through "auxiliary node IDs", which are node IDs that describe
 /// top-level items that are sub-items of the given item. Specifically:
 ///
-/// * For enums, iterates through the node IDs of the variants.
-///
 /// * For newtype structs, iterates through the node ID of the constructor.
 fn each_auxiliary_node_id(item: &ast::Item, callback: |NodeId| -> bool) -> bool {
     let mut continue_ = true;
     match item.node {
-        ast::ItemEnum(ref enum_def, _) => {
-            for variant in enum_def.variants.iter() {
-                continue_ = callback(variant.node.id);
-                if !continue_ {
-                    break
-                }
-            }
-        }
         ast::ItemStruct(ref struct_def, _) => {
             // If this is a newtype struct, return the constructor.
             match struct_def.ctor_id {
diff --git a/src/librustc_llvm/lib.rs b/src/librustc_llvm/lib.rs
index d67d0fa59ae28..8d14912a6d4da 100644
--- a/src/librustc_llvm/lib.rs
+++ b/src/librustc_llvm/lib.rs
@@ -45,6 +45,7 @@ pub use self::DiagnosticKind::*;
 pub use self::CallConv::*;
 pub use self::Visibility::*;
 pub use self::DiagnosticSeverity::*;
+pub use self::Linkage::*;
 
 use std::c_str::ToCStr;
 use std::cell::RefCell;
diff --git a/src/test/auxiliary/issue-13872-2.rs b/src/test/auxiliary/issue-13872-2.rs
index e2744b7910f98..8294d2b4594cf 100644
--- a/src/test/auxiliary/issue-13872-2.rs
+++ b/src/test/auxiliary/issue-13872-2.rs
@@ -10,4 +10,4 @@
 
 extern crate "issue-13872-1" as foo;
 
-pub use foo::B;
+pub use foo::A::B;
diff --git a/src/test/auxiliary/issue_19293.rs b/src/test/auxiliary/issue_19293.rs
new file mode 100644
index 0000000000000..40c8eb9b23ad7
--- /dev/null
+++ b/src/test/auxiliary/issue_19293.rs
@@ -0,0 +1,14 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub struct Foo (pub int);
+pub enum MyEnum {
+    Foo(Foo),
+}
diff --git a/src/test/compile-fail/enums-are-namespaced-xc.rs b/src/test/compile-fail/enums-are-namespaced-xc.rs
new file mode 100644
index 0000000000000..5315e6c834ab3
--- /dev/null
+++ b/src/test/compile-fail/enums-are-namespaced-xc.rs
@@ -0,0 +1,18 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:namespaced_enums.rs
+extern crate namespaced_enums;
+
+fn main() {
+    let _ = namespaced_enums::A; //~ ERROR unresolved name
+    let _ = namespaced_enums::B(10); //~ ERROR unresolved name
+    let _ = namespaced_enums::C { a: 10 }; //~ ERROR does not name a structure
+}
diff --git a/src/test/compile-fail/unreachable-variant.rs b/src/test/compile-fail/unreachable-variant.rs
index a6f17efe6b5e9..ef991d8533737 100644
--- a/src/test/compile-fail/unreachable-variant.rs
+++ b/src/test/compile-fail/unreachable-variant.rs
@@ -13,5 +13,5 @@
 extern crate "unreachable-variant" as other;
 
 fn main() {
-    let _x = other::super_sekrit::baz; //~ ERROR is private
+    let _x = other::super_sekrit::sooper_sekrit::baz; //~ ERROR is private
 }
diff --git a/src/test/compile-fail/xc-private-method2.rs b/src/test/compile-fail/xc-private-method2.rs
index 48b07a39eb898..26e055d7cbb9d 100644
--- a/src/test/compile-fail/xc-private-method2.rs
+++ b/src/test/compile-fail/xc-private-method2.rs
@@ -16,6 +16,6 @@ fn main() {
     let _ = xc_private_method_lib::Struct{ x: 10 }.meth_struct();
     //~^ ERROR method `meth_struct` is private
 
-    let _ = xc_private_method_lib::Variant1(20).meth_enum();
+    let _ = xc_private_method_lib::Enum::Variant1(20).meth_enum();
     //~^ ERROR method `meth_enum` is private
 }
diff --git a/src/test/run-pass/issue-19293.rs b/src/test/run-pass/issue-19293.rs
new file mode 100644
index 0000000000000..4a446a76de389
--- /dev/null
+++ b/src/test/run-pass/issue-19293.rs
@@ -0,0 +1,17 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:issue_19293.rs
+extern crate issue_19293;
+use issue_19293::{Foo, MyEnum};
+
+fn main() {
+    MyEnum::Foo(Foo(5));
+}
diff --git a/src/test/run-pass/issue-2316-c.rs b/src/test/run-pass/issue-2316-c.rs
index a27f0b8d6598c..a6fac423bb677 100644
--- a/src/test/run-pass/issue-2316-c.rs
+++ b/src/test/run-pass/issue-2316-c.rs
@@ -15,5 +15,5 @@ extern crate issue_2316_b;
 use issue_2316_b::cloth;
 
 pub fn main() {
-  let _c: cloth::fabric = cloth::calico;
+  let _c: cloth::fabric = cloth::fabric::calico;
 }
diff --git a/src/test/run-pass/issue-8259.rs b/src/test/run-pass/issue-8259.rs
index 4805b7713ee99..fb893873bc4d3 100644
--- a/src/test/run-pass/issue-8259.rs
+++ b/src/test/run-pass/issue-8259.rs
@@ -11,6 +11,6 @@
 // aux-build:issue-8259.rs
 
 extern crate "issue-8259" as other;
-static a: other::Foo<'static> = other::A;
+static a: other::Foo<'static> = other::Foo::A;
 
 pub fn main() {}
diff --git a/src/test/run-pass/struct_variant_xc.rs b/src/test/run-pass/struct_variant_xc.rs
index 11521e86117b4..923a1427869f5 100644
--- a/src/test/run-pass/struct_variant_xc.rs
+++ b/src/test/run-pass/struct_variant_xc.rs
@@ -11,7 +11,7 @@
 // aux-build:struct_variant_xc_aux.rs
 extern crate struct_variant_xc_aux;
 
-use struct_variant_xc_aux::StructVariant;
+use struct_variant_xc_aux::Enum::StructVariant;
 
 pub fn main() {
     let _ = StructVariant { arg: 1 };
diff --git a/src/test/run-pass/struct_variant_xc_match.rs b/src/test/run-pass/struct_variant_xc_match.rs
index e7bc61c1fb99c..41dcb7ddbc86b 100644
--- a/src/test/run-pass/struct_variant_xc_match.rs
+++ b/src/test/run-pass/struct_variant_xc_match.rs
@@ -11,7 +11,7 @@
 // aux-build:struct_variant_xc_aux.rs
 extern crate struct_variant_xc_aux;
 
-use struct_variant_xc_aux::{StructVariant, Variant};
+use struct_variant_xc_aux::Enum::{StructVariant, Variant};
 
 pub fn main() {
     let arg = match (StructVariant { arg: 42 }) {
diff --git a/src/test/run-pass/xcrate-unit-struct.rs b/src/test/run-pass/xcrate-unit-struct.rs
index 7eb73968db5ef..30b5f47b2ae2e 100644
--- a/src/test/run-pass/xcrate-unit-struct.rs
+++ b/src/test/run-pass/xcrate-unit-struct.rs
@@ -12,10 +12,10 @@
 extern crate xcrate_unit_struct;
 
 const s1: xcrate_unit_struct::Struct = xcrate_unit_struct::Struct;
-static s2: xcrate_unit_struct::Unit = xcrate_unit_struct::UnitVariant;
+static s2: xcrate_unit_struct::Unit = xcrate_unit_struct::Unit::UnitVariant;
 static s3: xcrate_unit_struct::Unit =
-                xcrate_unit_struct::Argument(xcrate_unit_struct::Struct);
-static s4: xcrate_unit_struct::Unit = xcrate_unit_struct::Argument(s1);
+                xcrate_unit_struct::Unit::Argument(xcrate_unit_struct::Struct);
+static s4: xcrate_unit_struct::Unit = xcrate_unit_struct::Unit::Argument(s1);
 static s5: xcrate_unit_struct::TupleStruct = xcrate_unit_struct::TupleStruct(20, "foo");
 
 fn f1(_: xcrate_unit_struct::Struct) {}
@@ -24,8 +24,8 @@ fn f3(_: xcrate_unit_struct::TupleStruct) {}
 
 pub fn main() {
     f1(xcrate_unit_struct::Struct);
-    f2(xcrate_unit_struct::UnitVariant);
-    f2(xcrate_unit_struct::Argument(xcrate_unit_struct::Struct));
+    f2(xcrate_unit_struct::Unit::UnitVariant);
+    f2(xcrate_unit_struct::Unit::Argument(xcrate_unit_struct::Struct));
     f3(xcrate_unit_struct::TupleStruct(10, "bar"));
 
     f1(s1);

From 4ce3ba484b00b6a8e2db63f61c8944e0b33b07ff Mon Sep 17 00:00:00 2001
From: Steve Klabnik <steve@steveklabnik.com>
Date: Tue, 25 Nov 2014 12:26:14 -0500
Subject: [PATCH 19/40] Improve documentation for unreachable

Fixes #18876
---
 src/libstd/macros.rs | 50 +++++++++++++++++++++++++++++---------------
 1 file changed, 33 insertions(+), 17 deletions(-)

diff --git a/src/libstd/macros.rs b/src/libstd/macros.rs
index c3260225d0a3f..dbb45f2e55601 100644
--- a/src/libstd/macros.rs
+++ b/src/libstd/macros.rs
@@ -186,26 +186,42 @@ macro_rules! debug_assert_eq(
     ($($arg:tt)*) => (if cfg!(not(ndebug)) { assert_eq!($($arg)*); })
 )
 
-/// A utility macro for indicating unreachable code. It will panic if
-/// executed. This is occasionally useful to put after loops that never
-/// terminate normally, but instead directly return from a function.
+/// A utility macro for indicating unreachable code.
 ///
-/// # Example
+/// This is useful any time that the compiler can't determine that some code is unreachable. For
+/// example:
+///
+/// * Match arms with guard conditions.
+/// * Loops that dynamically terminate.
+/// * Iterators that dynamically terminate.
+///
+/// # Panics
+///
+/// This will always panic.
+///
+/// # Examples
+///
+/// Match arms:
 ///
-/// ```{.rust}
-/// struct Item { weight: uint }
-///
-/// fn choose_weighted_item(v: &[Item]) -> Item {
-///     assert!(!v.is_empty());
-///     let mut so_far = 0u;
-///     for item in v.iter() {
-///         so_far += item.weight;
-///         if so_far > 100 {
-///             return *item;
-///         }
+/// ```rust
+/// # let x: Option<int> = None;
+/// match x {
+///     Some(n) if n >= 0 => println!("Some(Non-negative)"),
+///     Some(n) if n <  0 => println!("Some(Negative)"),
+///     Some(_)           => unreachable!(), // compile error if commented out
+///     None              => println!("None")
+/// }
+/// ```
+///
+/// Iterators:
+///
+/// ```rust
+/// fn divide_by_three(x: i32) -> i32 { // one of the poorest implementations of x/3
+///     for i in std::iter::count(0_i32, 1) {
+///         if i < 0 { panic!("i32 overflow"); }
+///         if x < 3*i { return i; }
 ///     }
-///     // The above loop always returns, so we must hint to the
-///     // type checker that it isn't possible to get down here
+///
 ///     unreachable!();
 /// }
 /// ```

From 80d520fcf2f71148db1df4377757258f7f7b7f3a Mon Sep 17 00:00:00 2001
From: Richard Diamond <wichard@vitalitystudios.com>
Date: Tue, 25 Nov 2014 17:28:49 -0600
Subject: [PATCH 20/40] Don't use the same llvmdeps.rs for every host.

---
 mk/docs.mk               |  3 ++-
 mk/llvm.mk               | 24 +++++++++++++++++-------
 mk/target.mk             |  5 ++---
 src/librustc_llvm/lib.rs |  4 +++-
 4 files changed, 24 insertions(+), 12 deletions(-)

diff --git a/mk/docs.mk b/mk/docs.mk
index 48eb9e81c20a4..0b5240d51d521 100644
--- a/mk/docs.mk
+++ b/mk/docs.mk
@@ -299,7 +299,8 @@ $(2) += doc/$(1)/index.html
 doc/$(1)/index.html: CFG_COMPILER_HOST_TRIPLE = $(CFG_TARGET)
 doc/$(1)/index.html: $$(LIB_DOC_DEP_$(1)) doc/$(1)/
 	@$$(call E, rustdoc: $$@)
-	$$(Q)$$(RUSTDOC) --cfg dox --cfg stage2 $$<
+	$$(Q)CFG_LLVM_LINKAGE_FILE=$$(LLVM_LINKAGE_PATH_$(CFG_BUILD)) \
+		$$(RUSTDOC) --cfg dox --cfg stage2 $$<
 endef
 
 $(foreach crate,$(DOC_CRATES),$(eval $(call DEF_LIB_DOC,$(crate),DOC_TARGETS)))
diff --git a/mk/llvm.mk b/mk/llvm.mk
index bce4390205626..ba2e073803935 100644
--- a/mk/llvm.mk
+++ b/mk/llvm.mk
@@ -49,6 +49,12 @@ else
 LLVM_STDCPP_LOCATION_$(1) =
 endif
 
+
+# LLVM linkage:
+LLVM_LINKAGE_PATH_$(1):=$$(abspath $$(RT_OUTPUT_DIR_$(1))/llvmdeps.rs)
+$$(LLVM_LINKAGE_PATH_$(1)): $(S)src/etc/mklldeps.py $$(LLVM_CONFIG_$(1))
+	$(Q)$(CFG_PYTHON) "$$<" "$$@" "$$(LLVM_COMPONENTS)" "$$(CFG_ENABLE_LLVM_STATIC_STDCPP)" \
+		$$(LLVM_CONFIG_$(1))
 endef
 
 $(foreach host,$(CFG_HOST), \
@@ -57,10 +63,14 @@ $(foreach host,$(CFG_HOST), \
 $(foreach host,$(CFG_HOST), \
  $(eval LLVM_CONFIGS := $(LLVM_CONFIGS) $(LLVM_CONFIG_$(host))))
 
-$(S)src/librustc_llvm/llvmdeps.rs: \
-		    $(LLVM_CONFIGS) \
-		    $(S)src/etc/mklldeps.py \
-		    $(MKFILE_DEPS)
-	$(Q)$(CFG_PYTHON) $(S)src/etc/mklldeps.py \
-		"$@" "$(LLVM_COMPONENTS)" "$(CFG_ENABLE_LLVM_STATIC_STDCPP)" \
-		$(LLVM_CONFIGS)
+# This can't be done in target.mk because it's included before this file.
+define LLVM_LINKAGE_DEPS
+$$(TLIB$(1)_T_$(2)_H_$(3))/stamp.rustc_llvm: $$(LLVM_LINKAGE_PATH_$(3))
+endef
+
+$(foreach source,$(CFG_HOST), \
+ $(foreach target,$(CFG_TARGET), \
+  $(eval $(call LLVM_LINKAGE_DEPS,0,$(target),$(source))) \
+  $(eval $(call LLVM_LINKAGE_DEPS,1,$(target),$(source))) \
+  $(eval $(call LLVM_LINKAGE_DEPS,2,$(target),$(source))) \
+  $(eval $(call LLVM_LINKAGE_DEPS,3,$(target),$(source)))))
diff --git a/mk/target.mk b/mk/target.mk
index acdf780f10545..5b0de64574cd3 100644
--- a/mk/target.mk
+++ b/mk/target.mk
@@ -79,7 +79,8 @@ $$(TLIB$(1)_T_$(2)_H_$(3))/stamp.$(4): \
 	    $$(dir $$@)$$(call CFG_LIB_GLOB_$(2),$(4)))
 	$$(call REMOVE_ALL_OLD_GLOB_MATCHES, \
 	    $$(dir $$@)$$(call CFG_RLIB_GLOB,$(4)))
-	$$(STAGE$(1)_T_$(2)_H_$(3)) \
+	$(Q)CFG_LLVM_LINKAGE_FILE=$$(LLVM_LINKAGE_PATH_$(2)) \
+	    $$(subst @,,$$(STAGE$(1)_T_$(2)_H_$(3))) \
 		$$(RUST_LIB_FLAGS_ST$(1)) \
 		-L "$$(RT_OUTPUT_DIR_$(2))" \
 		-L "$$(LLVM_LIBDIR_$(2))" \
@@ -134,8 +135,6 @@ SNAPSHOT_RUSTC_POST_CLEANUP=$(HBIN0_H_$(CFG_BUILD))/rustc$(X_$(CFG_BUILD))
 
 define TARGET_HOST_RULES
 
-$$(TLIB$(1)_T_$(2)_H_$(3))/stamp.rustc_llvm: $(S)src/librustc_llvm/llvmdeps.rs
-
 $$(TBIN$(1)_T_$(2)_H_$(3))/:
 	mkdir -p $$@
 
diff --git a/src/librustc_llvm/lib.rs b/src/librustc_llvm/lib.rs
index d67d0fa59ae28..8c12ccb9c8bd6 100644
--- a/src/librustc_llvm/lib.rs
+++ b/src/librustc_llvm/lib.rs
@@ -2214,4 +2214,6 @@ pub unsafe fn static_link_hack_this_sucks() {
 // parts of LLVM that rustllvm depends on aren't thrown away by the linker.
 // Works to the above fix for #15460 to ensure LLVM dependencies that
 // are only used by rustllvm don't get stripped by the linker.
-mod llvmdeps;
+mod llvmdeps {
+    include!(env!("CFG_LLVM_LINKAGE_FILE"))
+}

From f17faf49be668dfc22e9cae97ed644e3881fd64e Mon Sep 17 00:00:00 2001
From: Richard Diamond <wichard@vitalitystudios.com>
Date: Tue, 25 Nov 2014 17:53:05 -0600
Subject: [PATCH 21/40] Never generate multiple extern {} blocks in
 mklldeps.py.

---
 src/etc/mklldeps.py | 101 ++++++++++++++++++--------------------------
 1 file changed, 40 insertions(+), 61 deletions(-)

diff --git a/src/etc/mklldeps.py b/src/etc/mklldeps.py
index 0003de117a835..834ba074c6210 100644
--- a/src/etc/mklldeps.py
+++ b/src/etc/mklldeps.py
@@ -19,6 +19,7 @@
 components = sys.argv[2].split(' ')
 components = [i for i in components if i]  # ignore extra whitespaces
 enable_static = sys.argv[3]
+llconfig = sys.argv[4]
 
 f.write("""// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
 // file at the top-level directory of this distribution and at
@@ -44,69 +45,47 @@ def run(args):
         sys.exit(1)
     return out
 
-for llconfig in sys.argv[4:]:
-    f.write("\n")
-
-    out = run([llconfig, '--host-target'])
-    arch, os = out.split('-', 1)
-    arch = 'x86' if arch == 'i686' or arch == 'i386' else arch
-    if 'darwin' in os:
-        os = 'macos'
-    elif 'linux' in os:
-        os = 'linux'
-    elif 'freebsd' in os:
-        os = 'freebsd'
-    elif 'dragonfly' in os:
-        os = 'dragonfly'
-    elif 'android' in os:
-        os = 'android'
-    elif 'win' in os or 'mingw' in os:
-        os = 'windows'
-    cfg = [
-        "target_arch = \"" + arch + "\"",
-        "target_os = \"" + os + "\"",
-    ]
-
-    f.write("#[cfg(all(" + ', '.join(cfg) + "))]\n")
-
-    version = run([llconfig, '--version']).strip()
-
-    # LLVM libs
-    if version < '3.5':
-      args = [llconfig, '--libs']
-    else:
-      args = [llconfig, '--libs', '--system-libs']
-    args.extend(components)
-    out = run(args)
-    for lib in out.strip().replace("\n", ' ').split(' '):
-        lib = lib.strip()[2:] # chop of the leading '-l'
-        f.write("#[link(name = \"" + lib + "\"")
-        # LLVM libraries are all static libraries
-        if 'LLVM' in lib:
-            f.write(", kind = \"static\"")
-        f.write(")]\n")
-
-    # llvm-config before 3.5 didn't have a system-libs flag
-    if version < '3.5':
-      if os == 'win32':
+f.write("\n")
+
+version = run([llconfig, '--version']).strip()
+
+# LLVM libs
+if version < '3.5':
+    args = [llconfig, '--libs']
+else:
+    args = [llconfig, '--libs', '--system-libs']
+
+args.extend(components)
+out = run(args)
+for lib in out.strip().replace("\n", ' ').split(' '):
+    lib = lib.strip()[2:] # chop off the leading '-l'
+    f.write("#[link(name = \"" + lib + "\"")
+    # LLVM libraries are all static libraries
+    if 'LLVM' in lib:
+        f.write(", kind = \"static\"")
+    f.write(")]\n")
+
+# llvm-config before 3.5 didn't have a system-libs flag
+if version < '3.5':
+    if os == 'win32':  # FIXME(review): 'os' was the target-OS string set in the removed per-target loop; it is no longer computed here, so confirm this branch still works
         f.write("#[link(name = \"imagehlp\")]")
 
-    # LLVM ldflags
-    out = run([llconfig, '--ldflags'])
-    for lib in out.strip().split(' '):
-        if lib[:2] == "-l":
-            f.write("#[link(name = \"" + lib[2:] + "\")]\n")
-
-    # C++ runtime library
-    out = run([llconfig, '--cxxflags'])
-    if enable_static == '1':
-      assert('stdlib=libc++' not in out)
-      f.write("#[link(name = \"stdc++\", kind = \"static\")]\n")
-    else:
-      if 'stdlib=libc++' in out:
+# LLVM ldflags
+out = run([llconfig, '--ldflags'])
+for lib in out.strip().split(' '):
+    if lib[:2] == "-l":
+        f.write("#[link(name = \"" + lib[2:] + "\")]\n")
+
+# C++ runtime library
+out = run([llconfig, '--cxxflags'])
+if enable_static == '1':
+    assert('stdlib=libc++' not in out)
+    f.write("#[link(name = \"stdc++\", kind = \"static\")]\n")
+else:
+    if 'stdlib=libc++' in out:
         f.write("#[link(name = \"c++\")]\n")
-      else:
+    else:
         f.write("#[link(name = \"stdc++\")]\n")
 
-    # Attach everything to an extern block
-    f.write("extern {}\n")
+# Attach everything to an extern block
+f.write("extern {}\n")

From 6cb03baffa329f785bdef4079456dc85ec3b0bbc Mon Sep 17 00:00:00 2001
From: Ulysse Carion <ulysse@ulysse.io>
Date: Tue, 25 Nov 2014 16:32:53 -0800
Subject: [PATCH 22/40] Fix formatting of the pointers guide.

---
 src/doc/guide-pointers.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/doc/guide-pointers.md b/src/doc/guide-pointers.md
index cf7ecd7e51ff7..08d7c2a4158a4 100644
--- a/src/doc/guide-pointers.md
+++ b/src/doc/guide-pointers.md
@@ -572,7 +572,7 @@ fn add_one(x: &mut int) -> int {
 fn main() {
     let x = box 5i;
 
-    println!("{}", add_one(&*x)); // error: cannot borrow immutable dereference 
+    println!("{}", add_one(&*x)); // error: cannot borrow immutable dereference
                                   // of `&`-pointer as mutable
 }
 ```
@@ -700,9 +700,9 @@ This gives you flexibility without sacrificing performance.
 
 You may think that this gives us terrible performance: return a value and then
 immediately box it up ?! Isn't that the worst of both worlds? Rust is smarter
-than that. There is no copy in this code. main allocates enough room for the
-`box , passes a pointer to that memory into foo as x, and then foo writes the
-value straight into that pointer. This writes the return value directly into
+than that. There is no copy in this code. `main` allocates enough room for the
+`box`, passes a pointer to that memory into `foo` as `x`, and then `foo` writes
+the value straight into that pointer. This writes the return value directly into
 the allocated box.
 
 This is important enough that it bears repeating: pointers are not for

From 4653ad02055e1accae0fce6aad000b01fbe61d20 Mon Sep 17 00:00:00 2001
From: Huon Wilson <dbau.pp+github@gmail.com>
Date: Mon, 24 Nov 2014 15:56:34 +1100
Subject: [PATCH 23/40] Make syntax::owned_slice a Box<[T]> wrapper.

This makes it correct (e.g. avoiding null pointers) and safe.
---
 src/librustc/middle/typeck/check/mod.rs |   2 +-
 src/libstd/prelude.rs                   |   1 +
 src/libsyntax/owned_slice.rs            | 104 ++++--------------------
 src/libsyntax/print/pprust.rs           |   2 +-
 4 files changed, 19 insertions(+), 90 deletions(-)

diff --git a/src/librustc/middle/typeck/check/mod.rs b/src/librustc/middle/typeck/check/mod.rs
index 553d80852c28f..9c8374da37ac5 100644
--- a/src/librustc/middle/typeck/check/mod.rs
+++ b/src/librustc/middle/typeck/check/mod.rs
@@ -5809,7 +5809,7 @@ pub fn check_bounds_are_used<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
         if !*b {
             span_err!(ccx.tcx.sess, span, E0091,
                 "type parameter `{}` is unused",
-                token::get_ident(tps.get(i).ident));
+                token::get_ident(tps[i].ident));
         }
     }
 }
diff --git a/src/libstd/prelude.rs b/src/libstd/prelude.rs
index 65f45c3f97e14..fe8648b22021c 100644
--- a/src/libstd/prelude.rs
+++ b/src/libstd/prelude.rs
@@ -85,6 +85,7 @@
 #[doc(no_inline)] pub use slice::{SlicePrelude, AsSlice, CloneSlicePrelude};
 #[doc(no_inline)] pub use slice::{VectorVector, PartialEqSlicePrelude, OrdSlicePrelude};
 #[doc(no_inline)] pub use slice::{CloneSliceAllocPrelude, OrdSliceAllocPrelude, SliceAllocPrelude};
+#[doc(no_inline)] pub use slice::{BoxedSlicePrelude};
 #[doc(no_inline)] pub use string::{IntoString, String, ToString};
 #[doc(no_inline)] pub use vec::Vec;
 
diff --git a/src/libsyntax/owned_slice.rs b/src/libsyntax/owned_slice.rs
index f622e2d611276..747f8c150b4a7 100644
--- a/src/libsyntax/owned_slice.rs
+++ b/src/libsyntax/owned_slice.rs
@@ -10,99 +10,39 @@
 
 use std::fmt;
 use std::default::Default;
-use std::hash;
-use std::{mem, raw, ptr, slice, vec};
-use std::rt::heap::EMPTY;
+use std::vec;
 use serialize::{Encodable, Decodable, Encoder, Decoder};
 
-/// A non-growable owned slice. This would preferably become `~[T]`
-/// under DST.
-#[unsafe_no_drop_flag] // data is set to null on destruction
+/// A non-growable owned slice. This is a separate type to allow the
+/// representation to change.
+#[deriving(Hash, PartialEq, Eq, PartialOrd, Ord)]
 pub struct OwnedSlice<T> {
-    /// null iff len == 0
-    data: *mut T,
-    len: uint,
+    data: Box<[T]>
 }
 
 impl<T:fmt::Show> fmt::Show for OwnedSlice<T> {
     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-        try!("OwnedSlice {{".fmt(fmt));
-        for i in self.iter() {
-            try!(i.fmt(fmt));
-        }
-        try!("}}".fmt(fmt));
-        Ok(())
-    }
-}
-
-#[unsafe_destructor]
-impl<T> Drop for OwnedSlice<T> {
-    fn drop(&mut self) {
-        if self.data.is_null() { return }
-
-        // extract the vector
-        let v = mem::replace(self, OwnedSlice::empty());
-        // free via the Vec destructor
-        v.into_vec();
+        self.data.fmt(fmt)
     }
 }
 
 impl<T> OwnedSlice<T> {
     pub fn empty() -> OwnedSlice<T> {
-        OwnedSlice  { data: ptr::null_mut(), len: 0 }
+        OwnedSlice  { data: box [] }
     }
 
     #[inline(never)]
-    pub fn from_vec(mut v: Vec<T>) -> OwnedSlice<T> {
-        let len = v.len();
-
-        if len == 0 {
-            OwnedSlice::empty()
-        } else {
-            // drop excess capacity to avoid breaking sized deallocation
-            v.shrink_to_fit();
-
-            let p = v.as_mut_ptr();
-            // we own the allocation now
-            unsafe { mem::forget(v) }
-
-            OwnedSlice { data: p, len: len }
-        }
+    pub fn from_vec(v: Vec<T>) -> OwnedSlice<T> {
+        OwnedSlice { data: v.into_boxed_slice() }
     }
 
     #[inline(never)]
     pub fn into_vec(self) -> Vec<T> {
-        // null is ok, because len == 0 in that case, as required by Vec.
-        unsafe {
-            let ret = Vec::from_raw_parts(self.data, self.len, self.len);
-            // the vector owns the allocation now
-            mem::forget(self);
-            ret
-        }
+        self.data.into_vec()
     }
 
     pub fn as_slice<'a>(&'a self) -> &'a [T] {
-        let ptr = if self.data.is_null() {
-            // length zero, i.e. this will never be read as a T.
-            EMPTY as *const T
-        } else {
-            self.data as *const T
-        };
-
-        let slice: &[T] = unsafe {mem::transmute(raw::Slice {
-            data: ptr,
-            len: self.len
-        })};
-
-        slice
-    }
-
-    pub fn get<'a>(&'a self, i: uint) -> &'a T {
-        self.as_slice().get(i).expect("OwnedSlice: index out of bounds")
-    }
-
-    pub fn iter<'r>(&'r self) -> slice::Items<'r, T> {
-        self.as_slice().iter()
+        &*self.data
     }
 
     pub fn move_iter(self) -> vec::MoveItems<T> {
@@ -112,10 +52,12 @@ impl<T> OwnedSlice<T> {
     pub fn map<U>(&self, f: |&T| -> U) -> OwnedSlice<U> {
         self.iter().map(f).collect()
     }
+}
 
-    pub fn len(&self) -> uint { self.len }
-
-    pub fn is_empty(&self) -> bool { self.len == 0 }
+impl<T> Deref<[T]> for OwnedSlice<T> {
+    fn deref(&self) -> &[T] {
+        self.as_slice()
+    }
 }
 
 impl<T> Default for OwnedSlice<T> {
@@ -130,20 +72,6 @@ impl<T: Clone> Clone for OwnedSlice<T> {
     }
 }
 
-impl<S: hash::Writer, T: hash::Hash<S>> hash::Hash<S> for OwnedSlice<T> {
-    fn hash(&self, state: &mut S) {
-        self.as_slice().hash(state)
-    }
-}
-
-impl<T: PartialEq> PartialEq for OwnedSlice<T> {
-    fn eq(&self, other: &OwnedSlice<T>) -> bool {
-        self.as_slice() == other.as_slice()
-    }
-}
-
-impl<T: Eq> Eq for OwnedSlice<T> {}
-
 impl<T> FromIterator<T> for OwnedSlice<T> {
     fn from_iter<I: Iterator<T>>(mut iter: I) -> OwnedSlice<T> {
         OwnedSlice::from_vec(iter.collect())
diff --git a/src/libsyntax/print/pprust.rs b/src/libsyntax/print/pprust.rs
index 8e7804aaa713f..0a6f5dabcce87 100644
--- a/src/libsyntax/print/pprust.rs
+++ b/src/libsyntax/print/pprust.rs
@@ -2458,7 +2458,7 @@ impl<'a> State<'a> {
                 s.print_lifetime_def(lifetime)
             } else {
                 let idx = idx - generics.lifetimes.len();
-                let param = generics.ty_params.get(idx);
+                let param = &generics.ty_params[idx];
                 s.print_ty_param(param)
             }
         }));

From ce507c6c2217f88394f8c3fc10f7c36af6867fb2 Mon Sep 17 00:00:00 2001
From: Richard Diamond <wichard@vitalitystudios.com>
Date: Tue, 25 Nov 2014 19:03:03 -0600
Subject: [PATCH 24/40] Don't forget the tests.

---
 mk/tests.mk | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/mk/tests.mk b/mk/tests.mk
index 63a34e0f01007..0ec0c81f2882e 100644
--- a/mk/tests.mk
+++ b/mk/tests.mk
@@ -412,7 +412,8 @@ $(3)/stage$(1)/test/$(4)test-$(2)$$(X_$(2)): \
 		$$(CRATEFILE_$(4)) \
 		$$(TESTDEP_$(1)_$(2)_$(3)_$(4))
 	@$$(call E, rustc: $$@)
-	$$(STAGE$(1)_T_$(2)_H_$(3)) -o $$@ $$< --test \
+	$(Q)CFG_LLVM_LINKAGE_FILE=$$(LLVM_LINKAGE_PATH_$(2)) \
+	    $$(subst @,,$$(STAGE$(1)_T_$(2)_H_$(3))) -o $$@ $$< --test \
 		-L "$$(RT_OUTPUT_DIR_$(2))" \
 		-L "$$(LLVM_LIBDIR_$(2))" \
 		$$(RUSTFLAGS_$(4))
@@ -890,7 +891,8 @@ endif
 ifeq ($(2),$$(CFG_BUILD))
 $$(call TEST_OK_FILE,$(1),$(2),$(3),doc-crate-$(4)): $$(CRATEDOCTESTDEP_$(1)_$(2)_$(3)_$(4))
 	@$$(call E, run doc-crate-$(4) [$(2)])
-	$$(Q)$$(RUSTDOC_$(1)_T_$(2)_H_$(3)) --test --cfg dox \
+	$$(Q)CFG_LLVM_LINKAGE_FILE=$$(LLVM_LINKAGE_PATH_$(2)) \
+	    $$(RUSTDOC_$(1)_T_$(2)_H_$(3)) --test --cfg dox \
 	    	$$(CRATEFILE_$(4)) --test-args "$$(TESTARGS)" && touch $$@
 else
 $$(call TEST_OK_FILE,$(1),$(2),$(3),doc-crate-$(4)):

From 945b4edd678c8512b9b51855543db18251238d21 Mon Sep 17 00:00:00 2001
From: Steven Fackler <sfackler@gmail.com>
Date: Tue, 25 Nov 2014 17:37:13 -0800
Subject: [PATCH 25/40] Allow mutable access to wrapped internal type in
 Buffered*

This is necessary to e.g. set a timeout on the underlying stream.
---
 src/libstd/io/buffered.rs | 36 ++++++++++++++++++++++++++----------
 1 file changed, 26 insertions(+), 10 deletions(-)

diff --git a/src/libstd/io/buffered.rs b/src/libstd/io/buffered.rs
index 148323762c8b8..6c704d2f23dce 100644
--- a/src/libstd/io/buffered.rs
+++ b/src/libstd/io/buffered.rs
@@ -75,10 +75,14 @@ impl<R: Reader> BufferedReader<R> {
     }
 
     /// Gets a reference to the underlying reader.
+    pub fn get_ref<'a>(&self) -> &R { &self.inner }
+
+    /// Gets a mutable reference to the underlying reader.
     ///
-    /// This type does not expose the ability to get a mutable reference to the
-    /// underlying reader because that could possibly corrupt the buffer.
-    pub fn get_ref<'a>(&'a self) -> &'a R { &self.inner }
+    /// ## Warning
+    ///
+    /// It is inadvisable to directly read from the underlying reader.
+    pub fn get_mut(&mut self) -> &mut R { &mut self.inner }
 
     /// Unwraps this `BufferedReader`, returning the underlying reader.
     ///
@@ -176,10 +180,14 @@ impl<W: Writer> BufferedWriter<W> {
     }
 
     /// Gets a reference to the underlying writer.
+    pub fn get_ref(&self) -> &W { self.inner.as_ref().unwrap() }
+
+    /// Gets a mutable reference to the underlying writer.
     ///
-    /// This type does not expose the ability to get a mutable reference to the
-    /// underlying reader because that could possibly corrupt the buffer.
-    pub fn get_ref<'a>(&'a self) -> &'a W { self.inner.as_ref().unwrap() }
+    /// ## Warning
+    ///
+    /// It is inadvisable to write directly to the underlying writer.
+    pub fn get_mut(&mut self) -> &mut W { self.inner.as_mut().unwrap() }
 
     /// Unwraps this `BufferedWriter`, returning the underlying writer.
     ///
@@ -341,14 +349,22 @@ impl<S: Stream> BufferedStream<S> {
     }
 
     /// Gets a reference to the underlying stream.
-    ///
-    /// This type does not expose the ability to get a mutable reference to the
-    /// underlying reader because that could possibly corrupt the buffer.
-    pub fn get_ref<'a>(&'a self) -> &'a S {
+    pub fn get_ref(&self) -> &S {
         let InternalBufferedWriter(ref w) = self.inner.inner;
         w.get_ref()
     }
 
+    /// Gets a mutable reference to the underlying stream.
+    ///
+    /// ## Warning
+    ///
+    /// It is inadvisable to read directly from or write directly to the
+    /// underlying stream.
+    pub fn get_mut(&mut self) -> &mut S {
+        let InternalBufferedWriter(ref mut w) = self.inner.inner;
+        w.get_mut()
+    }
+
     /// Unwraps this `BufferedStream`, returning the underlying stream.
     ///
     /// The internal buffer is flushed before returning the stream. Any leftover

From f38e4e6d97bf1691858d007afd36b1f356de4774 Mon Sep 17 00:00:00 2001
From: Steve Klabnik <steve@steveklabnik.com>
Date: Mon, 24 Nov 2014 20:06:06 -0500
Subject: [PATCH 26/40] /** -> ///

This is considered good convention.
---
 src/libcollections/enum_set.rs                |  40 +-
 src/libcore/finally.rs                        |  62 +-
 src/libcore/fmt/float.rs                      |  60 +-
 src/libcore/ops.rs                            | 937 ++++++++----------
 src/libcore/slice.rs                          |  38 +-
 src/liblibc/lib.rs                            |  26 +-
 src/librustc/lint/context.rs                  |   8 +-
 src/librustc/middle/fast_reject.rs            |   2 +-
 src/librustc/middle/mem_categorization.rs     |  34 +-
 src/librustc/middle/region.rs                 |  74 +-
 src/librustc/middle/resolve.rs                |  28 +-
 src/librustc/middle/subst.rs                  |  31 +-
 src/librustc/middle/traits/fulfill.rs         |  22 +-
 src/librustc/middle/traits/mod.rs             | 142 ++-
 src/librustc/middle/traits/select.rs          |  50 +-
 src/librustc/middle/ty.rs                     | 314 +++---
 src/librustc/middle/ty_fold.rs                |   4 +-
 src/librustc/middle/typeck/check/mod.rs       |  24 +-
 .../typeck/infer/region_inference/mod.rs      |  12 +-
 src/librustc/middle/typeck/infer/unify.rs     | 105 +-
 src/librustc/middle/typeck/mod.rs             |  26 +-
 src/librustc/middle/typeck/variance.rs        |  54 +-
 src/librustc/util/snapshot_vec.rs             |   6 +-
 src/librustc_llvm/lib.rs                      |  82 +-
 src/librustc_trans/driver/driver.rs           |   6 +-
 src/librustc_trans/trans/_match.rs            |  27 +-
 src/librustc_trans/trans/adt.rs               | 190 ++--
 src/librustc_trans/trans/basic_block.rs       |   4 +-
 src/librustc_trans/trans/datum.rs             |  54 +-
 src/librustc_trans/trans/expr.rs              |  32 +-
 src/librustc_trans/trans/meth.rs              |  10 +-
 src/librustc_trans/trans/type_.rs             |   4 +-
 src/librustc_trans/trans/value.rs             |   8 +-
 src/libserialize/base64.rs                    |  82 +-
 src/libserialize/hex.rs                       |  76 +-
 src/libstd/num/strconv.rs                     |  51 +-
 src/libstd/os.rs                              |  48 +-
 src/libstd/sync/lock.rs                       |  27 +-
 src/libstd/sync/raw.rs                        |  61 +-
 src/libstd/sys/windows/process.rs             |  30 +-
 src/libsyntax/codemap.rs                      |  10 +-
 src/libsyntax/ext/quote.rs                    |  16 +-
 src/libsyntax/parse/token.rs                  |  12 +-
 src/libterm/terminfo/parm.rs                  |  20 +-
 src/libtime/lib.rs                            |  70 +-
 45 files changed, 1361 insertions(+), 1658 deletions(-)

diff --git a/src/libcollections/enum_set.rs b/src/libcollections/enum_set.rs
index 3d750a30c2960..d21465c822f47 100644
--- a/src/libcollections/enum_set.rs
+++ b/src/libcollections/enum_set.rs
@@ -42,27 +42,25 @@ impl<E:CLike+fmt::Show> fmt::Show for EnumSet<E> {
     }
 }
 
-/**
-An interface for casting C-like enum to uint and back.
-A typically implementation is as below.
-
-```{rust,ignore}
-#[repr(uint)]
-enum Foo {
-    A, B, C
-}
-
-impl CLike for Foo {
-    fn to_uint(&self) -> uint {
-        *self as uint
-    }
-
-    fn from_uint(v: uint) -> Foo {
-        unsafe { mem::transmute(v) }
-    }
-}
-```
-*/
+/// An interface for casting C-like enum to uint and back.
+/// A typical implementation is as below.
+///
+/// ```{rust,ignore}
+/// #[repr(uint)]
+/// enum Foo {
+///     A, B, C
+/// }
+///
+/// impl CLike for Foo {
+///     fn to_uint(&self) -> uint {
+///         *self as uint
+///     }
+///
+///     fn from_uint(v: uint) -> Foo {
+///         unsafe { mem::transmute(v) }
+///     }
+/// }
+/// ```
 pub trait CLike {
     /// Converts a C-like enum to a `uint`.
     fn to_uint(&self) -> uint;
diff --git a/src/libcore/finally.rs b/src/libcore/finally.rs
index 2e358e7a74b64..e1e409fbaeb72 100644
--- a/src/libcore/finally.rs
+++ b/src/libcore/finally.rs
@@ -58,38 +58,36 @@ impl<T> Finally<T> for fn() -> T {
     }
 }
 
-/**
- * The most general form of the `finally` functions. The function
- * `try_fn` will be invoked first; whether or not it panics, the
- * function `finally_fn` will be invoked next. The two parameters
- * `mutate` and `drop` are used to thread state through the two
- * closures. `mutate` is used for any shared, mutable state that both
- * closures require access to; `drop` is used for any state that the
- * `try_fn` requires ownership of.
- *
- * **WARNING:** While shared, mutable state between the try and finally
- * function is often necessary, one must be very careful; the `try`
- * function could have panicked at any point, so the values of the shared
- * state may be inconsistent.
- *
- * # Example
- *
- * ```
- * use std::finally::try_finally;
- *
- * struct State<'a> { buffer: &'a mut [u8], len: uint }
- * # let mut buf = [];
- * let mut state = State { buffer: &mut buf, len: 0 };
- * try_finally(
- *     &mut state, (),
- *     |state, ()| {
- *         // use state.buffer, state.len
- *     },
- *     |state| {
- *         // use state.buffer, state.len to cleanup
- *     })
- * ```
- */
+/// The most general form of the `finally` functions. The function
+/// `try_fn` will be invoked first; whether or not it panics, the
+/// function `finally_fn` will be invoked next. The two parameters
+/// `mutate` and `drop` are used to thread state through the two
+/// closures. `mutate` is used for any shared, mutable state that both
+/// closures require access to; `drop` is used for any state that the
+/// `try_fn` requires ownership of.
+///
+/// **WARNING:** While shared, mutable state between the try and finally
+/// function is often necessary, one must be very careful; the `try`
+/// function could have panicked at any point, so the values of the shared
+/// state may be inconsistent.
+///
+/// # Example
+///
+/// ```
+/// use std::finally::try_finally;
+///
+/// struct State<'a> { buffer: &'a mut [u8], len: uint }
+/// # let mut buf = [];
+/// let mut state = State { buffer: &mut buf, len: 0 };
+/// try_finally(
+///     &mut state, (),
+///     |state, ()| {
+///         // use state.buffer, state.len
+///     },
+///     |state| {
+///         // use state.buffer, state.len to cleanup
+///     })
+/// ```
 pub fn try_finally<T,U,R>(mutate: &mut T,
                           drop: U,
                           try_fn: |&mut T, U| -> R,
diff --git a/src/libcore/fmt/float.rs b/src/libcore/fmt/float.rs
index 1760c4d8e6616..aa481bd7e0055 100644
--- a/src/libcore/fmt/float.rs
+++ b/src/libcore/fmt/float.rs
@@ -54,36 +54,36 @@ pub enum SignFormat {
 
 static DIGIT_E_RADIX: uint = ('e' as uint) - ('a' as uint) + 11u;
 
-/**
- * Converts a number to its string representation as a byte vector.
- * This is meant to be a common base implementation for all numeric string
- * conversion functions like `to_string()` or `to_str_radix()`.
- *
- * # Arguments
- * - `num`           - The number to convert. Accepts any number that
- *                     implements the numeric traits.
- * - `radix`         - Base to use. Accepts only the values 2-36. If the exponential notation
- *                     is used, then this base is only used for the significand. The exponent
- *                     itself always printed using a base of 10.
- * - `negative_zero` - Whether to treat the special value `-0` as
- *                     `-0` or as `+0`.
- * - `sign`          - How to emit the sign. See `SignFormat`.
- * - `digits`        - The amount of digits to use for emitting the fractional
- *                     part, if any. See `SignificantDigits`.
- * - `exp_format`   - Whether or not to use the exponential (scientific) notation.
- *                    See `ExponentFormat`.
- * - `exp_capital`   - Whether or not to use a capital letter for the exponent sign, if
- *                     exponential notation is desired.
- * - `f`             - A closure to invoke with the bytes representing the
- *                     float.
- *
- * # Panics
- * - Panics if `radix` < 2 or `radix` > 36.
- * - Panics if `radix` > 14 and `exp_format` is `ExpDec` due to conflict
- *   between digit and exponent sign `'e'`.
- * - Panics if `radix` > 25 and `exp_format` is `ExpBin` due to conflict
- *   between digit and exponent sign `'p'`.
- */
+/// Converts a number to its string representation as a byte vector.
+/// This is meant to be a common base implementation for all numeric string
+/// conversion functions like `to_string()` or `to_str_radix()`.
+///
+/// # Arguments
+///
+/// - `num`           - The number to convert. Accepts any number that
+///                     implements the numeric traits.
+/// - `radix`         - Base to use. Accepts only the values 2-36. If the exponential notation
+///                     is used, then this base is only used for the significand. The exponent
+///                     itself is always printed using a base of 10.
+/// - `negative_zero` - Whether to treat the special value `-0` as
+///                     `-0` or as `+0`.
+/// - `sign`          - How to emit the sign. See `SignFormat`.
+/// - `digits`        - The amount of digits to use for emitting the fractional
+///                     part, if any. See `SignificantDigits`.
+/// - `exp_format`   - Whether or not to use the exponential (scientific) notation.
+///                    See `ExponentFormat`.
+/// - `exp_capital`   - Whether or not to use a capital letter for the exponent sign, if
+///                     exponential notation is desired.
+/// - `f`             - A closure to invoke with the bytes representing the
+///                     float.
+///
+/// # Panics
+///
+/// - Panics if `radix` < 2 or `radix` > 36.
+/// - Panics if `radix` > 14 and `exp_format` is `ExpDec` due to conflict
+///   between digit and exponent sign `'e'`.
+/// - Panics if `radix` > 25 and `exp_format` is `ExpBin` due to conflict
+///   between digit and exponent sign `'p'`.
 pub fn float_to_str_bytes_common<T: Float, U>(
     num: T,
     radix: uint,
diff --git a/src/libcore/ops.rs b/src/libcore/ops.rs
index 185c937eb6b37..b2749ca054ad1 100644
--- a/src/libcore/ops.rs
+++ b/src/libcore/ops.rs
@@ -57,60 +57,54 @@
 
 use kinds::Sized;
 
-/**
- *
- * The `Drop` trait is used to run some code when a value goes out of scope. This
- * is sometimes called a 'destructor'.
- *
- * # Example
- *
- * A trivial implementation of `Drop`. The `drop` method is called when `_x` goes
- * out of scope, and therefore `main` prints `Dropping!`.
- *
- * ```rust
- * struct HasDrop;
- *
- * impl Drop for HasDrop {
- *   fn drop(&mut self) {
- *       println!("Dropping!");
- *   }
- * }
- *
- * fn main() {
- *   let _x = HasDrop;
- * }
- * ```
- */
+/// The `Drop` trait is used to run some code when a value goes out of scope. This
+/// is sometimes called a 'destructor'.
+///
+/// # Example
+///
+/// A trivial implementation of `Drop`. The `drop` method is called when `_x` goes
+/// out of scope, and therefore `main` prints `Dropping!`.
+///
+/// ```rust
+/// struct HasDrop;
+///
+/// impl Drop for HasDrop {
+///   fn drop(&mut self) {
+///       println!("Dropping!");
+///   }
+/// }
+///
+/// fn main() {
+///   let _x = HasDrop;
+/// }
+/// ```
 #[lang="drop"]
 pub trait Drop {
     /// The `drop` method, called when the value goes out of scope.
     fn drop(&mut self);
 }
 
-/**
- *
- * The `Add` trait is used to specify the functionality of `+`.
- *
- * # Example
- *
- * A trivial implementation of `Add`. When `Foo + Foo` happens, it ends up
- * calling `add`, and therefore, `main` prints `Adding!`.
- *
- * ```rust
- * struct Foo;
- *
- * impl Add<Foo, Foo> for Foo {
- *     fn add(&self, _rhs: &Foo) -> Foo {
- *       println!("Adding!");
- *       *self
- *   }
- * }
- *
- * fn main() {
- *   Foo + Foo;
- * }
- * ```
- */
+/// The `Add` trait is used to specify the functionality of `+`.
+///
+/// # Example
+///
+/// A trivial implementation of `Add`. When `Foo + Foo` happens, it ends up
+/// calling `add`, and therefore, `main` prints `Adding!`.
+///
+/// ```rust
+/// struct Foo;
+///
+/// impl Add<Foo, Foo> for Foo {
+///     fn add(&self, _rhs: &Foo) -> Foo {
+///       println!("Adding!");
+///       *self
+///   }
+/// }
+///
+/// fn main() {
+///   Foo + Foo;
+/// }
+/// ```
 #[lang="add"]
 pub trait Add<Sized? RHS,Result> for Sized? {
     /// The method for the `+` operator
@@ -128,30 +122,27 @@ macro_rules! add_impl(
 
 add_impl!(uint u8 u16 u32 u64 int i8 i16 i32 i64 f32 f64)
 
-/**
- *
- * The `Sub` trait is used to specify the functionality of `-`.
- *
- * # Example
- *
- * A trivial implementation of `Sub`. When `Foo - Foo` happens, it ends up
- * calling `sub`, and therefore, `main` prints `Subtracting!`.
- *
- * ```rust
- * struct Foo;
- *
- * impl Sub<Foo, Foo> for Foo {
- *     fn sub(&self, _rhs: &Foo) -> Foo {
- *         println!("Subtracting!");
- *         *self
- *     }
- * }
- *
- * fn main() {
- *     Foo - Foo;
- * }
- * ```
- */
+/// The `Sub` trait is used to specify the functionality of `-`.
+///
+/// # Example
+///
+/// A trivial implementation of `Sub`. When `Foo - Foo` happens, it ends up
+/// calling `sub`, and therefore, `main` prints `Subtracting!`.
+///
+/// ```rust
+/// struct Foo;
+///
+/// impl Sub<Foo, Foo> for Foo {
+///     fn sub(&self, _rhs: &Foo) -> Foo {
+///         println!("Subtracting!");
+///         *self
+///     }
+/// }
+///
+/// fn main() {
+///     Foo - Foo;
+/// }
+/// ```
 #[lang="sub"]
 pub trait Sub<Sized? RHS, Result> for Sized? {
     /// The method for the `-` operator
@@ -169,30 +160,27 @@ macro_rules! sub_impl(
 
 sub_impl!(uint u8 u16 u32 u64 int i8 i16 i32 i64 f32 f64)
 
-/**
- *
- * The `Mul` trait is used to specify the functionality of `*`.
- *
- * # Example
- *
- * A trivial implementation of `Mul`. When `Foo * Foo` happens, it ends up
- * calling `mul`, and therefore, `main` prints `Multiplying!`.
- *
- * ```rust
- * struct Foo;
- *
- * impl Mul<Foo, Foo> for Foo {
- *     fn mul(&self, _rhs: &Foo) -> Foo {
- *         println!("Multiplying!");
- *         *self
- *     }
- * }
- *
- * fn main() {
- *     Foo * Foo;
- * }
- * ```
- */
+/// The `Mul` trait is used to specify the functionality of `*`.
+///
+/// # Example
+///
+/// A trivial implementation of `Mul`. When `Foo * Foo` happens, it ends up
+/// calling `mul`, and therefore, `main` prints `Multiplying!`.
+///
+/// ```rust
+/// struct Foo;
+///
+/// impl Mul<Foo, Foo> for Foo {
+///     fn mul(&self, _rhs: &Foo) -> Foo {
+///         println!("Multiplying!");
+///         *self
+///     }
+/// }
+///
+/// fn main() {
+///     Foo * Foo;
+/// }
+/// ```
 #[lang="mul"]
 pub trait Mul<Sized? RHS, Result>  for Sized? {
     /// The method for the `*` operator
@@ -210,30 +198,27 @@ macro_rules! mul_impl(
 
 mul_impl!(uint u8 u16 u32 u64 int i8 i16 i32 i64 f32 f64)
 
-/**
- *
- * The `Div` trait is used to specify the functionality of `/`.
- *
- * # Example
- *
- * A trivial implementation of `Div`. When `Foo / Foo` happens, it ends up
- * calling `div`, and therefore, `main` prints `Dividing!`.
- *
- * ```
- * struct Foo;
- *
- * impl Div<Foo, Foo> for Foo {
- *     fn div(&self, _rhs: &Foo) -> Foo {
- *         println!("Dividing!");
- *         *self
- *     }
- * }
- *
- * fn main() {
- *     Foo / Foo;
- * }
- * ```
- */
+/// The `Div` trait is used to specify the functionality of `/`.
+///
+/// # Example
+///
+/// A trivial implementation of `Div`. When `Foo / Foo` happens, it ends up
+/// calling `div`, and therefore, `main` prints `Dividing!`.
+///
+/// ```
+/// struct Foo;
+///
+/// impl Div<Foo, Foo> for Foo {
+///     fn div(&self, _rhs: &Foo) -> Foo {
+///         println!("Dividing!");
+///         *self
+///     }
+/// }
+///
+/// fn main() {
+///     Foo / Foo;
+/// }
+/// ```
 #[lang="div"]
 pub trait Div<Sized? RHS, Result> for Sized? {
     /// The method for the `/` operator
@@ -251,30 +236,27 @@ macro_rules! div_impl(
 
 div_impl!(uint u8 u16 u32 u64 int i8 i16 i32 i64 f32 f64)
 
-/**
- *
- * The `Rem` trait is used to specify the functionality of `%`.
- *
- * # Example
- *
- * A trivial implementation of `Rem`. When `Foo % Foo` happens, it ends up
- * calling `rem`, and therefore, `main` prints `Remainder-ing!`.
- *
- * ```
- * struct Foo;
- *
- * impl Rem<Foo, Foo> for Foo {
- *     fn rem(&self, _rhs: &Foo) -> Foo {
- *         println!("Remainder-ing!");
- *         *self
- *     }
- * }
- *
- * fn main() {
- *     Foo % Foo;
- * }
- * ```
- */
+/// The `Rem` trait is used to specify the functionality of `%`.
+///
+/// # Example
+///
+/// A trivial implementation of `Rem`. When `Foo % Foo` happens, it ends up
+/// calling `rem`, and therefore, `main` prints `Remainder-ing!`.
+///
+/// ```
+/// struct Foo;
+///
+/// impl Rem<Foo, Foo> for Foo {
+///     fn rem(&self, _rhs: &Foo) -> Foo {
+///         println!("Remainder-ing!");
+///         *self
+///     }
+/// }
+///
+/// fn main() {
+///     Foo % Foo;
+/// }
+/// ```
 #[lang="rem"]
 pub trait Rem<Sized? RHS, Result>  for Sized? {
     /// The method for the `%` operator
@@ -306,30 +288,27 @@ rem_impl!(uint u8 u16 u32 u64 int i8 i16 i32 i64)
 rem_float_impl!(f32, fmodf)
 rem_float_impl!(f64, fmod)
 
-/**
- *
- * The `Neg` trait is used to specify the functionality of unary `-`.
- *
- * # Example
- *
- * A trivial implementation of `Neg`. When `-Foo` happens, it ends up calling
- * `neg`, and therefore, `main` prints `Negating!`.
- *
- * ```
- * struct Foo;
- *
- * impl Neg<Foo> for Foo {
- *     fn neg(&self) -> Foo {
- *         println!("Negating!");
- *         *self
- *     }
- * }
- *
- * fn main() {
- *     -Foo;
- * }
- * ```
- */
+/// The `Neg` trait is used to specify the functionality of unary `-`.
+///
+/// # Example
+///
+/// A trivial implementation of `Neg`. When `-Foo` happens, it ends up calling
+/// `neg`, and therefore, `main` prints `Negating!`.
+///
+/// ```
+/// struct Foo;
+///
+/// impl Neg<Foo> for Foo {
+///     fn neg(&self) -> Foo {
+///         println!("Negating!");
+///         *self
+///     }
+/// }
+///
+/// fn main() {
+///     -Foo;
+/// }
+/// ```
 #[lang="neg"]
 pub trait Neg<Result> for Sized? {
     /// The method for the unary `-` operator
@@ -363,30 +342,27 @@ neg_uint_impl!(u32, i32)
 neg_uint_impl!(u64, i64)
 
 
-/**
- *
- * The `Not` trait is used to specify the functionality of unary `!`.
- *
- * # Example
- *
- * A trivial implementation of `Not`. When `!Foo` happens, it ends up calling
- * `not`, and therefore, `main` prints `Not-ing!`.
- *
- * ```
- * struct Foo;
- *
- * impl Not<Foo> for Foo {
- *     fn not(&self) -> Foo {
- *         println!("Not-ing!");
- *         *self
- *     }
- * }
- *
- * fn main() {
- *     !Foo;
- * }
- * ```
- */
+/// The `Not` trait is used to specify the functionality of unary `!`.
+///
+/// # Example
+///
+/// A trivial implementation of `Not`. When `!Foo` happens, it ends up calling
+/// `not`, and therefore, `main` prints `Not-ing!`.
+///
+/// ```
+/// struct Foo;
+///
+/// impl Not<Foo> for Foo {
+///     fn not(&self) -> Foo {
+///         println!("Not-ing!");
+///         *self
+///     }
+/// }
+///
+/// fn main() {
+///     !Foo;
+/// }
+/// ```
 #[lang="not"]
 pub trait Not<Result> for Sized? {
     /// The method for the unary `!` operator
@@ -405,30 +381,27 @@ macro_rules! not_impl(
 
 not_impl!(bool uint u8 u16 u32 u64 int i8 i16 i32 i64)
 
-/**
- *
- * The `BitAnd` trait is used to specify the functionality of `&`.
- *
- * # Example
- *
- * A trivial implementation of `BitAnd`. When `Foo & Foo` happens, it ends up
- * calling `bitand`, and therefore, `main` prints `Bitwise And-ing!`.
- *
- * ```
- * struct Foo;
- *
- * impl BitAnd<Foo, Foo> for Foo {
- *     fn bitand(&self, _rhs: &Foo) -> Foo {
- *         println!("Bitwise And-ing!");
- *         *self
- *     }
- * }
- *
- * fn main() {
- *     Foo & Foo;
- * }
- * ```
- */
+/// The `BitAnd` trait is used to specify the functionality of `&`.
+///
+/// # Example
+///
+/// A trivial implementation of `BitAnd`. When `Foo & Foo` happens, it ends up
+/// calling `bitand`, and therefore, `main` prints `Bitwise And-ing!`.
+///
+/// ```
+/// struct Foo;
+///
+/// impl BitAnd<Foo, Foo> for Foo {
+///     fn bitand(&self, _rhs: &Foo) -> Foo {
+///         println!("Bitwise And-ing!");
+///         *self
+///     }
+/// }
+///
+/// fn main() {
+///     Foo & Foo;
+/// }
+/// ```
 #[lang="bitand"]
 pub trait BitAnd<Sized? RHS, Result> for Sized? {
     /// The method for the `&` operator
@@ -446,30 +419,27 @@ macro_rules! bitand_impl(
 
 bitand_impl!(bool uint u8 u16 u32 u64 int i8 i16 i32 i64)
 
-/**
- *
- * The `BitOr` trait is used to specify the functionality of `|`.
- *
- * # Example
- *
- * A trivial implementation of `BitOr`. When `Foo | Foo` happens, it ends up
- * calling `bitor`, and therefore, `main` prints `Bitwise Or-ing!`.
- *
- * ```
- * struct Foo;
- *
- * impl BitOr<Foo, Foo> for Foo {
- *     fn bitor(&self, _rhs: &Foo) -> Foo {
- *         println!("Bitwise Or-ing!");
- *         *self
- *     }
- * }
- *
- * fn main() {
- *     Foo | Foo;
- * }
- * ```
- */
+/// The `BitOr` trait is used to specify the functionality of `|`.
+///
+/// # Example
+///
+/// A trivial implementation of `BitOr`. When `Foo | Foo` happens, it ends up
+/// calling `bitor`, and therefore, `main` prints `Bitwise Or-ing!`.
+///
+/// ```
+/// struct Foo;
+///
+/// impl BitOr<Foo, Foo> for Foo {
+///     fn bitor(&self, _rhs: &Foo) -> Foo {
+///         println!("Bitwise Or-ing!");
+///         *self
+///     }
+/// }
+///
+/// fn main() {
+///     Foo | Foo;
+/// }
+/// ```
 #[lang="bitor"]
 pub trait BitOr<Sized? RHS, Result> for Sized? {
     /// The method for the `|` operator
@@ -487,30 +457,27 @@ macro_rules! bitor_impl(
 
 bitor_impl!(bool uint u8 u16 u32 u64 int i8 i16 i32 i64)
 
-/**
- *
- * The `BitXor` trait is used to specify the functionality of `^`.
- *
- * # Example
- *
- * A trivial implementation of `BitXor`. When `Foo ^ Foo` happens, it ends up
- * calling `bitxor`, and therefore, `main` prints `Bitwise Xor-ing!`.
- *
- * ```
- * struct Foo;
- *
- * impl BitXor<Foo, Foo> for Foo {
- *     fn bitxor(&self, _rhs: &Foo) -> Foo {
- *         println!("Bitwise Xor-ing!");
- *         *self
- *     }
- * }
- *
- * fn main() {
- *     Foo ^ Foo;
- * }
- * ```
- */
+/// The `BitXor` trait is used to specify the functionality of `^`.
+///
+/// # Example
+///
+/// A trivial implementation of `BitXor`. When `Foo ^ Foo` happens, it ends up
+/// calling `bitxor`, and therefore, `main` prints `Bitwise Xor-ing!`.
+///
+/// ```
+/// struct Foo;
+///
+/// impl BitXor<Foo, Foo> for Foo {
+///     fn bitxor(&self, _rhs: &Foo) -> Foo {
+///         println!("Bitwise Xor-ing!");
+///         *self
+///     }
+/// }
+///
+/// fn main() {
+///     Foo ^ Foo;
+/// }
+/// ```
 #[lang="bitxor"]
 pub trait BitXor<Sized? RHS, Result> for Sized? {
     /// The method for the `^` operator
@@ -528,30 +495,27 @@ macro_rules! bitxor_impl(
 
 bitxor_impl!(bool uint u8 u16 u32 u64 int i8 i16 i32 i64)
 
-/**
- *
- * The `Shl` trait is used to specify the functionality of `<<`.
- *
- * # Example
- *
- * A trivial implementation of `Shl`. When `Foo << Foo` happens, it ends up
- * calling `shl`, and therefore, `main` prints `Shifting left!`.
- *
- * ```
- * struct Foo;
- *
- * impl Shl<Foo, Foo> for Foo {
- *     fn shl(&self, _rhs: &Foo) -> Foo {
- *         println!("Shifting left!");
- *         *self
- *     }
- * }
- *
- * fn main() {
- *     Foo << Foo;
- * }
- * ```
- */
+/// The `Shl` trait is used to specify the functionality of `<<`.
+///
+/// # Example
+///
+/// A trivial implementation of `Shl`. When `Foo << Foo` happens, it ends up
+/// calling `shl`, and therefore, `main` prints `Shifting left!`.
+///
+/// ```
+/// struct Foo;
+///
+/// impl Shl<Foo, Foo> for Foo {
+///     fn shl(&self, _rhs: &Foo) -> Foo {
+///         println!("Shifting left!");
+///         *self
+///     }
+/// }
+///
+/// fn main() {
+///     Foo << Foo;
+/// }
+/// ```
 #[lang="shl"]
 pub trait Shl<Sized? RHS, Result> for Sized? {
     /// The method for the `<<` operator
@@ -571,30 +535,27 @@ macro_rules! shl_impl(
 
 shl_impl!(uint u8 u16 u32 u64 int i8 i16 i32 i64)
 
-/**
- *
- * The `Shr` trait is used to specify the functionality of `>>`.
- *
- * # Example
- *
- * A trivial implementation of `Shr`. When `Foo >> Foo` happens, it ends up
- * calling `shr`, and therefore, `main` prints `Shifting right!`.
- *
- * ```
- * struct Foo;
- *
- * impl Shr<Foo, Foo> for Foo {
- *     fn shr(&self, _rhs: &Foo) -> Foo {
- *         println!("Shifting right!");
- *         *self
- *     }
- * }
- *
- * fn main() {
- *     Foo >> Foo;
- * }
- * ```
- */
+/// The `Shr` trait is used to specify the functionality of `>>`.
+///
+/// # Example
+///
+/// A trivial implementation of `Shr`. When `Foo >> Foo` happens, it ends up
+/// calling `shr`, and therefore, `main` prints `Shifting right!`.
+///
+/// ```
+/// struct Foo;
+///
+/// impl Shr<Foo, Foo> for Foo {
+///     fn shr(&self, _rhs: &Foo) -> Foo {
+///         println!("Shifting right!");
+///         *self
+///     }
+/// }
+///
+/// fn main() {
+///     Foo >> Foo;
+/// }
+/// ```
 #[lang="shr"]
 pub trait Shr<Sized? RHS, Result> for Sized? {
     /// The method for the `>>` operator
@@ -612,105 +573,96 @@ macro_rules! shr_impl(
 
 shr_impl!(uint u8 u16 u32 u64 int i8 i16 i32 i64)
 
-/**
- *
- * The `Index` trait is used to specify the functionality of indexing operations
- * like `arr[idx]` when used in an immutable context.
- *
- * # Example
- *
- * A trivial implementation of `Index`. When `Foo[Foo]` happens, it ends up
- * calling `index`, and therefore, `main` prints `Indexing!`.
- *
- * ```
- * struct Foo;
- *
- * impl Index<Foo, Foo> for Foo {
- *     fn index<'a>(&'a self, _index: &Foo) -> &'a Foo {
- *         println!("Indexing!");
- *         self
- *     }
- * }
- *
- * fn main() {
- *     Foo[Foo];
- * }
- * ```
- */
+/// The `Index` trait is used to specify the functionality of indexing operations
+/// like `arr[idx]` when used in an immutable context.
+///
+/// # Example
+///
+/// A trivial implementation of `Index`. When `Foo[Foo]` happens, it ends up
+/// calling `index`, and therefore, `main` prints `Indexing!`.
+///
+/// ```
+/// struct Foo;
+///
+/// impl Index<Foo, Foo> for Foo {
+///     fn index<'a>(&'a self, _index: &Foo) -> &'a Foo {
+///         println!("Indexing!");
+///         self
+///     }
+/// }
+///
+/// fn main() {
+///     Foo[Foo];
+/// }
+/// ```
 #[lang="index"]
 pub trait Index<Sized? Index, Sized? Result> for Sized? {
     /// The method for the indexing (`Foo[Bar]`) operation
     fn index<'a>(&'a self, index: &Index) -> &'a Result;
 }
 
-/**
- *
- * The `IndexMut` trait is used to specify the functionality of indexing
- * operations like `arr[idx]`, when used in a mutable context.
- *
- * # Example
- *
- * A trivial implementation of `IndexMut`. When `Foo[Foo]` happens, it ends up
- * calling `index_mut`, and therefore, `main` prints `Indexing!`.
- *
- * ```
- * struct Foo;
- *
- * impl IndexMut<Foo, Foo> for Foo {
- *     fn index_mut<'a>(&'a mut self, _index: &Foo) -> &'a mut Foo {
- *         println!("Indexing!");
- *         self
- *     }
- * }
- *
- * fn main() {
- *     &mut Foo[Foo];
- * }
- * ```
- */
+/// The `IndexMut` trait is used to specify the functionality of indexing
+/// operations like `arr[idx]`, when used in a mutable context.
+///
+/// # Example
+///
+/// A trivial implementation of `IndexMut`. When `Foo[Foo]` happens, it ends up
+/// calling `index_mut`, and therefore, `main` prints `Indexing!`.
+///
+/// ```
+/// struct Foo;
+///
+/// impl IndexMut<Foo, Foo> for Foo {
+///     fn index_mut<'a>(&'a mut self, _index: &Foo) -> &'a mut Foo {
+///         println!("Indexing!");
+///         self
+///     }
+/// }
+///
+/// fn main() {
+///     &mut Foo[Foo];
+/// }
+/// ```
 #[lang="index_mut"]
 pub trait IndexMut<Sized? Index, Sized? Result> for Sized? {
     /// The method for the indexing (`Foo[Bar]`) operation
     fn index_mut<'a>(&'a mut self, index: &Index) -> &'a mut Result;
 }
 
-/**
- *
- * The `Slice` trait is used to specify the functionality of slicing operations
- * like `arr[from..to]` when used in an immutable context.
- *
- * # Example
- *
- * A trivial implementation of `Slice`. When `Foo[..Foo]` happens, it ends up
- * calling `slice_to`, and therefore, `main` prints `Slicing!`.
- *
- * ```ignore
- * struct Foo;
- *
- * impl Slice<Foo, Foo> for Foo {
- *     fn as_slice_<'a>(&'a self) -> &'a Foo {
- *         println!("Slicing!");
- *         self
- *     }
- *     fn slice_from_or_fail<'a>(&'a self, _from: &Foo) -> &'a Foo {
- *         println!("Slicing!");
- *         self
- *     }
- *     fn slice_to_or_fail<'a>(&'a self, _to: &Foo) -> &'a Foo {
- *         println!("Slicing!");
- *         self
- *     }
- *     fn slice_or_fail<'a>(&'a self, _from: &Foo, _to: &Foo) -> &'a Foo {
- *         println!("Slicing!");
- *         self
- *     }
- * }
- *
- * fn main() {
- *     Foo[..Foo];
- * }
- * ```
- */
+/// The `Slice` trait is used to specify the functionality of slicing operations
+/// like `arr[from..to]` when used in an immutable context.
+///
+/// # Example
+///
+/// A trivial implementation of `Slice`. When `Foo[..Foo]` happens, it ends up
+/// calling `slice_to`, and therefore, `main` prints `Slicing!`.
+///
+/// ```ignore
+/// struct Foo;
+///
+/// impl Slice<Foo, Foo> for Foo {
+///     fn as_slice_<'a>(&'a self) -> &'a Foo {
+///         println!("Slicing!");
+///         self
+///     }
+///     fn slice_from_or_fail<'a>(&'a self, _from: &Foo) -> &'a Foo {
+///         println!("Slicing!");
+///         self
+///     }
+///     fn slice_to_or_fail<'a>(&'a self, _to: &Foo) -> &'a Foo {
+///         println!("Slicing!");
+///         self
+///     }
+///     fn slice_or_fail<'a>(&'a self, _from: &Foo, _to: &Foo) -> &'a Foo {
+///         println!("Slicing!");
+///         self
+///     }
+/// }
+///
+/// fn main() {
+///     Foo[..Foo];
+/// }
+/// ```
 #[lang="slice"]
 pub trait Slice<Sized? Idx, Sized? Result> for Sized? {
     /// The method for the slicing operation foo[]
@@ -723,43 +675,40 @@ pub trait Slice<Sized? Idx, Sized? Result> for Sized? {
     fn slice_or_fail<'a>(&'a self, from: &Idx, to: &Idx) -> &'a Result;
 }
 
-/**
- *
- * The `SliceMut` trait is used to specify the functionality of slicing
- * operations like `arr[from..to]`, when used in a mutable context.
- *
- * # Example
- *
- * A trivial implementation of `SliceMut`. When `Foo[Foo..]` happens, it ends up
- * calling `slice_from_mut`, and therefore, `main` prints `Slicing!`.
- *
- * ```ignore
- * struct Foo;
- *
- * impl SliceMut<Foo, Foo> for Foo {
- *     fn as_mut_slice_<'a>(&'a mut self) -> &'a mut Foo {
- *         println!("Slicing!");
- *         self
- *     }
- *     fn slice_from_or_fail_mut<'a>(&'a mut self, _from: &Foo) -> &'a mut Foo {
- *         println!("Slicing!");
- *         self
- *     }
- *     fn slice_to_or_fail_mut<'a>(&'a mut self, _to: &Foo) -> &'a mut Foo {
- *         println!("Slicing!");
- *         self
- *     }
- *     fn slice_or_fail_mut<'a>(&'a mut self, _from: &Foo, _to: &Foo) -> &'a mut Foo {
- *         println!("Slicing!");
- *         self
- *     }
- * }
- *
- * pub fn main() {
- *     Foo[mut Foo..];
- * }
- * ```
- */
+/// The `SliceMut` trait is used to specify the functionality of slicing
+/// operations like `arr[from..to]`, when used in a mutable context.
+///
+/// # Example
+///
+/// A trivial implementation of `SliceMut`. When `Foo[Foo..]` happens, it ends up
+/// calling `slice_from_mut`, and therefore, `main` prints `Slicing!`.
+///
+/// ```ignore
+/// struct Foo;
+///
+/// impl SliceMut<Foo, Foo> for Foo {
+///     fn as_mut_slice_<'a>(&'a mut self) -> &'a mut Foo {
+///         println!("Slicing!");
+///         self
+///     }
+///     fn slice_from_or_fail_mut<'a>(&'a mut self, _from: &Foo) -> &'a mut Foo {
+///         println!("Slicing!");
+///         self
+///     }
+///     fn slice_to_or_fail_mut<'a>(&'a mut self, _to: &Foo) -> &'a mut Foo {
+///         println!("Slicing!");
+///         self
+///     }
+///     fn slice_or_fail_mut<'a>(&'a mut self, _from: &Foo, _to: &Foo) -> &'a mut Foo {
+///         println!("Slicing!");
+///         self
+///     }
+/// }
+///
+/// pub fn main() {
+///     Foo[mut Foo..];
+/// }
+/// ```
 #[lang="slice_mut"]
 pub trait SliceMut<Sized? Idx, Sized? Result> for Sized? {
     /// The method for the slicing operation foo[]
@@ -772,33 +721,30 @@ pub trait SliceMut<Sized? Idx, Sized? Result> for Sized? {
     fn slice_or_fail_mut<'a>(&'a mut self, from: &Idx, to: &Idx) -> &'a mut Result;
 }
 
-/**
- *
- * The `Deref` trait is used to specify the functionality of dereferencing
- * operations like `*v`.
- *
- * # Example
- *
- * A struct with a single field which is accessible via dereferencing the
- * struct.
- *
- * ```
- * struct DerefExample<T> {
- *     value: T
- * }
- *
- * impl<T> Deref<T> for DerefExample<T> {
- *     fn deref<'a>(&'a self) -> &'a T {
- *         &self.value
- *     }
- * }
- *
- * fn main() {
- *     let x = DerefExample { value: 'a' };
- *     assert_eq!('a', *x);
- * }
- * ```
- */
+/// The `Deref` trait is used to specify the functionality of dereferencing
+/// operations like `*v`.
+///
+/// # Example
+///
+/// A struct with a single field which is accessible via dereferencing the
+/// struct.
+///
+/// ```
+/// struct DerefExample<T> {
+///     value: T
+/// }
+///
+/// impl<T> Deref<T> for DerefExample<T> {
+///     fn deref<'a>(&'a self) -> &'a T {
+///         &self.value
+///     }
+/// }
+///
+/// fn main() {
+///     let x = DerefExample { value: 'a' };
+///     assert_eq!('a', *x);
+/// }
+/// ```
 #[lang="deref"]
 pub trait Deref<Sized? Result> for Sized? {
     /// The method called to dereference a value
@@ -813,40 +759,37 @@ impl<'a, Sized? T> Deref<T> for &'a mut T {
     fn deref(&self) -> &T { *self }
 }
 
-/**
- *
- * The `DerefMut` trait is used to specify the functionality of dereferencing
- * mutably like `*v = 1;`
- *
- * # Example
- *
- * A struct with a single field which is modifiable via dereferencing the
- * struct.
- *
- * ```
- * struct DerefMutExample<T> {
- *     value: T
- * }
- *
- * impl<T> Deref<T> for DerefMutExample<T> {
- *     fn deref<'a>(&'a self) -> &'a T {
- *         &self.value
- *     }
- * }
- *
- * impl<T> DerefMut<T> for DerefMutExample<T> {
- *     fn deref_mut<'a>(&'a mut self) -> &'a mut T {
- *         &mut self.value
- *     }
- * }
- *
- * fn main() {
- *     let mut x = DerefMutExample { value: 'a' };
- *     *x = 'b';
- *     assert_eq!('b', *x);
- * }
- * ```
- */
+/// The `DerefMut` trait is used to specify the functionality of dereferencing
+/// mutably like `*v = 1;`
+///
+/// # Example
+///
+/// A struct with a single field which is modifiable via dereferencing the
+/// struct.
+///
+/// ```
+/// struct DerefMutExample<T> {
+///     value: T
+/// }
+///
+/// impl<T> Deref<T> for DerefMutExample<T> {
+///     fn deref<'a>(&'a self) -> &'a T {
+///         &self.value
+///     }
+/// }
+///
+/// impl<T> DerefMut<T> for DerefMutExample<T> {
+///     fn deref_mut<'a>(&'a mut self) -> &'a mut T {
+///         &mut self.value
+///     }
+/// }
+///
+/// fn main() {
+///     let mut x = DerefMutExample { value: 'a' };
+///     *x = 'b';
+///     assert_eq!('b', *x);
+/// }
+/// ```
 #[lang="deref_mut"]
 pub trait DerefMut<Sized? Result>: Deref<Result> {
     /// The method called to mutably dereference a value
diff --git a/src/libcore/slice.rs b/src/libcore/slice.rs
index 6625d19781a23..07b21e18253dc 100644
--- a/src/libcore/slice.rs
+++ b/src/libcore/slice.rs
@@ -1634,9 +1634,7 @@ impl BinarySearchResult {
 // Free functions
 //
 
-/**
- * Converts a pointer to A into a slice of length 1 (without copying).
- */
+/// Converts a pointer to A into a slice of length 1 (without copying).
 #[unstable = "waiting for DST"]
 pub fn ref_slice<'a, A>(s: &'a A) -> &'a [A] {
     unsafe {
@@ -1644,9 +1642,7 @@ pub fn ref_slice<'a, A>(s: &'a A) -> &'a [A] {
     }
 }
 
-/**
- * Converts a pointer to A into a slice of length 1 (without copying).
- */
+/// Converts a pointer to A into a slice of length 1 (without copying).
 #[unstable = "waiting for DST"]
 pub fn mut_ref_slice<'a, A>(s: &'a mut A) -> &'a mut [A] {
     unsafe {
@@ -1710,10 +1706,8 @@ pub mod raw {
     use raw::Slice;
     use option::{None, Option, Some};
 
-    /**
-     * Form a slice from a pointer and length (as a number of units,
-     * not bytes).
-     */
+    /// Form a slice from a pointer and length (as a number of units,
+    /// not bytes).
     #[inline]
     #[deprecated = "renamed to slice::from_raw_buf"]
     pub unsafe fn buf_as_slice<T,U>(p: *const T, len: uint, f: |v: &[T]| -> U)
@@ -1724,10 +1718,8 @@ pub mod raw {
         }))
     }
 
-    /**
-     * Form a slice from a pointer and length (as a number of units,
-     * not bytes).
-     */
+    /// Form a slice from a pointer and length (as a number of units,
+    /// not bytes).
     #[inline]
     #[deprecated = "renamed to slice::from_raw_mut_buf"]
     pub unsafe fn mut_buf_as_slice<T,
@@ -1742,12 +1734,10 @@ pub mod raw {
         }))
     }
 
-    /**
-     * Returns a pointer to first element in slice and adjusts
-     * slice so it no longer contains that element. Returns None
-     * if the slice is empty. O(1).
-     */
-     #[inline]
+    /// Returns a pointer to first element in slice and adjusts
+    /// slice so it no longer contains that element. Returns None
+    /// if the slice is empty. O(1).
+    #[inline]
     #[deprecated = "inspect `Slice::{data, len}` manually (increment data by 1)"]
     pub unsafe fn shift_ptr<T>(slice: &mut Slice<T>) -> Option<*const T> {
         if slice.len == 0 { return None; }
@@ -1757,11 +1747,9 @@ pub mod raw {
         Some(head)
     }
 
-    /**
-     * Returns a pointer to last element in slice and adjusts
-     * slice so it no longer contains that element. Returns None
-     * if the slice is empty. O(1).
-     */
+    /// Returns a pointer to last element in slice and adjusts
+    /// slice so it no longer contains that element. Returns None
+    /// if the slice is empty. O(1).
     #[inline]
     #[deprecated = "inspect `Slice::{data, len}` manually (decrement len by 1)"]
     pub unsafe fn pop_ptr<T>(slice: &mut Slice<T>) -> Option<*const T> {
diff --git a/src/liblibc/lib.rs b/src/liblibc/lib.rs
index 10610b7058406..55f483682b75e 100644
--- a/src/liblibc/lib.rs
+++ b/src/liblibc/lib.rs
@@ -329,20 +329,18 @@ pub mod types {
     // Standard types that are opaque or common, so are not per-target.
     pub mod common {
         pub mod c95 {
-            /**
-            Type used to construct void pointers for use with C.
-
-            This type is only useful as a pointer target. Do not use it as a
-            return type for FFI functions which have the `void` return type in
-            C. Use the unit type `()` or omit the return type instead.
-
-            For LLVM to recognize the void pointer type and by extension
-            functions like malloc(), we need to have it represented as i8* in
-            LLVM bitcode. The enum used here ensures this and prevents misuse
-            of the "raw" type by only having private variants.. We need two
-            variants, because the compiler complains about the repr attribute
-            otherwise.
-            */
+            /// Type used to construct void pointers for use with C.
+            ///
+            /// This type is only useful as a pointer target. Do not use it as a
+            /// return type for FFI functions which have the `void` return type in
+            /// C. Use the unit type `()` or omit the return type instead.
+            ///
+            /// For LLVM to recognize the void pointer type and by extension
+            /// functions like malloc(), we need to have it represented as i8* in
+            /// LLVM bitcode. The enum used here ensures this and prevents misuse
+            /// of the "raw" type by only having private variants. We need two
+            /// variants, because the compiler complains about the repr attribute
+            /// otherwise.
             #[repr(u8)]
             pub enum c_void {
                 __variant1,
diff --git a/src/librustc/lint/context.rs b/src/librustc/lint/context.rs
index 1b2b044b49afe..c7bed838eb919 100644
--- a/src/librustc/lint/context.rs
+++ b/src/librustc/lint/context.rs
@@ -464,11 +464,9 @@ impl<'a, 'tcx> Context<'a, 'tcx> {
         self.lookup_and_emit(lint, Some(span), msg);
     }
 
-    /**
-     * Merge the lints specified by any lint attributes into the
-     * current lint context, call the provided function, then reset the
-     * lints in effect to their previous state.
-     */
+    /// Merge the lints specified by any lint attributes into the
+    /// current lint context, call the provided function, then reset the
+    /// lints in effect to their previous state.
     fn with_lint_attrs(&mut self,
                        attrs: &[ast::Attribute],
                        f: |&mut Context|) {
diff --git a/src/librustc/middle/fast_reject.rs b/src/librustc/middle/fast_reject.rs
index 7514a63c7fa58..593ec94903562 100644
--- a/src/librustc/middle/fast_reject.rs
+++ b/src/librustc/middle/fast_reject.rs
@@ -13,7 +13,7 @@ use syntax::ast;
 
 use self::SimplifiedType::*;
 
-/** See `simplify_type */
+/// See `simplify_type`
 #[deriving(Clone, PartialEq, Eq, Hash)]
 pub enum SimplifiedType {
     BoolSimplifiedType,
diff --git a/src/librustc/middle/mem_categorization.rs b/src/librustc/middle/mem_categorization.rs
index 046ab162cfcb0..d0b2f5c1b6ec1 100644
--- a/src/librustc/middle/mem_categorization.rs
+++ b/src/librustc/middle/mem_categorization.rs
@@ -266,24 +266,22 @@ pub struct MemCategorizationContext<'t,TYPER:'t> {
 
 pub type McResult<T> = Result<T, ()>;
 
-/**
- * The `Typer` trait provides the interface for the mem-categorization
- * module to the results of the type check. It can be used to query
- * the type assigned to an expression node, to inquire after adjustments,
- * and so on.
- *
- * This interface is needed because mem-categorization is used from
- * two places: `regionck` and `borrowck`. `regionck` executes before
- * type inference is complete, and hence derives types and so on from
- * intermediate tables.  This also implies that type errors can occur,
- * and hence `node_ty()` and friends return a `Result` type -- any
- * error will propagate back up through the mem-categorization
- * routines.
- *
- * In the borrow checker, in contrast, type checking is complete and we
- * know that no errors have occurred, so we simply consult the tcx and we
- * can be sure that only `Ok` results will occur.
- */
+/// The `Typer` trait provides the interface for the mem-categorization
+/// module to the results of the type check. It can be used to query
+/// the type assigned to an expression node, to inquire after adjustments,
+/// and so on.
+///
+/// This interface is needed because mem-categorization is used from
+/// two places: `regionck` and `borrowck`. `regionck` executes before
+/// type inference is complete, and hence derives types and so on from
+/// intermediate tables.  This also implies that type errors can occur,
+/// and hence `node_ty()` and friends return a `Result` type -- any
+/// error will propagate back up through the mem-categorization
+/// routines.
+///
+/// In the borrow checker, in contrast, type checking is complete and we
+/// know that no errors have occurred, so we simply consult the tcx and we
+/// can be sure that only `Ok` results will occur.
 pub trait Typer<'tcx> {
     fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx>;
     fn node_ty(&self, id: ast::NodeId) -> McResult<Ty<'tcx>>;
diff --git a/src/librustc/middle/region.rs b/src/librustc/middle/region.rs
index c5511f995bc68..3684f64ebe537 100644
--- a/src/librustc/middle/region.rs
+++ b/src/librustc/middle/region.rs
@@ -72,46 +72,44 @@ impl CodeExtent {
     }
 }
 
-/**
-The region maps encode information about region relationships.
-
+/// The region maps encode information about region relationships.
+///
-- `scope_map` maps from a scope id to the enclosing scope id; this is
-  usually corresponding to the lexical nesting, though in the case of
-  closures the parent scope is the innermost conditional expression or repeating
-  block
-
-- `var_map` maps from a variable or binding id to the block in which
-  that variable is declared.
-
-- `free_region_map` maps from a free region `a` to a list of free
-  regions `bs` such that `a <= b for all b in bs`
-  - the free region map is populated during type check as we check
-    each function. See the function `relate_free_regions` for
-    more information.
-
-- `rvalue_scopes` includes entries for those expressions whose cleanup
-  scope is larger than the default. The map goes from the expression
-  id to the cleanup scope id. For rvalues not present in this table,
-  the appropriate cleanup scope is the innermost enclosing statement,
-  conditional expression, or repeating block (see `terminating_scopes`).
-
-- `terminating_scopes` is a set containing the ids of each statement,
-  or conditional/repeating expression. These scopes are calling "terminating
-  scopes" because, when attempting to find the scope of a temporary, by
-  default we search up the enclosing scopes until we encounter the
-  terminating scope. A conditional/repeating
-  expression is one which is not guaranteed to execute exactly once
-  upon entering the parent scope. This could be because the expression
-  only executes conditionally, such as the expression `b` in `a && b`,
-  or because the expression may execute many times, such as a loop
-  body. The reason that we distinguish such expressions is that, upon
-  exiting the parent scope, we cannot statically know how many times
-  the expression executed, and thus if the expression creates
-  temporaries we cannot know statically how many such temporaries we
-  would have to cleanup. Therefore we ensure that the temporaries never
-  outlast the conditional/repeating expression, preventing the need
-  for dynamic checks and/or arbitrary amounts of stack space.
-*/
+/// - `scope_map` maps from a scope id to the enclosing scope id; this is
+///   usually corresponding to the lexical nesting, though in the case of
+///   closures the parent scope is the innermost conditional expression or repeating
+///   block
+///
+/// - `var_map` maps from a variable or binding id to the block in which
+///   that variable is declared.
+///
+/// - `free_region_map` maps from a free region `a` to a list of free
+///   regions `bs` such that `a <= b for all b in bs`
+///   - the free region map is populated during type check as we check
+///     each function. See the function `relate_free_regions` for
+///     more information.
+///
+/// - `rvalue_scopes` includes entries for those expressions whose cleanup
+///   scope is larger than the default. The map goes from the expression
+///   id to the cleanup scope id. For rvalues not present in this table,
+///   the appropriate cleanup scope is the innermost enclosing statement,
+///   conditional expression, or repeating block (see `terminating_scopes`).
+///
+/// - `terminating_scopes` is a set containing the ids of each statement,
+///   or conditional/repeating expression. These scopes are called "terminating
+///   scopes" because, when attempting to find the scope of a temporary, by
+///   default we search up the enclosing scopes until we encounter the
+///   terminating scope. A conditional/repeating
+///   expression is one which is not guaranteed to execute exactly once
+///   upon entering the parent scope. This could be because the expression
+///   only executes conditionally, such as the expression `b` in `a && b`,
+///   or because the expression may execute many times, such as a loop
+///   body. The reason that we distinguish such expressions is that, upon
+///   exiting the parent scope, we cannot statically know how many times
+///   the expression executed, and thus if the expression creates
+///   temporaries we cannot know statically how many such temporaries we
+///   would have to cleanup. Therefore we ensure that the temporaries never
+///   outlast the conditional/repeating expression, preventing the need
+///   for dynamic checks and/or arbitrary amounts of stack space.
 pub struct RegionMaps {
     scope_map: RefCell<FnvHashMap<CodeExtent, CodeExtent>>,
     var_map: RefCell<NodeMap<CodeExtent>>,
diff --git a/src/librustc/middle/resolve.rs b/src/librustc/middle/resolve.rs
index 68a31c83ea484..3fcaa43c2433c 100644
--- a/src/librustc/middle/resolve.rs
+++ b/src/librustc/middle/resolve.rs
@@ -761,10 +761,8 @@ impl NameBindings {
         }
     }
 
-    /**
-     * Returns the module node. Panics if this node does not have a module
-     * definition.
-     */
+    /// Returns the module node. Panics if this node does not have a module
+    /// definition.
     fn get_module(&self) -> Rc<Module> {
         match self.get_module_if_available() {
             None => {
@@ -1098,18 +1096,16 @@ impl<'a> Resolver<'a> {
         visit::walk_crate(&mut visitor, krate);
     }
 
-    /**
-     * Adds a new child item to the module definition of the parent node and
-     * returns its corresponding name bindings as well as the current parent.
-     * Or, if we're inside a block, creates (or reuses) an anonymous module
-     * corresponding to the innermost block ID and returns the name bindings
-     * as well as the newly-created parent.
-     *
-     * # Panics
-     *
-     * Panics if this node does not have a module definition and we are not inside
-     * a block.
-     */
+    /// Adds a new child item to the module definition of the parent node and
+    /// returns its corresponding name bindings as well as the current parent.
+    /// Or, if we're inside a block, creates (or reuses) an anonymous module
+    /// corresponding to the innermost block ID and returns the name bindings
+    /// as well as the newly-created parent.
+    ///
+    /// # Panics
+    ///
+    /// Panics if this node does not have a module definition and we are not inside
+    /// a block.
     fn add_child(&self,
                  name: Name,
                  reduced_graph_parent: ReducedGraphParent,
diff --git a/src/librustc/middle/subst.rs b/src/librustc/middle/subst.rs
index b030867fc841c..e4bf5fec2ca95 100644
--- a/src/librustc/middle/subst.rs
+++ b/src/librustc/middle/subst.rs
@@ -24,22 +24,19 @@ use syntax::codemap::{Span, DUMMY_SP};
 
 ///////////////////////////////////////////////////////////////////////////
 
-/**
- * A substitution mapping type/region parameters to new values. We
- * identify each in-scope parameter by an *index* and a *parameter
- * space* (which indices where the parameter is defined; see
- * `ParamSpace`).
- */
+/// A substitution mapping type/region parameters to new values. We
+/// identify each in-scope parameter by an *index* and a *parameter
+/// space* (which indicates where the parameter is defined; see
+/// `ParamSpace`).
 #[deriving(Clone, PartialEq, Eq, Hash, Show)]
 pub struct Substs<'tcx> {
     pub types: VecPerParamSpace<Ty<'tcx>>,
     pub regions: RegionSubsts,
 }
 
-/**
- * Represents the values to use when substituting lifetime parameters.
- * If the value is `ErasedRegions`, then this subst is occurring during
- * trans, and all region parameters will be replaced with `ty::ReStatic`. */
+/// Represents the values to use when substituting lifetime parameters.
+/// If the value is `ErasedRegions`, then this subst is occurring during
+/// trans, and all region parameters will be replaced with `ty::ReStatic`.
 #[deriving(Clone, PartialEq, Eq, Hash, Show)]
 pub enum RegionSubsts {
     ErasedRegions,
@@ -226,11 +223,9 @@ impl ParamSpace {
     }
 }
 
-/**
- * Vector of things sorted by param space. Used to keep
- * the set of things declared on the type, self, or method
- * distinct.
- */
+/// Vector of things sorted by param space. Used to keep
+/// the set of things declared on the type, self, or method
+/// distinct.
 #[deriving(PartialEq, Eq, Clone, Hash, Encodable, Decodable)]
 pub struct VecPerParamSpace<T> {
     // This was originally represented as a tuple with one Vec<T> for
@@ -250,10 +245,8 @@ pub struct VecPerParamSpace<T> {
     content: Vec<T>,
 }
 
-/**
- * The `split` function converts one `VecPerParamSpace` into this
- * `SeparateVecsPerParamSpace` structure.
- */
+/// The `split` function converts one `VecPerParamSpace` into this
+/// `SeparateVecsPerParamSpace` structure.
 pub struct SeparateVecsPerParamSpace<T> {
     pub types: Vec<T>,
     pub selfs: Vec<T>,
diff --git a/src/librustc/middle/traits/fulfill.rs b/src/librustc/middle/traits/fulfill.rs
index 62382ac386fcd..5b4ac509ccc01 100644
--- a/src/librustc/middle/traits/fulfill.rs
+++ b/src/librustc/middle/traits/fulfill.rs
@@ -19,18 +19,16 @@ use super::FulfillmentError;
 use super::CodeSelectionError;
 use super::select::SelectionContext;
 
-/**
- * The fulfillment context is used to drive trait resolution.  It
- * consists of a list of obligations that must be (eventually)
- * satisfied. The job is to track which are satisfied, which yielded
- * errors, and which are still pending. At any point, users can call
- * `select_where_possible`, and the fulfilment context will try to do
- * selection, retaining only those obligations that remain
- * ambiguous. This may be helpful in pushing type inference
- * along. Once all type inference constraints have been generated, the
- * method `select_all_or_error` can be used to report any remaining
- * ambiguous cases as errors.
- */
+/// The fulfillment context is used to drive trait resolution.  It
+/// consists of a list of obligations that must be (eventually)
+/// satisfied. The job is to track which are satisfied, which yielded
+/// errors, and which are still pending. At any point, users can call
+/// `select_where_possible`, and the fulfillment context will try to do
+/// selection, retaining only those obligations that remain
+/// ambiguous. This may be helpful in pushing type inference
+/// along. Once all type inference constraints have been generated, the
+/// method `select_all_or_error` can be used to report any remaining
+/// ambiguous cases as errors.
 pub struct FulfillmentContext<'tcx> {
     // A list of all obligations that have been registered with this
     // fulfillment context.
diff --git a/src/librustc/middle/traits/mod.rs b/src/librustc/middle/traits/mod.rs
index 0a47d64789038..9845a371e3478 100644
--- a/src/librustc/middle/traits/mod.rs
+++ b/src/librustc/middle/traits/mod.rs
@@ -42,14 +42,12 @@ mod fulfill;
 mod select;
 mod util;
 
-/**
- * An `Obligation` represents some trait reference (e.g. `int:Eq`) for
- * which the vtable must be found.  The process of finding a vtable is
- * called "resolving" the `Obligation`. This process consists of
- * either identifying an `impl` (e.g., `impl Eq for int`) that
- * provides the required vtable, or else finding a bound that is in
- * scope. The eventual result is usually a `Selection` (defined below).
- */
+/// An `Obligation` represents some trait reference (e.g. `int:Eq`) for
+/// which the vtable must be found.  The process of finding a vtable is
+/// called "resolving" the `Obligation`. This process consists of
+/// either identifying an `impl` (e.g., `impl Eq for int`) that
+/// provides the required vtable, or else finding a bound that is in
+/// scope. The eventual result is usually a `Selection` (defined below).
 #[deriving(Clone)]
 pub struct Obligation<'tcx> {
     pub cause: ObligationCause<'tcx>,
@@ -57,9 +55,7 @@ pub struct Obligation<'tcx> {
     pub trait_ref: Rc<ty::TraitRef<'tcx>>,
 }
 
-/**
- * Why did we incur this obligation? Used for error reporting.
- */
+/// Why did we incur this obligation? Used for error reporting.
 #[deriving(Clone)]
 pub struct ObligationCause<'tcx> {
     pub span: Span,
@@ -121,57 +117,53 @@ pub enum FulfillmentErrorCode<'tcx> {
     CodeAmbiguity,
 }
 
-/**
- * When performing resolution, it is typically the case that there
- * can be one of three outcomes:
- *
- * - `Ok(Some(r))`: success occurred with result `r`
- * - `Ok(None)`: could not definitely determine anything, usually due
- *   to inconclusive type inference.
- * - `Err(e)`: error `e` occurred
- */
+/// When performing resolution, it is typically the case that there
+/// can be one of three outcomes:
+///
+/// - `Ok(Some(r))`: success occurred with result `r`
+/// - `Ok(None)`: could not definitely determine anything, usually due
+///   to inconclusive type inference.
+/// - `Err(e)`: error `e` occurred
 pub type SelectionResult<'tcx, T> = Result<Option<T>, SelectionError<'tcx>>;
 
-/**
- * Given the successful resolution of an obligation, the `Vtable`
- * indicates where the vtable comes from. Note that while we call this
- * a "vtable", it does not necessarily indicate dynamic dispatch at
- * runtime. `Vtable` instances just tell the compiler where to find
- * methods, but in generic code those methods are typically statically
- * dispatched -- only when an object is constructed is a `Vtable`
- * instance reified into an actual vtable.
- *
- * For example, the vtable may be tied to a specific impl (case A),
- * or it may be relative to some bound that is in scope (case B).
- *
- *
- * ```
- * impl<T:Clone> Clone<T> for Option<T> { ... } // Impl_1
- * impl<T:Clone> Clone<T> for Box<T> { ... }    // Impl_2
- * impl Clone for int { ... }             // Impl_3
- *
- * fn foo<T:Clone>(concrete: Option<Box<int>>,
- *                 param: T,
- *                 mixed: Option<T>) {
- *
- *    // Case A: Vtable points at a specific impl. Only possible when
- *    // type is concretely known. If the impl itself has bounded
- *    // type parameters, Vtable will carry resolutions for those as well:
- *    concrete.clone(); // Vtable(Impl_1, [Vtable(Impl_2, [Vtable(Impl_3)])])
- *
- *    // Case B: Vtable must be provided by caller. This applies when
- *    // type is a type parameter.
- *    param.clone();    // VtableParam(Oblig_1)
- *
- *    // Case C: A mix of cases A and B.
- *    mixed.clone();    // Vtable(Impl_1, [VtableParam(Oblig_1)])
- * }
- * ```
- *
- * ### The type parameter `N`
- *
- * See explanation on `VtableImplData`.
- */
+/// Given the successful resolution of an obligation, the `Vtable`
+/// indicates where the vtable comes from. Note that while we call this
+/// a "vtable", it does not necessarily indicate dynamic dispatch at
+/// runtime. `Vtable` instances just tell the compiler where to find
+/// methods, but in generic code those methods are typically statically
+/// dispatched -- only when an object is constructed is a `Vtable`
+/// instance reified into an actual vtable.
+///
+/// For example, the vtable may be tied to a specific impl (case A),
+/// or it may be relative to some bound that is in scope (case B).
+///
+///
+/// ```
+/// impl<T:Clone> Clone<T> for Option<T> { ... } // Impl_1
+/// impl<T:Clone> Clone<T> for Box<T> { ... }    // Impl_2
+/// impl Clone for int { ... }             // Impl_3
+///
+/// fn foo<T:Clone>(concrete: Option<Box<int>>,
+///                 param: T,
+///                 mixed: Option<T>) {
+///
+///    // Case A: Vtable points at a specific impl. Only possible when
+///    // type is concretely known. If the impl itself has bounded
+///    // type parameters, Vtable will carry resolutions for those as well:
+///    concrete.clone(); // Vtable(Impl_1, [Vtable(Impl_2, [Vtable(Impl_3)])])
+///
+///    // Case B: Vtable must be provided by caller. This applies when
+///    // type is a type parameter.
+///    param.clone();    // VtableParam(Oblig_1)
+///
+///    // Case C: A mix of cases A and B.
+///    mixed.clone();    // Vtable(Impl_1, [VtableParam(Oblig_1)])
+/// }
+/// ```
+///
+/// ### The type parameter `N`
+///
+/// See explanation on `VtableImplData`.
 #[deriving(Show,Clone)]
 pub enum Vtable<'tcx, N> {
     /// Vtable identifying a particular impl.
@@ -191,18 +183,16 @@ pub enum Vtable<'tcx, N> {
     VtableBuiltin(VtableBuiltinData<N>),
 }
 
-/**
- * Identifies a particular impl in the source, along with a set of
- * substitutions from the impl's type/lifetime parameters. The
- * `nested` vector corresponds to the nested obligations attached to
- * the impl's type parameters.
- *
- * The type parameter `N` indicates the type used for "nested
- * obligations" that are required by the impl. During type check, this
- * is `Obligation`, as one might expect. During trans, however, this
- * is `()`, because trans only requires a shallow resolution of an
- * impl, and nested obligations are satisfied later.
- */
+/// Identifies a particular impl in the source, along with a set of
+/// substitutions from the impl's type/lifetime parameters. The
+/// `nested` vector corresponds to the nested obligations attached to
+/// the impl's type parameters.
+///
+/// The type parameter `N` indicates the type used for "nested
+/// obligations" that are required by the impl. During type check, this
+/// is `Obligation`, as one might expect. During trans, however, this
+/// is `()`, because trans only requires a shallow resolution of an
+/// impl, and nested obligations are satisfied later.
 #[deriving(Clone)]
 pub struct VtableImplData<'tcx, N> {
     pub impl_def_id: ast::DefId,
@@ -215,11 +205,9 @@ pub struct VtableBuiltinData<N> {
     pub nested: subst::VecPerParamSpace<N>
 }
 
-/**
- * A vtable provided as a parameter by the caller. For example, in a
- * function like `fn foo<T:Eq>(...)`, if the `eq()` method is invoked
- * on an instance of `T`, the vtable would be of type `VtableParam`.
- */
+/// A vtable provided as a parameter by the caller. For example, in a
+/// function like `fn foo<T:Eq>(...)`, if the `eq()` method is invoked
+/// on an instance of `T`, the vtable would be of type `VtableParam`.
 #[deriving(PartialEq,Eq,Clone)]
 pub struct VtableParamData<'tcx> {
     // In the above example, this would `Eq`
diff --git a/src/librustc/middle/traits/select.rs b/src/librustc/middle/traits/select.rs
index 9cb7023e1b59f..48a6f363ad8e6 100644
--- a/src/librustc/middle/traits/select.rs
+++ b/src/librustc/middle/traits/select.rs
@@ -102,32 +102,30 @@ pub enum MethodMatchedData {
     CoerciveMethodMatch(/* impl we matched */ ast::DefId)
 }
 
-/**
- * The selection process begins by considering all impls, where
- * clauses, and so forth that might resolve an obligation.  Sometimes
- * we'll be able to say definitively that (e.g.) an impl does not
- * apply to the obligation: perhaps it is defined for `uint` but the
- * obligation is for `int`. In that case, we drop the impl out of the
- * list.  But the other cases are considered *candidates*.
- *
- * Candidates can either be definitive or ambiguous. An ambiguous
- * candidate is one that might match or might not, depending on how
- * type variables wind up being resolved. This only occurs during inference.
- *
- * For selection to suceed, there must be exactly one non-ambiguous
- * candidate.  Usually, it is not possible to have more than one
- * definitive candidate, due to the coherence rules. However, there is
- * one case where it could occur: if there is a blanket impl for a
- * trait (that is, an impl applied to all T), and a type parameter
- * with a where clause. In that case, we can have a candidate from the
- * where clause and a second candidate from the impl. This is not a
- * problem because coherence guarantees us that the impl which would
- * be used to satisfy the where clause is the same one that we see
- * now. To resolve this issue, therefore, we ignore impls if we find a
- * matching where clause. Part of the reason for this is that where
- * clauses can give additional information (like, the types of output
- * parameters) that would have to be inferred from the impl.
- */
+/// The selection process begins by considering all impls, where
+/// clauses, and so forth that might resolve an obligation.  Sometimes
+/// we'll be able to say definitively that (e.g.) an impl does not
+/// apply to the obligation: perhaps it is defined for `uint` but the
+/// obligation is for `int`. In that case, we drop the impl out of the
+/// list.  But the other cases are considered *candidates*.
+///
+/// Candidates can either be definitive or ambiguous. An ambiguous
+/// candidate is one that might match or might not, depending on how
+/// type variables wind up being resolved. This only occurs during inference.
+///
+/// For selection to succeed, there must be exactly one non-ambiguous
+/// candidate.  Usually, it is not possible to have more than one
+/// definitive candidate, due to the coherence rules. However, there is
+/// one case where it could occur: if there is a blanket impl for a
+/// trait (that is, an impl applied to all T), and a type parameter
+/// with a where clause. In that case, we can have a candidate from the
+/// where clause and a second candidate from the impl. This is not a
+/// problem because coherence guarantees us that the impl which would
+/// be used to satisfy the where clause is the same one that we see
+/// now. To resolve this issue, therefore, we ignore impls if we find a
+/// matching where clause. Part of the reason for this is that where
+/// clauses can give additional information (like, the types of output
+/// parameters) that would have to be inferred from the impl.
 #[deriving(PartialEq,Eq,Show,Clone)]
 enum Candidate<'tcx> {
     BuiltinCandidate(ty::BuiltinBound),
diff --git a/src/librustc/middle/ty.rs b/src/librustc/middle/ty.rs
index 98b958749d55e..05b560a55aad2 100644
--- a/src/librustc/middle/ty.rs
+++ b/src/librustc/middle/ty.rs
@@ -743,18 +743,16 @@ impl<'tcx> FnOutput<'tcx> {
     }
 }
 
-/**
- * Signature of a function type, which I have arbitrarily
- * decided to use to refer to the input/output types.
- *
- * - `inputs` is the list of arguments and their modes.
- * - `output` is the return type.
- * - `variadic` indicates whether this is a varidic function. (only true for foreign fns)
- *
- * Note that a `FnSig` introduces a level of region binding, to
- * account for late-bound parameters that appear in the types of the
- * fn's arguments or the fn's return type.
- */
+/// Signature of a function type, which I have arbitrarily
+/// decided to use to refer to the input/output types.
+///
+/// - `inputs` is the list of arguments and their modes.
+/// - `output` is the return type.
+/// - `variadic` indicates whether this is a variadic function. (only true for foreign fns)
+///
+/// Note that a `FnSig` introduces a level of region binding, to
+/// account for late-bound parameters that appear in the types of the
+/// fn's arguments or the fn's return type.
 #[deriving(Clone, PartialEq, Eq, Hash)]
 pub struct FnSig<'tcx> {
     pub inputs: Vec<Ty<'tcx>>,
@@ -769,47 +767,45 @@ pub struct ParamTy {
     pub def_id: DefId
 }
 
-/**
- * A [De Bruijn index][dbi] is a standard means of representing
- * regions (and perhaps later types) in a higher-ranked setting. In
- * particular, imagine a type like this:
- *
- *     for<'a> fn(for<'b> fn(&'b int, &'a int), &'a char)
- *     ^          ^            |        |         |
- *     |          |            |        |         |
- *     |          +------------+ 1      |         |
- *     |                                |         |
- *     +--------------------------------+ 2       |
- *     |                                          |
- *     +------------------------------------------+ 1
- *
- * In this type, there are two binders (the outer fn and the inner
- * fn). We need to be able to determine, for any given region, which
- * fn type it is bound by, the inner or the outer one. There are
- * various ways you can do this, but a De Bruijn index is one of the
- * more convenient and has some nice properties. The basic idea is to
- * count the number of binders, inside out. Some examples should help
- * clarify what I mean.
- *
- * Let's start with the reference type `&'b int` that is the first
- * argument to the inner function. This region `'b` is assigned a De
- * Bruijn index of 1, meaning "the innermost binder" (in this case, a
- * fn). The region `'a` that appears in the second argument type (`&'a
- * int`) would then be assigned a De Bruijn index of 2, meaning "the
- * second-innermost binder". (These indices are written on the arrays
- * in the diagram).
- *
- * What is interesting is that De Bruijn index attached to a particular
- * variable will vary depending on where it appears. For example,
- * the final type `&'a char` also refers to the region `'a` declared on
- * the outermost fn. But this time, this reference is not nested within
- * any other binders (i.e., it is not an argument to the inner fn, but
- * rather the outer one). Therefore, in this case, it is assigned a
- * De Bruijn index of 1, because the innermost binder in that location
- * is the outer fn.
- *
- * [dbi]: http://en.wikipedia.org/wiki/De_Bruijn_index
- */
+/// A [De Bruijn index][dbi] is a standard means of representing
+/// regions (and perhaps later types) in a higher-ranked setting. In
+/// particular, imagine a type like this:
+///
+///     for<'a> fn(for<'b> fn(&'b int, &'a int), &'a char)
+///     ^          ^            |        |         |
+///     |          |            |        |         |
+///     |          +------------+ 1      |         |
+///     |                                |         |
+///     +--------------------------------+ 2       |
+///     |                                          |
+///     +------------------------------------------+ 1
+///
+/// In this type, there are two binders (the outer fn and the inner
+/// fn). We need to be able to determine, for any given region, which
+/// fn type it is bound by, the inner or the outer one. There are
+/// various ways you can do this, but a De Bruijn index is one of the
+/// more convenient and has some nice properties. The basic idea is to
+/// count the number of binders, inside out. Some examples should help
+/// clarify what I mean.
+///
+/// Let's start with the reference type `&'b int` that is the first
+/// argument to the inner function. This region `'b` is assigned a De
+/// Bruijn index of 1, meaning "the innermost binder" (in this case, a
+/// fn). The region `'a` that appears in the second argument type (`&'a
+/// int`) would then be assigned a De Bruijn index of 2, meaning "the
+/// second-innermost binder". (These indices are written on the arrows
+/// in the diagram).
+///
+/// What is interesting is that the De Bruijn index attached to a particular
+/// variable will vary depending on where it appears. For example,
+/// the final type `&'a char` also refers to the region `'a` declared on
+/// the outermost fn. But this time, this reference is not nested within
+/// any other binders (i.e., it is not an argument to the inner fn, but
+/// rather the outer one). Therefore, in this case, it is assigned a
+/// De Bruijn index of 1, because the innermost binder in that location
+/// is the outer fn.
+///
+/// [dbi]: http://en.wikipedia.org/wiki/De_Bruijn_index
 #[deriving(Clone, PartialEq, Eq, Hash, Encodable, Decodable, Show)]
 pub struct DebruijnIndex {
     // We maintain the invariant that this is never 0. So 1 indicates
@@ -856,11 +852,9 @@ pub enum Region {
     ReEmpty,
 }
 
-/**
- * Upvars do not get their own node-id. Instead, we use the pair of
- * the original var id (that is, the root variable that is referenced
- * by the upvar) and the id of the closure expression.
- */
+/// Upvars do not get their own node-id. Instead, we use the pair of
+/// the original var id (that is, the root variable that is referenced
+/// by the upvar) and the id of the closure expression.
 #[deriving(Clone, PartialEq, Eq, Hash, Show)]
 pub struct UpvarId {
     pub var_id: ast::NodeId,
@@ -913,55 +907,53 @@ pub enum BorrowKind {
     MutBorrow
 }
 
-/**
- * Information describing the borrowing of an upvar. This is computed
- * during `typeck`, specifically by `regionck`. The general idea is
- * that the compiler analyses treat closures like:
- *
- *     let closure: &'e fn() = || {
- *        x = 1;   // upvar x is assigned to
- *        use(y);  // upvar y is read
- *        foo(&z); // upvar z is borrowed immutably
- *     };
- *
- * as if they were "desugared" to something loosely like:
- *
- *     struct Vars<'x,'y,'z> { x: &'x mut int,
- *                             y: &'y const int,
- *                             z: &'z int }
- *     let closure: &'e fn() = {
- *         fn f(env: &Vars) {
- *             *env.x = 1;
- *             use(*env.y);
- *             foo(env.z);
- *         }
- *         let env: &'e mut Vars<'x,'y,'z> = &mut Vars { x: &'x mut x,
- *                                                       y: &'y const y,
- *                                                       z: &'z z };
- *         (env, f)
- *     };
- *
- * This is basically what happens at runtime. The closure is basically
- * an existentially quantified version of the `(env, f)` pair.
- *
- * This data structure indicates the region and mutability of a single
- * one of the `x...z` borrows.
- *
- * It may not be obvious why each borrowed variable gets its own
- * lifetime (in the desugared version of the example, these are indicated
- * by the lifetime parameters `'x`, `'y`, and `'z` in the `Vars` definition).
- * Each such lifetime must encompass the lifetime `'e` of the closure itself,
- * but need not be identical to it. The reason that this makes sense:
- *
- * - Callers are only permitted to invoke the closure, and hence to
- *   use the pointers, within the lifetime `'e`, so clearly `'e` must
- *   be a sublifetime of `'x...'z`.
- * - The closure creator knows which upvars were borrowed by the closure
- *   and thus `x...z` will be reserved for `'x...'z` respectively.
- * - Through mutation, the borrowed upvars can actually escape
- *   the closure, so sometimes it is necessary for them to be larger
- *   than the closure lifetime itself.
- */
+/// Information describing the borrowing of an upvar. This is computed
+/// during `typeck`, specifically by `regionck`. The general idea is
+/// that the compiler analyses treat closures like:
+///
+///     let closure: &'e fn() = || {
+///        x = 1;   // upvar x is assigned to
+///        use(y);  // upvar y is read
+///        foo(&z); // upvar z is borrowed immutably
+///     };
+///
+/// as if they were "desugared" to something loosely like:
+///
+///     struct Vars<'x,'y,'z> { x: &'x mut int,
+///                             y: &'y const int,
+///                             z: &'z int }
+///     let closure: &'e fn() = {
+///         fn f(env: &Vars) {
+///             *env.x = 1;
+///             use(*env.y);
+///             foo(env.z);
+///         }
+///         let env: &'e mut Vars<'x,'y,'z> = &mut Vars { x: &'x mut x,
+///                                                       y: &'y const y,
+///                                                       z: &'z z };
+///         (env, f)
+///     };
+///
+/// This is basically what happens at runtime. The closure is basically
+/// an existentially quantified version of the `(env, f)` pair.
+///
+/// This data structure indicates the region and mutability of a single
+/// one of the `x...z` borrows.
+///
+/// It may not be obvious why each borrowed variable gets its own
+/// lifetime (in the desugared version of the example, these are indicated
+/// by the lifetime parameters `'x`, `'y`, and `'z` in the `Vars` definition).
+/// Each such lifetime must encompass the lifetime `'e` of the closure itself,
+/// but need not be identical to it. The reason that this makes sense:
+///
+/// - Callers are only permitted to invoke the closure, and hence to
+///   use the pointers, within the lifetime `'e`, so clearly `'e` must
+///   be a sublifetime of `'x...'z`.
+/// - The closure creator knows which upvars were borrowed by the closure
+///   and thus `x...z` will be reserved for `'x...'z` respectively.
+/// - Through mutation, the borrowed upvars can actually escape
+///   the closure, so sometimes it is necessary for them to be larger
+///   than the closure lifetime itself.
 #[deriving(PartialEq, Clone, Encodable, Decodable, Show)]
 pub struct UpvarBorrow {
     pub kind: BorrowKind,
@@ -1111,37 +1103,33 @@ pub struct TyTrait<'tcx> {
     pub bounds: ExistentialBounds
 }
 
-/**
- * A complete reference to a trait. These take numerous guises in syntax,
- * but perhaps the most recognizable form is in a where clause:
- *
- *     T : Foo<U>
- *
- * This would be represented by a trait-reference where the def-id is the
- * def-id for the trait `Foo` and the substs defines `T` as parameter 0 in the
- * `SelfSpace` and `U` as parameter 0 in the `TypeSpace`.
- *
- * Trait references also appear in object types like `Foo<U>`, but in
- * that case the `Self` parameter is absent from the substitutions.
- *
- * Note that a `TraitRef` introduces a level of region binding, to
- * account for higher-ranked trait bounds like `T : for<'a> Foo<&'a
- * U>` or higher-ranked object types.
- */
+/// A complete reference to a trait. These take numerous guises in syntax,
+/// but perhaps the most recognizable form is in a where clause:
+///
+///     T : Foo<U>
+///
+/// This would be represented by a trait-reference where the def-id is the
+/// def-id for the trait `Foo` and the substs defines `T` as parameter 0 in the
+/// `SelfSpace` and `U` as parameter 0 in the `TypeSpace`.
+///
+/// Trait references also appear in object types like `Foo<U>`, but in
+/// that case the `Self` parameter is absent from the substitutions.
+///
+/// Note that a `TraitRef` introduces a level of region binding, to
+/// account for higher-ranked trait bounds like `T : for<'a> Foo<&'a
+/// U>` or higher-ranked object types.
 #[deriving(Clone, PartialEq, Eq, Hash, Show)]
 pub struct TraitRef<'tcx> {
     pub def_id: DefId,
     pub substs: Substs<'tcx>,
 }
 
-/**
- * Binder serves as a synthetic binder for lifetimes. It is used when
- * we wish to replace the escaping higher-ranked lifetimes in a type
- * or something else that is not itself a binder (this is because the
- * `replace_late_bound_regions` function replaces all lifetimes bound
- * by the binder supplied to it; but a type is not a binder, so you
- * must introduce an artificial one).
- */
+/// Binder serves as a synthetic binder for lifetimes. It is used when
+/// we wish to replace the escaping higher-ranked lifetimes in a type
+/// or something else that is not itself a binder (this is because the
+/// `replace_late_bound_regions` function replaces all lifetimes bound
+/// by the binder supplied to it; but a type is not a binder, so you
+/// must introduce an artificial one).
 #[deriving(Clone, PartialEq, Eq, Hash, Show)]
 pub struct Binder<T> {
     pub value: T
@@ -1425,27 +1413,25 @@ impl<'tcx> Generics<'tcx> {
     }
 }
 
-/**
- * Represents the bounds declared on a particular set of type
- * parameters.  Should eventually be generalized into a flag list of
- * where clauses.  You can obtain a `GenericBounds` list from a
- * `Generics` by using the `to_bounds` method. Note that this method
- * reflects an important semantic invariant of `GenericBounds`: while
- * the bounds in a `Generics` are expressed in terms of the bound type
- * parameters of the impl/trait/whatever, a `GenericBounds` instance
- * represented a set of bounds for some particular instantiation,
- * meaning that the generic parameters have been substituted with
- * their values.
- *
- * Example:
- *
- *     struct Foo<T,U:Bar<T>> { ... }
- *
- * Here, the `Generics` for `Foo` would contain a list of bounds like
- * `[[], [U:Bar<T>]]`.  Now if there were some particular reference
- * like `Foo<int,uint>`, then the `GenericBounds` would be `[[],
- * [uint:Bar<int>]]`.
- */
+/// Represents the bounds declared on a particular set of type
+/// parameters.  Should eventually be generalized into a flag list of
+/// where clauses.  You can obtain a `GenericBounds` list from a
+/// `Generics` by using the `to_bounds` method. Note that this method
+/// reflects an important semantic invariant of `GenericBounds`: while
+/// the bounds in a `Generics` are expressed in terms of the bound type
+/// parameters of the impl/trait/whatever, a `GenericBounds` instance
+/// represents a set of bounds for some particular instantiation,
+/// meaning that the generic parameters have been substituted with
+/// their values.
+///
+/// Example:
+///
+///     struct Foo<T,U:Bar<T>> { ... }
+///
+/// Here, the `Generics` for `Foo` would contain a list of bounds like
+/// `[[], [U:Bar<T>]]`.  Now if there were some particular reference
+/// like `Foo<int,uint>`, then the `GenericBounds` would be `[[],
+/// [uint:Bar<int>]]`.
 #[deriving(Clone, Show)]
 pub struct GenericBounds<'tcx> {
     pub types: VecPerParamSpace<ParamBounds<'tcx>>,
@@ -2455,18 +2441,16 @@ pub fn type_needs_unwind_cleanup<'tcx>(cx: &ctxt<'tcx>, ty: Ty<'tcx>) -> bool {
     }
 }
 
-/**
- * Type contents is how the type checker reasons about kinds.
- * They track what kinds of things are found within a type.  You can
- * think of them as kind of an "anti-kind".  They track the kinds of values
- * and thinks that are contained in types.  Having a larger contents for
- * a type tends to rule that type *out* from various kinds.  For example,
- * a type that contains a reference is not sendable.
- *
- * The reason we compute type contents and not kinds is that it is
- * easier for me (nmatsakis) to think about what is contained within
- * a type than to think about what is *not* contained within a type.
- */
+/// Type contents is how the type checker reasons about kinds.
+/// They track what kinds of things are found within a type.  You can
+/// think of them as kind of an "anti-kind".  They track the kinds of values
+/// and things that are contained in types.  Having a larger contents for
+/// a type tends to rule that type *out* from various kinds.  For example,
+/// a type that contains a reference is not sendable.
+///
+/// The reason we compute type contents and not kinds is that it is
+/// easier for me (nmatsakis) to think about what is contained within
+/// a type than to think about what is *not* contained within a type.
 #[deriving(Clone)]
 pub struct TypeContents {
     pub bits: u64
diff --git a/src/librustc/middle/ty_fold.rs b/src/librustc/middle/ty_fold.rs
index 913919fe774f3..8ba23c3393df2 100644
--- a/src/librustc/middle/ty_fold.rs
+++ b/src/librustc/middle/ty_fold.rs
@@ -701,9 +701,7 @@ pub fn super_fold_obligation<'tcx, T:TypeFolder<'tcx>>(this: &mut T,
 ///////////////////////////////////////////////////////////////////////////
 // Higher-ranked things
 
-/**
- * Designates a "binder" for late-bound regions.
- */
+/// Designates a "binder" for late-bound regions.
 pub trait HigherRankedFoldable<'tcx>: Repr<'tcx> {
     /// Folds the contents of `self`, ignoring the region binder created
     /// by `self`.
diff --git a/src/librustc/middle/typeck/check/mod.rs b/src/librustc/middle/typeck/check/mod.rs
index d38c5bc0ca9cf..facde399b3ffb 100644
--- a/src/librustc/middle/typeck/check/mod.rs
+++ b/src/librustc/middle/typeck/check/mod.rs
@@ -897,19 +897,17 @@ fn check_impl_items_against_trait<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
     }
 }
 
-/**
- * Checks that a method from an impl conforms to the signature of
- * the same method as declared in the trait.
- *
- * # Parameters
- *
- * - impl_generics: the generics declared on the impl itself (not the method!)
- * - impl_m: type of the method we are checking
- * - impl_m_span: span to use for reporting errors
- * - impl_m_body_id: id of the method body
- * - trait_m: the method in the trait
- * - trait_to_impl_substs: the substitutions used on the type of the trait
- */
+/// Checks that a method from an impl conforms to the signature of
+/// the same method as declared in the trait.
+///
+/// # Parameters
+///
+/// - impl_generics: the generics declared on the impl itself (not the method!)
+/// - impl_m: type of the method we are checking
+/// - impl_m_span: span to use for reporting errors
+/// - impl_m_body_id: id of the method body
+/// - trait_m: the method in the trait
+/// - trait_to_impl_substs: the substitutions used on the type of the trait
 fn compare_impl_method<'tcx>(tcx: &ty::ctxt<'tcx>,
                              impl_m: &ty::Method<'tcx>,
                              impl_m_span: Span,
diff --git a/src/librustc/middle/typeck/infer/region_inference/mod.rs b/src/librustc/middle/typeck/infer/region_inference/mod.rs
index 6a447d467cfce..d68ddb30baa97 100644
--- a/src/librustc/middle/typeck/infer/region_inference/mod.rs
+++ b/src/librustc/middle/typeck/infer/region_inference/mod.rs
@@ -694,13 +694,11 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
         }
     }
 
-    /**
-    This function performs the actual region resolution.  It must be
-    called after all constraints have been added.  It performs a
-    fixed-point iteration to find region values which satisfy all
-    constraints, assuming such values can be found; if they cannot,
-    errors are reported.
-    */
+    /// This function performs the actual region resolution.  It must be
+    /// called after all constraints have been added.  It performs a
+    /// fixed-point iteration to find region values which satisfy all
+    /// constraints, assuming such values can be found; if they cannot,
+    /// errors are reported.
     pub fn resolve_regions(&self) -> Vec<RegionResolutionError<'tcx>> {
         debug!("RegionVarBindings: resolve_regions()");
         let mut errors = vec!();
diff --git a/src/librustc/middle/typeck/infer/unify.rs b/src/librustc/middle/typeck/infer/unify.rs
index fcf042b3f8b80..8d3142902be46 100644
--- a/src/librustc/middle/typeck/infer/unify.rs
+++ b/src/librustc/middle/typeck/infer/unify.rs
@@ -22,85 +22,68 @@ use syntax::ast;
 use util::ppaux::Repr;
 use util::snapshot_vec as sv;
 
-/**
- * This trait is implemented by any type that can serve as a type
- * variable. We call such variables *unification keys*. For example,
- * this trait is implemented by `IntVid`, which represents integral
- * variables.
- *
- * Each key type has an associated value type `V`. For example, for
- * `IntVid`, this is `Option<IntVarValue>`, representing some
- * (possibly not yet known) sort of integer.
- *
- * Implementations of this trait are at the end of this file.
- */
+/// This trait is implemented by any type that can serve as a type
+/// variable. We call such variables *unification keys*. For example,
+/// this trait is implemented by `IntVid`, which represents integral
+/// variables.
+///
+/// Each key type has an associated value type `V`. For example, for
+/// `IntVid`, this is `Option<IntVarValue>`, representing some
+/// (possibly not yet known) sort of integer.
+///
+/// Implementations of this trait are at the end of this file.
 pub trait UnifyKey<'tcx, V> : Clone + Show + PartialEq + Repr<'tcx> {
     fn index(&self) -> uint;
 
     fn from_index(u: uint) -> Self;
 
-    /**
-     * Given an inference context, returns the unification table
-     * appropriate to this key type.
-     */
+    /// Given an inference context, returns the unification table
+    /// appropriate to this key type.
     fn unification_table<'v>(infcx: &'v InferCtxt)
                              -> &'v RefCell<UnificationTable<Self,V>>;
 
     fn tag(k: Option<Self>) -> &'static str;
 }
 
-/**
- * Trait for valid types that a type variable can be set to. Note that
- * this is typically not the end type that the value will take on, but
- * rather an `Option` wrapper (where `None` represents a variable
- * whose value is not yet set).
- *
- * Implementations of this trait are at the end of this file.
- */
+/// Trait for valid types that a type variable can be set to. Note that
+/// this is typically not the end type that the value will take on, but
+/// rather an `Option` wrapper (where `None` represents a variable
+/// whose value is not yet set).
+///
+/// Implementations of this trait are at the end of this file.
 pub trait UnifyValue<'tcx> : Clone + Repr<'tcx> + PartialEq {
 }
 
-/**
- * Value of a unification key. We implement Tarjan's union-find
- * algorithm: when two keys are unified, one of them is converted
- * into a "redirect" pointing at the other. These redirects form a
- * DAG: the roots of the DAG (nodes that are not redirected) are each
- * associated with a value of type `V` and a rank. The rank is used
- * to keep the DAG relatively balanced, which helps keep the running
- * time of the algorithm under control. For more information, see
- * <http://en.wikipedia.org/wiki/Disjoint-set_data_structure>.
- */
+/// Value of a unification key. We implement Tarjan's union-find
+/// algorithm: when two keys are unified, one of them is converted
+/// into a "redirect" pointing at the other. These redirects form a
+/// DAG: the roots of the DAG (nodes that are not redirected) are each
+/// associated with a value of type `V` and a rank. The rank is used
+/// to keep the DAG relatively balanced, which helps keep the running
+/// time of the algorithm under control. For more information, see
+/// <http://en.wikipedia.org/wiki/Disjoint-set_data_structure>.
 #[deriving(PartialEq,Clone)]
 pub enum VarValue<K,V> {
     Redirect(K),
     Root(V, uint),
 }
 
-/**
- * Table of unification keys and their values.
- */
+/// Table of unification keys and their values.
 pub struct UnificationTable<K,V> {
-    /**
-     * Indicates the current value of each key.
-     */
-
+    /// Indicates the current value of each key.
     values: sv::SnapshotVec<VarValue<K,V>,(),Delegate>,
 }
 
-/**
- * At any time, users may snapshot a unification table.  The changes
- * made during the snapshot may either be *committed* or *rolled back*.
- */
+/// At any time, users may snapshot a unification table.  The changes
+/// made during the snapshot may either be *committed* or *rolled back*.
 pub struct Snapshot<K> {
     // Link snapshot to the key type `K` of the table.
     marker: marker::CovariantType<K>,
     snapshot: sv::Snapshot,
 }
 
-/**
- * Internal type used to represent the result of a `get()` operation.
- * Conveys the current root and value of the key.
- */
+/// Internal type used to represent the result of a `get()` operation.
+/// Conveys the current root and value of the key.
 pub struct Node<K,V> {
     pub key: K,
     pub value: V,
@@ -121,28 +104,22 @@ impl<'tcx, V:PartialEq+Clone+Repr<'tcx>, K:UnifyKey<'tcx, V>> UnificationTable<K
         }
     }
 
-    /**
-     * Starts a new snapshot. Each snapshot must be either
-     * rolled back or committed in a "LIFO" (stack) order.
-     */
+    /// Starts a new snapshot. Each snapshot must be either
+    /// rolled back or committed in a "LIFO" (stack) order.
     pub fn snapshot(&mut self) -> Snapshot<K> {
         Snapshot { marker: marker::CovariantType::<K>,
                    snapshot: self.values.start_snapshot() }
     }
 
-    /**
-     * Reverses all changes since the last snapshot. Also
-     * removes any keys that have been created since then.
-     */
+    /// Reverses all changes since the last snapshot. Also
+    /// removes any keys that have been created since then.
     pub fn rollback_to(&mut self, snapshot: Snapshot<K>) {
         debug!("{}: rollback_to()", UnifyKey::tag(None::<K>));
         self.values.rollback_to(snapshot.snapshot);
     }
 
-    /**
-     * Commits all changes since the last snapshot. Of course, they
-     * can still be undone if there is a snapshot further out.
-     */
+    /// Commits all changes since the last snapshot. Of course, they
+    /// can still be undone if there is a snapshot further out.
     pub fn commit(&mut self, snapshot: Snapshot<K>) {
         debug!("{}: commit()", UnifyKey::tag(None::<K>));
         self.values.commit(snapshot.snapshot);
@@ -255,10 +232,8 @@ impl<K,V> sv::SnapshotVecDelegate<VarValue<K,V>,()> for Delegate {
 // Code to handle simple keys like ints, floats---anything that
 // doesn't have a subtyping relationship we need to worry about.
 
-/**
- * Indicates a type that does not have any kind of subtyping
- * relationship.
- */
+/// Indicates a type that does not have any kind of subtyping
+/// relationship.
 pub trait SimplyUnifiable<'tcx> : Clone + PartialEq + Repr<'tcx> {
     fn to_type(&self) -> Ty<'tcx>;
     fn to_type_err(expected_found<Self>) -> ty::type_err<'tcx>;
diff --git a/src/librustc/middle/typeck/mod.rs b/src/librustc/middle/typeck/mod.rs
index ad64537e1533e..501dfcb2e2d9e 100644
--- a/src/librustc/middle/typeck/mod.rs
+++ b/src/librustc/middle/typeck/mod.rs
@@ -150,20 +150,18 @@ pub struct MethodCallee<'tcx> {
     pub substs: subst::Substs<'tcx>
 }
 
-/**
- * With method calls, we store some extra information in
- * side tables (i.e method_map). We use
- * MethodCall as a key to index into these tables instead of
- * just directly using the expression's NodeId. The reason
- * for this being that we may apply adjustments (coercions)
- * with the resulting expression also needing to use the
- * side tables. The problem with this is that we don't
- * assign a separate NodeId to this new expression
- * and so it would clash with the base expression if both
- * needed to add to the side tables. Thus to disambiguate
- * we also keep track of whether there's an adjustment in
- * our key.
- */
+/// With method calls, we store some extra information in
+/// side tables (i.e. method_map). We use
+/// MethodCall as a key to index into these tables instead of
+/// just directly using the expression's NodeId. The reason
+/// for this being that we may apply adjustments (coercions)
+/// with the resulting expression also needing to use the
+/// side tables. The problem with this is that we don't
+/// assign a separate NodeId to this new expression
+/// and so it would clash with the base expression if both
+/// needed to add to the side tables. Thus to disambiguate
+/// we also keep track of whether there's an adjustment in
+/// our key.
 #[deriving(Clone, PartialEq, Eq, Hash, Show)]
 pub struct MethodCall {
     pub expr_id: ast::NodeId,
diff --git a/src/librustc/middle/typeck/variance.rs b/src/librustc/middle/typeck/variance.rs
index 51b610dccce38..745e6332918ad 100644
--- a/src/librustc/middle/typeck/variance.rs
+++ b/src/librustc/middle/typeck/variance.rs
@@ -219,18 +219,16 @@ pub fn infer_variance(tcx: &ty::ctxt) {
     tcx.variance_computed.set(true);
 }
 
-/**************************************************************************
- * Representing terms
- *
- * Terms are structured as a straightforward tree. Rather than rely on
- * GC, we allocate terms out of a bounded arena (the lifetime of this
- * arena is the lifetime 'a that is threaded around).
- *
- * We assign a unique index to each type/region parameter whose variance
- * is to be inferred. We refer to such variables as "inferreds". An
- * `InferredIndex` is a newtype'd int representing the index of such
- * a variable.
- */
+// Representing terms
+//
+// Terms are structured as a straightforward tree. Rather than rely on
+// GC, we allocate terms out of a bounded arena (the lifetime of this
+// arena is the lifetime 'a that is threaded around).
+//
+// We assign a unique index to each type/region parameter whose variance
+// is to be inferred. We refer to such variables as "inferreds". An
+// `InferredIndex` is a newtype'd int representing the index of such
+// a variable.
 
 type VarianceTermPtr<'a> = &'a VarianceTerm<'a>;
 
@@ -253,9 +251,7 @@ impl<'a> fmt::Show for VarianceTerm<'a> {
     }
 }
 
-/**************************************************************************
- * The first pass over the crate simply builds up the set of inferreds.
- */
+// The first pass over the crate simply builds up the set of inferreds.
 
 struct TermsContext<'a, 'tcx: 'a> {
     tcx: &'a ty::ctxt<'tcx>,
@@ -399,12 +395,10 @@ impl<'a, 'tcx, 'v> Visitor<'v> for TermsContext<'a, 'tcx> {
     }
 }
 
-/**************************************************************************
- * Constraint construction and representation
- *
- * The second pass over the AST determines the set of constraints.
- * We walk the set of items and, for each member, generate new constraints.
- */
+// Constraint construction and representation
+//
+// The second pass over the AST determines the set of constraints.
+// We walk the set of items and, for each member, generate new constraints.
 
 struct ConstraintContext<'a, 'tcx: 'a> {
     terms_cx: TermsContext<'a, 'tcx>,
@@ -944,14 +938,12 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
     }
 }
 
-/**************************************************************************
- * Constraint solving
- *
- * The final phase iterates over the constraints, refining the variance
- * for each inferred until a fixed point is reached. This will be the
- * optimal solution to the constraints. The final variance for each
- * inferred is then written into the `variance_map` in the tcx.
- */
+// Constraint solving
+//
+// The final phase iterates over the constraints, refining the variance
+// for each inferred until a fixed point is reached. This will be the
+// optimal solution to the constraints. The final variance for each
+// inferred is then written into the `variance_map` in the tcx.
 
 struct SolveContext<'a, 'tcx: 'a> {
     terms_cx: TermsContext<'a, 'tcx>,
@@ -1086,9 +1078,7 @@ impl<'a, 'tcx> SolveContext<'a, 'tcx> {
     }
 }
 
-/**************************************************************************
- * Miscellany transformations on variance
- */
+// Miscellany transformations on variance
 
 trait Xform {
     fn xform(self, v: Self) -> Self;
diff --git a/src/librustc/util/snapshot_vec.rs b/src/librustc/util/snapshot_vec.rs
index 91e67bbacc30f..f6c2694dd2321 100644
--- a/src/librustc/util/snapshot_vec.rs
+++ b/src/librustc/util/snapshot_vec.rs
@@ -177,10 +177,8 @@ impl<T,U,D:SnapshotVecDelegate<T,U>> SnapshotVec<T,U,D> {
         assert!(self.undo_log.len() == snapshot.length);
     }
 
-    /**
-     * Commits all changes since the last snapshot. Of course, they
-     * can still be undone if there is a snapshot further out.
-     */
+    /// Commits all changes since the last snapshot. Of course, they
+    /// can still be undone if there is a snapshot further out.
     pub fn commit(&mut self, snapshot: Snapshot) {
         debug!("commit({})", snapshot.length);
 
diff --git a/src/librustc_llvm/lib.rs b/src/librustc_llvm/lib.rs
index d67d0fa59ae28..23726664e3c6b 100644
--- a/src/librustc_llvm/lib.rs
+++ b/src/librustc_llvm/lib.rs
@@ -520,24 +520,24 @@ extern {
     pub fn LLVMGetModuleContext(M: ModuleRef) -> ContextRef;
     pub fn LLVMDisposeModule(M: ModuleRef);
 
-    /** Data layout. See Module::getDataLayout. */
+    /// Data layout. See Module::getDataLayout.
     pub fn LLVMGetDataLayout(M: ModuleRef) -> *const c_char;
     pub fn LLVMSetDataLayout(M: ModuleRef, Triple: *const c_char);
 
-    /** Target triple. See Module::getTargetTriple. */
+    /// Target triple. See Module::getTargetTriple.
     pub fn LLVMGetTarget(M: ModuleRef) -> *const c_char;
     pub fn LLVMSetTarget(M: ModuleRef, Triple: *const c_char);
 
-    /** See Module::dump. */
+    /// See Module::dump.
     pub fn LLVMDumpModule(M: ModuleRef);
 
-    /** See Module::setModuleInlineAsm. */
+    /// See Module::setModuleInlineAsm.
     pub fn LLVMSetModuleInlineAsm(M: ModuleRef, Asm: *const c_char);
 
-    /** See llvm::LLVMTypeKind::getTypeID. */
+    /// See llvm::LLVMTypeKind::getTypeID.
     pub fn LLVMGetTypeKind(Ty: TypeRef) -> TypeKind;
 
-    /** See llvm::LLVMType::getContext. */
+    /// See llvm::LLVMType::getContext.
     pub fn LLVMGetTypeContext(Ty: TypeRef) -> ContextRef;
 
     /* Operations on integer types */
@@ -1460,30 +1460,29 @@ extern {
     pub fn LLVMIsATerminatorInst(Inst: ValueRef) -> ValueRef;
     pub fn LLVMIsAStoreInst(Inst: ValueRef) -> ValueRef;
 
-    /** Writes a module to the specified path. Returns 0 on success. */
+    /// Writes a module to the specified path. Returns 0 on success.
     pub fn LLVMWriteBitcodeToFile(M: ModuleRef, Path: *const c_char) -> c_int;
 
-    /** Creates target data from a target layout string. */
+    /// Creates target data from a target layout string.
     pub fn LLVMCreateTargetData(StringRep: *const c_char) -> TargetDataRef;
     /// Adds the target data to the given pass manager. The pass manager
     /// references the target data only weakly.
     pub fn LLVMAddTargetData(TD: TargetDataRef, PM: PassManagerRef);
-    /** Number of bytes clobbered when doing a Store to *T. */
+    /// Number of bytes clobbered when doing a Store to *T.
     pub fn LLVMStoreSizeOfType(TD: TargetDataRef, Ty: TypeRef)
                                -> c_ulonglong;
 
-    /** Number of bytes clobbered when doing a Store to *T. */
+    /// Number of bytes clobbered when doing a Store to *T.
     pub fn LLVMSizeOfTypeInBits(TD: TargetDataRef, Ty: TypeRef)
                                 -> c_ulonglong;
 
-    /** Distance between successive elements in an array of T.
-    Includes ABI padding. */
+    /// Distance between successive elements in an array of T. Includes ABI padding.
     pub fn LLVMABISizeOfType(TD: TargetDataRef, Ty: TypeRef) -> c_ulonglong;
 
-    /** Returns the preferred alignment of a type. */
+    /// Returns the preferred alignment of a type.
     pub fn LLVMPreferredAlignmentOfType(TD: TargetDataRef, Ty: TypeRef)
                                         -> c_uint;
-    /** Returns the minimum alignment of a type. */
+    /// Returns the minimum alignment of a type.
     pub fn LLVMABIAlignmentOfType(TD: TargetDataRef, Ty: TypeRef)
                                   -> c_uint;
 
@@ -1494,41 +1493,39 @@ extern {
                                Element: c_uint)
                                -> c_ulonglong;
 
-    /**
-     * Returns the minimum alignment of a type when part of a call frame.
-     */
+    /// Returns the minimum alignment of a type when part of a call frame.
     pub fn LLVMCallFrameAlignmentOfType(TD: TargetDataRef, Ty: TypeRef)
                                         -> c_uint;
 
-    /** Disposes target data. */
+    /// Disposes target data.
     pub fn LLVMDisposeTargetData(TD: TargetDataRef);
 
-    /** Creates a pass manager. */
+    /// Creates a pass manager.
     pub fn LLVMCreatePassManager() -> PassManagerRef;
 
-    /** Creates a function-by-function pass manager */
+    /// Creates a function-by-function pass manager
     pub fn LLVMCreateFunctionPassManagerForModule(M: ModuleRef)
                                                   -> PassManagerRef;
 
-    /** Disposes a pass manager. */
+    /// Disposes a pass manager.
     pub fn LLVMDisposePassManager(PM: PassManagerRef);
 
-    /** Runs a pass manager on a module. */
+    /// Runs a pass manager on a module.
     pub fn LLVMRunPassManager(PM: PassManagerRef, M: ModuleRef) -> Bool;
 
-    /** Runs the function passes on the provided function. */
+    /// Runs the function passes on the provided function.
     pub fn LLVMRunFunctionPassManager(FPM: PassManagerRef, F: ValueRef)
                                       -> Bool;
 
-    /** Initializes all the function passes scheduled in the manager */
+    /// Initializes all the function passes scheduled in the manager
     pub fn LLVMInitializeFunctionPassManager(FPM: PassManagerRef) -> Bool;
 
-    /** Finalizes all the function passes scheduled in the manager */
+    /// Finalizes all the function passes scheduled in the manager
     pub fn LLVMFinalizeFunctionPassManager(FPM: PassManagerRef) -> Bool;
 
     pub fn LLVMInitializePasses();
 
-    /** Adds a verification pass. */
+    /// Adds a verification pass.
     pub fn LLVMAddVerifierPass(PM: PassManagerRef);
 
     pub fn LLVMAddGlobalOptimizerPass(PM: PassManagerRef);
@@ -1598,38 +1595,38 @@ extern {
         Internalize: Bool,
         RunInliner: Bool);
 
-    /** Destroys a memory buffer. */
+    /// Destroys a memory buffer.
     pub fn LLVMDisposeMemoryBuffer(MemBuf: MemoryBufferRef);
 
 
     /* Stuff that's in rustllvm/ because it's not upstream yet. */
 
-    /** Opens an object file. */
+    /// Opens an object file.
     pub fn LLVMCreateObjectFile(MemBuf: MemoryBufferRef) -> ObjectFileRef;
-    /** Closes an object file. */
+    /// Closes an object file.
     pub fn LLVMDisposeObjectFile(ObjFile: ObjectFileRef);
 
-    /** Enumerates the sections in an object file. */
+    /// Enumerates the sections in an object file.
     pub fn LLVMGetSections(ObjFile: ObjectFileRef) -> SectionIteratorRef;
-    /** Destroys a section iterator. */
+    /// Destroys a section iterator.
     pub fn LLVMDisposeSectionIterator(SI: SectionIteratorRef);
-    /** Returns true if the section iterator is at the end of the section
-    list: */
+    /// Returns true if the section iterator is at the end of the section
+    /// list.
     pub fn LLVMIsSectionIteratorAtEnd(ObjFile: ObjectFileRef,
                                       SI: SectionIteratorRef)
                                       -> Bool;
-    /** Moves the section iterator to point to the next section. */
+    /// Moves the section iterator to point to the next section.
     pub fn LLVMMoveToNextSection(SI: SectionIteratorRef);
-    /** Returns the current section size. */
+    /// Returns the current section size.
     pub fn LLVMGetSectionSize(SI: SectionIteratorRef) -> c_ulonglong;
-    /** Returns the current section contents as a string buffer. */
+    /// Returns the current section contents as a string buffer.
     pub fn LLVMGetSectionContents(SI: SectionIteratorRef) -> *const c_char;
 
-    /** Reads the given file and returns it as a memory buffer. Use
-    LLVMDisposeMemoryBuffer() to get rid of it. */
+    /// Reads the given file and returns it as a memory buffer. Use
+    /// LLVMDisposeMemoryBuffer() to get rid of it.
     pub fn LLVMRustCreateMemoryBufferWithContentsOfFile(Path: *const c_char)
                                                         -> MemoryBufferRef;
-    /** Borrows the contents of the memory buffer (doesn't copy it) */
+    /// Borrows the contents of the memory buffer (doesn't copy it).
     pub fn LLVMCreateMemoryBufferWithMemoryRange(InputData: *const c_char,
                                                  InputDataLength: size_t,
                                                  BufferName: *const c_char,
@@ -1643,8 +1640,7 @@ extern {
     pub fn LLVMIsMultithreaded() -> Bool;
     pub fn LLVMStartMultithreaded() -> Bool;
 
-    /** Returns a string describing the last error caused by an LLVMRust*
-    call. */
+    /// Returns a string describing the last error caused by an LLVMRust* call.
     pub fn LLVMRustGetLastError() -> *const c_char;
 
     /// Print the pass timings since static dtors aren't picking them up.
@@ -1662,10 +1658,10 @@ extern {
                                 Count: c_uint)
                                 -> ValueRef;
 
-    /** Enables LLVM debug output. */
+    /// Enables LLVM debug output.
     pub fn LLVMSetDebug(Enabled: c_int);
 
-    /** Prepares inline assembly. */
+    /// Prepares inline assembly.
     pub fn LLVMInlineAsm(Ty: TypeRef,
                          AsmString: *const c_char,
                          Constraints: *const c_char,
diff --git a/src/librustc_trans/driver/driver.rs b/src/librustc_trans/driver/driver.rs
index b3b68d0c22b38..a5709a0219ba9 100644
--- a/src/librustc_trans/driver/driver.rs
+++ b/src/librustc_trans/driver/driver.rs
@@ -98,10 +98,8 @@ pub fn compile_input(sess: Session,
     phase_6_link_output(&sess, &trans, &outputs);
 }
 
-/**
- * The name used for source code that doesn't originate in a file
- * (e.g. source from stdin or a string)
- */
+/// The name used for source code that doesn't originate in a file
+/// (e.g. source from stdin or a string)
 pub fn anon_src() -> String {
     "<anon>".to_string()
 }
diff --git a/src/librustc_trans/trans/_match.rs b/src/librustc_trans/trans/_match.rs
index 381220d587cbc..fcbe40acafc52 100644
--- a/src/librustc_trans/trans/_match.rs
+++ b/src/librustc_trans/trans/_match.rs
@@ -325,15 +325,14 @@ pub enum TransBindingMode {
     TrByRef,
 }
 
-/**
- * Information about a pattern binding:
- * - `llmatch` is a pointer to a stack slot.  The stack slot contains a
- *   pointer into the value being matched.  Hence, llmatch has type `T**`
- *   where `T` is the value being matched.
- * - `trmode` is the trans binding mode
- * - `id` is the node id of the binding
- * - `ty` is the Rust type of the binding */
- #[deriving(Clone)]
+/// Information about a pattern binding:
+/// - `llmatch` is a pointer to a stack slot.  The stack slot contains a
+///   pointer into the value being matched.  Hence, llmatch has type `T**`
+///   where `T` is the value being matched.
+/// - `trmode` is the trans binding mode
+/// - `id` is the node id of the binding
+/// - `ty` is the Rust type of the binding
+#[deriving(Clone)]
 pub struct BindingInfo<'tcx> {
     pub llmatch: ValueRef,
     pub trmode: TransBindingMode,
@@ -350,12 +349,10 @@ struct ArmData<'p, 'blk, 'tcx: 'blk> {
     bindings_map: BindingsMap<'tcx>
 }
 
-/**
- * Info about Match.
- * If all `pats` are matched then arm `data` will be executed.
- * As we proceed `bound_ptrs` are filled with pointers to values to be bound,
- * these pointers are stored in llmatch variables just before executing `data` arm.
- */
+/// Info about Match.
+/// If all `pats` are matched then arm `data` will be executed.
+/// As we proceed `bound_ptrs` are filled with pointers to values to be bound,
+/// these pointers are stored in llmatch variables just before executing `data` arm.
 struct Match<'a, 'p: 'a, 'blk: 'a, 'tcx: 'blk> {
     pats: Vec<&'p ast::Pat>,
     data: &'a ArmData<'p, 'blk, 'tcx>,
diff --git a/src/librustc_trans/trans/adt.rs b/src/librustc_trans/trans/adt.rs
index e7d1b9726a1b1..e2ce6ee41ee89 100644
--- a/src/librustc_trans/trans/adt.rs
+++ b/src/librustc_trans/trans/adt.rs
@@ -79,46 +79,38 @@ type Hint = attr::ReprAttr;
 pub enum Repr<'tcx> {
     /// C-like enums; basically an int.
     CEnum(IntType, Disr, Disr), // discriminant range (signedness based on the IntType)
-    /**
-     * Single-case variants, and structs/tuples/records.
-     *
-     * Structs with destructors need a dynamic destroyedness flag to
-     * avoid running the destructor too many times; this is included
-     * in the `Struct` if present.
-     */
+    /// Single-case variants, and structs/tuples/records.
+    ///
+    /// Structs with destructors need a dynamic destroyedness flag to
+    /// avoid running the destructor too many times; this is included
+    /// in the `Struct` if present.
     Univariant(Struct<'tcx>, bool),
-    /**
-     * General-case enums: for each case there is a struct, and they
-     * all start with a field for the discriminant.
-     *
-     * Types with destructors need a dynamic destroyedness flag to
-     * avoid running the destructor too many times; the last argument
-     * indicates whether such a flag is present.
-     */
+    /// General-case enums: for each case there is a struct, and they
+    /// all start with a field for the discriminant.
+    ///
+    /// Types with destructors need a dynamic destroyedness flag to
+    /// avoid running the destructor too many times; the last argument
+    /// indicates whether such a flag is present.
     General(IntType, Vec<Struct<'tcx>>, bool),
-    /**
-     * Two cases distinguished by a nullable pointer: the case with discriminant
-     * `nndiscr` must have single field which is known to be nonnull due to its type.
-     * The other case is known to be zero sized. Hence we represent the enum
-     * as simply a nullable pointer: if not null it indicates the `nndiscr` variant,
-     * otherwise it indicates the other case.
-     */
+    /// Two cases distinguished by a nullable pointer: the case with discriminant
+    /// `nndiscr` must have single field which is known to be nonnull due to its type.
+    /// The other case is known to be zero sized. Hence we represent the enum
+    /// as simply a nullable pointer: if not null it indicates the `nndiscr` variant,
+    /// otherwise it indicates the other case.
     RawNullablePointer {
         nndiscr: Disr,
         nnty: Ty<'tcx>,
         nullfields: Vec<Ty<'tcx>>
     },
-    /**
-     * Two cases distinguished by a nullable pointer: the case with discriminant
-     * `nndiscr` is represented by the struct `nonnull`, where the `ptrfield`th
-     * field is known to be nonnull due to its type; if that field is null, then
-     * it represents the other case, which is inhabited by at most one value
-     * (and all other fields are undefined/unused).
-     *
-     * For example, `std::option::Option` instantiated at a safe pointer type
-     * is represented such that `None` is a null pointer and `Some` is the
-     * identity function.
-     */
+    /// Two cases distinguished by a nullable pointer: the case with discriminant
+    /// `nndiscr` is represented by the struct `nonnull`, where the `ptrfield`th
+    /// field is known to be nonnull due to its type; if that field is null, then
+    /// it represents the other case, which is inhabited by at most one value
+    /// (and all other fields are undefined/unused).
+    ///
+    /// For example, `std::option::Option` instantiated at a safe pointer type
+    /// is represented such that `None` is a null pointer and `Some` is the
+    /// identity function.
     StructWrappedNullablePointer {
         nonnull: Struct<'tcx>,
         nndiscr: Disr,
@@ -139,11 +131,9 @@ pub struct Struct<'tcx> {
     pub fields: Vec<Ty<'tcx>>
 }
 
-/**
- * Convenience for `represent_type`.  There should probably be more or
- * these, for places in trans where the `Ty` isn't directly
- * available.
- */
+/// Convenience for `represent_type`.  There should probably be more of
+/// these, for places in trans where the `Ty` isn't directly
+/// available.
 pub fn represent_node<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                   node: ast::NodeId) -> Rc<Repr<'tcx>> {
     represent_type(bcx.ccx(), node_id_type(bcx, node))
@@ -514,16 +504,14 @@ fn ensure_enum_fits_in_address_space<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
 }
 
 
-/**
- * LLVM-level types are a little complicated.
- *
- * C-like enums need to be actual ints, not wrapped in a struct,
- * because that changes the ABI on some platforms (see issue #10308).
- *
- * For nominal types, in some cases, we need to use LLVM named structs
- * and fill in the actual contents in a second pass to prevent
- * unbounded recursion; see also the comments in `trans::type_of`.
- */
+/// LLVM-level types are a little complicated.
+///
+/// C-like enums need to be actual ints, not wrapped in a struct,
+/// because that changes the ABI on some platforms (see issue #10308).
+///
+/// For nominal types, in some cases, we need to use LLVM named structs
+/// and fill in the actual contents in a second pass to prevent
+/// unbounded recursion; see also the comments in `trans::type_of`.
 pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>) -> Type {
     generic_type_of(cx, r, None, false, false)
 }
@@ -620,12 +608,10 @@ fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, st: &Struct<'tcx>,
     }
 }
 
-/**
- * Obtain a representation of the discriminant sufficient to translate
- * destructuring; this may or may not involve the actual discriminant.
- *
- * This should ideally be less tightly tied to `_match`.
- */
+/// Obtain a representation of the discriminant sufficient to translate
+/// destructuring; this may or may not involve the actual discriminant.
+///
+/// This should ideally be less tightly tied to `_match`.
 pub fn trans_switch<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                 r: &Repr<'tcx>, scrutinee: ValueRef)
                                 -> (_match::BranchKind, Option<ValueRef>) {
@@ -713,12 +699,10 @@ fn load_discr(bcx: Block, ity: IntType, ptr: ValueRef, min: Disr, max: Disr)
     }
 }
 
-/**
- * Yield information about how to dispatch a case of the
- * discriminant-like value returned by `trans_switch`.
- *
- * This should ideally be less tightly tied to `_match`.
- */
+/// Yield information about how to dispatch a case of the
+/// discriminant-like value returned by `trans_switch`.
+///
+/// This should ideally be less tightly tied to `_match`.
 pub fn trans_case<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr, discr: Disr)
                               -> _match::OptResult<'blk, 'tcx> {
     match *r {
@@ -741,10 +725,8 @@ pub fn trans_case<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr, discr: Disr)
     }
 }
 
-/**
- * Set the discriminant for a new value of the given case of the given
- * representation.
- */
+/// Set the discriminant for a new value of the given case of the given
+/// representation.
 pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
                                    val: ValueRef, discr: Disr) {
     match *r {
@@ -799,10 +781,8 @@ fn assert_discr_in_range(ity: IntType, min: Disr, max: Disr, discr: Disr) {
     }
 }
 
-/**
- * The number of fields in a given case; for use when obtaining this
- * information from the type or definition is less convenient.
- */
+/// The number of fields in a given case; for use when obtaining this
+/// information from the type or definition is less convenient.
 pub fn num_args(r: &Repr, discr: Disr) -> uint {
     match *r {
         CEnum(..) => 0,
@@ -946,27 +926,25 @@ pub fn trans_drop_flag_ptr<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, r: &Repr<'tcx
     }
 }
 
-/**
- * Construct a constant value, suitable for initializing a
- * GlobalVariable, given a case and constant values for its fields.
- * Note that this may have a different LLVM type (and different
- * alignment!) from the representation's `type_of`, so it needs a
- * pointer cast before use.
- *
- * The LLVM type system does not directly support unions, and only
- * pointers can be bitcast, so a constant (and, by extension, the
- * GlobalVariable initialized by it) will have a type that can vary
- * depending on which case of an enum it is.
- *
- * To understand the alignment situation, consider `enum E { V64(u64),
- * V32(u32, u32) }` on Windows.  The type has 8-byte alignment to
- * accommodate the u64, but `V32(x, y)` would have LLVM type `{i32,
- * i32, i32}`, which is 4-byte aligned.
- *
- * Currently the returned value has the same size as the type, but
- * this could be changed in the future to avoid allocating unnecessary
- * space after values of shorter-than-maximum cases.
- */
+/// Construct a constant value, suitable for initializing a
+/// GlobalVariable, given a case and constant values for its fields.
+/// Note that this may have a different LLVM type (and different
+/// alignment!) from the representation's `type_of`, so it needs a
+/// pointer cast before use.
+///
+/// The LLVM type system does not directly support unions, and only
+/// pointers can be bitcast, so a constant (and, by extension, the
+/// GlobalVariable initialized by it) will have a type that can vary
+/// depending on which case of an enum it is.
+///
+/// To understand the alignment situation, consider `enum E { V64(u64),
+/// V32(u32, u32) }` on Windows.  The type has 8-byte alignment to
+/// accommodate the u64, but `V32(x, y)` would have LLVM type `{i32,
+/// i32, i32}`, which is 4-byte aligned.
+///
+/// Currently the returned value has the same size as the type, but
+/// this could be changed in the future to avoid allocating unnecessary
+/// space after values of shorter-than-maximum cases.
 pub fn trans_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>, discr: Disr,
                              vals: &[ValueRef]) -> ValueRef {
     match *r {
@@ -1019,9 +997,7 @@ pub fn trans_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>, discr
     }
 }
 
-/**
- * Compute struct field offsets relative to struct begin.
- */
+/// Compute struct field offsets relative to struct begin.
 fn compute_struct_field_offsets<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                           st: &Struct<'tcx>) -> Vec<u64> {
     let mut offsets = vec!();
@@ -1040,16 +1016,14 @@ fn compute_struct_field_offsets<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
     offsets
 }
 
-/**
- * Building structs is a little complicated, because we might need to
- * insert padding if a field's value is less aligned than its type.
- *
- * Continuing the example from `trans_const`, a value of type `(u32,
- * E)` should have the `E` at offset 8, but if that field's
- * initializer is 4-byte aligned then simply translating the tuple as
- * a two-element struct will locate it at offset 4, and accesses to it
- * will read the wrong memory.
- */
+/// Building structs is a little complicated, because we might need to
+/// insert padding if a field's value is less aligned than its type.
+///
+/// Continuing the example from `trans_const`, a value of type `(u32,
+/// E)` should have the `E` at offset 8, but if that field's
+/// initializer is 4-byte aligned then simply translating the tuple as
+/// a two-element struct will locate it at offset 4, and accesses to it
+/// will read the wrong memory.
 fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                 st: &Struct<'tcx>, vals: &[ValueRef])
                                 -> Vec<ValueRef> {
@@ -1130,13 +1104,11 @@ pub fn const_get_discrim(ccx: &CrateContext, r: &Repr, val: ValueRef)
     }
 }
 
-/**
- * Extract a field of a constant value, as appropriate for its
- * representation.
- *
- * (Not to be confused with `common::const_get_elt`, which operates on
- * raw LLVM-level structs and arrays.)
- */
+/// Extract a field of a constant value, as appropriate for its
+/// representation.
+///
+/// (Not to be confused with `common::const_get_elt`, which operates on
+/// raw LLVM-level structs and arrays.)
 pub fn const_get_field(ccx: &CrateContext, r: &Repr, val: ValueRef,
                        _discr: Disr, ix: uint) -> ValueRef {
     match *r {
diff --git a/src/librustc_trans/trans/basic_block.rs b/src/librustc_trans/trans/basic_block.rs
index 35afe7ac55e0d..328c8e616c40b 100644
--- a/src/librustc_trans/trans/basic_block.rs
+++ b/src/librustc_trans/trans/basic_block.rs
@@ -17,9 +17,7 @@ pub struct BasicBlock(pub BasicBlockRef);
 
 pub type Preds<'a> = Map<'a, Value, BasicBlock, Filter<'a, Value, Users>>;
 
-/**
- * Wrapper for LLVM BasicBlockRef
- */
+/// Wrapper for LLVM BasicBlockRef
 impl BasicBlock {
     pub fn get(&self) -> BasicBlockRef {
         let BasicBlock(v) = *self; v
diff --git a/src/librustc_trans/trans/datum.rs b/src/librustc_trans/trans/datum.rs
index 354a607220715..de7924b27eab0 100644
--- a/src/librustc_trans/trans/datum.rs
+++ b/src/librustc_trans/trans/datum.rs
@@ -31,12 +31,10 @@ use util::ppaux::{ty_to_string};
 use std::fmt;
 use syntax::ast;
 
-/**
- * A `Datum` encapsulates the result of evaluating an expression.  It
- * describes where the value is stored, what Rust type the value has,
- * whether it is addressed by reference, and so forth. Please refer
- * the section on datums in `doc.rs` for more details.
- */
+/// A `Datum` encapsulates the result of evaluating an expression.  It
+/// describes where the value is stored, what Rust type the value has,
+/// whether it is addressed by reference, and so forth. Please refer
+/// the section on datums in `doc.rs` for more details.
 #[deriving(Clone)]
 pub struct Datum<'tcx, K> {
     /// The llvm value.  This is either a pointer to the Rust value or
@@ -190,25 +188,19 @@ fn add_rvalue_clean<'a, 'tcx>(mode: RvalueMode,
 
 pub trait KindOps {
 
-    /**
-     * Take appropriate action after the value in `datum` has been
-     * stored to a new location.
-     */
+    /// Take appropriate action after the value in `datum` has been
+    /// stored to a new location.
     fn post_store<'blk, 'tcx>(&self,
                               bcx: Block<'blk, 'tcx>,
                               val: ValueRef,
                               ty: Ty<'tcx>)
                               -> Block<'blk, 'tcx>;
 
-    /**
-     * True if this mode is a reference mode, meaning that the datum's
-     * val field is a pointer to the actual value
-     */
+    /// True if this mode is a reference mode, meaning that the datum's
+    /// val field is a pointer to the actual value
     fn is_by_ref(&self) -> bool;
 
-    /**
-     * Converts to an Expr kind
-     */
+    /// Converts to an Expr kind
     fn to_expr_kind(self) -> Expr;
 
 }
@@ -361,14 +353,12 @@ impl<'tcx> Datum<'tcx, Rvalue> {
     }
 }
 
-/**
- * Methods suitable for "expr" datums that could be either lvalues or
- * rvalues. These include coercions into lvalues/rvalues but also a number
- * of more general operations. (Some of those operations could be moved to
- * the more general `impl<K> Datum<K>`, but it's convenient to have them
- * here since we can `match self.kind` rather than having to implement
- * generic methods in `KindOps`.)
- */
+/// Methods suitable for "expr" datums that could be either lvalues or
+/// rvalues. These include coercions into lvalues/rvalues but also a number
+/// of more general operations. (Some of those operations could be moved to
+/// the more general `impl<K> Datum<K>`, but it's convenient to have them
+/// here since we can `match self.kind` rather than having to implement
+/// generic methods in `KindOps`.)
 impl<'tcx> Datum<'tcx, Expr> {
     fn match_kind<R>(self,
                      if_lvalue: |Datum<'tcx, Lvalue>| -> R,
@@ -494,12 +484,10 @@ impl<'tcx> Datum<'tcx, Expr> {
 
 }
 
-/**
- * Methods suitable only for lvalues. These include the various
- * operations to extract components out of compound data structures,
- * such as extracting the field from a struct or a particular element
- * from an array.
- */
+/// Methods suitable only for lvalues. These include the various
+/// operations to extract components out of compound data structures,
+/// such as extracting the field from a struct or a particular element
+/// from an array.
 impl<'tcx> Datum<'tcx, Lvalue> {
     pub fn to_llref(self) -> ValueRef {
         /*!
@@ -542,9 +530,7 @@ impl<'tcx> Datum<'tcx, Lvalue> {
     }
 }
 
-/**
- * Generic methods applicable to any sort of datum.
- */
+/// Generic methods applicable to any sort of datum.
 impl<'tcx, K: KindOps + fmt::Show> Datum<'tcx, K> {
     pub fn new(val: ValueRef, ty: Ty<'tcx>, kind: K) -> Datum<'tcx, K> {
         Datum { val: val, ty: ty, kind: kind }
diff --git a/src/librustc_trans/trans/expr.rs b/src/librustc_trans/trans/expr.rs
index 482b318e37202..b39029d3b94c5 100644
--- a/src/librustc_trans/trans/expr.rs
+++ b/src/librustc_trans/trans/expr.rs
@@ -1377,14 +1377,12 @@ fn trans_struct<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
     })
 }
 
-/**
- * Information that `trans_adt` needs in order to fill in the fields
- * of a struct copied from a base struct (e.g., from an expression
- * like `Foo { a: b, ..base }`.
- *
- * Note that `fields` may be empty; the base expression must always be
- * evaluated for side-effects.
- */
+/// Information that `trans_adt` needs in order to fill in the fields
+/// of a struct copied from a base struct (e.g., from an expression
+/// like `Foo { a: b, ..base }`.
+///
+/// Note that `fields` may be empty; the base expression must always be
+/// evaluated for side-effects.
 pub struct StructBaseInfo<'a, 'tcx> {
     /// The base expression; will be evaluated after all explicit fields.
     expr: &'a ast::Expr,
@@ -1392,16 +1390,14 @@ pub struct StructBaseInfo<'a, 'tcx> {
     fields: Vec<(uint, Ty<'tcx>)>
 }
 
-/**
- * Constructs an ADT instance:
- *
- * - `fields` should be a list of field indices paired with the
- * expression to store into that field.  The initializers will be
- * evaluated in the order specified by `fields`.
- *
- * - `optbase` contains information on the base struct (if any) from
- * which remaining fields are copied; see comments on `StructBaseInfo`.
- */
+/// Constructs an ADT instance:
+///
+/// - `fields` should be a list of field indices paired with the
+/// expression to store into that field.  The initializers will be
+/// evaluated in the order specified by `fields`.
+///
+/// - `optbase` contains information on the base struct (if any) from
+/// which remaining fields are copied; see comments on `StructBaseInfo`.
 pub fn trans_adt<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                                  ty: Ty<'tcx>,
                                  discr: ty::Disr,
diff --git a/src/librustc_trans/trans/meth.rs b/src/librustc_trans/trans/meth.rs
index 0ff7f3ee71cc6..2c0e4d31d97d0 100644
--- a/src/librustc_trans/trans/meth.rs
+++ b/src/librustc_trans/trans/meth.rs
@@ -46,12 +46,10 @@ use syntax::codemap::DUMMY_SP;
 // drop_glue pointer, size, align.
 static VTABLE_OFFSET: uint = 3;
 
-/**
-The main "translation" pass for methods.  Generates code
-for non-monomorphized methods only.  Other methods will
-be generated once they are invoked with specific type parameters,
-see `trans::base::lval_static_fn()` or `trans::base::monomorphic_fn()`.
-*/
+/// The main "translation" pass for methods.  Generates code
+/// for non-monomorphized methods only.  Other methods will
+/// be generated once they are invoked with specific type parameters,
+/// see `trans::base::lval_static_fn()` or `trans::base::monomorphic_fn()`.
 pub fn trans_impl(ccx: &CrateContext,
                   name: ast::Ident,
                   impl_items: &[ast::ImplItem],
diff --git a/src/librustc_trans/trans/type_.rs b/src/librustc_trans/trans/type_.rs
index 0662909e40f5a..8bff7602ddcea 100644
--- a/src/librustc_trans/trans/type_.rs
+++ b/src/librustc_trans/trans/type_.rs
@@ -35,9 +35,7 @@ macro_rules! ty (
     ($e:expr) => ( Type::from_ref(unsafe { $e }))
 )
 
-/**
- * Wrapper for LLVM TypeRef
- */
+/// Wrapper for LLVM TypeRef
 impl Type {
     #[inline(always)]
     pub fn from_ref(r: TypeRef) -> Type {
diff --git a/src/librustc_trans/trans/value.rs b/src/librustc_trans/trans/value.rs
index 8d74275c92ac7..33ea239412af6 100644
--- a/src/librustc_trans/trans/value.rs
+++ b/src/librustc_trans/trans/value.rs
@@ -25,9 +25,7 @@ macro_rules! opt_val ( ($e:expr) => (
     }
 ))
 
-/**
- * Wrapper for LLVM ValueRef
- */
+/// Wrapper for LLVM ValueRef
 impl Value {
     /// Returns the native ValueRef
     pub fn get(&self) -> ValueRef {
@@ -127,9 +125,7 @@ impl Value {
 
 pub struct Use(UseRef);
 
-/**
- * Wrapper for LLVM UseRef
- */
+/// Wrapper for LLVM UseRef
 impl Use {
     pub fn get(&self) -> UseRef {
         let Use(v) = *self; v
diff --git a/src/libserialize/base64.rs b/src/libserialize/base64.rs
index 41feee8257f9c..b7d8885a5f993 100644
--- a/src/libserialize/base64.rs
+++ b/src/libserialize/base64.rs
@@ -64,21 +64,19 @@ pub trait ToBase64 for Sized? {
 }
 
 impl ToBase64 for [u8] {
-    /**
-     * Turn a vector of `u8` bytes into a base64 string.
-     *
-     * # Example
-     *
-     * ```rust
-     * extern crate serialize;
-     * use serialize::base64::{ToBase64, STANDARD};
-     *
-     * fn main () {
-     *     let str = [52,32].to_base64(STANDARD);
-     *     println!("base 64 output: {}", str);
-     * }
-     * ```
-     */
+    /// Turn a vector of `u8` bytes into a base64 string.
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// extern crate serialize;
+    /// use serialize::base64::{ToBase64, STANDARD};
+    ///
+    /// fn main () {
+    ///     let str = [52,32].to_base64(STANDARD);
+    ///     println!("base 64 output: {}", str);
+    /// }
+    /// ```
     fn to_base64(&self, config: Config) -> String {
         let bytes = match config.char_set {
             Standard => STANDARD_CHARS,
@@ -194,34 +192,32 @@ impl error::Error for FromBase64Error {
 }
 
 impl FromBase64 for str {
-    /**
-     * Convert any base64 encoded string (literal, `@`, `&`, or `~`)
-     * to the byte values it encodes.
-     *
-     * You can use the `String::from_utf8` function to turn a `Vec<u8>` into a
-     * string with characters corresponding to those values.
-     *
-     * # Example
-     *
-     * This converts a string literal to base64 and back.
-     *
-     * ```rust
-     * extern crate serialize;
-     * use serialize::base64::{ToBase64, FromBase64, STANDARD};
-     *
-     * fn main () {
-     *     let hello_str = b"Hello, World".to_base64(STANDARD);
-     *     println!("base64 output: {}", hello_str);
-     *     let res = hello_str.as_slice().from_base64();
-     *     if res.is_ok() {
-     *       let opt_bytes = String::from_utf8(res.unwrap());
-     *       if opt_bytes.is_ok() {
-     *         println!("decoded from base64: {}", opt_bytes.unwrap());
-     *       }
-     *     }
-     * }
-     * ```
-     */
+    /// Convert any base64 encoded string (literal, `@`, `&`, or `~`)
+    /// to the byte values it encodes.
+    ///
+    /// You can use the `String::from_utf8` function to turn a `Vec<u8>` into a
+    /// string with characters corresponding to those values.
+    ///
+    /// # Example
+    ///
+    /// This converts a string literal to base64 and back.
+    ///
+    /// ```rust
+    /// extern crate serialize;
+    /// use serialize::base64::{ToBase64, FromBase64, STANDARD};
+    ///
+    /// fn main () {
+    ///     let hello_str = b"Hello, World".to_base64(STANDARD);
+    ///     println!("base64 output: {}", hello_str);
+    ///     let res = hello_str.as_slice().from_base64();
+    ///     if res.is_ok() {
+    ///       let opt_bytes = String::from_utf8(res.unwrap());
+    ///       if opt_bytes.is_ok() {
+    ///         println!("decoded from base64: {}", opt_bytes.unwrap());
+    ///       }
+    ///     }
+    /// }
+    /// ```
     #[inline]
     fn from_base64(&self) -> Result<Vec<u8>, FromBase64Error> {
         self.as_bytes().from_base64()
diff --git a/src/libserialize/hex.rs b/src/libserialize/hex.rs
index 78859d6778d70..2a3c410ba7c58 100644
--- a/src/libserialize/hex.rs
+++ b/src/libserialize/hex.rs
@@ -27,21 +27,19 @@ pub trait ToHex for Sized? {
 static CHARS: &'static[u8] = b"0123456789abcdef";
 
 impl ToHex for [u8] {
-    /**
-     * Turn a vector of `u8` bytes into a hexadecimal string.
-     *
-     * # Example
-     *
-     * ```rust
-     * extern crate serialize;
-     * use serialize::hex::ToHex;
-     *
-     * fn main () {
-     *     let str = [52,32].to_hex();
-     *     println!("{}", str);
-     * }
-     * ```
-     */
+    /// Turn a vector of `u8` bytes into a hexadecimal string.
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// extern crate serialize;
+    /// use serialize::hex::ToHex;
+    ///
+    /// fn main () {
+    ///     let str = [52,32].to_hex();
+    ///     println!("{}", str);
+    /// }
+    /// ```
     fn to_hex(&self) -> String {
         let mut v = Vec::with_capacity(self.len() * 2);
         for &byte in self.iter() {
@@ -95,31 +93,29 @@ impl error::Error for FromHexError {
 
 
 impl FromHex for str {
-    /**
-     * Convert any hexadecimal encoded string (literal, `@`, `&`, or `~`)
-     * to the byte values it encodes.
-     *
-     * You can use the `String::from_utf8` function to turn a
-     * `Vec<u8>` into a string with characters corresponding to those values.
-     *
-     * # Example
-     *
-     * This converts a string literal to hexadecimal and back.
-     *
-     * ```rust
-     * extern crate serialize;
-     * use serialize::hex::{FromHex, ToHex};
-     *
-     * fn main () {
-     *     let hello_str = "Hello, World".as_bytes().to_hex();
-     *     println!("{}", hello_str);
-     *     let bytes = hello_str.as_slice().from_hex().unwrap();
-     *     println!("{}", bytes);
-     *     let result_str = String::from_utf8(bytes).unwrap();
-     *     println!("{}", result_str);
-     * }
-     * ```
-     */
+    /// Convert any hexadecimal encoded string (literal, `@`, `&`, or `~`)
+    /// to the byte values it encodes.
+    ///
+    /// You can use the `String::from_utf8` function to turn a
+    /// `Vec<u8>` into a string with characters corresponding to those values.
+    ///
+    /// # Example
+    ///
+    /// This converts a string literal to hexadecimal and back.
+    ///
+    /// ```rust
+    /// extern crate serialize;
+    /// use serialize::hex::{FromHex, ToHex};
+    ///
+    /// fn main () {
+    ///     let hello_str = "Hello, World".as_bytes().to_hex();
+    ///     println!("{}", hello_str);
+    ///     let bytes = hello_str.as_slice().from_hex().unwrap();
+    ///     println!("{}", bytes);
+    ///     let result_str = String::from_utf8(bytes).unwrap();
+    ///     println!("{}", result_str);
+    /// }
+    /// ```
     fn from_hex(&self) -> Result<Vec<u8>, FromHexError> {
         // This may be an overestimate if there is any whitespace
         let mut b = Vec::with_capacity(self.len() / 2);
diff --git a/src/libstd/num/strconv.rs b/src/libstd/num/strconv.rs
index 649298d9c0818..be6e387ad836d 100644
--- a/src/libstd/num/strconv.rs
+++ b/src/libstd/num/strconv.rs
@@ -67,31 +67,32 @@ pub enum SignFormat {
     SignAll,
 }
 
-/**
- * Converts an integral number to its string representation as a byte vector.
- * This is meant to be a common base implementation for all integral string
- * conversion functions like `to_string()` or `to_str_radix()`.
- *
- * # Arguments
- * - `num`           - The number to convert. Accepts any number that
- *                     implements the numeric traits.
- * - `radix`         - Base to use. Accepts only the values 2-36.
- * - `sign`          - How to emit the sign. Options are:
- *     - `SignNone`: No sign at all. Basically emits `abs(num)`.
- *     - `SignNeg`:  Only `-` on negative values.
- *     - `SignAll`:  Both `+` on positive, and `-` on negative numbers.
- * - `f`             - a callback which will be invoked for each ascii character
- *                     which composes the string representation of this integer
- *
- * # Return value
- * A tuple containing the byte vector, and a boolean flag indicating
- * whether it represents a special value like `inf`, `-inf`, `NaN` or not.
- * It returns a tuple because there can be ambiguity between a special value
- * and a number representation at higher bases.
- *
- * # Panics
- * - Panics if `radix` < 2 or `radix` > 36.
- */
+/// Converts an integral number to its string representation as a byte vector.
+/// This is meant to be a common base implementation for all integral string
+/// conversion functions like `to_string()` or `to_str_radix()`.
+///
+/// # Arguments
+///
+/// - `num`           - The number to convert. Accepts any number that
+///                     implements the numeric traits.
+/// - `radix`         - Base to use. Accepts only the values 2-36.
+/// - `sign`          - How to emit the sign. Options are:
+///     - `SignNone`: No sign at all. Basically emits `abs(num)`.
+///     - `SignNeg`:  Only `-` on negative values.
+///     - `SignAll`:  Both `+` on positive, and `-` on negative numbers.
+/// - `f`             - a callback which will be invoked for each ascii character
+///                     which composes the string representation of this integer
+///
+/// # Return value
+///
+/// A tuple containing the byte vector, and a boolean flag indicating
+/// whether it represents a special value like `inf`, `-inf`, `NaN` or not.
+/// It returns a tuple because there can be ambiguity between a special value
+/// and a number representation at higher bases.
+///
+/// # Panics
+///
+/// - Panics if `radix` < 2 or `radix` > 36.
 fn int_to_str_bytes_common<T: Int>(num: T, radix: uint, sign: SignFormat, f: |u8|) {
     assert!(2 <= radix && radix <= 36);
 
diff --git a/src/libstd/os.rs b/src/libstd/os.rs
index 9b50361ec1f0d..c6554d2ed58e2 100644
--- a/src/libstd/os.rs
+++ b/src/libstd/os.rs
@@ -788,18 +788,16 @@ pub fn homedir() -> Option<Path> {
     _homedir()
 }
 
-/**
- * Returns the path to a temporary directory.
- *
- * On Unix, returns the value of the 'TMPDIR' environment variable if it is
- * set, otherwise for non-Android it returns '/tmp'. If Android, since there
- * is no global temporary folder (it is usually allocated per-app), we return
- * '/data/local/tmp'.
- *
- * On Windows, returns the value of, in order, the 'TMP', 'TEMP',
- * 'USERPROFILE' environment variable  if any are set and not the empty
- * string. Otherwise, tmpdir returns the path to the Windows directory.
- */
+/// Returns the path to a temporary directory.
+///
+/// On Unix, returns the value of the 'TMPDIR' environment variable if it is
+/// set, otherwise for non-Android it returns '/tmp'. If Android, since there
+/// is no global temporary folder (it is usually allocated per-app), we return
+/// '/data/local/tmp'.
+///
+/// On Windows, returns the value of, in order, the 'TMP', 'TEMP',
+/// 'USERPROFILE' environment variable if any are set and not the empty
+/// string. Otherwise, tmpdir returns the path to the Windows directory.
 pub fn tmpdir() -> Path {
     return lookup();
 
@@ -933,16 +931,14 @@ pub fn last_os_error() -> String {
 
 static EXIT_STATUS: AtomicInt = INIT_ATOMIC_INT;
 
-/**
- * Sets the process exit code
- *
- * Sets the exit code returned by the process if all supervised tasks
- * terminate successfully (without panicking). If the current root task panics
- * and is supervised by the scheduler then any user-specified exit status is
- * ignored and the process exits with the default panic status.
- *
- * Note that this is not synchronized against modifications of other threads.
- */
+/// Sets the process exit code
+///
+/// Sets the exit code returned by the process if all supervised tasks
+/// terminate successfully (without panicking). If the current root task panics
+/// and is supervised by the scheduler then any user-specified exit status is
+/// ignored and the process exits with the default panic status.
+///
+/// Note that this is not synchronized against modifications of other threads.
 pub fn set_exit_status(code: int) {
     EXIT_STATUS.store(code, SeqCst)
 }
@@ -963,11 +959,9 @@ unsafe fn load_argc_and_argv(argc: int,
     })
 }
 
-/**
- * Returns the command line arguments
- *
- * Returns a list of the command line arguments.
- */
+/// Returns the command line arguments
+///
+/// Returns a list of the command line arguments.
 #[cfg(target_os = "macos")]
 fn real_args_as_bytes() -> Vec<Vec<u8>> {
     unsafe {
diff --git a/src/libstd/sync/lock.rs b/src/libstd/sync/lock.rs
index 6b63f7ae61881..77f5b01351908 100644
--- a/src/libstd/sync/lock.rs
+++ b/src/libstd/sync/lock.rs
@@ -29,9 +29,7 @@ use rustrt::task::Task;
 
 use super::raw;
 
-/****************************************************************************
- * Poisoning helpers
- ****************************************************************************/
+// Poisoning helpers
 
 struct PoisonOnFail<'a> {
     flag: &'a mut bool,
@@ -67,9 +65,7 @@ impl<'a> Drop for PoisonOnFail<'a> {
     }
 }
 
-/****************************************************************************
- * Condvar
- ****************************************************************************/
+// Condvar
 
 enum Inner<'a> {
     InnerMutex(raw::MutexGuard<'a>),
@@ -147,10 +143,6 @@ impl<'a> Condvar<'a> {
     }
 }
 
-/****************************************************************************
- * Mutex
- ****************************************************************************/
-
 /// A wrapper type which provides synchronized access to the underlying data, of
 /// type `T`. A mutex always provides exclusive access, and concurrent requests
 /// will block while the mutex is already locked.
@@ -249,10 +241,6 @@ impl<'a, T: Send> DerefMut<T> for MutexGuard<'a, T> {
     fn deref_mut<'a>(&'a mut self) -> &'a mut T { &mut *self._data }
 }
 
-/****************************************************************************
- * R/W lock protected lock
- ****************************************************************************/
-
 /// A dual-mode reader-writer lock. The data can be accessed mutably or
 /// immutably, and immutably-accessing tasks may run concurrently.
 ///
@@ -387,10 +375,6 @@ impl<'a, T: Send + Sync> DerefMut<T> for RWLockWriteGuard<'a, T> {
     fn deref_mut<'a>(&'a mut self) -> &'a mut T { &mut *self._data }
 }
 
-/****************************************************************************
- * Barrier
- ****************************************************************************/
-
 /// A barrier enables multiple tasks to synchronize the beginning
 /// of some computation.
 ///
@@ -452,10 +436,6 @@ impl Barrier {
     }
 }
 
-/****************************************************************************
- * Tests
- ****************************************************************************/
-
 #[cfg(test)]
 mod tests {
     use prelude::*;
@@ -795,9 +775,6 @@ mod tests {
         }
     }
 
-    /************************************************************************
-     * Barrier tests
-     ************************************************************************/
     #[test]
     fn test_barrier() {
         let barrier = Arc::new(Barrier::new(10));
diff --git a/src/libstd/sync/raw.rs b/src/libstd/sync/raw.rs
index ff3f2c9462c83..47580a115131b 100644
--- a/src/libstd/sync/raw.rs
+++ b/src/libstd/sync/raw.rs
@@ -32,10 +32,6 @@ use vec::Vec;
 use super::mutex;
 use comm::{Receiver, Sender, channel};
 
-/****************************************************************************
- * Internals
- ****************************************************************************/
-
 // Each waiting task receives on one of these.
 type WaitEnd = Receiver<()>;
 type SignalEnd = Sender<()>;
@@ -353,10 +349,6 @@ struct SemCondGuard<'a> {
     cvar: Condvar<'a>,
 }
 
-/****************************************************************************
- * Semaphores
- ****************************************************************************/
-
 /// A counting, blocking, bounded-waiting semaphore.
 pub struct Semaphore {
     sem: Sem<()>,
@@ -394,10 +386,6 @@ impl Semaphore {
     }
 }
 
-/****************************************************************************
- * Mutexes
- ****************************************************************************/
-
 /// A blocking, bounded-waiting, mutual exclusion lock with an associated
 /// FIFO condition variable.
 ///
@@ -441,10 +429,6 @@ impl Mutex {
     }
 }
 
-/****************************************************************************
- * Reader-writer locks
- ****************************************************************************/
-
 // NB: Wikipedia - Readers-writers_problem#The_third_readers-writers_problem
 
 /// A blocking, no-starvation, reader-writer lock with an associated condvar.
@@ -618,10 +602,6 @@ impl<'a> Drop for RWLockReadGuard<'a> {
     }
 }
 
-/****************************************************************************
- * Tests
- ****************************************************************************/
-
 #[cfg(test)]
 mod tests {
     pub use self::RWLockMode::*;
@@ -634,9 +614,6 @@ mod tests {
     use result;
     use task;
 
-    /************************************************************************
-     * Semaphore tests
-     ************************************************************************/
     #[test]
     fn test_sem_acquire_release() {
         let s = Semaphore::new(1);
@@ -644,16 +621,19 @@ mod tests {
         s.release();
         s.acquire();
     }
+
     #[test]
     fn test_sem_basic() {
         let s = Semaphore::new(1);
         let _g = s.access();
     }
+
     #[test]
     #[should_fail]
     fn test_sem_basic2() {
         Semaphore::new(-1);
     }
+
     #[test]
     fn test_sem_as_mutex() {
         let s = Arc::new(Semaphore::new(1));
@@ -665,6 +645,7 @@ mod tests {
         let _g = s.access();
         for _ in range(0u, 5) { task::deschedule(); }
     }
+
     #[test]
     fn test_sem_as_cvar() {
         /* Child waits and parent signals */
@@ -691,6 +672,7 @@ mod tests {
         s.acquire();
         tx.send(());
     }
+
     #[test]
     fn test_sem_multi_resource() {
         // Parent and child both get in the critical section at the same
@@ -708,6 +690,7 @@ mod tests {
         tx2.send(());
         let _ = rx1.recv();
     }
+
     #[test]
     fn test_sem_runtime_friendly_blocking() {
         // Force the runtime to schedule two threads on the same sched_loop.
@@ -727,9 +710,7 @@ mod tests {
         }
         rx.recv(); // wait for child to be done
     }
-    /************************************************************************
-     * Mutex tests
-     ************************************************************************/
+
     #[test]
     fn test_mutex_lock() {
         // Unsafely achieve shared state, and do the textbook
@@ -761,6 +742,7 @@ mod tests {
             }
         }
     }
+
     #[test]
     fn test_mutex_cond_wait() {
         let m = Arc::new(Mutex::new());
@@ -820,14 +802,17 @@ mod tests {
         // wait until all children wake up
         for rx in rxs.iter_mut() { rx.recv(); }
     }
+
     #[test]
     fn test_mutex_cond_broadcast() {
         test_mutex_cond_broadcast_helper(12);
     }
+
     #[test]
     fn test_mutex_cond_broadcast_none() {
         test_mutex_cond_broadcast_helper(0);
     }
+
     #[test]
     fn test_mutex_cond_no_waiter() {
         let m = Arc::new(Mutex::new());
@@ -838,6 +823,7 @@ mod tests {
         let lock = m2.lock();
         assert!(!lock.cond.signal());
     }
+
     #[test]
     fn test_mutex_killed_simple() {
         use any::Any;
@@ -854,6 +840,7 @@ mod tests {
         // child task must have finished by the time try returns
         drop(m.lock());
     }
+
     #[test]
     fn test_mutex_cond_signal_on_0() {
         // Tests that signal_on(0) is equivalent to signal().
@@ -866,6 +853,7 @@ mod tests {
         });
         lock.cond.wait();
     }
+
     #[test]
     fn test_mutex_no_condvars() {
         let result = task::try(proc() {
@@ -884,11 +872,10 @@ mod tests {
         });
         assert!(result.is_err());
     }
-    /************************************************************************
-     * Reader/writer lock tests
-     ************************************************************************/
+
     #[cfg(test)]
     pub enum RWLockMode { Read, Write, Downgrade, DowngradeRead }
+
     #[cfg(test)]
     fn lock_rwlock_in_mode(x: &Arc<RWLock>, mode: RWLockMode, blk: ||) {
         match mode {
@@ -898,6 +885,7 @@ mod tests {
             DowngradeRead => { let _g = x.write().downgrade(); blk() }
         }
     }
+
     #[cfg(test)]
     fn test_rwlock_exclusion(x: Arc<RWLock>,
                              mode1: RWLockMode,
@@ -934,6 +922,7 @@ mod tests {
             }
         }
     }
+
     #[test]
     fn test_rwlock_readers_wont_modify_the_data() {
         test_rwlock_exclusion(Arc::new(RWLock::new()), Read, Write);
@@ -943,6 +932,7 @@ mod tests {
         test_rwlock_exclusion(Arc::new(RWLock::new()), Write, DowngradeRead);
         test_rwlock_exclusion(Arc::new(RWLock::new()), DowngradeRead, Write);
     }
+
     #[test]
     fn test_rwlock_writers_and_writers() {
         test_rwlock_exclusion(Arc::new(RWLock::new()), Write, Write);
@@ -950,6 +940,7 @@ mod tests {
         test_rwlock_exclusion(Arc::new(RWLock::new()), Downgrade, Write);
         test_rwlock_exclusion(Arc::new(RWLock::new()), Downgrade, Downgrade);
     }
+
     #[cfg(test)]
     fn test_rwlock_handshake(x: Arc<RWLock>,
                              mode1: RWLockMode,
@@ -982,6 +973,7 @@ mod tests {
             rx1.recv();
         })
     }
+
     #[test]
     fn test_rwlock_readers_and_readers() {
         test_rwlock_handshake(Arc::new(RWLock::new()), Read, Read, false);
@@ -991,6 +983,7 @@ mod tests {
         test_rwlock_handshake(Arc::new(RWLock::new()), Read, DowngradeRead, true);
         // Two downgrade_reads can never both end up reading at the same time.
     }
+
     #[test]
     fn test_rwlock_downgrade_unlock() {
         // Tests that downgrade can unlock the lock in both modes
@@ -1001,12 +994,14 @@ mod tests {
         lock_rwlock_in_mode(&y, DowngradeRead, || { });
         test_rwlock_exclusion(y, Write, Write);
     }
+
     #[test]
     fn test_rwlock_read_recursive() {
         let x = RWLock::new();
         let _g1 = x.read();
         let _g2 = x.read();
     }
+
     #[test]
     fn test_rwlock_cond_wait() {
         // As test_mutex_cond_wait above.
@@ -1040,6 +1035,7 @@ mod tests {
         rx.recv(); // Wait until child wakes up
         drop(x.read()); // Just for good measure
     }
+
     #[cfg(test)]
     fn test_rwlock_cond_broadcast_helper(num_waiters: uint) {
         // Much like the mutex broadcast test. Downgrade-enabled.
@@ -1073,11 +1069,13 @@ mod tests {
         // wait until all children wake up
         for rx in rxs.iter_mut() { let _ = rx.recv(); }
     }
+
     #[test]
     fn test_rwlock_cond_broadcast() {
         test_rwlock_cond_broadcast_helper(0);
         test_rwlock_cond_broadcast_helper(12);
     }
+
     #[cfg(test)]
     fn rwlock_kill_helper(mode1: RWLockMode, mode2: RWLockMode) {
         use any::Any;
@@ -1095,22 +1093,27 @@ mod tests {
         // child task must have finished by the time try returns
         lock_rwlock_in_mode(&x, mode2, || { })
     }
+
     #[test]
     fn test_rwlock_reader_killed_writer() {
         rwlock_kill_helper(Read, Write);
     }
+
     #[test]
     fn test_rwlock_writer_killed_reader() {
         rwlock_kill_helper(Write, Read);
     }
+
     #[test]
     fn test_rwlock_reader_killed_reader() {
         rwlock_kill_helper(Read, Read);
     }
+
     #[test]
     fn test_rwlock_writer_killed_writer() {
         rwlock_kill_helper(Write, Write);
     }
+
     #[test]
     fn test_rwlock_kill_downgrader() {
         rwlock_kill_helper(Downgrade, Read);
diff --git a/src/libstd/sys/windows/process.rs b/src/libstd/sys/windows/process.rs
index 3fb5ee34356fe..37e0fd6e55d7f 100644
--- a/src/libstd/sys/windows/process.rs
+++ b/src/libstd/sys/windows/process.rs
@@ -33,13 +33,11 @@ use string::String;
 
 pub use sys_common::ProcessConfig;
 
-/**
- * A value representing a child process.
- *
- * The lifetime of this value is linked to the lifetime of the actual
- * process - the Process destructor calls self.finish() which waits
- * for the process to terminate.
- */
+/// A value representing a child process.
+///
+/// The lifetime of this value is linked to the lifetime of the actual
+/// process - the Process destructor calls self.finish() which waits
+/// for the process to terminate.
 pub struct Process {
     /// The unique id of the process (this should never be negative).
     pid: pid_t,
@@ -263,16 +261,14 @@ impl Process {
         }
     }
 
-    /**
-     * Waits for a process to exit and returns the exit code, failing
-     * if there is no process with the specified id.
-     *
-     * Note that this is private to avoid race conditions on unix where if
-     * a user calls waitpid(some_process.get_id()) then some_process.finish()
-     * and some_process.destroy() and some_process.finalize() will then either
-     * operate on a none-existent process or, even worse, on a newer process
-     * with the same id.
-     */
+    /// Waits for a process to exit and returns the exit code, failing
+    /// if there is no process with the specified id.
+    ///
+    /// Note that this is private to avoid race conditions on unix where if
+    /// a user calls waitpid(some_process.get_id()) then some_process.finish()
+    /// and some_process.destroy() and some_process.finalize() will then either
+    /// operate on a non-existent process or, even worse, on a newer process
+    /// with the same id.
     pub fn wait(&self, deadline: u64) -> IoResult<ProcessExit> {
         use libc::types::os::arch::extra::DWORD;
         use libc::consts::os::extra::{
diff --git a/src/libsyntax/codemap.rs b/src/libsyntax/codemap.rs
index b019b31de5f3f..bea7c394e7570 100644
--- a/src/libsyntax/codemap.rs
+++ b/src/libsyntax/codemap.rs
@@ -83,12 +83,10 @@ impl Sub<CharPos,CharPos> for CharPos {
     }
 }
 
-/**
-Spans represent a region of code, used for error reporting. Positions in spans
-are *absolute* positions from the beginning of the codemap, not positions
-relative to FileMaps. Methods on the CodeMap can be used to relate spans back
-to the original source.
-*/
+/// Spans represent a region of code, used for error reporting. Positions in spans
+/// are *absolute* positions from the beginning of the codemap, not positions
+/// relative to FileMaps. Methods on the CodeMap can be used to relate spans back
+/// to the original source.
 #[deriving(Clone, Show, Hash)]
 pub struct Span {
     pub lo: BytePos,
diff --git a/src/libsyntax/ext/quote.rs b/src/libsyntax/ext/quote.rs
index eaa3632cf499e..e703ac21f2642 100644
--- a/src/libsyntax/ext/quote.rs
+++ b/src/libsyntax/ext/quote.rs
@@ -17,16 +17,12 @@ use parse::token::*;
 use parse::token;
 use ptr::P;
 
-/**
-*
-* Quasiquoting works via token trees.
-*
-* This is registered as a set of expression syntax extension called quote!
-* that lifts its argument token-tree to an AST representing the
-* construction of the same token tree, with token::SubstNt interpreted
-* as antiquotes (splices).
-*
-*/
+//!  Quasiquoting works via token trees.
+//!
+//!  This is registered as a set of expression syntax extension called quote!
+//!  that lifts its argument token-tree to an AST representing the
+//!  construction of the same token tree, with token::SubstNt interpreted
+//!  as antiquotes (splices).
 
 pub mod rt {
     use ast;
diff --git a/src/libsyntax/parse/token.rs b/src/libsyntax/parse/token.rs
index 3a3407aedba96..583ace977fe46 100644
--- a/src/libsyntax/parse/token.rs
+++ b/src/libsyntax/parse/token.rs
@@ -421,13 +421,11 @@ macro_rules! declare_special_idents_and_keywords {(
         )*
     }
 
-    /**
-     * All the valid words that have meaning in the Rust language.
-     *
-     * Rust keywords are either 'strict' or 'reserved'.  Strict keywords may not
-     * appear as identifiers at all. Reserved keywords are not used anywhere in
-     * the language and may not appear as identifiers.
-     */
+    /// All the valid words that have meaning in the Rust language.
+    ///
+    /// Rust keywords are either 'strict' or 'reserved'.  Strict keywords may not
+    /// appear as identifiers at all. Reserved keywords are not used anywhere in
+    /// the language and may not appear as identifiers.
     pub mod keywords {
         pub use self::Keyword::*;
         use ast;
diff --git a/src/libterm/terminfo/parm.rs b/src/libterm/terminfo/parm.rs
index cfb4c3bc40322..62a49c5d90270 100644
--- a/src/libterm/terminfo/parm.rs
+++ b/src/libterm/terminfo/parm.rs
@@ -80,17 +80,15 @@ impl Variables {
     }
 }
 
-/**
-  Expand a parameterized capability
-
-  # Arguments
-  * `cap`    - string to expand
-  * `params` - vector of params for %p1 etc
-  * `vars`   - Variables struct for %Pa etc
-
-  To be compatible with ncurses, `vars` should be the same between calls to `expand` for
-  multiple capabilities for the same terminal.
-  */
+/// Expand a parameterized capability
+///
+/// # Arguments
+/// * `cap`    - string to expand
+/// * `params` - vector of params for %p1 etc
+/// * `vars`   - Variables struct for %Pa etc
+///
+/// To be compatible with ncurses, `vars` should be the same between calls to `expand` for
+/// multiple capabilities for the same terminal.
 pub fn expand(cap: &[u8], params: &[Param], vars: &mut Variables)
     -> Result<Vec<u8> , String> {
     let mut state = Nothing;
diff --git a/src/libtime/lib.rs b/src/libtime/lib.rs
index 062035c23f906..135c8a7280880 100644
--- a/src/libtime/lib.rs
+++ b/src/libtime/lib.rs
@@ -123,10 +123,8 @@ impl Sub<Timespec, Duration> for Timespec {
     }
 }
 
-/**
- * Returns the current time as a `timespec` containing the seconds and
- * nanoseconds since 1970-01-01T00:00:00Z.
- */
+/// Returns the current time as a `timespec` containing the seconds and
+/// nanoseconds since 1970-01-01T00:00:00Z.
 pub fn get_time() -> Timespec {
     unsafe {
         let (sec, nsec) = os_get_time();
@@ -171,10 +169,8 @@ pub fn get_time() -> Timespec {
 }
 
 
-/**
- * Returns the current value of a high-resolution performance counter
- * in nanoseconds since an unspecified epoch.
- */
+/// Returns the current value of a high-resolution performance counter
+/// in nanoseconds since an unspecified epoch.
 pub fn precise_time_ns() -> u64 {
     return os_precise_time_ns();
 
@@ -218,10 +214,8 @@ pub fn precise_time_ns() -> u64 {
 }
 
 
-/**
- * Returns the current value of a high-resolution performance counter
- * in seconds since an unspecified epoch.
- */
+/// Returns the current value of a high-resolution performance counter
+/// in seconds since an unspecified epoch.
 pub fn precise_time_s() -> f64 {
     return (precise_time_ns() as f64) / 1000000000.;
 }
@@ -346,12 +340,10 @@ impl Tm {
         at_utc(self.to_timespec())
     }
 
-    /**
-     * Returns a TmFmt that outputs according to the `asctime` format in ISO
-     * C, in the local timezone.
-     *
-     * Example: "Thu Jan  1 00:00:00 1970"
-     */
+    /// Returns a TmFmt that outputs according to the `asctime` format in ISO
+    /// C, in the local timezone.
+    ///
+    /// Example: "Thu Jan  1 00:00:00 1970"
     pub fn ctime(&self) -> TmFmt {
         TmFmt {
             tm: self,
@@ -359,12 +351,10 @@ impl Tm {
         }
     }
 
-    /**
-     * Returns a TmFmt that outputs according to the `asctime` format in ISO
-     * C.
-     *
-     * Example: "Thu Jan  1 00:00:00 1970"
-     */
+    /// Returns a TmFmt that outputs according to the `asctime` format in ISO
+    /// C.
+    ///
+    /// Example: "Thu Jan  1 00:00:00 1970"
     pub fn asctime(&self) -> TmFmt {
         TmFmt {
             tm: self,
@@ -380,12 +370,10 @@ impl Tm {
         })
     }
 
-    /**
-     * Returns a TmFmt that outputs according to RFC 822.
-     *
-     * local: "Thu, 22 Mar 2012 07:53:18 PST"
-     * utc:   "Thu, 22 Mar 2012 14:53:18 GMT"
-     */
+    /// Returns a TmFmt that outputs according to RFC 822.
+    ///
+    /// local: "Thu, 22 Mar 2012 07:53:18 PST"
+    /// utc:   "Thu, 22 Mar 2012 14:53:18 GMT"
     pub fn rfc822(&self) -> TmFmt {
         if self.tm_gmtoff == 0_i32 {
             TmFmt {
@@ -400,12 +388,10 @@ impl Tm {
         }
     }
 
-    /**
-     * Returns a TmFmt that outputs according to RFC 822 with Zulu time.
-     *
-     * local: "Thu, 22 Mar 2012 07:53:18 -0700"
-     * utc:   "Thu, 22 Mar 2012 14:53:18 -0000"
-     */
+    /// Returns a TmFmt that outputs according to RFC 822 with Zulu time.
+    ///
+    /// local: "Thu, 22 Mar 2012 07:53:18 -0700"
+    /// utc:   "Thu, 22 Mar 2012 14:53:18 -0000"
     pub fn rfc822z(&self) -> TmFmt {
         TmFmt {
             tm: self,
@@ -413,13 +399,11 @@ impl Tm {
         }
     }
 
-    /**
-     * Returns a TmFmt that outputs according to RFC 3339. RFC 3339 is
-     * compatible with ISO 8601.
-     *
-     * local: "2012-02-22T07:53:18-07:00"
-     * utc:   "2012-02-22T14:53:18Z"
-     */
+    /// Returns a TmFmt that outputs according to RFC 3339. RFC 3339 is
+    /// compatible with ISO 8601.
+    ///
+    /// local: "2012-02-22T07:53:18-07:00"
+    /// utc:   "2012-02-22T14:53:18Z"
     pub fn rfc3339<'a>(&'a self) -> TmFmt {
         TmFmt {
             tm: self,

From d48886cc88944c51066742e72fe29b1bc6e004a3 Mon Sep 17 00:00:00 2001
From: Chase Southwood <chase.southwood@gmail.com>
Date: Tue, 25 Nov 2014 20:57:32 -0600
Subject: [PATCH 27/40] Make BinaryHeap's Items iterator implement DoubleEnded
 and ExactSize

---
 src/libcollections/binary_heap.rs | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/src/libcollections/binary_heap.rs b/src/libcollections/binary_heap.rs
index 8efc4cd50c1a6..4abe555b0eaea 100644
--- a/src/libcollections/binary_heap.rs
+++ b/src/libcollections/binary_heap.rs
@@ -567,6 +567,13 @@ impl<'a, T> Iterator<&'a T> for Items<'a, T> {
     fn size_hint(&self) -> (uint, Option<uint>) { self.iter.size_hint() }
 }
 
+impl<'a, T> DoubleEndedIterator<&'a T> for Items<'a, T> {
+    #[inline]
+    fn next_back(&mut self) -> Option<(&'a T)> { self.iter.next_back() }
+}
+
+impl<'a, T> ExactSize<&'a T> for Items<'a, T> {}
+
 /// An iterator that moves out of a `BinaryHeap`.
 pub struct MoveItems<T> {
     iter: vec::MoveItems<T>,
@@ -625,6 +632,16 @@ mod tests {
         }
     }
 
+    #[test]
+    fn test_iterator_reverse() {
+        let data = vec!(5i, 9, 3);
+        let iterout = vec!(3i, 5, 9);
+        let pq = BinaryHeap::from_vec(data);
+
+        let v: Vec<int> = pq.iter().rev().map(|&x| x).collect();
+        assert_eq!(v, iterout);
+    }
+
     #[test]
     fn test_move_iter() {
         let data = vec!(5i, 9, 3);

From b7520f595f42ab6729075ec151cecfb0e3e7ee4c Mon Sep 17 00:00:00 2001
From: Andrew Paseltiner <apaseltiner@gmail.com>
Date: Wed, 26 Nov 2014 08:03:31 -0500
Subject: [PATCH 28/40] fix errors in the guide

- `s/(left|right) hand/\1-hand/`
- `s/parenthesis/parentheses/`
- `s/unicode/Unicode/`
- `s/validly-encoded/validly encoded/`
---
 src/doc/guide.md | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/src/doc/guide.md b/src/doc/guide.md
index 418f82c996957..430f44cdc21d0 100644
--- a/src/doc/guide.md
+++ b/src/doc/guide.md
@@ -683,7 +683,7 @@ fn main() {
 ```
 
 This is the simplest possible function declaration. As we mentioned before,
-`fn` says 'this is a function,' followed by the name, some parenthesis because
+`fn` says 'this is a function,' followed by the name, some parentheses because
 this function takes no arguments, and then some curly braces to indicate the
 body. Here's a function named `foo`:
 
@@ -884,7 +884,7 @@ Tuples are an ordered list of a fixed size. Like this:
 let x = (1i, "hello");
 ```
 
-The parenthesis and commas form this two-length tuple. Here's the same code, but
+The parentheses and commas form this two-length tuple. Here's the same code, but
 with the type annotated:
 
 ```rust
@@ -908,9 +908,9 @@ let (x, y, z) = (1i, 2i, 3i);
 println!("x is {}", x);
 ```
 
-Remember before when I said the left hand side of a `let` statement was more
+Remember before when I said the left-hand side of a `let` statement was more
 powerful than just assigning a binding? Here we are. We can put a pattern on
-the left hand side of the `let`, and if it matches up to the right hand side,
+the left-hand side of the `let`, and if it matches up to the right-hand side,
 we can assign multiple bindings at once. In this case, `let` 'destructures,'
 or 'breaks up,' the tuple, and assigns the bits to three bindings.
 
@@ -1453,9 +1453,9 @@ focus. Any time you have a data structure of variable size, things can get
 tricky, and strings are a re-sizable data structure. That said, Rust's strings
 also work differently than in some other systems languages, such as C.
 
-Let's dig into the details. A **string** is a sequence of unicode scalar values
+Let's dig into the details. A **string** is a sequence of Unicode scalar values
 encoded as a stream of UTF-8 bytes. All strings are guaranteed to be
-validly-encoded UTF-8 sequences. Additionally, strings are not null-terminated
+validly encoded UTF-8 sequences. Additionally, strings are not null-terminated
 and can contain null bytes.
 
 Rust has two main types of strings: `&str` and `String`.
@@ -3933,7 +3933,7 @@ match x {
 }
 ```
 
-Here, the `val` inside the `match` has type `int`. In other words, the left hand
+Here, the `val` inside the `match` has type `int`. In other words, the left-hand
 side of the pattern destructures the value. If we have `&5i`, then in `&val`, `val`
 would be `5i`.
 
@@ -4681,7 +4681,7 @@ let x: Option<int> = Some(5i);
 
 In the type declaration, we say `Option<int>`. Note how similar this looks to
 `Option<T>`. So, in this particular `Option`, `T` has the value of `int`. On
-the right hand side of the binding, we do make a `Some(T)`, where `T` is `5i`.
+the right-hand side of the binding, we do make a `Some(T)`, where `T` is `5i`.
 Since that's an `int`, the two sides match, and Rust is happy. If they didn't
 match, we'd get an error:
 

From ce238d752b1e04d4aea21c0fadf420a270ed6ff9 Mon Sep 17 00:00:00 2001
From: Corey Farwell <coreyf@rwell.org>
Date: Sat, 22 Nov 2014 13:48:01 -0500
Subject: [PATCH 29/40] Unpublicize reexports, unprefix JSON type aliases

The type aliases json::JsonArray and json::JsonObject were originally
prefixed with 'json' to prevent collisions with (at the time) the enum
variants json::Array and json::Object respectively. Now that enum
namespacing has landed, this 'json' prefix is redundant and can be
removed:

json::JsonArray -> json::Array
json::JsonObject -> json::Object

In addition, this commit also unpublicizes all of the re-exports in this
JSON module, as a part of #19253

[breaking-change]
---
 src/libserialize/json.rs        | 182 ++++++++++++++++----------------
 src/test/run-pass/issue-2804.rs |   2 +-
 2 files changed, 92 insertions(+), 92 deletions(-)

diff --git a/src/libserialize/json.rs b/src/libserialize/json.rs
index 4a2ca58fc9269..473552ab0ff98 100644
--- a/src/libserialize/json.rs
+++ b/src/libserialize/json.rs
@@ -194,12 +194,11 @@ fn main() {
 
 */
 
-pub use self::JsonEvent::*;
-pub use self::StackElement::*;
-pub use self::Json::*;
-pub use self::ErrorCode::*;
-pub use self::ParserError::*;
-pub use self::DecoderError::*;
+use self::JsonEvent::*;
+use self::StackElement::*;
+use self::ErrorCode::*;
+use self::ParserError::*;
+use self::DecoderError::*;
 use self::ParserState::*;
 use self::InternalStackElement::*;
 
@@ -223,13 +222,13 @@ pub enum Json {
     F64(f64),
     String(string::String),
     Boolean(bool),
-    Array(JsonArray),
-    Object(JsonObject),
+    Array(self::Array),
+    Object(self::Object),
     Null,
 }
 
-pub type JsonArray = Vec<Json>;
-pub type JsonObject = TreeMap<string::String, Json>;
+pub type Array = Vec<Json>;
+pub type Object = TreeMap<string::String, Json>;
 
 /// The errors that can arise while parsing a JSON stream.
 #[deriving(Clone, PartialEq)]
@@ -274,7 +273,7 @@ pub enum DecoderError {
 
 /// Returns a readable error string for a given error code.
 pub fn error_str(error: ErrorCode) -> &'static str {
-    return match error {
+    match error {
         InvalidSyntax => "invalid syntax",
         InvalidNumber => "invalid number",
         EOFWhileParsingObject => "EOF While parsing object",
@@ -863,14 +862,14 @@ impl<'a> ::Encoder<io::IoError> for PrettyEncoder<'a> {
 impl<E: ::Encoder<S>, S> Encodable<E, S> for Json {
     fn encode(&self, e: &mut E) -> Result<(), S> {
         match *self {
-            I64(v) => v.encode(e),
-            U64(v) => v.encode(e),
-            F64(v) => v.encode(e),
-            String(ref v) => v.encode(e),
-            Boolean(v) => v.encode(e),
-            Array(ref v) => v.encode(e),
-            Object(ref v) => v.encode(e),
-            Null => e.emit_nil(),
+            Json::I64(v) => v.encode(e),
+            Json::U64(v) => v.encode(e),
+            Json::F64(v) => v.encode(e),
+            Json::String(ref v) => v.encode(e),
+            Json::Boolean(v) => v.encode(e),
+            Json::Array(ref v) => v.encode(e),
+            Json::Object(ref v) => v.encode(e),
+            Json::Null => e.emit_nil(),
         }
     }
 }
@@ -900,7 +899,7 @@ impl Json {
     /// Otherwise, returns None.
     pub fn find<'a>(&'a self, key: &str) -> Option<&'a Json>{
         match self {
-            &Object(ref map) => map.get(key),
+            &Json::Object(ref map) => map.get(key),
             _ => None
         }
     }
@@ -924,7 +923,7 @@ impl Json {
     /// or the Json value is not an Object, returns None.
     pub fn search<'a>(&'a self, key: &str) -> Option<&'a Json> {
         match self {
-            &Object(ref map) => {
+            &Json::Object(ref map) => {
                 match map.get(key) {
                     Some(json_value) => Some(json_value),
                     None => {
@@ -949,9 +948,9 @@ impl Json {
 
     /// If the Json value is an Object, returns the associated TreeMap.
     /// Returns None otherwise.
-    pub fn as_object<'a>(&'a self) -> Option<&'a JsonObject> {
+    pub fn as_object<'a>(&'a self) -> Option<&'a Object> {
         match self {
-            &Object(ref map) => Some(map),
+            &Json::Object(ref map) => Some(map),
             _ => None
         }
     }
@@ -963,9 +962,9 @@ impl Json {
 
     /// If the Json value is an Array, returns the associated vector.
     /// Returns None otherwise.
-    pub fn as_array<'a>(&'a self) -> Option<&'a JsonArray> {
+    pub fn as_array<'a>(&'a self) -> Option<&'a Array> {
         match self {
-            &Array(ref array) => Some(&*array),
+            &Json::Array(ref array) => Some(&*array),
             _ => None
         }
     }
@@ -979,7 +978,7 @@ impl Json {
     /// Returns None otherwise.
     pub fn as_string<'a>(&'a self) -> Option<&'a str> {
         match *self {
-            String(ref s) => Some(s.as_slice()),
+            Json::String(ref s) => Some(s.as_slice()),
             _ => None
         }
     }
@@ -987,7 +986,7 @@ impl Json {
     /// Returns true if the Json value is a Number. Returns false otherwise.
     pub fn is_number(&self) -> bool {
         match *self {
-            I64(_) | U64(_) | F64(_) => true,
+            Json::I64(_) | Json::U64(_) | Json::F64(_) => true,
             _ => false,
         }
     }
@@ -995,7 +994,7 @@ impl Json {
     /// Returns true if the Json value is a i64. Returns false otherwise.
     pub fn is_i64(&self) -> bool {
         match *self {
-            I64(_) => true,
+            Json::I64(_) => true,
             _ => false,
         }
     }
@@ -1003,7 +1002,7 @@ impl Json {
     /// Returns true if the Json value is a u64. Returns false otherwise.
     pub fn is_u64(&self) -> bool {
         match *self {
-            U64(_) => true,
+            Json::U64(_) => true,
             _ => false,
         }
     }
@@ -1011,7 +1010,7 @@ impl Json {
     /// Returns true if the Json value is a f64. Returns false otherwise.
     pub fn is_f64(&self) -> bool {
         match *self {
-            F64(_) => true,
+            Json::F64(_) => true,
             _ => false,
         }
     }
@@ -1020,8 +1019,8 @@ impl Json {
     /// Returns None otherwise.
     pub fn as_i64(&self) -> Option<i64> {
         match *self {
-            I64(n) => Some(n),
-            U64(n) => num::cast(n),
+            Json::I64(n) => Some(n),
+            Json::U64(n) => num::cast(n),
             _ => None
         }
     }
@@ -1030,8 +1029,8 @@ impl Json {
     /// Returns None otherwise.
     pub fn as_u64(&self) -> Option<u64> {
         match *self {
-            I64(n) => num::cast(n),
-            U64(n) => Some(n),
+            Json::I64(n) => num::cast(n),
+            Json::U64(n) => Some(n),
             _ => None
         }
     }
@@ -1040,9 +1039,9 @@ impl Json {
     /// Returns None otherwise.
     pub fn as_f64(&self) -> Option<f64> {
         match *self {
-            I64(n) => num::cast(n),
-            U64(n) => num::cast(n),
-            F64(n) => Some(n),
+            Json::I64(n) => num::cast(n),
+            Json::U64(n) => num::cast(n),
+            Json::F64(n) => Some(n),
             _ => None
         }
     }
@@ -1056,7 +1055,7 @@ impl Json {
     /// Returns None otherwise.
     pub fn as_boolean(&self) -> Option<bool> {
         match self {
-            &Boolean(b) => Some(b),
+            &Json::Boolean(b) => Some(b),
             _ => None
         }
     }
@@ -1070,7 +1069,7 @@ impl Json {
     /// Returns None otherwise.
     pub fn as_null(&self) -> Option<()> {
         match self {
-            &Null => Some(()),
+            &Json::Null => Some(()),
             _ => None
         }
     }
@@ -1085,7 +1084,7 @@ impl<'a> ops::Index<&'a str, Json>  for Json {
 impl ops::Index<uint, Json> for Json {
     fn index<'a>(&'a self, idx: &uint) -> &'a Json {
         match self {
-            &Array(ref v) => v.index(idx),
+            &Json::Array(ref v) => v.index(idx),
             _ => panic!("can only index Json with uint if it is an array")
         }
     }
@@ -1844,16 +1843,16 @@ impl<T: Iterator<char>> Builder<T> {
     }
 
     fn build_value(&mut self) -> Result<Json, BuilderError> {
-        match self.token {
-            Some(NullValue) => Ok(Null),
-            Some(I64Value(n)) => Ok(I64(n)),
-            Some(U64Value(n)) => Ok(U64(n)),
-            Some(F64Value(n)) => Ok(F64(n)),
-            Some(BooleanValue(b)) => Ok(Boolean(b)),
+        return match self.token {
+            Some(NullValue) => Ok(Json::Null),
+            Some(I64Value(n)) => Ok(Json::I64(n)),
+            Some(U64Value(n)) => Ok(Json::U64(n)),
+            Some(F64Value(n)) => Ok(Json::F64(n)),
+            Some(BooleanValue(b)) => Ok(Json::Boolean(b)),
             Some(StringValue(ref mut s)) => {
                 let mut temp = string::String::new();
                 swap(s, &mut temp);
-                Ok(String(temp))
+                Ok(Json::String(temp))
             }
             Some(Error(e)) => Err(e),
             Some(ArrayStart) => self.build_array(),
@@ -1870,7 +1869,7 @@ impl<T: Iterator<char>> Builder<T> {
 
         loop {
             if self.token == Some(ArrayEnd) {
-                return Ok(Array(values.into_iter().collect()));
+                return Ok(Json::Array(values.into_iter().collect()));
             }
             match self.build_value() {
                 Ok(v) => values.push(v),
@@ -1887,7 +1886,7 @@ impl<T: Iterator<char>> Builder<T> {
 
         loop {
             match self.token {
-                Some(ObjectEnd) => { return Ok(Object(values)); }
+                Some(ObjectEnd) => { return Ok(Json::Object(values)); }
                 Some(Error(e)) => { return Err(e); }
                 None => { break; }
                 _ => {}
@@ -1947,14 +1946,14 @@ impl Decoder {
 macro_rules! expect(
     ($e:expr, Null) => ({
         match $e {
-            Null => Ok(()),
+            Json::Null => Ok(()),
             other => Err(ExpectedError("Null".to_string(),
                                        format!("{}", other)))
         }
     });
     ($e:expr, $t:ident) => ({
         match $e {
-            $t(v) => Ok(v),
+            Json::$t(v) => Ok(v),
             other => {
                 Err(ExpectedError(stringify!($t).to_string(),
                                   format!("{}", other)))
@@ -1967,25 +1966,25 @@ macro_rules! read_primitive {
     ($name:ident, $ty:ty) => {
         fn $name(&mut self) -> DecodeResult<$ty> {
             match self.pop() {
-                I64(f) => {
+                Json::I64(f) => {
                     match num::cast(f) {
                         Some(f) => Ok(f),
                         None => Err(ExpectedError("Number".to_string(), format!("{}", f))),
                     }
                 }
-                U64(f) => {
+                Json::U64(f) => {
                     match num::cast(f) {
                         Some(f) => Ok(f),
                         None => Err(ExpectedError("Number".to_string(), format!("{}", f))),
                     }
                 }
-                F64(f) => {
+                Json::F64(f) => {
                     match num::cast(f) {
                         Some(f) => Ok(f),
                         None => Err(ExpectedError("Number".to_string(), format!("{}", f))),
                     }
                 }
-                String(s) => {
+                Json::String(s) => {
                     // re: #12967.. a type w/ numeric keys (ie HashMap<uint, V> etc)
                     // is going to have a string here, as per JSON spec.
                     match std::str::from_str(s.as_slice()) {
@@ -2021,10 +2020,10 @@ impl ::Decoder<DecoderError> for Decoder {
     fn read_f64(&mut self) -> DecodeResult<f64> {
         debug!("read_f64");
         match self.pop() {
-            I64(f) => Ok(f as f64),
-            U64(f) => Ok(f as f64),
-            F64(f) => Ok(f),
-            String(s) => {
+            Json::I64(f) => Ok(f as f64),
+            Json::U64(f) => Ok(f as f64),
+            Json::F64(f) => Ok(f),
+            Json::String(s) => {
                 // re: #12967.. a type w/ numeric keys (ie HashMap<uint, V> etc)
                 // is going to have a string here, as per JSON spec.
                 match std::str::from_str(s.as_slice()) {
@@ -2032,7 +2031,7 @@ impl ::Decoder<DecoderError> for Decoder {
                     None => Err(ExpectedError("Number".to_string(), s)),
                 }
             },
-            Null => Ok(f64::NAN),
+            Json::Null => Ok(f64::NAN),
             value => Err(ExpectedError("Number".to_string(), format!("{}", value)))
         }
     }
@@ -2073,10 +2072,10 @@ impl ::Decoder<DecoderError> for Decoder {
                             -> DecodeResult<T> {
         debug!("read_enum_variant(names={})", names);
         let name = match self.pop() {
-            String(s) => s,
-            Object(mut o) => {
+            Json::String(s) => s,
+            Json::Object(mut o) => {
                 let n = match o.remove(&"variant".to_string()) {
-                    Some(String(s)) => s,
+                    Some(Json::String(s)) => s,
                     Some(val) => {
                         return Err(ExpectedError("String".to_string(), format!("{}", val)))
                     }
@@ -2085,7 +2084,7 @@ impl ::Decoder<DecoderError> for Decoder {
                     }
                 };
                 match o.remove(&"fields".to_string()) {
-                    Some(Array(l)) => {
+                    Some(Json::Array(l)) => {
                         for field in l.into_iter().rev() {
                             self.stack.push(field);
                         }
@@ -2158,7 +2157,7 @@ impl ::Decoder<DecoderError> for Decoder {
             None => {
                 // Add a Null and try to parse it as an Option<_>
                 // to get None as a default value.
-                self.stack.push(Null);
+                self.stack.push(Json::Null);
                 match f(self) {
                     Ok(x) => x,
                     Err(_) => return Err(MissingFieldError(name.to_string())),
@@ -2169,7 +2168,7 @@ impl ::Decoder<DecoderError> for Decoder {
                 try!(f(self))
             }
         };
-        self.stack.push(Object(obj));
+        self.stack.push(Json::Object(obj));
         Ok(value)
     }
 
@@ -2214,7 +2213,7 @@ impl ::Decoder<DecoderError> for Decoder {
     fn read_option<T>(&mut self, f: |&mut Decoder, bool| -> DecodeResult<T>) -> DecodeResult<T> {
         debug!("read_option()");
         match self.pop() {
-            Null => f(self, false),
+            Json::Null => f(self, false),
             value => { self.stack.push(value); f(self, true) }
         }
     }
@@ -2242,7 +2241,7 @@ impl ::Decoder<DecoderError> for Decoder {
         let len = obj.len();
         for (key, value) in obj.into_iter() {
             self.stack.push(value);
-            self.stack.push(String(key));
+            self.stack.push(Json::String(key));
         }
         f(self, len)
     }
@@ -2273,7 +2272,7 @@ pub trait ToJson for Sized? {
 macro_rules! to_json_impl_i64(
     ($($t:ty), +) => (
         $(impl ToJson for $t {
-            fn to_json(&self) -> Json { I64(*self as i64) }
+            fn to_json(&self) -> Json { Json::I64(*self as i64) }
         })+
     )
 )
@@ -2283,7 +2282,7 @@ to_json_impl_i64!(int, i8, i16, i32, i64)
 macro_rules! to_json_impl_u64(
     ($($t:ty), +) => (
         $(impl ToJson for $t {
-            fn to_json(&self) -> Json { U64(*self as u64) }
+            fn to_json(&self) -> Json { Json::U64(*self as u64) }
         })+
     )
 )
@@ -2301,26 +2300,26 @@ impl ToJson for f32 {
 impl ToJson for f64 {
     fn to_json(&self) -> Json {
         match self.classify() {
-            FPNaN | FPInfinite => Null,
-            _                  => F64(*self)
+            FPNaN | FPInfinite => Json::Null,
+            _                  => Json::F64(*self)
         }
     }
 }
 
 impl ToJson for () {
-    fn to_json(&self) -> Json { Null }
+    fn to_json(&self) -> Json { Json::Null }
 }
 
 impl ToJson for bool {
-    fn to_json(&self) -> Json { Boolean(*self) }
+    fn to_json(&self) -> Json { Json::Boolean(*self) }
 }
 
 impl ToJson for str {
-    fn to_json(&self) -> Json { String(self.into_string()) }
+    fn to_json(&self) -> Json { Json::String(self.into_string()) }
 }
 
 impl ToJson for string::String {
-    fn to_json(&self) -> Json { String((*self).clone()) }
+    fn to_json(&self) -> Json { Json::String((*self).clone()) }
 }
 
 macro_rules! tuple_impl {
@@ -2335,7 +2334,7 @@ macro_rules! tuple_impl {
             #[allow(non_snake_case)]
             fn to_json(&self) -> Json {
                 match *self {
-                    ($(ref $tyvar),*,) => Array(vec![$($tyvar.to_json()),*])
+                    ($(ref $tyvar),*,) => Json::Array(vec![$($tyvar.to_json()),*])
                 }
             }
         }
@@ -2356,11 +2355,11 @@ tuple_impl!{A, B, C, D, E, F, G, H, I, J, K}
 tuple_impl!{A, B, C, D, E, F, G, H, I, J, K, L}
 
 impl<A: ToJson> ToJson for [A] {
-    fn to_json(&self) -> Json { Array(self.iter().map(|elt| elt.to_json()).collect()) }
+    fn to_json(&self) -> Json { Json::Array(self.iter().map(|elt| elt.to_json()).collect()) }
 }
 
 impl<A: ToJson> ToJson for Vec<A> {
-    fn to_json(&self) -> Json { Array(self.iter().map(|elt| elt.to_json()).collect()) }
+    fn to_json(&self) -> Json { Json::Array(self.iter().map(|elt| elt.to_json()).collect()) }
 }
 
 impl<A: ToJson> ToJson for TreeMap<string::String, A> {
@@ -2369,7 +2368,7 @@ impl<A: ToJson> ToJson for TreeMap<string::String, A> {
         for (key, value) in self.iter() {
             d.insert((*key).clone(), value.to_json());
         }
-        Object(d)
+        Json::Object(d)
     }
 }
 
@@ -2379,14 +2378,14 @@ impl<A: ToJson> ToJson for HashMap<string::String, A> {
         for (key, value) in self.iter() {
             d.insert((*key).clone(), value.to_json());
         }
-        Object(d)
+        Json::Object(d)
     }
 }
 
 impl<A:ToJson> ToJson for Option<A> {
     fn to_json(&self) -> Json {
         match *self {
-            None => Null,
+            None => Json::Null,
             Some(ref value) => value.to_json()
         }
     }
@@ -2412,15 +2411,16 @@ mod tests {
     use self::DecodeEnum::*;
     use self::test::Bencher;
     use {Encodable, Decodable};
-    use super::{Array, Encoder, Decoder, Error, Boolean, I64, U64, F64, String, Null,
-                PrettyEncoder, Object, Json, from_str, ParseError, ExpectedError,
-                MissingFieldError, UnknownVariantError, DecodeResult, DecoderError,
-                JsonEvent, Parser, StackElement,
-                ObjectStart, ObjectEnd, ArrayStart, ArrayEnd, BooleanValue, U64Value,
-                F64Value, StringValue, NullValue, SyntaxError, Key, Index, Stack,
-                InvalidSyntax, InvalidNumber, EOFWhileParsingObject, EOFWhileParsingArray,
-                EOFWhileParsingValue, EOFWhileParsingString, KeyMustBeAString, ExpectedColon,
-                TrailingCharacters, TrailingComma};
+    use super::Json::*;
+    use super::ErrorCode::*;
+    use super::ParserError::*;
+    use super::DecoderError::*;
+    use super::JsonEvent::*;
+    use super::ParserState::*;
+    use super::StackElement::*;
+    use super::InternalStackElement::*;
+    use super::{PrettyEncoder, Json, from_str, DecodeResult, DecoderError, JsonEvent, Parser,
+                StackElement, Stack, Encoder, Decoder};
     use std::{i64, u64, f32, f64, io};
     use std::collections::TreeMap;
     use std::num::Float;
diff --git a/src/test/run-pass/issue-2804.rs b/src/test/run-pass/issue-2804.rs
index 4f66139789b1e..ba73b7cee2ebc 100644
--- a/src/test/run-pass/issue-2804.rs
+++ b/src/test/run-pass/issue-2804.rs
@@ -21,7 +21,7 @@ enum object {
     int_value(i64),
 }
 
-fn lookup(table: json::JsonObject, key: String, default: String) -> String
+fn lookup(table: json::Object, key: String, default: String) -> String
 {
     match table.find(&key.to_string()) {
         option::Some(&json::String(ref s)) => {

From 74a1041a4d7ae08d223f5ec623f6a698962d5667 Mon Sep 17 00:00:00 2001
From: Niko Matsakis <niko@alum.mit.edu>
Date: Thu, 20 Nov 2014 15:05:29 -0500
Subject: [PATCH 30/40] Implement the new parsing rules for types in the
 parser, modifying the AST appropriately.

---
 src/librustc/diagnostics.rs           |   4 +-
 src/librustc/middle/resolve.rs        |  95 +++++++------
 src/librustc/middle/typeck/astconv.rs | 175 ++++++++++++-----------
 src/libsyntax/ast.rs                  |   4 +-
 src/libsyntax/parse/mod.rs            |   2 +-
 src/libsyntax/parse/parser.rs         | 192 ++++++++++++--------------
 src/libsyntax/print/pprust.rs         |  24 ++--
 7 files changed, 262 insertions(+), 234 deletions(-)

diff --git a/src/librustc/diagnostics.rs b/src/librustc/diagnostics.rs
index afbb18faa0b9f..1873213fadf1d 100644
--- a/src/librustc/diagnostics.rs
+++ b/src/librustc/diagnostics.rs
@@ -145,5 +145,7 @@ register_diagnostics!(
     E0166,
     E0167,
     E0168,
-    E0169
+    E0169,
+    E0170,
+    E0171
 )
diff --git a/src/librustc/middle/resolve.rs b/src/librustc/middle/resolve.rs
index 68a31c83ea484..d334395e9117f 100644
--- a/src/librustc/middle/resolve.rs
+++ b/src/librustc/middle/resolve.rs
@@ -1396,29 +1396,53 @@ impl<'a> Resolver<'a> {
                 // methods within to a new module, if the type was defined
                 // within this module.
 
-                // Create the module and add all methods.
-                match ty.node {
-                    TyPath(ref path, _, _) if path.segments.len() == 1 => {
+                let mod_name = match ty.node {
+                    TyPath(ref path, _) if path.segments.len() == 1 => {
                         // FIXME(18446) we should distinguish between the name of
                         // a trait and the name of an impl of that trait.
-                        let mod_name = path.segments.last().unwrap().identifier.name;
+                        Some(path.segments.last().unwrap().identifier.name)
+                    }
+                    TyObjectSum(ref lhs_ty, _) => {
+                        match lhs_ty.node {
+                            TyPath(ref path, _) if path.segments.len() == 1 => {
+                                Some(path.segments.last().unwrap().identifier.name)
+                            }
+                            _ => {
+                                None
+                            }
+                        }
+                    }
+                    _ => {
+                        None
+                    }
+                };
 
+                match mod_name {
+                    None => {
+                        self.resolve_error(ty.span,
+                                           "inherent implementations may \
+                                            only be implemented in the same \
+                                            module as the type they are \
+                                            implemented for")
+                    }
+                    Some(mod_name) => {
+                        // Create the module and add all methods.
                         let parent_opt = parent.module().children.borrow()
-                                               .get(&mod_name).cloned();
+                            .get(&mod_name).cloned();
                         let new_parent = match parent_opt {
                             // It already exists
                             Some(ref child) if child.get_module_if_available()
-                                                .is_some() &&
-                                           (child.get_module().kind.get() == ImplModuleKind ||
-                                            child.get_module().kind.get() == TraitModuleKind) => {
-                                ModuleReducedGraphParent(child.get_module())
-                            }
+                                .is_some() &&
+                                (child.get_module().kind.get() == ImplModuleKind ||
+                                 child.get_module().kind.get() == TraitModuleKind) => {
+                                    ModuleReducedGraphParent(child.get_module())
+                                }
                             Some(ref child) if child.get_module_if_available()
-                                                .is_some() &&
-                                           child.get_module().kind.get() ==
-                                                EnumModuleKind => {
-                                ModuleReducedGraphParent(child.get_module())
-                            }
+                                .is_some() &&
+                                child.get_module().kind.get() ==
+                                EnumModuleKind => {
+                                    ModuleReducedGraphParent(child.get_module())
+                                }
                             // Create the module
                             _ => {
                                 let name_bindings =
@@ -1433,7 +1457,7 @@ impl<'a> Resolver<'a> {
                                 let ns = TypeNS;
                                 let is_public =
                                     !name_bindings.defined_in_namespace(ns) ||
-                                     name_bindings.defined_in_public_namespace(ns);
+                                    name_bindings.defined_in_public_namespace(ns);
 
                                 name_bindings.define_module(parent_link,
                                                             Some(def_id),
@@ -1459,21 +1483,21 @@ impl<'a> Resolver<'a> {
                                                        ForbidDuplicateValues,
                                                        method.span);
                                     let def = match method.pe_explicit_self()
-                                                          .node {
-                                        SelfStatic => {
-                                            // Static methods become
-                                            // `DefStaticMethod`s.
-                                            DefStaticMethod(local_def(method.id),
-                                                            FromImpl(local_def(item.id)))
-                                        }
-                                        _ => {
-                                            // Non-static methods become
-                                            // `DefMethod`s.
-                                            DefMethod(local_def(method.id),
-                                                      None,
-                                                      FromImpl(local_def(item.id)))
-                                        }
-                                    };
+                                        .node {
+                                            SelfStatic => {
+                                                // Static methods become
+                                                // `DefStaticMethod`s.
+                                                DefStaticMethod(local_def(method.id),
+                                                                FromImpl(local_def(item.id)))
+                                            }
+                                            _ => {
+                                                // Non-static methods become
+                                                // `DefMethod`s.
+                                                DefMethod(local_def(method.id),
+                                                          None,
+                                                          FromImpl(local_def(item.id)))
+                                            }
+                                        };
 
                                     // NB: not IMPORTABLE
                                     let modifiers = if method.pe_vis() == ast::Public {
@@ -1496,7 +1520,7 @@ impl<'a> Resolver<'a> {
                                             ForbidDuplicateTypesAndModules,
                                             typedef.span);
                                     let def = DefAssociatedTy(local_def(
-                                            typedef.id));
+                                        typedef.id));
                                     // NB: not IMPORTABLE
                                     let modifiers = if typedef.vis == ast::Public {
                                         PUBLIC
@@ -1511,13 +1535,6 @@ impl<'a> Resolver<'a> {
                             }
                         }
                     }
-                    _ => {
-                        self.resolve_error(ty.span,
-                                           "inherent implementations may \
-                                            only be implemented in the same \
-                                            module as the type they are \
-                                            implemented for")
-                    }
                 }
 
                 parent
diff --git a/src/librustc/middle/typeck/astconv.rs b/src/librustc/middle/typeck/astconv.rs
index fd5b1bd4793b5..8f1e2d115d3f1 100644
--- a/src/librustc/middle/typeck/astconv.rs
+++ b/src/librustc/middle/typeck/astconv.rs
@@ -59,8 +59,9 @@ use middle::typeck::rscope::{UnelidableRscope, RegionScope, SpecificRscope,
                              ShiftedRscope, BindingRscope};
 use middle::typeck::rscope;
 use middle::typeck::TypeAndSubsts;
+use util::common::ErrorReported;
 use util::nodemap::DefIdMap;
-use util::ppaux::{Repr, UserString};
+use util::ppaux::{mod, Repr, UserString};
 
 use std::rc::Rc;
 use std::iter::AdditiveIterator;
@@ -585,7 +586,7 @@ fn check_path_args(tcx: &ty::ctxt,
 pub fn ast_ty_to_prim_ty<'tcx>(tcx: &ty::ctxt<'tcx>, ast_ty: &ast::Ty)
                                -> Option<Ty<'tcx>> {
     match ast_ty.node {
-        ast::TyPath(ref path, _, id) => {
+        ast::TyPath(ref path, id) => {
             let a_def = match tcx.def_map.borrow().get(&id) {
                 None => {
                     tcx.sess.span_bug(ast_ty.span,
@@ -642,7 +643,7 @@ pub fn ast_ty_to_builtin_ty<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
     }
 
     match ast_ty.node {
-        ast::TyPath(ref path, _, id) => {
+        ast::TyPath(ref path, id) => {
             let a_def = match this.tcx().def_map.borrow().get(&id) {
                 None => {
                     this.tcx()
@@ -682,64 +683,92 @@ pub fn ast_ty_to_builtin_ty<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
     }
 }
 
-// Handle `~`, `Box`, and `&` being able to mean strs and vecs.
-// If a_seq_ty is a str or a vec, make it a str/vec.
-// Also handle first-class trait types.
-fn mk_pointer<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
-        this: &AC,
-        rscope: &RS,
-        a_seq_mutbl: ast::Mutability,
-        a_seq_ty: &ast::Ty,
-        region: ty::Region,
-        constr: |Ty<'tcx>| -> Ty<'tcx>)
-        -> Ty<'tcx>
+fn ast_ty_to_trait_ref<'tcx,AC,RS>(this: &AC,
+                                   rscope: &RS,
+                                   ty: &ast::Ty,
+                                   bounds: &[ast::TyParamBound])
+                                   -> Result<ty::TraitRef<'tcx>, ErrorReported>
+    where AC : AstConv<'tcx>, RS : RegionScope
 {
-    let tcx = this.tcx();
-
-    debug!("mk_pointer(region={}, a_seq_ty={})",
-           region,
-           a_seq_ty.repr(tcx));
+    /*!
+     * In a type like `Foo + Send`, we want to wait to collect the
+     * full set of bounds before we make the object type, because we
+     * need them to infer a region bound.  (For example, if we tried
+     * to make a type from just `Foo`, then it wouldn't be enough to
+     * infer a 'static bound, and hence the user would get an error.)
+     * So this function is used when we're dealing with a sum type to
+     * convert the LHS. It only accepts a type that refers to a trait
+     * name, and reports an error otherwise.
+     */
 
-    match a_seq_ty.node {
-        ast::TyVec(ref ty) => {
-            let ty = ast_ty_to_ty(this, rscope, &**ty);
-            return constr(ty::mk_vec(tcx, ty, None));
+    match ty.node {
+        ast::TyPath(ref path, id) => {
+            match this.tcx().def_map.borrow().get(&id) {
+                Some(&def::DefTrait(trait_def_id)) => {
+                    return Ok(ast_path_to_trait_ref(this,
+                                                    rscope,
+                                                    trait_def_id,
+                                                    None,
+                                                    path));
+                }
+                _ => {
+                    span_err!(this.tcx().sess, ty.span, E0170, "expected a reference to a trait");
+                    Err(ErrorReported)
+                }
+            }
         }
-        ast::TyPath(ref path, ref opt_bounds, id) => {
-            // Note that the "bounds must be empty if path is not a trait"
-            // restriction is enforced in the below case for ty_path, which
-            // will run after this as long as the path isn't a trait.
-            match tcx.def_map.borrow().get(&id) {
-                Some(&def::DefPrimTy(ast::TyStr)) => {
-                    check_path_args(tcx, path, NO_TPS | NO_REGIONS);
-                    return ty::mk_str_slice(tcx, region, a_seq_mutbl);
+        _ => {
+            span_err!(this.tcx().sess, ty.span, E0171,
+                      "expected a path on the left-hand side of `+`, not `{}`",
+                      pprust::ty_to_string(ty));
+            match ty.node {
+                ast::TyRptr(None, ref mut_ty) => {
+                    span_note!(this.tcx().sess, ty.span,
+                               "perhaps you meant `&{}({} +{})`? (per RFC 248)",
+                               ppaux::mutability_to_string(mut_ty.mutbl),
+                               pprust::ty_to_string(&*mut_ty.ty),
+                               pprust::bounds_to_string(bounds));
                 }
-                Some(&def::DefTrait(trait_def_id)) => {
-                    let result = ast_path_to_trait_ref(this,
-                                                       rscope,
-                                                       trait_def_id,
-                                                       None,
-                                                       path);
-                    let empty_vec = [];
-                    let bounds = match *opt_bounds { None => empty_vec.as_slice(),
-                                                     Some(ref bounds) => bounds.as_slice() };
-                    let existential_bounds = conv_existential_bounds(this,
-                                                                     rscope,
-                                                                     path.span,
-                                                                     &[Rc::new(result.clone())],
-                                                                     bounds);
-                    let tr = ty::mk_trait(tcx,
-                                          result,
-                                          existential_bounds);
-                    return ty::mk_rptr(tcx, region, ty::mt{mutbl: a_seq_mutbl, ty: tr});
+
+                ast::TyRptr(Some(ref lt), ref mut_ty) => {
+                    span_note!(this.tcx().sess, ty.span,
+                               "perhaps you meant `&{} {}({} +{})`? (per RFC 248)",
+                               pprust::lifetime_to_string(lt),
+                               ppaux::mutability_to_string(mut_ty.mutbl),
+                               pprust::ty_to_string(&*mut_ty.ty),
+                               pprust::bounds_to_string(bounds));
+                }
+
+                _ => {
+                    span_note!(this.tcx().sess, ty.span,
+                               "perhaps you forgot parentheses? (per RFC 248)");
                 }
-                _ => {}
             }
+            Err(ErrorReported)
         }
-        _ => {}
     }
 
-    constr(ast_ty_to_ty(this, rscope, a_seq_ty))
+}
+
+fn trait_ref_to_object_type<'tcx,AC,RS>(this: &AC,
+                                        rscope: &RS,
+                                        span: Span,
+                                        trait_ref: ty::TraitRef<'tcx>,
+                                        bounds: &[ast::TyParamBound])
+                                        -> Ty<'tcx>
+    where AC : AstConv<'tcx>, RS : RegionScope
+{
+    let existential_bounds = conv_existential_bounds(this,
+                                                     rscope,
+                                                     span,
+                                                     &[Rc::new(trait_ref.clone())],
+                                                     bounds);
+
+    let result = ty::mk_trait(this.tcx(), trait_ref, existential_bounds);
+    debug!("trait_ref_to_object_type: result={}",
+           result.repr(this.tcx()));
+
+    result
 }
 
 fn qpath_to_ty<'tcx,AC,RS>(this: &AC,
@@ -806,6 +835,17 @@ pub fn ast_ty_to_ty<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
             ast::TyVec(ref ty) => {
                 ty::mk_vec(tcx, ast_ty_to_ty(this, rscope, &**ty), None)
             }
+            ast::TyObjectSum(ref ty, ref bounds) => {
+                match ast_ty_to_trait_ref(this, rscope, &**ty, bounds.as_slice()) {
+                    Ok(trait_ref) => {
+                        trait_ref_to_object_type(this, rscope, ast_ty.span,
+                                                 trait_ref, bounds.as_slice())
+                    }
+                    Err(ErrorReported) => {
+                        ty::mk_err()
+                    }
+                }
+            }
             ast::TyPtr(ref mt) => {
                 ty::mk_ptr(tcx, ty::mt {
                     ty: ast_ty_to_ty(this, rscope, &*mt.ty),
@@ -815,8 +855,8 @@ pub fn ast_ty_to_ty<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
             ast::TyRptr(ref region, ref mt) => {
                 let r = opt_ast_region_to_region(this, rscope, ast_ty.span, region);
                 debug!("ty_rptr r={}", r.repr(this.tcx()));
-                mk_pointer(this, rscope, mt.mutbl, &*mt.ty, r,
-                           |ty| ty::mk_rptr(tcx, r, ty::mt {ty: ty, mutbl: mt.mutbl}))
+                let t = ast_ty_to_ty(this, rscope, &*mt.ty);
+                ty::mk_rptr(tcx, r, ty::mt {ty: t, mutbl: mt.mutbl})
             }
             ast::TyTup(ref fields) => {
                 let flds = fields.iter()
@@ -874,7 +914,7 @@ pub fn ast_ty_to_ty<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
             ast::TyPolyTraitRef(ref bounds) => {
                 conv_ty_poly_trait_ref(this, rscope, ast_ty.span, bounds.as_slice())
             }
-            ast::TyPath(ref path, ref bounds, id) => {
+            ast::TyPath(ref path, id) => {
                 let a_def = match tcx.def_map.borrow().get(&id) {
                     None => {
                         tcx.sess
@@ -884,35 +924,16 @@ pub fn ast_ty_to_ty<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
                     }
                     Some(&d) => d
                 };
-                // Kind bounds on path types are only supported for traits.
-                match a_def {
-                    // But don't emit the error if the user meant to do a trait anyway.
-                    def::DefTrait(..) => { },
-                    _ if bounds.is_some() =>
-                        tcx.sess.span_err(ast_ty.span,
-                                          "kind bounds can only be used on trait types"),
-                    _ => { },
-                }
                 match a_def {
                     def::DefTrait(trait_def_id) => {
+                        // N.B. this case overlaps somewhat with
+                        // TyObjectSum, see that fn for details
                         let result = ast_path_to_trait_ref(this,
                                                            rscope,
                                                            trait_def_id,
                                                            None,
                                                            path);
-                        let empty_bounds: &[ast::TyParamBound] = &[];
-                        let ast_bounds = match *bounds {
-                            Some(ref b) => b.as_slice(),
-                            None => empty_bounds
-                        };
-                        let bounds = conv_existential_bounds(this,
-                                                             rscope,
-                                                             ast_ty.span,
-                                                             &[Rc::new(result.clone())],
-                                                             ast_bounds);
-                        let result_ty = ty::mk_trait(tcx, result, bounds);
-                        debug!("ast_ty_to_ty: result_ty={}", result_ty.repr(this.tcx()));
-                        result_ty
+                        trait_ref_to_object_type(this, rscope, path.span, result, &[])
                     }
                     def::DefTy(did, _) | def::DefStruct(did) => {
                         ast_path_to_ty(this, rscope, did, path).ty
diff --git a/src/libsyntax/ast.rs b/src/libsyntax/ast.rs
index 3d33774aa55e1..14f164ff23b82 100644
--- a/src/libsyntax/ast.rs
+++ b/src/libsyntax/ast.rs
@@ -1151,7 +1151,9 @@ pub enum Ty_ {
     /// A path (`module::module::...::Type`) or primitive
     ///
     /// Type parameters are stored in the Path itself
-    TyPath(Path, Option<TyParamBounds>, NodeId), // for #7264; see above
+    TyPath(Path, NodeId),
+    /// Something like `A+B`. Note that `B` must always be a path.
+    TyObjectSum(P<Ty>, TyParamBounds),
     /// A type like `for<'a> Foo<&'a Bar>`
     TyPolyTraitRef(TyParamBounds),
     /// A "qualified path", e.g. `<Vec<T> as SomeTrait>::SomeType`
diff --git a/src/libsyntax/parse/mod.rs b/src/libsyntax/parse/mod.rs
index 96659031e6a37..b46f7cdfe22ad 100644
--- a/src/libsyntax/parse/mod.rs
+++ b/src/libsyntax/parse/mod.rs
@@ -1029,7 +1029,7 @@ mod test {
                                                 parameters: ast::PathParameters::none(),
                                             }
                                         ),
-                                        }, None, ast::DUMMY_NODE_ID),
+                                        }, ast::DUMMY_NODE_ID),
                                         span:sp(10,13)
                                     }),
                                     pat: P(ast::Pat {
diff --git a/src/libsyntax/parse/parser.rs b/src/libsyntax/parse/parser.rs
index c731a0005f884..35187ebb52210 100644
--- a/src/libsyntax/parse/parser.rs
+++ b/src/libsyntax/parse/parser.rs
@@ -111,16 +111,6 @@ pub enum PathParsingMode {
     /// A path with a lifetime and type parameters with double colons before
     /// the type parameters; e.g. `foo::bar::<'a>::Baz::<T>`
     LifetimeAndTypesWithColons,
-    /// A path with a lifetime and type parameters with bounds before the last
-    /// set of type parameters only; e.g. `foo::bar<'a>::Baz+X+Y<T>` This
-    /// form does not use extra double colons.
-    LifetimeAndTypesAndBounds,
-}
-
-/// A path paired with optional type bounds.
-pub struct PathAndBounds {
-    pub path: ast::Path,
-    pub bounds: Option<ast::TyParamBounds>,
 }
 
 enum ItemOrViewItem {
@@ -1053,17 +1043,9 @@ impl<'a> Parser<'a> {
         }
     }
 
-    pub fn parse_ty_path(&mut self, plus_allowed: bool) -> Ty_ {
-        let mode = if plus_allowed {
-            LifetimeAndTypesAndBounds
-        } else {
-            LifetimeAndTypesWithoutColons
-        };
-        let PathAndBounds {
-            path,
-            bounds
-        } = self.parse_path(mode);
-        TyPath(path, bounds, ast::DUMMY_NODE_ID)
+    pub fn parse_ty_path(&mut self) -> Ty_ {
+        let path = self.parse_path(LifetimeAndTypesWithoutColons);
+        TyPath(path, ast::DUMMY_NODE_ID)
     }
 
     /// parse a TyBareFn type:
@@ -1286,7 +1268,7 @@ impl<'a> Parser<'a> {
         let lo = self.span.lo;
         let ident = self.parse_ident();
         self.expect(&token::Eq);
-        let typ = self.parse_ty(true);
+        let typ = self.parse_ty_sum();
         let hi = self.span.hi;
         self.expect(&token::Semi);
         Typedef {
@@ -1385,7 +1367,7 @@ impl<'a> Parser<'a> {
     /// Parse a possibly mutable type
     pub fn parse_mt(&mut self) -> MutTy {
         let mutbl = self.parse_mutability();
-        let t = self.parse_ty(true);
+        let t = self.parse_ty();
         MutTy { ty: t, mutbl: mutbl }
     }
 
@@ -1396,7 +1378,7 @@ impl<'a> Parser<'a> {
         let mutbl = self.parse_mutability();
         let id = self.parse_ident();
         self.expect(&token::Colon);
-        let ty = self.parse_ty(true);
+        let ty = self.parse_ty_sum();
         let hi = ty.span.hi;
         ast::TypeField {
             ident: id,
@@ -1411,7 +1393,19 @@ impl<'a> Parser<'a> {
             if self.eat(&token::Not) {
                 NoReturn(self.span)
             } else {
-                Return(self.parse_ty(true))
+                let t = self.parse_ty();
+
+                // We used to allow `fn foo() -> &T + U`, but don't
+                // anymore. If we see it, report a useful error.  This
+                // only makes sense because `parse_ret_ty` is only
+                // used in fn *declarations*, not fn types or where
+                // clauses (i.e., not when parsing something like
+                // `FnMut() -> T + Send`, where the `+` is legal).
+                if self.token == token::BinOp(token::Plus) {
+                    self.warn("deprecated syntax: `()` are required, see RFC 248 for details");
+                }
+
+                Return(t)
             }
         } else {
             let pos = self.span.lo;
@@ -1423,11 +1417,36 @@ impl<'a> Parser<'a> {
         }
     }
 
+    /// Parse a type in a context where `T1+T2` is allowed.
+    pub fn parse_ty_sum(&mut self) -> P<Ty> {
+        let lo = self.span.lo;
+        let lhs = self.parse_ty();
+
+        if !self.eat(&token::BinOp(token::Plus)) {
+            return lhs;
+        }
+
+        let bounds = self.parse_ty_param_bounds();
+
+        // In the type grammar, `+` is treated like a binary operator,
+        // and hence both the left and right operands are required.
+        if bounds.len() == 0 {
+            let last_span = self.last_span;
+            self.span_err(last_span,
+                          "at least one type parameter bound \
+                          must be specified");
+        }
+
+        let sp = mk_sp(lo, self.last_span.hi);
+        let sum = ast::TyObjectSum(lhs, bounds);
+        P(Ty {id: ast::DUMMY_NODE_ID, node: sum, span: sp})
+    }
+
     /// Parse a type.
     ///
-    /// The second parameter specifies whether the `+` binary operator is
-    /// allowed in the type grammar.
+    /// This does not parse object sums (`T1+T2`); use `parse_ty_sum`
+    /// in contexts where the `+` binary operator is allowed.
-    pub fn parse_ty(&mut self, plus_allowed: bool) -> P<Ty> {
+    pub fn parse_ty(&mut self) -> P<Ty> {
         maybe_whole!(no_clone self, NtTy);
 
         let lo = self.span.lo;
@@ -1441,7 +1460,7 @@ impl<'a> Parser<'a> {
             let mut ts = vec![];
             let mut last_comma = false;
             while self.token != token::CloseDelim(token::Paren) {
-                ts.push(self.parse_ty(true));
+                ts.push(self.parse_ty_sum());
                 if self.token == token::Comma {
                     last_comma = true;
                     self.bump();
@@ -1465,7 +1484,7 @@ impl<'a> Parser<'a> {
                 token::OpenDelim(token::Bracket) => self.obsolete(last_span, ObsoleteOwnedVector),
                 _ => self.obsolete(last_span, ObsoleteOwnedType)
             }
-            TyTup(vec![self.parse_ty(false)])
+            TyTup(vec![self.parse_ty()])
         } else if self.token == token::BinOp(token::Star) {
             // STAR POINTER (bare pointer?)
             self.bump();
@@ -1473,7 +1492,7 @@ impl<'a> Parser<'a> {
         } else if self.token == token::OpenDelim(token::Bracket) {
             // VECTOR
             self.expect(&token::OpenDelim(token::Bracket));
-            let t = self.parse_ty(true);
+            let t = self.parse_ty_sum();
 
             // Parse the `, ..e` in `[ int, ..e ]`
             // where `e` is a const expression
@@ -1514,7 +1533,7 @@ impl<'a> Parser<'a> {
         } else if self.token == token::Lt {
             // QUALIFIED PATH `<TYPE as TRAIT_REF>::item`
             self.bump();
-            let self_type = self.parse_ty(true);
+            let self_type = self.parse_ty_sum();
             self.expect_keyword(keywords::As);
             let trait_ref = self.parse_trait_ref();
             self.expect(&token::Gt);
@@ -1529,7 +1548,7 @@ impl<'a> Parser<'a> {
                   self.token.is_ident() ||
                   self.token.is_path() {
             // NAMED TYPE
-            self.parse_ty_path(plus_allowed)
+            self.parse_ty_path()
         } else if self.eat(&token::Underscore) {
             // TYPE TO BE INFERRED
             TyInfer
@@ -1563,7 +1582,7 @@ impl<'a> Parser<'a> {
                            known as `*const T`");
             MutImmutable
         };
-        let t = self.parse_ty(true);
+        let t = self.parse_ty();
         MutTy { ty: t, mutbl: mutbl }
     }
 
@@ -1603,7 +1622,7 @@ impl<'a> Parser<'a> {
                                    special_idents::invalid)
         };
 
-        let t = self.parse_ty(true);
+        let t = self.parse_ty_sum();
 
         Arg {
             ty: t,
@@ -1621,7 +1640,7 @@ impl<'a> Parser<'a> {
     pub fn parse_fn_block_arg(&mut self) -> Arg {
         let pat = self.parse_pat();
         let t = if self.eat(&token::Colon) {
-            self.parse_ty(true)
+            self.parse_ty_sum()
         } else {
             P(Ty {
                 id: ast::DUMMY_NODE_ID,
@@ -1739,7 +1758,7 @@ impl<'a> Parser<'a> {
-    /// mode. The `mode` parameter determines whether lifetimes, types, and/or
-    /// bounds are permitted and whether `::` must precede type parameter
-    /// groups.
+    /// mode. The `mode` parameter determines whether lifetimes and types
+    /// are permitted and whether `::` must precede type parameter
+    /// groups.
-    pub fn parse_path(&mut self, mode: PathParsingMode) -> PathAndBounds {
+    pub fn parse_path(&mut self, mode: PathParsingMode) -> ast::Path {
         // Check for a whole path...
         let found = match self.token {
             token::Interpolated(token::NtPath(_)) => Some(self.bump_and_get()),
@@ -1747,10 +1766,7 @@ impl<'a> Parser<'a> {
         };
         match found {
             Some(token::Interpolated(token::NtPath(box path))) => {
-                return PathAndBounds {
-                    path: path,
-                    bounds: None
-                }
+                return path;
             }
             _ => {}
         }
@@ -1762,8 +1778,7 @@ impl<'a> Parser<'a> {
         // identifier followed by an optional lifetime and a set of types.
         // A bound set is a set of type parameter bounds.
         let segments = match mode {
-            LifetimeAndTypesWithoutColons |
-            LifetimeAndTypesAndBounds => {
+            LifetimeAndTypesWithoutColons => {
                 self.parse_path_segments_without_colons()
             }
             LifetimeAndTypesWithColons => {
@@ -1774,44 +1789,14 @@ impl<'a> Parser<'a> {
             }
         };
 
-        // Next, parse a plus and bounded type parameters, if
-        // applicable. We need to remember whether the separate was
-        // present for later, because in some contexts it's a parse
-        // error.
-        let opt_bounds = {
-            if mode == LifetimeAndTypesAndBounds &&
-                self.eat(&token::BinOp(token::Plus))
-            {
-                let bounds = self.parse_ty_param_bounds();
-
-                // For some reason that I do not fully understand, we
-                // do not permit an empty list in the case where it is
-                // introduced by a `+`, but we do for `:` and other
-                // separators. -nmatsakis
-                if bounds.len() == 0 {
-                    let last_span = self.last_span;
-                    self.span_err(last_span,
-                                  "at least one type parameter bound \
-                                   must be specified");
-                }
-
-                Some(bounds)
-            } else {
-                None
-            }
-        };
-
         // Assemble the span.
         let span = mk_sp(lo, self.last_span.hi);
 
         // Assemble the result.
-        PathAndBounds {
-            path: ast::Path {
-                span: span,
-                global: is_global,
-                segments: segments,
-            },
-            bounds: opt_bounds,
+        ast::Path {
+            span: span,
+            global: is_global,
+            segments: segments,
         }
     }
 
@@ -1837,10 +1822,10 @@ impl<'a> Parser<'a> {
                 let inputs = self.parse_seq_to_end(
                     &token::CloseDelim(token::Paren),
                     seq_sep_trailing_allowed(token::Comma),
-                    |p| p.parse_ty(true));
+                    |p| p.parse_ty_sum());
 
                 let output_ty = if self.eat(&token::RArrow) {
-                    Some(self.parse_ty(true))
+                    Some(self.parse_ty())
                 } else {
                     None
                 };
@@ -2327,7 +2312,7 @@ impl<'a> Parser<'a> {
                         !self.token.is_keyword(keywords::True) &&
                         !self.token.is_keyword(keywords::False) {
                     let pth =
-                        self.parse_path(LifetimeAndTypesWithColons).path;
+                        self.parse_path(LifetimeAndTypesWithColons);
 
                     // `!`, as an operator, is prefix, so we know this isn't that
                     if self.token == token::Not {
@@ -2898,7 +2883,7 @@ impl<'a> Parser<'a> {
             }
             None => {
                 if as_prec > min_prec && self.eat_keyword(keywords::As) {
-                    let rhs = self.parse_ty(false);
+                    let rhs = self.parse_ty();
                     let _as = self.mk_expr(lhs.span.lo,
                                            rhs.span.hi,
                                            ExprCast(lhs, rhs));
@@ -3362,8 +3347,7 @@ impl<'a> Parser<'a> {
                     }) {
                 self.bump();
                 let end = if self.token.is_ident() || self.token.is_path() {
-                    let path = self.parse_path(LifetimeAndTypesWithColons)
-                                   .path;
+                    let path = self.parse_path(LifetimeAndTypesWithColons);
                     let hi = self.span.hi;
                     self.mk_expr(lo, hi, ExprPath(path))
                 } else {
@@ -3433,8 +3417,7 @@ impl<'a> Parser<'a> {
                 }
             } else {
                 // parse an enum pat
-                let enum_path = self.parse_path(LifetimeAndTypesWithColons)
-                                    .path;
+                let enum_path = self.parse_path(LifetimeAndTypesWithColons);
                 match self.token {
                     token::OpenDelim(token::Brace) => {
                         self.bump();
@@ -3548,7 +3531,7 @@ impl<'a> Parser<'a> {
             span: mk_sp(lo, lo),
         });
         if self.eat(&token::Colon) {
-            ty = self.parse_ty(true);
+            ty = self.parse_ty_sum();
         }
         let init = self.parse_initializer();
         P(ast::Local {
@@ -3577,7 +3560,7 @@ impl<'a> Parser<'a> {
         }
         let name = self.parse_ident();
         self.expect(&token::Colon);
-        let ty = self.parse_ty(true);
+        let ty = self.parse_ty_sum();
         spanned(lo, self.last_span.hi, ast::StructField_ {
             kind: NamedField(name, pr),
             id: ast::DUMMY_NODE_ID,
@@ -3624,7 +3607,7 @@ impl<'a> Parser<'a> {
 
             // Potential trouble: if we allow macros with paths instead of
             // idents, we'd need to look ahead past the whole path here...
-            let pth = self.parse_path(NoTypesAllowed).path;
+            let pth = self.parse_path(NoTypesAllowed);
             self.bump();
 
             let id = match self.token {
@@ -3976,7 +3959,7 @@ impl<'a> Parser<'a> {
 
         let default = if self.token == token::Eq {
             self.bump();
-            Some(self.parse_ty(true))
+            Some(self.parse_ty_sum())
         }
         else { None };
 
@@ -4032,7 +4015,7 @@ impl<'a> Parser<'a> {
             Some(token::Comma),
             |p| {
                 p.forbid_lifetime();
-                p.parse_ty(true)
+                p.parse_ty_sum()
             }
         );
         (lifetimes, result.into_vec())
@@ -4265,7 +4248,7 @@ impl<'a> Parser<'a> {
                     // Determine whether this is the fully explicit form, `self:
                     // TYPE`.
                     if self.eat(&token::Colon) {
-                        SelfExplicit(self.parse_ty(false), self_ident)
+                        SelfExplicit(self.parse_ty_sum(), self_ident)
                     } else {
                         SelfValue(self_ident)
                     }
@@ -4277,7 +4260,7 @@ impl<'a> Parser<'a> {
                     // Determine whether this is the fully explicit form,
                     // `self: TYPE`.
                     if self.eat(&token::Colon) {
-                        SelfExplicit(self.parse_ty(false), self_ident)
+                        SelfExplicit(self.parse_ty_sum(), self_ident)
                     } else {
                         SelfValue(self_ident)
                     }
@@ -4466,7 +4449,7 @@ impl<'a> Parser<'a> {
                 && (self.look_ahead(2, |t| *t == token::OpenDelim(token::Paren))
                     || self.look_ahead(2, |t| *t == token::OpenDelim(token::Brace))) {
                 // method macro.
-                let pth = self.parse_path(NoTypesAllowed).path;
+                let pth = self.parse_path(NoTypesAllowed);
                 self.expect(&token::Not);
 
                 // eat a matched-delimiter token tree:
@@ -4564,30 +4547,25 @@ impl<'a> Parser<'a> {
         let could_be_trait = self.token != token::OpenDelim(token::Paren);
 
         // Parse the trait.
-        let mut ty = self.parse_ty(true);
+        let mut ty = self.parse_ty_sum();
 
         // Parse traits, if necessary.
         let opt_trait = if could_be_trait && self.eat_keyword(keywords::For) {
             // New-style trait. Reinterpret the type as a trait.
             let opt_trait_ref = match ty.node {
-                TyPath(ref path, None, node_id) => {
+                TyPath(ref path, node_id) => {
                     Some(TraitRef {
                         path: (*path).clone(),
                         ref_id: node_id,
                     })
                 }
-                TyPath(_, Some(_), _) => {
-                    self.span_err(ty.span,
-                                  "bounded traits are only valid in type position");
-                    None
-                }
                 _ => {
                     self.span_err(ty.span, "not a trait");
                     None
                 }
             };
 
-            ty = self.parse_ty(true);
+            ty = self.parse_ty_sum();
             opt_trait_ref
         } else {
             None
@@ -4606,7 +4584,7 @@ impl<'a> Parser<'a> {
     /// Parse a::B<String,int>
     fn parse_trait_ref(&mut self) -> TraitRef {
         ast::TraitRef {
-            path: self.parse_path(LifetimeAndTypesWithoutColons).path,
+            path: self.parse_path(LifetimeAndTypesWithoutColons),
             ref_id: ast::DUMMY_NODE_ID,
         }
     }
@@ -4638,7 +4616,7 @@ impl<'a> Parser<'a> {
         let mut generics = self.parse_generics();
 
         if self.eat(&token::Colon) {
-            let ty = self.parse_ty(true);
+            let ty = self.parse_ty_sum();
             self.span_err(ty.span, "`virtual` structs have been removed from the language");
         }
 
@@ -4673,7 +4651,7 @@ impl<'a> Parser<'a> {
                 let struct_field_ = ast::StructField_ {
                     kind: UnnamedField(p.parse_visibility()),
                     id: ast::DUMMY_NODE_ID,
-                    ty: p.parse_ty(true),
+                    ty: p.parse_ty_sum(),
                     attrs: attrs,
                 };
                 spanned(lo, p.span.hi, struct_field_)
@@ -4830,7 +4808,7 @@ impl<'a> Parser<'a> {
     fn parse_item_const(&mut self, m: Option<Mutability>) -> ItemInfo {
         let id = self.parse_ident();
         self.expect(&token::Colon);
-        let ty = self.parse_ty(true);
+        let ty = self.parse_ty_sum();
         self.expect(&token::Eq);
         let e = self.parse_expr();
         self.commit_expr_expecting(&*e, token::Semi);
@@ -5023,7 +5001,7 @@ impl<'a> Parser<'a> {
 
         let ident = self.parse_ident();
         self.expect(&token::Colon);
-        let ty = self.parse_ty(true);
+        let ty = self.parse_ty_sum();
         let hi = self.span.hi;
         self.expect(&token::Semi);
         P(ForeignItem {
@@ -5181,7 +5159,7 @@ impl<'a> Parser<'a> {
         let mut tps = self.parse_generics();
         self.parse_where_clause(&mut tps);
         self.expect(&token::Eq);
-        let ty = self.parse_ty(true);
+        let ty = self.parse_ty_sum();
         self.expect(&token::Semi);
         (ident, ItemTy(ty, tps), None)
     }
@@ -5235,7 +5213,7 @@ impl<'a> Parser<'a> {
                     &token::OpenDelim(token::Paren),
                     &token::CloseDelim(token::Paren),
                     seq_sep_trailing_allowed(token::Comma),
-                    |p| p.parse_ty(true)
+                    |p| p.parse_ty_sum()
                 );
                 for ty in arg_tys.into_iter() {
                     args.push(ast::VariantArg {
@@ -5593,7 +5571,7 @@ impl<'a> Parser<'a> {
             // MACRO INVOCATION ITEM
 
             // item macro.
-            let pth = self.parse_path(NoTypesAllowed).path;
+            let pth = self.parse_path(NoTypesAllowed);
             self.expect(&token::Not);
 
             // a 'special' identifier (like what `macro_rules!` uses)
diff --git a/src/libsyntax/print/pprust.rs b/src/libsyntax/print/pprust.rs
index 6960337c3e2c9..78412a76bfeb8 100644
--- a/src/libsyntax/print/pprust.rs
+++ b/src/libsyntax/print/pprust.rs
@@ -293,6 +293,10 @@ pub fn ty_to_string(ty: &ast::Ty) -> String {
     $to_string(|s| s.print_type(ty))
 }
 
+pub fn bounds_to_string(bounds: &[ast::TyParamBound]) -> String {
+    $to_string(|s| s.print_bounds("", bounds))
+}
+
 pub fn pat_to_string(pat: &ast::Pat) -> String {
     $to_string(|s| s.print_pat(pat))
 }
@@ -739,11 +743,15 @@ impl<'a> State<'a> {
                                       Some(&generics),
                                       None));
             }
-            ast::TyPath(ref path, ref bounds, _) => {
-                try!(self.print_bounded_path(path, bounds));
+            ast::TyPath(ref path, _) => {
+                try!(self.print_path(path, false));
+            }
+            ast::TyObjectSum(ref ty, ref bounds) => {
+                try!(self.print_type(&**ty));
+                try!(self.print_bounds("+", bounds.as_slice()));
             }
             ast::TyPolyTraitRef(ref bounds) => {
-                try!(self.print_bounds("", bounds));
+                try!(self.print_bounds("", bounds.as_slice()));
             }
             ast::TyQPath(ref qpath) => {
                 try!(word(&mut self.s, "<"));
@@ -970,7 +978,7 @@ impl<'a> State<'a> {
                     }
                     _ => {}
                 }
-                try!(self.print_bounds(":", bounds));
+                try!(self.print_bounds(":", bounds.as_slice()));
                 try!(self.print_where_clause(generics));
                 try!(word(&mut self.s, " "));
                 try!(self.bopen());
@@ -2329,7 +2337,7 @@ impl<'a> State<'a> {
 
     pub fn print_bounds(&mut self,
                         prefix: &str,
-                        bounds: &OwnedSlice<ast::TyParamBound>)
+                        bounds: &[ast::TyParamBound])
                         -> IoResult<()> {
         if !bounds.is_empty() {
             try!(word(&mut self.s, prefix));
@@ -2418,7 +2426,7 @@ impl<'a> State<'a> {
             _ => {}
         }
         try!(self.print_ident(param.ident));
-        try!(self.print_bounds(":", &param.bounds));
+        try!(self.print_bounds(":", param.bounds.as_slice()));
         match param.default {
             Some(ref default) => {
                 try!(space(&mut self.s));
@@ -2447,7 +2455,7 @@ impl<'a> State<'a> {
             }
 
             try!(self.print_ident(predicate.ident));
-            try!(self.print_bounds(":", &predicate.bounds));
+            try!(self.print_bounds(":", predicate.bounds.as_slice()));
         }
 
         Ok(())
@@ -2664,7 +2672,7 @@ impl<'a> State<'a> {
             try!(self.pclose());
         }
 
-        try!(self.print_bounds(":", bounds));
+        try!(self.print_bounds(":", bounds.as_slice()));
 
         try!(self.print_fn_output(decl));
 

From f4e29e7e9aa1da4fc91a6074b0e4df44a2986517 Mon Sep 17 00:00:00 2001
From: Niko Matsakis <niko@alum.mit.edu>
Date: Thu, 20 Nov 2014 15:08:02 -0500
Subject: [PATCH 31/40] Fix up various places that were doing `&T+'a` to do
 `&(T+'a)`

---
 src/libcore/fmt/mod.rs                                    | 6 +++---
 src/librustc/middle/expr_use_visitor.rs                   | 2 +-
 src/librustc/middle/traits/select.rs                      | 2 +-
 src/librustc/middle/typeck/rscope.rs                      | 4 ++--
 src/librustrt/unwind.rs                                   | 2 +-
 src/libserialize/json.rs                                  | 4 ++--
 src/libstd/comm/select.rs                                 | 2 +-
 src/libstd/failure.rs                                     | 2 +-
 src/libstd/io/mod.rs                                      | 4 ++--
 src/libstd/rt/backtrace.rs                                | 2 +-
 src/libsyntax/print/pprust.rs                             | 2 +-
 src/test/compile-fail/dst-index.rs                        | 2 +-
 src/test/compile-fail/issue-12470.rs                      | 2 +-
 src/test/compile-fail/issue-14285.rs                      | 2 +-
 src/test/compile-fail/kindck-copy.rs                      | 6 +++---
 src/test/compile-fail/kindck-send-object.rs               | 4 ++--
 src/test/compile-fail/kindck-send-object1.rs              | 4 ++--
 src/test/compile-fail/kindck-send-object2.rs              | 2 +-
 src/test/compile-fail/region-object-lifetime-1.rs         | 4 ++--
 src/test/compile-fail/regions-bounded-by-send.rs          | 4 ++--
 src/test/compile-fail/regions-close-object-into-object.rs | 2 +-
 src/test/compile-fail/regions-trait-variance.rs           | 2 +-
 src/test/compile-fail/trait-bounds-not-on-impl.rs         | 2 +-
 src/test/compile-fail/trait-bounds-not-on-struct.rs       | 2 +-
 src/test/compile-fail/trait-bounds-sugar.rs               | 4 ++--
 src/test/run-pass/colorful-write-macros.rs                | 2 +-
 src/test/run-pass/dst-index.rs                            | 2 +-
 src/test/run-pass/issue-10902.rs                          | 4 ++--
 src/test/run-pass/issue-11205.rs                          | 4 ++--
 src/test/run-pass/issue-14901.rs                          | 2 +-
 src/test/run-pass/issue-14958.rs                          | 2 +-
 src/test/run-pass/issue-14959.rs                          | 4 ++--
 src/test/run-pass/issue-5708.rs                           | 4 ++--
 src/test/run-pass/issue-8249.rs                           | 2 +-
 src/test/run-pass/issue-9719.rs                           | 6 +++---
 src/test/run-pass/parameterized-trait-with-bounds.rs      | 2 +-
 src/test/run-pass/regions-early-bound-trait-param.rs      | 4 ++--
 37 files changed, 56 insertions(+), 56 deletions(-)

diff --git a/src/libcore/fmt/mod.rs b/src/libcore/fmt/mod.rs
index 6e77b0a7c7936..d2bf7f1e15ab4 100644
--- a/src/libcore/fmt/mod.rs
+++ b/src/libcore/fmt/mod.rs
@@ -85,7 +85,7 @@ pub struct Formatter<'a> {
     width: Option<uint>,
     precision: Option<uint>,
 
-    buf: &'a mut FormatWriter+'a,
+    buf: &'a mut (FormatWriter+'a),
     curarg: slice::Items<'a, Argument<'a>>,
     args: &'a [Argument<'a>],
 }
@@ -565,7 +565,7 @@ impl<'a, Sized? T: Show> Show for &'a T {
 impl<'a, Sized? T: Show> Show for &'a mut T {
     fn fmt(&self, f: &mut Formatter) -> Result { (**self).fmt(f) }
 }
-impl<'a> Show for &'a Show+'a {
+impl<'a> Show for &'a (Show+'a) {
     fn fmt(&self, f: &mut Formatter) -> Result { (*self).fmt(f) }
 }
 
@@ -724,7 +724,7 @@ macro_rules! tuple (
 
 tuple! { T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, }
 
-impl<'a> Show for &'a any::Any+'a {
+impl<'a> Show for &'a (any::Any+'a) {
     fn fmt(&self, f: &mut Formatter) -> Result { f.pad("&Any") }
 }
 
diff --git a/src/librustc/middle/expr_use_visitor.rs b/src/librustc/middle/expr_use_visitor.rs
index 656feb51a1d3c..5ab3978b8a8f7 100644
--- a/src/librustc/middle/expr_use_visitor.rs
+++ b/src/librustc/middle/expr_use_visitor.rs
@@ -295,7 +295,7 @@ impl OverloadedCallType {
 pub struct ExprUseVisitor<'d,'t,'tcx,TYPER:'t> {
     typer: &'t TYPER,
     mc: mc::MemCategorizationContext<'t,TYPER>,
-    delegate: &'d mut Delegate<'tcx>+'d,
+    delegate: &'d mut (Delegate<'tcx>+'d),
 }
 
 // If the TYPER results in an error, it's because the type check
diff --git a/src/librustc/middle/traits/select.rs b/src/librustc/middle/traits/select.rs
index d1cc851c41f20..8658a5248ccc2 100644
--- a/src/librustc/middle/traits/select.rs
+++ b/src/librustc/middle/traits/select.rs
@@ -43,7 +43,7 @@ use util::ppaux::Repr;
 pub struct SelectionContext<'cx, 'tcx:'cx> {
     infcx: &'cx InferCtxt<'cx, 'tcx>,
     param_env: &'cx ty::ParameterEnvironment<'tcx>,
-    typer: &'cx Typer<'tcx>+'cx,
+    typer: &'cx (Typer<'tcx>+'cx),
 
     /// Skolemizer used specifically for skolemizing entries on the
     /// obligation stack. This ensures that all entries on the stack
diff --git a/src/librustc/middle/typeck/rscope.rs b/src/librustc/middle/typeck/rscope.rs
index 2f72d3cf50db1..3bca24f479f70 100644
--- a/src/librustc/middle/typeck/rscope.rs
+++ b/src/librustc/middle/typeck/rscope.rs
@@ -139,11 +139,11 @@ impl RegionScope for BindingRscope {
 /// A scope which simply shifts the Debruijn index of other scopes
 /// to account for binding levels.
 pub struct ShiftedRscope<'r> {
-    base_scope: &'r RegionScope+'r
+    base_scope: &'r (RegionScope+'r)
 }
 
 impl<'r> ShiftedRscope<'r> {
-    pub fn new(base_scope: &'r RegionScope+'r) -> ShiftedRscope<'r> {
+    pub fn new(base_scope: &'r (RegionScope+'r)) -> ShiftedRscope<'r> {
         ShiftedRscope { base_scope: base_scope }
     }
 }
diff --git a/src/librustrt/unwind.rs b/src/librustrt/unwind.rs
index 7544b93ce522a..697ee95df4c0f 100644
--- a/src/librustrt/unwind.rs
+++ b/src/librustrt/unwind.rs
@@ -86,7 +86,7 @@ struct Exception {
     cause: Option<Box<Any + Send>>,
 }
 
-pub type Callback = fn(msg: &Any + Send, file: &'static str, line: uint);
+pub type Callback = fn(msg: &(Any + Send), file: &'static str, line: uint);
 
 // Variables used for invoking callbacks when a task starts to unwind.
 //
diff --git a/src/libserialize/json.rs b/src/libserialize/json.rs
index 4a2ca58fc9269..19d23265b01ad 100644
--- a/src/libserialize/json.rs
+++ b/src/libserialize/json.rs
@@ -398,7 +398,7 @@ fn fmt_number_or_null(v: f64) -> string::String {
 
 /// A structure for implementing serialization to JSON.
 pub struct Encoder<'a> {
-    writer: &'a mut io::Writer+'a,
+    writer: &'a mut (io::Writer+'a),
 }
 
 impl<'a> Encoder<'a> {
@@ -602,7 +602,7 @@ impl<'a> ::Encoder<io::IoError> for Encoder<'a> {
 /// Another encoder for JSON, but prints out human-readable JSON instead of
 /// compact data
 pub struct PrettyEncoder<'a> {
-    writer: &'a mut io::Writer+'a,
+    writer: &'a mut (io::Writer+'a),
     curr_indent: uint,
     indent: uint,
 }
diff --git a/src/libstd/comm/select.rs b/src/libstd/comm/select.rs
index 621556f75ce3f..3191519815ae4 100644
--- a/src/libstd/comm/select.rs
+++ b/src/libstd/comm/select.rs
@@ -84,7 +84,7 @@ pub struct Handle<'rx, T:'rx> {
     next: *mut Handle<'static, ()>,
     prev: *mut Handle<'static, ()>,
     added: bool,
-    packet: &'rx Packet+'rx,
+    packet: &'rx (Packet+'rx),
 
     // due to our fun transmutes, we be sure to place this at the end. (nothing
     // previous relies on T)
diff --git a/src/libstd/failure.rs b/src/libstd/failure.rs
index 32a8be2290297..d839c1484e562 100644
--- a/src/libstd/failure.rs
+++ b/src/libstd/failure.rs
@@ -40,7 +40,7 @@ impl Writer for Stdio {
     }
 }
 
-pub fn on_fail(obj: &Any + Send, file: &'static str, line: uint) {
+pub fn on_fail(obj: &(Any+Send), file: &'static str, line: uint) {
     let msg = match obj.downcast_ref::<&'static str>() {
         Some(s) => *s,
         None => match obj.downcast_ref::<String>() {
diff --git a/src/libstd/io/mod.rs b/src/libstd/io/mod.rs
index 681400e9db581..311cbe6ece852 100644
--- a/src/libstd/io/mod.rs
+++ b/src/libstd/io/mod.rs
@@ -911,7 +911,7 @@ impl<'a> Reader for Box<Reader+'a> {
     }
 }
 
-impl<'a> Reader for &'a mut Reader+'a {
+impl<'a> Reader for &'a mut (Reader+'a) {
     fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> { (*self).read(buf) }
 }
 
@@ -1279,7 +1279,7 @@ impl<'a> Writer for Box<Writer+'a> {
     }
 }
 
-impl<'a> Writer for &'a mut Writer+'a {
+impl<'a> Writer for &'a mut (Writer+'a) {
     #[inline]
     fn write(&mut self, buf: &[u8]) -> IoResult<()> { (**self).write(buf) }
 
diff --git a/src/libstd/rt/backtrace.rs b/src/libstd/rt/backtrace.rs
index 8102299438749..d47256b1d1890 100644
--- a/src/libstd/rt/backtrace.rs
+++ b/src/libstd/rt/backtrace.rs
@@ -288,7 +288,7 @@ mod imp {
 
         struct Context<'a> {
             idx: int,
-            writer: &'a mut Writer+'a,
+            writer: &'a mut (Writer+'a),
             last_error: Option<IoError>,
         }
 
diff --git a/src/libsyntax/print/pprust.rs b/src/libsyntax/print/pprust.rs
index 78412a76bfeb8..954c72edff46b 100644
--- a/src/libsyntax/print/pprust.rs
+++ b/src/libsyntax/print/pprust.rs
@@ -61,7 +61,7 @@ pub struct State<'a> {
     literals: Option<Vec<comments::Literal> >,
     cur_cmnt_and_lit: CurrentCommentAndLiteral,
     boxes: Vec<pp::Breaks>,
-    ann: &'a PpAnn+'a,
+    ann: &'a (PpAnn+'a),
     encode_idents_with_hygiene: bool,
 }
 
diff --git a/src/test/compile-fail/dst-index.rs b/src/test/compile-fail/dst-index.rs
index 542562b69e6a7..f6511d68662ee 100644
--- a/src/test/compile-fail/dst-index.rs
+++ b/src/test/compile-fail/dst-index.rs
@@ -25,7 +25,7 @@ impl Index<uint, str> for S {
 struct T;
 
 impl Index<uint, Show + 'static> for T {
-    fn index<'a>(&'a self, idx: &uint) -> &'a Show + 'static {
+    fn index<'a>(&'a self, idx: &uint) -> &'a (Show + 'static) {
         static x: uint = 42;
         &x
     }
diff --git a/src/test/compile-fail/issue-12470.rs b/src/test/compile-fail/issue-12470.rs
index aa7e3cd3739b2..0202d538cf650 100644
--- a/src/test/compile-fail/issue-12470.rs
+++ b/src/test/compile-fail/issue-12470.rs
@@ -24,7 +24,7 @@ impl X for B {
 }
 
 struct A<'a> {
-    p: &'a X+'a
+    p: &'a (X+'a)
 }
 
 fn make_a<'a>(p: &'a X) -> A<'a> {
diff --git a/src/test/compile-fail/issue-14285.rs b/src/test/compile-fail/issue-14285.rs
index 624ddf0c8bbdc..cbf4412a81df2 100644
--- a/src/test/compile-fail/issue-14285.rs
+++ b/src/test/compile-fail/issue-14285.rs
@@ -14,7 +14,7 @@ struct A;
 
 impl Foo for A {}
 
-struct B<'a>(&'a Foo+'a);
+struct B<'a>(&'a (Foo+'a));
 
 fn foo<'a>(a: &Foo) -> B<'a> {
     B(a)    //~ ERROR cannot infer an appropriate lifetime
diff --git a/src/test/compile-fail/kindck-copy.rs b/src/test/compile-fail/kindck-copy.rs
index 499144698fb81..202529c30b3e6 100644
--- a/src/test/compile-fail/kindck-copy.rs
+++ b/src/test/compile-fail/kindck-copy.rs
@@ -44,15 +44,15 @@ fn test<'a,T,U:Copy>(_: &'a int) {
 
     // borrowed object types are generally ok
     assert_copy::<&'a Dummy>();
-    assert_copy::<&'a Dummy+Copy>();
-    assert_copy::<&'static Dummy+Copy>();
+    assert_copy::<&'a (Dummy+Copy)>();
+    assert_copy::<&'static (Dummy+Copy)>();
 
     // owned object types are not ok
     assert_copy::<Box<Dummy>>(); //~ ERROR `core::kinds::Copy` is not implemented
     assert_copy::<Box<Dummy+Copy>>(); //~ ERROR `core::kinds::Copy` is not implemented
 
     // mutable object types are not ok
-    assert_copy::<&'a mut Dummy+Copy>();  //~ ERROR `core::kinds::Copy` is not implemented
+    assert_copy::<&'a mut (Dummy+Copy)>();  //~ ERROR `core::kinds::Copy` is not implemented
 
     // closures are like an `&mut` object
     assert_copy::<||>(); //~ ERROR `core::kinds::Copy` is not implemented
diff --git a/src/test/compile-fail/kindck-send-object.rs b/src/test/compile-fail/kindck-send-object.rs
index 9217d05002d88..4fbb3eab8c416 100644
--- a/src/test/compile-fail/kindck-send-object.rs
+++ b/src/test/compile-fail/kindck-send-object.rs
@@ -19,7 +19,7 @@ trait Message : Send { }
 // careful with object types, who knows what they close over...
 
 fn object_ref_with_static_bound_not_ok() {
-    assert_send::<&'static Dummy+'static>();
+    assert_send::<&'static (Dummy+'static)>();
     //~^ ERROR the trait `core::kinds::Send` is not implemented
 }
 
@@ -36,7 +36,7 @@ fn closure_with_no_bound_not_ok<'a>() {
 }
 
 fn object_with_send_bound_ok() {
-    assert_send::<&'static Dummy+Send>();
+    assert_send::<&'static (Dummy+Send)>();
     assert_send::<Box<Dummy+Send>>();
     assert_send::<proc():Send>;
     assert_send::<||:Send>;
diff --git a/src/test/compile-fail/kindck-send-object1.rs b/src/test/compile-fail/kindck-send-object1.rs
index ff8daa045c66d..a551975364329 100644
--- a/src/test/compile-fail/kindck-send-object1.rs
+++ b/src/test/compile-fail/kindck-send-object1.rs
@@ -21,13 +21,13 @@ fn test51<'a>() {
     //~^ ERROR the trait `core::kinds::Send` is not implemented
 }
 fn test52<'a>() {
-    assert_send::<&'a Dummy+Send>();
+    assert_send::<&'a (Dummy+Send)>();
     //~^ ERROR does not fulfill the required lifetime
 }
 
 // ...unless they are properly bounded
 fn test60() {
-    assert_send::<&'static Dummy+Send>();
+    assert_send::<&'static (Dummy+Send)>();
 }
 fn test61() {
     assert_send::<Box<Dummy+Send>>();
diff --git a/src/test/compile-fail/kindck-send-object2.rs b/src/test/compile-fail/kindck-send-object2.rs
index d46c6e68c05c2..ea8c262830633 100644
--- a/src/test/compile-fail/kindck-send-object2.rs
+++ b/src/test/compile-fail/kindck-send-object2.rs
@@ -23,7 +23,7 @@ fn test53() {
 
 // ...unless they are properly bounded
 fn test60() {
-    assert_send::<&'static Dummy+Send>();
+    assert_send::<&'static (Dummy+Send)>();
 }
 fn test61() {
     assert_send::<Box<Dummy+Send>>();
diff --git a/src/test/compile-fail/region-object-lifetime-1.rs b/src/test/compile-fail/region-object-lifetime-1.rs
index 01daeb628ef57..4758ce71ffff5 100644
--- a/src/test/compile-fail/region-object-lifetime-1.rs
+++ b/src/test/compile-fail/region-object-lifetime-1.rs
@@ -28,14 +28,14 @@ fn borrowed_receiver_different_lifetimes<'a,'b>(x: &'a Foo) -> &'b () {
 
 // Borrowed receiver with two distinct lifetimes, but we know that
 // 'b:'a, hence &'a () is permitted.
-fn borrowed_receiver_related_lifetimes<'a,'b>(x: &'a Foo+'b) -> &'a () {
+fn borrowed_receiver_related_lifetimes<'a,'b>(x: &'a (Foo+'b)) -> &'a () {
     x.borrowed()
 }
 
 // Here we have two distinct lifetimes, but we try to return a pointer
 // with the longer lifetime when (from the signature) we only know
 // that it lives as long as the shorter lifetime. Therefore, error.
-fn borrowed_receiver_related_lifetimes2<'a,'b>(x: &'a Foo+'b) -> &'b () {
+fn borrowed_receiver_related_lifetimes2<'a,'b>(x: &'a (Foo+'b)) -> &'b () {
     x.borrowed() //~ ERROR cannot infer
 }
 
diff --git a/src/test/compile-fail/regions-bounded-by-send.rs b/src/test/compile-fail/regions-bounded-by-send.rs
index 182b40ceaae03..660a9be4f63c5 100644
--- a/src/test/compile-fail/regions-bounded-by-send.rs
+++ b/src/test/compile-fail/regions-bounded-by-send.rs
@@ -57,12 +57,12 @@ fn box_with_region_not_ok<'a>() {
 // objects with insufficient bounds no ok
 
 fn object_with_random_bound_not_ok<'a>() {
-    assert_send::<&'a Dummy+'a>();
+    assert_send::<&'a (Dummy+'a)>();
     //~^ ERROR not implemented
 }
 
 fn object_with_send_bound_not_ok<'a>() {
-    assert_send::<&'a Dummy+Send>();
+    assert_send::<&'a (Dummy+Send)>();
     //~^ ERROR does not fulfill
 }
 
diff --git a/src/test/compile-fail/regions-close-object-into-object.rs b/src/test/compile-fail/regions-close-object-into-object.rs
index 835c55c9bd17a..48945868bd355 100644
--- a/src/test/compile-fail/regions-close-object-into-object.rs
+++ b/src/test/compile-fail/regions-close-object-into-object.rs
@@ -10,7 +10,7 @@
 
 
 trait A<T> {}
-struct B<'a, T>(&'a A<T>+'a);
+struct B<'a, T>(&'a (A<T>+'a));
 
 trait X {}
 impl<'a, T> X for B<'a, T> {}
diff --git a/src/test/compile-fail/regions-trait-variance.rs b/src/test/compile-fail/regions-trait-variance.rs
index 3ceb4e3fef6df..4e31a41c4e074 100644
--- a/src/test/compile-fail/regions-trait-variance.rs
+++ b/src/test/compile-fail/regions-trait-variance.rs
@@ -31,7 +31,7 @@ impl Drop for B {
 }
 
 struct A<'r> {
-    p: &'r X+'r
+    p: &'r (X+'r)
 }
 
 fn make_a(p:&X) -> A {
diff --git a/src/test/compile-fail/trait-bounds-not-on-impl.rs b/src/test/compile-fail/trait-bounds-not-on-impl.rs
index 38c7814460147..a034352c4a694 100644
--- a/src/test/compile-fail/trait-bounds-not-on-impl.rs
+++ b/src/test/compile-fail/trait-bounds-not-on-impl.rs
@@ -13,7 +13,7 @@ trait Foo {
 
 struct Bar;
 
-impl Foo + Owned for Bar { //~ ERROR bounded traits are only valid in type position
+impl Foo + Owned for Bar { //~ ERROR not a trait
 }
 
 fn main() { }
diff --git a/src/test/compile-fail/trait-bounds-not-on-struct.rs b/src/test/compile-fail/trait-bounds-not-on-struct.rs
index 0a5909ff2efbe..081efa429c3e4 100644
--- a/src/test/compile-fail/trait-bounds-not-on-struct.rs
+++ b/src/test/compile-fail/trait-bounds-not-on-struct.rs
@@ -11,6 +11,6 @@
 
 struct Foo;
 
-fn foo(_x: Box<Foo + Send>) { } //~ ERROR kind bounds can only be used on trait types
+fn foo(_x: Box<Foo + Send>) { } //~ ERROR expected a reference to a trait
 
 fn main() { }
diff --git a/src/test/compile-fail/trait-bounds-sugar.rs b/src/test/compile-fail/trait-bounds-sugar.rs
index 7ed8db4fcd291..4da496621d1c9 100644
--- a/src/test/compile-fail/trait-bounds-sugar.rs
+++ b/src/test/compile-fail/trait-bounds-sugar.rs
@@ -16,14 +16,14 @@ trait Foo {}
 fn a(_x: Box<Foo+Send>) {
 }
 
-fn b(_x: &'static Foo+'static) {
+fn b(_x: &'static (Foo+'static)) {
 }
 
 fn c(x: Box<Foo+Sync>) {
     a(x); //~ ERROR mismatched types
 }
 
-fn d(x: &'static Foo+Sync) {
+fn d(x: &'static (Foo+Sync)) {
     b(x); //~ ERROR cannot infer
     //~^ ERROR mismatched types
 }
diff --git a/src/test/run-pass/colorful-write-macros.rs b/src/test/run-pass/colorful-write-macros.rs
index 75b8e39133105..bbb049eb96033 100644
--- a/src/test/run-pass/colorful-write-macros.rs
+++ b/src/test/run-pass/colorful-write-macros.rs
@@ -18,7 +18,7 @@ use std::fmt;
 use std::fmt::FormatWriter;
 
 struct Foo<'a> {
-    writer: &'a mut Writer+'a,
+    writer: &'a mut (Writer+'a),
     other: &'a str,
 }
 
diff --git a/src/test/run-pass/dst-index.rs b/src/test/run-pass/dst-index.rs
index 266f9bcba5f92..eaf7131e1d878 100644
--- a/src/test/run-pass/dst-index.rs
+++ b/src/test/run-pass/dst-index.rs
@@ -25,7 +25,7 @@ impl Index<uint, str> for S {
 struct T;
 
 impl Index<uint, Show + 'static> for T {
-    fn index<'a>(&'a self, idx: &uint) -> &'a Show + 'static {
+    fn index<'a>(&'a self, idx: &uint) -> &'a (Show + 'static) {
         static x: uint = 42;
         &x
     }
diff --git a/src/test/run-pass/issue-10902.rs b/src/test/run-pass/issue-10902.rs
index 84d71e1ef5de6..324a1701b2feb 100644
--- a/src/test/run-pass/issue-10902.rs
+++ b/src/test/run-pass/issue-10902.rs
@@ -10,7 +10,7 @@
 
 pub mod two_tuple {
     pub trait T {}
-    pub struct P<'a>(&'a T + 'a, &'a T + 'a);
+    pub struct P<'a>(&'a (T + 'a), &'a (T + 'a));
     pub fn f<'a>(car: &'a T, cdr: &'a T) -> P<'a> {
         P(car, cdr)
     }
@@ -18,7 +18,7 @@ pub mod two_tuple {
 
 pub mod two_fields {
     pub trait T {}
-    pub struct P<'a> { car: &'a T + 'a, cdr: &'a T + 'a }
+    pub struct P<'a> { car: &'a (T + 'a), cdr: &'a (T + 'a) }
     pub fn f<'a>(car: &'a T, cdr: &'a T) -> P<'a> {
         P{ car: car, cdr: cdr }
     }
diff --git a/src/test/run-pass/issue-11205.rs b/src/test/run-pass/issue-11205.rs
index 89224e1fb12c4..ea138311f19b3 100644
--- a/src/test/run-pass/issue-11205.rs
+++ b/src/test/run-pass/issue-11205.rs
@@ -49,7 +49,7 @@ fn main() {
     foog(x, &[box 1i]);
 
     struct T<'a> {
-        t: [&'a Foo+'a, ..2]
+        t: [&'a (Foo+'a), ..2]
     }
     let _n = T {
         t: [&1i, &2i]
@@ -64,7 +64,7 @@ fn main() {
     };
 
     struct F<'b> {
-        t: &'b [&'b Foo+'b]
+        t: &'b [&'b (Foo+'b)]
     }
     let _n = F {
         t: &[&1i, &2i]
diff --git a/src/test/run-pass/issue-14901.rs b/src/test/run-pass/issue-14901.rs
index 647bbfbd65da0..e41754fd1b998 100644
--- a/src/test/run-pass/issue-14901.rs
+++ b/src/test/run-pass/issue-14901.rs
@@ -11,7 +11,7 @@
 use std::io::Reader;
 
 enum Wrapper<'a> {
-    WrapReader(&'a Reader + 'a)
+    WrapReader(&'a (Reader + 'a))
 }
 
 trait Wrap<'a> {
diff --git a/src/test/run-pass/issue-14958.rs b/src/test/run-pass/issue-14958.rs
index 7f3321e0b3eed..1ffd349a65385 100644
--- a/src/test/run-pass/issue-14958.rs
+++ b/src/test/run-pass/issue-14958.rs
@@ -14,7 +14,7 @@ trait Foo {}
 
 struct Bar;
 
-impl<'a> std::ops::Fn<(&'a Foo+'a,), ()> for Bar {
+impl<'a> std::ops::Fn<(&'a (Foo+'a),), ()> for Bar {
     extern "rust-call" fn call(&self, _: (&'a Foo,)) {}
 }
 
diff --git a/src/test/run-pass/issue-14959.rs b/src/test/run-pass/issue-14959.rs
index 6cc5ab4d6cbaa..99472bb3610f8 100644
--- a/src/test/run-pass/issue-14959.rs
+++ b/src/test/run-pass/issue-14959.rs
@@ -33,8 +33,8 @@ impl Alloy {
     }
 }
 
-impl<'a, 'b> Fn<(&'b mut Response+'b,),()> for SendFile<'a> {
-    extern "rust-call" fn call(&self, (_res,): (&'b mut Response+'b,)) {}
+impl<'a, 'b> Fn<(&'b mut (Response+'b),),()> for SendFile<'a> {
+    extern "rust-call" fn call(&self, (_res,): (&'b mut (Response+'b),)) {}
 }
 
 impl<Rq: Request, Rs: Response> Ingot<Rq, Rs> for HelloWorld {
diff --git a/src/test/run-pass/issue-5708.rs b/src/test/run-pass/issue-5708.rs
index 9c728005b6fb7..61ae273aef50b 100644
--- a/src/test/run-pass/issue-5708.rs
+++ b/src/test/run-pass/issue-5708.rs
@@ -29,7 +29,7 @@ impl Inner for int {
 }
 
 struct Outer<'a> {
-    inner: &'a Inner+'a
+    inner: &'a (Inner+'a)
 }
 
 impl<'a> Outer<'a> {
@@ -51,7 +51,7 @@ pub fn main() {
 pub trait MyTrait<T> { }
 
 pub struct MyContainer<'a, T> {
-    foos: Vec<&'a MyTrait<T>+'a> ,
+    foos: Vec<&'a (MyTrait<T>+'a)> ,
 }
 
 impl<'a, T> MyContainer<'a, T> {
diff --git a/src/test/run-pass/issue-8249.rs b/src/test/run-pass/issue-8249.rs
index dae5db11b0a80..44f07def531bc 100644
--- a/src/test/run-pass/issue-8249.rs
+++ b/src/test/run-pass/issue-8249.rs
@@ -13,7 +13,7 @@ struct B;
 impl A for B {}
 
 struct C<'a> {
-    foo: &'a mut A+'a,
+    foo: &'a mut (A+'a),
 }
 
 fn foo(a: &mut A) {
diff --git a/src/test/run-pass/issue-9719.rs b/src/test/run-pass/issue-9719.rs
index ebb9b20ec25fe..4c6b9a3aaa0e4 100644
--- a/src/test/run-pass/issue-9719.rs
+++ b/src/test/run-pass/issue-9719.rs
@@ -16,7 +16,7 @@ mod a {
     pub trait X {}
     impl X for int {}
 
-    pub struct Z<'a>(Enum<&'a X+'a>);
+    pub struct Z<'a>(Enum<&'a (X+'a)>);
     fn foo() { let x = 42i; let z = Z(Enum::A(&x as &X)); let _ = z; }
 }
 
@@ -24,7 +24,7 @@ mod b {
     trait X {}
     impl X for int {}
     struct Y<'a>{
-        x:Option<&'a X+'a>,
+        x:Option<&'a (X+'a)>,
     }
 
     fn bar() {
@@ -36,7 +36,7 @@ mod b {
 mod c {
     pub trait X { fn f(&self); }
     impl X for int { fn f(&self) {} }
-    pub struct Z<'a>(Option<&'a X+'a>);
+    pub struct Z<'a>(Option<&'a (X+'a)>);
     fn main() { let x = 42i; let z = Z(Some(&x as &X)); let _ = z; }
 }
 
diff --git a/src/test/run-pass/parameterized-trait-with-bounds.rs b/src/test/run-pass/parameterized-trait-with-bounds.rs
index 339c9e3c490c7..840e58848a742 100644
--- a/src/test/run-pass/parameterized-trait-with-bounds.rs
+++ b/src/test/run-pass/parameterized-trait-with-bounds.rs
@@ -19,7 +19,7 @@ mod foo {
     pub trait D<'a, T> {}
 }
 
-fn foo1<T>(_: &A<T> + Send) {}
+fn foo1<T>(_: &(A<T> + Send)) {}
 fn foo2<T>(_: Box<A<T> + Send + Sync>) {}
 fn foo3<T>(_: Box<B<int, uint> + 'static>) {}
 fn foo4<'a, T>(_: Box<C<'a, T> + 'static + Send>) {}
diff --git a/src/test/run-pass/regions-early-bound-trait-param.rs b/src/test/run-pass/regions-early-bound-trait-param.rs
index faf371e8826b2..907f610ff25db 100644
--- a/src/test/run-pass/regions-early-bound-trait-param.rs
+++ b/src/test/run-pass/regions-early-bound-trait-param.rs
@@ -30,7 +30,7 @@ fn object_invoke1<'d>(x: &'d Trait<'d>) -> (int, int) {
 }
 
 struct Struct1<'e> {
-    f: &'e Trait<'e>+'e
+    f: &'e (Trait<'e>+'e)
 }
 
 fn field_invoke1<'f, 'g>(x: &'g Struct1<'f>) -> (int,int) {
@@ -40,7 +40,7 @@ fn field_invoke1<'f, 'g>(x: &'g Struct1<'f>) -> (int,int) {
 }
 
 struct Struct2<'h, 'i> {
-    f: &'h Trait<'i>+'h
+    f: &'h (Trait<'i>+'h)
 }
 
 fn object_invoke2<'j, 'k>(x: &'k Trait<'j>) -> int {

From c4a3be6bd1b9b468478c925c8eaa0e54df56a8fe Mon Sep 17 00:00:00 2001
From: Niko Matsakis <niko@alum.mit.edu>
Date: Thu, 20 Nov 2014 15:08:48 -0500
Subject: [PATCH 32/40] Rote changes due to the fact that ast paths no longer
 carry these extraneous bounds.

---
 src/librustc/lint/builtin.rs                  |  2 +-
 src/librustc/metadata/encoder.rs              |  3 +--
 src/librustc/middle/privacy.rs                | 10 +++----
 src/librustc/middle/resolve.rs                | 15 ++++++-----
 src/librustc/middle/resolve_lifetime.rs       |  9 +------
 .../middle/typeck/infer/error_reporting.rs    |  4 +--
 src/librustc_trans/save/mod.rs                |  4 +--
 src/libsyntax/ast_util.rs                     |  2 +-
 src/libsyntax/ext/build.rs                    | 23 ++++++++++------
 src/libsyntax/ext/deriving/generic/mod.rs     |  2 +-
 src/libsyntax/ext/deriving/generic/ty.rs      |  4 +--
 src/libsyntax/ext/format.rs                   |  2 +-
 src/libsyntax/ext/tt/macro_parser.rs          |  4 +--
 src/libsyntax/fold.rs                         | 10 ++++---
 src/libsyntax/print/pprust.rs                 | 26 +++++--------------
 src/libsyntax/test.rs                         |  3 +--
 src/libsyntax/visit.rs                        | 12 ++++-----
 17 files changed, 60 insertions(+), 75 deletions(-)

diff --git a/src/librustc/lint/builtin.rs b/src/librustc/lint/builtin.rs
index 9fe7a21243f18..1dbd170a0d997 100644
--- a/src/librustc/lint/builtin.rs
+++ b/src/librustc/lint/builtin.rs
@@ -421,7 +421,7 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
 impl<'a, 'tcx, 'v> Visitor<'v> for ImproperCTypesVisitor<'a, 'tcx> {
     fn visit_ty(&mut self, ty: &ast::Ty) {
         match ty.node {
-            ast::TyPath(_, _, id) => self.check_def(ty.span, ty.id, id),
+            ast::TyPath(_, id) => self.check_def(ty.span, ty.id, id),
             _ => (),
         }
         visit::walk_ty(self, ty);
diff --git a/src/librustc/metadata/encoder.rs b/src/librustc/metadata/encoder.rs
index 7e4d2621f1837..8c21e559ec1f8 100644
--- a/src/librustc/metadata/encoder.rs
+++ b/src/librustc/metadata/encoder.rs
@@ -1230,10 +1230,9 @@ fn encode_info_for_item(ecx: &EncodeContext,
         encode_name(rbml_w, item.ident.name);
         encode_attributes(rbml_w, item.attrs.as_slice());
         match ty.node {
-            ast::TyPath(ref path, ref bounds, _) if path.segments
+            ast::TyPath(ref path, _) if path.segments
                                                         .len() == 1 => {
                 let ident = path.segments.last().unwrap().identifier;
-                assert!(bounds.is_none());
                 encode_impl_type_basename(rbml_w, ident);
             }
             _ => {}
diff --git a/src/librustc/middle/privacy.rs b/src/librustc/middle/privacy.rs
index 66c782877f9ca..ec939d19b7240 100644
--- a/src/librustc/middle/privacy.rs
+++ b/src/librustc/middle/privacy.rs
@@ -243,7 +243,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for EmbargoVisitor<'a, 'tcx> {
             // * Private trait impls for private types can be completely ignored
             ast::ItemImpl(_, _, ref ty, ref impl_items) => {
                 let public_ty = match ty.node {
-                    ast::TyPath(_, _, id) => {
+                    ast::TyPath(_, id) => {
                         match self.tcx.def_map.borrow()[id].clone() {
                             def::DefPrimTy(..) => true,
                             def => {
@@ -311,7 +311,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for EmbargoVisitor<'a, 'tcx> {
 
             ast::ItemTy(ref ty, _) if public_first => {
                 match ty.node {
-                    ast::TyPath(_, _, id) => {
+                    ast::TyPath(_, id) => {
                         match self.tcx.def_map.borrow()[id].clone() {
                             def::DefPrimTy(..) | def::DefTyParam(..) => {},
                             def => {
@@ -616,7 +616,7 @@ impl<'a, 'tcx> PrivacyVisitor<'a, 'tcx> {
                     // was private.
                     ast::ItemImpl(_, _, ref ty, _) => {
                         let id = match ty.node {
-                            ast::TyPath(_, _, id) => id,
+                            ast::TyPath(_, id) => id,
                             _ => return Some((err_span, err_msg, None)),
                         };
                         let def = self.tcx.def_map.borrow()[id].clone();
@@ -1292,7 +1292,7 @@ impl<'a, 'tcx> VisiblePrivateTypesVisitor<'a, 'tcx> {
 impl<'a, 'b, 'tcx, 'v> Visitor<'v> for CheckTypeForPrivatenessVisitor<'a, 'b, 'tcx> {
     fn visit_ty(&mut self, ty: &ast::Ty) {
         match ty.node {
-            ast::TyPath(_, _, path_id) => {
+            ast::TyPath(_, path_id) => {
                 if self.inner.path_is_private_type(path_id) {
                     self.contains_private = true;
                     // found what we're looking for so let's stop
@@ -1493,7 +1493,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for VisiblePrivateTypesVisitor<'a, 'tcx> {
 
     fn visit_ty(&mut self, t: &ast::Ty) {
         match t.node {
-            ast::TyPath(ref p, _, path_id) => {
+            ast::TyPath(ref p, path_id) => {
                 if !self.tcx.sess.features.borrow().visible_private_types &&
                         self.path_is_private_type(path_id) {
                     self.tcx.sess.span_err(p.span,
diff --git a/src/librustc/middle/resolve.rs b/src/librustc/middle/resolve.rs
index d334395e9117f..bdb3edef6bdcc 100644
--- a/src/librustc/middle/resolve.rs
+++ b/src/librustc/middle/resolve.rs
@@ -63,7 +63,7 @@ use syntax::ast::{PolyTraitRef, PrimTy, Public, SelfExplicit, SelfStatic};
 use syntax::ast::{RegionTyParamBound, StmtDecl, StructField};
 use syntax::ast::{StructVariantKind, TraitRef, TraitTyParamBound};
 use syntax::ast::{TupleVariantKind, Ty, TyBool, TyChar, TyClosure, TyF32};
-use syntax::ast::{TyF64, TyFloat, TyI, TyI8, TyI16, TyI32, TyI64, TyInt};
+use syntax::ast::{TyF64, TyFloat, TyI, TyI8, TyI16, TyI32, TyI64, TyInt, TyObjectSum};
 use syntax::ast::{TyParam, TyParamBound, TyPath, TyPtr, TyPolyTraitRef, TyProc, TyQPath};
 use syntax::ast::{TyRptr, TyStr, TyU, TyU8, TyU16, TyU32, TyU64, TyUint};
 use syntax::ast::{TypeImplItem, UnnamedField};
@@ -4742,7 +4742,7 @@ impl<'a> Resolver<'a> {
                 // type, the result will be that the type name resolves to a module but not
                 // a type (shadowing any imported modules or types with this name), leading
                 // to weird user-visible bugs. So we ward this off here. See #15060.
-                TyPath(ref path, _, path_id) => {
+                TyPath(ref path, path_id) => {
                     match self.def_map.borrow().get(&path_id) {
                         // FIXME: should we catch other options and give more precise errors?
                         Some(&DefMod(_)) => {
@@ -4908,7 +4908,7 @@ impl<'a> Resolver<'a> {
             // Like path expressions, the interpretation of path types depends
             // on whether the path has multiple elements in it or not.
 
-            TyPath(ref path, ref bounds, path_id) => {
+            TyPath(ref path, path_id) => {
                 // This is a path in the type namespace. Walk through scopes
                 // looking for it.
                 let mut result_def = None;
@@ -4978,11 +4978,12 @@ impl<'a> Resolver<'a> {
                         self.resolve_error(ty.span, msg.as_slice());
                     }
                 }
+            }
 
-                bounds.as_ref().map(|bound_vec| {
-                    self.resolve_type_parameter_bounds(ty.id, bound_vec,
+            TyObjectSum(ref ty, ref bound_vec) => {
+                self.resolve_type(&**ty);
+                self.resolve_type_parameter_bounds(ty.id, bound_vec,
                                                        TraitBoundingTypeParameter);
-                });
             }
 
             TyQPath(ref qpath) => {
@@ -5619,7 +5620,7 @@ impl<'a> Resolver<'a> {
         fn extract_path_and_node_id(t: &Ty, allow: FallbackChecks)
                                                     -> Option<(Path, NodeId, FallbackChecks)> {
             match t.node {
-                TyPath(ref path, _, node_id) => Some((path.clone(), node_id, allow)),
+                TyPath(ref path, node_id) => Some((path.clone(), node_id, allow)),
                 TyPtr(ref mut_ty) => extract_path_and_node_id(&*mut_ty.ty, OnlyTraitAndStatics),
                 TyRptr(_, ref mut_ty) => extract_path_and_node_id(&*mut_ty.ty, allow),
                 // This doesn't handle the remaining `Ty` variants as they are not
diff --git a/src/librustc/middle/resolve_lifetime.rs b/src/librustc/middle/resolve_lifetime.rs
index fae64ff924274..02e353d6b1fdd 100644
--- a/src/librustc/middle/resolve_lifetime.rs
+++ b/src/librustc/middle/resolve_lifetime.rs
@@ -162,7 +162,7 @@ impl<'a, 'v> Visitor<'v> for LifetimeContext<'a> {
                     visit::walk_ty(this, ty);
                 });
             }
-            ast::TyPath(ref path, ref opt_bounds, id) => {
+            ast::TyPath(ref path, id) => {
                 // if this path references a trait, then this will resolve to
                 // a trait ref, which introduces a binding scope.
                 match self.def_map.borrow().get(&id) {
@@ -170,13 +170,6 @@ impl<'a, 'v> Visitor<'v> for LifetimeContext<'a> {
                         self.with(LateScope(&Vec::new(), self.scope), |this| {
                             this.visit_path(path, id);
                         });
-
-                        match *opt_bounds {
-                            Some(ref bounds) => {
-                                visit::walk_ty_param_bounds_helper(self, bounds);
-                            }
-                            None => { }
-                        }
                     }
                     _ => {
                         visit::walk_ty(self, ty);
diff --git a/src/librustc/middle/typeck/infer/error_reporting.rs b/src/librustc/middle/typeck/infer/error_reporting.rs
index bc36a2bd801b4..a78977d9fc7f6 100644
--- a/src/librustc/middle/typeck/infer/error_reporting.rs
+++ b/src/librustc/middle/typeck/infer/error_reporting.rs
@@ -1249,7 +1249,7 @@ impl<'a, 'tcx> Rebuilder<'a, 'tcx> {
                     }
                     ty_queue.push(&*mut_ty.ty);
                 }
-                ast::TyPath(ref path, ref bounds, id) => {
+                ast::TyPath(ref path, id) => {
                     let a_def = match self.tcx.def_map.borrow().get(&id) {
                         None => {
                             self.tcx
@@ -1296,7 +1296,7 @@ impl<'a, 'tcx> Rebuilder<'a, 'tcx> {
                             let new_path = self.rebuild_path(rebuild_info, lifetime);
                             let to = ast::Ty {
                                 id: cur_ty.id,
-                                node: ast::TyPath(new_path, bounds.clone(), id),
+                                node: ast::TyPath(new_path, id),
                                 span: cur_ty.span
                             };
                             new_ty = self.rebuild_ty(new_ty, P(to));
diff --git a/src/librustc_trans/save/mod.rs b/src/librustc_trans/save/mod.rs
index ba6292f8975da..f5c732d9adcf1 100644
--- a/src/librustc_trans/save/mod.rs
+++ b/src/librustc_trans/save/mod.rs
@@ -651,7 +651,7 @@ impl <'l, 'tcx> DxrVisitor<'l, 'tcx> {
                     typ: &ast::Ty,
                     impl_items: &Vec<ast::ImplItem>) {
         match typ.node {
-            ast::TyPath(ref path, _, id) => {
+            ast::TyPath(ref path, id) => {
                 match self.lookup_type_ref(id) {
                     Some(id) => {
                         let sub_span = self.span.sub_span_for_type_name(path.span);
@@ -1256,7 +1256,7 @@ impl<'l, 'tcx, 'v> Visitor<'v> for DxrVisitor<'l, 'tcx> {
         }
 
         match t.node {
-            ast::TyPath(ref path, _, id) => {
+            ast::TyPath(ref path, id) => {
                 match self.lookup_type_ref(id) {
                     Some(id) => {
                         let sub_span = self.span.sub_span_for_type_name(t.span);
diff --git a/src/libsyntax/ast_util.rs b/src/libsyntax/ast_util.rs
index 043e79bffd9e9..4d6ac4f343096 100644
--- a/src/libsyntax/ast_util.rs
+++ b/src/libsyntax/ast_util.rs
@@ -454,7 +454,7 @@ impl<'a, 'v, O: IdVisitingOperation> Visitor<'v> for IdVisitor<'a, O> {
     fn visit_ty(&mut self, typ: &Ty) {
         self.operation.visit_id(typ.id);
         match typ.node {
-            TyPath(_, _, id) => self.operation.visit_id(id),
+            TyPath(_, id) => self.operation.visit_id(id),
             _ => {}
         }
         visit::walk_ty(self, typ)
diff --git a/src/libsyntax/ext/build.rs b/src/libsyntax/ext/build.rs
index 2c7f9e889f8b2..bd01e5e643020 100644
--- a/src/libsyntax/ext/build.rs
+++ b/src/libsyntax/ext/build.rs
@@ -44,7 +44,8 @@ pub trait AstBuilder {
     fn ty_mt(&self, ty: P<ast::Ty>, mutbl: ast::Mutability) -> ast::MutTy;
 
     fn ty(&self, span: Span, ty: ast::Ty_) -> P<ast::Ty>;
-    fn ty_path(&self, ast::Path, Option<OwnedSlice<ast::TyParamBound>>) -> P<ast::Ty>;
+    fn ty_path(&self, ast::Path) -> P<ast::Ty>;
+    fn ty_sum(&self, ast::Path, OwnedSlice<ast::TyParamBound>) -> P<ast::Ty>;
     fn ty_ident(&self, span: Span, idents: ast::Ident) -> P<ast::Ty>;
 
     fn ty_rptr(&self, span: Span,
@@ -344,17 +345,21 @@ impl<'a> AstBuilder for ExtCtxt<'a> {
         })
     }
 
-    fn ty_path(&self, path: ast::Path, bounds: Option<OwnedSlice<ast::TyParamBound>>)
-              -> P<ast::Ty> {
+    fn ty_path(&self, path: ast::Path) -> P<ast::Ty> {
+        self.ty(path.span, ast::TyPath(path, ast::DUMMY_NODE_ID))
+    }
+
+    fn ty_sum(&self, path: ast::Path, bounds: OwnedSlice<ast::TyParamBound>) -> P<ast::Ty> {
         self.ty(path.span,
-                ast::TyPath(path, bounds, ast::DUMMY_NODE_ID))
+                ast::TyObjectSum(self.ty_path(path),
+                                 bounds))
     }
 
     // Might need to take bounds as an argument in the future, if you ever want
     // to generate a bounded existential trait type.
     fn ty_ident(&self, span: Span, ident: ast::Ident)
         -> P<ast::Ty> {
-        self.ty_path(self.path_ident(span, ident), None)
+        self.ty_path(self.path_ident(span, ident))
     }
 
     fn ty_rptr(&self,
@@ -386,7 +391,7 @@ impl<'a> AstBuilder for ExtCtxt<'a> {
                               self.ident_of("Option")
                           ),
                           Vec::new(),
-                          vec!( ty )), None)
+                          vec!( ty )))
     }
 
     fn ty_field_imm(&self, span: Span, name: Ident, ty: P<ast::Ty>) -> ast::TypeField {
@@ -425,8 +430,10 @@ impl<'a> AstBuilder for ExtCtxt<'a> {
     }
 
     fn ty_vars_global(&self, ty_params: &OwnedSlice<ast::TyParam>) -> Vec<P<ast::Ty>> {
-        ty_params.iter().map(|p| self.ty_path(
-                self.path_global(DUMMY_SP, vec!(p.ident)), None)).collect()
+        ty_params
+            .iter()
+            .map(|p| self.ty_path(self.path_global(DUMMY_SP, vec!(p.ident))))
+            .collect()
     }
 
     fn trait_ref(&self, path: ast::Path) -> ast::TraitRef {
diff --git a/src/libsyntax/ext/deriving/generic/mod.rs b/src/libsyntax/ext/deriving/generic/mod.rs
index fcd4966683d3b..d5f472bd82710 100644
--- a/src/libsyntax/ext/deriving/generic/mod.rs
+++ b/src/libsyntax/ext/deriving/generic/mod.rs
@@ -444,7 +444,7 @@ impl<'a> TraitDef<'a> {
         // Create the type of `self`.
         let self_type = cx.ty_path(
             cx.path_all(self.span, false, vec!( type_ident ), self_lifetimes,
-                        self_ty_params.into_vec()), None);
+                        self_ty_params.into_vec()));
 
         let attr = cx.attribute(
             self.span,
diff --git a/src/libsyntax/ext/deriving/generic/ty.rs b/src/libsyntax/ext/deriving/generic/ty.rs
index 700ada8b4ad8f..6614ab50f1e45 100644
--- a/src/libsyntax/ext/deriving/generic/ty.rs
+++ b/src/libsyntax/ext/deriving/generic/ty.rs
@@ -70,7 +70,7 @@ impl<'a> Path<'a> {
                  self_ty: Ident,
                  self_generics: &Generics)
                  -> P<ast::Ty> {
-        cx.ty_path(self.to_path(cx, span, self_ty, self_generics), None)
+        cx.ty_path(self.to_path(cx, span, self_ty, self_generics))
     }
     pub fn to_path(&self,
                    cx: &ExtCtxt,
@@ -152,7 +152,7 @@ impl<'a> Ty<'a> {
             }
             Literal(ref p) => { p.to_ty(cx, span, self_ty, self_generics) }
             Self  => {
-                cx.ty_path(self.to_path(cx, span, self_ty, self_generics), None)
+                cx.ty_path(self.to_path(cx, span, self_ty, self_generics))
             }
             Tuple(ref fields) => {
                 let ty = ast::TyTup(fields.iter()
diff --git a/src/libsyntax/ext/format.rs b/src/libsyntax/ext/format.rs
index b04a800a32d2a..6ec12b4d603c1 100644
--- a/src/libsyntax/ext/format.rs
+++ b/src/libsyntax/ext/format.rs
@@ -531,7 +531,7 @@ impl<'a, 'b> Context<'a, 'b> {
                     true, Context::rtpath(self.ecx, "Argument"),
                     vec![static_lifetime],
                     vec![]
-                ), None);
+                ));
             lets.push(Context::item_static_array(self.ecx,
                                                  static_args_name,
                                                  piece_ty,
diff --git a/src/libsyntax/ext/tt/macro_parser.rs b/src/libsyntax/ext/tt/macro_parser.rs
index b4cd9779ae251..4785fe37293c0 100644
--- a/src/libsyntax/ext/tt/macro_parser.rs
+++ b/src/libsyntax/ext/tt/macro_parser.rs
@@ -514,7 +514,7 @@ pub fn parse_nt(p: &mut Parser, name: &str) -> Nonterminal {
       "stmt" => token::NtStmt(p.parse_stmt(Vec::new())),
       "pat" => token::NtPat(p.parse_pat()),
       "expr" => token::NtExpr(p.parse_expr()),
-      "ty" => token::NtTy(p.parse_ty(false /* no need to disambiguate*/)),
+      "ty" => token::NtTy(p.parse_ty()),
       // this could be handled like a token, since it is one
       "ident" => match p.token {
         token::Ident(sn,b) => { p.bump(); token::NtIdent(box sn,b) }
@@ -525,7 +525,7 @@ pub fn parse_nt(p: &mut Parser, name: &str) -> Nonterminal {
         }
       },
       "path" => {
-        token::NtPath(box p.parse_path(LifetimeAndTypesWithoutColons).path)
+        token::NtPath(box p.parse_path(LifetimeAndTypesWithoutColons))
       }
       "meta" => token::NtMeta(p.parse_meta_item()),
       "tt" => {
diff --git a/src/libsyntax/fold.rs b/src/libsyntax/fold.rs
index 6941c0e9c1800..122f99cabb3f6 100644
--- a/src/libsyntax/fold.rs
+++ b/src/libsyntax/fold.rs
@@ -433,11 +433,13 @@ pub fn noop_fold_ty<T: Folder>(t: P<Ty>, fld: &mut T) -> P<Ty> {
             }
             TyTup(tys) => TyTup(tys.move_map(|ty| fld.fold_ty(ty))),
             TyParen(ty) => TyParen(fld.fold_ty(ty)),
-            TyPath(path, bounds, id) => {
+            TyPath(path, id) => {
                 let id = fld.new_id(id);
-                TyPath(fld.fold_path(path),
-                        fld.fold_opt_bounds(bounds),
-                        id)
+                TyPath(fld.fold_path(path), id)
+            }
+            TyObjectSum(ty, bounds) => {
+                TyObjectSum(fld.fold_ty(ty),
+                            fld.fold_bounds(bounds))
             }
             TyQPath(qpath) => {
                 TyQPath(fld.fold_qpath(qpath))
diff --git a/src/libsyntax/print/pprust.rs b/src/libsyntax/print/pprust.rs
index 954c72edff46b..ab78d5ecbfdce 100644
--- a/src/libsyntax/print/pprust.rs
+++ b/src/libsyntax/print/pprust.rs
@@ -1916,11 +1916,11 @@ impl<'a> State<'a> {
         self.print_expr(coll)
     }
 
-    fn print_path_(&mut self,
-                   path: &ast::Path,
-                   colons_before_params: bool,
-                   opt_bounds: &Option<OwnedSlice<ast::TyParamBound>>)
-        -> IoResult<()> {
+    fn print_path(&mut self,
+                  path: &ast::Path,
+                  colons_before_params: bool)
+                  -> IoResult<()>
+    {
         try!(self.maybe_print_comment(path.span.lo));
         if path.global {
             try!(word(&mut self.s, "::"));
@@ -1939,10 +1939,7 @@ impl<'a> State<'a> {
             try!(self.print_path_parameters(&segment.parameters, colons_before_params));
         }
 
-        match *opt_bounds {
-            None => Ok(()),
-            Some(ref bounds) => self.print_bounds("+", bounds)
-        }
+        Ok(())
     }
 
     fn print_path_parameters(&mut self,
@@ -2005,17 +2002,6 @@ impl<'a> State<'a> {
         Ok(())
     }
 
-    fn print_path(&mut self, path: &ast::Path,
-                  colons_before_params: bool) -> IoResult<()> {
-        self.print_path_(path, colons_before_params, &None)
-    }
-
-    fn print_bounded_path(&mut self, path: &ast::Path,
-                          bounds: &Option<OwnedSlice<ast::TyParamBound>>)
-        -> IoResult<()> {
-        self.print_path_(path, false, bounds)
-    }
-
     pub fn print_pat(&mut self, pat: &ast::Pat) -> IoResult<()> {
         try!(self.maybe_print_comment(pat.span.lo));
         try!(self.ann.pre(self, NodePat(pat)));
diff --git a/src/libsyntax/test.rs b/src/libsyntax/test.rs
index f21a3185d6d3e..05828fc05f8c6 100644
--- a/src/libsyntax/test.rs
+++ b/src/libsyntax/test.rs
@@ -482,8 +482,7 @@ fn mk_tests(cx: &TestCtxt) -> P<ast::Item> {
     let ecx = &cx.ext_cx;
     let struct_type = ecx.ty_path(ecx.path(sp, vec![ecx.ident_of("self"),
                                                     ecx.ident_of("test"),
-                                                    ecx.ident_of("TestDescAndFn")]),
-                                  None);
+                                                    ecx.ident_of("TestDescAndFn")]));
     let static_lt = ecx.lifetime(sp, token::special_idents::static_lifetime.name);
     // &'static [self::test::TestDescAndFn]
     let static_type = ecx.ty_rptr(sp,
diff --git a/src/libsyntax/visit.rs b/src/libsyntax/visit.rs
index 3f87dbc0740ec..95679bc6bf0d1 100644
--- a/src/libsyntax/visit.rs
+++ b/src/libsyntax/visit.rs
@@ -404,14 +404,12 @@ pub fn walk_ty<'v, V: Visitor<'v>>(visitor: &mut V, typ: &'v Ty) {
             walk_fn_ret_ty(visitor, &function_declaration.decl.output);
             walk_lifetime_decls_helper(visitor, &function_declaration.lifetimes);
         }
-        TyPath(ref path, ref opt_bounds, id) => {
+        TyPath(ref path, id) => {
             visitor.visit_path(path, id);
-            match *opt_bounds {
-                Some(ref bounds) => {
-                    walk_ty_param_bounds_helper(visitor, bounds);
-                }
-                None => { }
-            }
+        }
+        TyObjectSum(ref ty, ref bounds) => {
+            visitor.visit_ty(&**ty);
+            walk_ty_param_bounds_helper(visitor, bounds);
         }
         TyQPath(ref qpath) => {
             visitor.visit_ty(&*qpath.self_type);

From bc2356558d73f5b197a693ffe20e8c610d182d82 Mon Sep 17 00:00:00 2001
From: Niko Matsakis <niko@alum.mit.edu>
Date: Thu, 20 Nov 2014 19:44:49 -0500
Subject: [PATCH 33/40] Fix rustdoc

---
 src/librustdoc/clean/mod.rs | 23 +++++++++++++++++------
 1 file changed, 17 insertions(+), 6 deletions(-)

diff --git a/src/librustdoc/clean/mod.rs b/src/librustdoc/clean/mod.rs
index 5985516a559f3..6c04707b5ec1d 100644
--- a/src/librustdoc/clean/mod.rs
+++ b/src/librustdoc/clean/mod.rs
@@ -972,7 +972,7 @@ impl Clean<Item> for doctree::Trait {
 
 impl Clean<Type> for ast::TraitRef {
     fn clean(&self, cx: &DocContext) -> Type {
-        resolve_type(cx, self.path.clean(cx), None, self.ref_id)
+        resolve_type(cx, self.path.clean(cx), self.ref_id)
     }
 }
 
@@ -1258,8 +1258,19 @@ impl Clean<Type> for ast::Ty {
             TyFixedLengthVec(ref ty, ref e) => FixedVector(box ty.clean(cx),
                                                            e.span.to_src(cx)),
             TyTup(ref tys) => Tuple(tys.clean(cx)),
-            TyPath(ref p, ref tpbs, id) => {
-                resolve_type(cx, p.clean(cx), tpbs.clean(cx), id)
+            TyPath(ref p, id) => {
+                resolve_type(cx, p.clean(cx), id)
+            }
+            TyObjectSum(ref lhs, ref bounds) => {
+                let lhs_ty = lhs.clean(cx);
+                match lhs_ty {
+                    ResolvedPath { path, typarams: None, did } => {
+                        ResolvedPath { path: path, typarams: Some(bounds.clean(cx)), did: did}
+                    }
+                    _ => {
+                        lhs_ty // shouldn't happen
+                    }
+                }
             }
             TyClosure(ref c) => Closure(box c.clean(cx)),
             TyProc(ref c) => Proc(box c.clean(cx)),
@@ -2110,8 +2121,8 @@ fn name_from_pat(p: &ast::Pat) -> String {
 }
 
 /// Given a Type, resolve it using the def_map
-fn resolve_type(cx: &DocContext, path: Path,
-                tpbs: Option<Vec<TyParamBound>>,
+fn resolve_type(cx: &DocContext,
+                path: Path,
                 id: ast::NodeId) -> Type {
     let tcx = match cx.tcx_opt() {
         Some(tcx) => tcx,
@@ -2148,7 +2159,7 @@ fn resolve_type(cx: &DocContext, path: Path,
         _ => {}
     };
     let did = register_def(&*cx, def);
-    ResolvedPath { path: path, typarams: tpbs, did: did }
+    ResolvedPath { path: path, typarams: None, did: did }
 }
 
 fn register_def(cx: &DocContext, def: def::Def) -> ast::DefId {

From 1479a909bbeb6d76a1a0a165518997241e601925 Mon Sep 17 00:00:00 2001
From: Niko Matsakis <niko@alum.mit.edu>
Date: Thu, 20 Nov 2014 19:45:42 -0500
Subject: [PATCH 34/40] Fix odd example where bounds were permitted and then
 ignored

---
 src/libterm/lib.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/libterm/lib.rs b/src/libterm/lib.rs
index 151a388a13335..611c10ea71a09 100644
--- a/src/libterm/lib.rs
+++ b/src/libterm/lib.rs
@@ -115,7 +115,7 @@ pub fn stdout() -> Option<Box<Terminal<WriterWrapper> + Send>> {
 #[cfg(not(windows))]
 /// Return a Terminal wrapping stderr, or None if a terminal couldn't be
 /// opened.
-pub fn stderr() -> Option<Box<Terminal<WriterWrapper> + Send> + Send> {
+pub fn stderr() -> Option<Box<Terminal<WriterWrapper> + Send>> {
     TerminfoTerminal::new(WriterWrapper {
         wrapped: box std::io::stderr() as Box<Writer + Send>,
     })

From 21d5d139fc6e04781b7f9d9eeab0f8f0255ac06b Mon Sep 17 00:00:00 2001
From: Niko Matsakis <niko@alum.mit.edu>
Date: Tue, 25 Nov 2014 07:42:16 -0500
Subject: [PATCH 35/40] Add tests for the parsing of `+` and the error messages
 if people get it wrong.

Fixes #18772.
---
 .../hrtb-precedence-of-plus-error-message.rs  | 35 +++++++++++++++++++
 .../hrtb-precedence-of-plus-where-clause.rs   | 27 ++++++++++++++
 src/test/run-pass/hrtb-precedence-of-plus.rs  | 21 +++++++++++
 3 files changed, 83 insertions(+)
 create mode 100644 src/test/compile-fail/hrtb-precedence-of-plus-error-message.rs
 create mode 100644 src/test/run-pass/hrtb-precedence-of-plus-where-clause.rs
 create mode 100644 src/test/run-pass/hrtb-precedence-of-plus.rs

diff --git a/src/test/compile-fail/hrtb-precedence-of-plus-error-message.rs b/src/test/compile-fail/hrtb-precedence-of-plus-error-message.rs
new file mode 100644
index 0000000000000..a0d81b831f69c
--- /dev/null
+++ b/src/test/compile-fail/hrtb-precedence-of-plus-error-message.rs
@@ -0,0 +1,35 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(unboxed_closures)]
+
+// Test that we suggest the correct parentheses
+
+trait Bar {
+    fn dummy(&self) { }
+}
+
+struct Foo<'a> {
+    a: &'a Bar+'a,
+        //~^ ERROR E0171
+        //~^^ NOTE perhaps you meant `&'a (Bar + 'a)`?
+
+    b: &'a mut Bar+'a,
+        //~^ ERROR E0171
+        //~^^ NOTE perhaps you meant `&'a mut (Bar + 'a)`?
+
+    c: Box<Bar+'a>, // OK, no paren needed in this context
+
+    d: fn() -> Bar+'a,
+        //~^ ERROR E0171
+        //~^^ NOTE perhaps you forgot parentheses
+}
+
+fn main() { }
diff --git a/src/test/run-pass/hrtb-precedence-of-plus-where-clause.rs b/src/test/run-pass/hrtb-precedence-of-plus-where-clause.rs
new file mode 100644
index 0000000000000..86ce49f56cb57
--- /dev/null
+++ b/src/test/run-pass/hrtb-precedence-of-plus-where-clause.rs
@@ -0,0 +1,27 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(unboxed_closures)]
+
+// Test that `F : Fn(int) -> int + Send` is interpreted as two
+// distinct bounds on `F`.
+
+fn foo<F>(f: F)
+    where F : FnOnce(int) -> int + Send
+{
+    bar(f);
+    baz(f);
+}
+
+fn bar<F:Send>(f: F) { }
+
+fn baz<F:FnOnce(int) -> int>(f: F) { }
+
+fn main() {}
diff --git a/src/test/run-pass/hrtb-precedence-of-plus.rs b/src/test/run-pass/hrtb-precedence-of-plus.rs
new file mode 100644
index 0000000000000..9a43b5b711eb5
--- /dev/null
+++ b/src/test/run-pass/hrtb-precedence-of-plus.rs
@@ -0,0 +1,21 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(unboxed_closures)]
+
+// Test that `Fn(int) -> int + 'static` parses as `(Fn(int) -> int) +
+// 'static` and not `Fn(int) -> (int + 'static)`. The latter would
+// cause a compilation error. Issue #18772.
+
+fn adder(y: int) -> Box<Fn(int) -> int + 'static> {
+    box move |&: x| y + x
+}
+
+fn main() {}

From 60541cdc1ec334b278740fd6d59b9d08929e6d0d Mon Sep 17 00:00:00 2001
From: Alex Crichton <alex@alexcrichton.com>
Date: Wed, 26 Nov 2014 10:21:45 -0800
Subject: [PATCH 36/40] Test fixes and rebase conflicts

---
 src/libcoretest/slice.rs                         |  2 +-
 src/librustc/middle/borrowck/fragments.rs        |  4 ++--
 src/librustc/middle/region.rs                    |  4 ++--
 src/librustc/middle/typeck/astconv.rs            |  2 +-
 src/librustdoc/lib.rs                            | 13 +++++++------
 src/libserialize/json.rs                         | 12 +++++-------
 src/libstd/macros.rs                             | 11 ++++++-----
 src/libsyntax/ext/quote.rs                       | 12 ++++++------
 src/libtest/lib.rs                               |  6 +++---
 src/libunicode/normalize.rs                      |  8 ++++----
 src/libunicode/tables.rs                         | 16 ++++++++--------
 .../hrtb-precedence-of-plus-error-message.rs     |  1 +
 .../hrtb-precedence-of-plus-where-clause.rs      |  7 ++++++-
 src/test/run-pass/issue-2804.rs                  |  8 ++++----
 14 files changed, 56 insertions(+), 50 deletions(-)

diff --git a/src/libcoretest/slice.rs b/src/libcoretest/slice.rs
index 29253c50ed065..987da90321117 100644
--- a/src/libcoretest/slice.rs
+++ b/src/libcoretest/slice.rs
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use std::slice::{Found, NotFound};
+use std::slice::BinarySearchResult::{Found, NotFound};
 
 #[test]
 fn binary_search_not_found() {
diff --git a/src/librustc/middle/borrowck/fragments.rs b/src/librustc/middle/borrowck/fragments.rs
index 7e766e9138e35..9891320f756f5 100644
--- a/src/librustc/middle/borrowck/fragments.rs
+++ b/src/librustc/middle/borrowck/fragments.rs
@@ -277,8 +277,8 @@ pub fn fixup_fragment_sets<'tcx>(this: &MoveData<'tcx>, tcx: &ty::ctxt<'tcx>) {
 
     fn non_member(elem: MovePathIndex, set: &[MovePathIndex]) -> bool {
         match set.binary_search_elem(&elem) {
-            slice::Found(_) => false,
-            slice::NotFound(_) => true,
+            slice::BinarySearchResult::Found(_) => false,
+            slice::BinarySearchResult::NotFound(_) => true,
         }
     }
 }
diff --git a/src/librustc/middle/region.rs b/src/librustc/middle/region.rs
index 3684f64ebe537..41b06923e8283 100644
--- a/src/librustc/middle/region.rs
+++ b/src/librustc/middle/region.rs
@@ -72,8 +72,8 @@ impl CodeExtent {
     }
 }
 
-The region maps encode information about region relationships.
-
+/// The region maps encode information about region relationships.
+///
 /// - `scope_map` maps from a scope id to the enclosing scope id; this is
 ///   usually corresponding to the lexical nesting, though in the case of
 ///   closures the parent scope is the innermost conditional expression or repeating
diff --git a/src/librustc/middle/typeck/astconv.rs b/src/librustc/middle/typeck/astconv.rs
index 8f1e2d115d3f1..0a5ae10dd0142 100644
--- a/src/librustc/middle/typeck/astconv.rs
+++ b/src/librustc/middle/typeck/astconv.rs
@@ -741,7 +741,7 @@ fn ast_ty_to_trait_ref<'tcx,AC,RS>(this: &AC,
 
                 _ => {
                     span_note!(this.tcx().sess, ty.span,
-                               "perhaps you forget parentheses? (per RFC 248)");
+                               "perhaps you forgot parentheses? (per RFC 248)");
                 }
             }
             Err(ErrorReported)
diff --git a/src/librustdoc/lib.rs b/src/librustdoc/lib.rs
index 36e74d43e6417..5b99151c5328e 100644
--- a/src/librustdoc/lib.rs
+++ b/src/librustdoc/lib.rs
@@ -35,7 +35,8 @@ use std::io::File;
 use std::io;
 use std::rc::Rc;
 use externalfiles::ExternalHtml;
-use serialize::{json, Decodable, Encodable};
+use serialize::{Decodable, Encodable};
+use serialize::json::{mod, Json};
 
 // reexported from `clean` so it can be easily updated with the mod itself
 pub use clean::SCHEMA_VERSION;
@@ -425,11 +426,11 @@ fn json_input(input: &str) -> Result<Output, String> {
     };
     match json::from_reader(&mut input) {
         Err(s) => Err(s.to_string()),
-        Ok(json::Object(obj)) => {
+        Ok(Json::Object(obj)) => {
             let mut obj = obj;
             // Make sure the schema is what we expect
             match obj.remove(&"schema".to_string()) {
-                Some(json::String(version)) => {
+                Some(Json::String(version)) => {
                     if version.as_slice() != SCHEMA_VERSION {
                         return Err(format!(
                                 "sorry, but I only understand version {}",
@@ -468,7 +469,7 @@ fn json_output(krate: clean::Crate, res: Vec<plugins::PluginJson> ,
     //   "plugins": { output of plugins ... }
     // }
     let mut json = std::collections::TreeMap::new();
-    json.insert("schema".to_string(), json::String(SCHEMA_VERSION.to_string()));
+    json.insert("schema".to_string(), Json::String(SCHEMA_VERSION.to_string()));
     let plugins_json = res.into_iter()
                           .filter_map(|opt| {
                               match opt {
@@ -495,8 +496,8 @@ fn json_output(krate: clean::Crate, res: Vec<plugins::PluginJson> ,
     };
 
     json.insert("crate".to_string(), crate_json);
-    json.insert("plugins".to_string(), json::Object(plugins_json));
+    json.insert("plugins".to_string(), Json::Object(plugins_json));
 
     let mut file = try!(File::create(&dst));
-    json::Object(json).to_writer(&mut file)
+    Json::Object(json).to_writer(&mut file)
 }
diff --git a/src/libserialize/json.rs b/src/libserialize/json.rs
index 15c554be5d936..8b252da6715b3 100644
--- a/src/libserialize/json.rs
+++ b/src/libserialize/json.rs
@@ -113,8 +113,7 @@ for custom mappings.
 
 ```rust
 extern crate serialize;
-use serialize::json::ToJson;
-use serialize::json;
+use serialize::json::{mod, ToJson, Json};
 
 // A custom data structure
 struct ComplexNum {
@@ -125,7 +124,7 @@ struct ComplexNum {
 // JSON value representation
 impl ToJson for ComplexNum {
     fn to_json(&self) -> json::Json {
-        json::String(format!("{}+{}i", self.a, self.b))
+        Json::String(format!("{}+{}i", self.a, self.b))
     }
 }
 
@@ -154,8 +153,7 @@ fn main() {
 ```rust
 extern crate serialize;
 use std::collections::TreeMap;
-use serialize::json::ToJson;
-use serialize::json;
+use serialize::json::{mod, ToJson, Json};
 
 // Only generate `Decodable` trait implementation
 #[deriving(Decodable)]
@@ -173,7 +171,7 @@ impl ToJson for TestStruct {
         d.insert("data_int".to_string(), self.data_int.to_json());
         d.insert("data_str".to_string(), self.data_str.to_json());
         d.insert("data_vector".to_string(), self.data_vector.to_json());
-        json::Object(d)
+        Json::Object(d)
     }
 }
 
@@ -184,7 +182,7 @@ fn main() {
         data_str: "toto".to_string(),
         data_vector: vec![2,3,4,5],
     };
-    let json_obj: json::Json = input_data.to_json();
+    let json_obj: Json = input_data.to_json();
     let json_str: String = json_obj.to_string();
 
     // Deserialize like before
diff --git a/src/libstd/macros.rs b/src/libstd/macros.rs
index dbb45f2e55601..12ca80bfaab09 100644
--- a/src/libstd/macros.rs
+++ b/src/libstd/macros.rs
@@ -205,11 +205,12 @@ macro_rules! debug_assert_eq(
 ///
 /// ```rust
 /// fn foo(x: Option<int>) {
-///    match x {
-///     Some(n) if n >= 0 => println!("Some(Non-negative)"),
-///     Some(n) if n <  0 => println!("Some(Negative)"),
-///     Some(_)           => unreachable!(), // compile error if commented out
-///     None              => println!("None")
+///     match x {
+///         Some(n) if n >= 0 => println!("Some(Non-negative)"),
+///         Some(n) if n <  0 => println!("Some(Negative)"),
+///         Some(_)           => unreachable!(), // compile error if commented out
+///         None              => println!("None")
+///     }
 /// }
 /// ```
 ///
diff --git a/src/libsyntax/ext/quote.rs b/src/libsyntax/ext/quote.rs
index e703ac21f2642..3fca110a881c8 100644
--- a/src/libsyntax/ext/quote.rs
+++ b/src/libsyntax/ext/quote.rs
@@ -17,12 +17,12 @@ use parse::token::*;
 use parse::token;
 use ptr::P;
 
-//!  Quasiquoting works via token trees.
-//!
-//!  This is registered as a set of expression syntax extension called quote!
-//!  that lifts its argument token-tree to an AST representing the
-//!  construction of the same token tree, with token::SubstNt interpreted
-//!  as antiquotes (splices).
+///  Quasiquoting works via token trees.
+///
+///  This is registered as a set of expression syntax extension called quote!
+///  that lifts its argument token-tree to an AST representing the
+///  construction of the same token tree, with token::SubstNt interpreted
+///  as antiquotes (splices).
 
 pub mod rt {
     use ast;
diff --git a/src/libtest/lib.rs b/src/libtest/lib.rs
index bed66b99d830c..6556ba3864c86 100644
--- a/src/libtest/lib.rs
+++ b/src/libtest/lib.rs
@@ -1106,9 +1106,9 @@ fn calc_result(desc: &TestDesc, task_succeeded: bool) -> TestResult {
 impl ToJson for Metric {
     fn to_json(&self) -> json::Json {
         let mut map = TreeMap::new();
-        map.insert("value".to_string(), json::F64(self.value));
-        map.insert("noise".to_string(), json::F64(self.noise));
-        json::Object(map)
+        map.insert("value".to_string(), json::Json::F64(self.value));
+        map.insert("noise".to_string(), json::Json::F64(self.noise));
+        json::Json::Object(map)
     }
 }
 
diff --git a/src/libunicode/normalize.rs b/src/libunicode/normalize.rs
index ad36215c11bcb..92b5a69a99fe0 100644
--- a/src/libunicode/normalize.rs
+++ b/src/libunicode/normalize.rs
@@ -25,11 +25,11 @@ fn bsearch_table<T>(c: char, r: &'static [(char, &'static [T])]) -> Option<&'sta
         else if val < c { Less }
         else { Greater }
     }) {
-        slice::Found(idx) => {
+        slice::BinarySearchResult::Found(idx) => {
             let (_, result) = r[idx];
             Some(result)
         }
-        slice::NotFound(_) => None
+        slice::BinarySearchResult::NotFound(_) => None
     }
 }
 
@@ -88,11 +88,11 @@ pub fn compose(a: char, b: char) -> Option<char> {
                     else if val < b { Less }
                     else { Greater }
                 }) {
-                    slice::Found(idx) => {
+                    slice::BinarySearchResult::Found(idx) => {
                         let (_, result) = candidates[idx];
                         Some(result)
                     }
-                    slice::NotFound(_) => None
+                    slice::BinarySearchResult::NotFound(_) => None
                 }
             }
         }
diff --git a/src/libunicode/tables.rs b/src/libunicode/tables.rs
index dfba686143f3b..7cece6701dc85 100644
--- a/src/libunicode/tables.rs
+++ b/src/libunicode/tables.rs
@@ -6249,11 +6249,11 @@ pub mod normalization {
             else if hi < c { Less }
             else { Greater }
         }) {
-            slice::Found(idx) => {
+            slice::BinarySearchResult::Found(idx) => {
                 let (_, _, result) = r[idx];
                 result
             }
-            slice::NotFound(_) => 0
+            slice::BinarySearchResult::NotFound(_) => 0
         }
     }
 
@@ -6392,8 +6392,8 @@ pub mod conversions {
             else if key < c { Less }
             else { Greater }
         }) {
-            slice::Found(i) => Some(i),
-            slice::NotFound(_) => None,
+            slice::BinarySearchResult::Found(i) => Some(i),
+            slice::BinarySearchResult::NotFound(_) => None,
         }
     }
 
@@ -6945,11 +6945,11 @@ pub mod charwidth {
             else if hi < c { Less }
             else { Greater }
         }) {
-            slice::Found(idx) => {
+            slice::BinarySearchResult::Found(idx) => {
                 let (_, _, r_ncjk, r_cjk) = r[idx];
                 if is_cjk { r_cjk } else { r_ncjk }
             }
-            slice::NotFound(_) => 1
+            slice::BinarySearchResult::NotFound(_) => 1
         }
     }
 
@@ -7160,11 +7160,11 @@ pub mod grapheme {
             else if hi < c { Less }
             else { Greater }
         }) {
-            slice::Found(idx) => {
+            slice::BinarySearchResult::Found(idx) => {
                 let (_, _, cat) = r[idx];
                 cat
             }
-            slice::NotFound(_) => GC_Any
+            slice::BinarySearchResult::NotFound(_) => GC_Any
         }
     }
 
diff --git a/src/test/compile-fail/hrtb-precedence-of-plus-error-message.rs b/src/test/compile-fail/hrtb-precedence-of-plus-error-message.rs
index a0d81b831f69c..ff3512ad8e72a 100644
--- a/src/test/compile-fail/hrtb-precedence-of-plus-error-message.rs
+++ b/src/test/compile-fail/hrtb-precedence-of-plus-error-message.rs
@@ -30,6 +30,7 @@ struct Foo<'a> {
     d: fn() -> Bar+'a,
         //~^ ERROR E0171
         //~^^ NOTE perhaps you forgot parentheses
+        //~^^^ WARN deprecated syntax
 }
 
 fn main() { }
diff --git a/src/test/run-pass/hrtb-precedence-of-plus-where-clause.rs b/src/test/run-pass/hrtb-precedence-of-plus-where-clause.rs
index 86ce49f56cb57..88e6de6d3e6ff 100644
--- a/src/test/run-pass/hrtb-precedence-of-plus-where-clause.rs
+++ b/src/test/run-pass/hrtb-precedence-of-plus-where-clause.rs
@@ -13,10 +13,15 @@
 // Test that `F : Fn(int) -> int + Send` is interpreted as two
 // distinct bounds on `F`.
 
-fn foo<F>(f: F)
+fn foo1<F>(f: F)
     where F : FnOnce(int) -> int + Send
 {
     bar(f);
+}
+
+fn foo2<F>(f: F)
+    where F : FnOnce(int) -> int + Send
+{
     baz(f);
 }
 
diff --git a/src/test/run-pass/issue-2804.rs b/src/test/run-pass/issue-2804.rs
index ba73b7cee2ebc..ec19b95ab1a48 100644
--- a/src/test/run-pass/issue-2804.rs
+++ b/src/test/run-pass/issue-2804.rs
@@ -13,7 +13,7 @@ extern crate collections;
 extern crate serialize;
 
 use std::collections::HashMap;
-use serialize::json;
+use serialize::json::{mod, Json};
 use std::option;
 
 enum object {
@@ -24,7 +24,7 @@ enum object {
 fn lookup(table: json::Object, key: String, default: String) -> String
 {
     match table.find(&key.to_string()) {
-        option::Some(&json::String(ref s)) => {
+        option::Some(&Json::String(ref s)) => {
             s.to_string()
         }
         option::Some(value) => {
@@ -40,7 +40,7 @@ fn lookup(table: json::Object, key: String, default: String) -> String
 fn add_interface(_store: int, managed_ip: String, data: json::Json) -> (String, object)
 {
     match &data {
-        &json::Object(ref interface) => {
+        &Json::Object(ref interface) => {
             let name = lookup(interface.clone(),
                               "ifDescr".to_string(),
                               "".to_string());
@@ -59,7 +59,7 @@ fn add_interfaces(store: int, managed_ip: String, device: HashMap<String, json::
 -> Vec<(String, object)> {
     match device["interfaces".to_string()]
     {
-        json::Array(ref interfaces) =>
+        Json::Array(ref interfaces) =>
         {
           interfaces.iter().map(|interface| {
                 add_interface(store, managed_ip.clone(), (*interface).clone())

From cd5c8235c5448a7234548c772468c8d2e8f150d9 Mon Sep 17 00:00:00 2001
From: Steve Klabnik <steve@steveklabnik.com>
Date: Tue, 25 Nov 2014 21:17:11 -0500
Subject: [PATCH 37/40] /*! -> //!

Sister pull request of https://github.com/rust-lang/rust/pull/19288, but
for the other style of block doc comment.
---
 src/libcollections/hash/mod.rs                |  102 +-
 src/libcore/clone.rs                          |   22 +-
 src/libcore/finally.rs                        |   40 +-
 src/libcore/intrinsics.rs                     |   62 +-
 src/libcore/iter.rs                           |   94 +-
 src/libcore/kinds.rs                          |   19 +-
 src/libcore/ops.rs                            |   88 +-
 src/libflate/lib.rs                           |   14 +-
 src/libgraphviz/lib.rs                        |  506 ++--
 src/liblibc/lib.rs                            |  104 +-
 src/librand/distributions/mod.rs              |   19 +-
 src/librustc/lib.rs                           |   14 +-
 src/librustc/middle/astencode.rs              |  120 +-
 src/librustc/middle/borrowck/check_loans.rs   |   42 +-
 src/librustc/middle/borrowck/doc.rs           | 2428 ++++++++---------
 src/librustc/middle/borrowck/fragments.rs     |   47 +-
 .../borrowck/gather_loans/gather_moves.rs     |    4 +-
 .../middle/borrowck/gather_loans/lifetime.rs  |    6 +-
 .../middle/borrowck/gather_loans/mod.rs       |    9 +-
 .../borrowck/gather_loans/restrictions.rs     |    4 +-
 src/librustc/middle/borrowck/mod.rs           |    2 +-
 src/librustc/middle/borrowck/move_data.rs     |   77 +-
 src/librustc/middle/cfg/mod.rs                |    8 +-
 src/librustc/middle/dataflow.rs               |   10 +-
 src/librustc/middle/expr_use_visitor.rs       |   25 +-
 src/librustc/middle/fast_reject.rs            |   24 +-
 src/librustc/middle/graph.rs                  |   46 +-
 src/librustc/middle/liveness.rs               |  196 +-
 src/librustc/middle/mem_categorization.rs     |  133 +-
 src/librustc/middle/region.rs                 |  151 +-
 src/librustc/middle/resolve_lifetime.rs       |   63 +-
 src/librustc/middle/subst.rs                  |  110 +-
 src/librustc/middle/traits/coherence.rs       |    2 +-
 src/librustc/middle/traits/doc.rs             |  796 +++---
 src/librustc/middle/traits/fulfill.rs         |   19 +-
 src/librustc/middle/traits/mod.rs             |   53 +-
 src/librustc/middle/traits/select.rs          |  280 +-
 src/librustc/middle/traits/util.rs            |   49 +-
 src/librustc/middle/ty.rs                     |  197 +-
 src/librustc/middle/ty_fold.rs                |   52 +-
 src/librustc/middle/typeck/astconv.rs         |  141 +-
 src/librustc/middle/typeck/check/closure.rs   |    4 +-
 .../middle/typeck/check/method/confirm.rs     |   26 +-
 .../middle/typeck/check/method/doc.rs         |  227 +-
 .../middle/typeck/check/method/mod.rs         |   69 +-
 .../middle/typeck/check/method/probe.rs       |   93 +-
 src/librustc/middle/typeck/check/mod.rs       |  268 +-
 src/librustc/middle/typeck/check/regionck.rs  |  537 ++--
 .../middle/typeck/check/regionmanip.rs        |   23 +-
 src/librustc/middle/typeck/check/vtable.rs    |   25 +-
 src/librustc/middle/typeck/check/wf.rs        |   58 +-
 src/librustc/middle/typeck/coherence/mod.rs   |    8 +-
 .../middle/typeck/coherence/orphan.rs         |    6 +-
 .../middle/typeck/coherence/overlap.rs        |    6 +-
 src/librustc/middle/typeck/collect.rs         |   17 +-
 src/librustc/middle/typeck/infer/coercion.rs  |  114 +-
 src/librustc/middle/typeck/infer/combine.rs   |   13 +-
 src/librustc/middle/typeck/infer/doc.rs       |  478 ++--
 .../middle/typeck/infer/error_reporting.rs    |  103 +-
 .../middle/typeck/infer/higher_ranked/doc.rs  |  806 +++---
 .../middle/typeck/infer/higher_ranked/mod.rs  |    6 +-
 src/librustc/middle/typeck/infer/lattice.rs   |   42 +-
 src/librustc/middle/typeck/infer/mod.rs       |   26 +-
 .../typeck/infer/region_inference/doc.rs      |  732 +++--
 .../typeck/infer/region_inference/mod.rs      |   30 +-
 src/librustc/middle/typeck/infer/skolemize.rs |   52 +-
 .../middle/typeck/infer/type_variable.rs      |   18 +-
 src/librustc/middle/typeck/infer/unify.rs     |   44 +-
 src/librustc/middle/typeck/variance.rs        |  368 ++-
 src/librustc/plugin/mod.rs                    |   94 +-
 src/librustc/util/common.rs                   |   20 +-
 src/librustc/util/ppaux.rs                    |    7 +-
 src/librustc/util/snapshot_vec.rs             |   42 +-
 src/librustc_trans/lib.rs                     |   14 +-
 src/librustc_trans/test.rs                    |   30 +-
 src/librustc_trans/trans/_match.rs            |  419 ++-
 src/librustc_trans/trans/adt.rs               |   66 +-
 src/librustc_trans/trans/asm.rs               |    4 +-
 src/librustc_trans/trans/base.rs              |   15 +-
 src/librustc_trans/trans/callee.rs            |   71 +-
 src/librustc_trans/trans/cleanup.rs           |  214 +-
 src/librustc_trans/trans/closure.rs           |   22 +-
 src/librustc_trans/trans/common.rs            |   28 +-
 src/librustc_trans/trans/datum.rs             |  161 +-
 src/librustc_trans/trans/debuginfo.rs         |  349 ++-
 src/librustc_trans/trans/doc.rs               |  450 ++-
 src/librustc_trans/trans/expr.rs              |  127 +-
 src/librustc_trans/trans/foreign.rs           |   49 +-
 src/librustc_trans/trans/meth.rs              |   57 +-
 src/librustc_trans/trans/tvec.rs              |   36 +-
 src/librustrt/c_str.rs                        |  120 +-
 src/libserialize/json.rs                      |  355 ++-
 src/libstd/dynamic_lib.rs                     |   10 +-
 src/libstd/fmt.rs                             |  768 +++---
 src/libstd/hash.rs                            |  102 +-
 src/libstd/io/fs.rs                           |   80 +-
 src/libstd/io/mod.rs                          |  400 ++-
 src/libstd/io/net/addrinfo.rs                 |   12 +-
 src/libstd/io/net/pipe.rs                     |   22 +-
 src/libstd/io/stdio.rs                        |   34 +-
 src/libstd/io/test.rs                         |   15 +-
 src/libstd/io/timer.rs                        |   12 +-
 src/libstd/io/util.rs                         |    2 +-
 src/libstd/os.rs                              |   30 +-
 src/libstd/path/mod.rs                        |  106 +-
 src/libstd/rt/mod.rs                          |   72 +-
 src/libstd/sync/future.rs                     |   28 +-
 src/libsyntax/ast.rs                          |    6 +-
 src/libsyntax/ast_util.rs                     |    6 +-
 src/libsyntax/codemap.rs                      |   18 +-
 src/libsyntax/ext/deriving/decodable.rs       |    5 +-
 src/libsyntax/ext/deriving/generic/ty.rs      |    6 +-
 src/libsyntax/ext/deriving/mod.rs             |   13 +-
 src/libsyntax/parse/obsolete.rs               |   12 +-
 src/libsyntax/parse/parser.rs                 |   21 +-
 src/libsyntax/visit.rs                        |    8 +-
 src/libunicode/normalize.rs                   |    5 +-
 src/libunicode/u_char.rs                      |   10 +-
 src/libunicode/u_str.rs                       |   10 +-
 119 files changed, 6860 insertions(+), 8080 deletions(-)

diff --git a/src/libcollections/hash/mod.rs b/src/libcollections/hash/mod.rs
index 4173ffc5d2fd8..1dc2539c592e9 100644
--- a/src/libcollections/hash/mod.rs
+++ b/src/libcollections/hash/mod.rs
@@ -8,58 +8,56 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
- * Generic hashing support.
- *
- * This module provides a generic way to compute the hash of a value. The
- * simplest way to make a type hashable is to use `#[deriving(Hash)]`:
- *
- * # Example
- *
- * ```rust
- * use std::hash;
- * use std::hash::Hash;
- *
- * #[deriving(Hash)]
- * struct Person {
- *     id: uint,
- *     name: String,
- *     phone: u64,
- * }
- *
- * let person1 = Person { id: 5, name: "Janet".to_string(), phone: 555_666_7777 };
- * let person2 = Person { id: 5, name: "Bob".to_string(), phone: 555_666_7777 };
- *
- * assert!(hash::hash(&person1) != hash::hash(&person2));
- * ```
- *
- * If you need more control over how a value is hashed, you need to implement
- * the trait `Hash`:
- *
- * ```rust
- * use std::hash;
- * use std::hash::Hash;
- * use std::hash::sip::SipState;
- *
- * struct Person {
- *     id: uint,
- *     name: String,
- *     phone: u64,
- * }
- *
- * impl Hash for Person {
- *     fn hash(&self, state: &mut SipState) {
- *         self.id.hash(state);
- *         self.phone.hash(state);
- *     }
- * }
- *
- * let person1 = Person { id: 5, name: "Janet".to_string(), phone: 555_666_7777 };
- * let person2 = Person { id: 5, name: "Bob".to_string(), phone: 555_666_7777 };
- *
- * assert!(hash::hash(&person1) == hash::hash(&person2));
- * ```
- */
+//! Generic hashing support.
+//!
+//! This module provides a generic way to compute the hash of a value. The
+//! simplest way to make a type hashable is to use `#[deriving(Hash)]`:
+//!
+//! # Example
+//!
+//! ```rust
+//! use std::hash;
+//! use std::hash::Hash;
+//!
+//! #[deriving(Hash)]
+//! struct Person {
+//!     id: uint,
+//!     name: String,
+//!     phone: u64,
+//! }
+//!
+//! let person1 = Person { id: 5, name: "Janet".to_string(), phone: 555_666_7777 };
+//! let person2 = Person { id: 5, name: "Bob".to_string(), phone: 555_666_7777 };
+//!
+//! assert!(hash::hash(&person1) != hash::hash(&person2));
+//! ```
+//!
+//! If you need more control over how a value is hashed, you need to implement
+//! the trait `Hash`:
+//!
+//! ```rust
+//! use std::hash;
+//! use std::hash::Hash;
+//! use std::hash::sip::SipState;
+//!
+//! struct Person {
+//!     id: uint,
+//!     name: String,
+//!     phone: u64,
+//! }
+//!
+//! impl Hash for Person {
+//!     fn hash(&self, state: &mut SipState) {
+//!         self.id.hash(state);
+//!         self.phone.hash(state);
+//!     }
+//! }
+//!
+//! let person1 = Person { id: 5, name: "Janet".to_string(), phone: 555_666_7777 };
+//! let person2 = Person { id: 5, name: "Bob".to_string(), phone: 555_666_7777 };
+//!
+//! assert!(hash::hash(&person1) == hash::hash(&person2));
+//! ```
 
 #![allow(unused_must_use)]
 
diff --git a/src/libcore/clone.rs b/src/libcore/clone.rs
index d13daf0964a1a..9f928f57e9e40 100644
--- a/src/libcore/clone.rs
+++ b/src/libcore/clone.rs
@@ -8,18 +8,16 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*! The `Clone` trait for types that cannot be 'implicitly copied'
-
-In Rust, some simple types are "implicitly copyable" and when you
-assign them or pass them as arguments, the receiver will get a copy,
-leaving the original value in place. These types do not require
-allocation to copy and do not have finalizers (i.e. they do not
-contain owned boxes or implement `Drop`), so the compiler considers
-them cheap and safe to copy. For other types copies must be made
-explicitly, by convention implementing the `Clone` trait and calling
-the `clone` method.
-
-*/
+//! The `Clone` trait for types that cannot be 'implicitly copied'
+//!
+//! In Rust, some simple types are "implicitly copyable" and when you
+//! assign them or pass them as arguments, the receiver will get a copy,
+//! leaving the original value in place. These types do not require
+//! allocation to copy and do not have finalizers (i.e. they do not
+//! contain owned boxes or implement `Drop`), so the compiler considers
+//! them cheap and safe to copy. For other types copies must be made
+//! explicitly, by convention implementing the `Clone` trait and calling
+//! the `clone` method.
 
 #![unstable]
 
diff --git a/src/libcore/finally.rs b/src/libcore/finally.rs
index 2e358e7a74b64..8bfdd93447723 100644
--- a/src/libcore/finally.rs
+++ b/src/libcore/finally.rs
@@ -8,27 +8,25 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-The Finally trait provides a method, `finally` on
-stack closures that emulates Java-style try/finally blocks.
-
-Using the `finally` method is sometimes convenient, but the type rules
-prohibit any shared, mutable state between the "try" case and the
-"finally" case. For advanced cases, the `try_finally` function can
-also be used. See that function for more details.
-
-# Example
-
-```
-use std::finally::Finally;
-
-(|| {
-    // ...
-}).finally(|| {
-    // this code is always run
-})
-```
-*/
+//! The Finally trait provides a method, `finally` on
+//! stack closures that emulates Java-style try/finally blocks.
+//!
+//! Using the `finally` method is sometimes convenient, but the type rules
+//! prohibit any shared, mutable state between the "try" case and the
+//! "finally" case. For advanced cases, the `try_finally` function can
+//! also be used. See that function for more details.
+//!
+//! # Example
+//!
+//! ```
+//! use std::finally::Finally;
+//!
+//! (|| {
+//!     // ...
+//! }).finally(|| {
+//!     // this code is always run
+//! })
+//! ```
 
 #![experimental]
 
diff --git a/src/libcore/intrinsics.rs b/src/libcore/intrinsics.rs
index 067ef47a86b9f..78c74075d4867 100644
--- a/src/libcore/intrinsics.rs
+++ b/src/libcore/intrinsics.rs
@@ -8,38 +8,36 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*! rustc compiler intrinsics.
-
-The corresponding definitions are in librustc/middle/trans/foreign.rs.
-
-# Volatiles
-
-The volatile intrinsics provide operations intended to act on I/O
-memory, which are guaranteed to not be reordered by the compiler
-across other volatile intrinsics. See the LLVM documentation on
-[[volatile]].
-
-[volatile]: http://llvm.org/docs/LangRef.html#volatile-memory-accesses
-
-# Atomics
-
-The atomic intrinsics provide common atomic operations on machine
-words, with multiple possible memory orderings. They obey the same
-semantics as C++11. See the LLVM documentation on [[atomics]].
-
-[atomics]: http://llvm.org/docs/Atomics.html
-
-A quick refresher on memory ordering:
-
-* Acquire - a barrier for acquiring a lock. Subsequent reads and writes
-  take place after the barrier.
-* Release - a barrier for releasing a lock. Preceding reads and writes
-  take place before the barrier.
-* Sequentially consistent - sequentially consistent operations are
-  guaranteed to happen in order. This is the standard mode for working
-  with atomic types and is equivalent to Java's `volatile`.
-
-*/
+//! rustc compiler intrinsics.
+//!
+//! The corresponding definitions are in librustc/middle/trans/foreign.rs.
+//!
+//! # Volatiles
+//!
+//! The volatile intrinsics provide operations intended to act on I/O
+//! memory, which are guaranteed to not be reordered by the compiler
+//! across other volatile intrinsics. See the LLVM documentation on
+//! [[volatile]].
+//!
+//! [volatile]: http://llvm.org/docs/LangRef.html#volatile-memory-accesses
+//!
+//! # Atomics
+//!
+//! The atomic intrinsics provide common atomic operations on machine
+//! words, with multiple possible memory orderings. They obey the same
+//! semantics as C++11. See the LLVM documentation on [[atomics]].
+//!
+//! [atomics]: http://llvm.org/docs/Atomics.html
+//!
+//! A quick refresher on memory ordering:
+//!
+//! * Acquire - a barrier for acquiring a lock. Subsequent reads and writes
+//!   take place after the barrier.
+//! * Release - a barrier for releasing a lock. Preceding reads and writes
+//!   take place before the barrier.
+//! * Sequentially consistent - sequentially consistent operations are
+//!   guaranteed to happen in order. This is the standard mode for working
+//!   with atomic types and is equivalent to Java's `volatile`.
 
 #![experimental]
 #![allow(missing_docs)]
diff --git a/src/libcore/iter.rs b/src/libcore/iter.rs
index 496e7979b726e..2d488a4b15563 100644
--- a/src/libcore/iter.rs
+++ b/src/libcore/iter.rs
@@ -8,55 +8,51 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-
-Composable external iterators
-
-# The `Iterator` trait
-
-This module defines Rust's core iteration trait. The `Iterator` trait has one
-unimplemented method, `next`. All other methods are derived through default
-methods to perform operations such as `zip`, `chain`, `enumerate`, and `fold`.
-
-The goal of this module is to unify iteration across all containers in Rust.
-An iterator can be considered as a state machine which is used to track which
-element will be yielded next.
-
-There are various extensions also defined in this module to assist with various
-types of iteration, such as the `DoubleEndedIterator` for iterating in reverse,
-the `FromIterator` trait for creating a container from an iterator, and much
-more.
-
-## Rust's `for` loop
-
-The special syntax used by rust's `for` loop is based around the `Iterator`
-trait defined in this module. For loops can be viewed as a syntactical expansion
-into a `loop`, for example, the `for` loop in this example is essentially
-translated to the `loop` below.
-
-```rust
-let values = vec![1i, 2, 3];
-
-// "Syntactical sugar" taking advantage of an iterator
-for &x in values.iter() {
-    println!("{}", x);
-}
-
-// Rough translation of the iteration without a `for` iterator.
-let mut it = values.iter();
-loop {
-    match it.next() {
-        Some(&x) => {
-            println!("{}", x);
-        }
-        None => { break }
-    }
-}
-```
-
-This `for` loop syntax can be applied to any iterator over any type.
-
-*/
+//! Composable external iterators
+//!
+//! # The `Iterator` trait
+//!
+//! This module defines Rust's core iteration trait. The `Iterator` trait has one
+//! unimplemented method, `next`. All other methods are derived through default
+//! methods to perform operations such as `zip`, `chain`, `enumerate`, and `fold`.
+//!
+//! The goal of this module is to unify iteration across all containers in Rust.
+//! An iterator can be considered as a state machine which is used to track which
+//! element will be yielded next.
+//!
+//! There are various extensions also defined in this module to assist with various
+//! types of iteration, such as the `DoubleEndedIterator` for iterating in reverse,
+//! the `FromIterator` trait for creating a container from an iterator, and much
+//! more.
+//!
+//! ## Rust's `for` loop
+//!
+//! The special syntax used by rust's `for` loop is based around the `Iterator`
+//! trait defined in this module. For loops can be viewed as a syntactical expansion
+//! into a `loop`, for example, the `for` loop in this example is essentially
+//! translated to the `loop` below.
+//!
+//! ```rust
+//! let values = vec![1i, 2, 3];
+//!
+//! // "Syntactical sugar" taking advantage of an iterator
+//! for &x in values.iter() {
+//!     println!("{}", x);
+//! }
+//!
+//! // Rough translation of the iteration without a `for` iterator.
+//! let mut it = values.iter();
+//! loop {
+//!     match it.next() {
+//!         Some(&x) => {
+//!             println!("{}", x);
+//!         }
+//!         None => { break }
+//!     }
+//! }
+//! ```
+//!
+//! This `for` loop syntax can be applied to any iterator over any type.
 
 pub use self::MinMaxResult::*;
 
diff --git a/src/libcore/kinds.rs b/src/libcore/kinds.rs
index 6489101f7b980..0c2cb9d591005 100644
--- a/src/libcore/kinds.rs
+++ b/src/libcore/kinds.rs
@@ -8,17 +8,14 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-Primitive traits representing basic 'kinds' of types
-
-Rust types can be classified in various useful ways according to
-intrinsic properties of the type. These classifications, often called
-'kinds', are represented as traits.
-
-They cannot be implemented by user code, but are instead implemented
-by the compiler automatically for the types to which they apply.
-
-*/
+//! Primitive traits representing basic 'kinds' of types
+//!
+//! Rust types can be classified in various useful ways according to
+//! intrinsic properties of the type. These classifications, often called
+//! 'kinds', are represented as traits.
+//!
+//! They cannot be implemented by user code, but are instead implemented
+//! by the compiler automatically for the types to which they apply.
 
 /// Types able to be transferred across task boundaries.
 #[lang="send"]
diff --git a/src/libcore/ops.rs b/src/libcore/ops.rs
index 185c937eb6b37..519dfd47fd8e3 100644
--- a/src/libcore/ops.rs
+++ b/src/libcore/ops.rs
@@ -8,52 +8,48 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
- *
- * Overloadable operators
- *
- * Implementing these traits allows you to get an effect similar to
- * overloading operators.
- *
- * The values for the right hand side of an operator are automatically
- * borrowed, so `a + b` is sugar for `a.add(&b)`.
- *
- * All of these traits are imported by the prelude, so they are available in
- * every Rust program.
- *
- * # Example
- *
- * This example creates a `Point` struct that implements `Add` and `Sub`, and then
- * demonstrates adding and subtracting two `Point`s.
- *
- * ```rust
- * #[deriving(Show)]
- * struct Point {
- *     x: int,
- *     y: int
- * }
- *
- * impl Add<Point, Point> for Point {
- *     fn add(&self, other: &Point) -> Point {
- *         Point {x: self.x + other.x, y: self.y + other.y}
- *     }
- * }
- *
- * impl Sub<Point, Point> for Point {
- *     fn sub(&self, other: &Point) -> Point {
- *         Point {x: self.x - other.x, y: self.y - other.y}
- *     }
- * }
- * fn main() {
- *     println!("{}", Point {x: 1, y: 0} + Point {x: 2, y: 3});
- *     println!("{}", Point {x: 1, y: 0} - Point {x: 2, y: 3});
- * }
- * ```
- *
- * See the documentation for each trait for a minimum implementation that prints
- * something to the screen.
- *
- */
+//! Overloadable operators
+//!
+//! Implementing these traits allows you to get an effect similar to
+//! overloading operators.
+//!
+//! The values for the right hand side of an operator are automatically
+//! borrowed, so `a + b` is sugar for `a.add(&b)`.
+//!
+//! All of these traits are imported by the prelude, so they are available in
+//! every Rust program.
+//!
+//! # Example
+//!
+//! This example creates a `Point` struct that implements `Add` and `Sub`, and then
+//! demonstrates adding and subtracting two `Point`s.
+//!
+//! ```rust
+//! #[deriving(Show)]
+//! struct Point {
+//!     x: int,
+//!     y: int
+//! }
+//!
+//! impl Add<Point, Point> for Point {
+//!     fn add(&self, other: &Point) -> Point {
+//!         Point {x: self.x + other.x, y: self.y + other.y}
+//!     }
+//! }
+//!
+//! impl Sub<Point, Point> for Point {
+//!     fn sub(&self, other: &Point) -> Point {
+//!         Point {x: self.x - other.x, y: self.y - other.y}
+//!     }
+//! }
+//! fn main() {
+//!     println!("{}", Point {x: 1, y: 0} + Point {x: 2, y: 3});
+//!     println!("{}", Point {x: 1, y: 0} - Point {x: 2, y: 3});
+//! }
+//! ```
+//!
+//! See the documentation for each trait for a minimum implementation that prints
+//! something to the screen.
 
 use kinds::Sized;
 
diff --git a/src/libflate/lib.rs b/src/libflate/lib.rs
index 568210118a8ae..36a04392c36f3 100644
--- a/src/libflate/lib.rs
+++ b/src/libflate/lib.rs
@@ -8,15 +8,11 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-
-Simple [DEFLATE][def]-based compression. This is a wrapper around the
-[`miniz`][mz] library, which is a one-file pure-C implementation of zlib.
-
-[def]: https://en.wikipedia.org/wiki/DEFLATE
-[mz]: https://code.google.com/p/miniz/
-
-*/
+//! Simple [DEFLATE][def]-based compression. This is a wrapper around the
+//! [`miniz`][mz] library, which is a one-file pure-C implementation of zlib.
+//!
+//! [def]: https://en.wikipedia.org/wiki/DEFLATE
+//! [mz]: https://code.google.com/p/miniz/
 
 #![crate_name = "flate"]
 #![experimental]
diff --git a/src/libgraphviz/lib.rs b/src/libgraphviz/lib.rs
index f149ec509af05..04eeeb62e1d35 100644
--- a/src/libgraphviz/lib.rs
+++ b/src/libgraphviz/lib.rs
@@ -8,260 +8,258 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*! Generate files suitable for use with [Graphviz](http://www.graphviz.org/)
-
-The `render` function generates output (e.g. an `output.dot` file) for
-use with [Graphviz](http://www.graphviz.org/) by walking a labelled
-graph. (Graphviz can then automatically lay out the nodes and edges
-of the graph, and also optionally render the graph as an image or
-other [output formats](
-http://www.graphviz.org/content/output-formats), such as SVG.)
-
-Rather than impose some particular graph data structure on clients,
-this library exposes two traits that clients can implement on their
-own structs before handing them over to the rendering function.
-
-Note: This library does not yet provide access to the full
-expressiveness of the [DOT language](
-http://www.graphviz.org/doc/info/lang.html). For example, there are
-many [attributes](http://www.graphviz.org/content/attrs) related to
-providing layout hints (e.g. left-to-right versus top-down, which
-algorithm to use, etc). The current intention of this library is to
-emit a human-readable .dot file with very regular structure suitable
-for easy post-processing.
-
-# Examples
-
-The first example uses a very simple graph representation: a list of
-pairs of ints, representing the edges (the node set is implicit).
-Each node label is derived directly from the int representing the node,
-while the edge labels are all empty strings.
-
-This example also illustrates how to use `CowVec` to return
-an owned vector or a borrowed slice as appropriate: we construct the
-node vector from scratch, but borrow the edge list (rather than
-constructing a copy of all the edges from scratch).
-
-The output from this example renders five nodes, with the first four
-forming a diamond-shaped acyclic graph and then pointing to the fifth
-which is cyclic.
-
-```rust
-use graphviz as dot;
-
-type Nd = int;
-type Ed = (int,int);
-struct Edges(Vec<Ed>);
-
-pub fn render_to<W:Writer>(output: &mut W) {
-    let edges = Edges(vec!((0,1), (0,2), (1,3), (2,3), (3,4), (4,4)));
-    dot::render(&edges, output).unwrap()
-}
-
-impl<'a> dot::Labeller<'a, Nd, Ed> for Edges {
-    fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new("example1").unwrap() }
-
-    fn node_id(&'a self, n: &Nd) -> dot::Id<'a> {
-        dot::Id::new(format!("N{}", *n)).unwrap()
-    }
-}
-
-impl<'a> dot::GraphWalk<'a, Nd, Ed> for Edges {
-    fn nodes(&self) -> dot::Nodes<'a,Nd> {
-        // (assumes that |N| \approxeq |E|)
-        let &Edges(ref v) = self;
-        let mut nodes = Vec::with_capacity(v.len());
-        for &(s,t) in v.iter() {
-            nodes.push(s); nodes.push(t);
-        }
-        nodes.sort();
-        nodes.dedup();
-        nodes.into_cow()
-    }
-
-    fn edges(&'a self) -> dot::Edges<'a,Ed> {
-        let &Edges(ref edges) = self;
-        edges.as_slice().into_cow()
-    }
-
-    fn source(&self, e: &Ed) -> Nd { let &(s,_) = e; s }
-
-    fn target(&self, e: &Ed) -> Nd { let &(_,t) = e; t }
-}
-
-# pub fn main() { render_to(&mut Vec::new()) }
-```
-
-```no_run
-# pub fn render_to<W:Writer>(output: &mut W) { unimplemented!() }
-pub fn main() {
-    use std::io::File;
-    let mut f = File::create(&Path::new("example1.dot"));
-    render_to(&mut f)
-}
-```
-
-Output from first example (in `example1.dot`):
-
-```ignore
-digraph example1 {
-    N0[label="N0"];
-    N1[label="N1"];
-    N2[label="N2"];
-    N3[label="N3"];
-    N4[label="N4"];
-    N0 -> N1[label=""];
-    N0 -> N2[label=""];
-    N1 -> N3[label=""];
-    N2 -> N3[label=""];
-    N3 -> N4[label=""];
-    N4 -> N4[label=""];
-}
-```
-
-The second example illustrates using `node_label` and `edge_label` to
-add labels to the nodes and edges in the rendered graph. The graph
-here carries both `nodes` (the label text to use for rendering a
-particular node), and `edges` (again a list of `(source,target)`
-indices).
-
-This example also illustrates how to use a type (in this case the edge
-type) that shares substructure with the graph: the edge type here is a
-direct reference to the `(source,target)` pair stored in the graph's
-internal vector (rather than passing around a copy of the pair
-itself). Note that this implies that `fn edges(&'a self)` must
-construct a fresh `Vec<&'a (uint,uint)>` from the `Vec<(uint,uint)>`
-edges stored in `self`.
-
-Since both the set of nodes and the set of edges are always
-constructed from scratch via iterators, we use the `collect()` method
-from the `Iterator` trait to collect the nodes and edges into freshly
-constructed growable `Vec` values (rather use the `into_cow`
-from the `IntoCow` trait as was used in the first example
-above).
-
-The output from this example renders four nodes that make up the
-Hasse-diagram for the subsets of the set `{x, y}`. Each edge is
-labelled with the &sube; character (specified using the HTML character
-entity `&sube`).
-
-```rust
-use graphviz as dot;
-
-type Nd = uint;
-type Ed<'a> = &'a (uint, uint);
-struct Graph { nodes: Vec<&'static str>, edges: Vec<(uint,uint)> }
-
-pub fn render_to<W:Writer>(output: &mut W) {
-    let nodes = vec!("{x,y}","{x}","{y}","{}");
-    let edges = vec!((0,1), (0,2), (1,3), (2,3));
-    let graph = Graph { nodes: nodes, edges: edges };
-
-    dot::render(&graph, output).unwrap()
-}
-
-impl<'a> dot::Labeller<'a, Nd, Ed<'a>> for Graph {
-    fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new("example2").unwrap() }
-    fn node_id(&'a self, n: &Nd) -> dot::Id<'a> {
-        dot::Id::new(format!("N{}", n)).unwrap()
-    }
-    fn node_label<'a>(&'a self, n: &Nd) -> dot::LabelText<'a> {
-        dot::LabelStr(self.nodes[*n].as_slice().into_cow())
-    }
-    fn edge_label<'a>(&'a self, _: &Ed) -> dot::LabelText<'a> {
-        dot::LabelStr("&sube;".into_cow())
-    }
-}
-
-impl<'a> dot::GraphWalk<'a, Nd, Ed<'a>> for Graph {
-    fn nodes(&self) -> dot::Nodes<'a,Nd> { range(0,self.nodes.len()).collect() }
-    fn edges(&'a self) -> dot::Edges<'a,Ed<'a>> { self.edges.iter().collect() }
-    fn source(&self, e: &Ed) -> Nd { let & &(s,_) = e; s }
-    fn target(&self, e: &Ed) -> Nd { let & &(_,t) = e; t }
-}
-
-# pub fn main() { render_to(&mut Vec::new()) }
-```
-
-```no_run
-# pub fn render_to<W:Writer>(output: &mut W) { unimplemented!() }
-pub fn main() {
-    use std::io::File;
-    let mut f = File::create(&Path::new("example2.dot"));
-    render_to(&mut f)
-}
-```
-
-The third example is similar to the second, except now each node and
-edge now carries a reference to the string label for each node as well
-as that node's index. (This is another illustration of how to share
-structure with the graph itself, and why one might want to do so.)
-
-The output from this example is the same as the second example: the
-Hasse-diagram for the subsets of the set `{x, y}`.
-
-```rust
-use graphviz as dot;
-
-type Nd<'a> = (uint, &'a str);
-type Ed<'a> = (Nd<'a>, Nd<'a>);
-struct Graph { nodes: Vec<&'static str>, edges: Vec<(uint,uint)> }
-
-pub fn render_to<W:Writer>(output: &mut W) {
-    let nodes = vec!("{x,y}","{x}","{y}","{}");
-    let edges = vec!((0,1), (0,2), (1,3), (2,3));
-    let graph = Graph { nodes: nodes, edges: edges };
-
-    dot::render(&graph, output).unwrap()
-}
-
-impl<'a> dot::Labeller<'a, Nd<'a>, Ed<'a>> for Graph {
-    fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new("example3").unwrap() }
-    fn node_id(&'a self, n: &Nd<'a>) -> dot::Id<'a> {
-        dot::Id::new(format!("N{}", n.val0())).unwrap()
-    }
-    fn node_label<'a>(&'a self, n: &Nd<'a>) -> dot::LabelText<'a> {
-        let &(i, _) = n;
-        dot::LabelStr(self.nodes[i].as_slice().into_cow())
-    }
-    fn edge_label<'a>(&'a self, _: &Ed<'a>) -> dot::LabelText<'a> {
-        dot::LabelStr("&sube;".into_cow())
-    }
-}
-
-impl<'a> dot::GraphWalk<'a, Nd<'a>, Ed<'a>> for Graph {
-    fn nodes(&'a self) -> dot::Nodes<'a,Nd<'a>> {
-        self.nodes.iter().map(|s|s.as_slice()).enumerate().collect()
-    }
-    fn edges(&'a self) -> dot::Edges<'a,Ed<'a>> {
-        self.edges.iter()
-            .map(|&(i,j)|((i, self.nodes[i].as_slice()),
-                          (j, self.nodes[j].as_slice())))
-            .collect()
-    }
-    fn source(&self, e: &Ed<'a>) -> Nd<'a> { let &(s,_) = e; s }
-    fn target(&self, e: &Ed<'a>) -> Nd<'a> { let &(_,t) = e; t }
-}
-
-# pub fn main() { render_to(&mut Vec::new()) }
-```
-
-```no_run
-# pub fn render_to<W:Writer>(output: &mut W) { unimplemented!() }
-pub fn main() {
-    use std::io::File;
-    let mut f = File::create(&Path::new("example3.dot"));
-    render_to(&mut f)
-}
-```
-
-# References
-
-* [Graphviz](http://www.graphviz.org/)
-
-* [DOT language](http://www.graphviz.org/doc/info/lang.html)
-
-*/
+//! Generate files suitable for use with [Graphviz](http://www.graphviz.org/)
+//!
+//! The `render` function generates output (e.g. an `output.dot` file) for
+//! use with [Graphviz](http://www.graphviz.org/) by walking a labelled
+//! graph. (Graphviz can then automatically lay out the nodes and edges
+//! of the graph, and also optionally render the graph as an image or
+//! other [output formats](
+//! http://www.graphviz.org/content/output-formats), such as SVG.)
+//!
+//! Rather than impose some particular graph data structure on clients,
+//! this library exposes two traits that clients can implement on their
+//! own structs before handing them over to the rendering function.
+//!
+//! Note: This library does not yet provide access to the full
+//! expressiveness of the [DOT language](
+//! http://www.graphviz.org/doc/info/lang.html). For example, there are
+//! many [attributes](http://www.graphviz.org/content/attrs) related to
+//! providing layout hints (e.g. left-to-right versus top-down, which
+//! algorithm to use, etc). The current intention of this library is to
+//! emit a human-readable .dot file with very regular structure suitable
+//! for easy post-processing.
+//!
+//! # Examples
+//!
+//! The first example uses a very simple graph representation: a list of
+//! pairs of ints, representing the edges (the node set is implicit).
+//! Each node label is derived directly from the int representing the node,
+//! while the edge labels are all empty strings.
+//!
+//! This example also illustrates how to use `CowVec` to return
+//! an owned vector or a borrowed slice as appropriate: we construct the
+//! node vector from scratch, but borrow the edge list (rather than
+//! constructing a copy of all the edges from scratch).
+//!
+//! The output from this example renders five nodes, with the first four
+//! forming a diamond-shaped acyclic graph and then pointing to the fifth
+//! which is cyclic.
+//!
+//! ```rust
+//! use graphviz as dot;
+//!
+//! type Nd = int;
+//! type Ed = (int,int);
+//! struct Edges(Vec<Ed>);
+//!
+//! pub fn render_to<W:Writer>(output: &mut W) {
+//!     let edges = Edges(vec!((0,1), (0,2), (1,3), (2,3), (3,4), (4,4)));
+//!     dot::render(&edges, output).unwrap()
+//! }
+//!
+//! impl<'a> dot::Labeller<'a, Nd, Ed> for Edges {
+//!     fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new("example1").unwrap() }
+//!
+//!     fn node_id(&'a self, n: &Nd) -> dot::Id<'a> {
+//!         dot::Id::new(format!("N{}", *n)).unwrap()
+//!     }
+//! }
+//!
+//! impl<'a> dot::GraphWalk<'a, Nd, Ed> for Edges {
+//!     fn nodes(&self) -> dot::Nodes<'a,Nd> {
+//!         // (assumes that |N| \approxeq |E|)
+//!         let &Edges(ref v) = self;
+//!         let mut nodes = Vec::with_capacity(v.len());
+//!         for &(s,t) in v.iter() {
+//!             nodes.push(s); nodes.push(t);
+//!         }
+//!         nodes.sort();
+//!         nodes.dedup();
+//!         nodes.into_cow()
+//!     }
+//!
+//!     fn edges(&'a self) -> dot::Edges<'a,Ed> {
+//!         let &Edges(ref edges) = self;
+//!         edges.as_slice().into_cow()
+//!     }
+//!
+//!     fn source(&self, e: &Ed) -> Nd { let &(s,_) = e; s }
+//!
+//!     fn target(&self, e: &Ed) -> Nd { let &(_,t) = e; t }
+//! }
+//!
+//! # pub fn main() { render_to(&mut Vec::new()) }
+//! ```
+//!
+//! ```no_run
+//! # pub fn render_to<W:Writer>(output: &mut W) { unimplemented!() }
+//! pub fn main() {
+//!     use std::io::File;
+//!     let mut f = File::create(&Path::new("example1.dot"));
+//!     render_to(&mut f)
+//! }
+//! ```
+//!
+//! Output from first example (in `example1.dot`):
+//!
+//! ```ignore
+//! digraph example1 {
+//!     N0[label="N0"];
+//!     N1[label="N1"];
+//!     N2[label="N2"];
+//!     N3[label="N3"];
+//!     N4[label="N4"];
+//!     N0 -> N1[label=""];
+//!     N0 -> N2[label=""];
+//!     N1 -> N3[label=""];
+//!     N2 -> N3[label=""];
+//!     N3 -> N4[label=""];
+//!     N4 -> N4[label=""];
+//! }
+//! ```
+//!
+//! The second example illustrates using `node_label` and `edge_label` to
+//! add labels to the nodes and edges in the rendered graph. The graph
+//! here carries both `nodes` (the label text to use for rendering a
+//! particular node), and `edges` (again a list of `(source,target)`
+//! indices).
+//!
+//! This example also illustrates how to use a type (in this case the edge
+//! type) that shares substructure with the graph: the edge type here is a
+//! direct reference to the `(source,target)` pair stored in the graph's
+//! internal vector (rather than passing around a copy of the pair
+//! itself). Note that this implies that `fn edges(&'a self)` must
+//! construct a fresh `Vec<&'a (uint,uint)>` from the `Vec<(uint,uint)>`
+//! edges stored in `self`.
+//!
+//! Since both the set of nodes and the set of edges are always
+//! constructed from scratch via iterators, we use the `collect()` method
+//! from the `Iterator` trait to collect the nodes and edges into freshly
+//! constructed growable `Vec` values (rather than use the `into_cow`
+//! from the `IntoCow` trait as was used in the first example
+//! above).
+//!
+//! The output from this example renders four nodes that make up the
+//! Hasse-diagram for the subsets of the set `{x, y}`. Each edge is
+//! labelled with the &sube; character (specified using the HTML character
+//! entity `&sube`).
+//!
+//! ```rust
+//! use graphviz as dot;
+//!
+//! type Nd = uint;
+//! type Ed<'a> = &'a (uint, uint);
+//! struct Graph { nodes: Vec<&'static str>, edges: Vec<(uint,uint)> }
+//!
+//! pub fn render_to<W:Writer>(output: &mut W) {
+//!     let nodes = vec!("{x,y}","{x}","{y}","{}");
+//!     let edges = vec!((0,1), (0,2), (1,3), (2,3));
+//!     let graph = Graph { nodes: nodes, edges: edges };
+//!
+//!     dot::render(&graph, output).unwrap()
+//! }
+//!
+//! impl<'a> dot::Labeller<'a, Nd, Ed<'a>> for Graph {
+//!     fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new("example2").unwrap() }
+//!     fn node_id(&'a self, n: &Nd) -> dot::Id<'a> {
+//!         dot::Id::new(format!("N{}", n)).unwrap()
+//!     }
+//!     fn node_label<'a>(&'a self, n: &Nd) -> dot::LabelText<'a> {
+//!         dot::LabelStr(self.nodes[*n].as_slice().into_cow())
+//!     }
+//!     fn edge_label<'a>(&'a self, _: &Ed) -> dot::LabelText<'a> {
+//!         dot::LabelStr("&sube;".into_cow())
+//!     }
+//! }
+//!
+//! impl<'a> dot::GraphWalk<'a, Nd, Ed<'a>> for Graph {
+//!     fn nodes(&self) -> dot::Nodes<'a,Nd> { range(0,self.nodes.len()).collect() }
+//!     fn edges(&'a self) -> dot::Edges<'a,Ed<'a>> { self.edges.iter().collect() }
+//!     fn source(&self, e: &Ed) -> Nd { let & &(s,_) = e; s }
+//!     fn target(&self, e: &Ed) -> Nd { let & &(_,t) = e; t }
+//! }
+//!
+//! # pub fn main() { render_to(&mut Vec::new()) }
+//! ```
+//!
+//! ```no_run
+//! # pub fn render_to<W:Writer>(output: &mut W) { unimplemented!() }
+//! pub fn main() {
+//!     use std::io::File;
+//!     let mut f = File::create(&Path::new("example2.dot"));
+//!     render_to(&mut f)
+//! }
+//! ```
+//!
+//! The third example is similar to the second, except now each node and
+//! edge carries a reference to the string label for each node as well
+//! as that node's index. (This is another illustration of how to share
+//! structure with the graph itself, and why one might want to do so.)
+//!
+//! The output from this example is the same as the second example: the
+//! Hasse-diagram for the subsets of the set `{x, y}`.
+//!
+//! ```rust
+//! use graphviz as dot;
+//!
+//! type Nd<'a> = (uint, &'a str);
+//! type Ed<'a> = (Nd<'a>, Nd<'a>);
+//! struct Graph { nodes: Vec<&'static str>, edges: Vec<(uint,uint)> }
+//!
+//! pub fn render_to<W:Writer>(output: &mut W) {
+//!     let nodes = vec!("{x,y}","{x}","{y}","{}");
+//!     let edges = vec!((0,1), (0,2), (1,3), (2,3));
+//!     let graph = Graph { nodes: nodes, edges: edges };
+//!
+//!     dot::render(&graph, output).unwrap()
+//! }
+//!
+//! impl<'a> dot::Labeller<'a, Nd<'a>, Ed<'a>> for Graph {
+//!     fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new("example3").unwrap() }
+//!     fn node_id(&'a self, n: &Nd<'a>) -> dot::Id<'a> {
+//!         dot::Id::new(format!("N{}", n.val0())).unwrap()
+//!     }
+//!     fn node_label<'a>(&'a self, n: &Nd<'a>) -> dot::LabelText<'a> {
+//!         let &(i, _) = n;
+//!         dot::LabelStr(self.nodes[i].as_slice().into_cow())
+//!     }
+//!     fn edge_label<'a>(&'a self, _: &Ed<'a>) -> dot::LabelText<'a> {
+//!         dot::LabelStr("&sube;".into_cow())
+//!     }
+//! }
+//!
+//! impl<'a> dot::GraphWalk<'a, Nd<'a>, Ed<'a>> for Graph {
+//!     fn nodes(&'a self) -> dot::Nodes<'a,Nd<'a>> {
+//!         self.nodes.iter().map(|s|s.as_slice()).enumerate().collect()
+//!     }
+//!     fn edges(&'a self) -> dot::Edges<'a,Ed<'a>> {
+//!         self.edges.iter()
+//!             .map(|&(i,j)|((i, self.nodes[i].as_slice()),
+//!                           (j, self.nodes[j].as_slice())))
+//!             .collect()
+//!     }
+//!     fn source(&self, e: &Ed<'a>) -> Nd<'a> { let &(s,_) = e; s }
+//!     fn target(&self, e: &Ed<'a>) -> Nd<'a> { let &(_,t) = e; t }
+//! }
+//!
+//! # pub fn main() { render_to(&mut Vec::new()) }
+//! ```
+//!
+//! ```no_run
+//! # pub fn render_to<W:Writer>(output: &mut W) { unimplemented!() }
+//! pub fn main() {
+//!     use std::io::File;
+//!     let mut f = File::create(&Path::new("example3.dot"));
+//!     render_to(&mut f)
+//! }
+//! ```
+//!
+//! # References
+//!
+//! * [Graphviz](http://www.graphviz.org/)
+//!
+//! * [DOT language](http://www.graphviz.org/doc/info/lang.html)
 
 #![crate_name = "graphviz"]
 #![experimental]
diff --git a/src/liblibc/lib.rs b/src/liblibc/lib.rs
index 10610b7058406..0014a3e3941d7 100644
--- a/src/liblibc/lib.rs
+++ b/src/liblibc/lib.rs
@@ -19,59 +19,57 @@
        html_root_url = "http://doc.rust-lang.org/nightly/",
        html_playground_url = "http://play.rust-lang.org/")]
 
-/*!
-* Bindings for the C standard library and other platform libraries
-*
-* **NOTE:** These are *architecture and libc* specific. On Linux, these
-* bindings are only correct for glibc.
-*
-* This module contains bindings to the C standard library, organized into
-* modules by their defining standard.  Additionally, it contains some assorted
-* platform-specific definitions.  For convenience, most functions and types
-* are reexported, so `use libc::*` will import the available C bindings as
-* appropriate for the target platform. The exact set of functions available
-* are platform specific.
-*
-* *Note:* Because these definitions are platform-specific, some may not appear
-* in the generated documentation.
-*
-* We consider the following specs reasonably normative with respect to
-* interoperating with the C standard library (libc/msvcrt):
-*
-* * ISO 9899:1990 ('C95', 'ANSI C', 'Standard C'), NA1, 1995.
-* * ISO 9899:1999 ('C99' or 'C9x').
-* * ISO 9945:1988 / IEEE 1003.1-1988 ('POSIX.1').
-* * ISO 9945:2001 / IEEE 1003.1-2001 ('POSIX:2001', 'SUSv3').
-* * ISO 9945:2008 / IEEE 1003.1-2008 ('POSIX:2008', 'SUSv4').
-*
-* Note that any reference to the 1996 revision of POSIX, or any revs between
-* 1990 (when '88 was approved at ISO) and 2001 (when the next actual
-* revision-revision happened), are merely additions of other chapters (1b and
-* 1c) outside the core interfaces.
-*
-* Despite having several names each, these are *reasonably* coherent
-* point-in-time, list-of-definition sorts of specs. You can get each under a
-* variety of names but will wind up with the same definition in each case.
-*
-* See standards(7) in linux-manpages for more details.
-*
-* Our interface to these libraries is complicated by the non-universality of
-* conformance to any of them. About the only thing universally supported is
-* the first (C95), beyond that definitions quickly become absent on various
-* platforms.
-*
-* We therefore wind up dividing our module-space up (mostly for the sake of
-* sanity while editing, filling-in-details and eliminating duplication) into
-* definitions common-to-all (held in modules named c95, c99, posix88, posix01
-* and posix08) and definitions that appear only on *some* platforms (named
-* 'extra'). This would be things like significant OSX foundation kit, or Windows
-* library kernel32.dll, or various fancy glibc, Linux or BSD extensions.
-*
-* In addition to the per-platform 'extra' modules, we define a module of
-* 'common BSD' libc routines that never quite made it into POSIX but show up
-* in multiple derived systems. This is the 4.4BSD r2 / 1995 release, the final
-* one from Berkeley after the lawsuits died down and the CSRG dissolved.
-*/
+//! Bindings for the C standard library and other platform libraries
+//!
+//! **NOTE:** These are *architecture and libc* specific. On Linux, these
+//! bindings are only correct for glibc.
+//!
+//! This module contains bindings to the C standard library, organized into
+//! modules by their defining standard.  Additionally, it contains some assorted
+//! platform-specific definitions.  For convenience, most functions and types
+//! are reexported, so `use libc::*` will import the available C bindings as
+//! appropriate for the target platform. The exact set of functions available
+//! is platform specific.
+//!
+//! *Note:* Because these definitions are platform-specific, some may not appear
+//! in the generated documentation.
+//!
+//! We consider the following specs reasonably normative with respect to
+//! interoperating with the C standard library (libc/msvcrt):
+//!
+//! * ISO 9899:1990 ('C95', 'ANSI C', 'Standard C'), NA1, 1995.
+//! * ISO 9899:1999 ('C99' or 'C9x').
+//! * ISO 9945:1988 / IEEE 1003.1-1988 ('POSIX.1').
+//! * ISO 9945:2001 / IEEE 1003.1-2001 ('POSIX:2001', 'SUSv3').
+//! * ISO 9945:2008 / IEEE 1003.1-2008 ('POSIX:2008', 'SUSv4').
+//!
+//! Note that any reference to the 1996 revision of POSIX, or any revs between
+//! 1990 (when '88 was approved at ISO) and 2001 (when the next actual
+//! revision-revision happened), are merely additions of other chapters (1b and
+//! 1c) outside the core interfaces.
+//!
+//! Despite having several names each, these are *reasonably* coherent
+//! point-in-time, list-of-definition sorts of specs. You can get each under a
+//! variety of names but will wind up with the same definition in each case.
+//!
+//! See standards(7) in linux-manpages for more details.
+//!
+//! Our interface to these libraries is complicated by the non-universality of
+//! conformance to any of them. About the only thing universally supported is
+//! the first (C95); beyond that, definitions quickly become absent on various
+//! platforms.
+//!
+//! We therefore wind up dividing our module-space up (mostly for the sake of
+//! sanity while editing, filling-in-details and eliminating duplication) into
+//! definitions common-to-all (held in modules named c95, c99, posix88, posix01
+//! and posix08) and definitions that appear only on *some* platforms (named
+//! 'extra'). This would be things like significant OSX foundation kit, or Windows
+//! library kernel32.dll, or various fancy glibc, Linux or BSD extensions.
+//!
+//! In addition to the per-platform 'extra' modules, we define a module of
+//! 'common BSD' libc routines that never quite made it into POSIX but show up
+//! in multiple derived systems. This is the 4.4BSD r2 / 1995 release, the final
+//! one from Berkeley after the lawsuits died down and the CSRG dissolved.
 
 #![allow(non_camel_case_types)]
 #![allow(non_snake_case)]
diff --git a/src/librand/distributions/mod.rs b/src/librand/distributions/mod.rs
index 5bbddcb7c1652..0fa989bf0b2b9 100644
--- a/src/librand/distributions/mod.rs
+++ b/src/librand/distributions/mod.rs
@@ -8,17 +8,14 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-Sampling from random distributions.
-
-This is a generalization of `Rand` to allow parameters to control the
-exact properties of the generated values, e.g. the mean and standard
-deviation of a normal distribution. The `Sample` trait is the most
-general, and allows for generating values that change some state
-internally. The `IndependentSample` trait is for generating values
-that do not need to record state.
-
-*/
+//! Sampling from random distributions.
+//!
+//! This is a generalization of `Rand` to allow parameters to control the
+//! exact properties of the generated values, e.g. the mean and standard
+//! deviation of a normal distribution. The `Sample` trait is the most
+//! general, and allows for generating values that change some state
+//! internally. The `IndependentSample` trait is for generating values
+//! that do not need to record state.
 
 #![experimental]
 
diff --git a/src/librustc/lib.rs b/src/librustc/lib.rs
index f272bb52a1436..c599a0f2daf7f 100644
--- a/src/librustc/lib.rs
+++ b/src/librustc/lib.rs
@@ -8,15 +8,11 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-
-The Rust compiler.
-
-# Note
-
-This API is completely unstable and subject to change.
-
-*/
+//! The Rust compiler.
+//!
+//! # Note
+//!
+//! This API is completely unstable and subject to change.
 
 #![crate_name = "rustc"]
 #![experimental]
diff --git a/src/librustc/middle/astencode.rs b/src/librustc/middle/astencode.rs
index 7986a526b23b1..523e997a8deec 100644
--- a/src/librustc/middle/astencode.rs
+++ b/src/librustc/middle/astencode.rs
@@ -196,53 +196,38 @@ fn reserve_id_range(sess: &Session,
 }
 
 impl<'a, 'b, 'tcx> DecodeContext<'a, 'b, 'tcx> {
+    /// Translates an internal id, meaning a node id that is known to refer to some part of the
+    /// item currently being inlined, such as a local variable or argument.  All naked node-ids
+    /// that appear in types have this property, since if something might refer to an external item
+    /// we would use a def-id to allow for the possibility that the item resides in another crate.
     pub fn tr_id(&self, id: ast::NodeId) -> ast::NodeId {
-        /*!
-         * Translates an internal id, meaning a node id that is known
-         * to refer to some part of the item currently being inlined,
-         * such as a local variable or argument.  All naked node-ids
-         * that appear in types have this property, since if something
-         * might refer to an external item we would use a def-id to
-         * allow for the possibility that the item resides in another
-         * crate.
-         */
-
         // from_id_range should be non-empty
         assert!(!self.from_id_range.empty());
         (id - self.from_id_range.min + self.to_id_range.min)
     }
+
+    /// Translates an EXTERNAL def-id, converting the crate number from the one used in the encoded
+    /// data to the current crate numbers.  By external, I mean that it will be translated to a
+    /// reference to the item in its original crate, as opposed to being translated to a reference
+    /// to the inlined version of the item.  This is typically, but not always, what you want,
+    /// because most def-ids refer to external things like types or other fns that may or may not
+    /// be inlined.  Note that even when the inlined function is referencing itself recursively, we
+    /// would want `tr_def_id` for that reference--- conceptually the function calls the original,
+    /// non-inlined version, and trans deals with linking that recursive call to the inlined copy.
+    ///
+    /// However, there are a *few* cases where def-ids are used but we know that the thing being
+    /// referenced is in fact *internal* to the item being inlined.  In those cases, you should use
+    /// `tr_intern_def_id()` below.
     pub fn tr_def_id(&self, did: ast::DefId) -> ast::DefId {
-        /*!
-         * Translates an EXTERNAL def-id, converting the crate number
-         * from the one used in the encoded data to the current crate
-         * numbers..  By external, I mean that it be translated to a
-         * reference to the item in its original crate, as opposed to
-         * being translated to a reference to the inlined version of
-         * the item.  This is typically, but not always, what you
-         * want, because most def-ids refer to external things like
-         * types or other fns that may or may not be inlined.  Note
-         * that even when the inlined function is referencing itself
-         * recursively, we would want `tr_def_id` for that
-         * reference--- conceptually the function calls the original,
-         * non-inlined version, and trans deals with linking that
-         * recursive call to the inlined copy.
-         *
-         * However, there are a *few* cases where def-ids are used but
-         * we know that the thing being referenced is in fact *internal*
-         * to the item being inlined.  In those cases, you should use
-         * `tr_intern_def_id()` below.
-         */
 
         decoder::translate_def_id(self.cdata, did)
     }
-    pub fn tr_intern_def_id(&self, did: ast::DefId) -> ast::DefId {
-        /*!
-         * Translates an INTERNAL def-id, meaning a def-id that is
-         * known to refer to some part of the item currently being
-         * inlined.  In that case, we want to convert the def-id to
-         * refer to the current crate and to the new, inlined node-id.
-         */
 
+    /// Translates an INTERNAL def-id, meaning a def-id that is
+    /// known to refer to some part of the item currently being
+    /// inlined.  In that case, we want to convert the def-id to
+    /// refer to the current crate and to the new, inlined node-id.
+    pub fn tr_intern_def_id(&self, did: ast::DefId) -> ast::DefId {
         assert_eq!(did.krate, ast::LOCAL_CRATE);
         ast::DefId { krate: ast::LOCAL_CRATE, node: self.tr_id(did.node) }
     }
@@ -1780,43 +1765,40 @@ impl<'a, 'tcx> rbml_decoder_decoder_helpers<'tcx> for reader::Decoder<'a> {
         }
     }
 
+    /// Converts a def-id that appears in a type.  The correct
+    /// translation will depend on what kind of def-id this is.
+    /// This is a subtle point: type definitions are not
+    /// inlined into the current crate, so if the def-id names
+    /// a nominal type or type alias, then it should be
+    /// translated to refer to the source crate.
+    ///
+    /// However, *type parameters* are cloned along with the function
+    /// they are attached to.  So we should translate those def-ids
+    /// to refer to the new, cloned copy of the type parameter.
+    /// We only see references to free type parameters in the body of
+    /// an inlined function. In such cases, we need the def-id to
+    /// be a local id so that the TypeContents code is able to lookup
+    /// the relevant info in the ty_param_defs table.
+    ///
+    /// *Region parameters*, unfortunately, are another kettle of fish.
+    /// In such cases, def_id's can appear in types to distinguish
+    /// shadowed bound regions and so forth. It doesn't actually
+    /// matter so much what we do to these, since regions are erased
+    /// at trans time, but it's good to keep them consistent just in
+    /// case. We translate them with `tr_def_id()` which will map
+    /// the crate numbers back to the original source crate.
+    ///
+    /// Unboxed closures are cloned along with the function being
+    /// inlined, and all side tables use interned node IDs, so we
+    /// translate their def IDs accordingly.
+    ///
+    /// It'd be really nice to refactor the type repr to not include
+    /// def-ids so that all these distinctions were unnecessary.
     fn convert_def_id(&mut self,
                       dcx: &DecodeContext,
                       source: tydecode::DefIdSource,
                       did: ast::DefId)
                       -> ast::DefId {
-        /*!
-         * Converts a def-id that appears in a type.  The correct
-         * translation will depend on what kind of def-id this is.
-         * This is a subtle point: type definitions are not
-         * inlined into the current crate, so if the def-id names
-         * a nominal type or type alias, then it should be
-         * translated to refer to the source crate.
-         *
-         * However, *type parameters* are cloned along with the function
-         * they are attached to.  So we should translate those def-ids
-         * to refer to the new, cloned copy of the type parameter.
-         * We only see references to free type parameters in the body of
-         * an inlined function. In such cases, we need the def-id to
-         * be a local id so that the TypeContents code is able to lookup
-         * the relevant info in the ty_param_defs table.
-         *
-         * *Region parameters*, unfortunately, are another kettle of fish.
-         * In such cases, def_id's can appear in types to distinguish
-         * shadowed bound regions and so forth. It doesn't actually
-         * matter so much what we do to these, since regions are erased
-         * at trans time, but it's good to keep them consistent just in
-         * case. We translate them with `tr_def_id()` which will map
-         * the crate numbers back to the original source crate.
-         *
-         * Unboxed closures are cloned along with the function being
-         * inlined, and all side tables use interned node IDs, so we
-         * translate their def IDs accordingly.
-         *
-         * It'd be really nice to refactor the type repr to not include
-         * def-ids so that all these distinctions were unnecessary.
-         */
-
         let r = match source {
             NominalType | TypeWithId | RegionParameter => dcx.tr_def_id(did),
             TypeParameter | UnboxedClosureSource => dcx.tr_intern_def_id(did)
diff --git a/src/librustc/middle/borrowck/check_loans.rs b/src/librustc/middle/borrowck/check_loans.rs
index afcc533ffb81c..9a27abbe8322d 100644
--- a/src/librustc/middle/borrowck/check_loans.rs
+++ b/src/librustc/middle/borrowck/check_loans.rs
@@ -684,16 +684,13 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> {
         return ret;
     }
 
+    /// Reports an error if `expr` (which should be a path)
+    /// is using a moved/uninitialized value
     fn check_if_path_is_moved(&self,
                               id: ast::NodeId,
                               span: Span,
                               use_kind: MovedValueUseKind,
                               lp: &Rc<LoanPath<'tcx>>) {
-        /*!
-         * Reports an error if `expr` (which should be a path)
-         * is using a moved/uninitialized value
-         */
-
         debug!("check_if_path_is_moved(id={}, use_kind={}, lp={})",
                id, use_kind, lp.repr(self.bccx.tcx));
         let base_lp = owned_ptr_base_path_rc(lp);
@@ -708,30 +705,29 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> {
         });
     }
 
+    /// Reports an error if assigning to `lp` will use a
+    /// moved/uninitialized value. Mainly this is concerned with
+    /// detecting derefs of uninitialized pointers.
+    ///
+    /// For example:
+    ///
+    /// ```text
+    /// let a: int;
+    /// a = 10; // ok, even though a is uninitialized
+    ///
+    /// struct Point { x: uint, y: uint }
+    /// let p: Point;
+    /// p.x = 22; // ok, even though `p` is uninitialized
+    ///
+    /// let p: ~Point;
+    /// (*p).x = 22; // not ok, p is uninitialized, can't deref
+    /// ```
     fn check_if_assigned_path_is_moved(&self,
                                        id: ast::NodeId,
                                        span: Span,
                                        use_kind: MovedValueUseKind,
                                        lp: &Rc<LoanPath<'tcx>>)
     {
-        /*!
-         * Reports an error if assigning to `lp` will use a
-         * moved/uninitialized value. Mainly this is concerned with
-         * detecting derefs of uninitialized pointers.
-         *
-         * For example:
-         *
-         *     let a: int;
-         *     a = 10; // ok, even though a is uninitialized
-         *
-         *     struct Point { x: uint, y: uint }
-         *     let p: Point;
-         *     p.x = 22; // ok, even though `p` is uninitialized
-         *
-         *     let p: ~Point;
-         *     (*p).x = 22; // not ok, p is uninitialized, can't deref
-         */
-
         match lp.kind {
             LpVar(_) | LpUpvar(_) => {
                 // assigning to `x` does not require that `x` is initialized
diff --git a/src/librustc/middle/borrowck/doc.rs b/src/librustc/middle/borrowck/doc.rs
index 5b70d97b40276..c6db5340f0f51 100644
--- a/src/librustc/middle/borrowck/doc.rs
+++ b/src/librustc/middle/borrowck/doc.rs
@@ -8,1219 +8,1215 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-
-# The Borrow Checker
-
-This pass has the job of enforcing memory safety. This is a subtle
-topic. This docs aim to explain both the practice and the theory
-behind the borrow checker. They start with a high-level overview of
-how it works, and then proceed to dive into the theoretical
-background. Finally, they go into detail on some of the more subtle
-aspects.
-
-# Table of contents
-
-These docs are long. Search for the section you are interested in.
-
-- Overview
-- Formal model
-- Borrowing and loans
-- Moves and initialization
-- Drop flags and structural fragments
-- Future work
-
-# Overview
-
-The borrow checker checks one function at a time. It operates in two
-passes. The first pass, called `gather_loans`, walks over the function
-and identifies all of the places where borrows (e.g., `&` expressions
-and `ref` bindings) and moves (copies or captures of a linear value)
-occur. It also tracks initialization sites. For each borrow and move,
-it checks various basic safety conditions at this time (for example,
-that the lifetime of the borrow doesn't exceed the lifetime of the
-value being borrowed, or that there is no move out of an `&T`
-referent).
-
-It then uses the dataflow module to propagate which of those borrows
-may be in scope at each point in the procedure. A loan is considered
-to come into scope at the expression that caused it and to go out of
-scope when the lifetime of the resulting reference expires.
-
-Once the in-scope loans are known for each point in the program, the
-borrow checker walks the IR again in a second pass called
-`check_loans`. This pass examines each statement and makes sure that
-it is safe with respect to the in-scope loans.
-
-# Formal model
-
-Throughout the docs we'll consider a simple subset of Rust in which
-you can only borrow from lvalues, defined like so:
-
-```text
-LV = x | LV.f | *LV
-```
-
-Here `x` represents some variable, `LV.f` is a field reference,
-and `*LV` is a pointer dereference. There is no auto-deref or other
-niceties. This means that if you have a type like:
-
-```text
-struct S { f: uint }
-```
-
-and a variable `a: Box<S>`, then the rust expression `a.f` would correspond
-to an `LV` of `(*a).f`.
-
-Here is the formal grammar for the types we'll consider:
-
-```text
-TY = () | S<'LT...> | Box<TY> | & 'LT MQ TY
-MQ = mut | imm | const
-```
-
-Most of these types should be pretty self explanatory. Here `S` is a
-struct name and we assume structs are declared like so:
-
-```text
-SD = struct S<'LT...> { (f: TY)... }
-```
-
-# Borrowing and loans
-
-## An intuitive explanation
-
-### Issuing loans
-
-Now, imagine we had a program like this:
-
-```text
-struct Foo { f: uint, g: uint }
-...
-'a: {
-  let mut x: Box<Foo> = ...;
-  let y = &mut (*x).f;
-  x = ...;
-}
-```
-
-This is of course dangerous because mutating `x` will free the old
-value and hence invalidate `y`. The borrow checker aims to prevent
-this sort of thing.
-
-#### Loans and restrictions
-
-The way the borrow checker works is that it analyzes each borrow
-expression (in our simple model, that's stuff like `&LV`, though in
-real life there are a few other cases to consider). For each borrow
-expression, it computes a `Loan`, which is a data structure that
-records (1) the value being borrowed, (2) the mutability and scope of
-the borrow, and (3) a set of restrictions. In the code, `Loan` is a
-struct defined in `middle::borrowck`. Formally, we define `LOAN` as
-follows:
-
-```text
-LOAN = (LV, LT, MQ, RESTRICTION*)
-RESTRICTION = (LV, ACTION*)
-ACTION = MUTATE | CLAIM | FREEZE
-```
-
-Here the `LOAN` tuple defines the lvalue `LV` being borrowed; the
-lifetime `LT` of that borrow; the mutability `MQ` of the borrow; and a
-list of restrictions. The restrictions indicate actions which, if
-taken, could invalidate the loan and lead to type safety violations.
-
-Each `RESTRICTION` is a pair of a restrictive lvalue `LV` (which will
-either be the path that was borrowed or some prefix of the path that
-was borrowed) and a set of restricted actions.  There are three kinds
-of actions that may be restricted for the path `LV`:
-
-- `MUTATE` means that `LV` cannot be assigned to;
-- `CLAIM` means that the `LV` cannot be borrowed mutably;
-- `FREEZE` means that the `LV` cannot be borrowed immutably;
-
-Finally, it is never possible to move from an lvalue that appears in a
-restriction. This implies that the "empty restriction" `(LV, [])`,
-which contains an empty set of actions, still has a purpose---it
-prevents moves from `LV`. I chose not to make `MOVE` a fourth kind of
-action because that would imply that sometimes moves are permitted
-from restrictived values, which is not the case.
-
-#### Example
-
-To give you a better feeling for what kind of restrictions derived
-from a loan, let's look at the loan `L` that would be issued as a
-result of the borrow `&mut (*x).f` in the example above:
-
-```text
-L = ((*x).f, 'a, mut, RS) where
-    RS = [((*x).f, [MUTATE, CLAIM, FREEZE]),
-          (*x, [MUTATE, CLAIM, FREEZE]),
-          (x, [MUTATE, CLAIM, FREEZE])]
-```
-
-The loan states that the expression `(*x).f` has been loaned as
-mutable for the lifetime `'a`. Because the loan is mutable, that means
-that the value `(*x).f` may be mutated via the newly created reference
-(and *only* via that pointer). This is reflected in the
-restrictions `RS` that accompany the loan.
-
-The first restriction `((*x).f, [MUTATE, CLAIM, FREEZE])` states that
-the lender may not mutate, freeze, nor alias `(*x).f`. Mutation is
-illegal because `(*x).f` is only supposed to be mutated via the new
-reference, not by mutating the original path `(*x).f`. Freezing is
-illegal because the path now has an `&mut` alias; so even if we the
-lender were to consider `(*x).f` to be immutable, it might be mutated
-via this alias. They will be enforced for the lifetime `'a` of the
-loan. After the loan expires, the restrictions no longer apply.
-
-The second restriction on `*x` is interesting because it does not
-apply to the path that was lent (`(*x).f`) but rather to a prefix of
-the borrowed path. This is due to the rules of inherited mutability:
-if the user were to assign to (or freeze) `*x`, they would indirectly
-overwrite (or freeze) `(*x).f`, and thus invalidate the reference
-that was created. In general it holds that when a path is
-lent, restrictions are issued for all the owning prefixes of that
-path. In this case, the path `*x` owns the path `(*x).f` and,
-because `x` is an owned pointer, the path `x` owns the path `*x`.
-Therefore, borrowing `(*x).f` yields restrictions on both
-`*x` and `x`.
-
-### Checking for illegal assignments, moves, and reborrows
-
-Once we have computed the loans introduced by each borrow, the borrow
-checker uses a data flow propagation to compute the full set of loans
-in scope at each expression and then uses that set to decide whether
-that expression is legal.  Remember that the scope of loan is defined
-by its lifetime LT.  We sometimes say that a loan which is in-scope at
-a particular point is an "outstanding loan", and the set of
-restrictions included in those loans as the "outstanding
-restrictions".
-
-The kinds of expressions which in-scope loans can render illegal are:
-- *assignments* (`lv = v`): illegal if there is an in-scope restriction
-  against mutating `lv`;
-- *moves*: illegal if there is any in-scope restriction on `lv` at all;
-- *mutable borrows* (`&mut lv`): illegal there is an in-scope restriction
-  against claiming `lv`;
-- *immutable borrows* (`&lv`): illegal there is an in-scope restriction
-  against freezing `lv`.
-
-## Formal rules
-
-Now that we hopefully have some kind of intuitive feeling for how the
-borrow checker works, let's look a bit more closely now at the precise
-conditions that it uses. For simplicity I will ignore const loans.
-
-I will present the rules in a modified form of standard inference
-rules, which looks as follows:
-
-```text
-PREDICATE(X, Y, Z)                  // Rule-Name
-  Condition 1
-  Condition 2
-  Condition 3
-```
-
-The initial line states the predicate that is to be satisfied.  The
-indented lines indicate the conditions that must be met for the
-predicate to be satisfied. The right-justified comment states the name
-of this rule: there are comments in the borrowck source referencing
-these names, so that you can cross reference to find the actual code
-that corresponds to the formal rule.
-
-### Invariants
-
-I want to collect, at a high-level, the invariants the borrow checker
-maintains. I will give them names and refer to them throughout the
-text. Together these invariants are crucial for the overall soundness
-of the system.
-
-**Mutability requires uniqueness.** To mutate a path
-
-**Unique mutability.** There is only one *usable* mutable path to any
-given memory at any given time. This implies that when claiming memory
-with an expression like `p = &mut x`, the compiler must guarantee that
-the borrowed value `x` can no longer be mutated so long as `p` is
-live. (This is done via restrictions, read on.)
-
-**.**
-
-
-### The `gather_loans` pass
-
-We start with the `gather_loans` pass, which walks the AST looking for
-borrows.  For each borrow, there are three bits of information: the
-lvalue `LV` being borrowed and the mutability `MQ` and lifetime `LT`
-of the resulting pointer. Given those, `gather_loans` applies four
-validity tests:
-
-1. `MUTABILITY(LV, MQ)`: The mutability of the reference is
-compatible with the mutability of `LV` (i.e., not borrowing immutable
-data as mutable).
-
-2. `ALIASABLE(LV, MQ)`: The aliasability of the reference is
-compatible with the aliasability of `LV`. The goal is to prevent
-`&mut` borrows of aliasability data.
-
-3. `LIFETIME(LV, LT, MQ)`: The lifetime of the borrow does not exceed
-the lifetime of the value being borrowed.
-
-4. `RESTRICTIONS(LV, LT, ACTIONS) = RS`: This pass checks and computes the
-restrictions to maintain memory safety. These are the restrictions
-that will go into the final loan. We'll discuss in more detail below.
-
-## Checking mutability
-
-Checking mutability is fairly straightforward. We just want to prevent
-immutable data from being borrowed as mutable. Note that it is ok to
-borrow mutable data as immutable, since that is simply a
-freeze. Formally we define a predicate `MUTABLE(LV, MQ)` which, if
-defined, means that "borrowing `LV` with mutability `MQ` is ok. The
-Rust code corresponding to this predicate is the function
-`check_mutability` in `middle::borrowck::gather_loans`.
-
-### Checking mutability of variables
-
-*Code pointer:* Function `check_mutability()` in `gather_loans/mod.rs`,
-but also the code in `mem_categorization`.
-
-Let's begin with the rules for variables, which state that if a
-variable is declared as mutable, it may be borrowed any which way, but
-otherwise the variable must be borrowed as immutable or const:
-
-```text
-MUTABILITY(X, MQ)                   // M-Var-Mut
-  DECL(X) = mut
-
-MUTABILITY(X, MQ)                   // M-Var-Imm
-  DECL(X) = imm
-  MQ = imm | const
-```
-
-### Checking mutability of owned content
-
-Fields and owned pointers inherit their mutability from
-their base expressions, so both of their rules basically
-delegate the check to the base expression `LV`:
-
-```text
-MUTABILITY(LV.f, MQ)                // M-Field
-  MUTABILITY(LV, MQ)
-
-MUTABILITY(*LV, MQ)                 // M-Deref-Unique
-  TYPE(LV) = Box<Ty>
-  MUTABILITY(LV, MQ)
-```
-
-### Checking mutability of immutable pointer types
-
-Immutable pointer types like `&T` can only
-be borrowed if MQ is immutable or const:
-
-```text
-MUTABILITY(*LV, MQ)                // M-Deref-Borrowed-Imm
-  TYPE(LV) = &Ty
-  MQ == imm | const
-```
-
-### Checking mutability of mutable pointer types
-
-`&mut T` can be frozen, so it is acceptable to borrow it as either imm or mut:
-
-```text
-MUTABILITY(*LV, MQ)                 // M-Deref-Borrowed-Mut
-  TYPE(LV) = &mut Ty
-```
-
-## Checking aliasability
-
-The goal of the aliasability check is to ensure that we never permit
-`&mut` borrows of aliasable data. Formally we define a predicate
-`ALIASABLE(LV, MQ)` which if defined means that
-"borrowing `LV` with mutability `MQ` is ok". The
-Rust code corresponding to this predicate is the function
-`check_aliasability()` in `middle::borrowck::gather_loans`.
-
-### Checking aliasability of variables
-
-Local variables are never aliasable as they are accessible only within
-the stack frame.
-
-```text
-    ALIASABLE(X, MQ)                   // M-Var-Mut
-```
-
-### Checking aliasable of owned content
-
-Owned content is aliasable if it is found in an aliasable location:
-
-```text
-ALIASABLE(LV.f, MQ)                // M-Field
-  ALIASABLE(LV, MQ)
-
-ALIASABLE(*LV, MQ)                 // M-Deref-Unique
-  ALIASABLE(LV, MQ)
-```
-
-### Checking mutability of immutable pointer types
-
-Immutable pointer types like `&T` are aliasable, and hence can only be
-borrowed immutably:
-
-```text
-ALIASABLE(*LV, imm)                // M-Deref-Borrowed-Imm
-  TYPE(LV) = &Ty
-```
-
-### Checking mutability of mutable pointer types
-
-`&mut T` can be frozen, so it is acceptable to borrow it as either imm or mut:
-
-```text
-ALIASABLE(*LV, MQ)                 // M-Deref-Borrowed-Mut
-  TYPE(LV) = &mut Ty
-```
-
-## Checking lifetime
-
-These rules aim to ensure that no data is borrowed for a scope that exceeds
-its lifetime. These two computations wind up being intimately related.
-Formally, we define a predicate `LIFETIME(LV, LT, MQ)`, which states that
-"the lvalue `LV` can be safely borrowed for the lifetime `LT` with mutability
-`MQ`". The Rust code corresponding to this predicate is the module
-`middle::borrowck::gather_loans::lifetime`.
-
-### The Scope function
-
-Several of the rules refer to a helper function `SCOPE(LV)=LT`.  The
-`SCOPE(LV)` yields the lifetime `LT` for which the lvalue `LV` is
-guaranteed to exist, presuming that no mutations occur.
-
-The scope of a local variable is the block where it is declared:
-
-```text
-  SCOPE(X) = block where X is declared
-```
-
-The scope of a field is the scope of the struct:
-
-```text
-  SCOPE(LV.f) = SCOPE(LV)
-```
-
-The scope of a unique referent is the scope of the pointer, since
-(barring mutation or moves) the pointer will not be freed until
-the pointer itself `LV` goes out of scope:
-
-```text
-  SCOPE(*LV) = SCOPE(LV) if LV has type Box<T>
-```
-
-The scope of a borrowed referent is the scope associated with the
-pointer.  This is a conservative approximation, since the data that
-the pointer points at may actually live longer:
-
-```text
-  SCOPE(*LV) = LT if LV has type &'LT T or &'LT mut T
-```
-
-### Checking lifetime of variables
-
-The rule for variables states that a variable can only be borrowed a
-lifetime `LT` that is a subregion of the variable's scope:
-
-```text
-LIFETIME(X, LT, MQ)                 // L-Local
-  LT <= SCOPE(X)
-```
-
-### Checking lifetime for owned content
-
-The lifetime of a field or owned pointer is the same as the lifetime
-of its owner:
-
-```text
-LIFETIME(LV.f, LT, MQ)              // L-Field
-  LIFETIME(LV, LT, MQ)
-
-LIFETIME(*LV, LT, MQ)               // L-Deref-Send
-  TYPE(LV) = Box<Ty>
-  LIFETIME(LV, LT, MQ)
-```
-
-### Checking lifetime for derefs of references
-
-References have a lifetime `LT'` associated with them.  The
-data they point at has been guaranteed to be valid for at least this
-lifetime. Therefore, the borrow is valid so long as the lifetime `LT`
-of the borrow is shorter than the lifetime `LT'` of the pointer
-itself:
-
-```text
-LIFETIME(*LV, LT, MQ)               // L-Deref-Borrowed
-  TYPE(LV) = &LT' Ty OR &LT' mut Ty
-  LT <= LT'
-```
-
-## Computing the restrictions
-
-The final rules govern the computation of *restrictions*, meaning that
-we compute the set of actions that will be illegal for the life of the
-loan. The predicate is written `RESTRICTIONS(LV, LT, ACTIONS) =
-RESTRICTION*`, which can be read "in order to prevent `ACTIONS` from
-occurring on `LV`, the restrictions `RESTRICTION*` must be respected
-for the lifetime of the loan".
-
-Note that there is an initial set of restrictions: these restrictions
-are computed based on the kind of borrow:
-
-```text
-&mut LV =>   RESTRICTIONS(LV, LT, MUTATE|CLAIM|FREEZE)
-&LV =>       RESTRICTIONS(LV, LT, MUTATE|CLAIM)
-&const LV => RESTRICTIONS(LV, LT, [])
-```
-
-The reasoning here is that a mutable borrow must be the only writer,
-therefore it prevents other writes (`MUTATE`), mutable borrows
-(`CLAIM`), and immutable borrows (`FREEZE`). An immutable borrow
-permits other immutable borrows but forbids writes and mutable borrows.
-Finally, a const borrow just wants to be sure that the value is not
-moved out from under it, so no actions are forbidden.
-
-### Restrictions for loans of a local variable
-
-The simplest case is a borrow of a local variable `X`:
-
-```text
-RESTRICTIONS(X, LT, ACTIONS) = (X, ACTIONS)            // R-Variable
-```
-
-In such cases we just record the actions that are not permitted.
-
-### Restrictions for loans of fields
-
-Restricting a field is the same as restricting the owner of that
-field:
-
-```text
-RESTRICTIONS(LV.f, LT, ACTIONS) = RS, (LV.f, ACTIONS)  // R-Field
-  RESTRICTIONS(LV, LT, ACTIONS) = RS
-```
-
-The reasoning here is as follows. If the field must not be mutated,
-then you must not mutate the owner of the field either, since that
-would indirectly modify the field. Similarly, if the field cannot be
-frozen or aliased, we cannot allow the owner to be frozen or aliased,
-since doing so indirectly freezes/aliases the field. This is the
-origin of inherited mutability.
-
-### Restrictions for loans of owned referents
-
-Because the mutability of owned referents is inherited, restricting an
-owned referent is similar to restricting a field, in that it implies
-restrictions on the pointer. However, owned pointers have an important
-twist: if the owner `LV` is mutated, that causes the owned referent
-`*LV` to be freed! So whenever an owned referent `*LV` is borrowed, we
-must prevent the owned pointer `LV` from being mutated, which means
-that we always add `MUTATE` and `CLAIM` to the restriction set imposed
-on `LV`:
-
-```text
-RESTRICTIONS(*LV, LT, ACTIONS) = RS, (*LV, ACTIONS)    // R-Deref-Send-Pointer
-  TYPE(LV) = Box<Ty>
-  RESTRICTIONS(LV, LT, ACTIONS|MUTATE|CLAIM) = RS
-```
-
-### Restrictions for loans of immutable borrowed referents
-
-Immutable borrowed referents are freely aliasable, meaning that
-the compiler does not prevent you from copying the pointer.  This
-implies that issuing restrictions is useless. We might prevent the
-user from acting on `*LV` itself, but there could be another path
-`*LV1` that refers to the exact same memory, and we would not be
-restricting that path. Therefore, the rule for `&Ty` pointers
-always returns an empty set of restrictions, and it only permits
-restricting `MUTATE` and `CLAIM` actions:
-
-```text
-RESTRICTIONS(*LV, LT, ACTIONS) = []                    // R-Deref-Imm-Borrowed
-  TYPE(LV) = &LT' Ty
-  LT <= LT'                                            // (1)
-  ACTIONS subset of [MUTATE, CLAIM]
-```
-
-The reason that we can restrict `MUTATE` and `CLAIM` actions even
-without a restrictions list is that it is never legal to mutate nor to
-borrow mutably the contents of a `&Ty` pointer. In other words,
-those restrictions are already inherent in the type.
-
-Clause (1) in the rule for `&Ty` deserves mention. Here I
-specify that the lifetime of the loan must be less than the lifetime
-of the `&Ty` pointer. In simple cases, this clause is redundant, since
-the `LIFETIME()` function will already enforce the required rule:
-
-```
-fn foo(point: &'a Point) -> &'static f32 {
-    &point.x // Error
-}
-```
-
-The above example fails to compile both because of clause (1) above
-but also by the basic `LIFETIME()` check. However, in more advanced
-examples involving multiple nested pointers, clause (1) is needed:
-
-```
-fn foo(point: &'a &'b mut Point) -> &'b f32 {
-    &point.x // Error
-}
-```
-
-The `LIFETIME` rule here would accept `'b` because, in fact, the
-*memory is* guaranteed to remain valid (i.e., not be freed) for the
-lifetime `'b`, since the `&mut` pointer is valid for `'b`. However, we
-are returning an immutable reference, so we need the memory to be both
-valid and immutable. Even though `point.x` is referenced by an `&mut`
-pointer, it can still be considered immutable so long as that `&mut`
-pointer is found in an aliased location. That means the memory is
-guaranteed to be *immutable* for the lifetime of the `&` pointer,
-which is only `'a`, not `'b`. Hence this example yields an error.
-
-As a final twist, consider the case of two nested *immutable*
-pointers, rather than a mutable pointer within an immutable one:
-
-```
-fn foo(point: &'a &'b Point) -> &'b f32 {
-    &point.x // OK
-}
-```
-
-This function is legal. The reason for this is that the inner pointer
-(`*point : &'b Point`) is enough to guarantee the memory is immutable
-and valid for the lifetime `'b`.  This is reflected in
-`RESTRICTIONS()` by the fact that we do not recurse (i.e., we impose
-no restrictions on `LV`, which in this particular case is the pointer
-`point : &'a &'b Point`).
-
-#### Why both `LIFETIME()` and `RESTRICTIONS()`?
-
-Given the previous text, it might seem that `LIFETIME` and
-`RESTRICTIONS` should be folded together into one check, but there is
-a reason that they are separated. They answer separate concerns.
-The rules pertaining to `LIFETIME` exist to ensure that we don't
-create a borrowed pointer that outlives the memory it points at. So
-`LIFETIME` prevents a function like this:
-
-```
-fn get_1<'a>() -> &'a int {
-    let x = 1;
-    &x
-}
-```
-
-Here we would be returning a pointer into the stack. Clearly bad.
-
-However, the `RESTRICTIONS` rules are more concerned with how memory
-is used. The example above doesn't generate an error according to
-`RESTRICTIONS` because, for local variables, we don't require that the
-loan lifetime be a subset of the local variable lifetime. The idea
-here is that we *can* guarantee that `x` is not (e.g.) mutated for the
-lifetime `'a`, even though `'a` exceeds the function body and thus
-involves unknown code in the caller -- after all, `x` ceases to exist
-after we return and hence the remaining code in `'a` cannot possibly
-mutate it. This distinction is important for type checking functions
-like this one:
-
-```
-fn inc_and_get<'a>(p: &'a mut Point) -> &'a int {
-    p.x += 1;
-    &p.x
-}
-```
-
-In this case, we take in a `&mut` and return a frozen borrowed pointer
-with the same lifetime. So long as the lifetime of the returned value
-doesn't exceed the lifetime of the `&mut` we receive as input, this is
-fine, though it may seem surprising at first (it surprised me when I
-first worked it through). After all, we're guaranteeing that `*p`
-won't be mutated for the lifetime `'a`, even though we can't "see" the
-entirety of the code during that lifetime, since some of it occurs in
-our caller. But we *do* know that nobody can mutate `*p` except
-through `p`. So if we don't mutate `*p` and we don't return `p`, then
-we know that the right to mutate `*p` has been lost to our caller --
-in terms of capability, the caller passed in the ability to mutate
-`*p`, and we never gave it back. (Note that we can't return `p` while
-`*p` is borrowed since that would be a move of `p`, as `&mut` pointers
-are affine.)
-
-### Restrictions for loans of const aliasable referents
-
-Freeze pointers are read-only. There may be `&mut` or `&` aliases, and
-we can not prevent *anything* but moves in that case. So the
-`RESTRICTIONS` function is only defined if `ACTIONS` is the empty set.
-Because moves from a `&const` lvalue are never legal, it is not
-necessary to add any restrictions at all to the final result.
-
-```text
-    RESTRICTIONS(*LV, LT, []) = []                         // R-Deref-Freeze-Borrowed
-      TYPE(LV) = &const Ty
-```
-
-### Restrictions for loans of mutable borrowed referents
-
-Mutable borrowed pointers are guaranteed to be the only way to mutate
-their referent. This permits us to take greater license with them; for
-example, the referent can be frozen simply be ensuring that we do not
-use the original pointer to perform mutate. Similarly, we can allow
-the referent to be claimed, so long as the original pointer is unused
-while the new claimant is live.
-
-The rule for mutable borrowed pointers is as follows:
-
-```text
-RESTRICTIONS(*LV, LT, ACTIONS) = RS, (*LV, ACTIONS)    // R-Deref-Mut-Borrowed
-  TYPE(LV) = &LT' mut Ty
-  LT <= LT'                                            // (1)
-  RESTRICTIONS(LV, LT, ACTIONS) = RS                   // (2)
-```
-
-Let's examine the two numbered clauses:
-
-Clause (1) specifies that the lifetime of the loan (`LT`) cannot
-exceed the lifetime of the `&mut` pointer (`LT'`). The reason for this
-is that the `&mut` pointer is guaranteed to be the only legal way to
-mutate its referent -- but only for the lifetime `LT'`.  After that
-lifetime, the loan on the referent expires and hence the data may be
-modified by its owner again. This implies that we are only able to
-guarantee that the referent will not be modified or aliased for a
-maximum of `LT'`.
-
-Here is a concrete example of a bug this rule prevents:
-
-```
-// Test region-reborrow-from-shorter-mut-ref.rs:
-fn copy_pointer<'a,'b,T>(x: &'a mut &'b mut T) -> &'b mut T {
-    &mut **p // ERROR due to clause (1)
-}
-fn main() {
-    let mut x = 1;
-    let mut y = &mut x; // <-'b-----------------------------+
-    //      +-'a--------------------+                       |
-    //      v                       v                       |
-    let z = copy_borrowed_ptr(&mut y); // y is lent         |
-    *y += 1; // Here y==z, so both should not be usable...  |
-    *z += 1; // ...and yet they would be, but for clause 1. |
-} // <------------------------------------------------------+
-```
-
-Clause (2) propagates the restrictions on the referent to the pointer
-itself. This is the same as with an owned pointer, though the
-reasoning is mildly different. The basic goal in all cases is to
-prevent the user from establishing another route to the same data. To
-see what I mean, let's examine various cases of what can go wrong and
-show how it is prevented.
-
-**Example danger 1: Moving the base pointer.** One of the simplest
-ways to violate the rules is to move the base pointer to a new name
-and access it via that new name, thus bypassing the restrictions on
-the old name. Here is an example:
-
-```
-// src/test/compile-fail/borrowck-move-mut-base-ptr.rs
-fn foo(t0: &mut int) {
-    let p: &int = &*t0; // Freezes `*t0`
-    let t1 = t0;        //~ ERROR cannot move out of `t0`
-    *t1 = 22;           // OK, not a write through `*t0`
-}
-```
-
-Remember that `&mut` pointers are linear, and hence `let t1 = t0` is a
-move of `t0` -- or would be, if it were legal. Instead, we get an
-error, because clause (2) imposes restrictions on `LV` (`t0`, here),
-and any restrictions on a path make it impossible to move from that
-path.
-
-**Example danger 2: Claiming the base pointer.** Another possible
-danger is to mutably borrow the base path. This can lead to two bad
-scenarios. The most obvious is that the mutable borrow itself becomes
-another path to access the same data, as shown here:
-
-```
-// src/test/compile-fail/borrowck-mut-borrow-of-mut-base-ptr.rs
-fn foo<'a>(mut t0: &'a mut int,
-           mut t1: &'a mut int) {
-    let p: &int = &*t0;     // Freezes `*t0`
-    let mut t2 = &mut t0;   //~ ERROR cannot borrow `t0`
-    **t2 += 1;              // Mutates `*t0`
-}
-```
-
-In this example, `**t2` is the same memory as `*t0`. Because `t2` is
-an `&mut` pointer, `**t2` is a unique path and hence it would be
-possible to mutate `**t2` even though that memory was supposed to be
-frozen by the creation of `p`. However, an error is reported -- the
-reason is that the freeze `&*t0` will restrict claims and mutation
-against `*t0` which, by clause 2, in turn prevents claims and mutation
-of `t0`. Hence the claim `&mut t0` is illegal.
-
-Another danger with an `&mut` pointer is that we could swap the `t0`
-value away to create a new path:
-
-```
-// src/test/compile-fail/borrowck-swap-mut-base-ptr.rs
-fn foo<'a>(mut t0: &'a mut int,
-           mut t1: &'a mut int) {
-    let p: &int = &*t0;     // Freezes `*t0`
-    swap(&mut t0, &mut t1); //~ ERROR cannot borrow `t0`
-    *t1 = 22;
-}
-```
-
-This is illegal for the same reason as above. Note that if we added
-back a swap operator -- as we used to have -- we would want to be very
-careful to ensure this example is still illegal.
-
-**Example danger 3: Freeze the base pointer.** In the case where the
-referent is claimed, even freezing the base pointer can be dangerous,
-as shown in the following example:
-
-```
-// src/test/compile-fail/borrowck-borrow-of-mut-base-ptr.rs
-fn foo<'a>(mut t0: &'a mut int,
-           mut t1: &'a mut int) {
-    let p: &mut int = &mut *t0; // Claims `*t0`
-    let mut t2 = &t0;           //~ ERROR cannot borrow `t0`
-    let q: &int = &*t2;         // Freezes `*t0` but not through `*p`
-    *p += 1;                    // violates type of `*q`
-}
-```
-
-Here the problem is that `*t0` is claimed by `p`, and hence `p` wants
-to be the controlling pointer through which mutation or freezes occur.
-But `t2` would -- if it were legal -- have the type `& &mut int`, and
-hence would be a mutable pointer in an aliasable location, which is
-considered frozen (since no one can write to `**t2` as it is not a
-unique path). Therefore, we could reasonably create a frozen `&int`
-pointer pointing at `*t0` that coexists with the mutable pointer `p`,
-which is clearly unsound.
-
-However, it is not always unsafe to freeze the base pointer. In
-particular, if the referent is frozen, there is no harm in it:
-
-```
-// src/test/run-pass/borrowck-borrow-of-mut-base-ptr-safe.rs
-fn foo<'a>(mut t0: &'a mut int,
-           mut t1: &'a mut int) {
-    let p: &int = &*t0; // Freezes `*t0`
-    let mut t2 = &t0;
-    let q: &int = &*t2; // Freezes `*t0`, but that's ok...
-    let r: &int = &*t0; // ...after all, could do same thing directly.
-}
-```
-
-In this case, creating the alias `t2` of `t0` is safe because the only
-thing `t2` can be used for is to further freeze `*t0`, which is
-already frozen. In particular, we cannot assign to `*t0` through the
-new alias `t2`, as demonstrated in this test case:
-
-```
-// src/test/run-pass/borrowck-borrow-mut-base-ptr-in-aliasable-loc.rs
-fn foo(t0: & &mut int) {
-    let t1 = t0;
-    let p: &int = &**t0;
-    **t1 = 22; //~ ERROR cannot assign
-}
-```
-
-This distinction is reflected in the rules. When doing an `&mut`
-borrow -- as in the first example -- the set `ACTIONS` will be
-`CLAIM|MUTATE|FREEZE`, because claiming the referent implies that it
-cannot be claimed, mutated, or frozen by anyone else. These
-restrictions are propagated back to the base path and hence the base
-path is considered unfreezable.
-
-In contrast, when the referent is merely frozen -- as in the second
-example -- the set `ACTIONS` will be `CLAIM|MUTATE`, because freezing
-the referent implies that it cannot be claimed or mutated but permits
-others to freeze. Hence when these restrictions are propagated back to
-the base path, it will still be considered freezable.
-
-
-
-**FIXME #10520: Restrictions against mutating the base pointer.** When
-an `&mut` pointer is frozen or claimed, we currently pass along the
-restriction against MUTATE to the base pointer. I do not believe this
-restriction is needed. It dates from the days when we had a way to
-mutate that preserved the value being mutated (i.e., swap). Nowadays
-the only form of mutation is assignment, which destroys the pointer
-being mutated -- therefore, a mutation cannot create a new path to the
-same data. Rather, it removes an existing path. This implies that not
-only can we permit mutation, we can have mutation kill restrictions in
-the dataflow sense.
-
-**WARNING:** We do not currently have `const` borrows in the
-language. If they are added back in, we must ensure that they are
-consistent with all of these examples. The crucial question will be
-what sorts of actions are permitted with a `&const &mut` pointer. I
-would suggest that an `&mut` referent found in an `&const` location be
-prohibited from both freezes and claims. This would avoid the need to
-prevent `const` borrows of the base pointer when the referent is
-borrowed.
-
-# Moves and initialization
-
-The borrow checker is also in charge of ensuring that:
-
-- all memory which is accessed is initialized
-- immutable local variables are assigned at most once.
-
-These are two separate dataflow analyses built on the same
-framework. Let's look at checking that memory is initialized first;
-the checking of immutable local variable assignments works in a very
-similar way.
-
-To track the initialization of memory, we actually track all the
-points in the program that *create uninitialized memory*, meaning
-moves and the declaration of uninitialized variables. For each of
-these points, we create a bit in the dataflow set. Assignments to a
-variable `x` or path `a.b.c` kill the move/uninitialization bits for
-those paths and any subpaths (e.g., `x`, `x.y`, `a.b.c`, `*a.b.c`).
-Bits are unioned when two control-flow paths join. Thus, the
-presence of a bit indicates that the move may have occurred without an
-intervening assignment to the same memory. At each use of a variable,
-we examine the bits in scope, and check that none of them are
-moves/uninitializations of the variable that is being used.
-
-Let's look at a simple example:
-
-```
-fn foo(a: Box<int>) {
-    let b: Box<int>;   // Gen bit 0.
-
-    if cond {          // Bits: 0
-        use(&*a);
-        b = a;         // Gen bit 1, kill bit 0.
-        use(&*b);
-    } else {
-                       // Bits: 0
-    }
-                       // Bits: 0,1
-    use(&*a);          // Error.
-    use(&*b);          // Error.
-}
-
-fn use(a: &int) { }
-```
-
-In this example, the variable `b` is created uninitialized. In one
-branch of an `if`, we then move the variable `a` into `b`. Once we
-exit the `if`, therefore, it is an error to use `a` or `b` since both
-are only conditionally initialized. I have annotated the dataflow
-state using comments. There are two dataflow bits, with bit 0
-corresponding to the creation of `b` without an initializer, and bit 1
-corresponding to the move of `a`. The assignment `b = a` both
-generates bit 1, because it is a move of `a`, and kills bit 0, because
-`b` is now initialized. On the else branch, though, `b` is never
-initialized, and so bit 0 remains untouched. When the two flows of
-control join, we union the bits from both sides, resulting in both
-bits 0 and 1 being set. Thus any attempt to use `a` uncovers the bit 1
-from the "then" branch, showing that `a` may be moved, and any attempt
-to use `b` uncovers bit 0, from the "else" branch, showing that `b`
-may not be initialized.
-
-## Initialization of immutable variables
-
-Initialization of immutable variables works in a very similar way,
-except that:
-
-1. we generate bits for each assignment to a variable;
-2. the bits are never killed except when the variable goes out of scope.
-
-Thus the presence of an assignment bit indicates that the assignment
-may have occurred. Note that assignments are only killed when the
-variable goes out of scope, as it is not relevant whether or not there
-has been a move in the meantime. Using these bits, we can declare that
-an assignment to an immutable variable is legal iff there is no other
-assignment bit to that same variable in scope.
-
-## Why is the design made this way?
-
-It may seem surprising that we assign dataflow bits to *each move*
-rather than *each path being moved*. This is somewhat less efficient,
-since on each use, we must iterate through all moves and check whether
-any of them correspond to the path in question. Similar concerns apply
-to the analysis for double assignments to immutable variables. The
-main reason to do it this way is that it allows us to print better
-error messages, because when a use occurs, we can print out the
-precise move that may be in scope, rather than simply having to say
-"the variable may not be initialized".
-
-## Data structures used in the move analysis
-
-The move analysis maintains several data structures that enable it to
-cross-reference moves and assignments to determine when they may be
-moving/assigning the same memory. These are all collected into the
-`MoveData` and `FlowedMoveData` structs. The former represents the set
-of move paths, moves, and assignments, and the latter adds in the
-results of a dataflow computation.
-
-### Move paths
-
-The `MovePath` tree tracks every path that is moved or assigned to.
-These paths have the same form as the `LoanPath` data structure, which
-in turn is the "real world version of the lvalues `LV` that we
-introduced earlier. The difference between a `MovePath` and a `LoanPath`
-is that move paths are:
-
-1. Canonicalized, so that we have exactly one copy of each, and
-   we can refer to move paths by index;
-2. Cross-referenced with other paths into a tree, so that given a move
-   path we can efficiently find all parent move paths and all
-   extensions (e.g., given the `a.b` move path, we can easily find the
-   move path `a` and also the move paths `a.b.c`)
-3. Cross-referenced with moves and assignments, so that we can
-   easily find all moves and assignments to a given path.
-
-The mechanism that we use is to create a `MovePath` record for each
-move path. These are arranged in an array and are referenced using
-`MovePathIndex` values, which are newtype'd indices. The `MovePath`
-structs are arranged into a tree, representing using the standard
-Knuth representation where each node has a child 'pointer' and a "next
-sibling" 'pointer'. In addition, each `MovePath` has a parent
-'pointer'.  In this case, the 'pointers' are just `MovePathIndex`
-values.
-
-In this way, if we want to find all base paths of a given move path,
-we can just iterate up the parent pointers (see `each_base_path()` in
-the `move_data` module). If we want to find all extensions, we can
-iterate through the subtree (see `each_extending_path()`).
-
-### Moves and assignments
-
-There are structs to represent moves (`Move`) and assignments
-(`Assignment`), and these are also placed into arrays and referenced
-by index. All moves of a particular path are arranged into a linked
-lists, beginning with `MovePath.first_move` and continuing through
-`Move.next_move`.
-
-We distinguish between "var" assignments, which are assignments to a
-variable like `x = foo`, and "path" assignments (`x.f = foo`).  This
-is because we need to assign dataflows to the former, but not the
-latter, so as to check for double initialization of immutable
-variables.
-
-### Gathering and checking moves
-
-Like loans, we distinguish two phases. The first, gathering, is where
-we uncover all the moves and assignments. As with loans, we do some
-basic sanity checking in this phase, so we'll report errors if you
-attempt to move out of a borrowed pointer etc. Then we do the dataflow
-(see `FlowedMoveData::new`). Finally, in the `check_loans.rs` code, we
-walk back over, identify all uses, assignments, and captures, and
-check that they are legal given the set of dataflow bits we have
-computed for that program point.
-
-# Drop flags and structural fragments
-
-In addition to the job of enforcing memory safety, the borrow checker
-code is also responsible for identifying the *structural fragments* of
-data in the function, to support out-of-band dynamic drop flags
-allocated on the stack. (For background, see [RFC PR #320].)
-
-[RFC PR #320]: https://github.com/rust-lang/rfcs/pull/320
-
-Semantically, each piece of data that has a destructor may need a
-boolean flag to indicate whether or not its destructor has been run
-yet. However, in many cases there is no need to actually maintain such
-a flag: It can be apparent from the code itself that a given path is
-always initialized (or always deinitialized) when control reaches the
-end of its owner's scope, and thus we can unconditionally emit (or
-not) the destructor invocation for that path.
-
-A simple example of this is the following:
-
-```rust
-struct D { p: int }
-impl D { fn new(x: int) -> D { ... }
-impl Drop for D { ... }
-
-fn foo(a: D, b: D, t: || -> bool) {
-    let c: D;
-    let d: D;
-    if t() { c = b; }
-}
-```
-
-At the end of the body of `foo`, the compiler knows that `a` is
-initialized, introducing a drop obligation (deallocating the boxed
-integer) for the end of `a`'s scope that is run unconditionally.
-Likewise the compiler knows that `d` is not initialized, and thus it
-leave out the drop code for `d`.
-
-The compiler cannot statically know the drop-state of `b` nor `c` at
-the end of their scope, since that depends on the value of
-`t`. Therefore, we need to insert boolean flags to track whether we
-need to drop `b` and `c`.
-
-However, the matter is not as simple as just mapping local variables
-to their corresponding drop flags when necessary. In particular, in
-addition to being able to move data out of local variables, Rust
-allows one to move values in and out of structured data.
-
-Consider the following:
-
-```rust
-struct S { x: D, y: D, z: D }
-
-fn foo(a: S, mut b: S, t: || -> bool) {
-    let mut c: S;
-    let d: S;
-    let e: S = a.clone();
-    if t() {
-        c = b;
-        b.x = e.y;
-    }
-    if t() { c.y = D::new(4); }
-}
-```
-
-As before, the drop obligations of `a` and `d` can be statically
-determined, and again the state of `b` and `c` depend on dynamic
-state. But additionally, the dynamic drop obligations introduced by
-`b` and `c` are not just per-local boolean flags. For example, if the
-first call to `t` returns `false` and the second call `true`, then at
-the end of their scope, `b` will be completely initialized, but only
-`c.y` in `c` will be initialized.  If both calls to `t` return `true`,
-then at the end of their scope, `c` will be completely initialized,
-but only `b.x` will be initialized in `b`, and only `e.x` and `e.z`
-will be initialized in `e`.
-
-Note that we need to cover the `z` field in each case in some way,
-since it may (or may not) need to be dropped, even though `z` is never
-directly mentioned in the body of the `foo` function. We call a path
-like `b.z` a *fragment sibling* of `b.x`, since the field `z` comes
-from the same structure `S` that declared the field `x` in `b.x`.
-
-In general we need to maintain boolean flags that match the
-`S`-structure of both `b` and `c`.  In addition, we need to consult
-such a flag when doing an assignment (such as `c.y = D::new(4);`
-above), in order to know whether or not there is a previous value that
-needs to be dropped before we do the assignment.
-
-So for any given function, we need to determine what flags are needed
-to track its drop obligations. Our strategy for determining the set of
-flags is to represent the fragmentation of the structure explicitly:
-by starting initially from the paths that are explicitly mentioned in
-moves and assignments (such as `b.x` and `c.y` above), and then
-traversing the structure of the path's type to identify leftover
-*unmoved fragments*: assigning into `c.y` means that `c.x` and `c.z`
-are leftover unmoved fragments. Each fragment represents a drop
-obligation that may need to be tracked. Paths that are only moved or
-assigned in their entirety (like `a` and `d`) are treated as a single
-drop obligation.
-
-The fragment construction process works by piggy-backing on the
-existing `move_data` module. We already have callbacks that visit each
-direct move and assignment; these form the basis for the sets of
-moved_leaf_paths and assigned_leaf_paths. From these leaves, we can
-walk up their parent chain to identify all of their parent paths.
-We need to identify the parents because of cases like the following:
-
-```rust
-struct Pair<X,Y>{ x: X, y: Y }
-fn foo(dd_d_d: Pair<Pair<Pair<D, D>, D>, D>) {
-    other_function(dd_d_d.x.y);
-}
-```
-
-In this code, the move of the path `dd_d.x.y` leaves behind not only
-the fragment drop-obligation `dd_d.x.x` but also `dd_d.y` as well.
-
-Once we have identified the directly-referenced leaves and their
-parents, we compute the left-over fragments, in the function
-`fragments::add_fragment_siblings`. As of this writing this works by
-looking at each directly-moved or assigned path P, and blindly
-gathering all sibling fields of P (as well as siblings for the parents
-of P, etc). After accumulating all such siblings, we filter out the
-entries added as siblings of P that turned out to be
-directly-referenced paths (or parents of directly referenced paths)
-themselves, thus leaving the never-referenced "left-overs" as the only
-thing left from the gathering step.
-
-## Array structural fragments
-
-A special case of the structural fragments discussed above are
-the elements of an array that has been passed by value, such as
-the following:
-
-```rust
-fn foo(a: [D, ..10], i: uint) -> D {
-    a[i]
-}
-```
-
-The above code moves a single element out of the input array `a`.
-The remainder of the array still needs to be dropped; i.e., it
-is a structural fragment. Note that after performing such a move,
-it is not legal to read from the array `a`. There are a number of
-ways to deal with this, but the important thing to note is that
-the semantics needs to distinguish in some manner between a
-fragment that is the *entire* array versus a fragment that represents
-all-but-one element of the array.  A place where that distinction
-would arise is the following:
-
-```rust
-fn foo(a: [D, ..10], b: [D, ..10], i: uint, t: bool) -> D {
-    if t {
-        a[i]
-    } else {
-        b[i]
-    }
-
-    // When control exits, we will need either to drop all of `a`
-    // and all-but-one of `b`, or to drop all of `b` and all-but-one
-    // of `a`.
-}
-```
-
-There are a number of ways that the trans backend could choose to
-compile this (e.g. a `[bool, ..10]` array for each such moved array;
-or an `Option<uint>` for each moved array).  From the viewpoint of the
-borrow-checker, the important thing is to record what kind of fragment
-is implied by the relevant moves.
-
-# Future work
-
-While writing up these docs, I encountered some rules I believe to be
-stricter than necessary:
-
-- I think restricting the `&mut` LV against moves and `ALIAS` is sufficient,
-  `MUTATE` and `CLAIM` are overkill. `MUTATE` was necessary when swap was
-  a built-in operator, but as it is not, it is implied by `CLAIM`,
-  and `CLAIM` is implied by `ALIAS`. The only net effect of this is an
-  extra error message in some cases, though.
-- I have not described how closures interact. Current code is unsound.
-  I am working on describing and implementing the fix.
-- If we wish, we can easily extend the move checking to allow finer-grained
-  tracking of what is initialized and what is not, enabling code like
-  this:
-
-      a = x.f.g; // x.f.g is now uninitialized
-      // here, x and x.f are not usable, but x.f.h *is*
-      x.f.g = b; // x.f.g is not initialized
-      // now x, x.f, x.f.g, x.f.h are all usable
-
-  What needs to change here, most likely, is that the `moves` module
-  should record not only what paths are moved, but what expressions
-  are actual *uses*. For example, the reference to `x` in `x.f.g = b`
-  is not a true *use* in the sense that it requires `x` to be fully
-  initialized. This is in fact why the above code produces an error
-  today: the reference to `x` in `x.f.g = b` is considered illegal
-  because `x` is not fully initialized.
-
-There are also some possible refactorings:
-
-- It might be nice to replace all loan paths with the MovePath mechanism,
-  since they allow lightweight comparison using an integer.
-
-*/
+//! # The Borrow Checker
+//!
+//! This pass has the job of enforcing memory safety. This is a subtle
+//! topic. These docs aim to explain both the practice and the theory
+//! behind the borrow checker. They start with a high-level overview of
+//! how it works, and then proceed to dive into the theoretical
+//! background. Finally, they go into detail on some of the more subtle
+//! aspects.
+//!
+//! # Table of contents
+//!
+//! These docs are long. Search for the section you are interested in.
+//!
+//! - Overview
+//! - Formal model
+//! - Borrowing and loans
+//! - Moves and initialization
+//! - Drop flags and structural fragments
+//! - Future work
+//!
+//! # Overview
+//!
+//! The borrow checker checks one function at a time. It operates in two
+//! passes. The first pass, called `gather_loans`, walks over the function
+//! and identifies all of the places where borrows (e.g., `&` expressions
+//! and `ref` bindings) and moves (copies or captures of a linear value)
+//! occur. It also tracks initialization sites. For each borrow and move,
+//! it checks various basic safety conditions at this time (for example,
+//! that the lifetime of the borrow doesn't exceed the lifetime of the
+//! value being borrowed, or that there is no move out of an `&T`
+//! referent).
+//!
+//! It then uses the dataflow module to propagate which of those borrows
+//! may be in scope at each point in the procedure. A loan is considered
+//! to come into scope at the expression that caused it and to go out of
+//! scope when the lifetime of the resulting reference expires.
+//!
+//! Once the in-scope loans are known for each point in the program, the
+//! borrow checker walks the IR again in a second pass called
+//! `check_loans`. This pass examines each statement and makes sure that
+//! it is safe with respect to the in-scope loans.
+//!
+//! # Formal model
+//!
+//! Throughout the docs we'll consider a simple subset of Rust in which
+//! you can only borrow from lvalues, defined like so:
+//!
+//! ```text
+//! LV = x | LV.f | *LV
+//! ```
+//!
+//! Here `x` represents some variable, `LV.f` is a field reference,
+//! and `*LV` is a pointer dereference. There is no auto-deref or other
+//! niceties. This means that if you have a type like:
+//!
+//! ```text
+//! struct S { f: uint }
+//! ```
+//!
+//! and a variable `a: Box<S>`, then the rust expression `a.f` would correspond
+//! to an `LV` of `(*a).f`.
+//!
+//! Here is the formal grammar for the types we'll consider:
+//!
+//! ```text
+//! TY = () | S<'LT...> | Box<TY> | & 'LT MQ TY
+//! MQ = mut | imm | const
+//! ```
+//!
+//! Most of these types should be pretty self explanatory. Here `S` is a
+//! struct name and we assume structs are declared like so:
+//!
+//! ```text
+//! SD = struct S<'LT...> { (f: TY)... }
+//! ```
+//!
+//! # Borrowing and loans
+//!
+//! ## An intuitive explanation
+//!
+//! ### Issuing loans
+//!
+//! Now, imagine we had a program like this:
+//!
+//! ```text
+//! struct Foo { f: uint, g: uint }
+//! ...
+//! 'a: {
+//!   let mut x: Box<Foo> = ...;
+//!   let y = &mut (*x).f;
+//!   x = ...;
+//! }
+//! ```
+//!
+//! This is of course dangerous because mutating `x` will free the old
+//! value and hence invalidate `y`. The borrow checker aims to prevent
+//! this sort of thing.
+//!
+//! #### Loans and restrictions
+//!
+//! The way the borrow checker works is that it analyzes each borrow
+//! expression (in our simple model, that's stuff like `&LV`, though in
+//! real life there are a few other cases to consider). For each borrow
+//! expression, it computes a `Loan`, which is a data structure that
+//! records (1) the value being borrowed, (2) the mutability and scope of
+//! the borrow, and (3) a set of restrictions. In the code, `Loan` is a
+//! struct defined in `middle::borrowck`. Formally, we define `LOAN` as
+//! follows:
+//!
+//! ```text
+//! LOAN = (LV, LT, MQ, RESTRICTION*)
+//! RESTRICTION = (LV, ACTION*)
+//! ACTION = MUTATE | CLAIM | FREEZE
+//! ```
+//!
+//! Here the `LOAN` tuple defines the lvalue `LV` being borrowed; the
+//! lifetime `LT` of that borrow; the mutability `MQ` of the borrow; and a
+//! list of restrictions. The restrictions indicate actions which, if
+//! taken, could invalidate the loan and lead to type safety violations.
+//!
+//! Each `RESTRICTION` is a pair of a restrictive lvalue `LV` (which will
+//! either be the path that was borrowed or some prefix of the path that
+//! was borrowed) and a set of restricted actions.  There are three kinds
+//! of actions that may be restricted for the path `LV`:
+//!
+//! - `MUTATE` means that `LV` cannot be assigned to;
+//! - `CLAIM` means that the `LV` cannot be borrowed mutably;
+//! - `FREEZE` means that the `LV` cannot be borrowed immutably;
+//!
+//! Finally, it is never possible to move from an lvalue that appears in a
+//! restriction. This implies that the "empty restriction" `(LV, [])`,
+//! which contains an empty set of actions, still has a purpose---it
+//! prevents moves from `LV`. I chose not to make `MOVE` a fourth kind of
+//! action because that would imply that sometimes moves are permitted
+//! from restricted values, which is not the case.
+//!
+//! #### Example
+//!
+//! To give you a better feeling for what kinds of restrictions are derived
+//! from a loan, let's look at the loan `L` that would be issued as a
+//! result of the borrow `&mut (*x).f` in the example above:
+//!
+//! ```text
+//! L = ((*x).f, 'a, mut, RS) where
+//!     RS = [((*x).f, [MUTATE, CLAIM, FREEZE]),
+//!           (*x, [MUTATE, CLAIM, FREEZE]),
+//!           (x, [MUTATE, CLAIM, FREEZE])]
+//! ```
+//!
+//! The loan states that the expression `(*x).f` has been loaned as
+//! mutable for the lifetime `'a`. Because the loan is mutable, that means
+//! that the value `(*x).f` may be mutated via the newly created reference
+//! (and *only* via that pointer). This is reflected in the
+//! restrictions `RS` that accompany the loan.
+//!
+//! The first restriction `((*x).f, [MUTATE, CLAIM, FREEZE])` states that
+//! the lender may not mutate, freeze, nor alias `(*x).f`. Mutation is
+//! illegal because `(*x).f` is only supposed to be mutated via the new
+//! reference, not by mutating the original path `(*x).f`. Freezing is
+//! illegal because the path now has an `&mut` alias; so even if the
+//! lender were to consider `(*x).f` to be immutable, it might be mutated
+//! via this alias. They will be enforced for the lifetime `'a` of the
+//! loan. After the loan expires, the restrictions no longer apply.
+//!
+//! The second restriction on `*x` is interesting because it does not
+//! apply to the path that was lent (`(*x).f`) but rather to a prefix of
+//! the borrowed path. This is due to the rules of inherited mutability:
+//! if the user were to assign to (or freeze) `*x`, they would indirectly
+//! overwrite (or freeze) `(*x).f`, and thus invalidate the reference
+//! that was created. In general it holds that when a path is
+//! lent, restrictions are issued for all the owning prefixes of that
+//! path. In this case, the path `*x` owns the path `(*x).f` and,
+//! because `x` is an owned pointer, the path `x` owns the path `*x`.
+//! Therefore, borrowing `(*x).f` yields restrictions on both
+//! `*x` and `x`.
+//!
+//! ### Checking for illegal assignments, moves, and reborrows
+//!
+//! Once we have computed the loans introduced by each borrow, the borrow
+//! checker uses a data flow propagation to compute the full set of loans
+//! in scope at each expression and then uses that set to decide whether
+//! that expression is legal.  Remember that the scope of a loan is defined
+//! by its lifetime LT.  We sometimes say that a loan which is in-scope at
+//! a particular point is an "outstanding loan", and the set of
+//! restrictions included in those loans as the "outstanding
+//! restrictions".
+//!
+//! The kinds of expressions which in-scope loans can render illegal are:
+//! - *assignments* (`lv = v`): illegal if there is an in-scope restriction
+//!   against mutating `lv`;
+//! - *moves*: illegal if there is any in-scope restriction on `lv` at all;
+//! - *mutable borrows* (`&mut lv`): illegal if there is an in-scope restriction
+//!   against claiming `lv`;
+//! - *immutable borrows* (`&lv`): illegal if there is an in-scope restriction
+//!   against freezing `lv`.
+//!
+//! ## Formal rules
+//!
+//! Now that we hopefully have some kind of intuitive feeling for how the
+//! borrow checker works, let's look a bit more closely now at the precise
+//! conditions that it uses. For simplicity I will ignore const loans.
+//!
+//! I will present the rules in a modified form of standard inference
+//! rules, which looks as follows:
+//!
+//! ```text
+//! PREDICATE(X, Y, Z)                  // Rule-Name
+//!   Condition 1
+//!   Condition 2
+//!   Condition 3
+//! ```
+//!
+//! The initial line states the predicate that is to be satisfied.  The
+//! indented lines indicate the conditions that must be met for the
+//! predicate to be satisfied. The right-justified comment states the name
+//! of this rule: there are comments in the borrowck source referencing
+//! these names, so that you can cross reference to find the actual code
+//! that corresponds to the formal rule.
+//!
+//! ### Invariants
+//!
+//! I want to collect, at a high-level, the invariants the borrow checker
+//! maintains. I will give them names and refer to them throughout the
+//! text. Together these invariants are crucial for the overall soundness
+//! of the system.
+//!
+//! **Unique mutability.** There is only one *usable* mutable path to any
+//! given memory at any given time. This implies that when claiming memory
+//! with an expression like `p = &mut x`, the compiler must guarantee that
+//! the borrowed value `x` can no longer be mutated so long as `p` is
+//! live. (This is done via restrictions, read on.)
+//!
+//!
+//! ### The `gather_loans` pass
+//!
+//! We start with the `gather_loans` pass, which walks the AST looking for
+//! borrows.  For each borrow, there are three bits of information: the
+//! lvalue `LV` being borrowed and the mutability `MQ` and lifetime `LT`
+//! of the resulting pointer. Given those, `gather_loans` applies four
+//! validity tests:
+//!
+//! 1. `MUTABILITY(LV, MQ)`: The mutability of the reference is
+//! compatible with the mutability of `LV` (i.e., not borrowing immutable
+//! data as mutable).
+//!
+//! 2. `ALIASABLE(LV, MQ)`: The aliasability of the reference is
+//! compatible with the aliasability of `LV`. The goal is to prevent
+//! `&mut` borrows of aliasable data.
+//!
+//! 3. `LIFETIME(LV, LT, MQ)`: The lifetime of the borrow does not exceed
+//! the lifetime of the value being borrowed.
+//!
+//! 4. `RESTRICTIONS(LV, LT, ACTIONS) = RS`: This pass checks and computes the
+//! restrictions to maintain memory safety. These are the restrictions
+//! that will go into the final loan. We'll discuss in more detail below.
+//!
+//! ## Checking mutability
+//!
+//! Checking mutability is fairly straightforward. We just want to prevent
+//! immutable data from being borrowed as mutable. Note that it is ok to
+//! borrow mutable data as immutable, since that is simply a
+//! freeze. Formally we define a predicate `MUTABILITY(LV, MQ)` which, if
+//! defined, means that "borrowing `LV` with mutability `MQ` is ok". The
+//! Rust code corresponding to this predicate is the function
+//! `check_mutability` in `middle::borrowck::gather_loans`.
+//!
+//! ### Checking mutability of variables
+//!
+//! *Code pointer:* Function `check_mutability()` in `gather_loans/mod.rs`,
+//! but also the code in `mem_categorization`.
+//!
+//! Let's begin with the rules for variables, which state that if a
+//! variable is declared as mutable, it may be borrowed any which way, but
+//! otherwise the variable must be borrowed as immutable or const:
+//!
+//! ```text
+//! MUTABILITY(X, MQ)                   // M-Var-Mut
+//!   DECL(X) = mut
+//!
+//! MUTABILITY(X, MQ)                   // M-Var-Imm
+//!   DECL(X) = imm
+//!   MQ = imm | const
+//! ```
+//!
+//! ### Checking mutability of owned content
+//!
+//! Fields and owned pointers inherit their mutability from
+//! their base expressions, so both of their rules basically
+//! delegate the check to the base expression `LV`:
+//!
+//! ```text
+//! MUTABILITY(LV.f, MQ)                // M-Field
+//!   MUTABILITY(LV, MQ)
+//!
+//! MUTABILITY(*LV, MQ)                 // M-Deref-Unique
+//!   TYPE(LV) = Box<Ty>
+//!   MUTABILITY(LV, MQ)
+//! ```
+//!
+//! ### Checking mutability of immutable pointer types
+//!
+//! Immutable pointer types like `&T` can only
+//! be borrowed if MQ is immutable or const:
+//!
+//! ```text
+//! MUTABILITY(*LV, MQ)                // M-Deref-Borrowed-Imm
+//!   TYPE(LV) = &Ty
+//!   MQ == imm | const
+//! ```
+//!
+//! ### Checking mutability of mutable pointer types
+//!
+//! `&mut T` can be frozen, so it is acceptable to borrow it as either imm or mut:
+//!
+//! ```text
+//! MUTABILITY(*LV, MQ)                 // M-Deref-Borrowed-Mut
+//!   TYPE(LV) = &mut Ty
+//! ```
+//!
+//! ## Checking aliasability
+//!
+//! The goal of the aliasability check is to ensure that we never permit
+//! `&mut` borrows of aliasable data. Formally we define a predicate
+//! `ALIASABLE(LV, MQ)` which if defined means that
+//! "borrowing `LV` with mutability `MQ` is ok". The
+//! Rust code corresponding to this predicate is the function
+//! `check_aliasability()` in `middle::borrowck::gather_loans`.
+//!
+//! ### Checking aliasability of variables
+//!
+//! Local variables are never aliasable as they are accessible only within
+//! the stack frame.
+//!
+//! ```text
+//!     ALIASABLE(X, MQ)                   // M-Var-Mut
+//! ```
+//!
+//! ### Checking aliasability of owned content
+//!
+//! Owned content is aliasable if it is found in an aliasable location:
+//!
+//! ```text
+//! ALIASABLE(LV.f, MQ)                // M-Field
+//!   ALIASABLE(LV, MQ)
+//!
+//! ALIASABLE(*LV, MQ)                 // M-Deref-Unique
+//!   ALIASABLE(LV, MQ)
+//! ```
+//!
+//! ### Checking aliasability of immutable pointer types
+//!
+//! Immutable pointer types like `&T` are aliasable, and hence can only be
+//! borrowed immutably:
+//!
+//! ```text
+//! ALIASABLE(*LV, imm)                // M-Deref-Borrowed-Imm
+//!   TYPE(LV) = &Ty
+//! ```
+//!
+//! ### Checking aliasability of mutable pointer types
+//!
+//! `&mut T` pointers are unique paths to their referent, and hence are not
+//! aliasable, so they may be borrowed with any mutability:
+//!
+//! ```text
+//! ALIASABLE(*LV, MQ)                 // M-Deref-Borrowed-Mut
+//!   TYPE(LV) = &mut Ty
+//! ```
+//!
+//! ## Checking lifetime
+//!
+//! These rules aim to ensure that no data is borrowed for a scope that exceeds
+//! its lifetime. These two computations wind up being intimately related.
+//! Formally, we define a predicate `LIFETIME(LV, LT, MQ)`, which states that
+//! "the lvalue `LV` can be safely borrowed for the lifetime `LT` with mutability
+//! `MQ`". The Rust code corresponding to this predicate is the module
+//! `middle::borrowck::gather_loans::lifetime`.
+//!
+//! ### The Scope function
+//!
+//! Several of the rules refer to a helper function `SCOPE(LV)=LT`.  The
+//! `SCOPE(LV)` yields the lifetime `LT` for which the lvalue `LV` is
+//! guaranteed to exist, presuming that no mutations occur.
+//!
+//! The scope of a local variable is the block where it is declared:
+//!
+//! ```text
+//!   SCOPE(X) = block where X is declared
+//! ```
+//!
+//! The scope of a field is the scope of the struct:
+//!
+//! ```text
+//!   SCOPE(LV.f) = SCOPE(LV)
+//! ```
+//!
+//! The scope of a unique referent is the scope of the pointer, since
+//! (barring mutation or moves) the pointer will not be freed until
+//! the pointer itself `LV` goes out of scope:
+//!
+//! ```text
+//!   SCOPE(*LV) = SCOPE(LV) if LV has type Box<T>
+//! ```
+//!
+//! The scope of a borrowed referent is the scope associated with the
+//! pointer.  This is a conservative approximation, since the data that
+//! the pointer points at may actually live longer:
+//!
+//! ```text
+//!   SCOPE(*LV) = LT if LV has type &'LT T or &'LT mut T
+//! ```
+//!
+//! ### Checking lifetime of variables
+//!
+//! The rule for variables states that a variable can only be borrowed for a
+//! lifetime `LT` that is a subregion of the variable's scope:
+//!
+//! ```text
+//! LIFETIME(X, LT, MQ)                 // L-Local
+//!   LT <= SCOPE(X)
+//! ```
+//!
+//! ### Checking lifetime for owned content
+//!
+//! The lifetime of a field or owned pointer is the same as the lifetime
+//! of its owner:
+//!
+//! ```text
+//! LIFETIME(LV.f, LT, MQ)              // L-Field
+//!   LIFETIME(LV, LT, MQ)
+//!
+//! LIFETIME(*LV, LT, MQ)               // L-Deref-Send
+//!   TYPE(LV) = Box<Ty>
+//!   LIFETIME(LV, LT, MQ)
+//! ```
+//!
+//! ### Checking lifetime for derefs of references
+//!
+//! References have a lifetime `LT'` associated with them.  The
+//! data they point at has been guaranteed to be valid for at least this
+//! lifetime. Therefore, the borrow is valid so long as the lifetime `LT`
+//! of the borrow is shorter than the lifetime `LT'` of the pointer
+//! itself:
+//!
+//! ```text
+//! LIFETIME(*LV, LT, MQ)               // L-Deref-Borrowed
+//!   TYPE(LV) = &LT' Ty OR &LT' mut Ty
+//!   LT <= LT'
+//! ```
+//!
+//! ## Computing the restrictions
+//!
+//! The final rules govern the computation of *restrictions*, meaning that
+//! we compute the set of actions that will be illegal for the life of the
+//! loan. The predicate is written `RESTRICTIONS(LV, LT, ACTIONS) =
+//! RESTRICTION*`, which can be read "in order to prevent `ACTIONS` from
+//! occurring on `LV`, the restrictions `RESTRICTION*` must be respected
+//! for the lifetime of the loan".
+//!
+//! Note that there is an initial set of restrictions: these restrictions
+//! are computed based on the kind of borrow:
+//!
+//! ```text
+//! &mut LV =>   RESTRICTIONS(LV, LT, MUTATE|CLAIM|FREEZE)
+//! &LV =>       RESTRICTIONS(LV, LT, MUTATE|CLAIM)
+//! &const LV => RESTRICTIONS(LV, LT, [])
+//! ```
+//!
+//! The reasoning here is that a mutable borrow must be the only writer,
+//! therefore it prevents other writes (`MUTATE`), mutable borrows
+//! (`CLAIM`), and immutable borrows (`FREEZE`). An immutable borrow
+//! permits other immutable borrows but forbids writes and mutable borrows.
+//! Finally, a const borrow just wants to be sure that the value is not
+//! moved out from under it, so no actions are forbidden.
+//!
+//! ### Restrictions for loans of a local variable
+//!
+//! The simplest case is a borrow of a local variable `X`:
+//!
+//! ```text
+//! RESTRICTIONS(X, LT, ACTIONS) = (X, ACTIONS)            // R-Variable
+//! ```
+//!
+//! In such cases we just record the actions that are not permitted.
+//!
+//! ### Restrictions for loans of fields
+//!
+//! Restricting a field is the same as restricting the owner of that
+//! field:
+//!
+//! ```text
+//! RESTRICTIONS(LV.f, LT, ACTIONS) = RS, (LV.f, ACTIONS)  // R-Field
+//!   RESTRICTIONS(LV, LT, ACTIONS) = RS
+//! ```
+//!
+//! The reasoning here is as follows. If the field must not be mutated,
+//! then you must not mutate the owner of the field either, since that
+//! would indirectly modify the field. Similarly, if the field cannot be
+//! frozen or aliased, we cannot allow the owner to be frozen or aliased,
+//! since doing so indirectly freezes/aliases the field. This is the
+//! origin of inherited mutability.
+//!
+//! ### Restrictions for loans of owned referents
+//!
+//! Because the mutability of owned referents is inherited, restricting an
+//! owned referent is similar to restricting a field, in that it implies
+//! restrictions on the pointer. However, owned pointers have an important
+//! twist: if the owner `LV` is mutated, that causes the owned referent
+//! `*LV` to be freed! So whenever an owned referent `*LV` is borrowed, we
+//! must prevent the owned pointer `LV` from being mutated, which means
+//! that we always add `MUTATE` and `CLAIM` to the restriction set imposed
+//! on `LV`:
+//!
+//! ```text
+//! RESTRICTIONS(*LV, LT, ACTIONS) = RS, (*LV, ACTIONS)    // R-Deref-Send-Pointer
+//!   TYPE(LV) = Box<Ty>
+//!   RESTRICTIONS(LV, LT, ACTIONS|MUTATE|CLAIM) = RS
+//! ```
+//!
+//! ### Restrictions for loans of immutable borrowed referents
+//!
+//! Immutable borrowed referents are freely aliasable, meaning that
+//! the compiler does not prevent you from copying the pointer.  This
+//! implies that issuing restrictions is useless. We might prevent the
+//! user from acting on `*LV` itself, but there could be another path
+//! `*LV1` that refers to the exact same memory, and we would not be
+//! restricting that path. Therefore, the rule for `&Ty` pointers
+//! always returns an empty set of restrictions, and it only permits
+//! restricting `MUTATE` and `CLAIM` actions:
+//!
+//! ```text
+//! RESTRICTIONS(*LV, LT, ACTIONS) = []                    // R-Deref-Imm-Borrowed
+//!   TYPE(LV) = &LT' Ty
+//!   LT <= LT'                                            // (1)
+//!   ACTIONS subset of [MUTATE, CLAIM]
+//! ```
+//!
+//! The reason that we can restrict `MUTATE` and `CLAIM` actions even
+//! without a restrictions list is that it is never legal to mutate nor to
+//! borrow mutably the contents of a `&Ty` pointer. In other words,
+//! those restrictions are already inherent in the type.
+//!
+//! Clause (1) in the rule for `&Ty` deserves mention. Here I
+//! specify that the lifetime of the loan must be less than the lifetime
+//! of the `&Ty` pointer. In simple cases, this clause is redundant, since
+//! the `LIFETIME()` function will already enforce the required rule:
+//!
+//! ```
+//! fn foo<'a>(point: &'a Point) -> &'static f32 {
+//!     &point.x // Error
+//! }
+//! ```
+//!
+//! The above example fails to compile both because of clause (1) above
+//! but also by the basic `LIFETIME()` check. However, in more advanced
+//! examples involving multiple nested pointers, clause (1) is needed:
+//!
+//! ```
+//! fn foo<'a, 'b>(point: &'a &'b mut Point) -> &'b f32 {
+//!     &point.x // Error
+//! }
+//! ```
+//!
+//! The `LIFETIME` rule here would accept `'b` because, in fact, the
+//! *memory is* guaranteed to remain valid (i.e., not be freed) for the
+//! lifetime `'b`, since the `&mut` pointer is valid for `'b`. However, we
+//! are returning an immutable reference, so we need the memory to be both
+//! valid and immutable. Even though `point.x` is referenced by an `&mut`
+//! pointer, it can still be considered immutable so long as that `&mut`
+//! pointer is found in an aliased location. That means the memory is
+//! guaranteed to be *immutable* for the lifetime of the `&` pointer,
+//! which is only `'a`, not `'b`. Hence this example yields an error.
+//!
+//! As a final twist, consider the case of two nested *immutable*
+//! pointers, rather than a mutable pointer within an immutable one:
+//!
+//! ```
+//! fn foo<'a, 'b>(point: &'a &'b Point) -> &'b f32 {
+//!     &point.x // OK
+//! }
+//! ```
+//!
+//! This function is legal. The reason for this is that the inner pointer
+//! (`*point : &'b Point`) is enough to guarantee the memory is immutable
+//! and valid for the lifetime `'b`.  This is reflected in
+//! `RESTRICTIONS()` by the fact that we do not recurse (i.e., we impose
+//! no restrictions on `LV`, which in this particular case is the pointer
+//! `point : &'a &'b Point`).
+//!
+//! #### Why both `LIFETIME()` and `RESTRICTIONS()`?
+//!
+//! Given the previous text, it might seem that `LIFETIME` and
+//! `RESTRICTIONS` should be folded together into one check, but there is
+//! a reason that they are separated. They answer separate concerns.
+//! The rules pertaining to `LIFETIME` exist to ensure that we don't
+//! create a borrowed pointer that outlives the memory it points at. So
+//! `LIFETIME` prevents a function like this:
+//!
+//! ```
+//! fn get_1<'a>() -> &'a int {
+//!     let x = 1;
+//!     &x
+//! }
+//! ```
+//!
+//! Here we would be returning a pointer into the stack. Clearly bad.
+//!
+//! However, the `RESTRICTIONS` rules are more concerned with how memory
+//! is used. The example above doesn't generate an error according to
+//! `RESTRICTIONS` because, for local variables, we don't require that the
+//! loan lifetime be a subset of the local variable lifetime. The idea
+//! here is that we *can* guarantee that `x` is not (e.g.) mutated for the
+//! lifetime `'a`, even though `'a` exceeds the function body and thus
+//! involves unknown code in the caller -- after all, `x` ceases to exist
+//! after we return and hence the remaining code in `'a` cannot possibly
+//! mutate it. This distinction is important for type checking functions
+//! like this one:
+//!
+//! ```
+//! fn inc_and_get<'a>(p: &'a mut Point) -> &'a int {
+//!     p.x += 1;
+//!     &p.x
+//! }
+//! ```
+//!
+//! In this case, we take in a `&mut` and return a frozen borrowed pointer
+//! with the same lifetime. So long as the lifetime of the returned value
+//! doesn't exceed the lifetime of the `&mut` we receive as input, this is
+//! fine, though it may seem surprising at first (it surprised me when I
+//! first worked it through). After all, we're guaranteeing that `*p`
+//! won't be mutated for the lifetime `'a`, even though we can't "see" the
+//! entirety of the code during that lifetime, since some of it occurs in
+//! our caller. But we *do* know that nobody can mutate `*p` except
+//! through `p`. So if we don't mutate `*p` and we don't return `p`, then
+//! we know that the right to mutate `*p` has been lost to our caller --
+//! in terms of capability, the caller passed in the ability to mutate
+//! `*p`, and we never gave it back. (Note that we can't return `p` while
+//! `*p` is borrowed since that would be a move of `p`, as `&mut` pointers
+//! are affine.)
+//!
+//! ### Restrictions for loans of const aliasable referents
+//!
+//! Freeze pointers are read-only. There may be `&mut` or `&` aliases, and
+//! we can not prevent *anything* but moves in that case. So the
+//! `RESTRICTIONS` function is only defined if `ACTIONS` is the empty set.
+//! Because moves from a `&const` lvalue are never legal, it is not
+//! necessary to add any restrictions at all to the final result.
+//!
+//! ```text
+//!     RESTRICTIONS(*LV, LT, []) = []                         // R-Deref-Freeze-Borrowed
+//!       TYPE(LV) = &const Ty
+//! ```
+//!
+//! ### Restrictions for loans of mutable borrowed referents
+//!
+//! Mutable borrowed pointers are guaranteed to be the only way to mutate
+//! their referent. This permits us to take greater license with them; for
+//! example, the referent can be frozen simply by ensuring that we do not
+//! use the original pointer to mutate. Similarly, we can allow
+//! the referent to be claimed, so long as the original pointer is unused
+//! while the new claimant is live.
+//!
+//! The rule for mutable borrowed pointers is as follows:
+//!
+//! ```text
+//! RESTRICTIONS(*LV, LT, ACTIONS) = RS, (*LV, ACTIONS)    // R-Deref-Mut-Borrowed
+//!   TYPE(LV) = &LT' mut Ty
+//!   LT <= LT'                                            // (1)
+//!   RESTRICTIONS(LV, LT, ACTIONS) = RS                   // (2)
+//! ```
+//!
+//! Let's examine the two numbered clauses:
+//!
+//! Clause (1) specifies that the lifetime of the loan (`LT`) cannot
+//! exceed the lifetime of the `&mut` pointer (`LT'`). The reason for this
+//! is that the `&mut` pointer is guaranteed to be the only legal way to
+//! mutate its referent -- but only for the lifetime `LT'`.  After that
+//! lifetime, the loan on the referent expires and hence the data may be
+//! modified by its owner again. This implies that we are only able to
+//! guarantee that the referent will not be modified or aliased for a
+//! maximum of `LT'`.
+//!
+//! Here is a concrete example of a bug this rule prevents:
+//!
+//! ```
+//! // Test region-reborrow-from-shorter-mut-ref.rs:
+//! fn copy_borrowed_ptr<'a,'b,T>(p: &'a mut &'b mut T) -> &'b mut T {
+//!     &mut **p // ERROR due to clause (1)
+//! }
+//! fn main() {
+//!     let mut x = 1;
+//!     let mut y = &mut x; // <-'b-----------------------------+
+//!     //      +-'a--------------------+                       |
+//!     //      v                       v                       |
+//!     let z = copy_borrowed_ptr(&mut y); // y is lent         |
+//!     *y += 1; // Here y==z, so both should not be usable...  |
+//!     *z += 1; // ...and yet they would be, but for clause 1. |
+//! } // <------------------------------------------------------+
+//! ```
+//!
+//! Clause (2) propagates the restrictions on the referent to the pointer
+//! itself. This is the same as with an owned pointer, though the
+//! reasoning is mildly different. The basic goal in all cases is to
+//! prevent the user from establishing another route to the same data. To
+//! see what I mean, let's examine various cases of what can go wrong and
+//! show how it is prevented.
+//!
+//! **Example danger 1: Moving the base pointer.** One of the simplest
+//! ways to violate the rules is to move the base pointer to a new name
+//! and access it via that new name, thus bypassing the restrictions on
+//! the old name. Here is an example:
+//!
+//! ```
+//! // src/test/compile-fail/borrowck-move-mut-base-ptr.rs
+//! fn foo(t0: &mut int) {
+//!     let p: &int = &*t0; // Freezes `*t0`
+//!     let t1 = t0;        //~ ERROR cannot move out of `t0`
+//!     *t1 = 22;           // OK, not a write through `*t0`
+//! }
+//! ```
+//!
+//! Remember that `&mut` pointers are linear, and hence `let t1 = t0` is a
+//! move of `t0` -- or would be, if it were legal. Instead, we get an
+//! error, because clause (2) imposes restrictions on `LV` (`t0`, here),
+//! and any restrictions on a path make it impossible to move from that
+//! path.
+//!
+//! **Example danger 2: Claiming the base pointer.** Another possible
+//! danger is to mutably borrow the base path. This can lead to two bad
+//! scenarios. The most obvious is that the mutable borrow itself becomes
+//! another path to access the same data, as shown here:
+//!
+//! ```
+//! // src/test/compile-fail/borrowck-mut-borrow-of-mut-base-ptr.rs
+//! fn foo<'a>(mut t0: &'a mut int,
+//!            mut t1: &'a mut int) {
+//!     let p: &int = &*t0;     // Freezes `*t0`
+//!     let mut t2 = &mut t0;   //~ ERROR cannot borrow `t0`
+//!     **t2 += 1;              // Mutates `*t0`
+//! }
+//! ```
+//!
+//! In this example, `**t2` is the same memory as `*t0`. Because `t2` is
+//! an `&mut` pointer, `**t2` is a unique path and hence it would be
+//! possible to mutate `**t2` even though that memory was supposed to be
+//! frozen by the creation of `p`. However, an error is reported -- the
+//! reason is that the freeze `&*t0` will restrict claims and mutation
+//! against `*t0` which, by clause 2, in turn prevents claims and mutation
+//! of `t0`. Hence the claim `&mut t0` is illegal.
+//!
+//! Another danger with an `&mut` pointer is that we could swap the `t0`
+//! value away to create a new path:
+//!
+//! ```
+//! // src/test/compile-fail/borrowck-swap-mut-base-ptr.rs
+//! fn foo<'a>(mut t0: &'a mut int,
+//!            mut t1: &'a mut int) {
+//!     let p: &int = &*t0;     // Freezes `*t0`
+//!     swap(&mut t0, &mut t1); //~ ERROR cannot borrow `t0`
+//!     *t1 = 22;
+//! }
+//! ```
+//!
+//! This is illegal for the same reason as above. Note that if we added
+//! back a swap operator -- as we used to have -- we would want to be very
+//! careful to ensure this example is still illegal.
+//!
+//! **Example danger 3: Freeze the base pointer.** In the case where the
+//! referent is claimed, even freezing the base pointer can be dangerous,
+//! as shown in the following example:
+//!
+//! ```
+//! // src/test/compile-fail/borrowck-borrow-of-mut-base-ptr.rs
+//! fn foo<'a>(mut t0: &'a mut int,
+//!            mut t1: &'a mut int) {
+//!     let p: &mut int = &mut *t0; // Claims `*t0`
+//!     let mut t2 = &t0;           //~ ERROR cannot borrow `t0`
+//!     let q: &int = &*t2;         // Freezes `*t0` but not through `*p`
+//!     *p += 1;                    // violates type of `*q`
+//! }
+//! ```
+//!
+//! Here the problem is that `*t0` is claimed by `p`, and hence `p` wants
+//! to be the controlling pointer through which mutation or freezes occur.
+//! But `t2` would -- if it were legal -- have the type `& &mut int`, and
+//! hence would be a mutable pointer in an aliasable location, which is
+//! considered frozen (since no one can write to `**t2` as it is not a
+//! unique path). Therefore, we could reasonably create a frozen `&int`
+//! pointer pointing at `*t0` that coexists with the mutable pointer `p`,
+//! which is clearly unsound.
+//!
+//! However, it is not always unsafe to freeze the base pointer. In
+//! particular, if the referent is frozen, there is no harm in it:
+//!
+//! ```
+//! // src/test/run-pass/borrowck-borrow-of-mut-base-ptr-safe.rs
+//! fn foo<'a>(mut t0: &'a mut int,
+//!            mut t1: &'a mut int) {
+//!     let p: &int = &*t0; // Freezes `*t0`
+//!     let mut t2 = &t0;
+//!     let q: &int = &*t2; // Freezes `*t0`, but that's ok...
+//!     let r: &int = &*t0; // ...after all, could do same thing directly.
+//! }
+//! ```
+//!
+//! In this case, creating the alias `t2` of `t0` is safe because the only
+//! thing `t2` can be used for is to further freeze `*t0`, which is
+//! already frozen. In particular, we cannot assign to `*t0` through the
+//! new alias `t2`, as demonstrated in this test case:
+//!
+//! ```
+//! // src/test/run-pass/borrowck-borrow-mut-base-ptr-in-aliasable-loc.rs
+//! fn foo(t0: & &mut int) {
+//!     let t1 = t0;
+//!     let p: &int = &**t0;
+//!     **t1 = 22; //~ ERROR cannot assign
+//! }
+//! ```
+//!
+//! This distinction is reflected in the rules. When doing an `&mut`
+//! borrow -- as in the first example -- the set `ACTIONS` will be
+//! `CLAIM|MUTATE|FREEZE`, because claiming the referent implies that it
+//! cannot be claimed, mutated, or frozen by anyone else. These
+//! restrictions are propagated back to the base path and hence the base
+//! path is considered unfreezable.
+//!
+//! In contrast, when the referent is merely frozen -- as in the second
+//! example -- the set `ACTIONS` will be `CLAIM|MUTATE`, because freezing
+//! the referent implies that it cannot be claimed or mutated but permits
+//! others to freeze. Hence when these restrictions are propagated back to
+//! the base path, it will still be considered freezable.
+//!
+//!
+//!
+//! **FIXME #10520: Restrictions against mutating the base pointer.** When
+//! an `&mut` pointer is frozen or claimed, we currently pass along the
+//! restriction against MUTATE to the base pointer. I do not believe this
+//! restriction is needed. It dates from the days when we had a way to
+//! mutate that preserved the value being mutated (i.e., swap). Nowadays
+//! the only form of mutation is assignment, which destroys the pointer
+//! being mutated -- therefore, a mutation cannot create a new path to the
+//! same data. Rather, it removes an existing path. This implies that not
+//! only can we permit mutation, we can have mutation kill restrictions in
+//! the dataflow sense.
+//!
+//! **WARNING:** We do not currently have `const` borrows in the
+//! language. If they are added back in, we must ensure that they are
+//! consistent with all of these examples. The crucial question will be
+//! what sorts of actions are permitted with a `&const &mut` pointer. I
+//! would suggest that an `&mut` referent found in an `&const` location be
+//! prohibited from both freezes and claims. This would avoid the need to
+//! prevent `const` borrows of the base pointer when the referent is
+//! borrowed.
+//!
+//! # Moves and initialization
+//!
+//! The borrow checker is also in charge of ensuring that:
+//!
+//! - all memory which is accessed is initialized
+//! - immutable local variables are assigned at most once.
+//!
+//! These are two separate dataflow analyses built on the same
+//! framework. Let's look at checking that memory is initialized first;
+//! the checking of immutable local variable assignments works in a very
+//! similar way.
+//!
+//! To track the initialization of memory, we actually track all the
+//! points in the program that *create uninitialized memory*, meaning
+//! moves and the declaration of uninitialized variables. For each of
+//! these points, we create a bit in the dataflow set. Assignments to a
+//! variable `x` or path `a.b.c` kill the move/uninitialization bits for
+//! those paths and any subpaths (e.g., `x`, `x.y`, `a.b.c`, `*a.b.c`).
+//! Bits are unioned when two control-flow paths join. Thus, the
+//! presence of a bit indicates that the move may have occurred without an
+//! intervening assignment to the same memory. At each use of a variable,
+//! we examine the bits in scope, and check that none of them are
+//! moves/uninitializations of the variable that is being used.
+//!
+//! Let's look at a simple example:
+//!
+//! ```
+//! fn foo(a: Box<int>) {
+//!     let b: Box<int>;   // Gen bit 0.
+//!
+//!     if cond {          // Bits: 0
+//!         use(&*a);
+//!         b = a;         // Gen bit 1, kill bit 0.
+//!         use(&*b);
+//!     } else {
+//!                        // Bits: 0
+//!     }
+//!                        // Bits: 0,1
+//!     use(&*a);          // Error.
+//!     use(&*b);          // Error.
+//! }
+//!
+//! fn use(a: &int) { }
+//! ```
+//!
+//! In this example, the variable `b` is created uninitialized. In one
+//! branch of an `if`, we then move the variable `a` into `b`. Once we
+//! exit the `if`, therefore, it is an error to use `a` or `b` since both
+//! are only conditionally initialized. I have annotated the dataflow
+//! state using comments. There are two dataflow bits, with bit 0
+//! corresponding to the creation of `b` without an initializer, and bit 1
+//! corresponding to the move of `a`. The assignment `b = a` both
+//! generates bit 1, because it is a move of `a`, and kills bit 0, because
+//! `b` is now initialized. On the else branch, though, `b` is never
+//! initialized, and so bit 0 remains untouched. When the two flows of
+//! control join, we union the bits from both sides, resulting in both
+//! bits 0 and 1 being set. Thus any attempt to use `a` uncovers the bit 1
+//! from the "then" branch, showing that `a` may be moved, and any attempt
+//! to use `b` uncovers bit 0, from the "else" branch, showing that `b`
+//! may not be initialized.
+//!
+//! ## Initialization of immutable variables
+//!
+//! Initialization of immutable variables works in a very similar way,
+//! except that:
+//!
+//! 1. we generate bits for each assignment to a variable;
+//! 2. the bits are never killed except when the variable goes out of scope.
+//!
+//! Thus the presence of an assignment bit indicates that the assignment
+//! may have occurred. Note that assignments are only killed when the
+//! variable goes out of scope, as it is not relevant whether or not there
+//! has been a move in the meantime. Using these bits, we can declare that
+//! an assignment to an immutable variable is legal iff there is no other
+//! assignment bit to that same variable in scope.
+//!
+//! ## Why is the design made this way?
+//!
+//! It may seem surprising that we assign dataflow bits to *each move*
+//! rather than *each path being moved*. This is somewhat less efficient,
+//! since on each use, we must iterate through all moves and check whether
+//! any of them correspond to the path in question. Similar concerns apply
+//! to the analysis for double assignments to immutable variables. The
+//! main reason to do it this way is that it allows us to print better
+//! error messages, because when a use occurs, we can print out the
+//! precise move that may be in scope, rather than simply having to say
+//! "the variable may not be initialized".
+//!
+//! ## Data structures used in the move analysis
+//!
+//! The move analysis maintains several data structures that enable it to
+//! cross-reference moves and assignments to determine when they may be
+//! moving/assigning the same memory. These are all collected into the
+//! `MoveData` and `FlowedMoveData` structs. The former represents the set
+//! of move paths, moves, and assignments, and the latter adds in the
+//! results of a dataflow computation.
+//!
+//! ### Move paths
+//!
+//! The `MovePath` tree tracks every path that is moved or assigned to.
+//! These paths have the same form as the `LoanPath` data structure, which
+//! in turn is the "real world" version of the lvalues `LV` that we
+//! introduced earlier. The difference between a `MovePath` and a `LoanPath`
+//! is that move paths are:
+//!
+//! 1. Canonicalized, so that we have exactly one copy of each, and
+//!    we can refer to move paths by index;
+//! 2. Cross-referenced with other paths into a tree, so that given a move
+//!    path we can efficiently find all parent move paths and all
+//!    extensions (e.g., given the `a.b` move path, we can easily find the
+//!    move path `a` and also the move paths `a.b.c`)
+//! 3. Cross-referenced with moves and assignments, so that we can
+//!    easily find all moves and assignments to a given path.
+//!
+//! The mechanism that we use is to create a `MovePath` record for each
+//! move path. These are arranged in an array and are referenced using
+//! `MovePathIndex` values, which are newtype'd indices. The `MovePath`
+//! structs are arranged into a tree, represented using the standard
+//! Knuth representation where each node has a child 'pointer' and a "next
+//! sibling" 'pointer'. In addition, each `MovePath` has a parent
+//! 'pointer'.  In this case, the 'pointers' are just `MovePathIndex`
+//! values.
+//!
+//! In this way, if we want to find all base paths of a given move path,
+//! we can just iterate up the parent pointers (see `each_base_path()` in
+//! the `move_data` module). If we want to find all extensions, we can
+//! iterate through the subtree (see `each_extending_path()`).
+//!
+//! ### Moves and assignments
+//!
+//! There are structs to represent moves (`Move`) and assignments
+//! (`Assignment`), and these are also placed into arrays and referenced
+//! by index. All moves of a particular path are arranged into a linked
+//! list, beginning with `MovePath.first_move` and continuing through
+//! `Move.next_move`.
+//!
+//! We distinguish between "var" assignments, which are assignments to a
+//! variable like `x = foo`, and "path" assignments (`x.f = foo`).  This
+//! is because we need to assign dataflows to the former, but not the
+//! latter, so as to check for double initialization of immutable
+//! variables.
+//!
+//! ### Gathering and checking moves
+//!
+//! Like loans, we distinguish two phases. The first, gathering, is where
+//! we uncover all the moves and assignments. As with loans, we do some
+//! basic sanity checking in this phase, so we'll report errors if you
+//! attempt to move out of a borrowed pointer etc. Then we do the dataflow
+//! (see `FlowedMoveData::new`). Finally, in the `check_loans.rs` code, we
+//! walk back over, identify all uses, assignments, and captures, and
+//! check that they are legal given the set of dataflow bits we have
+//! computed for that program point.
+//!
+//! # Drop flags and structural fragments
+//!
+//! In addition to the job of enforcing memory safety, the borrow checker
+//! code is also responsible for identifying the *structural fragments* of
+//! data in the function, to support out-of-band dynamic drop flags
+//! allocated on the stack. (For background, see [RFC PR #320].)
+//!
+//! [RFC PR #320]: https://github.com/rust-lang/rfcs/pull/320
+//!
+//! Semantically, each piece of data that has a destructor may need a
+//! boolean flag to indicate whether or not its destructor has been run
+//! yet. However, in many cases there is no need to actually maintain such
+//! a flag: It can be apparent from the code itself that a given path is
+//! always initialized (or always deinitialized) when control reaches the
+//! end of its owner's scope, and thus we can unconditionally emit (or
+//! not) the destructor invocation for that path.
+//!
+//! A simple example of this is the following:
+//!
+//! ```rust
+//! struct D { p: int }
+//! impl D { fn new(x: int) -> D { ... } }
+//! impl Drop for D { ... }
+//!
+//! fn foo(a: D, b: D, t: || -> bool) {
+//!     let c: D;
+//!     let d: D;
+//!     if t() { c = b; }
+//! }
+//! ```
+//!
+//! At the end of the body of `foo`, the compiler knows that `a` is
+//! initialized, introducing a drop obligation (running `D`'s
+//! destructor) for the end of `a`'s scope that is run unconditionally.
+//! Likewise the compiler knows that `d` is not initialized, and thus it
+//! leaves out the drop code for `d`.
+//!
+//! The compiler cannot statically know the drop-state of `b` nor `c` at
+//! the end of their scope, since that depends on the value of
+//! `t`. Therefore, we need to insert boolean flags to track whether we
+//! need to drop `b` and `c`.
+//!
+//! However, the matter is not as simple as just mapping local variables
+//! to their corresponding drop flags when necessary. In particular, in
+//! addition to being able to move data out of local variables, Rust
+//! allows one to move values in and out of structured data.
+//!
+//! Consider the following:
+//!
+//! ```rust
+//! struct S { x: D, y: D, z: D }
+//!
+//! fn foo(a: S, mut b: S, t: || -> bool) {
+//!     let mut c: S;
+//!     let d: S;
+//!     let e: S = a.clone();
+//!     if t() {
+//!         c = b;
+//!         b.x = e.y;
+//!     }
+//!     if t() { c.y = D::new(4); }
+//! }
+//! ```
+//!
+//! As before, the drop obligations of `a` and `d` can be statically
+//! determined, and again the state of `b` and `c` depend on dynamic
+//! state. But additionally, the dynamic drop obligations introduced by
+//! `b` and `c` are not just per-local boolean flags. For example, if the
+//! first call to `t` returns `false` and the second call `true`, then at
+//! the end of their scope, `b` will be completely initialized, but only
+//! `c.y` in `c` will be initialized.  If both calls to `t` return `true`,
+//! then at the end of their scope, `c` will be completely initialized,
+//! but only `b.x` will be initialized in `b`, and only `e.x` and `e.z`
+//! will be initialized in `e`.
+//!
+//! Note that we need to cover the `z` field in each case in some way,
+//! since it may (or may not) need to be dropped, even though `z` is never
+//! directly mentioned in the body of the `foo` function. We call a path
+//! like `b.z` a *fragment sibling* of `b.x`, since the field `z` comes
+//! from the same structure `S` that declared the field `x` in `b.x`.
+//!
+//! In general we need to maintain boolean flags that match the
+//! `S`-structure of both `b` and `c`.  In addition, we need to consult
+//! such a flag when doing an assignment (such as `c.y = D::new(4);`
+//! above), in order to know whether or not there is a previous value that
+//! needs to be dropped before we do the assignment.
+//!
+//! So for any given function, we need to determine what flags are needed
+//! to track its drop obligations. Our strategy for determining the set of
+//! flags is to represent the fragmentation of the structure explicitly:
+//! by starting initially from the paths that are explicitly mentioned in
+//! moves and assignments (such as `b.x` and `c.y` above), and then
+//! traversing the structure of the path's type to identify leftover
+//! *unmoved fragments*: assigning into `c.y` means that `c.x` and `c.z`
+//! are leftover unmoved fragments. Each fragment represents a drop
+//! obligation that may need to be tracked. Paths that are only moved or
+//! assigned in their entirety (like `a` and `d`) are treated as a single
+//! drop obligation.
+//!
+//! The fragment construction process works by piggy-backing on the
+//! existing `move_data` module. We already have callbacks that visit each
+//! direct move and assignment; these form the basis for the sets of
+//! moved_leaf_paths and assigned_leaf_paths. From these leaves, we can
+//! walk up their parent chain to identify all of their parent paths.
+//! We need to identify the parents because of cases like the following:
+//!
+//! ```rust
+//! struct Pair<X,Y>{ x: X, y: Y }
+//! fn foo(dd_d_d: Pair<Pair<Pair<D, D>, D>, D>) {
+//!     other_function(dd_d_d.x.y);
+//! }
+//! ```
+//!
+//! In this code, the move of the path `dd_d_d.x.y` leaves behind not only
+//! the fragment drop-obligation `dd_d_d.x.x` but also `dd_d_d.y`.
+//!
+//! Once we have identified the directly-referenced leaves and their
+//! parents, we compute the left-over fragments, in the function
+//! `fragments::add_fragment_siblings`. As of this writing this works by
+//! looking at each directly-moved or assigned path P, and blindly
+//! gathering all sibling fields of P (as well as siblings for the parents
+//! of P, etc). After accumulating all such siblings, we filter out the
+//! entries added as siblings of P that turned out to be
+//! directly-referenced paths (or parents of directly referenced paths)
+//! themselves, thus leaving the never-referenced "left-overs" as the only
+//! thing left from the gathering step.
+//!
+//! ## Array structural fragments
+//!
+//! A special case of the structural fragments discussed above are
+//! the elements of an array that has been passed by value, such as
+//! the following:
+//!
+//! ```rust
+//! fn foo(a: [D, ..10], i: uint) -> D {
+//!     a[i]
+//! }
+//! ```
+//!
+//! The above code moves a single element out of the input array `a`.
+//! The remainder of the array still needs to be dropped; i.e., it
+//! is a structural fragment. Note that after performing such a move,
+//! it is not legal to read from the array `a`. There are a number of
+//! ways to deal with this, but the important thing to note is that
+//! the semantics needs to distinguish in some manner between a
+//! fragment that is the *entire* array versus a fragment that represents
+//! all-but-one element of the array.  A place where that distinction
+//! would arise is the following:
+//!
+//! ```rust
+//! fn foo(a: [D, ..10], b: [D, ..10], i: uint, t: bool) -> D {
+//!     if t {
+//!         a[i]
+//!     } else {
+//!         b[i]
+//!     }
+//!
+//!     // When control exits, we will need either to drop all of `a`
+//!     // and all-but-one of `b`, or to drop all of `b` and all-but-one
+//!     // of `a`.
+//! }
+//! ```
+//!
+//! There are a number of ways that the trans backend could choose to
+//! compile this (e.g. a `[bool, ..10]` array for each such moved array;
+//! or an `Option<uint>` for each moved array).  From the viewpoint of the
+//! borrow-checker, the important thing is to record what kind of fragment
+//! is implied by the relevant moves.
+//!
+//! # Future work
+//!
+//! While writing up these docs, I encountered some rules I believe to be
+//! stricter than necessary:
+//!
+//! - I think restricting the `&mut` LV against moves and `ALIAS` is sufficient,
+//!   `MUTATE` and `CLAIM` are overkill. `MUTATE` was necessary when swap was
+//!   a built-in operator, but as it is not, it is implied by `CLAIM`,
+//!   and `CLAIM` is implied by `ALIAS`. The only net effect of this is an
+//!   extra error message in some cases, though.
+//! - I have not described how closures interact. Current code is unsound.
+//!   I am working on describing and implementing the fix.
+//! - If we wish, we can easily extend the move checking to allow finer-grained
+//!   tracking of what is initialized and what is not, enabling code like
+//!   this:
+//!
+//!       a = x.f.g; // x.f.g is now uninitialized
+//!       // here, x and x.f are not usable, but x.f.h *is*
+//!       x.f.g = b; // x.f.g is now initialized
+//!       // now x, x.f, x.f.g, x.f.h are all usable
+//!
+//!   What needs to change here, most likely, is that the `moves` module
+//!   should record not only what paths are moved, but what expressions
+//!   are actual *uses*. For example, the reference to `x` in `x.f.g = b`
+//!   is not a true *use* in the sense that it requires `x` to be fully
+//!   initialized. This is in fact why the above code produces an error
+//!   today: the reference to `x` in `x.f.g = b` is considered illegal
+//!   because `x` is not fully initialized.
+//!
+//! There are also some possible refactorings:
+//!
+//! - It might be nice to replace all loan paths with the MovePath mechanism,
+//!   since they allow lightweight comparison using an integer.
diff --git a/src/librustc/middle/borrowck/fragments.rs b/src/librustc/middle/borrowck/fragments.rs
index 7e766e9138e35..dddc326df3572 100644
--- a/src/librustc/middle/borrowck/fragments.rs
+++ b/src/librustc/middle/borrowck/fragments.rs
@@ -8,13 +8,10 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
+//! Helper routines used for fragmenting structural paths due to moves for
+//! tracking drop obligations. Please see the extensive comments in the
+//! section "Structural fragments" in `doc.rs`.
 
-Helper routines used for fragmenting structural paths due to moves for
-tracking drop obligations. Please see the extensive comments in the
-section "Structural fragments" in `doc.rs`.
-
-*/
 use self::Fragment::*;
 
 use session::config;
@@ -176,16 +173,12 @@ pub fn instrument_move_fragments<'tcx>(this: &MoveData<'tcx>,
     instrument_all_paths("assigned_leaf_path", &fragments.assigned_leaf_paths);
 }
 
+/// Normalizes the fragment sets in `this`; i.e., removes duplicate entries, constructs the set of
+/// parents, and constructs the left-over fragments.
+///
+/// Note: "left-over fragments" means paths that were not directly referenced in moves nor
+/// assignments, but must nonetheless be tracked as potential drop obligations.
 pub fn fixup_fragment_sets<'tcx>(this: &MoveData<'tcx>, tcx: &ty::ctxt<'tcx>) {
-    /*!
-     * Normalizes the fragment sets in `this`; i.e., removes
-     * duplicate entries, constructs the set of parents, and
-     * constructs the left-over fragments.
-     *
-     * Note: "left-over fragments" means paths that were not
-     * directly referenced in moves nor assignments, but must
-     * nonetheless be tracked as potential drop obligations.
-     */
 
     let mut fragments = this.fragments.borrow_mut();
 
@@ -283,18 +276,14 @@ pub fn fixup_fragment_sets<'tcx>(this: &MoveData<'tcx>, tcx: &ty::ctxt<'tcx>) {
     }
 }
 
+/// Adds all of the precisely-tracked siblings of `lp` as potential move paths of interest. For
+/// example, if `lp` represents `s.x.j`, then adds moves paths for `s.x.i` and `s.x.k`, the
+/// siblings of `s.x.j`.
 fn add_fragment_siblings<'tcx>(this: &MoveData<'tcx>,
                                tcx: &ty::ctxt<'tcx>,
                                gathered_fragments: &mut Vec<Fragment>,
                                lp: Rc<LoanPath<'tcx>>,
                                origin_id: Option<ast::NodeId>) {
-    /*!
-     * Adds all of the precisely-tracked siblings of `lp` as
-     * potential move paths of interest. For example, if `lp`
-     * represents `s.x.j`, then adds moves paths for `s.x.i` and
-     * `s.x.k`, the siblings of `s.x.j`.
-     */
-
     match lp.kind {
         LpVar(_) | LpUpvar(..) => {} // Local variables have no siblings.
 
@@ -343,6 +332,8 @@ fn add_fragment_siblings<'tcx>(this: &MoveData<'tcx>,
     }
 }
 
+/// We have determined that `origin_lp` destructures to LpExtend(parent, original_field_name).
+/// Based on this, add move paths for all of the siblings of `origin_lp`.
 fn add_fragment_siblings_for_extension<'tcx>(this: &MoveData<'tcx>,
                                              tcx: &ty::ctxt<'tcx>,
                                              gathered_fragments: &mut Vec<Fragment>,
@@ -353,12 +344,6 @@ fn add_fragment_siblings_for_extension<'tcx>(this: &MoveData<'tcx>,
                                              origin_id: Option<ast::NodeId>,
                                              enum_variant_info: Option<(ast::DefId,
                                                                         Rc<LoanPath<'tcx>>)>) {
-    /*!
-     * We have determined that `origin_lp` destructures to
-     * LpExtend(parent, original_field_name). Based on this,
-     * add move paths for all of the siblings of `origin_lp`.
-     */
-
     let parent_ty = parent_lp.to_type();
 
     let add_fragment_sibling_local = |field_name| {
@@ -454,6 +439,8 @@ fn add_fragment_siblings_for_extension<'tcx>(this: &MoveData<'tcx>,
     }
 }
 
+/// Adds the single sibling `LpExtend(parent, new_field_name)` of `origin_lp` (the original
+/// loan-path).
 fn add_fragment_sibling_core<'tcx>(this: &MoveData<'tcx>,
                                    tcx: &ty::ctxt<'tcx>,
                                    gathered_fragments: &mut Vec<Fragment>,
@@ -461,10 +448,6 @@ fn add_fragment_sibling_core<'tcx>(this: &MoveData<'tcx>,
                                    mc: mc::MutabilityCategory,
                                    new_field_name: mc::FieldName,
                                    origin_lp: &Rc<LoanPath<'tcx>>) -> MovePathIndex {
-    /*!
-     * Adds the single sibling `LpExtend(parent, new_field_name)`
-     * of `origin_lp` (the original loan-path).
-     */
     let opt_variant_did = match parent.kind {
         LpDowncast(_, variant_did) => Some(variant_did),
         LpVar(..) | LpUpvar(..) | LpExtend(..) => None,
diff --git a/src/librustc/middle/borrowck/gather_loans/gather_moves.rs b/src/librustc/middle/borrowck/gather_loans/gather_moves.rs
index 1d0b0558bb16c..6511416050427 100644
--- a/src/librustc/middle/borrowck/gather_loans/gather_moves.rs
+++ b/src/librustc/middle/borrowck/gather_loans/gather_moves.rs
@@ -8,9 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
- * Computes moves.
- */
+//! Computes moves.
 
 use middle::borrowck::*;
 use middle::borrowck::LoanPathKind::*;
diff --git a/src/librustc/middle/borrowck/gather_loans/lifetime.rs b/src/librustc/middle/borrowck/gather_loans/lifetime.rs
index 7a7ed3e75d20e..e6a7c150df8f4 100644
--- a/src/librustc/middle/borrowck/gather_loans/lifetime.rs
+++ b/src/librustc/middle/borrowck/gather_loans/lifetime.rs
@@ -8,10 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
- * This module implements the check that the lifetime of a borrow
- * does not exceed the lifetime of the value being borrowed.
- */
+//! This module implements the check that the lifetime of a borrow
+//! does not exceed the lifetime of the value being borrowed.
 
 use middle::borrowck::*;
 use middle::expr_use_visitor as euv;
diff --git a/src/librustc/middle/borrowck/gather_loans/mod.rs b/src/librustc/middle/borrowck/gather_loans/mod.rs
index 088b62a12cf98..4f7ecc99c8938 100644
--- a/src/librustc/middle/borrowck/gather_loans/mod.rs
+++ b/src/librustc/middle/borrowck/gather_loans/mod.rs
@@ -225,6 +225,9 @@ fn check_aliasability<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>,
 impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> {
     pub fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.bccx.tcx }
 
+    /// Guarantees that `addr_of(cmt)` will be valid for the duration of `static_scope_r`, or
+    /// reports an error.  This may entail taking out loans, which will be added to the
+    /// `req_loan_map`.
     fn guarantee_valid(&mut self,
                        borrow_id: ast::NodeId,
                        borrow_span: Span,
@@ -232,12 +235,6 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> {
                        req_kind: ty::BorrowKind,
                        loan_region: ty::Region,
                        cause: euv::LoanCause) {
-        /*!
-         * Guarantees that `addr_of(cmt)` will be valid for the duration of
-         * `static_scope_r`, or reports an error.  This may entail taking
-         * out loans, which will be added to the `req_loan_map`.
-         */
-
         debug!("guarantee_valid(borrow_id={}, cmt={}, \
                 req_mutbl={}, loan_region={})",
                borrow_id,
diff --git a/src/librustc/middle/borrowck/gather_loans/restrictions.rs b/src/librustc/middle/borrowck/gather_loans/restrictions.rs
index adae34b49dca2..bd9cf8f84b643 100644
--- a/src/librustc/middle/borrowck/gather_loans/restrictions.rs
+++ b/src/librustc/middle/borrowck/gather_loans/restrictions.rs
@@ -8,9 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
- * Computes the restrictions that result from a borrow.
- */
+//! Computes the restrictions that result from a borrow.
 
 pub use self::RestrictionResult::*;
 
diff --git a/src/librustc/middle/borrowck/mod.rs b/src/librustc/middle/borrowck/mod.rs
index 45040cd7b102e..0bbcdfe61bb46 100644
--- a/src/librustc/middle/borrowck/mod.rs
+++ b/src/librustc/middle/borrowck/mod.rs
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*! See doc.rs for a thorough explanation of the borrow checker */
+//! See doc.rs for a thorough explanation of the borrow checker
 
 #![allow(non_camel_case_types)]
 
diff --git a/src/librustc/middle/borrowck/move_data.rs b/src/librustc/middle/borrowck/move_data.rs
index dc9516ccc5da2..7bf3458f0ae3d 100644
--- a/src/librustc/middle/borrowck/move_data.rs
+++ b/src/librustc/middle/borrowck/move_data.rs
@@ -8,12 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-
-Data structures used for tracking moves. Please see the extensive
-comments in the section "Moves and initialization" in `doc.rs`.
-
-*/
+//! Data structures used for tracking moves. Please see the extensive
+//! comments in the section "Moves and initialization" in `doc.rs`.
 
 pub use self::MoveKind::*;
 
@@ -297,15 +293,11 @@ impl<'tcx> MoveData<'tcx> {
         self.path_parent(index) == InvalidMovePathIndex
     }
 
+    /// Returns the existing move path index for `lp`, if any, and otherwise adds a new index for
+    /// `lp` and any of its base paths that do not yet have an index.
     pub fn move_path(&self,
                      tcx: &ty::ctxt<'tcx>,
                      lp: Rc<LoanPath<'tcx>>) -> MovePathIndex {
-        /*!
-         * Returns the existing move path index for `lp`, if any,
-         * and otherwise adds a new index for `lp` and any of its
-         * base paths that do not yet have an index.
-         */
-
         match self.path_map.borrow().get(&lp) {
             Some(&index) => {
                 return index;
@@ -370,13 +362,10 @@ impl<'tcx> MoveData<'tcx> {
         result
     }
 
+    /// Adds any existing move path indices for `lp` and any base paths of `lp` to `result`, but
+    /// does not add new move paths
     fn add_existing_base_paths(&self, lp: &Rc<LoanPath<'tcx>>,
                                result: &mut Vec<MovePathIndex>) {
-        /*!
-         * Adds any existing move path indices for `lp` and any base
-         * paths of `lp` to `result`, but does not add new move paths
-         */
-
         match self.path_map.borrow().get(lp).cloned() {
             Some(index) => {
                 self.each_base_path(index, |p| {
@@ -397,16 +386,12 @@ impl<'tcx> MoveData<'tcx> {
 
     }
 
+    /// Adds a new move entry for a move of `lp` that occurs at location `id` with kind `kind`.
     pub fn add_move(&self,
                     tcx: &ty::ctxt<'tcx>,
                     lp: Rc<LoanPath<'tcx>>,
                     id: ast::NodeId,
                     kind: MoveKind) {
-        /*!
-         * Adds a new move entry for a move of `lp` that occurs at
-         * location `id` with kind `kind`.
-         */
-
         debug!("add_move(lp={}, id={}, kind={})",
                lp.repr(tcx),
                id,
@@ -428,6 +413,8 @@ impl<'tcx> MoveData<'tcx> {
         });
     }
 
+    /// Adds a new record for an assignment to `lp` that occurs at location `id` with the given
+    /// `span`.
     pub fn add_assignment(&self,
                           tcx: &ty::ctxt<'tcx>,
                           lp: Rc<LoanPath<'tcx>>,
@@ -435,11 +422,6 @@ impl<'tcx> MoveData<'tcx> {
                           span: Span,
                           assignee_id: ast::NodeId,
                           mode: euv::MutateMode) {
-        /*!
-         * Adds a new record for an assignment to `lp` that occurs at
-         * location `id` with the given `span`.
-         */
-
         debug!("add_assignment(lp={}, assign_id={}, assignee_id={}",
                lp.repr(tcx), assign_id, assignee_id);
 
@@ -473,18 +455,16 @@ impl<'tcx> MoveData<'tcx> {
         }
     }
 
+    /// Adds a new record for a match of `base_lp`, downcast to
+    /// variant `lp`, that occurs at location `pattern_id`.  (One
+    /// should be able to recover the span info from the
+    /// `pattern_id` and the ast_map, I think.)
     pub fn add_variant_match(&self,
                              tcx: &ty::ctxt<'tcx>,
                              lp: Rc<LoanPath<'tcx>>,
                              pattern_id: ast::NodeId,
                              base_lp: Rc<LoanPath<'tcx>>,
                              mode: euv::MatchMode) {
-        /*!
-         * Adds a new record for a match of `base_lp`, downcast to
-         * variant `lp`, that occurs at location `pattern_id`.  (One
-         * should be able to recover the span info from the
-         * `pattern_id` and the ast_map, I think.)
-         */
         debug!("add_variant_match(lp={}, pattern_id={})",
                lp.repr(tcx), pattern_id);
 
@@ -507,18 +487,15 @@ impl<'tcx> MoveData<'tcx> {
         fragments::fixup_fragment_sets(self, tcx)
     }
 
+    /// Adds the gen/kills for the various moves and
+    /// assignments into the provided data flow contexts.
+    /// Moves are generated by moves and killed by assignments and
+    /// scoping. Assignments are generated by assignment to variables and
+    /// killed by scoping. See `doc.rs` for more details.
     fn add_gen_kills(&self,
                      tcx: &ty::ctxt<'tcx>,
                      dfcx_moves: &mut MoveDataFlow,
                      dfcx_assign: &mut AssignDataFlow) {
-        /*!
-         * Adds the gen/kills for the various moves and
-         * assignments into the provided data flow contexts.
-         * Moves are generated by moves and killed by assignments and
-         * scoping. Assignments are generated by assignment to variables and
-         * killed by scoping. See `doc.rs` for more details.
-         */
-
         for (i, the_move) in self.moves.borrow().iter().enumerate() {
             dfcx_moves.add_gen(the_move.id, i);
         }
@@ -695,18 +672,14 @@ impl<'a, 'tcx> FlowedMoveData<'a, 'tcx> {
         ret
     }
 
+    /// Iterates through each move of `loan_path` (or some base path of `loan_path`) that *may*
+    /// have occurred on entry to `id` without an intervening assignment. In other words, any moves
+    /// that would invalidate a reference to `loan_path` at location `id`.
     pub fn each_move_of(&self,
                         id: ast::NodeId,
                         loan_path: &Rc<LoanPath<'tcx>>,
                         f: |&Move, &LoanPath<'tcx>| -> bool)
                         -> bool {
-        /*!
-         * Iterates through each move of `loan_path` (or some base path
-         * of `loan_path`) that *may* have occurred on entry to `id` without
-         * an intervening assignment. In other words, any moves that
-         * would invalidate a reference to `loan_path` at location `id`.
-         */
-
         // Bad scenarios:
         //
         // 1. Move of `a.b.c`, use of `a.b.c`
@@ -755,17 +728,13 @@ impl<'a, 'tcx> FlowedMoveData<'a, 'tcx> {
         })
     }
 
+    /// Iterates through every assignment to `loan_path` that may have occurred on entry to `id`.
+    /// `loan_path` must be a single variable.
     pub fn each_assignment_of(&self,
                               id: ast::NodeId,
                               loan_path: &Rc<LoanPath<'tcx>>,
                               f: |&Assignment| -> bool)
                               -> bool {
-        /*!
-         * Iterates through every assignment to `loan_path` that
-         * may have occurred on entry to `id`. `loan_path` must be
-         * a single variable.
-         */
-
         let loan_path_index = {
             match self.move_data.existing_move_path(loan_path) {
                 Some(i) => i,
diff --git a/src/librustc/middle/cfg/mod.rs b/src/librustc/middle/cfg/mod.rs
index bb758ec7c38b7..a2e8ba8d65c33 100644
--- a/src/librustc/middle/cfg/mod.rs
+++ b/src/librustc/middle/cfg/mod.rs
@@ -8,12 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-
-Module that constructs a control-flow graph representing an item.
-Uses `Graph` as the underlying representation.
-
-*/
+//! Module that constructs a control-flow graph representing an item.
+//! Uses `Graph` as the underlying representation.
 
 use middle::graph;
 use middle::ty;
diff --git a/src/librustc/middle/dataflow.rs b/src/librustc/middle/dataflow.rs
index 141504cb6f7d5..53fea8ffc86c6 100644
--- a/src/librustc/middle/dataflow.rs
+++ b/src/librustc/middle/dataflow.rs
@@ -9,12 +9,10 @@
 // except according to those terms.
 
 
-/*!
- * A module for propagating forward dataflow information. The analysis
- * assumes that the items to be propagated can be represented as bits
- * and thus uses bitvectors. Your job is simply to specify the so-called
- * GEN and KILL bits for each expression.
- */
+//! A module for propagating forward dataflow information. The analysis
+//! assumes that the items to be propagated can be represented as bits
+//! and thus uses bitvectors. Your job is simply to specify the so-called
+//! GEN and KILL bits for each expression.
 
 pub use self::EntryOrExit::*;
 
diff --git a/src/librustc/middle/expr_use_visitor.rs b/src/librustc/middle/expr_use_visitor.rs
index 656feb51a1d3c..9bb5a6f9a2447 100644
--- a/src/librustc/middle/expr_use_visitor.rs
+++ b/src/librustc/middle/expr_use_visitor.rs
@@ -8,11 +8,9 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
- * A different sort of visitor for walking fn bodies.  Unlike the
- * normal visitor, which just walks the entire body in one shot, the
- * `ExprUseVisitor` determines how expressions are being used.
- */
+//! A different sort of visitor for walking fn bodies.  Unlike the
+//! normal visitor, which just walks the entire body in one shot, the
+//! `ExprUseVisitor` determines how expressions are being used.
 
 pub use self::MutateMode::*;
 pub use self::LoanCause::*;
@@ -716,12 +714,9 @@ impl<'d,'t,'tcx,TYPER:mc::Typer<'tcx>> ExprUseVisitor<'d,'t,'tcx,TYPER> {
         }
     }
 
+    /// Indicates that the value of `blk` will be consumed, meaning either copied or moved
+    /// depending on its type.
     fn walk_block(&mut self, blk: &ast::Block) {
-        /*!
-         * Indicates that the value of `blk` will be consumed,
-         * meaning either copied or moved depending on its type.
-         */
-
         debug!("walk_block(blk.id={})", blk.id);
 
         for stmt in blk.stmts.iter() {
@@ -821,16 +816,12 @@ impl<'d,'t,'tcx,TYPER:mc::Typer<'tcx>> ExprUseVisitor<'d,'t,'tcx,TYPER> {
         }
     }
 
+    /// Autoderefs for overloaded Deref calls in fact reference their receiver. That is, if we have
+    /// `(*x)` where `x` is of type `Rc<T>`, then this in fact is equivalent to `x.deref()`. Since
+    /// `deref()` is declared with `&self`, this is an autoref of `x`.
     fn walk_autoderefs(&mut self,
                        expr: &ast::Expr,
                        autoderefs: uint) {
-        /*!
-         * Autoderefs for overloaded Deref calls in fact reference
-         * their receiver. That is, if we have `(*x)` where `x` is of
-         * type `Rc<T>`, then this in fact is equivalent to
-         * `x.deref()`. Since `deref()` is declared with `&self`, this
-         * is an autoref of `x`.
-         */
         debug!("walk_autoderefs expr={} autoderefs={}", expr.repr(self.tcx()), autoderefs);
 
         for i in range(0, autoderefs) {
diff --git a/src/librustc/middle/fast_reject.rs b/src/librustc/middle/fast_reject.rs
index 7514a63c7fa58..da467c3d0d555 100644
--- a/src/librustc/middle/fast_reject.rs
+++ b/src/librustc/middle/fast_reject.rs
@@ -33,26 +33,20 @@ pub enum SimplifiedType {
     ParameterSimplifiedType,
 }
 
+/// Tries to simplify a type by dropping type parameters, deref'ing away any reference types, etc.
+/// The idea is to get something simple that we can use to quickly decide if two types could unify
+/// during method lookup.
+///
+/// If `can_simplify_params` is false, then we will fail to simplify type parameters entirely. This
+/// is useful when those type parameters would be instantiated with fresh type variables, since
+/// then we can't say much about whether two types would unify. Put another way,
+/// `can_simplify_params` should be true if type parameters appear free in `ty` and `false` if they
+/// are to be considered bound.
 pub fn simplify_type(tcx: &ty::ctxt,
                      ty: Ty,
                      can_simplify_params: bool)
                      -> Option<SimplifiedType>
 {
-    /*!
-     * Tries to simplify a type by dropping type parameters, deref'ing
-     * away any reference types, etc. The idea is to get something
-     * simple that we can use to quickly decide if two types could
-     * unify during method lookup.
-     *
-     * If `can_simplify_params` is false, then we will fail to
-     * simplify type parameters entirely. This is useful when those
-     * type parameters would be instantiated with fresh type
-     * variables, since then we can't say much about whether two types
-     * would unify. Put another way, `can_simplify_params` should be
-     * true if type parameters appear free in `ty` and `false` if they
-     * are to be considered bound.
-     */
-
     match ty.sty {
         ty::ty_bool => Some(BoolSimplifiedType),
         ty::ty_char => Some(CharSimplifiedType),
diff --git a/src/librustc/middle/graph.rs b/src/librustc/middle/graph.rs
index ac132477b8772..2f50a96402302 100644
--- a/src/librustc/middle/graph.rs
+++ b/src/librustc/middle/graph.rs
@@ -8,31 +8,27 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-
-A graph module for use in dataflow, region resolution, and elsewhere.
-
-# Interface details
-
-You customize the graph by specifying a "node data" type `N` and an
-"edge data" type `E`. You can then later gain access (mutable or
-immutable) to these "user-data" bits. Currently, you can only add
-nodes or edges to the graph. You cannot remove or modify them once
-added. This could be changed if we have a need.
-
-# Implementation details
-
-The main tricky thing about this code is the way that edges are
-stored. The edges are stored in a central array, but they are also
-threaded onto two linked lists for each node, one for incoming edges
-and one for outgoing edges. Note that every edge is a member of some
-incoming list and some outgoing list.  Basically you can load the
-first index of the linked list from the node data structures (the
-field `first_edge`) and then, for each edge, load the next index from
-the field `next_edge`). Each of those fields is an array that should
-be indexed by the direction (see the type `Direction`).
-
-*/
+//! A graph module for use in dataflow, region resolution, and elsewhere.
+//!
+//! # Interface details
+//!
+//! You customize the graph by specifying a "node data" type `N` and an
+//! "edge data" type `E`. You can then later gain access (mutable or
+//! immutable) to these "user-data" bits. Currently, you can only add
+//! nodes or edges to the graph. You cannot remove or modify them once
+//! added. This could be changed if we have a need.
+//!
+//! # Implementation details
+//!
+//! The main tricky thing about this code is the way that edges are
+//! stored. The edges are stored in a central array, but they are also
+//! threaded onto two linked lists for each node, one for incoming edges
+//! and one for outgoing edges. Note that every edge is a member of some
+//! incoming list and some outgoing list.  Basically you can load the
+//! first index of the linked list from the node data structures (the
+//! field `first_edge`) and then, for each edge, load the next index from
+//! the field `next_edge`). Each of those fields is an array that should
+//! be indexed by the direction (see the type `Direction`).
 
 #![allow(dead_code)] // still WIP
 
diff --git a/src/librustc/middle/liveness.rs b/src/librustc/middle/liveness.rs
index 15d9e87a9d5a3..a09ceac11a53d 100644
--- a/src/librustc/middle/liveness.rs
+++ b/src/librustc/middle/liveness.rs
@@ -8,105 +8,103 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
- * A classic liveness analysis based on dataflow over the AST.  Computes,
- * for each local variable in a function, whether that variable is live
- * at a given point.  Program execution points are identified by their
- * id.
- *
- * # Basic idea
- *
- * The basic model is that each local variable is assigned an index.  We
- * represent sets of local variables using a vector indexed by this
- * index.  The value in the vector is either 0, indicating the variable
- * is dead, or the id of an expression that uses the variable.
- *
- * We conceptually walk over the AST in reverse execution order.  If we
- * find a use of a variable, we add it to the set of live variables.  If
- * we find an assignment to a variable, we remove it from the set of live
- * variables.  When we have to merge two flows, we take the union of
- * those two flows---if the variable is live on both paths, we simply
- * pick one id.  In the event of loops, we continue doing this until a
- * fixed point is reached.
- *
- * ## Checking initialization
- *
- * At the function entry point, all variables must be dead.  If this is
- * not the case, we can report an error using the id found in the set of
- * live variables, which identifies a use of the variable which is not
- * dominated by an assignment.
- *
- * ## Checking moves
- *
- * After each explicit move, the variable must be dead.
- *
- * ## Computing last uses
- *
- * Any use of the variable where the variable is dead afterwards is a
- * last use.
- *
- * # Implementation details
- *
- * The actual implementation contains two (nested) walks over the AST.
- * The outer walk has the job of building up the ir_maps instance for the
- * enclosing function.  On the way down the tree, it identifies those AST
- * nodes and variable IDs that will be needed for the liveness analysis
- * and assigns them contiguous IDs.  The liveness id for an AST node is
- * called a `live_node` (it's a newtype'd uint) and the id for a variable
- * is called a `variable` (another newtype'd uint).
- *
- * On the way back up the tree, as we are about to exit from a function
- * declaration we allocate a `liveness` instance.  Now that we know
- * precisely how many nodes and variables we need, we can allocate all
- * the various arrays that we will need to precisely the right size.  We then
- * perform the actual propagation on the `liveness` instance.
- *
- * This propagation is encoded in the various `propagate_through_*()`
- * methods.  It effectively does a reverse walk of the AST; whenever we
- * reach a loop node, we iterate until a fixed point is reached.
- *
- * ## The `Users` struct
- *
- * At each live node `N`, we track three pieces of information for each
- * variable `V` (these are encapsulated in the `Users` struct):
- *
- * - `reader`: the `LiveNode` ID of some node which will read the value
- *    that `V` holds on entry to `N`.  Formally: a node `M` such
- *    that there exists a path `P` from `N` to `M` where `P` does not
- *    write `V`.  If the `reader` is `invalid_node()`, then the current
- *    value will never be read (the variable is dead, essentially).
- *
- * - `writer`: the `LiveNode` ID of some node which will write the
- *    variable `V` and which is reachable from `N`.  Formally: a node `M`
- *    such that there exists a path `P` from `N` to `M` and `M` writes
- *    `V`.  If the `writer` is `invalid_node()`, then there is no writer
- *    of `V` that follows `N`.
- *
- * - `used`: a boolean value indicating whether `V` is *used*.  We
- *   distinguish a *read* from a *use* in that a *use* is some read that
- *   is not just used to generate a new value.  For example, `x += 1` is
- *   a read but not a use.  This is used to generate better warnings.
- *
- * ## Special Variables
- *
- * We generate various special variables for various, well, special purposes.
- * These are described in the `specials` struct:
- *
- * - `exit_ln`: a live node that is generated to represent every 'exit' from
- *   the function, whether it be by explicit return, panic, or other means.
- *
- * - `fallthrough_ln`: a live node that represents a fallthrough
- *
- * - `no_ret_var`: a synthetic variable that is only 'read' from, the
- *   fallthrough node.  This allows us to detect functions where we fail
- *   to return explicitly.
- * - `clean_exit_var`: a synthetic variable that is only 'read' from the
- *   fallthrough node.  It is only live if the function could converge
- *   via means other than an explicit `return` expression. That is, it is
- *   only dead if the end of the function's block can never be reached.
- *   It is the responsibility of typeck to ensure that there are no
- *   `return` expressions in a function declared as diverging.
- */
+//! A classic liveness analysis based on dataflow over the AST.  Computes,
+//! for each local variable in a function, whether that variable is live
+//! at a given point.  Program execution points are identified by their
+//! id.
+//!
+//! # Basic idea
+//!
+//! The basic model is that each local variable is assigned an index.  We
+//! represent sets of local variables using a vector indexed by this
+//! index.  The value in the vector is either 0, indicating the variable
+//! is dead, or the id of an expression that uses the variable.
+//!
+//! We conceptually walk over the AST in reverse execution order.  If we
+//! find a use of a variable, we add it to the set of live variables.  If
+//! we find an assignment to a variable, we remove it from the set of live
+//! variables.  When we have to merge two flows, we take the union of
+//! those two flows---if the variable is live on both paths, we simply
+//! pick one id.  In the event of loops, we continue doing this until a
+//! fixed point is reached.
+//!
+//! ## Checking initialization
+//!
+//! At the function entry point, all variables must be dead.  If this is
+//! not the case, we can report an error using the id found in the set of
+//! live variables, which identifies a use of the variable which is not
+//! dominated by an assignment.
+//!
+//! ## Checking moves
+//!
+//! After each explicit move, the variable must be dead.
+//!
+//! ## Computing last uses
+//!
+//! Any use of the variable where the variable is dead afterwards is a
+//! last use.
+//!
+//! # Implementation details
+//!
+//! The actual implementation contains two (nested) walks over the AST.
+//! The outer walk has the job of building up the ir_maps instance for the
+//! enclosing function.  On the way down the tree, it identifies those AST
+//! nodes and variable IDs that will be needed for the liveness analysis
+//! and assigns them contiguous IDs.  The liveness id for an AST node is
+//! called a `live_node` (it's a newtype'd uint) and the id for a variable
+//! is called a `variable` (another newtype'd uint).
+//!
+//! On the way back up the tree, as we are about to exit from a function
+//! declaration we allocate a `liveness` instance.  Now that we know
+//! precisely how many nodes and variables we need, we can allocate all
+//! the various arrays that we will need to precisely the right size.  We then
+//! perform the actual propagation on the `liveness` instance.
+//!
+//! This propagation is encoded in the various `propagate_through_*()`
+//! methods.  It effectively does a reverse walk of the AST; whenever we
+//! reach a loop node, we iterate until a fixed point is reached.
+//!
+//! ## The `Users` struct
+//!
+//! At each live node `N`, we track three pieces of information for each
+//! variable `V` (these are encapsulated in the `Users` struct):
+//!
+//! - `reader`: the `LiveNode` ID of some node which will read the value
+//!    that `V` holds on entry to `N`.  Formally: a node `M` such
+//!    that there exists a path `P` from `N` to `M` where `P` does not
+//!    write `V`.  If the `reader` is `invalid_node()`, then the current
+//!    value will never be read (the variable is dead, essentially).
+//!
+//! - `writer`: the `LiveNode` ID of some node which will write the
+//!    variable `V` and which is reachable from `N`.  Formally: a node `M`
+//!    such that there exists a path `P` from `N` to `M` and `M` writes
+//!    `V`.  If the `writer` is `invalid_node()`, then there is no writer
+//!    of `V` that follows `N`.
+//!
+//! - `used`: a boolean value indicating whether `V` is *used*.  We
+//!   distinguish a *read* from a *use* in that a *use* is some read that
+//!   is not just used to generate a new value.  For example, `x += 1` is
+//!   a read but not a use.  This is used to generate better warnings.
+//!
+//! ## Special Variables
+//!
+//! We generate various special variables for various, well, special purposes.
+//! These are described in the `specials` struct:
+//!
+//! - `exit_ln`: a live node that is generated to represent every 'exit' from
+//!   the function, whether it be by explicit return, panic, or other means.
+//!
+//! - `fallthrough_ln`: a live node that represents a fallthrough
+//!
+//! - `no_ret_var`: a synthetic variable that is only 'read' from the
+//!   fallthrough node.  This allows us to detect functions where we fail
+//!   to return explicitly.
+//! - `clean_exit_var`: a synthetic variable that is only 'read' from the
+//!   fallthrough node.  It is only live if the function could converge
+//!   via means other than an explicit `return` expression. That is, it is
+//!   only dead if the end of the function's block can never be reached.
+//!   It is the responsibility of typeck to ensure that there are no
+//!   `return` expressions in a function declared as diverging.
 use self::LoopKind::*;
 use self::LiveNodeKind::*;
 use self::VarKind::*;
diff --git a/src/librustc/middle/mem_categorization.rs b/src/librustc/middle/mem_categorization.rs
index 046ab162cfcb0..53a5ac7a09342 100644
--- a/src/librustc/middle/mem_categorization.rs
+++ b/src/librustc/middle/mem_categorization.rs
@@ -8,57 +8,55 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
- * # Categorization
- *
- * The job of the categorization module is to analyze an expression to
- * determine what kind of memory is used in evaluating it (for example,
- * where dereferences occur and what kind of pointer is dereferenced;
- * whether the memory is mutable; etc)
- *
- * Categorization effectively transforms all of our expressions into
- * expressions of the following forms (the actual enum has many more
- * possibilities, naturally, but they are all variants of these base
- * forms):
- *
- *     E = rvalue    // some computed rvalue
- *       | x         // address of a local variable or argument
- *       | *E        // deref of a ptr
- *       | E.comp    // access to an interior component
- *
- * Imagine a routine ToAddr(Expr) that evaluates an expression and returns an
- * address where the result is to be found.  If Expr is an lvalue, then this
- * is the address of the lvalue.  If Expr is an rvalue, this is the address of
- * some temporary spot in memory where the result is stored.
- *
- * Now, cat_expr() classifies the expression Expr and the address A=ToAddr(Expr)
- * as follows:
- *
- * - cat: what kind of expression was this?  This is a subset of the
- *   full expression forms which only includes those that we care about
- *   for the purpose of the analysis.
- * - mutbl: mutability of the address A
- * - ty: the type of data found at the address A
- *
- * The resulting categorization tree differs somewhat from the expressions
- * themselves.  For example, auto-derefs are explicit.  Also, an index a[b] is
- * decomposed into two operations: a dereference to reach the array data and
- * then an index to jump forward to the relevant item.
- *
- * ## By-reference upvars
- *
- * One part of the translation which may be non-obvious is that we translate
- * closure upvars into the dereference of a borrowed pointer; this more closely
- * resembles the runtime translation. So, for example, if we had:
- *
- *     let mut x = 3;
- *     let y = 5;
- *     let inc = || x += y;
- *
- * Then when we categorize `x` (*within* the closure) we would yield a
- * result of `*x'`, effectively, where `x'` is a `cat_upvar` reference
- * tied to `x`. The type of `x'` will be a borrowed pointer.
- */
+//! # Categorization
+//!
+//! The job of the categorization module is to analyze an expression to
+//! determine what kind of memory is used in evaluating it (for example,
+//! where dereferences occur and what kind of pointer is dereferenced;
+//! whether the memory is mutable; etc)
+//!
+//! Categorization effectively transforms all of our expressions into
+//! expressions of the following forms (the actual enum has many more
+//! possibilities, naturally, but they are all variants of these base
+//! forms):
+//!
+//!     E = rvalue    // some computed rvalue
+//!       | x         // address of a local variable or argument
+//!       | *E        // deref of a ptr
+//!       | E.comp    // access to an interior component
+//!
+//! Imagine a routine ToAddr(Expr) that evaluates an expression and returns an
+//! address where the result is to be found.  If Expr is an lvalue, then this
+//! is the address of the lvalue.  If Expr is an rvalue, this is the address of
+//! some temporary spot in memory where the result is stored.
+//!
+//! Now, cat_expr() classifies the expression Expr and the address A=ToAddr(Expr)
+//! as follows:
+//!
+//! - cat: what kind of expression was this?  This is a subset of the
+//!   full expression forms which only includes those that we care about
+//!   for the purpose of the analysis.
+//! - mutbl: mutability of the address A
+//! - ty: the type of data found at the address A
+//!
+//! The resulting categorization tree differs somewhat from the expressions
+//! themselves.  For example, auto-derefs are explicit.  Also, an index a[b] is
+//! decomposed into two operations: a dereference to reach the array data and
+//! then an index to jump forward to the relevant item.
+//!
+//! ## By-reference upvars
+//!
+//! One part of the translation which may be non-obvious is that we translate
+//! closure upvars into the dereference of a borrowed pointer; this more closely
+//! resembles the runtime translation. So, for example, if we had:
+//!
+//!     let mut x = 3;
+//!     let y = 5;
+//!     let inc = || x += y;
+//!
+//! Then when we categorize `x` (*within* the closure) we would yield a
+//! result of `*x'`, effectively, where `x'` is a `cat_upvar` reference
+//! tied to `x`. The type of `x'` will be a borrowed pointer.
 
 #![allow(non_camel_case_types)]
 
@@ -1058,20 +1056,17 @@ impl<'t,'tcx,TYPER:Typer<'tcx>> MemCategorizationContext<'t,TYPER> {
         }
     }
 
+    /// Given a pattern P like: `[_, ..Q, _]`, where `vec_cmt` is the cmt for `P`, `slice_pat` is
+    /// the pattern `Q`, returns:
+    ///
+    /// * a cmt for `Q`
+    /// * the mutability and region of the slice `Q`
+    ///
+    /// These last two bits of info happen to be things that borrowck needs.
     pub fn cat_slice_pattern(&self,
                              vec_cmt: cmt<'tcx>,
                              slice_pat: &ast::Pat)
                              -> McResult<(cmt<'tcx>, ast::Mutability, ty::Region)> {
-        /*!
-         * Given a pattern P like: `[_, ..Q, _]`, where `vec_cmt` is
-         * the cmt for `P`, `slice_pat` is the pattern `Q`, returns:
-         * - a cmt for `Q`
-         * - the mutability and region of the slice `Q`
-         *
-         * These last two bits of info happen to be things that
-         * borrowck needs.
-         */
-
         let slice_ty = if_ok!(self.node_ty(slice_pat.id));
         let (slice_mutbl, slice_r) = vec_slice_info(self.tcx(),
                                                     slice_pat,
@@ -1079,17 +1074,13 @@ impl<'t,'tcx,TYPER:Typer<'tcx>> MemCategorizationContext<'t,TYPER> {
         let cmt_slice = self.cat_index(slice_pat, self.deref_vec(slice_pat, vec_cmt));
         return Ok((cmt_slice, slice_mutbl, slice_r));
 
+        /// In a pattern like [a, b, ..c], normally `c` has slice type, but if you have [a, b,
+        /// ..ref c], then the type of `ref c` will be `&&[]`, so to extract the slice details we
+        /// have to recurse through rptrs.
         fn vec_slice_info(tcx: &ty::ctxt,
                           pat: &ast::Pat,
                           slice_ty: Ty)
                           -> (ast::Mutability, ty::Region) {
-            /*!
-             * In a pattern like [a, b, ..c], normally `c` has slice type,
-             * but if you have [a, b, ..ref c], then the type of `ref c`
-             * will be `&&[]`, so to extract the slice details we have
-             * to recurse through rptrs.
-             */
-
             match slice_ty.sty {
                 ty::ty_rptr(r, ref mt) => match mt.ty.sty {
                     ty::ty_vec(_, None) => (mt.mutbl, r),
@@ -1428,13 +1419,9 @@ impl<'tcx> cmt_<'tcx> {
         }
     }
 
+    /// Returns `Some(_)` if this lvalue represents a freely aliasable pointer type.
     pub fn freely_aliasable(&self, ctxt: &ty::ctxt<'tcx>)
                             -> Option<AliasableReason> {
-        /*!
-         * Returns `Some(_)` if this lvalue represents a freely aliasable
-         * pointer type.
-         */
-
         // Maybe non-obvious: copied upvars can only be considered
         // non-aliasable in once closures, since any other kind can be
         // aliased and eventually recused.
diff --git a/src/librustc/middle/region.rs b/src/librustc/middle/region.rs
index c5511f995bc68..20be98ca977d1 100644
--- a/src/librustc/middle/region.rs
+++ b/src/librustc/middle/region.rs
@@ -8,18 +8,13 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-
-This file actually contains two passes related to regions.  The first
-pass builds up the `scope_map`, which describes the parent links in
-the region hierarchy.  The second pass infers which types must be
-region parameterized.
-
-Most of the documentation on regions can be found in
-`middle/typeck/infer/region_inference.rs`
-
-*/
-
+//! This file actually contains two passes related to regions.  The first
+//! pass builds up the `scope_map`, which describes the parent links in
+//! the region hierarchy.  The second pass infers which types must be
+//! region parameterized.
+//!
+//! Most of the documentation on regions can be found in
+//! `middle/typeck/infer/region_inference.rs`
 
 use session::Session;
 use middle::ty::{mod, Ty, FreeRegion};
@@ -171,14 +166,10 @@ impl RegionMaps {
         self.rvalue_scopes.borrow_mut().insert(var, lifetime);
     }
 
+    /// Records that a scope is a TERMINATING SCOPE. Whenever we create automatic temporaries --
+    /// e.g. by an expression like `a().f` -- they will be freed within the innermost terminating
+    /// scope.
     pub fn mark_as_terminating_scope(&self, scope_id: CodeExtent) {
-        /*!
-         * Records that a scope is a TERMINATING SCOPE. Whenever we
-         * create automatic temporaries -- e.g. by an
-         * expression like `a().f` -- they will be freed within
-         * the innermost terminating scope.
-         */
-
         debug!("record_terminating_scope(scope_id={})", scope_id);
         self.terminating_scopes.borrow_mut().insert(scope_id);
     }
@@ -197,10 +188,8 @@ impl RegionMaps {
         }
     }
 
+    /// Returns the lifetime of the local variable `var_id`
     pub fn var_scope(&self, var_id: ast::NodeId) -> CodeExtent {
-        /*!
-         * Returns the lifetime of the local variable `var_id`
-         */
         match self.var_map.borrow().get(&var_id) {
             Some(&r) => r,
             None => { panic!("no enclosing scope for id {}", var_id); }
@@ -257,15 +246,12 @@ impl RegionMaps {
         self.is_subscope_of(scope2, scope1)
     }
 
+    /// Returns true if `subscope` is equal to or is lexically nested inside `superscope` and false
+    /// otherwise.
     pub fn is_subscope_of(&self,
                           subscope: CodeExtent,
                           superscope: CodeExtent)
                           -> bool {
-        /*!
-         * Returns true if `subscope` is equal to or is lexically
-         * nested inside `superscope` and false otherwise.
-         */
-
         let mut s = subscope;
         while superscope != s {
             match self.scope_map.borrow().get(&s) {
@@ -285,27 +271,20 @@ impl RegionMaps {
         return true;
     }
 
+    /// Determines whether two free regions have a subregion relationship
+    /// by walking the graph encoded in `free_region_map`.  Note that
+    /// it is possible that `sub != sup` and `sub <= sup` and `sup <= sub`
+    /// (that is, the user can give two different names to the same lifetime).
     pub fn sub_free_region(&self, sub: FreeRegion, sup: FreeRegion) -> bool {
-        /*!
-         * Determines whether two free regions have a subregion relationship
-         * by walking the graph encoded in `free_region_map`.  Note that
-         * it is possible that `sub != sup` and `sub <= sup` and `sup <= sub`
-         * (that is, the user can give two different names to the same lifetime).
-         */
-
         can_reach(&*self.free_region_map.borrow(), sub, sup)
     }
 
+    /// Determines whether one region is a subregion of another.  This is intended to run *after
+    /// inference* and sadly the logic is somewhat duplicated with the code in infer.rs.
     pub fn is_subregion_of(&self,
                            sub_region: ty::Region,
                            super_region: ty::Region)
                            -> bool {
-        /*!
-         * Determines whether one region is a subregion of another.  This is
-         * intended to run *after inference* and sadly the logic is somewhat
-         * duplicated with the code in infer.rs.
-         */
-
         debug!("is_subregion_of(sub_region={}, super_region={})",
                sub_region, super_region);
 
@@ -345,16 +324,12 @@ impl RegionMaps {
         }
     }
 
+    /// Finds the nearest common ancestor (if any) of two scopes.  That is, finds the smallest
+    /// scope which is greater than or equal to both `scope_a` and `scope_b`.
     pub fn nearest_common_ancestor(&self,
                                    scope_a: CodeExtent,
                                    scope_b: CodeExtent)
                                    -> Option<CodeExtent> {
-        /*!
-         * Finds the nearest common ancestor (if any) of two scopes.  That
-         * is, finds the smallest scope which is greater than or equal to
-         * both `scope_a` and `scope_b`.
-         */
-
         if scope_a == scope_b { return Some(scope_a); }
 
         let a_ancestors = ancestors_of(self, scope_a);
@@ -681,18 +656,15 @@ fn resolve_local(visitor: &mut RegionResolutionVisitor, local: &ast::Local) {
 
     visit::walk_local(visitor, local);
 
+    /// True if `pat` matches the `P&` nonterminal:
+    ///
+    ///     P& = ref X
+    ///        | StructName { ..., P&, ... }
+    ///        | VariantName(..., P&, ...)
+    ///        | [ ..., P&, ... ]
+    ///        | ( ..., P&, ... )
+    ///        | box P&
     fn is_binding_pat(pat: &ast::Pat) -> bool {
-        /*!
-         * True if `pat` match the `P&` nonterminal:
-         *
-         *     P& = ref X
-         *        | StructName { ..., P&, ... }
-         *        | VariantName(..., P&, ...)
-         *        | [ ..., P&, ... ]
-         *        | ( ..., P&, ... )
-         *        | box P&
-         */
-
         match pat.node {
             ast::PatIdent(ast::BindByRef(_), _, _) => true,
 
@@ -719,35 +691,27 @@ fn resolve_local(visitor: &mut RegionResolutionVisitor, local: &ast::Local) {
         }
     }
 
+    /// True if `ty` is a borrowed pointer type like `&int` or `&[...]`.
     fn is_borrowed_ty(ty: &ast::Ty) -> bool {
-        /*!
-         * True if `ty` is a borrowed pointer type
-         * like `&int` or `&[...]`.
-         */
-
         match ty.node {
             ast::TyRptr(..) => true,
             _ => false
         }
     }
 
+    /// If `expr` matches the `E&` grammar, then records an extended rvalue scope as appropriate:
+    ///
+    ///     E& = & ET
+    ///        | StructName { ..., f: E&, ... }
+    ///        | [ ..., E&, ... ]
+    ///        | ( ..., E&, ... )
+    ///        | {...; E&}
+    ///        | box E&
+    ///        | E& as ...
+    ///        | ( E& )
     fn record_rvalue_scope_if_borrow_expr(visitor: &mut RegionResolutionVisitor,
                                           expr: &ast::Expr,
                                           blk_id: CodeExtent) {
-        /*!
-         * If `expr` matches the `E&` grammar, then records an extended
-         * rvalue scope as appropriate:
-         *
-         *     E& = & ET
-         *        | StructName { ..., f: E&, ... }
-         *        | [ ..., E&, ... ]
-         *        | ( ..., E&, ... )
-         *        | {...; E&}
-         *        | box E&
-         *        | E& as ...
-         *        | ( E& )
-         */
-
         match expr.node {
             ast::ExprAddrOf(_, ref subexpr) => {
                 record_rvalue_scope_if_borrow_expr(visitor, &**subexpr, blk_id);
@@ -787,29 +751,24 @@ fn resolve_local(visitor: &mut RegionResolutionVisitor, local: &ast::Local) {
         }
     }
 
+    /// Applied to an expression `expr` if `expr` -- or something owned or partially owned by
+    /// `expr` -- is going to be indirectly referenced by a variable in a let statement. In that
+    /// case, the "temporary lifetime" of `expr` is extended to be the block enclosing the `let`
+    /// statement.
+    ///
+    /// More formally, if `expr` matches the grammar `ET`, record the rvalue scope of the matching
+    /// `<rvalue>` as `blk_id`:
+    ///
+    ///     ET = *ET
+    ///        | ET[...]
+    ///        | ET.f
+    ///        | (ET)
+    ///        | <rvalue>
+    ///
+    /// Note: ET is intended to match "rvalues or lvalues based on rvalues".
     fn record_rvalue_scope<'a>(visitor: &mut RegionResolutionVisitor,
                                expr: &'a ast::Expr,
                                blk_scope: CodeExtent) {
-        /*!
-         * Applied to an expression `expr` if `expr` -- or something
-         * owned or partially owned by `expr` -- is going to be
-         * indirectly referenced by a variable in a let statement. In
-         * that case, the "temporary lifetime" or `expr` is extended
-         * to be the block enclosing the `let` statement.
-         *
-         * More formally, if `expr` matches the grammar `ET`, record
-         * the rvalue scope of the matching `<rvalue>` as `blk_id`:
-         *
-         *     ET = *ET
-         *        | ET[...]
-         *        | ET.f
-         *        | (ET)
-         *        | <rvalue>
-         *
-         * Note: ET is intended to match "rvalues or
-         * lvalues based on rvalues".
-         */
-
         let mut expr = expr;
         loop {
             // Note: give all the expressions matching `ET` with the
diff --git a/src/librustc/middle/resolve_lifetime.rs b/src/librustc/middle/resolve_lifetime.rs
index fae64ff924274..9c32410ecbfaf 100644
--- a/src/librustc/middle/resolve_lifetime.rs
+++ b/src/librustc/middle/resolve_lifetime.rs
@@ -8,14 +8,12 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
- * Name resolution for lifetimes.
- *
- * Name resolution for lifetimes follows MUCH simpler rules than the
- * full resolve. For example, lifetime names are never exported or
- * used between functions, and they operate in a purely top-down
- * way. Therefore we break lifetime name resolution into a separate pass.
- */
+//! Name resolution for lifetimes.
+//!
+//! Name resolution for lifetimes follows MUCH simpler rules than the
+//! full resolve. For example, lifetime names are never exported or
+//! used between functions, and they operate in a purely top-down
+//! way. Therefore we break lifetime name resolution into a separate pass.
 
 pub use self::DefRegion::*;
 use self::ScopeChain::*;
@@ -254,34 +252,27 @@ impl<'a> LifetimeContext<'a> {
     }
 
     /// Visits self by adding a scope and handling recursive walk over the contents with `walk`.
+    ///
+    /// Handles visiting fns and methods. These are a bit complicated because we must distinguish
+    /// early- vs late-bound lifetime parameters. We do this by checking which lifetimes appear
+    /// within type bounds; those are early bound lifetimes, and the rest are late bound.
+    ///
+    /// For example:
+    ///
+    ///     fn foo<'a,'b,'c,T:Trait<'b>>(...)
+    ///
+    /// Here `'a` and `'c` are late bound but `'b` is early bound. Note that early- and late-bound
+    /// lifetimes may be interspersed together.
+    ///
+    /// If early bound lifetimes are present, we separate them into their own list (and likewise
+    /// for late bound). They will be numbered sequentially, starting from the lowest index that is
+    /// already in scope (for a fn item, that will be 0, but for a method it might not be). Late
+    /// bound lifetimes are resolved by name and associated with a binder id (`binder_id`), so the
+    /// ordering is not important there.
     fn visit_early_late(&mut self,
                         early_space: subst::ParamSpace,
                         generics: &ast::Generics,
                         walk: |&mut LifetimeContext|) {
-        /*!
-         * Handles visiting fns and methods. These are a bit
-         * complicated because we must distinguish early- vs late-bound
-         * lifetime parameters. We do this by checking which lifetimes
-         * appear within type bounds; those are early bound lifetimes,
-         * and the rest are late bound.
-         *
-         * For example:
-         *
-         *    fn foo<'a,'b,'c,T:Trait<'b>>(...)
-         *
-         * Here `'a` and `'c` are late bound but `'b` is early
-         * bound. Note that early- and late-bound lifetimes may be
-         * interspersed together.
-         *
-         * If early bound lifetimes are present, we separate them into
-         * their own list (and likewise for late bound). They will be
-         * numbered sequentially, starting from the lowest index that
-         * is already in scope (for a fn item, that will be 0, but for
-         * a method it might not be). Late bound lifetimes are
-         * resolved by name and associated with a binder id (`binder_id`), so
-         * the ordering is not important there.
-         */
-
         let referenced_idents = early_bound_lifetime_names(generics);
 
         debug!("visit_early_late: referenced_idents={}",
@@ -479,13 +470,9 @@ pub fn early_bound_lifetimes<'a>(generics: &'a ast::Generics) -> Vec<ast::Lifeti
         .collect()
 }
 
+/// Given a set of generic declarations, returns a list of names containing all early bound
+/// lifetime names for those generics. (In fact, this list may also contain other names.)
 fn early_bound_lifetime_names(generics: &ast::Generics) -> Vec<ast::Name> {
-    /*!
-     * Given a set of generic declarations, returns a list of names
-     * containing all early bound lifetime names for those
-     * generics. (In fact, this list may also contain other names.)
-     */
-
     // Create two lists, dividing the lifetimes into early/late bound.
     // Initially, all of them are considered late, but we will move
     // things from late into early as we go if we find references to
diff --git a/src/librustc/middle/subst.rs b/src/librustc/middle/subst.rs
index b030867fc841c..365c2ed39dbc0 100644
--- a/src/librustc/middle/subst.rs
+++ b/src/librustc/middle/subst.rs
@@ -131,26 +131,18 @@ pub fn self_ty(&self) -> Option<Ty<'tcx>> {
         Substs { types: types, regions: ErasedRegions }
     }
 
+    /// Since ErasedRegions are only to be used in trans, most of the compiler can use this method
+    /// to easily access the set of region substitutions.
     pub fn regions<'a>(&'a self) -> &'a VecPerParamSpace<ty::Region> {
-        /*!
-         * Since ErasedRegions are only to be used in trans, most of
-         * the compiler can use this method to easily access the set
-         * of region substitutions.
-         */
-
         match self.regions {
             ErasedRegions => panic!("Erased regions only expected in trans"),
             NonerasedRegions(ref r) => r
         }
     }
 
+    /// Since ErasedRegions are only to be used in trans, most of the compiler can use this method
+    /// to easily access the set of region substitutions.
     pub fn mut_regions<'a>(&'a mut self) -> &'a mut VecPerParamSpace<ty::Region> {
-        /*!
-         * Since ErasedRegions are only to be used in trans, most of
-         * the compiler can use this method to easily access the set
-         * of region substitutions.
-         */
-
         match self.regions {
             ErasedRegions => panic!("Erased regions only expected in trans"),
             NonerasedRegions(ref mut r) => r
@@ -688,59 +680,49 @@ impl<'a,'tcx> SubstFolder<'a,'tcx> {
         self.shift_regions_through_binders(ty)
     }
 
+    /// It is sometimes necessary to adjust the debruijn indices during substitution. This occurs
+    /// when we are substituting a type with escaping regions into a context where we have passed
+    /// through region binders. That's quite a mouthful. Let's see an example:
+    ///
+    /// ```
+    /// type Func<A> = fn(A);
+    /// type MetaFunc = for<'a> fn(Func<&'a int>)
+    /// ```
+    ///
+    /// The type `MetaFunc`, when fully expanded, will be
+    ///
+    ///     for<'a> fn(fn(&'a int))
+    ///             ^~ ^~ ^~~
+    ///             |  |  |
+    ///             |  |  DebruijnIndex of 2
+    ///             Binders
+    ///
+    /// Here the `'a` lifetime is bound in the outer function, but appears as an argument of the
+    /// inner one. Therefore, that appearance will have a DebruijnIndex of 2, because we must skip
+    /// over the inner binder (remember that we count Debruijn indices from 1). However, in the
+    /// definition of `MetaFunc`, the binder is not visible, so the type `&'a int` will have a
+    /// debruijn index of 1. It's only during the substitution that we can see we must increase the
+    /// depth by 1 to account for the binder that we passed through.
+    ///
+    /// As a second example, consider this twist:
+    ///
+    /// ```
+    /// type FuncTuple<A> = (A,fn(A));
+    /// type MetaFuncTuple = for<'a> fn(FuncTuple<&'a int>)
+    /// ```
+    ///
+    /// Here the final type will be:
+    ///
+    ///     for<'a> fn((&'a int, fn(&'a int)))
+    ///                 ^~~         ^~~
+    ///                 |           |
+    ///          DebruijnIndex of 1 |
+    ///                      DebruijnIndex of 2
+    ///
+    /// As indicated in the diagram, here the same type `&'a int` is substituted once, but in the
+    /// first case we do not increase the Debruijn index and in the second case we do. The reason
+    /// is that only in the second case have we passed through a fn binder.
     fn shift_regions_through_binders(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
-        /*!
-         * It is sometimes necessary to adjust the debruijn indices
-         * during substitution. This occurs when we are substituting a
-         * type with escaping regions into a context where we have
-         * passed through region binders. That's quite a
-         * mouthful. Let's see an example:
-         *
-         * ```
-         * type Func<A> = fn(A);
-         * type MetaFunc = for<'a> fn(Func<&'a int>)
-         * ```
-         *
-         * The type `MetaFunc`, when fully expanded, will be
-         *
-         *     for<'a> fn(fn(&'a int))
-         *             ^~ ^~ ^~~
-         *             |  |  |
-         *             |  |  DebruijnIndex of 2
-         *             Binders
-         *
-         * Here the `'a` lifetime is bound in the outer function, but
-         * appears as an argument of the inner one. Therefore, that
-         * appearance will have a DebruijnIndex of 2, because we must
-         * skip over the inner binder (remember that we count Debruijn
-         * indices from 1). However, in the definition of `MetaFunc`,
-         * the binder is not visible, so the type `&'a int` will have
-         * a debruijn index of 1. It's only during the substitution
-         * that we can see we must increase the depth by 1 to account
-         * for the binder that we passed through.
-         *
-         * As a second example, consider this twist:
-         *
-         * ```
-         * type FuncTuple<A> = (A,fn(A));
-         * type MetaFuncTuple = for<'a> fn(FuncTuple<&'a int>)
-         * ```
-         *
-         * Here the final type will be:
-         *
-         *     for<'a> fn((&'a int, fn(&'a int)))
-         *                 ^~~         ^~~
-         *                 |           |
-         *          DebruijnIndex of 1 |
-         *                      DebruijnIndex of 2
-         *
-         * As indicated in the diagram, here the same type `&'a int`
-         * is substituted once, but in the first case we do not
-         * increase the Debruijn index and in the second case we
-         * do. The reason is that only in the second case have we
-         * passed through a fn binder.
-         */
-
         debug!("shift_regions(ty={}, region_binders_passed={}, type_has_escaping_regions={})",
                ty.repr(self.tcx()), self.region_binders_passed, ty::type_has_escaping_regions(ty));
 
diff --git a/src/librustc/middle/traits/coherence.rs b/src/librustc/middle/traits/coherence.rs
index c84a2a0d11e6b..048f394224cf0 100644
--- a/src/librustc/middle/traits/coherence.rs
+++ b/src/librustc/middle/traits/coherence.rs
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*! See `doc.rs` for high-level documentation */
+//! See `doc.rs` for high-level documentation
 
 use super::SelectionContext;
 use super::Obligation;
diff --git a/src/librustc/middle/traits/doc.rs b/src/librustc/middle/traits/doc.rs
index c014bc0c164f2..62246b77ee940 100644
--- a/src/librustc/middle/traits/doc.rs
+++ b/src/librustc/middle/traits/doc.rs
@@ -8,403 +8,399 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-
-# TRAIT RESOLUTION
-
-This document describes the general process and points out some non-obvious
-things.
-
-## Major concepts
-
-Trait resolution is the process of pairing up an impl with each
-reference to a trait. So, for example, if there is a generic function like:
-
-    fn clone_slice<T:Clone>(x: &[T]) -> Vec<T> { ... }
-
-and then a call to that function:
-
-    let v: Vec<int> = clone_slice([1, 2, 3].as_slice())
-
-it is the job of trait resolution to figure out (in which case)
-whether there exists an impl of `int : Clone`
-
-Note that in some cases, like generic functions, we may not be able to
-find a specific impl, but we can figure out that the caller must
-provide an impl. To see what I mean, consider the body of `clone_slice`:
-
-    fn clone_slice<T:Clone>(x: &[T]) -> Vec<T> {
-        let mut v = Vec::new();
-        for e in x.iter() {
-            v.push((*e).clone()); // (*)
-        }
-    }
-
-The line marked `(*)` is only legal if `T` (the type of `*e`)
-implements the `Clone` trait. Naturally, since we don't know what `T`
-is, we can't find the specific impl; but based on the bound `T:Clone`,
-we can say that there exists an impl which the caller must provide.
-
-We use the term *obligation* to refer to a trait reference in need of
-an impl.
-
-## Overview
-
-Trait resolution consists of three major parts:
-
-- SELECTION: Deciding how to resolve a specific obligation. For
-  example, selection might decide that a specific obligation can be
-  resolved by employing an impl which matches the self type, or by
-  using a parameter bound. In the case of an impl, Selecting one
-  obligation can create *nested obligations* because of where clauses
-  on the impl itself. It may also require evaluating those nested
-  obligations to resolve ambiguities.
-
-- FULFILLMENT: The fulfillment code is what tracks that obligations
-  are completely fulfilled. Basically it is a worklist of obligations
-  to be selected: once selection is successful, the obligation is
-  removed from the worklist and any nested obligations are enqueued.
-
-- COHERENCE: The coherence checks are intended to ensure that there
-  are never overlapping impls, where two impls could be used with
-  equal precedence.
-
-## Selection
-
-Selection is the process of deciding whether an obligation can be
-resolved and, if so, how it is to be resolved (via impl, where clause, etc).
-The main interface is the `select()` function, which takes an obligation
-and returns a `SelectionResult`. There are three possible outcomes:
-
-- `Ok(Some(selection))` -- yes, the obligation can be resolved, and
-  `selection` indicates how. If the impl was resolved via an impl,
-  then `selection` may also indicate nested obligations that are required
-  by the impl.
-
-- `Ok(None)` -- we are not yet sure whether the obligation can be
-  resolved or not. This happens most commonly when the obligation
-  contains unbound type variables.
-
-- `Err(err)` -- the obligation definitely cannot be resolved due to a
-  type error, or because there are no impls that could possibly apply,
-  etc.
-
-The basic algorithm for selection is broken into two big phases:
-candidate assembly and confirmation.
-
-### Candidate assembly
-
-Searches for impls/where-clauses/etc that might
-possibly be used to satisfy the obligation. Each of those is called
-a candidate. To avoid ambiguity, we want to find exactly one
-candidate that is definitively applicable. In some cases, we may not
-know whether an impl/where-clause applies or not -- this occurs when
-the obligation contains unbound inference variables.
-
-The basic idea for candidate assembly is to do a first pass in which
-we identify all possible candidates. During this pass, all that we do
-is try and unify the type parameters. (In particular, we ignore any
-nested where clauses.) Presuming that this unification succeeds, the
-impl is added as a candidate.
-
-Once this first pass is done, we can examine the set of candidates. If
-it is a singleton set, then we are done: this is the only impl in
-scope that could possibly apply. Otherwise, we can winnow down the set
-of candidates by using where clauses and other conditions. If this
-reduced set yields a single, unambiguous entry, we're good to go,
-otherwise the result is considered ambiguous.
-
-#### The basic process: Inferring based on the impls we see
-
-This process is easier if we work through some examples. Consider
-the following trait:
-
-```
-trait Convert<Target> {
-    fn convert(&self) -> Target;
-}
-```
-
-This trait just has one method. It's about as simple as it gets. It
-converts from the (implicit) `Self` type to the `Target` type. If we
-wanted to permit conversion between `int` and `uint`, we might
-implement `Convert` like so:
-
-```rust
-impl Convert<uint> for int { ... } // int -> uint
-impl Convert<int> for uint { ... } // uint -> uint
-```
-
-Now imagine there is some code like the following:
-
-```rust
-let x: int = ...;
-let y = x.convert();
-```
-
-The call to convert will generate a trait reference `Convert<$Y> for
-int`, where `$Y` is the type variable representing the type of
-`y`. When we match this against the two impls we can see, we will find
-that only one remains: `Convert<uint> for int`. Therefore, we can
-select this impl, which will cause the type of `$Y` to be unified to
-`uint`. (Note that while assembling candidates, we do the initial
-unifications in a transaction, so that they don't affect one another.)
-
-There are tests to this effect in src/test/run-pass:
-
-   traits-multidispatch-infer-convert-source-and-target.rs
-   traits-multidispatch-infer-convert-target.rs
-
-#### Winnowing: Resolving ambiguities
-
-But what happens if there are multiple impls where all the types
-unify? Consider this example:
-
-```rust
-trait Get {
-    fn get(&self) -> Self;
-}
-
-impl<T:Copy> Get for T {
-    fn get(&self) -> T { *self }
-}
-
-impl<T:Get> Get for Box<T> {
-    fn get(&self) -> Box<T> { box get_it(&**self) }
-}
-```
-
-What happens when we invoke `get_it(&box 1_u16)`, for example? In this
-case, the `Self` type is `Box<u16>` -- that unifies with both impls,
-because the first applies to all types, and the second to all
-boxes. In the olden days we'd have called this ambiguous. But what we
-do now is do a second *winnowing* pass that considers where clauses
-and attempts to remove candidates -- in this case, the first impl only
-applies if `Box<u16> : Copy`, which doesn't hold. After winnowing,
-then, we are left with just one candidate, so we can proceed. There is
-a test of this in `src/test/run-pass/traits-conditional-dispatch.rs`.
-
-#### Matching
-
-The subroutines that decide whether a particular impl/where-clause/etc
-applies to a particular obligation. At the moment, this amounts to
-unifying the self types, but in the future we may also recursively
-consider some of the nested obligations, in the case of an impl.
-
-#### Lifetimes and selection
-
-Because of how that lifetime inference works, it is not possible to
-give back immediate feedback as to whether a unification or subtype
-relationship between lifetimes holds or not. Therefore, lifetime
-matching is *not* considered during selection. This is reflected in
-the fact that subregion assignment is infallible. This may yield
-lifetime constraints that will later be found to be in error (in
-contrast, the non-lifetime-constraints have already been checked
-during selection and can never cause an error, though naturally they
-may lead to other errors downstream).
-
-#### Where clauses
-
-Besides an impl, the other major way to resolve an obligation is via a
-where clause. The selection process is always given a *parameter
-environment* which contains a list of where clauses, which are
-basically obligations that can assume are satisfiable. We will iterate
-over that list and check whether our current obligation can be found
-in that list, and if so it is considered satisfied. More precisely, we
-want to check whether there is a where-clause obligation that is for
-the same trait (or some subtrait) and for which the self types match,
-using the definition of *matching* given above.
-
-Consider this simple example:
-
-     trait A1 { ... }
-     trait A2 : A1 { ... }
-
-     trait B { ... }
-
-     fn foo<X:A2+B> { ... }
-
-Clearly we can use methods offered by `A1`, `A2`, or `B` within the
-body of `foo`. In each case, that will incur an obligation like `X :
-A1` or `X : A2`. The parameter environment will contain two
-where-clauses, `X : A2` and `X : B`. For each obligation, then, we
-search this list of where-clauses.  To resolve an obligation `X:A1`,
-we would note that `X:A2` implies that `X:A1`.
-
-### Confirmation
-
-Confirmation unifies the output type parameters of the trait with the
-values found in the obligation, possibly yielding a type error.  If we
-return to our example of the `Convert` trait from the previous
-section, confirmation is where an error would be reported, because the
-impl specified that `T` would be `uint`, but the obligation reported
-`char`. Hence the result of selection would be an error.
-
-### Selection during translation
-
-During type checking, we do not store the results of trait selection.
-We simply wish to verify that trait selection will succeed. Then
-later, at trans time, when we have all concrete types available, we
-can repeat the trait selection.  In this case, we do not consider any
-where-clauses to be in scope. We know that therefore each resolution
-will resolve to a particular impl.
-
-One interesting twist has to do with nested obligations. In general, in trans,
-we only need to do a "shallow" selection for an obligation. That is, we wish to
-identify which impl applies, but we do not (yet) need to decide how to select
-any nested obligations. Nonetheless, we *do* currently do a complete resolution,
-and that is because it can sometimes inform the results of type inference. That is,
-we do not have the full substitutions in terms of the type varibales of the impl available
-to us, so we must run trait selection to figure everything out.
-
-Here is an example:
-
-    trait Foo { ... }
-    impl<U,T:Bar<U>> Foo for Vec<T> { ... }
-
-    impl Bar<uint> for int { ... }
-
-After one shallow round of selection for an obligation like `Vec<int>
-: Foo`, we would know which impl we want, and we would know that
-`T=int`, but we do not know the type of `U`.  We must select the
-nested obligation `int : Bar<U>` to find out that `U=uint`.
-
-It would be good to only do *just as much* nested resolution as
-necessary. Currently, though, we just do a full resolution.
-
-## Method matching
-
-Method dispach follows a slightly different path than normal trait
-selection. This is because it must account for the transformed self
-type of the receiver and various other complications. The procedure is
-described in `select.rs` in the "METHOD MATCHING" section.
-
-# Caching and subtle considerations therewith
-
-In general we attempt to cache the results of trait selection.  This
-is a somewhat complex process. Part of the reason for this is that we
-want to be able to cache results even when all the types in the trait
-reference are not fully known. In that case, it may happen that the
-trait selection process is also influencing type variables, so we have
-to be able to not only cache the *result* of the selection process,
-but *replay* its effects on the type variables.
-
-## An example
-
-The high-level idea of how the cache works is that we first replace
-all unbound inference variables with skolemized versions. Therefore,
-if we had a trait reference `uint : Foo<$1>`, where `$n` is an unbound
-inference variable, we might replace it with `uint : Foo<%0>`, where
-`%n` is a skolemized type. We would then look this up in the cache.
-If we found a hit, the hit would tell us the immediate next step to
-take in the selection process: i.e., apply impl #22, or apply where
-clause `X : Foo<Y>`. Let's say in this case there is no hit.
-Therefore, we search through impls and where clauses and so forth, and
-we come to the conclusion that the only possible impl is this one,
-with def-id 22:
-
-    impl Foo<int> for uint { ... } // Impl #22
-
-We would then record in the cache `uint : Foo<%0> ==>
-ImplCandidate(22)`. Next we would confirm `ImplCandidate(22)`, which
-would (as a side-effect) unify `$1` with `int`.
-
-Now, at some later time, we might come along and see a `uint :
-Foo<$3>`.  When skolemized, this would yield `uint : Foo<%0>`, just as
-before, and hence the cache lookup would succeed, yielding
-`ImplCandidate(22)`. We would confirm `ImplCandidate(22)` which would
-(as a side-effect) unify `$3` with `int`.
-
-## Where clauses and the local vs global cache
-
-One subtle interaction is that the results of trait lookup will vary
-depending on what where clauses are in scope. Therefore, we actually
-have *two* caches, a local and a global cache. The local cache is
-attached to the `ParameterEnvironment` and the global cache attached
-to the `tcx`. We use the local cache whenever the result might depend
-on the where clauses that are in scope. The determination of which
-cache to use is done by the method `pick_candidate_cache` in
-`select.rs`.
-
-There are two cases where we currently use the local cache. The
-current rules are probably more conservative than necessary.
-
-### Trait references that involve parameter types
-
-The most obvious case where you need the local environment is
-when the trait reference includes parameter types. For example,
-consider the following function:
-
-    impl<T> Vec<T> {
-        fn foo(x: T)
-            where T : Foo
-        { ... }
-
-        fn bar(x: T)
-        { ... }
-    }
-
-If there is an obligation `T : Foo`, or `int : Bar<T>`, or whatever,
-clearly the results from `foo` and `bar` are potentially different,
-since the set of where clauses in scope are different.
-
-### Trait references with unbound variables when where clauses are in scope
-
-There is another less obvious interaction which involves unbound variables
-where *only* where clauses are in scope (no impls). This manifested as
-issue #18209 (`run-pass/trait-cache-issue-18209.rs`). Consider
-this snippet:
-
-```
-pub trait Foo {
-    fn load_from() -> Box<Self>;
-    fn load() -> Box<Self> {
-        Foo::load_from()
-    }
-}
-```
-
-The default method will incur an obligation `$0 : Foo` from the call
-to `load_from`. If there are no impls, this can be eagerly resolved to
-`VtableParam(Self : Foo)` and cached. Because the trait reference
-doesn't involve any parameters types (only the resolution does), this
-result was stored in the global cache, causing later calls to
-`Foo::load_from()` to get nonsense.
-
-To fix this, we always use the local cache if there are unbound
-variables and where clauses in scope. This is more conservative than
-necessary as far as I can tell. However, it still seems to be a simple
-rule and I observe ~99% hit rate on rustc, so it doesn't seem to hurt
-us in particular.
-
-Here is an example of the kind of subtle case that I would be worried
-about with a more complex rule (although this particular case works
-out ok). Imagine the trait reference doesn't directly reference a
-where clause, but the where clause plays a role in the winnowing
-phase. Something like this:
-
-```
-pub trait Foo<T> { ... }
-pub trait Bar { ... }
-impl<U,T:Bar> Foo<U> for T { ... } // Impl A
-impl Foo<char> for uint { ... }    // Impl B
-```
-
-Now, in some function, we have no where clauses in scope, and we have
-an obligation `$1 : Foo<$0>`. We might then conclude that `$0=char`
-and `$1=uint`: this is because for impl A to apply, `uint:Bar` would
-have to hold, and we know it does not or else the coherence check
-would have failed.  So we might enter into our global cache: `$1 :
-Foo<$0> => Impl B`.  Then we come along in a different scope, where a
-generic type `A` is around with the bound `A:Bar`. Now suddenly the
-impl is viable.
-
-The flaw in this imaginary DOOMSDAY SCENARIO is that we would not
-currently conclude that `$1 : Foo<$0>` implies that `$0 == uint` and
-`$1 == char`, even though it is true that (absent type parameters)
-there is no other type the user could enter. However, it is not
-*completely* implausible that we *could* draw this conclusion in the
-future; we wouldn't have to guess types, in particular, we could be
-led by the impls.
-
-*/
+//! # TRAIT RESOLUTION
+//!
+//! This document describes the general process and points out some non-obvious
+//! things.
+//!
+//! ## Major concepts
+//!
+//! Trait resolution is the process of pairing up an impl with each
+//! reference to a trait. So, for example, if there is a generic function like:
+//!
+//!     fn clone_slice<T:Clone>(x: &[T]) -> Vec<T> { ... }
+//!
+//! and then a call to that function:
+//!
+//!     let v: Vec<int> = clone_slice([1, 2, 3].as_slice())
+//!
+//! it is the job of trait resolution to figure out (in this case)
+//! whether there exists an impl of `int : Clone`
+//!
+//! Note that in some cases, like generic functions, we may not be able to
+//! find a specific impl, but we can figure out that the caller must
+//! provide an impl. To see what I mean, consider the body of `clone_slice`:
+//!
+//!     fn clone_slice<T:Clone>(x: &[T]) -> Vec<T> {
+//!         let mut v = Vec::new();
+//!         for e in x.iter() {
+//!             v.push((*e).clone()); // (*)
+//!         }
+//!     }
+//!
+//! The line marked `(*)` is only legal if `T` (the type of `*e`)
+//! implements the `Clone` trait. Naturally, since we don't know what `T`
+//! is, we can't find the specific impl; but based on the bound `T:Clone`,
+//! we can say that there exists an impl which the caller must provide.
+//!
+//! We use the term *obligation* to refer to a trait reference in need of
+//! an impl.
+//!
+//! ## Overview
+//!
+//! Trait resolution consists of three major parts:
+//!
+//! - SELECTION: Deciding how to resolve a specific obligation. For
+//!   example, selection might decide that a specific obligation can be
+//!   resolved by employing an impl which matches the self type, or by
+//!   using a parameter bound. In the case of an impl, Selecting one
+//!   obligation can create *nested obligations* because of where clauses
+//!   on the impl itself. It may also require evaluating those nested
+//!   obligations to resolve ambiguities.
+//!
+//! - FULFILLMENT: The fulfillment code is what tracks that obligations
+//!   are completely fulfilled. Basically it is a worklist of obligations
+//!   to be selected: once selection is successful, the obligation is
+//!   removed from the worklist and any nested obligations are enqueued.
+//!
+//! - COHERENCE: The coherence checks are intended to ensure that there
+//!   are never overlapping impls, where two impls could be used with
+//!   equal precedence.
+//!
+//! ## Selection
+//!
+//! Selection is the process of deciding whether an obligation can be
+//! resolved and, if so, how it is to be resolved (via impl, where clause, etc).
+//! The main interface is the `select()` function, which takes an obligation
+//! and returns a `SelectionResult`. There are three possible outcomes:
+//!
+//! - `Ok(Some(selection))` -- yes, the obligation can be resolved, and
+//!   `selection` indicates how. If the impl was resolved via an impl,
+//!   then `selection` may also indicate nested obligations that are required
+//!   by the impl.
+//!
+//! - `Ok(None)` -- we are not yet sure whether the obligation can be
+//!   resolved or not. This happens most commonly when the obligation
+//!   contains unbound type variables.
+//!
+//! - `Err(err)` -- the obligation definitely cannot be resolved due to a
+//!   type error, or because there are no impls that could possibly apply,
+//!   etc.
+//!
+//! The basic algorithm for selection is broken into two big phases:
+//! candidate assembly and confirmation.
+//!
+//! ### Candidate assembly
+//!
+//! Searches for impls/where-clauses/etc that might
+//! possibly be used to satisfy the obligation. Each of those is called
+//! a candidate. To avoid ambiguity, we want to find exactly one
+//! candidate that is definitively applicable. In some cases, we may not
+//! know whether an impl/where-clause applies or not -- this occurs when
+//! the obligation contains unbound inference variables.
+//!
+//! The basic idea for candidate assembly is to do a first pass in which
+//! we identify all possible candidates. During this pass, all that we do
+//! is try and unify the type parameters. (In particular, we ignore any
+//! nested where clauses.) Presuming that this unification succeeds, the
+//! impl is added as a candidate.
+//!
+//! Once this first pass is done, we can examine the set of candidates. If
+//! it is a singleton set, then we are done: this is the only impl in
+//! scope that could possibly apply. Otherwise, we can winnow down the set
+//! of candidates by using where clauses and other conditions. If this
+//! reduced set yields a single, unambiguous entry, we're good to go,
+//! otherwise the result is considered ambiguous.
+//!
+//! #### The basic process: Inferring based on the impls we see
+//!
+//! This process is easier if we work through some examples. Consider
+//! the following trait:
+//!
+//! ```
+//! trait Convert<Target> {
+//!     fn convert(&self) -> Target;
+//! }
+//! ```
+//!
+//! This trait just has one method. It's about as simple as it gets. It
+//! converts from the (implicit) `Self` type to the `Target` type. If we
+//! wanted to permit conversion between `int` and `uint`, we might
+//! implement `Convert` like so:
+//!
+//! ```rust
+//! impl Convert<uint> for int { ... } // int -> uint
+//! impl Convert<int> for uint { ... } // uint -> int
+//! ```
+//!
+//! Now imagine there is some code like the following:
+//!
+//! ```rust
+//! let x: int = ...;
+//! let y = x.convert();
+//! ```
+//!
+//! The call to convert will generate a trait reference `Convert<$Y> for
+//! int`, where `$Y` is the type variable representing the type of
+//! `y`. When we match this against the two impls we can see, we will find
+//! that only one remains: `Convert<uint> for int`. Therefore, we can
+//! select this impl, which will cause the type of `$Y` to be unified to
+//! `uint`. (Note that while assembling candidates, we do the initial
+//! unifications in a transaction, so that they don't affect one another.)
+//!
+//! There are tests to this effect in src/test/run-pass:
+//!
+//!    traits-multidispatch-infer-convert-source-and-target.rs
+//!    traits-multidispatch-infer-convert-target.rs
+//!
+//! #### Winnowing: Resolving ambiguities
+//!
+//! But what happens if there are multiple impls where all the types
+//! unify? Consider this example:
+//!
+//! ```rust
+//! trait Get {
+//!     fn get(&self) -> Self;
+//! }
+//!
+//! impl<T:Copy> Get for T {
+//!     fn get(&self) -> T { *self }
+//! }
+//!
+//! impl<T:Get> Get for Box<T> {
+//!     fn get(&self) -> Box<T> { box get_it(&**self) }
+//! }
+//! ```
+//!
+//! What happens when we invoke `get_it(&box 1_u16)`, for example? In this
+//! case, the `Self` type is `Box<u16>` -- that unifies with both impls,
+//! because the first applies to all types, and the second to all
+//! boxes. In the olden days we'd have called this ambiguous. But what we
+//! do now is do a second *winnowing* pass that considers where clauses
+//! and attempts to remove candidates -- in this case, the first impl only
+//! applies if `Box<u16> : Copy`, which doesn't hold. After winnowing,
+//! then, we are left with just one candidate, so we can proceed. There is
+//! a test of this in `src/test/run-pass/traits-conditional-dispatch.rs`.
+//!
+//! #### Matching
+//!
+//! The subroutines that decide whether a particular impl/where-clause/etc
+//! applies to a particular obligation. At the moment, this amounts to
+//! unifying the self types, but in the future we may also recursively
+//! consider some of the nested obligations, in the case of an impl.
+//!
+//! #### Lifetimes and selection
+//!
+//! Because of how lifetime inference works, it is not possible to
+//! give back immediate feedback as to whether a unification or subtype
+//! relationship between lifetimes holds or not. Therefore, lifetime
+//! matching is *not* considered during selection. This is reflected in
+//! the fact that subregion assignment is infallible. This may yield
+//! lifetime constraints that will later be found to be in error (in
+//! contrast, the non-lifetime-constraints have already been checked
+//! during selection and can never cause an error, though naturally they
+//! may lead to other errors downstream).
+//!
+//! #### Where clauses
+//!
+//! Besides an impl, the other major way to resolve an obligation is via a
+//! where clause. The selection process is always given a *parameter
+//! environment* which contains a list of where clauses, which are
+//! basically obligations that we can assume are satisfiable. We will iterate
+//! over that list and check whether our current obligation can be found
+//! in that list, and if so it is considered satisfied. More precisely, we
+//! want to check whether there is a where-clause obligation that is for
+//! the same trait (or some subtrait) and for which the self types match,
+//! using the definition of *matching* given above.
+//!
+//! Consider this simple example:
+//!
+//!      trait A1 { ... }
+//!      trait A2 : A1 { ... }
+//!
+//!      trait B { ... }
+//!
+//!      fn foo<X:A2+B> { ... }
+//!
+//! Clearly we can use methods offered by `A1`, `A2`, or `B` within the
+//! body of `foo`. In each case, that will incur an obligation like `X :
+//! A1` or `X : A2`. The parameter environment will contain two
+//! where-clauses, `X : A2` and `X : B`. For each obligation, then, we
+//! search this list of where-clauses.  To resolve an obligation `X:A1`,
+//! we would note that `X:A2` implies that `X:A1`.
+//!
+//! ### Confirmation
+//!
+//! Confirmation unifies the output type parameters of the trait with the
+//! values found in the obligation, possibly yielding a type error.  If we
+//! return to our example of the `Convert` trait from the previous
+//! section, confirmation is where an error would be reported, because the
+//! impl specified that `T` would be `uint`, but the obligation reported
+//! `char`. Hence the result of selection would be an error.
+//!
+//! ### Selection during translation
+//!
+//! During type checking, we do not store the results of trait selection.
+//! We simply wish to verify that trait selection will succeed. Then
+//! later, at trans time, when we have all concrete types available, we
+//! can repeat the trait selection.  In this case, we do not consider any
+//! where-clauses to be in scope. We know that therefore each resolution
+//! will resolve to a particular impl.
+//!
+//! One interesting twist has to do with nested obligations. In general, in trans,
+//! we only need to do a "shallow" selection for an obligation. That is, we wish to
+//! identify which impl applies, but we do not (yet) need to decide how to select
+//! any nested obligations. Nonetheless, we *do* currently do a complete resolution,
+//! and that is because it can sometimes inform the results of type inference. That is,
+//! we do not have the full substitutions in terms of the type variables of the impl available
+//! to us, so we must run trait selection to figure everything out.
+//!
+//! Here is an example:
+//!
+//!     trait Foo { ... }
+//!     impl<U,T:Bar<U>> Foo for Vec<T> { ... }
+//!
+//!     impl Bar<uint> for int { ... }
+//!
+//! After one shallow round of selection for an obligation like `Vec<int>
+//! : Foo`, we would know which impl we want, and we would know that
+//! `T=int`, but we do not know the type of `U`.  We must select the
+//! nested obligation `int : Bar<U>` to find out that `U=uint`.
+//!
+//! It would be good to only do *just as much* nested resolution as
+//! necessary. Currently, though, we just do a full resolution.
+//!
+//! ## Method matching
+//!
+//! Method dispatch follows a slightly different path than normal trait
+//! selection. This is because it must account for the transformed self
+//! type of the receiver and various other complications. The procedure is
+//! described in `select.rs` in the "METHOD MATCHING" section.
+//!
+//! # Caching and subtle considerations therewith
+//!
+//! In general we attempt to cache the results of trait selection.  This
+//! is a somewhat complex process. Part of the reason for this is that we
+//! want to be able to cache results even when all the types in the trait
+//! reference are not fully known. In that case, it may happen that the
+//! trait selection process is also influencing type variables, so we have
+//! to be able to not only cache the *result* of the selection process,
+//! but *replay* its effects on the type variables.
+//!
+//! ## An example
+//!
+//! The high-level idea of how the cache works is that we first replace
+//! all unbound inference variables with skolemized versions. Therefore,
+//! if we had a trait reference `uint : Foo<$1>`, where `$n` is an unbound
+//! inference variable, we might replace it with `uint : Foo<%0>`, where
+//! `%n` is a skolemized type. We would then look this up in the cache.
+//! If we found a hit, the hit would tell us the immediate next step to
+//! take in the selection process: i.e., apply impl #22, or apply where
+//! clause `X : Foo<Y>`. Let's say in this case there is no hit.
+//! Therefore, we search through impls and where clauses and so forth, and
+//! we come to the conclusion that the only possible impl is this one,
+//! with def-id 22:
+//!
+//!     impl Foo<int> for uint { ... } // Impl #22
+//!
+//! We would then record in the cache `uint : Foo<%0> ==>
+//! ImplCandidate(22)`. Next we would confirm `ImplCandidate(22)`, which
+//! would (as a side-effect) unify `$1` with `int`.
+//!
+//! Now, at some later time, we might come along and see a `uint :
+//! Foo<$3>`.  When skolemized, this would yield `uint : Foo<%0>`, just as
+//! before, and hence the cache lookup would succeed, yielding
+//! `ImplCandidate(22)`. We would confirm `ImplCandidate(22)` which would
+//! (as a side-effect) unify `$3` with `int`.
+//!
+//! ## Where clauses and the local vs global cache
+//!
+//! One subtle interaction is that the results of trait lookup will vary
+//! depending on what where clauses are in scope. Therefore, we actually
+//! have *two* caches, a local and a global cache. The local cache is
+//! attached to the `ParameterEnvironment` and the global cache attached
+//! to the `tcx`. We use the local cache whenever the result might depend
+//! on the where clauses that are in scope. The determination of which
+//! cache to use is done by the method `pick_candidate_cache` in
+//! `select.rs`.
+//!
+//! There are two cases where we currently use the local cache. The
+//! current rules are probably more conservative than necessary.
+//!
+//! ### Trait references that involve parameter types
+//!
+//! The most obvious case where you need the local environment is
+//! when the trait reference includes parameter types. For example,
+//! consider the following function:
+//!
+//!     impl<T> Vec<T> {
+//!         fn foo(x: T)
+//!             where T : Foo
+//!         { ... }
+//!
+//!         fn bar(x: T)
+//!         { ... }
+//!     }
+//!
+//! If there is an obligation `T : Foo`, or `int : Bar<T>`, or whatever,
+//! clearly the results from `foo` and `bar` are potentially different,
+//! since the set of where clauses in scope are different.
+//!
+//! ### Trait references with unbound variables when where clauses are in scope
+//!
+//! There is another less obvious interaction which involves unbound variables
+//! where *only* where clauses are in scope (no impls). This manifested as
+//! issue #18209 (`run-pass/trait-cache-issue-18209.rs`). Consider
+//! this snippet:
+//!
+//! ```
+//! pub trait Foo {
+//!     fn load_from() -> Box<Self>;
+//!     fn load() -> Box<Self> {
+//!         Foo::load_from()
+//!     }
+//! }
+//! ```
+//!
+//! The default method will incur an obligation `$0 : Foo` from the call
+//! to `load_from`. If there are no impls, this can be eagerly resolved to
+//! `VtableParam(Self : Foo)` and cached. Because the trait reference
+//! doesn't involve any parameter types (only the resolution does), this
+//! result was stored in the global cache, causing later calls to
+//! `Foo::load_from()` to get nonsense.
+//!
+//! To fix this, we always use the local cache if there are unbound
+//! variables and where clauses in scope. This is more conservative than
+//! necessary as far as I can tell. However, it still seems to be a simple
+//! rule and I observe ~99% hit rate on rustc, so it doesn't seem to hurt
+//! us in particular.
+//!
+//! Here is an example of the kind of subtle case that I would be worried
+//! about with a more complex rule (although this particular case works
+//! out ok). Imagine the trait reference doesn't directly reference a
+//! where clause, but the where clause plays a role in the winnowing
+//! phase. Something like this:
+//!
+//! ```
+//! pub trait Foo<T> { ... }
+//! pub trait Bar { ... }
+//! impl<U,T:Bar> Foo<U> for T { ... } // Impl A
+//! impl Foo<char> for uint { ... }    // Impl B
+//! ```
+//!
+//! Now, in some function, we have no where clauses in scope, and we have
+//! an obligation `$1 : Foo<$0>`. We might then conclude that `$0=char`
+//! and `$1=uint`: this is because for impl A to apply, `uint:Bar` would
+//! have to hold, and we know it does not or else the coherence check
+//! would have failed.  So we might enter into our global cache: `$1 :
+//! Foo<$0> => Impl B`.  Then we come along in a different scope, where a
+//! generic type `A` is around with the bound `A:Bar`. Now suddenly the
+//! impl is viable.
+//!
+//! The flaw in this imaginary DOOMSDAY SCENARIO is that we would not
+//! currently conclude that `$1 : Foo<$0>` implies that `$0 == uint` and
+//! `$1 == char`, even though it is true that (absent type parameters)
+//! there is no other type the user could enter. However, it is not
+//! *completely* implausible that we *could* draw this conclusion in the
+//! future; we wouldn't have to guess types, in particular, we could be
+//! led by the impls.
diff --git a/src/librustc/middle/traits/fulfill.rs b/src/librustc/middle/traits/fulfill.rs
index 62382ac386fcd..a22eba486e8b9 100644
--- a/src/librustc/middle/traits/fulfill.rs
+++ b/src/librustc/middle/traits/fulfill.rs
@@ -81,20 +81,16 @@ impl<'tcx> FulfillmentContext<'tcx> {
         }
     }
 
+    /// Attempts to select obligations that were registered since the call to a selection routine.
+    /// This is used by the type checker to eagerly attempt to resolve obligations in hopes of
+    /// gaining type information. It'd be equally valid to use `select_where_possible` but it
+    /// results in `O(n^2)` performance (#18208).
     pub fn select_new_obligations<'a>(&mut self,
                                       infcx: &InferCtxt<'a,'tcx>,
                                       param_env: &ty::ParameterEnvironment<'tcx>,
                                       typer: &Typer<'tcx>)
                                       -> Result<(),Vec<FulfillmentError<'tcx>>>
     {
-        /*!
-         * Attempts to select obligations that were registered since
-         * the call to a selection routine. This is used by the type checker
-         * to eagerly attempt to resolve obligations in hopes of gaining
-         * type information. It'd be equally valid to use `select_where_possible`
-         * but it results in `O(n^2)` performance (#18208).
-         */
-
         let mut selcx = SelectionContext::new(infcx, param_env, typer);
         self.select(&mut selcx, true)
     }
@@ -113,16 +109,13 @@ impl<'tcx> FulfillmentContext<'tcx> {
         self.trait_obligations[]
     }
 
+    /// Attempts to select obligations using `selcx`. If `only_new_obligations` is true, then it
+    /// only attempts to select obligations that haven't been seen before.
     fn select<'a>(&mut self,
                   selcx: &mut SelectionContext<'a, 'tcx>,
                   only_new_obligations: bool)
                   -> Result<(),Vec<FulfillmentError<'tcx>>>
     {
-        /*!
-         * Attempts to select obligations using `selcx`. If
-         * `only_new_obligations` is true, then it only attempts to
-         * select obligations that haven't been seen before.
-         */
         debug!("select({} obligations, only_new_obligations={}) start",
                self.trait_obligations.len(),
                only_new_obligations);
diff --git a/src/librustc/middle/traits/mod.rs b/src/librustc/middle/traits/mod.rs
index 0a47d64789038..c4eeff8caf64a 100644
--- a/src/librustc/middle/traits/mod.rs
+++ b/src/librustc/middle/traits/mod.rs
@@ -8,9 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
- * Trait Resolution. See doc.rs.
- */
+//! Trait Resolution. See doc.rs.
 
 pub use self::SelectionError::*;
 pub use self::FulfillmentErrorCode::*;
@@ -226,6 +224,10 @@ pub struct VtableParamData<'tcx> {
     pub bound: Rc<ty::TraitRef<'tcx>>,
 }
 
+/// Matches the self type of the inherent impl `impl_def_id`
+/// against `self_ty` and returns the resulting resolution.  This
+/// routine may modify the surrounding type context (for example,
+/// it may unify variables).
 pub fn select_inherent_impl<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
                                      param_env: &ty::ParameterEnvironment<'tcx>,
                                      typer: &Typer<'tcx>,
@@ -235,13 +237,6 @@ pub fn select_inherent_impl<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
                                      -> SelectionResult<'tcx,
                                             VtableImplData<'tcx, Obligation<'tcx>>>
 {
-    /*!
-     * Matches the self type of the inherent impl `impl_def_id`
-     * against `self_ty` and returns the resulting resolution.  This
-     * routine may modify the surrounding type context (for example,
-     * it may unify variables).
-     */
-
     // This routine is only suitable for inherent impls. This is
     // because it does not attempt to unify the output type parameters
     // from the trait ref against the values from the obligation.
@@ -256,53 +251,41 @@ pub fn select_inherent_impl<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
     selcx.select_inherent_impl(impl_def_id, cause, self_ty)
 }
 
+/// True if neither the trait nor self type is local. Note that `impl_def_id` must refer to an impl
+/// of a trait, not an inherent impl.
 pub fn is_orphan_impl(tcx: &ty::ctxt,
                       impl_def_id: ast::DefId)
                       -> bool
 {
-    /*!
-     * True if neither the trait nor self type is local. Note that
-     * `impl_def_id` must refer to an impl of a trait, not an inherent
-     * impl.
-     */
-
     !coherence::impl_is_local(tcx, impl_def_id)
 }
 
+/// True if there exist types that satisfy both of the two given impls.
 pub fn overlapping_impls(infcx: &InferCtxt,
                          impl1_def_id: ast::DefId,
                          impl2_def_id: ast::DefId)
                          -> bool
 {
-    /*!
-     * True if there exist types that satisfy both of the two given impls.
-     */
-
     coherence::impl_can_satisfy(infcx, impl1_def_id, impl2_def_id) &&
     coherence::impl_can_satisfy(infcx, impl2_def_id, impl1_def_id)
 }
 
+/// Given generic bounds from an impl like:
+///
+///    impl<A:Foo, B:Bar+Qux> ...
+///
+/// along with the bindings for the types `A` and `B` (e.g., `<A=A0, B=B0>`), yields a result like
+///
+///    [[Foo for A0, Bar for B0, Qux for B0], [], []]
+///
+/// Expects that `generic_bounds` have already been fully substituted, late-bound regions liberated
+/// and so forth, so that they are in the same namespace as `type_substs`.
 pub fn obligations_for_generics<'tcx>(tcx: &ty::ctxt<'tcx>,
                                       cause: ObligationCause<'tcx>,
                                       generic_bounds: &ty::GenericBounds<'tcx>,
                                       type_substs: &subst::VecPerParamSpace<Ty<'tcx>>)
                                       -> subst::VecPerParamSpace<Obligation<'tcx>>
 {
-    /*!
-     * Given generic bounds from an impl like:
-     *
-     *    impl<A:Foo, B:Bar+Qux> ...
-     *
-     * along with the bindings for the types `A` and `B` (e.g.,
-     * `<A=A0, B=B0>`), yields a result like
-     *
-     *    [[Foo for A0, Bar for B0, Qux for B0], [], []]
-     *
-     * Expects that `generic_bounds` have already been fully
-     * substituted, late-bound regions liberated and so forth,
-     * so that they are in the same namespace as `type_substs`.
-     */
-
     util::obligations_for_generics(tcx, cause, 0, generic_bounds, type_substs)
 }
 
diff --git a/src/librustc/middle/traits/select.rs b/src/librustc/middle/traits/select.rs
index d1cc851c41f20..f49cd2dd19f7f 100644
--- a/src/librustc/middle/traits/select.rs
+++ b/src/librustc/middle/traits/select.rs
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*! See `doc.rs` for high-level documentation */
+//! See `doc.rs` for high-level documentation
 #![allow(dead_code)] // FIXME -- just temporarily
 
 pub use self::MethodMatchResult::*;
@@ -201,15 +201,11 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
     //    is `Vec<Foo>:Iterable<Bar>`, but the impl specifies
     //    `impl<T> Iterable<T> for Vec<T>`, than an error would result.
 
+    /// Evaluates whether the obligation can be satisfied. Returns an indication of whether the
+    /// obligation can be satisfied and, if so, by what means. Never affects surrounding typing
+    /// environment.
     pub fn select(&mut self, obligation: &Obligation<'tcx>)
                   -> SelectionResult<'tcx, Selection<'tcx>> {
-        /*!
-         * Evaluates whether the obligation can be satisfied. Returns
-         * an indication of whether the obligation can be satisfied
-         * and, if so, by what means. Never affects surrounding typing
-         * environment.
-         */
-
         debug!("select({})", obligation.repr(self.tcx()));
         assert!(!obligation.trait_ref.has_escaping_regions());
 
@@ -253,15 +249,11 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
     // The result is "true" if the obligation *may* hold and "false" if
     // we can be sure it does not.
 
+    /// Evaluates whether the obligation `obligation` can be satisfied (by any means).
     pub fn evaluate_obligation(&mut self,
                                obligation: &Obligation<'tcx>)
                                -> bool
     {
-        /*!
-         * Evaluates whether the obligation `obligation` can be
-         * satisfied (by any means).
-         */
-
         debug!("evaluate_obligation({})",
                obligation.repr(self.tcx()));
         assert!(!obligation.trait_ref.has_escaping_regions());
@@ -387,17 +379,13 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
         }
     }
 
+    /// Evaluates whether the impl with id `impl_def_id` could be applied to the self type
+    /// `obligation_self_ty`. This can be used either for trait or inherent impls.
     pub fn evaluate_impl(&mut self,
                          impl_def_id: ast::DefId,
                          obligation: &Obligation<'tcx>)
                          -> bool
     {
-        /*!
-         * Evaluates whether the impl with id `impl_def_id` could be
-         * applied to the self type `obligation_self_ty`. This can be
-         * used either for trait or inherent impls.
-         */
-
         debug!("evaluate_impl(impl_def_id={}, obligation={})",
                impl_def_id.repr(self.tcx()),
                obligation.repr(self.tcx()));
@@ -435,23 +423,20 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
     // the body of `evaluate_method_obligation()` for more details on
     // the algorithm.
 
+    /// Determine whether a trait-method is applicable to a receiver of
+    /// type `rcvr_ty`. *Does not affect the inference state.*
+    ///
+    /// - `rcvr_ty` -- type of the receiver
+    /// - `xform_self_ty` -- transformed self type declared on the method, with `Self`
+    ///   to a fresh type variable
+    /// - `obligation` -- a reference to the trait where the method is declared, with
+    ///   the input types on the trait replaced with fresh type variables
     pub fn evaluate_method_obligation(&mut self,
                                       rcvr_ty: Ty<'tcx>,
                                       xform_self_ty: Ty<'tcx>,
                                       obligation: &Obligation<'tcx>)
                                       -> MethodMatchResult
     {
-        /*!
-         * Determine whether a trait-method is applicable to a receiver of
-         * type `rcvr_ty`. *Does not affect the inference state.*
-         *
-         * - `rcvr_ty` -- type of the receiver
-         * - `xform_self_ty` -- transformed self type declared on the method, with `Self`
-         *   to a fresh type variable
-         * - `obligation` -- a reference to the trait where the method is declared, with
-         *   the input types on the trait replaced with fresh type variables
-         */
-
         // Here is the situation. We have a trait method declared (say) like so:
         //
         //     trait TheTrait {
@@ -563,19 +548,15 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
         }
     }
 
+    /// Given the successful result of a method match, this function "confirms" the result, which
+    /// basically repeats the various matching operations, but outside of any snapshot so that
+    /// their effects are committed into the inference state.
     pub fn confirm_method_match(&mut self,
                                 rcvr_ty: Ty<'tcx>,
                                 xform_self_ty: Ty<'tcx>,
                                 obligation: &Obligation<'tcx>,
                                 data: MethodMatchedData)
     {
-        /*!
-         * Given the successful result of a method match, this
-         * function "confirms" the result, which basically repeats the
-         * various matching operations, but outside of any snapshot so
-         * that their effects are committed into the inference state.
-         */
-
         let is_ok = match data {
             PreciseMethodMatch => {
                 self.match_method_precise(rcvr_ty, xform_self_ty, obligation).is_ok()
@@ -597,17 +578,14 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
         }
     }
 
+    /// Implements the *precise method match* procedure described in
+    /// `evaluate_method_obligation()`.
     fn match_method_precise(&mut self,
                             rcvr_ty: Ty<'tcx>,
                             xform_self_ty: Ty<'tcx>,
                             obligation: &Obligation<'tcx>)
                             -> Result<(),()>
     {
-        /*!
-         * Implements the *precise method match* procedure described in
-         * `evaluate_method_obligation()`.
-         */
-
         self.infcx.commit_if_ok(|| {
             match self.infcx.sub_types(false, infer::RelateSelfType(obligation.cause.span),
                                        rcvr_ty, xform_self_ty) {
@@ -623,18 +601,14 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
         })
     }
 
+    /// Assembles a list of potentially applicable impls using the *coercive match* procedure
+    /// described in `evaluate_method_obligation()`.
     fn assemble_method_candidates_from_impls(&mut self,
                                              rcvr_ty: Ty<'tcx>,
                                              xform_self_ty: Ty<'tcx>,
                                              obligation: &Obligation<'tcx>)
                                              -> Vec<ast::DefId>
     {
-        /*!
-         * Assembles a list of potentially applicable impls using the
-         * *coercive match* procedure described in
-         * `evaluate_method_obligation()`.
-         */
-
         let mut candidates = Vec::new();
 
         let all_impls = self.all_impls(obligation.trait_ref.def_id);
@@ -650,6 +624,8 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
         candidates
     }
 
+    /// Applies the *coercive match* procedure described in `evaluate_method_obligation()` to a
+    /// particular impl.
     fn match_method_coerce(&mut self,
                            impl_def_id: ast::DefId,
                            rcvr_ty: Ty<'tcx>,
@@ -657,11 +633,6 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
                            obligation: &Obligation<'tcx>)
                            -> Result<Substs<'tcx>, ()>
     {
-        /*!
-         * Applies the *coercive match* procedure described in
-         * `evaluate_method_obligation()` to a particular impl.
-         */
-
         // This is almost always expected to succeed. It
         // causes the impl's self-type etc to be unified with
         // the type variable that is shared between
@@ -683,6 +654,8 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
         Ok(substs)
     }
 
+    /// A version of `winnow_impl` applicable to coercive method matching.  This is basically the
+    /// same as `winnow_impl` but it uses the method matching procedure and is specific to impls.
     fn winnow_method_impl(&mut self,
                           impl_def_id: ast::DefId,
                           rcvr_ty: Ty<'tcx>,
@@ -690,13 +663,6 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
                           obligation: &Obligation<'tcx>)
                           -> bool
     {
-        /*!
-         * A version of `winnow_impl` applicable to coerice method
-         * matching.  This is basically the same as `winnow_impl` but
-         * it uses the method matching procedure and is specific to
-         * impls.
-         */
-
         debug!("winnow_method_impl: impl_def_id={} rcvr_ty={} xform_self_ty={} obligation={}",
                impl_def_id.repr(self.tcx()),
                rcvr_ty.repr(self.tcx()),
@@ -962,19 +928,15 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
         Ok(candidates)
     }
 
+    /// Given an obligation like `<SomeTrait for T>`, search the obligations that the caller
+    /// supplied to find out whether it is listed among them.
+    ///
+    /// Never affects inference environment.
     fn assemble_candidates_from_caller_bounds(&mut self,
                                               obligation: &Obligation<'tcx>,
                                               candidates: &mut CandidateSet<'tcx>)
                                               -> Result<(),SelectionError<'tcx>>
     {
-        /*!
-         * Given an obligation like `<SomeTrait for T>`, search the obligations
-         * that the caller supplied to find out whether it is listed among
-         * them.
-         *
-         * Never affects inference environment.
-         */
-
         debug!("assemble_candidates_from_caller_bounds({})",
                obligation.repr(self.tcx()));
 
@@ -1002,22 +964,17 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
         Ok(())
     }
 
+    /// Check for the artificial impl that the compiler will create for an obligation like `X :
+    /// FnMut<..>` where `X` is an unboxed closure type.
+    ///
+    /// Note: the type parameters on an unboxed closure candidate are modeled as *output* type
+    /// parameters and hence do not affect whether this trait is a match or not. They will be
+    /// unified during the confirmation step.
     fn assemble_unboxed_candidates(&mut self,
                                    obligation: &Obligation<'tcx>,
                                    candidates: &mut CandidateSet<'tcx>)
                                    -> Result<(),SelectionError<'tcx>>
     {
-        /*!
-         * Check for the artificial impl that the compiler will create
-         * for an obligation like `X : FnMut<..>` where `X` is an
-         * unboxed closure type.
-         *
-         * Note: the type parameters on an unboxed closure candidate
-         * are modeled as *output* type parameters and hence do not
-         * affect whether this trait is a match or not. They will be
-         * unified during the confirmation step.
-         */
-
         let tcx = self.tcx();
         let kind = if Some(obligation.trait_ref.def_id) == tcx.lang_items.fn_trait() {
             ty::FnUnboxedClosureKind
@@ -1060,15 +1017,12 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
         Ok(())
     }
 
+    /// Search for impls that might apply to `obligation`.
     fn assemble_candidates_from_impls(&mut self,
                                       obligation: &Obligation<'tcx>,
                                       candidates: &mut CandidateSet<'tcx>)
                                       -> Result<(), SelectionError<'tcx>>
     {
-        /*!
-         * Search for impls that might apply to `obligation`.
-         */
-
         let all_impls = self.all_impls(obligation.trait_ref.def_id);
         for &impl_def_id in all_impls.iter() {
             self.infcx.probe(|| {
@@ -1092,17 +1046,14 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
     // attempt to evaluate recursive bounds to see if they are
     // satisfied.
 
+    /// Further evaluate `candidate` to decide whether all type parameters match and whether nested
+    /// obligations are met. Returns true if `candidate` remains viable after this further
+    /// scrutiny.
     fn winnow_candidate<'o>(&mut self,
                             stack: &ObligationStack<'o, 'tcx>,
                             candidate: &Candidate<'tcx>)
                             -> EvaluationResult
     {
-        /*!
-         * Further evaluate `candidate` to decide whether all type parameters match
-         * and whether nested obligations are met. Returns true if `candidate` remains
-         * viable after this further scrutiny.
-         */
-
         debug!("winnow_candidate: candidate={}", candidate.repr(self.tcx()));
         self.infcx.probe(|| {
             let candidate = (*candidate).clone();
@@ -1129,37 +1080,35 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
         result
     }
 
+    /// Returns true if `candidate_i` should be dropped in favor of `candidate_j`.
+    ///
+    /// This is generally true if either:
+    /// - candidate i and candidate j are equivalent; or,
+    /// - candidate i is a concrete impl and candidate j is a where clause bound,
+    ///   and the concrete impl is applicable to the types in the where clause bound.
+    ///
+    /// The last case refers to cases where there are blanket impls (often conditional
+    /// blanket impls) as well as a where clause. This can come down to one of two cases:
+    ///
+    /// - The impl is truly unconditional (it has no where clauses
+    ///   of its own), in which case the where clause is
+    ///   unnecessary, because coherence requires that we would
+    ///   pick that particular impl anyhow (at least so long as we
+    ///   don't have specialization).
+    ///
+    /// - The impl is conditional, in which case we may not have winnowed it out
+    ///   because we don't know if the conditions apply, but the where clause is basically
+    ///   telling us that there is some impl, though not necessarily the one we see.
+    ///
+    /// In both cases we prefer to take the where clause, which is
+    /// essentially harmless.  See issue #18453 for more details of
+    /// a case where doing the opposite caused us harm.
     fn candidate_should_be_dropped_in_favor_of<'o>(&mut self,
                                                    stack: &ObligationStack<'o, 'tcx>,
                                                    candidate_i: &Candidate<'tcx>,
                                                    candidate_j: &Candidate<'tcx>)
                                                    -> bool
     {
-        /*!
-         * Returns true if `candidate_i` should be dropped in favor of `candidate_j`.
-         * This is generally true if either:
-         * - candidate i and candidate j are equivalent; or,
-         * - candidate i is a conrete impl and candidate j is a where clause bound,
-         *   and the concrete impl is applicable to the types in the where clause bound.
-         *
-         * The last case refers to cases where there are blanket impls (often conditional
-         * blanket impls) as well as a where clause. This can come down to one of two cases:
-         *
-         * - The impl is truly unconditional (it has no where clauses
-         *   of its own), in which case the where clause is
-         *   unnecessary, because coherence requires that we would
-         *   pick that particular impl anyhow (at least so long as we
-         *   don't have specialization).
-         *
-         * - The impl is conditional, in which case we may not have winnowed it out
-         *   because we don't know if the conditions apply, but the where clause is basically
-         *   telling us taht there is some impl, though not necessarily the one we see.
-         *
-         * In both cases we prefer to take the where clause, which is
-         * essentially harmless.  See issue #18453 for more details of
-         * a case where doing the opposite caused us harm.
-         */
-
         match (candidate_i, candidate_j) {
             (&ImplCandidate(impl_def_id), &ParamCandidate(ref vt)) => {
                 debug!("Considering whether to drop param {} in favor of impl {}",
@@ -1848,26 +1797,23 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
         }
     }
 
+    /// Determines whether the self type declared against
+    /// `impl_def_id` matches `obligation_self_ty`. If successful,
+    /// returns the substitutions used to make them match. See
+    /// `match_impl()`. For example, if `impl_def_id` is declared
+    /// as:
+    ///
+    ///    impl<T:Copy> Foo for ~T { ... }
+    ///
+    /// and `obligation_self_ty` is `int`, we'd get back an `Err(_)`
+    /// result. But if `obligation_self_ty` were `~int`, we'd get
+    /// back `Ok(T=int)`.
     fn match_inherent_impl(&mut self,
                            impl_def_id: ast::DefId,
                            obligation_cause: ObligationCause,
                            obligation_self_ty: Ty<'tcx>)
                            -> Result<Substs<'tcx>,()>
     {
-        /*!
-         * Determines whether the self type declared against
-         * `impl_def_id` matches `obligation_self_ty`. If successful,
-         * returns the substitutions used to make them match. See
-         * `match_impl()`.  For example, if `impl_def_id` is declared
-         * as:
-         *
-         *    impl<T:Copy> Foo for ~T { ... }
-         *
-         * and `obligation_self_ty` is `int`, we'd back an `Err(_)`
-         * result. But if `obligation_self_ty` were `~int`, we'd get
-         * back `Ok(T=int)`.
-         */
-
         // Create fresh type variables for each type parameter declared
         // on the impl etc.
         let impl_substs = util::fresh_substs_for_impl(self.infcx,
@@ -1928,6 +1874,19 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
     // the output type parameters from the obligation with those found
     // on the impl/bound, which may yield type errors.
 
+    /// Relates the output type parameters from an impl to the
+    /// trait.  This may lead to type errors. The confirmation step
+    /// is separated from the main match procedure because these
+    /// type errors do not cause us to select another impl.
+    ///
+    /// As an example, consider matching the obligation
+    /// `Iterator<char> for Elems<int>` using the following impl:
+    ///
+    ///    impl<T> Iterator<T> for Elems<T> { ... }
+    ///
+    /// The match phase will succeed with substitution `T=int`.
+    /// The confirm step will then try to unify `int` and `char`
+    /// and yield an error.
     fn confirm_impl_vtable(&mut self,
                            impl_def_id: ast::DefId,
                            obligation_cause: ObligationCause<'tcx>,
@@ -1935,22 +1894,6 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
                            substs: &Substs<'tcx>)
                            -> Result<(), SelectionError<'tcx>>
     {
-        /*!
-         * Relates the output type parameters from an impl to the
-         * trait.  This may lead to type errors. The confirmation step
-         * is separated from the main match procedure because these
-         * type errors do not cause us to select another impl.
-         *
-         * As an example, consider matching the obligation
-         * `Iterator<char> for Elems<int>` using the following impl:
-         *
-         *    impl<T> Iterator<T> for Elems<T> { ... }
-         *
-         * The match phase will succeed with substitution `T=int`.
-         * The confirm step will then try to unify `int` and `char`
-         * and yield an error.
-         */
-
         let impl_trait_ref = ty::impl_trait_ref(self.tcx(),
                                                 impl_def_id).unwrap();
         let impl_trait_ref = impl_trait_ref.subst(self.tcx(),
@@ -1958,38 +1901,30 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
         self.confirm(obligation_cause, obligation_trait_ref, impl_trait_ref)
     }
 
+    /// After we have determined which impl applies, and with what substitutions, there is one last
+    /// step. We have to go back and relate the "output" type parameters from the obligation to the
+    /// types that are specified in the impl.
+    ///
+    /// For example, imagine we have:
+    ///
+    ///     impl<T> Iterator<T> for Vec<T> { ... }
+    ///
+    /// and our obligation is `Iterator<Foo> for Vec<int>` (note the mismatch in the obligation
+    /// types). Up until this step, no error would be reported: the self type is `Vec<int>`, and
+    /// that matches `Vec<T>` with the substitution `T=int`. At this stage, we could then go and
+    /// check that the type parameters to the `Iterator` trait match. (In terms of the parameters,
+    /// the `expected_trait_ref` here would be `Iterator<int> for Vec<int>`, and the
+    /// `obligation_trait_ref` would be `Iterator<Foo> for Vec<int>`.)
+    ///
+    /// Note that this checking occurs *after* the impl has been selected, because these output type
+    /// parameters should not affect the selection of the impl. Therefore, if there is a mismatch,
+    /// we report an error to the user.
     fn confirm(&mut self,
                obligation_cause: ObligationCause,
                obligation_trait_ref: Rc<ty::TraitRef<'tcx>>,
                expected_trait_ref: Rc<ty::TraitRef<'tcx>>)
                -> Result<(), SelectionError<'tcx>>
     {
-        /*!
-         * After we have determined which impl applies, and with what
-         * substitutions, there is one last step. We have to go back
-         * and relate the "output" type parameters from the obligation
-         * to the types that are specified in the impl.
-         *
-         * For example, imagine we have:
-         *
-         *     impl<T> Iterator<T> for Vec<T> { ... }
-         *
-         * and our obligation is `Iterator<Foo> for Vec<int>` (note
-         * the mismatch in the obligation types). Up until this step,
-         * no error would be reported: the self type is `Vec<int>`,
-         * and that matches `Vec<T>` with the substitution `T=int`.
-         * At this stage, we could then go and check that the type
-         * parameters to the `Iterator` trait match.
-         * (In terms of the parameters, the `expected_trait_ref`
-         * here would be `Iterator<int> for Vec<int>`, and the
-         * `obligation_trait_ref` would be `Iterator<Foo> for Vec<int>`.
-         *
-         * Note that this checking occurs *after* the impl has
-         * selected, because these output type parameters should not
-         * affect the selection of the impl. Therefore, if there is a
-         * mismatch, we report an error to the user.
-         */
-
         let origin = infer::RelateOutputImplTypes(obligation_cause.span);
 
         let obligation_trait_ref = obligation_trait_ref.clone();
@@ -2019,11 +1954,8 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
         }
     }
 
+    /// Returns set of all impls for a given trait.
     fn all_impls(&self, trait_def_id: ast::DefId) -> Vec<ast::DefId> {
-        /*!
-         * Returns set of all impls for a given trait.
-         */
-
         ty::populate_implementations_for_trait_if_necessary(self.tcx(),
                                                             trait_def_id);
         match self.tcx().trait_impls.borrow().get(&trait_def_id) {
diff --git a/src/librustc/middle/traits/util.rs b/src/librustc/middle/traits/util.rs
index ec49d5010562b..b9e694ff4e2b2 100644
--- a/src/librustc/middle/traits/util.rs
+++ b/src/librustc/middle/traits/util.rs
@@ -42,22 +42,18 @@ pub fn supertraits<'cx, 'tcx>(tcx: &'cx ty::ctxt<'tcx>,
                               trait_ref: Rc<ty::TraitRef<'tcx>>)
                               -> Supertraits<'cx, 'tcx>
 {
-    /*!
-     * Returns an iterator over the trait reference `T` and all of its
-     * supertrait references. May contain duplicates. In general
-     * the ordering is not defined.
-     *
-     * Example:
-     *
-     * ```
-     * trait Foo { ... }
-     * trait Bar : Foo { ... }
-     * trait Baz : Bar+Foo { ... }
-     * ```
-     *
-     * `supertraits(Baz)` yields `[Baz, Bar, Foo, Foo]` in some order.
-     */
-
+    /// Returns an iterator over the trait reference `T` and all of its supertrait references. May
+    /// contain duplicates. In general the ordering is not defined.
+    ///
+    /// Example:
+    ///
+    /// ```
+    /// trait Foo { ... }
+    /// trait Bar : Foo { ... }
+    /// trait Baz : Bar+Foo { ... }
+    /// ```
+    ///
+    /// `supertraits(Baz)` yields `[Baz, Bar, Foo, Foo]` in some order.
     transitive_bounds(tcx, &[trait_ref])
 }
 
@@ -97,12 +93,8 @@ impl<'cx, 'tcx> Supertraits<'cx, 'tcx> {
         self.stack.push(entry);
     }
 
+    /// Returns the path taken through the trait supertraits to reach the current point.
     pub fn indices(&self) -> Vec<uint> {
-        /*!
-         * Returns the path taken through the trait supertraits to
-         * reach the current point.
-         */
-
         self.stack.iter().map(|e| e.position).collect()
     }
 }
@@ -171,6 +163,7 @@ impl<'tcx> fmt::Show for VtableParamData<'tcx> {
     }
 }
 
+/// See `super::obligations_for_generics`
 pub fn obligations_for_generics<'tcx>(tcx: &ty::ctxt<'tcx>,
                                       cause: ObligationCause<'tcx>,
                                       recursion_depth: uint,
@@ -178,7 +171,6 @@ pub fn obligations_for_generics<'tcx>(tcx: &ty::ctxt<'tcx>,
                                       type_substs: &VecPerParamSpace<Ty<'tcx>>)
                                       -> VecPerParamSpace<Obligation<'tcx>>
 {
-    /*! See `super::obligations_for_generics` */
 
     debug!("obligations_for_generics(generic_bounds={}, type_substs={})",
            generic_bounds.repr(tcx), type_substs.repr(tcx));
@@ -272,20 +264,15 @@ pub fn obligation_for_builtin_bound<'tcx>(
     }
 }
 
+/// Starting from a caller obligation `caller_bound` (which has coordinates `space`/`i` in the list
+/// of caller obligations), search through the trait and supertraits to find one where `test(d)` is
+/// true, where `d` is the def-id of the trait/supertrait. If any is found, return `Some(p)` where
+/// `p` is the path to that trait/supertrait. Else `None`.
 pub fn search_trait_and_supertraits_from_bound<'tcx>(tcx: &ty::ctxt<'tcx>,
                                                      caller_bound: Rc<ty::TraitRef<'tcx>>,
                                                      test: |ast::DefId| -> bool)
                                                      -> Option<VtableParamData<'tcx>>
 {
-    /*!
-     * Starting from a caller obligation `caller_bound` (which has
-     * coordinates `space`/`i` in the list of caller obligations),
-     * search through the trait and supertraits to find one where
-     * `test(d)` is true, where `d` is the def-id of the
-     * trait/supertrait.  If any is found, return `Some(p)` where `p`
-     * is the path to that trait/supertrait. Else `None`.
-     */
-
     for bound in transitive_bounds(tcx, &[caller_bound]) {
         if test(bound.def_id) {
             let vtable_param = VtableParamData { bound: bound };
diff --git a/src/librustc/middle/ty.rs b/src/librustc/middle/ty.rs
index 2c8465e62d7c3..b79bce62f0b77 100644
--- a/src/librustc/middle/ty.rs
+++ b/src/librustc/middle/ty.rs
@@ -671,39 +671,29 @@ pub fn type_has_late_bound_regions(ty: Ty) -> bool {
     ty.flags.intersects(HAS_RE_LATE_BOUND)
 }
 
+/// An "escaping region" is a bound region whose binder is not part of `t`.
+///
+/// So, for example, consider a type like the following, which has two binders:
+///
+///    for<'a> fn(x: for<'b> fn(&'a int, &'b int))
+///    ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ outer scope
+///                  ^~~~~~~~~~~~~~~~~~~~~~~~~~~~  inner scope
+///
+/// This type has *bound regions* (`'a`, `'b`), but it does not have escaping regions, because the
+/// binders of both `'a` and `'b` are part of the type itself. However, if we consider the *inner
+/// fn type*, that type has an escaping region: `'a`.
+///
+/// Note that what I'm calling an "escaping region" is often just called a "free region". However,
+/// we already use the term "free region". It refers to the regions that we use to represent bound
+/// regions on a fn definition while we are typechecking its body.
+///
+/// To clarify, conceptually there is no particular difference between an "escaping" region and a
+/// "free" region. However, there is a big difference in practice. Basically, when "entering" a
+/// binding level, one is generally required to do some sort of processing to a bound region, such
+/// as replacing it with a fresh/skolemized region, or making an entry in the environment to
+/// represent the scope to which it is attached, etc. An escaping region represents a bound region
+/// for which this processing has not yet been done.
 pub fn type_has_escaping_regions(ty: Ty) -> bool {
-    /*!
-     * An "escaping region" is a bound region whose binder is not part of `t`.
-     *
-     * So, for example, consider a type like the following, which has two
-     * binders:
-     *
-     *    for<'a> fn(x: for<'b> fn(&'a int, &'b int))
-     *    ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ outer scope
-     *                  ^~~~~~~~~~~~~~~~~~~~~~~~~~~~  inner scope
-     *
-     * This type has *bound regions* (`'a`, `'b`), but it does not
-     * have escaping regions, because the binders of both `'a` and
-     * `'b` are part of the type itself. However, if we consider the
-     * *inner fn type*, that type has an escaping region: `'a`.
-     *
-     * Note that what I'm calling an "escaping region" is often just
-     * called a "free region". However, we already use the term "free
-     * region". It refers to the regions that we use to represent
-     * bound regions on a fn definition while we are typechecking its
-     * body.
-     *
-     * To clarify, conceptually there is no particular difference
-     * between an "escaping" region and a "free" region. However,
-     * there is a big difference in practice. Basically, when
-     * "entering" a binding level, one is generally required to do
-     * some sort of processing to a bound region, such as replacing it
-     * with a fresh/skolemized region, or making an entry in the
-     * environment to represent the scope to which it is attached,
-     * etc. An escaping region represents a bound region for which
-     * this processing has not yet been done.
-     */
-
     type_escapes_depth(ty, 0)
 }
 
@@ -1248,11 +1238,8 @@ pub fn all_builtin_bounds() -> BuiltinBounds {
     set
 }
 
+/// An existential bound that does not implement any traits.
 pub fn region_existential_bound(r: ty::Region) -> ExistentialBounds {
-    /*!
-     * An existential bound that does not implement any traits.
-     */
-
     ty::ExistentialBounds { region_bound: r,
                             builtin_bounds: empty_builtin_bounds() }
 }
@@ -1834,12 +1821,9 @@ impl FlagComputation {
         }
     }
 
+    /// Adds the flags/depth from a set of types that appear within the current type, but within a
+    /// region binder.
     fn add_bound_computation(&mut self, computation: &FlagComputation) {
-        /*!
-         * Adds the flags/depth from a set of types that appear within
-         * the current type, but within a region binder.
-         */
-
         self.add_flags(computation.flags);
 
         // The types that contributed to `computation` occured within
@@ -2575,38 +2559,26 @@ impl TypeContents {
         self.intersects(TC::NeedsDrop)
     }
 
+    /// Includes only those bits that still apply when indirected through a `Box` pointer
     pub fn owned_pointer(&self) -> TypeContents {
-        /*!
-         * Includes only those bits that still apply
-         * when indirected through a `Box` pointer
-         */
         TC::OwnsOwned | (
             *self & (TC::OwnsAll | TC::ReachesAll))
     }
 
+    /// Includes only those bits that still apply when indirected through a reference (`&`)
     pub fn reference(&self, bits: TypeContents) -> TypeContents {
-        /*!
-         * Includes only those bits that still apply
-         * when indirected through a reference (`&`)
-         */
         bits | (
             *self & TC::ReachesAll)
     }
 
+    /// Includes only those bits that still apply when indirected through a managed pointer (`@`)
     pub fn managed_pointer(&self) -> TypeContents {
-        /*!
-         * Includes only those bits that still apply
-         * when indirected through a managed pointer (`@`)
-         */
         TC::Managed | (
             *self & TC::ReachesAll)
     }
 
+    /// Includes only those bits that still apply when indirected through an unsafe pointer (`*`)
     pub fn unsafe_pointer(&self) -> TypeContents {
-        /*!
-         * Includes only those bits that still apply
-         * when indirected through an unsafe pointer (`*`)
-         */
         *self & TC::ReachesAll
     }
 
@@ -2883,14 +2855,10 @@ pub fn type_contents<'tcx>(cx: &ctxt<'tcx>, ty: Ty<'tcx>) -> TypeContents {
         }
     }
 
+    /// Type contents due to containing a reference with the region `region` and borrow kind `bk`
     fn borrowed_contents(region: ty::Region,
                          mutbl: ast::Mutability)
                          -> TypeContents {
-        /*!
-         * Type contents due to containing a reference
-         * with the region `region` and borrow kind `bk`
-         */
-
         let b = match mutbl {
             ast::MutMutable => TC::ReachesMutable | TC::OwnsAffine,
             ast::MutImmutable => TC::None,
@@ -3648,20 +3616,16 @@ pub fn expr_ty_opt<'tcx>(cx: &ctxt<'tcx>, expr: &ast::Expr) -> Option<Ty<'tcx>>
     return node_id_to_type_opt(cx, expr.id);
 }
 
+/// Returns the type of `expr`, considering any `AutoAdjustment`
+/// entry recorded for that expression.
+///
+/// It would almost certainly be better to store the adjusted ty in with
+/// the `AutoAdjustment`, but I opted not to do this because it would
+/// require serializing and deserializing the type and, although that's not
+/// hard to do, I just hate that code so much I didn't want to touch it
+/// unless it was to fix it properly, which seemed a distraction from the
+/// task at hand! -nmatsakis
 pub fn expr_ty_adjusted<'tcx>(cx: &ctxt<'tcx>, expr: &ast::Expr) -> Ty<'tcx> {
-    /*!
-     *
-     * Returns the type of `expr`, considering any `AutoAdjustment`
-     * entry recorded for that expression.
-     *
-     * It would almost certainly be better to store the adjusted ty in with
-     * the `AutoAdjustment`, but I opted not to do this because it would
-     * require serializing and deserializing the type and, although that's not
-     * hard to do, I just hate that code so much I didn't want to touch it
-     * unless it was to fix it properly, which seemed a distraction from the
-     * task at hand! -nmatsakis
-     */
-
     adjust_ty(cx, expr.span, expr.id, expr_ty(cx, expr),
               cx.adjustments.borrow().get(&expr.id),
               |method_call| cx.method_map.borrow().get(&method_call).map(|method| method.ty))
@@ -3707,6 +3671,7 @@ pub fn local_var_name_str(cx: &ctxt, id: NodeId) -> InternedString {
     }
 }
 
+/// See `expr_ty_adjusted`
 pub fn adjust_ty<'tcx>(cx: &ctxt<'tcx>,
                        span: Span,
                        expr_id: ast::NodeId,
@@ -3714,7 +3679,6 @@ pub fn adjust_ty<'tcx>(cx: &ctxt<'tcx>,
                        adjustment: Option<&AutoAdjustment<'tcx>>,
                        method_type: |typeck::MethodCall| -> Option<Ty<'tcx>>)
                        -> Ty<'tcx> {
-    /*! See `expr_ty_adjusted` */
 
     match unadjusted_ty.sty {
         ty_err => return unadjusted_ty,
@@ -4128,16 +4092,11 @@ pub fn ty_sort_string<'tcx>(cx: &ctxt<'tcx>, ty: Ty<'tcx>) -> String {
     }
 }
 
+/// Explains the source of a type err in a short, human readable way. This is meant to be placed
+/// in parentheses after some larger message. You should also invoke `note_and_explain_type_err()`
+/// afterwards to present additional details, particularly when it comes to lifetime-related
+/// errors.
 pub fn type_err_to_str<'tcx>(cx: &ctxt<'tcx>, err: &type_err<'tcx>) -> String {
-    /*!
-     *
-     * Explains the source of a type err in a short,
-     * human readable way.  This is meant to be placed in
-     * parentheses after some larger message.  You should
-     * also invoke `note_and_explain_type_err()` afterwards
-     * to present additional details, particularly when
-     * it comes to lifetime-related errors. */
-
     fn tstore_to_closure(s: &TraitStore) -> String {
         match s {
             &UniqTraitStore => "proc".to_string(),
@@ -4352,21 +4311,16 @@ pub fn provided_trait_methods<'tcx>(cx: &ctxt<'tcx>, id: ast::DefId)
     }
 }
 
+/// Helper for looking things up in the various maps that are populated during typeck::collect
+/// (e.g., `cx.impl_or_trait_items`, `cx.tcache`, etc).  All of these share the pattern that if the
+/// id is local, it should have been loaded into the map by the `typeck::collect` phase.  If the
+/// def-id is external, then we have to go consult the crate loading code (and cache the result for
+/// the future).
 fn lookup_locally_or_in_crate_store<V:Clone>(
                                     descr: &str,
                                     def_id: ast::DefId,
                                     map: &mut DefIdMap<V>,
                                     load_external: || -> V) -> V {
-    /*!
-     * Helper for looking things up in the various maps
-     * that are populated during typeck::collect (e.g.,
-     * `cx.impl_or_trait_items`, `cx.tcache`, etc).  All of these share
-     * the pattern that if the id is local, it should have
-     * been loaded into the map by the `typeck::collect` phase.
-     * If the def-id is external, then we have to go consult
-     * the crate loading code (and cache the result for the future).
-     */
-
     match map.get(&def_id).cloned() {
         Some(v) => { return v; }
         None => { }
@@ -5238,19 +5192,16 @@ pub fn each_bound_trait_and_supertraits<'tcx>(tcx: &ctxt<'tcx>,
     return true;
 }
 
+/// Given a type which must meet the builtin bounds and trait bounds, returns a set of lifetimes
+/// which the type must outlive.
+///
+/// Requires that trait definitions have been processed.
 pub fn required_region_bounds<'tcx>(tcx: &ctxt<'tcx>,
                                     region_bounds: &[ty::Region],
                                     builtin_bounds: BuiltinBounds,
                                     trait_bounds: &[Rc<TraitRef<'tcx>>])
                                     -> Vec<ty::Region>
 {
-    /*!
-     * Given a type which must meet the builtin bounds and trait
-     * bounds, returns a set of lifetimes which the type must outlive.
-     *
-     * Requires that trait definitions have been processed.
-     */
-
     let mut all_bounds = Vec::new();
 
     debug!("required_region_bounds(builtin_bounds={}, trait_bounds={})",
@@ -5636,13 +5587,9 @@ impl Variance {
     }
 }
 
+/// Construct a parameter environment suitable for static contexts or other contexts where there
+/// are no free type/lifetime parameters in scope.
 pub fn empty_parameter_environment<'tcx>() -> ParameterEnvironment<'tcx> {
-    /*!
-     * Construct a parameter environment suitable for static contexts
-     * or other contexts where there are no free type/lifetime
-     * parameters in scope.
-     */
-
     ty::ParameterEnvironment { free_substs: Substs::empty(),
                                bounds: VecPerParamSpace::empty(),
                                caller_obligations: VecPerParamSpace::empty(),
@@ -5650,6 +5597,7 @@ pub fn empty_parameter_environment<'tcx>() -> ParameterEnvironment<'tcx> {
                                selection_cache: traits::SelectionCache::new(), }
 }
 
+/// See `ParameterEnvironment` struct def'n for details
 pub fn construct_parameter_environment<'tcx>(
     tcx: &ctxt<'tcx>,
     span: Span,
@@ -5657,7 +5605,6 @@ pub fn construct_parameter_environment<'tcx>(
     free_id: ast::NodeId)
     -> ParameterEnvironment<'tcx>
 {
-    /*! See `ParameterEnvironment` struct def'n for details */
 
     //
     // Construct the free substs.
@@ -5786,15 +5733,11 @@ impl BorrowKind {
         }
     }
 
+    /// Returns a mutability `m` such that an `&m T` pointer could be used to obtain this borrow
+    /// kind. Because borrow kinds are richer than mutabilities, we sometimes have to pick a
+    /// mutability that is stronger than necessary so that it at least *would permit* the borrow in
+    /// question.
     pub fn to_mutbl_lossy(self) -> ast::Mutability {
-        /*!
-         * Returns a mutability `m` such that an `&m T` pointer could
-         * be used to obtain this borrow kind. Because borrow kinds
-         * are richer than mutabilities, we sometimes have to pick a
-         * mutability that is stronger than necessary so that it at
-         * least *would permit* the borrow in question.
-         */
-
         match self {
             MutBorrow => ast::MutMutable,
             ImmBorrow => ast::MutImmutable,
@@ -5959,6 +5902,8 @@ impl<'tcx> AutoDerefRef<'tcx> {
     }
 }
 
+/// Replace any late-bound regions bound in `value` with free variants attached to scope-id
+/// `scope_id`.
 pub fn liberate_late_bound_regions<'tcx, HR>(
     tcx: &ty::ctxt<'tcx>,
     scope: region::CodeExtent,
@@ -5966,31 +5911,23 @@ pub fn liberate_late_bound_regions<'tcx, HR>(
     -> HR
     where HR : HigherRankedFoldable<'tcx>
 {
-    /*!
-     * Replace any late-bound regions bound in `value` with free variants
-     * attached to scope-id `scope_id`.
-     */
-
     replace_late_bound_regions(
         tcx, value,
         |br, _| ty::ReFree(ty::FreeRegion{scope: scope, bound_region: br})).0
 }
 
+/// Replace any late-bound regions bound in `value` with `'static`. Useful in trans but also
+/// method lookup and a few other places where precise region relationships are not required.
 pub fn erase_late_bound_regions<'tcx, HR>(
     tcx: &ty::ctxt<'tcx>,
     value: &HR)
     -> HR
     where HR : HigherRankedFoldable<'tcx>
 {
-    /*!
-     * Replace any late-bound regions bound in `value` with `'static`.
-     * Useful in trans but also method lookup and a few other places
-     * where precise region relationships are not required.
-     */
-
     replace_late_bound_regions(tcx, value, |_, _| ty::ReStatic).0
 }
 
+/// Replaces the late-bound-regions in `value` that are bound by `value`.
 pub fn replace_late_bound_regions<'tcx, HR>(
     tcx: &ty::ctxt<'tcx>,
     value: &HR,
@@ -5998,10 +5935,6 @@ pub fn replace_late_bound_regions<'tcx, HR>(
     -> (HR, FnvHashMap<ty::BoundRegion,ty::Region>)
     where HR : HigherRankedFoldable<'tcx>
 {
-    /*!
-     * Replaces the late-bound-regions in `value` that are bound by `value`.
-     */
-
     debug!("replace_late_bound_regions({})", value.repr(tcx));
 
     let mut map = FnvHashMap::new();
diff --git a/src/librustc/middle/ty_fold.rs b/src/librustc/middle/ty_fold.rs
index 913919fe774f3..0d7b9b99c57e6 100644
--- a/src/librustc/middle/ty_fold.rs
+++ b/src/librustc/middle/ty_fold.rs
@@ -8,33 +8,31 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
- * Generalized type folding mechanism. The setup is a bit convoluted
- * but allows for convenient usage. Let T be an instance of some
- * "foldable type" (one which implements `TypeFoldable`) and F be an
- * instance of a "folder" (a type which implements `TypeFolder`). Then
- * the setup is intended to be:
- *
- *     T.fold_with(F) --calls--> F.fold_T(T) --calls--> super_fold_T(F, T)
- *
- * This way, when you define a new folder F, you can override
- * `fold_T()` to customize the behavior, and invoke `super_fold_T()`
- * to get the original behavior. Meanwhile, to actually fold
- * something, you can just write `T.fold_with(F)`, which is
- * convenient. (Note that `fold_with` will also transparently handle
- * things like a `Vec<T>` where T is foldable and so on.)
- *
- * In this ideal setup, the only function that actually *does*
- * anything is `super_fold_T`, which traverses the type `T`. Moreover,
- * `super_fold_T` should only ever call `T.fold_with()`.
- *
- * In some cases, we follow a degenerate pattern where we do not have
- * a `fold_T` nor `super_fold_T` method. Instead, `T.fold_with`
- * traverses the structure directly. This is suboptimal because the
- * behavior cannot be overriden, but it's much less work to implement.
- * If you ever *do* need an override that doesn't exist, it's not hard
- * to convert the degenerate pattern into the proper thing.
- */
+//! Generalized type folding mechanism. The setup is a bit convoluted
+//! but allows for convenient usage. Let T be an instance of some
+//! "foldable type" (one which implements `TypeFoldable`) and F be an
+//! instance of a "folder" (a type which implements `TypeFolder`). Then
+//! the setup is intended to be:
+//!
+//!     T.fold_with(F) --calls--> F.fold_T(T) --calls--> super_fold_T(F, T)
+//!
+//! This way, when you define a new folder F, you can override
+//! `fold_T()` to customize the behavior, and invoke `super_fold_T()`
+//! to get the original behavior. Meanwhile, to actually fold
+//! something, you can just write `T.fold_with(F)`, which is
+//! convenient. (Note that `fold_with` will also transparently handle
+//! things like a `Vec<T>` where T is foldable and so on.)
+//!
+//! In this ideal setup, the only function that actually *does*
+//! anything is `super_fold_T`, which traverses the type `T`. Moreover,
+//! `super_fold_T` should only ever call `T.fold_with()`.
+//!
+//! In some cases, we follow a degenerate pattern where we do not have
+//! a `fold_T` nor `super_fold_T` method. Instead, `T.fold_with`
+//! traverses the structure directly. This is suboptimal because the
+//! behavior cannot be overridden, but it's much less work to implement.
+//! If you ever *do* need an override that doesn't exist, it's not hard
+//! to convert the degenerate pattern into the proper thing.
 
 use middle::subst;
 use middle::subst::VecPerParamSpace;
diff --git a/src/librustc/middle/typeck/astconv.rs b/src/librustc/middle/typeck/astconv.rs
index fd5b1bd4793b5..5dfe3fc3a58cd 100644
--- a/src/librustc/middle/typeck/astconv.rs
+++ b/src/librustc/middle/typeck/astconv.rs
@@ -8,46 +8,44 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
- * Conversion from AST representation of types to the ty.rs
- * representation.  The main routine here is `ast_ty_to_ty()`: each use
- * is parameterized by an instance of `AstConv` and a `RegionScope`.
- *
- * The parameterization of `ast_ty_to_ty()` is because it behaves
- * somewhat differently during the collect and check phases,
- * particularly with respect to looking up the types of top-level
- * items.  In the collect phase, the crate context is used as the
- * `AstConv` instance; in this phase, the `get_item_ty()` function
- * triggers a recursive call to `ty_of_item()`  (note that
- * `ast_ty_to_ty()` will detect recursive types and report an error).
- * In the check phase, when the FnCtxt is used as the `AstConv`,
- * `get_item_ty()` just looks up the item type in `tcx.tcache`.
- *
- * The `RegionScope` trait controls what happens when the user does
- * not specify a region in some location where a region is required
- * (e.g., if the user writes `&Foo` as a type rather than `&'a Foo`).
- * See the `rscope` module for more details.
- *
- * Unlike the `AstConv` trait, the region scope can change as we descend
- * the type.  This is to accommodate the fact that (a) fn types are binding
- * scopes and (b) the default region may change.  To understand case (a),
- * consider something like:
- *
- *   type foo = { x: &a.int, y: |&a.int| }
- *
- * The type of `x` is an error because there is no region `a` in scope.
- * In the type of `y`, however, region `a` is considered a bound region
- * as it does not already appear in scope.
- *
- * Case (b) says that if you have a type:
- *   type foo<'a> = ...;
- *   type bar = fn(&foo, &a.foo)
- * The fully expanded version of type bar is:
- *   type bar = fn(&'foo &, &a.foo<'a>)
- * Note that the self region for the `foo` defaulted to `&` in the first
- * case but `&a` in the second.  Basically, defaults that appear inside
- * an rptr (`&r.T`) use the region `r` that appears in the rptr.
- */
+//! Conversion from AST representation of types to the ty.rs
+//! representation.  The main routine here is `ast_ty_to_ty()`: each use
+//! is parameterized by an instance of `AstConv` and a `RegionScope`.
+//!
+//! The parameterization of `ast_ty_to_ty()` is because it behaves
+//! somewhat differently during the collect and check phases,
+//! particularly with respect to looking up the types of top-level
+//! items.  In the collect phase, the crate context is used as the
+//! `AstConv` instance; in this phase, the `get_item_ty()` function
+//! triggers a recursive call to `ty_of_item()`  (note that
+//! `ast_ty_to_ty()` will detect recursive types and report an error).
+//! In the check phase, when the FnCtxt is used as the `AstConv`,
+//! `get_item_ty()` just looks up the item type in `tcx.tcache`.
+//!
+//! The `RegionScope` trait controls what happens when the user does
+//! not specify a region in some location where a region is required
+//! (e.g., if the user writes `&Foo` as a type rather than `&'a Foo`).
+//! See the `rscope` module for more details.
+//!
+//! Unlike the `AstConv` trait, the region scope can change as we descend
+//! the type.  This is to accommodate the fact that (a) fn types are binding
+//! scopes and (b) the default region may change.  To understand case (a),
+//! consider something like:
+//!
+//!   type foo = { x: &a.int, y: |&a.int| }
+//!
+//! The type of `x` is an error because there is no region `a` in scope.
+//! In the type of `y`, however, region `a` is considered a bound region
+//! as it does not already appear in scope.
+//!
+//! Case (b) says that if you have a type:
+//!   type foo<'a> = ...;
+//!   type bar = fn(&foo, &a.foo)
+//! The fully expanded version of type bar is:
+//!   type bar = fn(&'foo &, &a.foo<'a>)
+//! Note that the self region for the `foo` defaulted to `&` in the first
+//! case but `&a` in the second.  Basically, defaults that appear inside
+//! an rptr (`&r.T`) use the region `r` that appears in the rptr.
 use middle::const_eval;
 use middle::def;
 use middle::resolve_lifetime as rl;
@@ -201,6 +199,8 @@ pub fn opt_ast_region_to_region<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
     r
 }
 
+/// Given a path `path` that refers to an item `I` with the declared generics `decl_generics`,
+/// returns an appropriate set of substitutions for this particular reference to `I`.
 fn ast_path_substs_for_ty<'tcx,AC,RS>(
     this: &AC,
     rscope: &RS,
@@ -211,12 +211,6 @@ fn ast_path_substs_for_ty<'tcx,AC,RS>(
     -> Substs<'tcx>
     where AC: AstConv<'tcx>, RS: RegionScope
 {
-    /*!
-     * Given a path `path` that refers to an item `I` with the
-     * declared generics `decl_generics`, returns an appropriate
-     * set of substitutions for this particular reference to `I`.
-     */
-
     let tcx = this.tcx();
 
     // ast_path_substs() is only called to convert paths that are
@@ -422,6 +416,9 @@ pub fn instantiate_poly_trait_ref<'tcx,AC,RS>(
     instantiate_trait_ref(this, rscope, &ast_trait_ref.trait_ref, self_ty)
 }
 
+/// Instantiates the path for the given trait reference, assuming that it's bound to a valid trait
+/// type. Returns the def_id for the defining trait. Fails if the type is a type other than a trait
+/// type.
 pub fn instantiate_trait_ref<'tcx,AC,RS>(this: &AC,
                                          rscope: &RS,
                                          ast_trait_ref: &ast::TraitRef,
@@ -430,12 +427,6 @@ pub fn instantiate_trait_ref<'tcx,AC,RS>(this: &AC,
                                          where AC: AstConv<'tcx>,
                                                RS: RegionScope
 {
-    /*!
-     * Instantiates the path for the given trait reference, assuming that
-     * it's bound to a valid trait type. Returns the def_id for the defining
-     * trait. Fails if the type is a type other than a trait type.
-     */
-
     match lookup_def_tcx(this.tcx(),
                          ast_trait_ref.path.span,
                          ast_trait_ref.ref_id) {
@@ -1318,6 +1309,10 @@ pub fn ty_of_closure<'tcx, AC: AstConv<'tcx>>(
     }
 }
 
+/// Given an existential type like `Foo+'a+Bar`, this routine converts the `'a` and `Bar` into an
+/// `ExistentialBounds` struct. The `main_trait_refs` argument specifies the `Foo` -- it is absent
+/// for closures. Eventually this should all be normalized, I think, so that there is no "main
+/// trait ref" and instead we just have a flat list of bounds as the existential type.
 pub fn conv_existential_bounds<'tcx, AC: AstConv<'tcx>, RS:RegionScope>(
     this: &AC,
     rscope: &RS,
@@ -1326,16 +1321,6 @@ pub fn conv_existential_bounds<'tcx, AC: AstConv<'tcx>, RS:RegionScope>(
     ast_bounds: &[ast::TyParamBound])
     -> ty::ExistentialBounds
 {
-    /*!
-     * Given an existential type like `Foo+'a+Bar`, this routine
-     * converts the `'a` and `Bar` intos an `ExistentialBounds`
-     * struct. The `main_trait_refs` argument specifies the `Foo` --
-     * it is absent for closures. Eventually this should all be
-     * normalized, I think, so that there is no "main trait ref" and
-     * instead we just have a flat list of bounds as the existential
-     * type.
-     */
-
     let ast_bound_refs: Vec<&ast::TyParamBound> =
         ast_bounds.iter().collect();
 
@@ -1432,6 +1417,10 @@ pub fn conv_existential_bounds_from_partitioned_bounds<'tcx, AC, RS>(
     }
 }
 
+/// Given the bounds on a type parameter / existential type, determines what single region bound
+/// (if any) we can use to summarize this type. The basic idea is that we will use the bound the
+/// user provided, if they provided one, and otherwise search the supertypes of trait bounds for
+/// region bounds. It may be that we can derive no bound at all, in which case we return `None`.
 pub fn compute_opt_region_bound<'tcx>(tcx: &ty::ctxt<'tcx>,
                                       span: Span,
                                       builtin_bounds: ty::BuiltinBounds,
@@ -1439,16 +1428,6 @@ pub fn compute_opt_region_bound<'tcx>(tcx: &ty::ctxt<'tcx>,
                                       trait_bounds: &[Rc<ty::TraitRef<'tcx>>])
                                       -> Option<ty::Region>
 {
-    /*!
-     * Given the bounds on a type parameter / existential type,
-     * determines what single region bound (if any) we can use to
-     * summarize this type. The basic idea is that we will use the
-     * bound the user provided, if they provided one, and otherwise
-     * search the supertypes of trait bounds for region bounds. It may
-     * be that we can derive no bound at all, in which case we return
-     * `None`.
-     */
-
     if region_bounds.len() > 1 {
         tcx.sess.span_err(
             region_bounds[1].span,
@@ -1495,6 +1474,9 @@ pub fn compute_opt_region_bound<'tcx>(tcx: &ty::ctxt<'tcx>,
     return Some(r);
 }
 
+/// A version of `compute_opt_region_bound` for use where some region bound is required
+/// (existential types, basically). Reports an error if no region bound can be derived and we are
+/// in an `rscope` that does not provide a default.
 fn compute_region_bound<'tcx, AC: AstConv<'tcx>, RS:RegionScope>(
     this: &AC,
     rscope: &RS,
@@ -1504,13 +1486,6 @@ fn compute_region_bound<'tcx, AC: AstConv<'tcx>, RS:RegionScope>(
     trait_bounds: &[Rc<ty::TraitRef<'tcx>>])
     -> ty::Region
 {
-    /*!
-     * A version of `compute_opt_region_bound` for use where some
-     * region bound is required (existential types,
-     * basically). Reports an error if no region bound can be derived
-     * and we are in an `rscope` that does not provide a default.
-     */
-
     match compute_opt_region_bound(this.tcx(), span, builtin_bounds,
                                    region_bounds, trait_bounds) {
         Some(r) => r,
@@ -1534,17 +1509,13 @@ pub struct PartitionedBounds<'a> {
     pub region_bounds: Vec<&'a ast::Lifetime>,
 }
 
+/// Divides a list of bounds from the AST into three groups: builtin bounds (Copy, Sized etc),
+/// general trait bounds, and region bounds.
 pub fn partition_bounds<'a>(tcx: &ty::ctxt,
                             _span: Span,
                             ast_bounds: &'a [&ast::TyParamBound])
                             -> PartitionedBounds<'a>
 {
-    /*!
-     * Divides a list of bounds from the AST into three groups:
-     * builtin bounds (Copy, Sized etc), general trait bounds,
-     * and region bounds.
-     */
-
     let mut builtin_bounds = ty::empty_builtin_bounds();
     let mut region_bounds = Vec::new();
     let mut trait_bounds = Vec::new();
diff --git a/src/librustc/middle/typeck/check/closure.rs b/src/librustc/middle/typeck/check/closure.rs
index 51636f00c391a..0a93b3a5ec7dc 100644
--- a/src/librustc/middle/typeck/check/closure.rs
+++ b/src/librustc/middle/typeck/check/closure.rs
@@ -8,9 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
- * Code for type-checking closure expressions.
- */
+//! Code for type-checking closure expressions.
 
 use super::check_fn;
 use super::{Expectation, ExpectCastableToType, ExpectHasType, NoExpectation};
diff --git a/src/librustc/middle/typeck/check/method/confirm.rs b/src/librustc/middle/typeck/check/method/confirm.rs
index 5bcd96e66efc2..e866627be3d29 100644
--- a/src/librustc/middle/typeck/check/method/confirm.rs
+++ b/src/librustc/middle/typeck/check/method/confirm.rs
@@ -189,22 +189,17 @@ impl<'a,'tcx> ConfirmContext<'a,'tcx> {
     ///////////////////////////////////////////////////////////////////////////
     //
 
+    /// Returns a set of substitutions for the method *receiver* where all type and region
+    /// parameters are instantiated with fresh variables. This substitution does not include any
+    /// parameters declared on the method itself.
+    ///
+    /// Note that this substitution may include late-bound regions from the impl level. If so,
+    /// these are instantiated later in the `instantiate_method_sig` routine.
     fn fresh_receiver_substs(&mut self,
                              self_ty: Ty<'tcx>,
                              pick: &probe::Pick<'tcx>)
                              -> (subst::Substs<'tcx>, MethodOrigin<'tcx>)
     {
-        /*!
-         * Returns a set of substitutions for the method *receiver*
-         * where all type and region parameters are instantiated with
-         * fresh variables. This substitution does not include any
-         * parameters declared on the method itself.
-         *
-         * Note that this substitution may include late-bound regions
-         * from the impl level. If so, these are instantiated later in
-         * the `instantiate_method_sig` routine.
-         */
-
         match pick.kind {
             probe::InherentImplPick(impl_def_id) => {
                 assert!(ty::impl_trait_ref(self.tcx(), impl_def_id).is_none(),
@@ -478,14 +473,11 @@ impl<'a,'tcx> ConfirmContext<'a,'tcx> {
     ///////////////////////////////////////////////////////////////////////////
     // RECONCILIATION
 
+    /// When we select a method with an `&mut self` receiver, we have to go convert any
+    /// auto-derefs, indices, etc from `Deref` and `Index` into `DerefMut` and `IndexMut`
+    /// respectively.
     fn fixup_derefs_on_method_receiver_if_necessary(&self,
                                                     method_callee: &MethodCallee) {
-        /*!
-         * When we select a method with an `&mut self` receiver, we have to go
-         * convert any auto-derefs, indices, etc from `Deref` and `Index` into
-         * `DerefMut` and `IndexMut` respectively.
-         */
-
         let sig = match method_callee.ty.sty {
             ty::ty_bare_fn(ref f) => f.sig.clone(),
             ty::ty_closure(ref f) => f.sig.clone(),
diff --git a/src/librustc/middle/typeck/check/method/doc.rs b/src/librustc/middle/typeck/check/method/doc.rs
index 8c691e02ca9d0..6129e38e39c12 100644
--- a/src/librustc/middle/typeck/check/method/doc.rs
+++ b/src/librustc/middle/typeck/check/method/doc.rs
@@ -8,119 +8,114 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-
-# Method lookup
-
-Method lookup can be rather complex due to the interaction of a number
-of factors, such as self types, autoderef, trait lookup, etc. This
-file provides an overview of the process. More detailed notes are in
-the code itself, naturally.
-
-One way to think of method lookup is that we convert an expression of
-the form:
-
-    receiver.method(...)
-
-into a more explicit UFCS form:
-
-    Trait::method(ADJ(receiver), ...) // for a trait call
-    ReceiverType::method(ADJ(receiver), ...) // for an inherent method call
-
-Here `ADJ` is some kind of adjustment, which is typically a series of
-autoderefs and then possibly an autoref (e.g., `&**receiver`). However
-we sometimes do other adjustments and coercions along the way, in
-particular unsizing (e.g., converting from `[T, ..n]` to `[T]`).
-
-## The Two Phases
-
-Method lookup is divided into two major phases: probing (`probe.rs`)
-and confirmation (`confirm.rs`). The probe phase is when we decide
-what method to call and how to adjust the receiver. The confirmation
-phase "applies" this selection, updating the side-tables, unifying
-type variables, and otherwise doing side-effectful things.
-
-One reason for this division is to be more amenable to caching.  The
-probe phase produces a "pick" (`probe::Pick`), which is designed to be
-cacheable across method-call sites. Therefore, it does not include
-inference variables or other information.
-
-## Probe phase
-
-The probe phase (`probe.rs`) decides what method is being called and
-how to adjust the receiver.
-
-### Steps
-
-The first thing that the probe phase does is to create a series of
-*steps*. This is done by progressively dereferencing the receiver type
-until it cannot be deref'd anymore, as well as applying an optional
-"unsize" step. So if the receiver has type `Rc<Box<[T, ..3]>>`, this
-might yield:
-
-    Rc<Box<[T, ..3]>>
-    Box<[T, ..3]>
-    [T, ..3]
-    [T]
-
-### Candidate assembly
-
-We then search along those steps to create a list of *candidates*. A
-`Candidate` is a method item that might plausibly be the method being
-invoked. For each candidate, we'll derive a "transformed self type"
-that takes into account explicit self.
-
-Candidates are grouped into two kinds, inherent and extension.
-
-**Inherent candidates** are those that are derived from the
-type of the receiver itself.  So, if you have a receiver of some
-nominal type `Foo` (e.g., a struct), any methods defined within an
-impl like `impl Foo` are inherent methods.  Nothing needs to be
-imported to use an inherent method, they are associated with the type
-itself (note that inherent impls can only be defined in the same
-module as the type itself).
-
-FIXME: Inherent candidates are not always derived from impls.  If you
-have a trait object, such as a value of type `Box<ToString>`, then the
-trait methods (`to_string()`, in this case) are inherently associated
-with it. Another case is type parameters, in which case the methods of
-their bounds are inherent. However, this part of the rules is subject
-to change: when DST's "impl Trait for Trait" is complete, trait object
-dispatch could be subsumed into trait matching, and the type parameter
-behavior should be reconsidered in light of where clauses.
-
-**Extension candidates** are derived from imported traits.  If I have
-the trait `ToString` imported, and I call `to_string()` on a value of
-type `T`, then we will go off to find out whether there is an impl of
-`ToString` for `T`.  These kinds of method calls are called "extension
-methods".  They can be defined in any module, not only the one that
-defined `T`.  Furthermore, you must import the trait to call such a
-method.
-
-So, let's continue our example. Imagine that we were calling a method
-`foo` with the receiver `Rc<Box<[T, ..3]>>` and there is a trait `Foo`
-that defines it with `&self` for the type `Rc<U>` as well as a method
-on the type `Box` that defines `Foo` but with `&mut self`. Then we
-might have two candidates:
-
-    &Rc<Box<[T, ..3]>> from the impl of `Foo` for `Rc<U>` where `U=Box<T, ..3]>
-    &mut Box<[T, ..3]>> from the inherent impl on `Box<U>` where `U=[T, ..3]`
-
-### Candidate search
-
-Finally, to actually pick the method, we will search down the steps,
-trying to match the receiver type against the candidate types. At
-each step, we also consider an auto-ref and auto-mut-ref to see whether
-that makes any of the candidates match. We pick the first step where
-we find a match.
-
-In the case of our example, the first step is `Rc<Box<[T, ..3]>>`,
-which does not itself match any candidate. But when we autoref it, we
-get the type `&Rc<Box<[T, ..3]>>` which does match. We would then
-recursively consider all where-clauses that appear on the impl: if
-those match (or we cannot rule out that they do), then this is the
-method we would pick. Otherwise, we would continue down the series of
-steps.
-
-*/
-
+//! # Method lookup
+//!
+//! Method lookup can be rather complex due to the interaction of a number
+//! of factors, such as self types, autoderef, trait lookup, etc. This
+//! file provides an overview of the process. More detailed notes are in
+//! the code itself, naturally.
+//!
+//! One way to think of method lookup is that we convert an expression of
+//! the form:
+//!
+//!     receiver.method(...)
+//!
+//! into a more explicit UFCS form:
+//!
+//!     Trait::method(ADJ(receiver), ...) // for a trait call
+//!     ReceiverType::method(ADJ(receiver), ...) // for an inherent method call
+//!
+//! Here `ADJ` is some kind of adjustment, which is typically a series of
+//! autoderefs and then possibly an autoref (e.g., `&**receiver`). However
+//! we sometimes do other adjustments and coercions along the way, in
+//! particular unsizing (e.g., converting from `[T, ..n]` to `[T]`).
+//!
+//! ## The Two Phases
+//!
+//! Method lookup is divided into two major phases: probing (`probe.rs`)
+//! and confirmation (`confirm.rs`). The probe phase is when we decide
+//! what method to call and how to adjust the receiver. The confirmation
+//! phase "applies" this selection, updating the side-tables, unifying
+//! type variables, and otherwise doing side-effectful things.
+//!
+//! One reason for this division is to be more amenable to caching.  The
+//! probe phase produces a "pick" (`probe::Pick`), which is designed to be
+//! cacheable across method-call sites. Therefore, it does not include
+//! inference variables or other information.
+//!
+//! ## Probe phase
+//!
+//! The probe phase (`probe.rs`) decides what method is being called and
+//! how to adjust the receiver.
+//!
+//! ### Steps
+//!
+//! The first thing that the probe phase does is to create a series of
+//! *steps*. This is done by progressively dereferencing the receiver type
+//! until it cannot be deref'd anymore, as well as applying an optional
+//! "unsize" step. So if the receiver has type `Rc<Box<[T, ..3]>>`, this
+//! might yield:
+//!
+//!     Rc<Box<[T, ..3]>>
+//!     Box<[T, ..3]>
+//!     [T, ..3]
+//!     [T]
+//!
+//! ### Candidate assembly
+//!
+//! We then search along those steps to create a list of *candidates*. A
+//! `Candidate` is a method item that might plausibly be the method being
+//! invoked. For each candidate, we'll derive a "transformed self type"
+//! that takes into account explicit self.
+//!
+//! Candidates are grouped into two kinds, inherent and extension.
+//!
+//! **Inherent candidates** are those that are derived from the
+//! type of the receiver itself.  So, if you have a receiver of some
+//! nominal type `Foo` (e.g., a struct), any methods defined within an
+//! impl like `impl Foo` are inherent methods.  Nothing needs to be
+//! imported to use an inherent method, they are associated with the type
+//! itself (note that inherent impls can only be defined in the same
+//! module as the type itself).
+//!
+//! FIXME: Inherent candidates are not always derived from impls.  If you
+//! have a trait object, such as a value of type `Box<ToString>`, then the
+//! trait methods (`to_string()`, in this case) are inherently associated
+//! with it. Another case is type parameters, in which case the methods of
+//! their bounds are inherent. However, this part of the rules is subject
+//! to change: when DST's "impl Trait for Trait" is complete, trait object
+//! dispatch could be subsumed into trait matching, and the type parameter
+//! behavior should be reconsidered in light of where clauses.
+//!
+//! **Extension candidates** are derived from imported traits.  If I have
+//! the trait `ToString` imported, and I call `to_string()` on a value of
+//! type `T`, then we will go off to find out whether there is an impl of
+//! `ToString` for `T`.  These kinds of method calls are called "extension
+//! methods".  They can be defined in any module, not only the one that
+//! defined `T`.  Furthermore, you must import the trait to call such a
+//! method.
+//!
+//! So, let's continue our example. Imagine that we were calling a method
+//! `foo` with the receiver `Rc<Box<[T, ..3]>>` and there is a trait `Foo`
+//! that defines it with `&self` for the type `Rc<U>` as well as a method
+//! on the type `Box` that defines `Foo` but with `&mut self`. Then we
+//! might have two candidates:
+//!
+//!     &Rc<Box<[T, ..3]>> from the impl of `Foo` for `Rc<U>` where `U=Box<[T, ..3]>`
+//!     &mut Box<[T, ..3]> from the inherent impl on `Box<U>` where `U=[T, ..3]`
+//!
+//! ### Candidate search
+//!
+//! Finally, to actually pick the method, we will search down the steps,
+//! trying to match the receiver type against the candidate types. At
+//! each step, we also consider an auto-ref and auto-mut-ref to see whether
+//! that makes any of the candidates match. We pick the first step where
+//! we find a match.
+//!
+//! In the case of our example, the first step is `Rc<Box<[T, ..3]>>`,
+//! which does not itself match any candidate. But when we autoref it, we
+//! get the type `&Rc<Box<[T, ..3]>>` which does match. We would then
+//! recursively consider all where-clauses that appear on the impl: if
+//! those match (or we cannot rule out that they do), then this is the
+//! method we would pick. Otherwise, we would continue down the series of
+//! steps.
diff --git a/src/librustc/middle/typeck/check/method/mod.rs b/src/librustc/middle/typeck/check/method/mod.rs
index 0f4152644adaf..34c3292f8cd69 100644
--- a/src/librustc/middle/typeck/check/method/mod.rs
+++ b/src/librustc/middle/typeck/check/method/mod.rs
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*! Method lookup: the secret sauce of Rust. See `doc.rs`. */
+//! Method lookup: the secret sauce of Rust. See `doc.rs`.
 
 use middle::subst;
 use middle::subst::{Subst};
@@ -56,6 +56,7 @@ pub enum CandidateSource {
 
 type MethodIndex = uint; // just for doc purposes
 
+/// Determines whether the type `self_ty` supports a method name `method_name` or not.
 pub fn exists<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                         span: Span,
                         method_name: ast::Name,
@@ -63,10 +64,6 @@ pub fn exists<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                         call_expr_id: ast::NodeId)
                         -> bool
 {
-    /*!
-     * Determines whether the type `self_ty` supports a method name `method_name` or not.
-     */
-
     match probe::probe(fcx, span, method_name, self_ty, call_expr_id) {
         Ok(_) => true,
         Err(NoMatch(_)) => false,
@@ -74,6 +71,20 @@ pub fn exists<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
     }
 }
 
+/// Performs method lookup. If lookup is successful, it will return the callee and store an
+/// appropriate adjustment for the self-expr. In some cases it may report an error (e.g., invoking
+/// the `drop` method).
+///
+/// # Arguments
+///
+/// Given a method call like `foo.bar::<T1,...Tn>(...)`:
+///
+/// * `fcx`:                   the surrounding `FnCtxt` (!)
+/// * `span`:                  the span for the method call
+/// * `method_name`:           the name of the method being called (`bar`)
+/// * `self_ty`:               the (unadjusted) type of the self expression (`foo`)
+/// * `supplied_method_types`: the explicit method type parameters, if any (`T1..Tn`)
+/// * `self_expr`:             the self expression (`foo`)
 pub fn lookup<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                         span: Span,
                         method_name: ast::Name,
@@ -83,23 +94,6 @@ pub fn lookup<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                         self_expr: &ast::Expr)
                         -> Result<MethodCallee<'tcx>, MethodError>
 {
-    /*!
-     * Performs method lookup. If lookup is successful, it will return the callee
-     * and store an appropriate adjustment for the self-expr. In some cases it may
-     * report an error (e.g., invoking the `drop` method).
-     *
-     * # Arguments
-     *
-     * Given a method call like `foo.bar::<T1,...Tn>(...)`:
-     *
-     * - `fcx`:                   the surrounding `FnCtxt` (!)
-     * - `span`:                  the span for the method call
-     * - `method_name`:           the name of the method being called (`bar`)
-     * - `self_ty`:               the (unadjusted) type of the self expression (`foo`)
-     * - `supplied_method_types`: the explicit method type parameters, if any (`T1..Tn`)
-     * - `self_expr`:             the self expression (`foo`)
-     */
-
     debug!("lookup(method_name={}, self_ty={}, call_expr={}, self_expr={})",
            method_name.repr(fcx.tcx()),
            self_ty.repr(fcx.tcx()),
@@ -124,6 +118,15 @@ pub fn lookup_in_trait<'a, 'tcx>(fcx: &'a FnCtxt<'a, 'tcx>,
                              self_ty, opt_input_types)
 }
 
+/// `lookup_in_trait_adjusted` is used for overloaded operators. It does a very narrow slice of
+/// what the normal probe/confirm path does. In particular, it doesn't really do any probing: it
+/// simply constructs an obligation for a particular trait with the given self-type and checks
+/// whether that trait is implemented.
+///
+/// FIXME(#18741) -- It seems likely that we can consolidate some of this code with the other
+/// method-lookup code. In particular, autoderef on index is basically identical to autoderef with
+/// normal probes, except that the test also looks for built-in indexing. Also, the second half of
+/// this method is basically the same as confirmation.
 pub fn lookup_in_trait_adjusted<'a, 'tcx>(fcx: &'a FnCtxt<'a, 'tcx>,
                                           span: Span,
                                           self_expr: Option<&'a ast::Expr>,
@@ -134,21 +137,6 @@ pub fn lookup_in_trait_adjusted<'a, 'tcx>(fcx: &'a FnCtxt<'a, 'tcx>,
                                           opt_input_types: Option<Vec<Ty<'tcx>>>)
                                           -> Option<MethodCallee<'tcx>>
 {
-    /*!
-     * `lookup_in_trait_adjusted` is used for overloaded operators. It
-     * does a very narrow slice of what the normal probe/confirm path
-     * does. In particular, it doesn't really do any probing: it
-     * simply constructs an obligation for a particular trait with the
-     * given self-type and checks whether that trait is implemented.
-     *
-     * FIXME(#18741) -- It seems likely that we can consolidate some of this
-     * code with the other method-lookup code. In particular,
-     * autoderef on index is basically identical to autoderef with
-     * normal probes, except that the test also looks for built-in
-     * indexing. Also, the second half of this method is basically
-     * the same as confirmation.
-     */
-
     debug!("lookup_in_trait_adjusted(self_ty={}, self_expr={}, m_name={}, trait_def_id={})",
            self_ty.repr(fcx.tcx()),
            self_expr.repr(fcx.tcx()),
@@ -408,16 +396,13 @@ pub fn report_error<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
     }
 }
 
+/// Find method with name `method_name` defined in `trait_def_id` and return it, along with its
+/// index (or `None`, if no such method).
 fn trait_method<'tcx>(tcx: &ty::ctxt<'tcx>,
                       trait_def_id: ast::DefId,
                       method_name: ast::Name)
                       -> Option<(uint, Rc<ty::Method<'tcx>>)>
 {
-    /*!
-     * Find method with name `method_name` defined in `trait_def_id` and return it,
-     * along with its index (or `None`, if no such method).
-     */
-
     let trait_items = ty::trait_items(tcx, trait_def_id);
     trait_items
         .iter()
diff --git a/src/librustc/middle/typeck/check/method/probe.rs b/src/librustc/middle/typeck/check/method/probe.rs
index a98b4cf011d97..484d72130e61d 100644
--- a/src/librustc/middle/typeck/check/method/probe.rs
+++ b/src/librustc/middle/typeck/check/method/probe.rs
@@ -807,33 +807,26 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> {
         })
     }
 
+    /// Sometimes we get in a situation where we have multiple probes that are all impls of the
+    /// same trait, but we don't know which impl to use. In this case, since in all cases the
+    /// external interface of the method can be determined from the trait, it's ok not to decide.
+    /// We can basically just collapse all of the probes for various impls into one where-clause
+    /// probe. This will result in a pending obligation so when more type-info is available we can
+    /// make the final decision.
+    ///
+    /// Example (`src/test/run-pass/method-two-trait-defer-resolution-1.rs`):
+    ///
+    /// ```
+    /// trait Foo { ... }
+    /// impl Foo for Vec<int> { ... }
+    /// impl Foo for Vec<uint> { ... }
+    /// ```
+    ///
+    /// Now imagine the receiver is `Vec<_>`. It doesn't really matter at this time which impl we
+    /// use, so it's ok to just commit to "using the method from the trait Foo".
     fn collapse_candidates_to_trait_pick(&self,
                                          probes: &[&Candidate<'tcx>])
                                          -> Option<Pick<'tcx>> {
-        /*!
-         * Sometimes we get in a situation where we have multiple
-         * probes that are all impls of the same trait, but we don't
-         * know which impl to use. In this case, since in all cases
-         * the external interface of the method can be determined from
-         * the trait, it's ok not to decide.  We can basically just
-         * collapse all of the probes for various impls into one
-         * where-clause probe. This will result in a pending
-         * obligation so when more type-info is available we can make
-         * the final decision.
-         *
-         * Example (`src/test/run-pass/method-two-trait-defer-resolution-1.rs`):
-         *
-         * ```
-         * trait Foo { ... }
-         * impl Foo for Vec<int> { ... }
-         * impl Foo for Vec<uint> { ... }
-         * ```
-         *
-         * Now imagine the receiver is `Vec<_>`. It doesn't really
-         * matter at this time which impl we use, so it's ok to just
-         * commit to "using the method from the trait Foo".
-         */
-
         // Do all probes correspond to the same trait?
         let trait_data = match probes[0].to_trait_data() {
             Some(data) => data,
@@ -952,36 +945,27 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> {
         subst::Substs::new(type_vars, region_placeholders)
     }
 
+    /// Replace late-bound-regions bound by `value` with `'static` using
+    /// `ty::erase_late_bound_regions`.
+    ///
+    /// This is only a reasonable thing to do during the *probe* phase, not the *confirm* phase, of
+    /// method matching. It is reasonable during the probe phase because we don't consider region
+    /// relationships at all. Therefore, we can just replace all the region variables with 'static
+    /// rather than creating fresh region variables. This is nice for two reasons:
+    ///
+    /// 1. Because the numbers of the region variables would otherwise be fairly unique to this
+    ///    particular method call, it winds up creating fewer types overall, which helps for memory
+    ///    usage. (Admittedly, this is a rather small effect, though measureable.)
+    ///
+    /// 2. It makes it easier to deal with higher-ranked trait bounds, because we can replace any
+    ///    late-bound regions with 'static. Otherwise, if we were going to replace late-bound
+    ///    regions with actual region variables as is proper, we'd have to ensure that the same
+    ///    region got replaced with the same variable, which requires a bit more coordination
+    ///    and/or tracking the substitution and
+    ///    so forth.
     fn erase_late_bound_regions<T>(&self, value: &T) -> T
         where T : HigherRankedFoldable<'tcx>
     {
-        /*!
-         * Replace late-bound-regions bound by `value` with `'static`
-         * using `ty::erase_late_bound_regions`.
-         *
-         * This is only a reasonable thing to do during the *probe*
-         * phase, not the *confirm* phase, of method matching. It is
-         * reasonable during the probe phase because we don't consider
-         * region relationships at all. Therefore, we can just replace
-         * all the region variables with 'static rather than creating
-         * fresh region variables. This is nice for two reasons:
-         *
-         * 1. Because the numbers of the region variables would
-         *    otherwise be fairly unique to this particular method
-         *    call, it winds up creating fewer types overall, which
-         *    helps for memory usage. (Admittedly, this is a rather
-         *    small effect, though measureable.)
-         *
-         * 2. It makes it easier to deal with higher-ranked trait
-         *    bounds, because we can replace any late-bound regions
-         *    with 'static. Otherwise, if we were going to replace
-         *    late-bound regions with actual region variables as is
-         *    proper, we'd have to ensure that the same region got
-         *    replaced with the same variable, which requires a bit
-         *    more coordination and/or tracking the substitution and
-         *    so forth.
-         */
-
         ty::erase_late_bound_regions(self.tcx(), value)
     }
 }
@@ -1000,16 +984,13 @@ fn impl_method<'tcx>(tcx: &ty::ctxt<'tcx>,
         .and_then(|item| item.as_opt_method())
 }
 
+/// Find method with name `method_name` defined in `trait_def_id` and return it, along with its
+/// index (or `None`, if no such method).
 fn trait_method<'tcx>(tcx: &ty::ctxt<'tcx>,
                       trait_def_id: ast::DefId,
                       method_name: ast::Name)
                       -> Option<(uint, Rc<ty::Method<'tcx>>)>
 {
-    /*!
-     * Find method with name `method_name` defined in `trait_def_id` and return it,
-     * along with its index (or `None`, if no such method).
-     */
-
     let trait_items = ty::trait_items(tcx, trait_def_id);
     trait_items
         .iter()
diff --git a/src/librustc/middle/typeck/check/mod.rs b/src/librustc/middle/typeck/check/mod.rs
index 754bdc8c8ea01..b33ce04f5ebe7 100644
--- a/src/librustc/middle/typeck/check/mod.rs
+++ b/src/librustc/middle/typeck/check/mod.rs
@@ -486,6 +486,12 @@ impl<'a, 'tcx, 'v> Visitor<'v> for GatherLocalsVisitor<'a, 'tcx> {
 
 }
 
+/// Helper used by check_bare_fn and check_expr_fn. Does the grungy work of checking a function
+/// body and returns the function context used for that purpose, since in the case of a fn item
+/// there is still a bit more to do.
+///
+/// * ...
+/// * inherited: other fields inherited from the enclosing fn (if any)
 fn check_fn<'a, 'tcx>(ccx: &'a CrateCtxt<'a, 'tcx>,
                       fn_style: ast::FnStyle,
                       fn_style_id: ast::NodeId,
@@ -495,16 +501,6 @@ fn check_fn<'a, 'tcx>(ccx: &'a CrateCtxt<'a, 'tcx>,
                       body: &ast::Block,
                       inherited: &'a Inherited<'a, 'tcx>)
                       -> FnCtxt<'a, 'tcx> {
-    /*!
-     * Helper used by check_bare_fn and check_expr_fn.  Does the
-     * grungy work of checking a function body and returns the
-     * function context used for that purpose, since in the case of a
-     * fn item there is still a bit more to do.
-     *
-     * - ...
-     * - inherited: other fields inherited from the enclosing fn (if any)
-     */
-
     let tcx = ccx.tcx;
     let err_count_on_creation = tcx.sess.err_count();
 
@@ -701,19 +697,17 @@ pub fn check_item(ccx: &CrateCtxt, it: &ast::Item) {
     }
 }
 
+/// Type checks a method body.
+///
+/// # Parameters
+///
+/// * `item_generics`: generics defined on the impl/trait that contains
+///   the method
+/// * `self_bound`: bound for the `Self` type parameter, if any
+/// * `method`: the method definition
 fn check_method_body<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
                                item_generics: &ty::Generics<'tcx>,
                                method: &ast::Method) {
-    /*!
-     * Type checks a method body.
-     *
-     * # Parameters
-     * - `item_generics`: generics defined on the impl/trait that contains
-     *   the method
-     * - `self_bound`: bound for the `Self` type parameter, if any
-     * - `method`: the method definition
-     */
-
     debug!("check_method_body(item_generics={}, method.id={})",
             item_generics.repr(ccx.tcx),
             method.id);
@@ -1222,6 +1216,33 @@ fn compare_impl_method<'tcx>(tcx: &ty::ctxt<'tcx>,
     // parameters.
     infcx.resolve_regions_and_report_errors();
 
+    /// Check that region bounds on impl method are the same as those on the trait. In principle,
+    /// it could be ok for there to be fewer region bounds on the impl method, but this leads to an
+    /// annoying corner case that is painful to handle (described below), so for now we can just
+    /// forbid it.
+    ///
+    /// Example (see `src/test/compile-fail/regions-bound-missing-bound-in-impl.rs`):
+    ///
+    /// ```
+    /// trait Foo<'a> {
+    ///     fn method1<'b>();
+    ///     fn method2<'b:'a>();
+    /// }
+    ///
+    /// impl<'a> Foo<'a> for ... {
+    ///     fn method1<'b:'a>() { .. case 1, definitely bad .. }
+    ///     fn method2<'b>() { .. case 2, could be ok .. }
+    /// }
+    /// ```
+    ///
+    /// The "definitely bad" case is case #1. Here, the impl adds an extra constraint not present
+    /// in the trait.
+    ///
+    /// The "maybe bad" case is case #2. Here, the impl omits a constraint that is present in the
+    /// trait. We could in principle allow this, but it interacts in a complex way with early/late
+    /// bound resolution of lifetimes. Basically the presence or absence of a lifetime bound
+    /// affects whether the lifetime is early/late bound, and right now the code breaks if the
+    /// trait has an early bound lifetime parameter and the method does not.
     fn check_region_bounds_on_impl_method<'tcx>(tcx: &ty::ctxt<'tcx>,
                                                 span: Span,
                                                 impl_m: &ty::Method<'tcx>,
@@ -1232,37 +1253,4 @@ fn compare_impl_method<'tcx>(tcx: &ty::ctxt<'tcx>,
                                                 impl_to_skol_substs: &Substs<'tcx>)
                                                 -> bool
     {
-        /*!
-
-        Check that region bounds on impl method are the same as those
-        on the trait. In principle, it could be ok for there to be
-        fewer region bounds on the impl method, but this leads to an
-        annoying corner case that is painful to handle (described
-        below), so for now we can just forbid it.
-
-        Example (see
-        `src/test/compile-fail/regions-bound-missing-bound-in-impl.rs`):
-
-            trait Foo<'a> {
-                fn method1<'b>();
-                fn method2<'b:'a>();
-            }
-
-            impl<'a> Foo<'a> for ... {
-                fn method1<'b:'a>() { .. case 1, definitely bad .. }
-                fn method2<'b>() { .. case 2, could be ok .. }
-            }
-
-        The "definitely bad" case is case #1. Here, the impl adds an
-        extra constraint not present in the trait.
-
-        The "maybe bad" case is case #2. Here, the impl adds an extra
-        constraint not present in the trait. We could in principle
-        allow this, but it interacts in a complex way with early/late
-        bound resolution of lifetimes. Basically the presence or
-        absence of a lifetime bound affects whether the lifetime is
-        early/late bound, and right now the code breaks if the trait
-        has an early bound lifetime parameter and the method does not.
-
-        */
 
@@ -1770,23 +1760,17 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         }
     }
 
+    /// Returns the type of `def_id` with all generics replaced by fresh type/region variables.
+    /// Also returns the substitution from the type parameters on `def_id` to the fresh variables.
+    /// Registers any trait obligations specified on `def_id` at the same time.
+    ///
+    /// Note that this function is only intended to be used with types (notably, not impls). This is
+    /// because it doesn't do any instantiation of late-bound regions.
     pub fn instantiate_type(&self,
                             span: Span,
                             def_id: ast::DefId)
                             -> TypeAndSubsts<'tcx>
     {
-        /*!
-         * Returns the type of `def_id` with all generics replaced by
-         * by fresh type/region variables. Also returns the
-         * substitution from the type parameters on `def_id` to the
-         * fresh variables. Registers any trait obligations specified
-         * on `def_id` at the same time.
-         *
-         * Note that function is only intended to be used with types
-         * (notably, not impls). This is because it doesn't do any
-         * instantiation of late-bound regions.
-         */
-
         let polytype =
             ty::lookup_item_type(self.tcx(), def_id);
         let substs =
@@ -1886,26 +1870,19 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         }
     }
 
+    /// Fetch type of `expr` after applying adjustments that have been recorded in the fcx.
     pub fn expr_ty_adjusted(&self, expr: &ast::Expr) -> Ty<'tcx> {
-        /*!
-         * Fetch type of `expr` after applying adjustments that
-         * have been recorded in the fcx.
-         */
-
         let adjustments = self.inh.adjustments.borrow();
         let adjustment = adjustments.get(&expr.id);
         self.adjust_expr_ty(expr, adjustment)
     }
 
+    /// Apply `adjustment` to the type of `expr`
     pub fn adjust_expr_ty(&self,
                           expr: &ast::Expr,
                           adjustment: Option<&ty::AutoAdjustment<'tcx>>)
                           -> Ty<'tcx>
     {
-        /*!
-         * Apply `adjustment` to the type of `expr`
-         */
-
         let raw_ty = self.expr_ty(expr);
         let raw_ty = self.infcx().shallow_resolve(raw_ty);
         ty::adjust_ty(self.tcx(),
@@ -2013,16 +1990,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         self.infcx().report_mismatched_types(sp, e, a, err)
     }
 
+    /// Registers an obligation for checking later, during regionck, that the type `ty` must
+    /// outlive the region `r`.
     pub fn register_region_obligation(&self,
                                       origin: infer::SubregionOrigin<'tcx>,
                                       ty: Ty<'tcx>,
                                       r: ty::Region)
     {
-        /*!
-         * Registers an obligation for checking later, during
-         * regionck, that the type `ty` must outlive the region `r`.
-         */
-
         let mut region_obligations = self.inh.region_obligations.borrow_mut();
         let region_obligation = RegionObligation { sub_region: r,
                                                    sup_type: ty,
@@ -2045,31 +2019,29 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         }
     }
 
+    /// Given a fully substituted set of bounds (`generic_bounds`), and the values with which each
+    /// type/region parameter was instantiated (`substs`), creates and registers suitable
+    /// trait/region obligations.
+    ///
+    /// For example, if there is a function:
+    ///
+    /// ```
+    /// fn foo<'a,T:'a>(...)
+    /// ```
+    ///
+    /// and a reference:
+    ///
+    /// ```
+    /// let f = foo;
+    /// ```
+    ///
+    /// Then we will create a fresh region variable `'$0` and a fresh type variable `$1` for `'a`
+    /// and `T`. This routine will add a region obligation `$1:'$0` and register it locally.
     pub fn add_obligations_for_parameters(&self,
                                           cause: traits::ObligationCause<'tcx>,
                                           substs: &Substs<'tcx>,
                                           generic_bounds: &ty::GenericBounds<'tcx>)
     {
-        /*!
-         * Given a fully substituted set of bounds (`generic_bounds`),
-         * and the values with which each type/region parameter was
-         * instantiated (`substs`), creates and registers suitable
-         * trait/region obligations.
-         *
-         * For example, if there is a function:
-         *
-         *    fn foo<'a,T:'a>(...)
-         *
-         * and a reference:
-         *
-         *    let f = foo;
-         *
-         * Then we will create a fresh region variable `'$0` and a
-         * fresh type variable `$1` for `'a` and `T`. This routine
-         * will add a region obligation `$1:'$0` and register it
-         * locally.
-         */
-
         assert!(!generic_bounds.has_escaping_regions());
 
         debug!("add_obligations_for_parameters(substs={}, generic_bounds={})",
@@ -2160,22 +2132,17 @@ pub enum LvaluePreference {
     NoPreference
 }
 
+/// Executes an autoderef loop for the type `t`. At each step, invokes `should_stop` to decide
+/// whether to terminate the loop. Returns the final type and number of derefs that it performed.
+///
+/// Note: this method does not modify the adjustments table. The caller is responsible for
+/// inserting an AutoAdjustment record into the `fcx` using one of the suitable methods.
 pub fn autoderef<'a, 'tcx, T>(fcx: &FnCtxt<'a, 'tcx>, sp: Span,
                               base_ty: Ty<'tcx>,
                               expr_id: Option<ast::NodeId>,
                               mut lvalue_pref: LvaluePreference,
                               should_stop: |Ty<'tcx>, uint| -> Option<T>)
                               -> (Ty<'tcx>, uint, Option<T>) {
-    /*!
-     * Executes an autoderef loop for the type `t`. At each step, invokes
-     * `should_stop` to decide whether to terminate the loop. Returns
-     * the final type and number of derefs that it performed.
-     *
-     * Note: this method does not modify the adjustments table. The caller is
-     * responsible for inserting an AutoAdjustment record into the `fcx`
-     * using one of the suitable methods.
-     */
-
     let mut t = base_ty;
     for autoderefs in range(0, fcx.tcx().sess.recursion_limit.get()) {
         let resolved_t = structurally_resolved_type(fcx, sp, t);
@@ -2306,19 +2273,14 @@ fn try_overloaded_deref<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
     make_overloaded_lvalue_return_type(fcx, method_call, method)
 }
 
+/// For the overloaded lvalue expressions (`*x`, `x[3]`), the trait returns a type of `&T`, but the
+/// actual type we assign to the *expression* is `T`. So this function just peels off the return
+/// type by one layer to yield `T`. It also inserts the `method-callee` into the method map.
 fn make_overloaded_lvalue_return_type<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                                                 method_call: Option<MethodCall>,
                                                 method: Option<MethodCallee<'tcx>>)
                                                 -> Option<ty::mt<'tcx>>
 {
-    /*!
-     * For the overloaded lvalue expressions (`*x`, `x[3]`), the trait
-     * returns a type of `&T`, but the actual type we assign to the
-     * *expression* is `T`. So this function just peels off the return
-     * type by one layer to yield `T`. It also inserts the
-     * `method-callee` into the method map.
-     */
-
     match method {
         Some(method) => {
             let ref_ty = ty::ty_fn_ret(method.ty);
@@ -2380,6 +2342,8 @@ fn autoderef_for_index<'a, 'tcx, T>(fcx: &FnCtxt<'a, 'tcx>,
     }
 }
 
+/// Autoderefs `base_expr`, looking for a `Slice` impl. If it finds one, installs the relevant
+/// method info and returns the result type (else None).
 fn try_overloaded_slice<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                                   method_call: MethodCall,
                                   expr: &ast::Expr,
@@ -2390,12 +2354,6 @@ fn try_overloaded_slice<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                                   mutbl: ast::Mutability)
                                   -> Option<Ty<'tcx>> // return type is result of slice
 {
-    /*!
-     * Autoderefs `base_expr`, looking for a `Slice` impl. If it
-     * finds one, installs the relevant method info and returns the
-     * result type (else None).
-     */
-
     let lvalue_pref = match mutbl {
         ast::MutMutable => PreferMutLvalue,
         ast::MutImmutable => NoPreference
@@ -2436,6 +2394,8 @@ fn try_overloaded_slice<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
     })
 }
 
+/// Checks for a `Slice` (or `SliceMut`) impl at the relevant level of autoderef. If it finds one,
+/// installs method info and returns type of method (else None).
 fn try_overloaded_slice_step<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                                        method_call: MethodCall,
                                        expr: &ast::Expr,
@@ -2448,12 +2408,6 @@ fn try_overloaded_slice_step<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                                        // result type is type of method being called
                                        -> Option<Ty<'tcx>>
 {
-    /*!
-     * Checks for a `Slice` (or `SliceMut`) impl at the relevant level
-     * of autoderef. If it finds one, installs method info and returns
-     * type of method (else None).
-     */
-
     let method = if mutbl == ast::MutMutable {
         // Try `SliceMut` first, if preferred.
         match fcx.tcx().lang_items.slice_mut_trait() {
@@ -2510,6 +2464,10 @@ fn try_overloaded_slice_step<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
     })
 }
 
+/// To type-check `base_expr[index_expr]`, we progressively autoderef (and otherwise adjust)
+/// `base_expr`, looking for a type which either supports builtin indexing or overloaded indexing.
+/// This loop implements one step in that search; the autoderef loop is implemented by
+/// `autoderef_for_index`.
 fn try_index_step<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                             method_call: MethodCall,
                             expr: &ast::Expr,
@@ -2519,13 +2477,6 @@ fn try_index_step<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                             lvalue_pref: LvaluePreference)
                             -> Option<(/*index type*/ Ty<'tcx>, /*element type*/ Ty<'tcx>)>
 {
-    /*!
-     * To type-check `base_expr[index_expr]`, we progressively autoderef (and otherwise adjust)
-     * `base_expr`, looking for a type which either supports builtin indexing or overloaded
-     * indexing. This loop implements one step in that search; the autoderef loop is implemented
-     * by `autoderef_for_index`.
-     */
-
     debug!("try_index_step(expr={}, base_expr.id={}, adjusted_ty={}, adjustment={})",
            expr.repr(fcx.tcx()),
            base_expr.repr(fcx.tcx()),
@@ -2712,6 +2663,8 @@ fn check_method_argument_types<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
     }
 }
 
+/// Generic function that factors out common logic from function calls, method calls and overloaded
+/// operators.
 fn check_argument_types<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                                   sp: Span,
                                   fn_inputs: &[Ty<'tcx>],
@@ -2720,12 +2673,6 @@ fn check_argument_types<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                                   deref_args: DerefArgs,
                                   variadic: bool,
                                   tuple_arguments: TupleArgumentsFlag) {
-    /*!
-     *
-     * Generic function that factors out common logic from
-     * function calls, method calls and overloaded operators.
-     */
-
     let tcx = fcx.ccx.tcx;
 
     // Grab the argument types, supplying fresh type variables
@@ -5289,6 +5236,15 @@ pub fn instantiate_path<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
         }
     }
 
+    /// Finds the parameters that the user provided and adds them to `substs`. If too many
+    /// parameters are provided, then reports an error and clears the output vector.
+    ///
+    /// We clear the output vector because that will cause the `adjust_XXX_parameters()` later to
+    /// use inference variables. This seems less likely to lead to derived errors.
+    ///
+    /// Note that we *do not* check for *too few* parameters here. Due to the presence of defaults
+    /// etc that is more complicated. I wanted however to do the reporting of *too many* parameters
+    /// here because we can easily use the precise span of the N+1'th parameter.
     fn push_explicit_parameters_from_segment_to_substs<'a, 'tcx>(
         fcx: &FnCtxt<'a, 'tcx>,
         space: subst::ParamSpace,
@@ -5298,23 +5254,6 @@ pub fn instantiate_path<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
         segment: &ast::PathSegment,
         substs: &mut Substs<'tcx>)
     {
-        /*!
-         * Finds the parameters that the user provided and adds them
-         * to `substs`. If too many parameters are provided, then
-         * reports an error and clears the output vector.
-         *
-         * We clear the output vector because that will cause the
-         * `adjust_XXX_parameters()` later to use inference
-         * variables. This seems less likely to lead to derived
-         * errors.
-         *
-         * Note that we *do not* check for *too few* parameters here.
-         * Due to the presence of defaults etc that is more
-         * complicated. I wanted however to do the reporting of *too
-         * many* parameters here because we can easily use the precise
-         * span of the N+1'th parameter.
-         */
-
         match segment.parameters {
             ast::AngleBracketedParameters(ref data) => {
                 push_explicit_angle_bracketed_parameters_from_segment_to_substs(
@@ -5373,6 +5312,12 @@ pub fn instantiate_path<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
         }
     }
 
+    /// As with
+    /// `push_explicit_angle_bracketed_parameters_from_segment_to_substs`,
+    /// but intended for `Foo(A,B) -> C` form. This expands to
+    /// roughly the same thing as `Foo<(A,B),C>`. One important
+    /// difference has to do with the treatment of anonymous
+    /// regions, which are translated into bound regions (NYI).
     fn push_explicit_parenthesized_parameters_from_segment_to_substs<'a, 'tcx>(
         fcx: &FnCtxt<'a, 'tcx>,
         space: subst::ParamSpace,
@@ -5381,15 +5326,6 @@ pub fn instantiate_path<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
         data: &ast::ParenthesizedParameterData,
         substs: &mut Substs<'tcx>)
     {
-        /*!
-         * As with
-         * `push_explicit_angle_bracketed_parameters_from_segment_to_substs`,
-         * but intended for `Foo(A,B) -> C` form. This expands to
-         * roughly the same thing as `Foo<(A,B),C>`. One important
-         * difference has to do with the treatment of anonymous
-         * regions, which are translated into bound regions (NYI).
-         */
-
         let type_count = type_defs.len(space);
         if type_count < 2 {
             span_err!(fcx.tcx().sess, span, E0167,
diff --git a/src/librustc/middle/typeck/check/regionck.rs b/src/librustc/middle/typeck/check/regionck.rs
index f12b5cdad9886..bc6e7d9d87ffe 100644
--- a/src/librustc/middle/typeck/check/regionck.rs
+++ b/src/librustc/middle/typeck/check/regionck.rs
@@ -8,115 +8,111 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-
-The region check is a final pass that runs over the AST after we have
-inferred the type constraints but before we have actually finalized
-the types.  Its purpose is to embed a variety of region constraints.
-Inserting these constraints as a separate pass is good because (1) it
-localizes the code that has to do with region inference and (2) often
-we cannot know what constraints are needed until the basic types have
-been inferred.
-
-### Interaction with the borrow checker
-
-In general, the job of the borrowck module (which runs later) is to
-check that all soundness criteria are met, given a particular set of
-regions. The job of *this* module is to anticipate the needs of the
-borrow checker and infer regions that will satisfy its requirements.
-It is generally true that the inference doesn't need to be sound,
-meaning that if there is a bug and we inferred bad regions, the borrow
-checker should catch it. This is not entirely true though; for
-example, the borrow checker doesn't check subtyping, and it doesn't
-check that region pointers are always live when they are used. It
-might be worthwhile to fix this so that borrowck serves as a kind of
-verification step -- that would add confidence in the overall
-correctness of the compiler, at the cost of duplicating some type
-checks and effort.
-
-### Inferring the duration of borrows, automatic and otherwise
-
-Whenever we introduce a borrowed pointer, for example as the result of
-a borrow expression `let x = &data`, the lifetime of the pointer `x`
-is always specified as a region inference variable. `regionck` has the
-job of adding constraints such that this inference variable is as
-narrow as possible while still accommodating all uses (that is, every
-dereference of the resulting pointer must be within the lifetime).
-
-#### Reborrows
-
-Generally speaking, `regionck` does NOT try to ensure that the data
-`data` will outlive the pointer `x`. That is the job of borrowck.  The
-one exception is when "re-borrowing" the contents of another borrowed
-pointer. For example, imagine you have a borrowed pointer `b` with
-lifetime L1 and you have an expression `&*b`. The result of this
-expression will be another borrowed pointer with lifetime L2 (which is
-an inference variable). The borrow checker is going to enforce the
-constraint that L2 < L1, because otherwise you are re-borrowing data
-for a lifetime larger than the original loan.  However, without the
-routines in this module, the region inferencer would not know of this
-dependency and thus it might infer the lifetime of L2 to be greater
-than L1 (issue #3148).
-
-There are a number of troublesome scenarios in the tests
-`region-dependent-*.rs`, but here is one example:
-
-    struct Foo { i: int }
-    struct Bar { foo: Foo  }
-    fn get_i(x: &'a Bar) -> &'a int {
-       let foo = &x.foo; // Lifetime L1
-       &foo.i            // Lifetime L2
-    }
-
-Note that this comes up either with `&` expressions, `ref`
-bindings, and `autorefs`, which are the three ways to introduce
-a borrow.
-
-The key point here is that when you are borrowing a value that
-is "guaranteed" by a borrowed pointer, you must link the
-lifetime of that borrowed pointer (L1, here) to the lifetime of
-the borrow itself (L2).  What do I mean by "guaranteed" by a
-borrowed pointer? I mean any data that is reached by first
-dereferencing a borrowed pointer and then either traversing
-interior offsets or owned pointers.  We say that the guarantor
-of such data it the region of the borrowed pointer that was
-traversed.  This is essentially the same as the ownership
-relation, except that a borrowed pointer never owns its
-contents.
-
-### Inferring borrow kinds for upvars
-
-Whenever there is a closure expression, we need to determine how each
-upvar is used. We do this by initially assigning each upvar an
-immutable "borrow kind" (see `ty::BorrowKind` for details) and then
-"escalating" the kind as needed. The borrow kind proceeds according to
-the following lattice:
-
-    ty::ImmBorrow -> ty::UniqueImmBorrow -> ty::MutBorrow
-
-So, for example, if we see an assignment `x = 5` to an upvar `x`, we
-will promote its borrow kind to mutable borrow. If we see an `&mut x`
-we'll do the same. Naturally, this applies not just to the upvar, but
-to everything owned by `x`, so the result is the same for something
-like `x.f = 5` and so on (presuming `x` is not a borrowed pointer to a
-struct). These adjustments are performed in
-`adjust_upvar_borrow_kind()` (you can trace backwards through the code
-from there).
-
-The fact that we are inferring borrow kinds as we go results in a
-semi-hacky interaction with mem-categorization. In particular,
-mem-categorization will query the current borrow kind as it
-categorizes, and we'll return the *current* value, but this may get
-adjusted later. Therefore, in this module, we generally ignore the
-borrow kind (and derived mutabilities) that are returned from
-mem-categorization, since they may be inaccurate. (Another option
-would be to use a unification scheme, where instead of returning a
-concrete borrow kind like `ty::ImmBorrow`, we return a
-`ty::InferBorrow(upvar_id)` or something like that, but this would
-then mean that all later passes would have to check for these figments
-and report an error, and it just seems like more mess in the end.)
-
-*/
+//! The region check is a final pass that runs over the AST after we have
+//! inferred the type constraints but before we have actually finalized
+//! the types.  Its purpose is to embed a variety of region constraints.
+//! Inserting these constraints as a separate pass is good because (1) it
+//! localizes the code that has to do with region inference and (2) often
+//! we cannot know what constraints are needed until the basic types have
+//! been inferred.
+//!
+//! ### Interaction with the borrow checker
+//!
+//! In general, the job of the borrowck module (which runs later) is to
+//! check that all soundness criteria are met, given a particular set of
+//! regions. The job of *this* module is to anticipate the needs of the
+//! borrow checker and infer regions that will satisfy its requirements.
+//! It is generally true that the inference doesn't need to be sound,
+//! meaning that if there is a bug and we inferred bad regions, the borrow
+//! checker should catch it. This is not entirely true though; for
+//! example, the borrow checker doesn't check subtyping, and it doesn't
+//! check that region pointers are always live when they are used. It
+//! might be worthwhile to fix this so that borrowck serves as a kind of
+//! verification step -- that would add confidence in the overall
+//! correctness of the compiler, at the cost of duplicating some type
+//! checks and effort.
+//!
+//! ### Inferring the duration of borrows, automatic and otherwise
+//!
+//! Whenever we introduce a borrowed pointer, for example as the result of
+//! a borrow expression `let x = &data`, the lifetime of the pointer `x`
+//! is always specified as a region inference variable. `regionck` has the
+//! job of adding constraints such that this inference variable is as
+//! narrow as possible while still accommodating all uses (that is, every
+//! dereference of the resulting pointer must be within the lifetime).
+//!
+//! #### Reborrows
+//!
+//! Generally speaking, `regionck` does NOT try to ensure that the data
+//! `data` will outlive the pointer `x`. That is the job of borrowck.  The
+//! one exception is when "re-borrowing" the contents of another borrowed
+//! pointer. For example, imagine you have a borrowed pointer `b` with
+//! lifetime L1 and you have an expression `&*b`. The result of this
+//! expression will be another borrowed pointer with lifetime L2 (which is
+//! an inference variable). The borrow checker is going to enforce the
+//! constraint that L2 < L1, because otherwise you are re-borrowing data
+//! for a lifetime larger than the original loan.  However, without the
+//! routines in this module, the region inferencer would not know of this
+//! dependency and thus it might infer the lifetime of L2 to be greater
+//! than L1 (issue #3148).
+//!
+//! There are a number of troublesome scenarios in the tests
+//! `region-dependent-*.rs`, but here is one example:
+//!
+//!     struct Foo { i: int }
+//!     struct Bar { foo: Foo  }
+//!     fn get_i(x: &'a Bar) -> &'a int {
+//!        let foo = &x.foo; // Lifetime L1
+//!        &foo.i            // Lifetime L2
+//!     }
+//!
+//! Note that this comes up with `&` expressions, `ref`
+//! bindings, and `autorefs`, which are the three ways to introduce
+//! a borrow.
+//!
+//! The key point here is that when you are borrowing a value that
+//! is "guaranteed" by a borrowed pointer, you must link the
+//! lifetime of that borrowed pointer (L1, here) to the lifetime of
+//! the borrow itself (L2).  What do I mean by "guaranteed" by a
+//! borrowed pointer? I mean any data that is reached by first
+//! dereferencing a borrowed pointer and then either traversing
+//! interior offsets or owned pointers.  We say that the guarantor
+//! of such data is the region of the borrowed pointer that was
+//! traversed.  This is essentially the same as the ownership
+//! relation, except that a borrowed pointer never owns its
+//! contents.
+//!
+//! ### Inferring borrow kinds for upvars
+//!
+//! Whenever there is a closure expression, we need to determine how each
+//! upvar is used. We do this by initially assigning each upvar an
+//! immutable "borrow kind" (see `ty::BorrowKind` for details) and then
+//! "escalating" the kind as needed. The borrow kind proceeds according to
+//! the following lattice:
+//!
+//!     ty::ImmBorrow -> ty::UniqueImmBorrow -> ty::MutBorrow
+//!
+//! So, for example, if we see an assignment `x = 5` to an upvar `x`, we
+//! will promote its borrow kind to mutable borrow. If we see an `&mut x`
+//! we'll do the same. Naturally, this applies not just to the upvar, but
+//! to everything owned by `x`, so the result is the same for something
+//! like `x.f = 5` and so on (presuming `x` is not a borrowed pointer to a
+//! struct). These adjustments are performed in
+//! `adjust_upvar_borrow_kind()` (you can trace backwards through the code
+//! from there).
+//!
+//! The fact that we are inferring borrow kinds as we go results in a
+//! semi-hacky interaction with mem-categorization. In particular,
+//! mem-categorization will query the current borrow kind as it
+//! categorizes, and we'll return the *current* value, but this may get
+//! adjusted later. Therefore, in this module, we generally ignore the
+//! borrow kind (and derived mutabilities) that are returned from
+//! mem-categorization, since they may be inaccurate. (Another option
+//! would be to use a unification scheme, where instead of returning a
+//! concrete borrow kind like `ty::ImmBorrow`, we return a
+//! `ty::InferBorrow(upvar_id)` or something like that, but this would
+//! then mean that all later passes would have to check for these figments
+//! and report an error, and it just seems like more mess in the end.)
 
 use middle::def;
 use middle::mem_categorization as mc;
@@ -177,15 +173,11 @@ pub fn regionck_fn(fcx: &FnCtxt, id: ast::NodeId, blk: &ast::Block) {
     fcx.infcx().resolve_regions_and_report_errors();
 }
 
+/// Checks that the types in `component_tys` are well-formed. This will add constraints into the
+/// region graph. Does *not* run `resolve_regions_and_report_errors` and so forth.
 pub fn regionck_ensure_component_tys_wf<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
                                                   span: Span,
                                                   component_tys: &[Ty<'tcx>]) {
-    /*!
-     * Checks that the types in `component_tys` are well-formed.
-     * This will add constraints into the region graph.
-     * Does *not* run `resolve_regions_and_report_errors` and so forth.
-     */
-
     let mut rcx = Rcx::new(fcx, 0);
     for &component_ty in component_tys.iter() {
         // Check that each type outlives the empty region. Since the
@@ -239,12 +231,8 @@ pub struct Rcx<'a, 'tcx: 'a> {
     maybe_links: MaybeLinkMap<'tcx>
 }
 
+/// Returns the validity region of `def` -- that is, how long is `def` valid?
 fn region_of_def(fcx: &FnCtxt, def: def::Def) -> ty::Region {
-    /*!
-     * Returns the validity region of `def` -- that is, how long
-     * is `def` valid?
-     */
-
     let tcx = fcx.tcx();
     match def {
         def::DefLocal(node_id) => {
@@ -283,35 +271,30 @@ impl<'a, 'tcx> Rcx<'a, 'tcx> {
         old_scope
     }
 
+    /// Try to resolve the type for the given node, returning t_err if an error results.  Note that
+    /// we never care about the details of the error, the same error will be detected and reported
+    /// in the writeback phase.
+    ///
+    /// Note one important point: we do not attempt to resolve *region variables* here.  This is
+    /// because regionck is essentially adding constraints to those region variables and so may yet
+    /// influence how they are resolved.
+    ///
+    /// Consider this silly example:
+    ///
+    /// ```
+    /// fn borrow(x: &int) -> &int {x}
+    /// fn foo(x: @int) -> int {  // block: B
+    ///     let b = borrow(x);    // region: <R0>
+    ///     *b
+    /// }
+    /// ```
+    ///
+    /// Here, the region of `b` will be `<R0>`.  `<R0>` is constrained to be some subregion of the
+    /// block B and some superregion of the call.  If we forced it now, we'd choose the smaller
+    /// region (the call).  But that would make the *b illegal.  Since we don't resolve, the type
+    /// of b will be `&<R0>.int` and then `*b` will require that `<R0>` be bigger than the let and
+    /// the `*b` expression, so we will effectively resolve `<R0>` to be the block B.
     pub fn resolve_type(&self, unresolved_ty: Ty<'tcx>) -> Ty<'tcx> {
-        /*!
-         * Try to resolve the type for the given node, returning
-         * t_err if an error results.  Note that we never care
-         * about the details of the error, the same error will be
-         * detected and reported in the writeback phase.
-         *
-         * Note one important point: we do not attempt to resolve
-         * *region variables* here.  This is because regionck is
-         * essentially adding constraints to those region variables
-         * and so may yet influence how they are resolved.
-         *
-         * Consider this silly example:
-         *
-         *     fn borrow(x: &int) -> &int {x}
-         *     fn foo(x: @int) -> int {  // block: B
-         *         let b = borrow(x);    // region: <R0>
-         *         *b
-         *     }
-         *
-         * Here, the region of `b` will be `<R0>`.  `<R0>` is
-         * constrainted to be some subregion of the block B and some
-         * superregion of the call.  If we forced it now, we'd choose
-         * the smaller region (the call).  But that would make the *b
-         * illegal.  Since we don't resolve, the type of b will be
-         * `&<R0>.int` and then `*b` will require that `<R0>` be
-         * bigger than the let and the `*b` expression, so we will
-         * effectively resolve `<R0>` to be the block B.
-         */
         match resolve_type(self.fcx.infcx(), None, unresolved_ty,
                            resolve_and_force_all_but_regions) {
             Ok(t) => t,
@@ -384,25 +367,19 @@ impl<'a, 'tcx> Rcx<'a, 'tcx> {
         }
     }
 
+    /// This method populates the region map's `free_region_map`. It walks over the transformed
+    /// argument and return types for each function just before we check the body of that function,
+    /// looking for types where you have a borrowed pointer to other borrowed data (e.g., `&'a &'b
+    /// [uint]`).  We do not allow references to outlive the things they point at, so we can assume
+    /// that `'a <= 'b`. This holds for both the argument and return types, basically because, on
+    /// the caller side, the caller is responsible for checking that the type of every expression
+    /// (including the actual values for the arguments, as well as the return type of the fn call)
+    /// is well-formed.
+    ///
+    /// Tests: `src/test/compile-fail/regions-free-region-ordering-*.rs`
     fn relate_free_regions(&mut self,
                            fn_sig_tys: &[Ty<'tcx>],
                            body_id: ast::NodeId) {
-        /*!
-         * This method populates the region map's `free_region_map`.
-         * It walks over the transformed argument and return types for
-         * each function just before we check the body of that
-         * function, looking for types where you have a borrowed
-         * pointer to other borrowed data (e.g., `&'a &'b [uint]`.  We
-         * do not allow references to outlive the things they point
-         * at, so we can assume that `'a <= 'b`. This holds for both
-         * the argument and return types, basically because, on the caller
-         * side, the caller is responsible for checking that the type of
-         * every expression (including the actual values for the arguments,
-         * as well as the return type of the fn call) is well-formed.
-         *
-         * Tests: `src/test/compile-fail/regions-free-region-ordering-*.rs`
-         */
-
         debug!("relate_free_regions >>");
         let tcx = self.tcx();
 
@@ -921,19 +898,15 @@ fn check_expr_fn_block(rcx: &mut Rcx,
         _ => {}
     }
 
+    /// Make sure that the type of all free variables referenced inside a closure/proc outlive the
+    /// closure/proc's lifetime bound. This is just a special case of the usual rules about closed
+    /// over values outliving the object's lifetime bound.
     fn ensure_free_variable_types_outlive_closure_bound(
         rcx: &mut Rcx,
         bounds: ty::ExistentialBounds,
         expr: &ast::Expr,
         freevars: &[ty::Freevar])
     {
-        /*!
-         * Make sure that the type of all free variables referenced
-         * inside a closure/proc outlive the closure/proc's lifetime
-         * bound. This is just a special case of the usual rules about
-         * closed over values outliving the object's lifetime bound.
-         */
-
         let tcx = rcx.fcx.ccx.tcx;
 
         debug!("ensure_free_variable_types_outlive_closure_bound({}, {})",
@@ -984,18 +957,14 @@ fn check_expr_fn_block(rcx: &mut Rcx,
         }
     }
 
+    /// Make sure that all free variables referenced inside the closure outlive the closure's
+    /// lifetime bound. Also, create an entry in the upvar_borrows map with a region.
     fn constrain_free_variables_in_by_ref_closure(
         rcx: &mut Rcx,
         region_bound: ty::Region,
         expr: &ast::Expr,
         freevars: &[ty::Freevar])
     {
-        /*!
-         * Make sure that all free variables referenced inside the
-         * closure outlive the closure's lifetime bound. Also, create
-         * an entry in the upvar_borrows map with a region.
-         */
-
         let tcx = rcx.fcx.ccx.tcx;
         let infcx = rcx.fcx.infcx();
         debug!("constrain_free_variables({}, {})",
@@ -1183,15 +1152,12 @@ fn constrain_call<'a, I: Iterator<&'a ast::Expr>>(rcx: &mut Rcx,
     }
 }
 
+/// Invoked on any auto-dereference that occurs. Checks that if this is a region pointer being
+/// dereferenced, the lifetime of the pointer includes the deref expr.
 fn constrain_autoderefs<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx>,
                                   deref_expr: &ast::Expr,
                                   derefs: uint,
                                   mut derefd_ty: Ty<'tcx>) {
-    /*!
-     * Invoked on any auto-dereference that occurs.  Checks that if
-     * this is a region pointer being dereferenced, the lifetime of
-     * the pointer includes the deref expr.
-     */
     let r_deref_expr = ty::ReScope(CodeExtent::from_node_id(deref_expr.id));
     for i in range(0u, derefs) {
         debug!("constrain_autoderefs(deref_expr=?, derefd_ty={}, derefs={}/{}",
@@ -1259,16 +1225,12 @@ pub fn mk_subregion_due_to_dereference(rcx: &mut Rcx,
 }
 
 
+/// Invoked on any index expression that occurs. Checks that if this is a slice being indexed, the
+/// lifetime of the pointer includes the deref expr.
 fn constrain_index<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx>,
                              index_expr: &ast::Expr,
                              indexed_ty: Ty<'tcx>)
 {
-    /*!
-     * Invoked on any index expression that occurs.  Checks that if
-     * this is a slice being indexed, the lifetime of the pointer
-     * includes the deref expr.
-     */
-
     debug!("constrain_index(index_expr=?, indexed_ty={}",
            rcx.fcx.infcx().ty_to_string(indexed_ty));
 
@@ -1286,18 +1248,14 @@ fn constrain_index<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx>,
     }
 }
 
+/// Guarantees that any lifetimes which appear in the type of the node `id` (after applying
+/// adjustments) are valid for at least `minimum_lifetime`
 fn type_of_node_must_outlive<'a, 'tcx>(
     rcx: &mut Rcx<'a, 'tcx>,
     origin: infer::SubregionOrigin<'tcx>,
     id: ast::NodeId,
     minimum_lifetime: ty::Region)
 {
-    /*!
-     * Guarantees that any lifetimes which appear in the type of
-     * the node `id` (after applying adjustments) are valid for at
-     * least `minimum_lifetime`
-     */
-
     let tcx = rcx.fcx.tcx();
 
     // Try to resolve the type.  If we encounter an error, then typeck
@@ -1314,14 +1272,10 @@ fn type_of_node_must_outlive<'a, 'tcx>(
     type_must_outlive(rcx, origin, ty, minimum_lifetime);
 }
 
+/// Computes the guarantor for an expression `&base` and then ensures that the lifetime of the
+/// resulting pointer is linked to the lifetime of its guarantor (if any).
 fn link_addr_of(rcx: &mut Rcx, expr: &ast::Expr,
                mutability: ast::Mutability, base: &ast::Expr) {
-    /*!
-     * Computes the guarantor for an expression `&base` and then
-     * ensures that the lifetime of the resulting pointer is linked
-     * to the lifetime of its guarantor (if any).
-     */
-
     debug!("link_addr_of(base=?)");
 
     let cmt = {
@@ -1331,13 +1285,10 @@ fn link_addr_of(rcx: &mut Rcx, expr: &ast::Expr,
     link_region_from_node_type(rcx, expr.span, expr.id, mutability, cmt);
 }
 
+/// Computes the guarantors for any ref bindings in a `let` and
+/// then ensures that the lifetime of the resulting pointer is
+/// linked to the lifetime of the initialization expression.
 fn link_local(rcx: &Rcx, local: &ast::Local) {
-    /*!
-     * Computes the guarantors for any ref bindings in a `let` and
-     * then ensures that the lifetime of the resulting pointer is
-     * linked to the lifetime of the initialization expression.
-     */
-
     debug!("regionck::for_local()");
     let init_expr = match local.init {
         None => { return; }
@@ -1348,12 +1299,10 @@ fn link_local(rcx: &Rcx, local: &ast::Local) {
     link_pattern(rcx, mc, discr_cmt, &*local.pat);
 }
 
+/// Computes the guarantors for any ref bindings in a match and
+/// then ensures that the lifetime of the resulting pointer is
+/// linked to the lifetime of its guarantor (if any).
 fn link_match(rcx: &Rcx, discr: &ast::Expr, arms: &[ast::Arm]) {
-    /*!
-     * Computes the guarantors for any ref bindings in a match and
-     * then ensures that the lifetime of the resulting pointer is
-     * linked to the lifetime of its guarantor (if any).
-     */
 
     debug!("regionck::for_match()");
     let mc = mc::MemCategorizationContext::new(rcx);
@@ -1366,15 +1315,12 @@ fn link_match(rcx: &Rcx, discr: &ast::Expr, arms: &[ast::Arm]) {
     }
 }
 
+/// Link lifetimes of any ref bindings in `root_pat` to the pointers found in the discriminant, if
+/// needed.
 fn link_pattern<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
                           mc: mc::MemCategorizationContext<Rcx<'a, 'tcx>>,
                           discr_cmt: mc::cmt<'tcx>,
                           root_pat: &ast::Pat) {
-    /*!
-     * Link lifetimes of any ref bindings in `root_pat` to
-     * the pointers found in the discriminant, if needed.
-     */
-
     let _ = mc.cat_pattern(discr_cmt, root_pat, |mc, sub_cmt, sub_pat| {
             match sub_pat.node {
                 // `ref x` pattern
@@ -1400,14 +1346,12 @@ fn link_pattern<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
         });
 }
 
+/// Link lifetime of borrowed pointer resulting from autoref to lifetimes in the value being
+/// autoref'd.
 fn link_autoref(rcx: &Rcx,
                 expr: &ast::Expr,
                 autoderefs: uint,
                 autoref: &ty::AutoRef) {
-    /*!
-     * Link lifetime of borrowed pointer resulting from autoref
-     * to lifetimes in the value being autoref'd.
-     */
 
     debug!("link_autoref(autoref={})", autoref);
     let mc = mc::MemCategorizationContext::new(rcx);
@@ -1424,15 +1368,11 @@ fn link_autoref(rcx: &Rcx,
     }
 }
 
+/// Computes the guarantor for cases where the `expr` is being passed by implicit reference and
+/// must outlive `callee_scope`.
 fn link_by_ref(rcx: &Rcx,
                expr: &ast::Expr,
                callee_scope: CodeExtent) {
-    /*!
-     * Computes the guarantor for cases where the `expr` is
-     * being passed by implicit reference and must outlive
-     * `callee_scope`.
-     */
-
     let tcx = rcx.tcx();
     debug!("link_by_ref(expr={}, callee_scope={})",
            expr.repr(tcx), callee_scope);
@@ -1442,17 +1382,13 @@ fn link_by_ref(rcx: &Rcx,
     link_region(rcx, expr.span, borrow_region, ty::ImmBorrow, expr_cmt);
 }
 
+/// Like `link_region()`, except that the region is extracted from the type of `id`, which must be
+/// some reference (`&T`, `&str`, etc).
 fn link_region_from_node_type<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
                                         span: Span,
                                         id: ast::NodeId,
                                         mutbl: ast::Mutability,
                                         cmt_borrowed: mc::cmt<'tcx>) {
-    /*!
-     * Like `link_region()`, except that the region is
-     * extracted from the type of `id`, which must be some
-     * reference (`&T`, `&str`, etc).
-     */
-
     let rptr_ty = rcx.resolve_node_type(id);
     if !ty::type_is_error(rptr_ty) {
         let tcx = rcx.fcx.ccx.tcx;
@@ -1463,19 +1399,14 @@ fn link_region_from_node_type<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
     }
 }
 
+/// Informs the inference engine that `borrow_cmt` is being borrowed with kind `borrow_kind` and
+/// lifetime `borrow_region`. In order to ensure borrowck is satisfied, this may create constraints
+/// between regions, as explained in `link_reborrowed_region()`.
 fn link_region<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
                          span: Span,
                          borrow_region: ty::Region,
                          borrow_kind: ty::BorrowKind,
                          borrow_cmt: mc::cmt<'tcx>) {
-    /*!
-     * Informs the inference engine that `borrow_cmt` is being
-     * borrowed with kind `borrow_kind` and lifetime `borrow_region`.
-     * In order to ensure borrowck is satisfied, this may create
-     * constraints between regions, as explained in
-     * `link_reborrowed_region()`.
-     */
-
     let mut borrow_cmt = borrow_cmt;
     let mut borrow_kind = borrow_kind;
 
@@ -1525,6 +1456,46 @@ fn link_region<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
     }
 }
 
+/// This is the most complicated case: the path being borrowed is
+/// itself the referent of a borrowed pointer. Let me give an
+/// example fragment of code to make clear(er) the situation:
+///
+///    let r: &'a mut T = ...;  // the original reference "r" has lifetime 'a
+///    ...
+///    &'z *r                   // the reborrow has lifetime 'z
+///
+/// Now, in this case, our primary job is to add the inference
+/// constraint that `'z <= 'a`. Given this setup, let's clarify the
+/// parameters in (roughly) terms of the example:
+///
+///     A borrow of: `& 'z bk * r` where `r` has type `& 'a bk T`
+///     borrow_region   ^~                 ref_region    ^~
+///     borrow_kind        ^~               ref_kind        ^~
+///     ref_cmt                 ^
+///
+/// Here `bk` stands for some borrow-kind (e.g., `mut`, `uniq`, etc).
+///
+/// Unfortunately, there are some complications beyond the simple
+/// scenario I just painted:
+///
+/// 1. The reference `r` might in fact be a "by-ref" upvar. In that
+///    case, we have two jobs. First, we are inferring whether this reference
+///    should be an `&T`, `&mut T`, or `&uniq T` reference, and we must
+///    adjust that based on this borrow (e.g., if this is an `&mut` borrow,
+///    then `r` must be an `&mut` reference). Second, whenever we link
+///    two regions (here, `'z <= 'a`), we supply a *cause*, and in this
+///    case we adjust the cause to indicate that the reference being
+///    "reborrowed" is itself an upvar. This provides a nicer error message
+///    should something go wrong.
+///
+/// 2. There may in fact be more levels of reborrowing. In the
+///    example, I said the borrow was like `&'z *r`, but it might
+///    in fact be a borrow like `&'z **q` where `q` has type `&'a
+///    &'b mut T`. In that case, we want to ensure that `'z <= 'a`
+///    and `'z <= 'b`. This is explained more below.
+///
+/// The return value of this function indicates whether we need to
+/// recurse and process `ref_cmt` (see case 2 above).
 fn link_reborrowed_region<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
                                     span: Span,
                                     borrow_region: ty::Region,
@@ -1535,49 +1506,6 @@ fn link_reborrowed_region<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
                                     note: mc::Note)
                                     -> Option<(mc::cmt<'tcx>, ty::BorrowKind)>
 {
-    /*!
-     * This is the most complicated case: the path being borrowed is
-     * itself the referent of a borrowed pointer. Let me give an
-     * example fragment of code to make clear(er) the situation:
-     *
-     *    let r: &'a mut T = ...;  // the original reference "r" has lifetime 'a
-     *    ...
-     *    &'z *r                   // the reborrow has lifetime 'z
-     *
-     * Now, in this case, our primary job is to add the inference
-     * constraint that `'z <= 'a`. Given this setup, let's clarify the
-     * parameters in (roughly) terms of the example:
-     *
-     *     A borrow of: `& 'z bk * r` where `r` has type `& 'a bk T`
-     *     borrow_region   ^~                 ref_region    ^~
-     *     borrow_kind        ^~               ref_kind        ^~
-     *     ref_cmt                 ^
-     *
-     * Here `bk` stands for some borrow-kind (e.g., `mut`, `uniq`, etc).
-     *
-     * Unfortunately, there are some complications beyond the simple
-     * scenario I just painted:
-     *
-     * 1. The reference `r` might in fact be a "by-ref" upvar. In that
-     *    case, we have two jobs. First, we are inferring whether this reference
-     *    should be an `&T`, `&mut T`, or `&uniq T` reference, and we must
-     *    adjust that based on this borrow (e.g., if this is an `&mut` borrow,
-     *    then `r` must be an `&mut` reference). Second, whenever we link
-     *    two regions (here, `'z <= 'a`), we supply a *cause*, and in this
-     *    case we adjust the cause to indicate that the reference being
-     *    "reborrowed" is itself an upvar. This provides a nicer error message
-     *    should something go wrong.
-     *
-     * 2. There may in fact be more levels of reborrowing. In the
-     *    example, I said the borrow was like `&'z *r`, but it might
-     *    in fact be a borrow like `&'z **q` where `q` has type `&'a
-     *    &'b mut T`. In that case, we want to ensure that `'z <= 'a`
-     *    and `'z <= 'b`. This is explained more below.
-     *
-     * The return value of this function indicates whether we need to
-     * recurse and process `ref_cmt` (see case 2 above).
-     */
-
     // Possible upvar ID we may need later to create an entry in the
     // maybe link map.
 
@@ -1715,27 +1643,19 @@ fn link_reborrowed_region<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
     }
 }
 
+/// Adjusts the inferred borrow_kind as needed to account for upvars that are assigned to in an
+/// assignment expression.
 fn adjust_borrow_kind_for_assignment_lhs(rcx: &Rcx,
                                          lhs: &ast::Expr) {
-    /*!
-     * Adjusts the inferred borrow_kind as needed to account
-     * for upvars that are assigned to in an assignment
-     * expression.
-     */
-
     let mc = mc::MemCategorizationContext::new(rcx);
     let cmt = ignore_err!(mc.cat_expr(lhs));
     adjust_upvar_borrow_kind_for_mut(rcx, cmt);
 }
 
+/// Indicates that `cmt` is being directly mutated (e.g., assigned to). If cmt contains any by-ref
+/// upvars, this implies that those upvars must be borrowed using an `&mut` borrow.
 fn adjust_upvar_borrow_kind_for_mut<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>,
                                               cmt: mc::cmt<'tcx>) {
-    /*!
-     * Indicates that `cmt` is being directly mutated (e.g., assigned
-     * to).  If cmt contains any by-ref upvars, this implies that
-     * those upvars must be borrowed using an `&mut` borow.
-     */
-
     let mut cmt = cmt;
     loop {
         debug!("adjust_upvar_borrow_kind_for_mut(cmt={})",
@@ -1834,16 +1754,12 @@ fn adjust_upvar_borrow_kind_for_unique<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>, cmt: mc::c
     }
 }
 
+/// Indicates that the borrow_kind of `outer_upvar_id` must permit a reborrowing with the
+/// borrow_kind of `inner_upvar_id`. This occurs in nested closures, see comment above at the call
+/// to this function.
 fn link_upvar_borrow_kind_for_nested_closures(rcx: &mut Rcx,
                                               inner_upvar_id: ty::UpvarId,
                                               outer_upvar_id: ty::UpvarId) {
-    /*!
-     * Indicates that the borrow_kind of `outer_upvar_id` must
-     * permit a reborrowing with the borrow_kind of `inner_upvar_id`.
-     * This occurs in nested closures, see comment above at the call to
-     * this function.
-     */
-
     debug!("link_upvar_borrow_kind: inner_upvar_id={} outer_upvar_id={}",
            inner_upvar_id, outer_upvar_id);
 
@@ -1867,18 +1783,14 @@ fn adjust_upvar_borrow_kind_for_loan(rcx: &Rcx,
     adjust_upvar_borrow_kind(rcx, upvar_id, upvar_borrow, kind)
 }
 
+/// We infer the borrow_kind with which to borrow upvars in a stack closure. The borrow_kind
+/// basically follows a lattice of `imm < unique-imm < mut`, moving from left to right as needed
+/// (but never right to left). Here the argument `mutbl` is the borrow_kind that is required by
+/// some particular use.
 fn adjust_upvar_borrow_kind(rcx: &Rcx,
                             upvar_id: ty::UpvarId,
                             upvar_borrow: &mut ty::UpvarBorrow,
                             kind: ty::BorrowKind) {
-    /*!
-     * We infer the borrow_kind with which to borrow upvars in a stack
-     * closure. The borrow_kind basically follows a lattice of
-     * `imm < unique-imm < mut`, moving from left to right as needed (but never
-     * right to left). Here the argument `mutbl` is the borrow_kind that
-     * is required by some particular use.
-     */
-
     debug!("adjust_upvar_borrow_kind: id={} kind=({} -> {})",
            upvar_id, upvar_borrow.kind, kind);
 
@@ -1911,15 +1823,12 @@ fn adjust_upvar_borrow_kind(rcx: &Rcx,
     }
 }
 
+/// Ensures that all borrowed data reachable via `ty` outlives `region`.
 fn type_must_outlive<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx>,
                                origin: infer::SubregionOrigin<'tcx>,
                                ty: Ty<'tcx>,
                                region: ty::Region)
 {
-    /*!
-     * Ensures that all borrowed data reachable via `ty` outlives `region`.
-     */
-
     debug!("type_must_outlive(ty={}, region={})",
            ty.repr(rcx.tcx()),
            region.repr(rcx.tcx()));
diff --git a/src/librustc/middle/typeck/check/regionmanip.rs b/src/librustc/middle/typeck/check/regionmanip.rs
index 9fd24c4ee784e..55214618aa90b 100644
--- a/src/librustc/middle/typeck/check/regionmanip.rs
+++ b/src/librustc/middle/typeck/check/regionmanip.rs
@@ -33,18 +33,14 @@ struct Wf<'a, 'tcx: 'a> {
     out: Vec<WfConstraint<'tcx>>,
 }
 
+/// This routine computes the well-formedness constraints that must hold for the type `ty` to
+/// appear in a context with lifetime `outer_region`
 pub fn region_wf_constraints<'tcx>(
     tcx: &ty::ctxt<'tcx>,
     ty: Ty<'tcx>,
     outer_region: ty::Region)
     -> Vec<WfConstraint<'tcx>>
 {
-    /*!
-     * This routine computes the well-formedness constraints that must
-     * hold for the type `ty` to appear in a context with lifetime
-     * `outer_region`
-     */
-
     let mut stack = Vec::new();
     stack.push((outer_region, None));
     let mut wf = Wf { tcx: tcx,
@@ -168,12 +164,9 @@ impl<'a, 'tcx> Wf<'a, 'tcx> {
         self.stack.pop().unwrap();
     }
 
+    /// Pushes a constraint that `r_b` must outlive the top region on the stack.
     fn push_region_constraint_from_top(&mut self,
                                        r_b: ty::Region) {
-        /*!
-         * Pushes a constraint that `r_b` must outlive the
-         * top region on the stack.
-         */
 
         // Indicates that we have found borrowed content with a lifetime
         // of at least `r_b`. This adds a constraint that `r_b` must
@@ -192,30 +185,26 @@ impl<'a, 'tcx> Wf<'a, 'tcx> {
         self.push_sub_region_constraint(opt_ty, r_a, r_b);
     }
 
+    /// Pushes a constraint that `r_a <= r_b`, due to `opt_ty`
     fn push_sub_region_constraint(&mut self,
                                   opt_ty: Option<Ty<'tcx>>,
                                   r_a: ty::Region,
                                   r_b: ty::Region) {
-        /*! Pushes a constraint that `r_a <= r_b`, due to `opt_ty` */
         self.out.push(RegionSubRegionConstraint(opt_ty, r_a, r_b));
     }
 
+    /// Pushes a constraint that `param_ty` must outlive the top region on the stack.
     fn push_param_constraint_from_top(&mut self,
                                       param_ty: ty::ParamTy) {
-        /*!
-         * Pushes a constraint that `param_ty` must outlive the
-         * top region on the stack.
-         */
-
         let &(region, opt_ty) = self.stack.last().unwrap();
         self.push_param_constraint(region, opt_ty, param_ty);
     }
 
+    /// Pushes a constraint that `region <= param_ty`, due to `opt_ty`
     fn push_param_constraint(&mut self,
                              region: ty::Region,
                              opt_ty: Option<Ty<'tcx>>,
                              param_ty: ty::ParamTy) {
-        /*! Pushes a constraint that `region <= param_ty`, due to `opt_ty` */
         self.out.push(RegionSubParamConstraint(opt_ty, region, param_ty));
     }
 
diff --git a/src/librustc/middle/typeck/check/vtable.rs b/src/librustc/middle/typeck/check/vtable.rs
index 1619a4224f9f0..51978a01f7124 100644
--- a/src/librustc/middle/typeck/check/vtable.rs
+++ b/src/librustc/middle/typeck/check/vtable.rs
@@ -168,17 +168,14 @@ pub fn check_object_safety<'tcx>(tcx: &ty::ctxt<'tcx>,
         }
     }
 
-    // Returns a vec of error messages. If hte vec is empty - no errors!
+    /// Returns a vec of error messages. If the vec is empty - no errors!
+    ///
+    /// There are some limitations to calling functions through an object, because (a) the self
+    /// type is not known (that's the whole point of a trait instance, after all, to obscure the
+    /// self type) and (b) the call must go through a vtable and hence cannot be monomorphized.
     fn check_object_safety_of_method<'tcx>(tcx: &ty::ctxt<'tcx>,
                                            method: &ty::Method<'tcx>)
                                            -> Vec<String> {
-        /*!
-         * There are some limitations to calling functions through an
-         * object, because (a) the self type is not known
-         * (that's the whole point of a trait instance, after all, to
-         * obscure the self type) and (b) the call must go through a
-         * vtable and hence cannot be monomorphized.
-         */
         let mut msgs = Vec::new();
 
         let method_name = method.name.repr(tcx);
@@ -455,8 +452,8 @@ pub fn maybe_report_ambiguity<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
     }
 }
 
+/// Select as many obligations as we can at present.
 pub fn select_fcx_obligations_where_possible(fcx: &FnCtxt) {
-    /*! Select as many obligations as we can at present. */
 
     match
         fcx.inh.fulfillment_cx
@@ -468,14 +465,10 @@ pub fn select_fcx_obligations_where_possible(fcx: &FnCtxt) {
     }
 }
 
+/// Try to select any fcx obligation that we haven't tried yet, in an effort to improve inference.
+/// You could just call `select_fcx_obligations_where_possible` except that it leads to repeated
+/// work.
 pub fn select_new_fcx_obligations(fcx: &FnCtxt) {
-    /*!
-     * Try to select any fcx obligation that we haven't tried yet,
-     * in an effort to improve inference. You could just call
-     * `select_fcx_obligations_where_possible` except that it leads
-     * to repeated work.
-     */
-
     match
         fcx.inh.fulfillment_cx
         .borrow_mut()
diff --git a/src/librustc/middle/typeck/check/wf.rs b/src/librustc/middle/typeck/check/wf.rs
index d9c6c3cb6262a..502e37aa9f370 100644
--- a/src/librustc/middle/typeck/check/wf.rs
+++ b/src/librustc/middle/typeck/check/wf.rs
@@ -38,24 +38,18 @@ impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> {
         CheckTypeWellFormedVisitor { ccx: ccx, cache: HashSet::new() }
     }
 
+    /// Checks that the field types (in a struct def'n) or argument types (in an enum def'n) are
+    /// well-formed, meaning that they do not require any constraints not declared in the struct
+    /// definition itself. For example, this definition would be illegal:
+    ///
+    ///     struct Ref<'a, T> { x: &'a T }
+    ///
+    /// because the type did not declare that `T:'a`.
+    ///
+    /// We do this check as a pre-pass before checking fn bodies because if these constraints are
+    /// not included it frequently leads to confusing errors in fn bodies. So it's better to check
+    /// the types first.
     fn check_item_well_formed(&mut self, item: &ast::Item) {
-        /*!
-         * Checks that the field types (in a struct def'n) or
-         * argument types (in an enum def'n) are well-formed,
-         * meaning that they do not require any constraints not
-         * declared in the struct definition itself.
-         * For example, this definition would be illegal:
-         *
-         *     struct Ref<'a, T> { x: &'a T }
-         *
-         * because the type did not declare that `T:'a`.
-         *
-         * We do this check as a pre-pass before checking fn bodies
-         * because if these constraints are not included it frequently
-         * leads to confusing errors in fn bodies. So it's better to check
-         * the types first.
-         */
-
         let ccx = self.ccx;
         debug!("check_item_well_formed(it.id={}, it.ident={})",
                item.id,
@@ -107,16 +101,12 @@ impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> {
         regionck::regionck_item(&fcx, item);
     }
 
+    /// In a type definition, we check to ensure that the types of the fields are well-formed.
     fn check_type_defn(&mut self,
                        item: &ast::Item,
                        lookup_fields: for<'fcx> |&FnCtxt<'fcx, 'tcx>|
                                                  -> Vec<AdtVariant<'tcx>>)
     {
-        /*!
-         * In a type definition, we check that to ensure that the types of the fields are
-         * well-formed.
-         */
-
         self.with_fcx(item, |this, fcx| {
             let variants = lookup_fields(fcx);
             let mut bounds_checker = BoundsChecker::new(fcx,
@@ -282,22 +272,16 @@ impl<'cx,'tcx> BoundsChecker<'cx,'tcx> {
                         cache: cache, binding_count: 0 }
     }
 
+    /// Given a trait ref like `A : Trait<B>`, where `Trait` is defined as (say):
+    ///
+    ///     trait Trait<B:OtherTrait> : Copy { ... }
+    ///
+    /// This routine will check that `B : OtherTrait` and `A : Trait<B>`. It will also recursively
+    /// check that the types `A` and `B` are well-formed.
+    ///
+    /// Note that it does not (currently, at least) check that `A : Copy` (that check is delegated
+    /// to the point where impl `A : Trait<B>` is implemented).
     pub fn check_trait_ref(&mut self, trait_ref: &ty::TraitRef<'tcx>) {
-        /*!
-         * Given a trait ref like `A : Trait<B>`, where `Trait` is
-         * defined as (say):
-         *
-         *     trait Trait<B:OtherTrait> : Copy { ... }
-         *
-         * This routine will check that `B : OtherTrait` and `A :
-         * Trait<B>`. It will also recursively check that the types
-         * `A` and `B` are well-formed.
-         *
-         * Note that it does not (currently, at least)
-         * check that `A : Copy` (that check is delegated to the point
-         * where impl `A : Trait<B>` is implemented).
-         */
-
         let trait_def = ty::lookup_trait_def(self.fcx.tcx(), trait_ref.def_id);
 
         let bounds = trait_def.generics.to_bounds(self.tcx(), &trait_ref.substs);
diff --git a/src/librustc/middle/typeck/coherence/mod.rs b/src/librustc/middle/typeck/coherence/mod.rs
index 1f32110a09338..758608b79c2cb 100644
--- a/src/librustc/middle/typeck/coherence/mod.rs
+++ b/src/librustc/middle/typeck/coherence/mod.rs
@@ -477,17 +477,13 @@ impl<'a, 'tcx> CoherenceChecker<'a, 'tcx> {
     }
 }
 
+/// Substitutes the values for the receiver's type parameters that are found in method, leaving the
+/// method's type parameters intact.
 pub fn make_substs_for_receiver_types<'tcx>(tcx: &ty::ctxt<'tcx>,
                                             trait_ref: &ty::TraitRef<'tcx>,
                                             method: &ty::Method<'tcx>)
                                             -> subst::Substs<'tcx>
 {
-    /*!
-     * Substitutes the values for the receiver's type parameters
-     * that are found in method, leaving the method's type parameters
-     * intact.
-     */
-
     let meth_tps: Vec<Ty> =
         method.generics.types.get_slice(subst::FnSpace)
               .iter()
diff --git a/src/librustc/middle/typeck/coherence/orphan.rs b/src/librustc/middle/typeck/coherence/orphan.rs
index 57ce7f79e030a..dc3afaae35f61 100644
--- a/src/librustc/middle/typeck/coherence/orphan.rs
+++ b/src/librustc/middle/typeck/coherence/orphan.rs
@@ -8,10 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
- * Orphan checker: every impl either implements a trait defined in this
- * crate or pertains to a type defined in this crate.
- */
+//! Orphan checker: every impl either implements a trait defined in this
+//! crate or pertains to a type defined in this crate.
 
 use middle::traits;
 use middle::ty;
diff --git a/src/librustc/middle/typeck/coherence/overlap.rs b/src/librustc/middle/typeck/coherence/overlap.rs
index 933c2c81ac269..9f10a58f45852 100644
--- a/src/librustc/middle/typeck/coherence/overlap.rs
+++ b/src/librustc/middle/typeck/coherence/overlap.rs
@@ -8,10 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
- * Overlap: No two impls for the same trait are implemented for the
- * same type.
- */
+//! Overlap: No two impls for the same trait are implemented for the
+//! same type.
 
 use middle::traits;
 use middle::ty;
diff --git a/src/librustc/middle/typeck/collect.rs b/src/librustc/middle/typeck/collect.rs
index 13a0bf0bdcb52..3a62978ed007a 100644
--- a/src/librustc/middle/typeck/collect.rs
+++ b/src/librustc/middle/typeck/collect.rs
@@ -1944,6 +1944,9 @@ fn get_or_create_type_parameter_def<'tcx,AC>(this: &AC,
     def
 }
 
+/// Translate the AST's notion of ty param bounds (which are an enum consisting of a newtyped Ty or
+/// a region) to ty's notion of ty param bounds, which can either be user-defined traits, or the
+/// built-in trait (formerly known as kind): Send.
 fn compute_bounds<'tcx,AC>(this: &AC,
                            name_of_bounded_thing: ast::Name,
                            param_ty: ty::ParamTy,
@@ -1953,13 +1956,6 @@ fn compute_bounds<'tcx,AC>(this: &AC,
                            where_clause: &ast::WhereClause)
                            -> ty::ParamBounds<'tcx>
                            where AC: AstConv<'tcx> {
-    /*!
-     * Translate the AST's notion of ty param bounds (which are an
-     * enum consisting of a newtyped Ty or a region) to ty's
-     * notion of ty param bounds, which can either be user-defined
-     * traits, or the built-in trait (formerly known as kind): Send.
-     */
-
     let mut param_bounds = conv_param_bounds(this,
                                              span,
                                              param_ty,
@@ -2040,16 +2036,13 @@ fn conv_param_bounds<'tcx,AC>(this: &AC,
     }
 }
 
+/// Merges the bounds declared on a type parameter with those found from where clauses into a
+/// single list.
 fn merge_param_bounds<'a>(tcx: &ty::ctxt,
                           param_ty: ty::ParamTy,
                           ast_bounds: &'a [ast::TyParamBound],
                           where_clause: &'a ast::WhereClause)
                           -> Vec<&'a ast::TyParamBound> {
-    /*!
-     * Merges the bounds declared on a type parameter with those
-     * found from where clauses into a single list.
-     */
-
     let mut result = Vec::new();
 
     for ast_bound in ast_bounds.iter() {
diff --git a/src/librustc/middle/typeck/infer/coercion.rs b/src/librustc/middle/typeck/infer/coercion.rs
index 49ac7178eb8db..51f8668692ea7 100644
--- a/src/librustc/middle/typeck/infer/coercion.rs
+++ b/src/librustc/middle/typeck/infer/coercion.rs
@@ -8,61 +8,57 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-
-# Type Coercion
-
-Under certain circumstances we will coerce from one type to another,
-for example by auto-borrowing.  This occurs in situations where the
-compiler has a firm 'expected type' that was supplied from the user,
-and where the actual type is similar to that expected type in purpose
-but not in representation (so actual subtyping is inappropriate).
-
-## Reborrowing
-
-Note that if we are expecting a reference, we will *reborrow*
-even if the argument provided was already a reference.  This is
-useful for freezing mut/const things (that is, when the expected is &T
-but you have &const T or &mut T) and also for avoiding the linearity
-of mut things (when the expected is &mut T and you have &mut T).  See
-the various `src/test/run-pass/coerce-reborrow-*.rs` tests for
-examples of where this is useful.
-
-## Subtle note
-
-When deciding what type coercions to consider, we do not attempt to
-resolve any type variables we may encounter.  This is because `b`
-represents the expected type "as the user wrote it", meaning that if
-the user defined a generic function like
-
-   fn foo<A>(a: A, b: A) { ... }
-
-and then we wrote `foo(&1, @2)`, we will not auto-borrow
-either argument.  In older code we went to some lengths to
-resolve the `b` variable, which could mean that we'd
-auto-borrow later arguments but not earlier ones, which
-seems very confusing.
-
-## Subtler note
-
-However, right now, if the user manually specifies the
-values for the type variables, as so:
-
-   foo::<&int>(@1, @2)
-
-then we *will* auto-borrow, because we can't distinguish this from a
-function that declared `&int`.  This is inconsistent but it's easiest
-at the moment. The right thing to do, I think, is to consider the
-*unsubstituted* type when deciding whether to auto-borrow, but the
-*substituted* type when considering the bounds and so forth. But most
-of our methods don't give access to the unsubstituted type, and
-rightly so because they'd be error-prone.  So maybe the thing to do is
-to actually determine the kind of coercions that should occur
-separately and pass them in.  Or maybe it's ok as is.  Anyway, it's
-sort of a minor point so I've opted to leave it for later---after all
-we may want to adjust precisely when coercions occur.
-
-*/
+//! # Type Coercion
+//!
+//! Under certain circumstances we will coerce from one type to another,
+//! for example by auto-borrowing.  This occurs in situations where the
+//! compiler has a firm 'expected type' that was supplied from the user,
+//! and where the actual type is similar to that expected type in purpose
+//! but not in representation (so actual subtyping is inappropriate).
+//!
+//! ## Reborrowing
+//!
+//! Note that if we are expecting a reference, we will *reborrow*
+//! even if the argument provided was already a reference.  This is
+//! useful for freezing mut/const things (that is, when the expected is &T
+//! but you have &const T or &mut T) and also for avoiding the linearity
+//! of mut things (when the expected is &mut T and you have &mut T).  See
+//! the various `src/test/run-pass/coerce-reborrow-*.rs` tests for
+//! examples of where this is useful.
+//!
+//! ## Subtle note
+//!
+//! When deciding what type coercions to consider, we do not attempt to
+//! resolve any type variables we may encounter.  This is because `b`
+//! represents the expected type "as the user wrote it", meaning that if
+//! the user defined a generic function like
+//!
+//!    fn foo<A>(a: A, b: A) { ... }
+//!
+//! and then we wrote `foo(&1, @2)`, we will not auto-borrow
+//! either argument.  In older code we went to some lengths to
+//! resolve the `b` variable, which could mean that we'd
+//! auto-borrow later arguments but not earlier ones, which
+//! seems very confusing.
+//!
+//! ## Subtler note
+//!
+//! However, right now, if the user manually specifies the
+//! values for the type variables, as so:
+//!
+//!    foo::<&int>(@1, @2)
+//!
+//! then we *will* auto-borrow, because we can't distinguish this from a
+//! function that declared `&int`.  This is inconsistent but it's easiest
+//! at the moment. The right thing to do, I think, is to consider the
+//! *unsubstituted* type when deciding whether to auto-borrow, but the
+//! *substituted* type when considering the bounds and so forth. But most
+//! of our methods don't give access to the unsubstituted type, and
+//! rightly so because they'd be error-prone.  So maybe the thing to do is
+//! to actually determine the kind of coercions that should occur
+//! separately and pass them in.  Or maybe it's ok as is.  Anyway, it's
+//! sort of a minor point so I've opted to leave it for later---after all
+//! we may want to adjust precisely when coercions occur.
 
 use middle::subst;
 use middle::ty::{AutoPtr, AutoDerefRef, AdjustDerefRef, AutoUnsize, AutoUnsafe};
@@ -512,14 +508,10 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> {
         }
     }
 
+    ///  Attempts to coerce from a bare Rust function (`extern "Rust" fn`) into a closure or a
+    ///  `proc`.
     fn coerce_from_bare_fn(&self, a: Ty<'tcx>, fn_ty_a: &ty::BareFnTy<'tcx>, b: Ty<'tcx>)
                            -> CoerceResult<'tcx> {
-        /*!
-         *
-         * Attempts to coerce from a bare Rust function (`extern
-         * "Rust" fn`) into a closure or a `proc`.
-         */
-
         self.unpack_actual_value(b, |sty_b| {
 
             debug!("coerce_from_bare_fn(a={}, b={})",
diff --git a/src/librustc/middle/typeck/infer/combine.rs b/src/librustc/middle/typeck/infer/combine.rs
index 763f204dc98bc..ba6ae00b6671f 100644
--- a/src/librustc/middle/typeck/infer/combine.rs
+++ b/src/librustc/middle/typeck/infer/combine.rs
@@ -642,21 +642,16 @@ impl<'f, 'tcx> CombineFields<'f, 'tcx> {
         Ok(())
     }
 
+    /// Attempts to generalize `ty` for the type variable `for_vid`.  This checks for cycle -- that
+    /// is, whether the type `ty` references `for_vid`. If `make_region_vars` is true, it will also
+    /// replace all regions with fresh variables. Returns `ty_err` in the case of a cycle, `Ok`
+    /// otherwise.
     fn generalize(&self,
                   ty: Ty<'tcx>,
                   for_vid: ty::TyVid,
                   make_region_vars: bool)
                   -> cres<'tcx, Ty<'tcx>>
     {
-        /*!
-         * Attempts to generalize `ty` for the type variable
-         * `for_vid`.  This checks for cycle -- that is, whether the
-         * type `ty` references `for_vid`. If `make_region_vars` is
-         * true, it will also replace all regions with fresh
-         * variables. Returns `ty_err` in the case of a cycle, `Ok`
-         * otherwise.
-         */
-
         let mut generalize = Generalizer { infcx: self.infcx,
                                            span: self.trace.origin.span(),
                                            for_vid: for_vid,
diff --git a/src/librustc/middle/typeck/infer/doc.rs b/src/librustc/middle/typeck/infer/doc.rs
index 886550a3b2461..0e3cc5f68c868 100644
--- a/src/librustc/middle/typeck/infer/doc.rs
+++ b/src/librustc/middle/typeck/infer/doc.rs
@@ -8,244 +8,240 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-
-# Type inference engine
-
-This is loosely based on standard HM-type inference, but with an
-extension to try and accommodate subtyping.  There is nothing
-principled about this extension; it's sound---I hope!---but it's a
-heuristic, ultimately, and does not guarantee that it finds a valid
-typing even if one exists (in fact, there are known scenarios where it
-fails, some of which may eventually become problematic).
-
-## Key idea
-
-The main change is that each type variable T is associated with a
-lower-bound L and an upper-bound U.  L and U begin as bottom and top,
-respectively, but gradually narrow in response to new constraints
-being introduced.  When a variable is finally resolved to a concrete
-type, it can (theoretically) select any type that is a supertype of L
-and a subtype of U.
-
-There are several critical invariants which we maintain:
-
-- the upper-bound of a variable only becomes lower and the lower-bound
-  only becomes higher over time;
-- the lower-bound L is always a subtype of the upper bound U;
-- the lower-bound L and upper-bound U never refer to other type variables,
-  but only to types (though those types may contain type variables).
-
-> An aside: if the terms upper- and lower-bound confuse you, think of
-> "supertype" and "subtype".  The upper-bound is a "supertype"
-> (super=upper in Latin, or something like that anyway) and the lower-bound
-> is a "subtype" (sub=lower in Latin).  I find it helps to visualize
-> a simple class hierarchy, like Java minus interfaces and
-> primitive types.  The class Object is at the root (top) and other
-> types lie in between.  The bottom type is then the Null type.
-> So the tree looks like:
->
-> ```text
->         Object
->         /    \
->     String   Other
->         \    /
->         (null)
-> ```
->
-> So the upper bound type is the "supertype" and the lower bound is the
-> "subtype" (also, super and sub mean upper and lower in Latin, or something
-> like that anyway).
-
-## Satisfying constraints
-
-At a primitive level, there is only one form of constraint that the
-inference understands: a subtype relation.  So the outside world can
-say "make type A a subtype of type B".  If there are variables
-involved, the inferencer will adjust their upper- and lower-bounds as
-needed to ensure that this relation is satisfied. (We also allow "make
-type A equal to type B", but this is translated into "A <: B" and "B
-<: A")
-
-As stated above, we always maintain the invariant that type bounds
-never refer to other variables.  This keeps the inference relatively
-simple, avoiding the scenario of having a kind of graph where we have
-to pump constraints along and reach a fixed point, but it does impose
-some heuristics in the case where the user is relating two type
-variables A <: B.
-
-Combining two variables such that variable A will forever be a subtype
-of variable B is the trickiest part of the algorithm because there is
-often no right choice---that is, the right choice will depend on
-future constraints which we do not yet know. The problem comes about
-because both A and B have bounds that can be adjusted in the future.
-Let's look at some of the cases that can come up.
-
-Imagine, to start, the best case, where both A and B have an upper and
-lower bound (that is, the bounds are not top nor bot respectively). In
-that case, if we're lucky, A.ub <: B.lb, and so we know that whatever
-A and B should become, they will forever have the desired subtyping
-relation.  We can just leave things as they are.
-
-### Option 1: Unify
-
-However, suppose that A.ub is *not* a subtype of B.lb.  In
-that case, we must make a decision.  One option is to unify A
-and B so that they are one variable whose bounds are:
-
-    UB = GLB(A.ub, B.ub)
-    LB = LUB(A.lb, B.lb)
-
-(Note that we will have to verify that LB <: UB; if it does not, the
-types are not intersecting and there is an error) In that case, A <: B
-holds trivially because A==B.  However, we have now lost some
-flexibility, because perhaps the user intended for A and B to end up
-as different types and not the same type.
-
-Pictorally, what this does is to take two distinct variables with
-(hopefully not completely) distinct type ranges and produce one with
-the intersection.
-
-```text
-                  B.ub                  B.ub
-                   /\                    /
-           A.ub   /  \           A.ub   /
-           /   \ /    \              \ /
-          /     X      \              UB
-         /     / \      \            / \
-        /     /   /      \          /   /
-        \     \  /       /          \  /
-         \      X       /             LB
-          \    / \     /             / \
-           \  /   \   /             /   \
-           A.lb    B.lb          A.lb    B.lb
-```
-
-
-### Option 2: Relate UB/LB
-
-Another option is to keep A and B as distinct variables but set their
-bounds in such a way that, whatever happens, we know that A <: B will hold.
-This can be achieved by ensuring that A.ub <: B.lb.  In practice there
-are two ways to do that, depicted pictorally here:
-
-```text
-    Before                Option #1            Option #2
-
-             B.ub                B.ub                B.ub
-              /\                 /  \                /  \
-      A.ub   /  \        A.ub   /(B')\       A.ub   /(B')\
-      /   \ /    \           \ /     /           \ /     /
-     /     X      \         __UB____/             UB    /
-    /     / \      \       /  |                   |    /
-   /     /   /      \     /   |                   |   /
-   \     \  /       /    /(A')|                   |  /
-    \      X       /    /     LB            ______LB/
-     \    / \     /    /     / \           / (A')/ \
-      \  /   \   /     \    /   \          \    /   \
-      A.lb    B.lb       A.lb    B.lb        A.lb    B.lb
-```
-
-In these diagrams, UB and LB are defined as before.  As you can see,
-the new ranges `A'` and `B'` are quite different from the range that
-would be produced by unifying the variables.
-
-### What we do now
-
-Our current technique is to *try* (transactionally) to relate the
-existing bounds of A and B, if there are any (i.e., if `UB(A) != top
-&& LB(B) != bot`).  If that succeeds, we're done.  If it fails, then
-we merge A and B into same variable.
-
-This is not clearly the correct course.  For example, if `UB(A) !=
-top` but `LB(B) == bot`, we could conceivably set `LB(B)` to `UB(A)`
-and leave the variables unmerged.  This is sometimes the better
-course, it depends on the program.
-
-The main case which fails today that I would like to support is:
-
-```text
-fn foo<T>(x: T, y: T) { ... }
-
-fn bar() {
-    let x: @mut int = @mut 3;
-    let y: @int = @3;
-    foo(x, y);
-}
-```
-
-In principle, the inferencer ought to find that the parameter `T` to
-`foo(x, y)` is `@const int`.  Today, however, it does not; this is
-because the type variable `T` is merged with the type variable for
-`X`, and thus inherits its UB/LB of `@mut int`.  This leaves no
-flexibility for `T` to later adjust to accommodate `@int`.
-
-### What to do when not all bounds are present
-
-In the prior discussion we assumed that A.ub was not top and B.lb was
-not bot.  Unfortunately this is rarely the case.  Often type variables
-have "lopsided" bounds.  For example, if a variable in the program has
-been initialized but has not been used, then its corresponding type
-variable will have a lower bound but no upper bound.  When that
-variable is then used, we would like to know its upper bound---but we
-don't have one!  In this case we'll do different things depending on
-how the variable is being used.
-
-## Transactional support
-
-Whenever we adjust merge variables or adjust their bounds, we always
-keep a record of the old value.  This allows the changes to be undone.
-
-## Regions
-
-I've only talked about type variables here, but region variables
-follow the same principle.  They have upper- and lower-bounds.  A
-region A is a subregion of a region B if A being valid implies that B
-is valid.  This basically corresponds to the block nesting structure:
-the regions for outer block scopes are superregions of those for inner
-block scopes.
-
-## Integral and floating-point type variables
-
-There is a third variety of type variable that we use only for
-inferring the types of unsuffixed integer literals.  Integral type
-variables differ from general-purpose type variables in that there's
-no subtyping relationship among the various integral types, so instead
-of associating each variable with an upper and lower bound, we just
-use simple unification.  Each integer variable is associated with at
-most one integer type.  Floating point types are handled similarly to
-integral types.
-
-## GLB/LUB
-
-Computing the greatest-lower-bound and least-upper-bound of two
-types/regions is generally straightforward except when type variables
-are involved. In that case, we follow a similar "try to use the bounds
-when possible but otherwise merge the variables" strategy.  In other
-words, `GLB(A, B)` where `A` and `B` are variables will often result
-in `A` and `B` being merged and the result being `A`.
-
-## Type coercion
-
-We have a notion of assignability which differs somewhat from
-subtyping; in particular it may cause region borrowing to occur.  See
-the big comment later in this file on Type Coercion for specifics.
-
-### In conclusion
-
-I showed you three ways to relate `A` and `B`.  There are also more,
-of course, though I'm not sure if there are any more sensible options.
-The main point is that there are various options, each of which
-produce a distinct range of types for `A` and `B`.  Depending on what
-the correct values for A and B are, one of these options will be the
-right choice: but of course we don't know the right values for A and B
-yet, that's what we're trying to find!  In our code, we opt to unify
-(Option #1).
-
-# Implementation details
-
-We make use of a trait-like implementation strategy to consolidate
-duplicated code between subtypes, GLB, and LUB computations.  See the
-section on "Type Combining" below for details.
-
-*/
+//! # Type inference engine
+//!
+//! This is loosely based on standard HM-type inference, but with an
+//! extension to try and accommodate subtyping.  There is nothing
+//! principled about this extension; it's sound---I hope!---but it's a
+//! heuristic, ultimately, and does not guarantee that it finds a valid
+//! typing even if one exists (in fact, there are known scenarios where it
+//! fails, some of which may eventually become problematic).
+//!
+//! ## Key idea
+//!
+//! The main change is that each type variable T is associated with a
+//! lower-bound L and an upper-bound U.  L and U begin as bottom and top,
+//! respectively, but gradually narrow in response to new constraints
+//! being introduced.  When a variable is finally resolved to a concrete
+//! type, it can (theoretically) select any type that is a supertype of L
+//! and a subtype of U.
+//!
+//! There are several critical invariants which we maintain:
+//!
+//! - the upper-bound of a variable only becomes lower and the lower-bound
+//!   only becomes higher over time;
+//! - the lower-bound L is always a subtype of the upper bound U;
+//! - the lower-bound L and upper-bound U never refer to other type variables,
+//!   but only to types (though those types may contain type variables).
+//!
+//! > An aside: if the terms upper- and lower-bound confuse you, think of
+//! > "supertype" and "subtype".  The upper-bound is a "supertype"
+//! > (super=upper in Latin, or something like that anyway) and the lower-bound
+//! > is a "subtype" (sub=lower in Latin).  I find it helps to visualize
+//! > a simple class hierarchy, like Java minus interfaces and
+//! > primitive types.  The class Object is at the root (top) and other
+//! > types lie in between.  The bottom type is then the Null type.
+//! > So the tree looks like:
+//! >
+//! > ```text
+//! >         Object
+//! >         /    \
+//! >     String   Other
+//! >         \    /
+//! >         (null)
+//! > ```
+//! >
+//! > So the upper bound type is the "supertype" and the lower bound is the
+//! > "subtype" (also, super and sub mean upper and lower in Latin, or something
+//! > like that anyway).
+//!
+//! ## Satisfying constraints
+//!
+//! At a primitive level, there is only one form of constraint that the
+//! inference understands: a subtype relation.  So the outside world can
+//! say "make type A a subtype of type B".  If there are variables
+//! involved, the inferencer will adjust their upper- and lower-bounds as
+//! needed to ensure that this relation is satisfied. (We also allow "make
+//! type A equal to type B", but this is translated into "A <: B" and "B
+//! <: A")
+//!
+//! As stated above, we always maintain the invariant that type bounds
+//! never refer to other variables.  This keeps the inference relatively
+//! simple, avoiding the scenario of having a kind of graph where we have
+//! to pump constraints along and reach a fixed point, but it does impose
+//! some heuristics in the case where the user is relating two type
+//! variables A <: B.
+//!
+//! Combining two variables such that variable A will forever be a subtype
+//! of variable B is the trickiest part of the algorithm because there is
+//! often no right choice---that is, the right choice will depend on
+//! future constraints which we do not yet know. The problem comes about
+//! because both A and B have bounds that can be adjusted in the future.
+//! Let's look at some of the cases that can come up.
+//!
+//! Imagine, to start, the best case, where both A and B have an upper and
+//! lower bound (that is, the bounds are not top nor bot respectively). In
+//! that case, if we're lucky, A.ub <: B.lb, and so we know that whatever
+//! A and B should become, they will forever have the desired subtyping
+//! relation.  We can just leave things as they are.
+//!
+//! ### Option 1: Unify
+//!
+//! However, suppose that A.ub is *not* a subtype of B.lb.  In
+//! that case, we must make a decision.  One option is to unify A
+//! and B so that they are one variable whose bounds are:
+//!
+//!     UB = GLB(A.ub, B.ub)
+//!     LB = LUB(A.lb, B.lb)
+//!
+//! (Note that we will have to verify that LB <: UB; if it does not, the
+//! types are not intersecting and there is an error) In that case, A <: B
+//! holds trivially because A==B.  However, we have now lost some
+//! flexibility, because perhaps the user intended for A and B to end up
+//! as different types and not the same type.
+//!
+//! Pictorially, what this does is to take two distinct variables with
+//! (hopefully not completely) distinct type ranges and produce one with
+//! the intersection.
+//!
+//! ```text
+//!                   B.ub                  B.ub
+//!                    /\                    /
+//!            A.ub   /  \           A.ub   /
+//!            /   \ /    \              \ /
+//!           /     X      \              UB
+//!          /     / \      \            / \
+//!         /     /   /      \          /   /
+//!         \     \  /       /          \  /
+//!          \      X       /             LB
+//!           \    / \     /             / \
+//!            \  /   \   /             /   \
+//!            A.lb    B.lb          A.lb    B.lb
+//! ```
+//!
+//!
+//! ### Option 2: Relate UB/LB
+//!
+//! Another option is to keep A and B as distinct variables but set their
+//! bounds in such a way that, whatever happens, we know that A <: B will hold.
+//! This can be achieved by ensuring that A.ub <: B.lb.  In practice there
+//! are two ways to do that, depicted pictorially here:
+//!
+//! ```text
+//!     Before                Option #1            Option #2
+//!
+//!              B.ub                B.ub                B.ub
+//!               /\                 /  \                /  \
+//!       A.ub   /  \        A.ub   /(B')\       A.ub   /(B')\
+//!       /   \ /    \           \ /     /           \ /     /
+//!      /     X      \         __UB____/             UB    /
+//!     /     / \      \       /  |                   |    /
+//!    /     /   /      \     /   |                   |   /
+//!    \     \  /       /    /(A')|                   |  /
+//!     \      X       /    /     LB            ______LB/
+//!      \    / \     /    /     / \           / (A')/ \
+//!       \  /   \   /     \    /   \          \    /   \
+//!       A.lb    B.lb       A.lb    B.lb        A.lb    B.lb
+//! ```
+//!
+//! In these diagrams, UB and LB are defined as before.  As you can see,
+//! the new ranges `A'` and `B'` are quite different from the range that
+//! would be produced by unifying the variables.
+//!
+//! ### What we do now
+//!
+//! Our current technique is to *try* (transactionally) to relate the
+//! existing bounds of A and B, if there are any (i.e., if `UB(A) != top
+//! && LB(B) != bot`).  If that succeeds, we're done.  If it fails, then
+//! we merge A and B into the same variable.
+//!
+//! This is not clearly the correct course.  For example, if `UB(A) !=
+//! top` but `LB(B) == bot`, we could conceivably set `LB(B)` to `UB(A)`
+//! and leave the variables unmerged.  This is sometimes the better
+//! course, it depends on the program.
+//!
+//! The main case which fails today that I would like to support is:
+//!
+//! ```text
+//! fn foo<T>(x: T, y: T) { ... }
+//!
+//! fn bar() {
+//!     let x: @mut int = @mut 3;
+//!     let y: @int = @3;
+//!     foo(x, y);
+//! }
+//! ```
+//!
+//! In principle, the inferencer ought to find that the parameter `T` to
+//! `foo(x, y)` is `@const int`.  Today, however, it does not; this is
+//! because the type variable `T` is merged with the type variable for
+//! `X`, and thus inherits its UB/LB of `@mut int`.  This leaves no
+//! flexibility for `T` to later adjust to accommodate `@int`.
+//!
+//! ### What to do when not all bounds are present
+//!
+//! In the prior discussion we assumed that A.ub was not top and B.lb was
+//! not bot.  Unfortunately this is rarely the case.  Often type variables
+//! have "lopsided" bounds.  For example, if a variable in the program has
+//! been initialized but has not been used, then its corresponding type
+//! variable will have a lower bound but no upper bound.  When that
+//! variable is then used, we would like to know its upper bound---but we
+//! don't have one!  In this case we'll do different things depending on
+//! how the variable is being used.
+//!
+//! ## Transactional support
+//!
+//! Whenever we merge variables or adjust their bounds, we always
+//! keep a record of the old value.  This allows the changes to be undone.
+//!
+//! ## Regions
+//!
+//! I've only talked about type variables here, but region variables
+//! follow the same principle.  They have upper- and lower-bounds.  A
+//! region A is a subregion of a region B if A being valid implies that B
+//! is valid.  This basically corresponds to the block nesting structure:
+//! the regions for outer block scopes are superregions of those for inner
+//! block scopes.
+//!
+//! ## Integral and floating-point type variables
+//!
+//! There is a third variety of type variable that we use only for
+//! inferring the types of unsuffixed integer literals.  Integral type
+//! variables differ from general-purpose type variables in that there's
+//! no subtyping relationship among the various integral types, so instead
+//! of associating each variable with an upper and lower bound, we just
+//! use simple unification.  Each integer variable is associated with at
+//! most one integer type.  Floating point types are handled similarly to
+//! integral types.
+//!
+//! ## GLB/LUB
+//!
+//! Computing the greatest-lower-bound and least-upper-bound of two
+//! types/regions is generally straightforward except when type variables
+//! are involved. In that case, we follow a similar "try to use the bounds
+//! when possible but otherwise merge the variables" strategy.  In other
+//! words, `GLB(A, B)` where `A` and `B` are variables will often result
+//! in `A` and `B` being merged and the result being `A`.
+//!
+//! ## Type coercion
+//!
+//! We have a notion of assignability which differs somewhat from
+//! subtyping; in particular it may cause region borrowing to occur.  See
+//! the big comment later in this file on Type Coercion for specifics.
+//!
+//! ### In conclusion
+//!
+//! I showed you three ways to relate `A` and `B`.  There are also more,
+//! of course, though I'm not sure if there are any more sensible options.
+//! The main point is that there are various options, each of which
+//! produce a distinct range of types for `A` and `B`.  Depending on what
+//! the correct values for A and B are, one of these options will be the
+//! right choice: but of course we don't know the right values for A and B
+//! yet, that's what we're trying to find!  In our code, we opt to unify
+//! (Option #1).
+//!
+//! # Implementation details
+//!
+//! We make use of a trait-like implementation strategy to consolidate
+//! duplicated code between subtypes, GLB, and LUB computations.  See the
+//! section on "Type Combining" below for details.
diff --git a/src/librustc/middle/typeck/infer/error_reporting.rs b/src/librustc/middle/typeck/infer/error_reporting.rs
index bc36a2bd801b4..abc68852f4bdf 100644
--- a/src/librustc/middle/typeck/infer/error_reporting.rs
+++ b/src/librustc/middle/typeck/infer/error_reporting.rs
@@ -8,56 +8,53 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-
-Error Reporting Code for the inference engine
-
-Because of the way inference, and in particular region inference,
-works, it often happens that errors are not detected until far after
-the relevant line of code has been type-checked. Therefore, there is
-an elaborate system to track why a particular constraint in the
-inference graph arose so that we can explain to the user what gave
-rise to a particular error.
-
-The basis of the system are the "origin" types. An "origin" is the
-reason that a constraint or inference variable arose. There are
-different "origin" enums for different kinds of constraints/variables
-(e.g., `TypeOrigin`, `RegionVariableOrigin`). An origin always has
-a span, but also more information so that we can generate a meaningful
-error message.
-
-Having a catalogue of all the different reasons an error can arise is
-also useful for other reasons, like cross-referencing FAQs etc, though
-we are not really taking advantage of this yet.
-
-# Region Inference
-
-Region inference is particularly tricky because it always succeeds "in
-the moment" and simply registers a constraint. Then, at the end, we
-can compute the full graph and report errors, so we need to be able to
-store and later report what gave rise to the conflicting constraints.
-
-# Subtype Trace
-
-Determing whether `T1 <: T2` often involves a number of subtypes and
-subconstraints along the way. A "TypeTrace" is an extended version
-of an origin that traces the types and other values that were being
-compared. It is not necessarily comprehensive (in fact, at the time of
-this writing it only tracks the root values being compared) but I'd
-like to extend it to include significant "waypoints". For example, if
-you are comparing `(T1, T2) <: (T3, T4)`, and the problem is that `T2
-<: T4` fails, I'd like the trace to include enough information to say
-"in the 2nd element of the tuple". Similarly, failures when comparing
-arguments or return types in fn types should be able to cite the
-specific position, etc.
-
-# Reality vs plan
-
-Of course, there is still a LOT of code in typeck that has yet to be
-ported to this system, and which relies on string concatenation at the
-time of error detection.
-
-*/
+//! Error Reporting Code for the inference engine
+//!
+//! Because of the way inference, and in particular region inference,
+//! works, it often happens that errors are not detected until far after
+//! the relevant line of code has been type-checked. Therefore, there is
+//! an elaborate system to track why a particular constraint in the
+//! inference graph arose so that we can explain to the user what gave
+//! rise to a particular error.
+//!
+//! The basis of the system are the "origin" types. An "origin" is the
+//! reason that a constraint or inference variable arose. There are
+//! different "origin" enums for different kinds of constraints/variables
+//! (e.g., `TypeOrigin`, `RegionVariableOrigin`). An origin always has
+//! a span, but also more information so that we can generate a meaningful
+//! error message.
+//!
+//! Having a catalogue of all the different reasons an error can arise is
+//! also useful for other reasons, like cross-referencing FAQs etc, though
+//! we are not really taking advantage of this yet.
+//!
+//! # Region Inference
+//!
+//! Region inference is particularly tricky because it always succeeds "in
+//! the moment" and simply registers a constraint. Then, at the end, we
+//! can compute the full graph and report errors, so we need to be able to
+//! store and later report what gave rise to the conflicting constraints.
+//!
+//! # Subtype Trace
+//!
+//! Determining whether `T1 <: T2` often involves a number of subtypes and
+//! subconstraints along the way. A "TypeTrace" is an extended version
+//! of an origin that traces the types and other values that were being
+//! compared. It is not necessarily comprehensive (in fact, at the time of
+//! this writing it only tracks the root values being compared) but I'd
+//! like to extend it to include significant "waypoints". For example, if
+//! you are comparing `(T1, T2) <: (T3, T4)`, and the problem is that `T2
+//! <: T4` fails, I'd like the trace to include enough information to say
+//! "in the 2nd element of the tuple". Similarly, failures when comparing
+//! arguments or return types in fn types should be able to cite the
+//! specific position, etc.
+//!
+//! # Reality vs plan
+//!
+//! Of course, there is still a LOT of code in typeck that has yet to be
+//! ported to this system, and which relies on string concatenation at the
+//! time of error detection.
+
 use self::FreshOrKept::*;
 
 use std::collections::HashSet;
@@ -391,11 +388,9 @@ impl<'a, 'tcx> ErrorReporting<'tcx> for InferCtxt<'a, 'tcx> {
         ty::note_and_explain_type_err(self.tcx, terr);
     }
 
+    /// Returns a string of the form "expected `{}`, found `{}`", or None if this is a derived
+    /// error.
     fn values_str(&self, values: &ValuePairs<'tcx>) -> Option<String> {
-        /*!
-         * Returns a string of the form "expected `{}`, found `{}`",
-         * or None if this is a derived error.
-         */
         match *values {
             infer::Types(ref exp_found) => self.expected_found_str(exp_found),
             infer::TraitRefs(ref exp_found) => self.expected_found_str(exp_found)
diff --git a/src/librustc/middle/typeck/infer/higher_ranked/doc.rs b/src/librustc/middle/typeck/infer/higher_ranked/doc.rs
index 4c4452ac89236..2bad3616a05d1 100644
--- a/src/librustc/middle/typeck/infer/higher_ranked/doc.rs
+++ b/src/librustc/middle/typeck/infer/higher_ranked/doc.rs
@@ -8,408 +8,404 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-
-# Skolemization and functions
-
-One of the trickiest and most subtle aspects of regions is dealing
-with higher-ranked things which include bound region variables, such
-as function types. I strongly suggest that if you want to understand
-the situation, you read this paper (which is, admittedly, very long,
-but you don't have to read the whole thing):
-
-http://research.microsoft.com/en-us/um/people/simonpj/papers/higher-rank/
-
-Although my explanation will never compete with SPJ's (for one thing,
-his is approximately 100 pages), I will attempt to explain the basic
-problem and also how we solve it. Note that the paper only discusses
-subtyping, not the computation of LUB/GLB.
-
-The problem we are addressing is that there is a kind of subtyping
-between functions with bound region parameters. Consider, for
-example, whether the following relation holds:
-
-    for<'a> fn(&'a int) <: for<'b> fn(&'b int)? (Yes, a => b)
-
-The answer is that of course it does. These two types are basically
-the same, except that in one we used the name `a` and one we used
-the name `b`.
-
-In the examples that follow, it becomes very important to know whether
-a lifetime is bound in a function type (that is, is a lifetime
-parameter) or appears free (is defined in some outer scope).
-Therefore, from now on I will always write the bindings explicitly,
-using the Rust syntax `for<'a> fn(&'a int)` to indicate that `a` is a
-lifetime parameter.
-
-Now let's consider two more function types. Here, we assume that the
-`'b` lifetime is defined somewhere outside and hence is not a lifetime
-parameter bound by the function type (it "appears free"):
-
-    for<'a> fn(&'a int) <: fn(&'b int)? (Yes, a => b)
-
-This subtyping relation does in fact hold. To see why, you have to
-consider what subtyping means. One way to look at `T1 <: T2` is to
-say that it means that it is always ok to treat an instance of `T1` as
-if it had the type `T2`. So, with our functions, it is always ok to
-treat a function that can take pointers with any lifetime as if it
-were a function that can only take a pointer with the specific
-lifetime `'b`. After all, `'b` is a lifetime, after all, and
-the function can take values of any lifetime.
-
-You can also look at subtyping as the *is a* relationship. This amounts
-to the same thing: a function that accepts pointers with any lifetime
-*is a* function that accepts pointers with some specific lifetime.
-
-So, what if we reverse the order of the two function types, like this:
-
-    fn(&'b int) <: for<'a> fn(&'a int)? (No)
-
-Does the subtyping relationship still hold?  The answer of course is
-no. In this case, the function accepts *only the lifetime `'b`*,
-so it is not reasonable to treat it as if it were a function that
-accepted any lifetime.
-
-What about these two examples:
-
-    for<'a,'b> fn(&'a int, &'b int) <: for<'a>    fn(&'a int, &'a int)? (Yes)
-    for<'a>    fn(&'a int, &'a int) <: for<'a,'b> fn(&'a int, &'b int)? (No)
-
-Here, it is true that functions which take two pointers with any two
-lifetimes can be treated as if they only accepted two pointers with
-the same lifetime, but not the reverse.
-
-## The algorithm
-
-Here is the algorithm we use to perform the subtyping check:
-
-1. Replace all bound regions in the subtype with new variables
-2. Replace all bound regions in the supertype with skolemized
-   equivalents. A "skolemized" region is just a new fresh region
-   name.
-3. Check that the parameter and return types match as normal
-4. Ensure that no skolemized regions 'leak' into region variables
-   visible from "the outside"
-
-Let's walk through some examples and see how this algorithm plays out.
-
-#### First example
-
-We'll start with the first example, which was:
-
-    1. for<'a> fn(&'a T) <: for<'b> fn(&'b T)?        Yes: a -> b
-
-After steps 1 and 2 of the algorithm we will have replaced the types
-like so:
-
-    1. fn(&'A T) <: fn(&'x T)?
-
-Here the upper case `&A` indicates a *region variable*, that is, a
-region whose value is being inferred by the system. I also replaced
-`&b` with `&x`---I'll use letters late in the alphabet (`x`, `y`, `z`)
-to indicate skolemized region names. We can assume they don't appear
-elsewhere. Note that neither the sub- nor the supertype bind any
-region names anymore (as indicated by the absence of `<` and `>`).
-
-The next step is to check that the parameter types match. Because
-parameters are contravariant, this means that we check whether:
-
-    &'x T <: &'A T
-
-Region pointers are contravariant so this implies that
-
-    &A <= &x
-
-must hold, where `<=` is the subregion relationship. Processing
-*this* constrain simply adds a constraint into our graph that `&A <=
-&x` and is considered successful (it can, for example, be satisfied by
-choosing the value `&x` for `&A`).
-
-So far we have encountered no error, so the subtype check succeeds.
-
-#### The third example
-
-Now let's look first at the third example, which was:
-
-    3. fn(&'a T)    <: for<'b> fn(&'b T)?        No!
-
-After steps 1 and 2 of the algorithm we will have replaced the types
-like so:
-
-    3. fn(&'a T) <: fn(&'x T)?
-
-This looks pretty much the same as before, except that on the LHS
-`'a` was not bound, and hence was left as-is and not replaced with
-a variable. The next step is again to check that the parameter types
-match. This will ultimately require (as before) that `'a` <= `&x`
-must hold: but this does not hold. `self` and `x` are both distinct
-free regions. So the subtype check fails.
-
-#### Checking for skolemization leaks
-
-You may be wondering about that mysterious last step in the algorithm.
-So far it has not been relevant. The purpose of that last step is to
-catch something like *this*:
-
-    for<'a> fn() -> fn(&'a T) <: fn() -> for<'b> fn(&'b T)?   No.
-
-Here the function types are the same but for where the binding occurs.
-The subtype returns a function that expects a value in precisely one
-region. The supertype returns a function that expects a value in any
-region. If we allow an instance of the subtype to be used where the
-supertype is expected, then, someone could call the fn and think that
-the return value has type `fn<b>(&'b T)` when it really has type
-`fn(&'a T)` (this is case #3, above). Bad.
-
-So let's step through what happens when we perform this subtype check.
-We first replace the bound regions in the subtype (the supertype has
-no bound regions). This gives us:
-
-    fn() -> fn(&'A T) <: fn() -> for<'b> fn(&'b T)?
-
-Now we compare the return types, which are covariant, and hence we have:
-
-    fn(&'A T) <: for<'b> fn(&'b T)?
-
-Here we skolemize the bound region in the supertype to yield:
-
-    fn(&'A T) <: fn(&'x T)?
-
-And then proceed to compare the argument types:
-
-    &'x T <: &'A T
-    'A <= 'x
-
-Finally, this is where it gets interesting!  This is where an error
-*should* be reported. But in fact this will not happen. The reason why
-is that `A` is a variable: we will infer that its value is the fresh
-region `x` and think that everything is happy. In fact, this behavior
-is *necessary*, it was key to the first example we walked through.
-
-The difference between this example and the first one is that the variable
-`A` already existed at the point where the skolemization occurred. In
-the first example, you had two functions:
-
-    for<'a> fn(&'a T) <: for<'b> fn(&'b T)
-
-and hence `&A` and `&x` were created "together". In general, the
-intention of the skolemized names is that they are supposed to be
-fresh names that could never be equal to anything from the outside.
-But when inference comes into play, we might not be respecting this
-rule.
-
-So the way we solve this is to add a fourth step that examines the
-constraints that refer to skolemized names. Basically, consider a
-non-directed verison of the constraint graph. Let `Tainted(x)` be the
-set of all things reachable from a skolemized variable `x`.
-`Tainted(x)` should not contain any regions that existed before the
-step at which the skolemization was performed. So this case here
-would fail because `&x` was created alone, but is relatable to `&A`.
-
-## Computing the LUB and GLB
-
-The paper I pointed you at is written for Haskell. It does not
-therefore considering subtyping and in particular does not consider
-LUB or GLB computation. We have to consider this. Here is the
-algorithm I implemented.
-
-First though, let's discuss what we are trying to compute in more
-detail. The LUB is basically the "common supertype" and the GLB is
-"common subtype"; one catch is that the LUB should be the
-*most-specific* common supertype and the GLB should be *most general*
-common subtype (as opposed to any common supertype or any common
-subtype).
-
-Anyway, to help clarify, here is a table containing some function
-pairs and their LUB/GLB (for conciseness, in this table, I'm just
-including the lifetimes here, not the rest of the types, and I'm
-writing `fn<>` instead of `for<> fn`):
-
-```
-Type 1                Type 2                LUB                    GLB
-fn<'a>('a)            fn('X)                fn('X)                 fn<'a>('a)
-fn('a)                fn('X)                --                     fn<'a>('a)
-fn<'a,'b>('a, 'b)     fn<'x>('x, 'x)        fn<'a>('a, 'a)         fn<'a,'b>('a, 'b)
-fn<'a,'b>('a, 'b, 'a) fn<'x,'y>('x, 'y, 'y) fn<'a>('a, 'a, 'a)     fn<'a,'b,'c>('a,'b,'c)
-```
-
-### Conventions
-
-I use lower-case letters (e.g., `&a`) for bound regions and upper-case
-letters for free regions (`&A`).  Region variables written with a
-dollar-sign (e.g., `$a`).  I will try to remember to enumerate the
-bound-regions on the fn type as well (e.g., `for<'a> fn(&a)`).
-
-### High-level summary
-
-Both the LUB and the GLB algorithms work in a similar fashion.  They
-begin by replacing all bound regions (on both sides) with fresh region
-inference variables.  Therefore, both functions are converted to types
-that contain only free regions.  We can then compute the LUB/GLB in a
-straightforward way, as described in `combine.rs`.  This results in an
-interim type T.  The algorithms then examine the regions that appear
-in T and try to, in some cases, replace them with bound regions to
-yield the final result.
-
-To decide whether to replace a region `R` that appears in `T` with a
-bound region, the algorithms make use of two bits of information.
-First is a set `V` that contains all region variables created as part
-of the LUB/GLB computation. `V` will contain the region variables
-created to replace the bound regions in the input types, but it also
-contains 'intermediate' variables created to represent the LUB/GLB of
-individual regions.  Basically, when asked to compute the LUB/GLB of a
-region variable with another region, the inferencer cannot oblige
-immediately since the values of that variables are not known.
-Therefore, it creates a new variable that is related to the two
-regions.  For example, the LUB of two variables `$x` and `$y` is a
-fresh variable `$z` that is constrained such that `$x <= $z` and `$y
-<= $z`.  So `V` will contain these intermediate variables as well.
-
-The other important factor in deciding how to replace a region in T is
-the function `Tainted($r)` which, for a region variable, identifies
-all regions that the region variable is related to in some way
-(`Tainted()` made an appearance in the subtype computation as well).
-
-### LUB
-
-The LUB algorithm proceeds in three steps:
-
-1. Replace all bound regions (on both sides) with fresh region
-   inference variables.
-2. Compute the LUB "as normal", meaning compute the GLB of each
-   pair of argument types and the LUB of the return types and
-   so forth.  Combine those to a new function type `F`.
-3. Replace each region `R` that appears in `F` as follows:
-   - Let `V` be the set of variables created during the LUB
-     computational steps 1 and 2, as described in the previous section.
-   - If `R` is not in `V`, replace `R` with itself.
-   - If `Tainted(R)` contains a region that is not in `V`,
-     replace `R` with itself.
-   - Otherwise, select the earliest variable in `Tainted(R)` that originates
-     from the left-hand side and replace `R` with the bound region that
-     this variable was a replacement for.
-
-So, let's work through the simplest example: `fn(&A)` and `for<'a> fn(&a)`.
-In this case, `&a` will be replaced with `$a` and the interim LUB type
-`fn($b)` will be computed, where `$b=GLB(&A,$a)`.  Therefore, `V =
-{$a, $b}` and `Tainted($b) = { $b, $a, &A }`.  When we go to replace
-`$b`, we find that since `&A \in Tainted($b)` is not a member of `V`,
-we leave `$b` as is.  When region inference happens, `$b` will be
-resolved to `&A`, as we wanted.
-
-Let's look at a more complex one: `fn(&a, &b)` and `fn(&x, &x)`.  In
-this case, we'll end up with a (pre-replacement) LUB type of `fn(&g,
-&h)` and a graph that looks like:
-
-```
-     $a        $b     *--$x
-       \        \    /  /
-        \        $h-*  /
-         $g-----------*
-```
-
-Here `$g` and `$h` are fresh variables that are created to represent
-the LUB/GLB of things requiring inference.  This means that `V` and
-`Tainted` will look like:
-
-```
-V = {$a, $b, $g, $h, $x}
-Tainted($g) = Tainted($h) = { $a, $b, $h, $g, $x }
-```
-
-Therefore we replace both `$g` and `$h` with `$a`, and end up
-with the type `fn(&a, &a)`.
-
-### GLB
-
-The procedure for computing the GLB is similar.  The difference lies
-in computing the replacements for the various variables. For each
-region `R` that appears in the type `F`, we again compute `Tainted(R)`
-and examine the results:
-
-1. If `R` is not in `V`, it is not replaced.
-2. Else, if `Tainted(R)` contains only variables in `V`, and it
-   contains exactly one variable from the LHS and one variable from
-   the RHS, then `R` can be mapped to the bound version of the
-   variable from the LHS.
-3. Else, if `Tainted(R)` contains no variable from the LHS and no
-   variable from the RHS, then `R` can be mapped to itself.
-4. Else, `R` is mapped to a fresh bound variable.
-
-These rules are pretty complex.  Let's look at some examples to see
-how they play out.
-
-Out first example was `fn(&a)` and `fn(&X)`.  In this case, `&a` will
-be replaced with `$a` and we will ultimately compute a
-(pre-replacement) GLB type of `fn($g)` where `$g=LUB($a,&X)`.
-Therefore, `V={$a,$g}` and `Tainted($g)={$g,$a,&X}.  To find the
-replacement for `$g` we consult the rules above:
-- Rule (1) does not apply because `$g \in V`
-- Rule (2) does not apply because `&X \in Tainted($g)`
-- Rule (3) does not apply because `$a \in Tainted($g)`
-- Hence, by rule (4), we replace `$g` with a fresh bound variable `&z`.
-So our final result is `fn(&z)`, which is correct.
-
-The next example is `fn(&A)` and `fn(&Z)`. In this case, we will again
-have a (pre-replacement) GLB of `fn(&g)`, where `$g = LUB(&A,&Z)`.
-Therefore, `V={$g}` and `Tainted($g) = {$g, &A, &Z}`.  In this case,
-by rule (3), `$g` is mapped to itself, and hence the result is
-`fn($g)`.  This result is correct (in this case, at least), but it is
-indicative of a case that *can* lead us into concluding that there is
-no GLB when in fact a GLB does exist.  See the section "Questionable
-Results" below for more details.
-
-The next example is `fn(&a, &b)` and `fn(&c, &c)`. In this case, as
-before, we'll end up with `F=fn($g, $h)` where `Tainted($g) =
-Tainted($h) = {$g, $h, $a, $b, $c}`.  Only rule (4) applies and hence
-we'll select fresh bound variables `y` and `z` and wind up with
-`fn(&y, &z)`.
-
-For the last example, let's consider what may seem trivial, but is
-not: `fn(&a, &a)` and `fn(&b, &b)`.  In this case, we'll get `F=fn($g,
-$h)` where `Tainted($g) = {$g, $a, $x}` and `Tainted($h) = {$h, $a,
-$x}`.  Both of these sets contain exactly one bound variable from each
-side, so we'll map them both to `&a`, resulting in `fn(&a, &a)`, which
-is the desired result.
-
-### Shortcomings and correctness
-
-You may be wondering whether this algorithm is correct.  The answer is
-"sort of".  There are definitely cases where they fail to compute a
-result even though a correct result exists.  I believe, though, that
-if they succeed, then the result is valid, and I will attempt to
-convince you.  The basic argument is that the "pre-replacement" step
-computes a set of constraints.  The replacements, then, attempt to
-satisfy those constraints, using bound identifiers where needed.
-
-For now I will briefly go over the cases for LUB/GLB and identify
-their intent:
-
-- LUB:
-  - The region variables that are substituted in place of bound regions
-    are intended to collect constraints on those bound regions.
-  - If Tainted(R) contains only values in V, then this region is unconstrained
-    and can therefore be generalized, otherwise it cannot.
-- GLB:
-  - The region variables that are substituted in place of bound regions
-    are intended to collect constraints on those bound regions.
-  - If Tainted(R) contains exactly one variable from each side, and
-    only variables in V, that indicates that those two bound regions
-    must be equated.
-  - Otherwise, if Tainted(R) references any variables from left or right
-    side, then it is trying to combine a bound region with a free one or
-    multiple bound regions, so we need to select fresh bound regions.
-
-Sorry this is more of a shorthand to myself.  I will try to write up something
-more convincing in the future.
-
-#### Where are the algorithms wrong?
-
-- The pre-replacement computation can fail even though using a
-  bound-region would have succeeded.
-- We will compute GLB(fn(fn($a)), fn(fn($b))) as fn($c) where $c is the
-  GLB of $a and $b.  But if inference finds that $a and $b must be mapped
-  to regions without a GLB, then this is effectively a failure to compute
-  the GLB.  However, the result `fn<$c>(fn($c))` is a valid GLB.
-
- */
+//! # Skolemization and functions
+//!
+//! One of the trickiest and most subtle aspects of regions is dealing
+//! with higher-ranked things which include bound region variables, such
+//! as function types. I strongly suggest that if you want to understand
+//! the situation, you read this paper (which is, admittedly, very long,
+//! but you don't have to read the whole thing):
+//!
+//! http://research.microsoft.com/en-us/um/people/simonpj/papers/higher-rank/
+//!
+//! Although my explanation will never compete with SPJ's (for one thing,
+//! his is approximately 100 pages), I will attempt to explain the basic
+//! problem and also how we solve it. Note that the paper only discusses
+//! subtyping, not the computation of LUB/GLB.
+//!
+//! The problem we are addressing is that there is a kind of subtyping
+//! between functions with bound region parameters. Consider, for
+//! example, whether the following relation holds:
+//!
+//!     for<'a> fn(&'a int) <: for<'b> fn(&'b int)? (Yes, a => b)
+//!
+//! The answer is that of course it does. These two types are basically
+//! the same, except that in one we used the name `a` and one we used
+//! the name `b`.
+//!
+//! In the examples that follow, it becomes very important to know whether
+//! a lifetime is bound in a function type (that is, is a lifetime
+//! parameter) or appears free (is defined in some outer scope).
+//! Therefore, from now on I will always write the bindings explicitly,
+//! using the Rust syntax `for<'a> fn(&'a int)` to indicate that `a` is a
+//! lifetime parameter.
+//!
+//! Now let's consider two more function types. Here, we assume that the
+//! `'b` lifetime is defined somewhere outside and hence is not a lifetime
+//! parameter bound by the function type (it "appears free"):
+//!
+//!     for<'a> fn(&'a int) <: fn(&'b int)? (Yes, a => b)
+//!
+//! This subtyping relation does in fact hold. To see why, you have to
+//! consider what subtyping means. One way to look at `T1 <: T2` is to
+//! say that it means that it is always ok to treat an instance of `T1` as
+//! if it had the type `T2`. So, with our functions, it is always ok to
+//! treat a function that can take pointers with any lifetime as if it
+//! were a function that can only take a pointer with the specific
+//! lifetime `'b`. After all, `'b` is a lifetime, and
+//! the function can take values of any lifetime.
+//!
+//! You can also look at subtyping as the *is a* relationship. This amounts
+//! to the same thing: a function that accepts pointers with any lifetime
+//! *is a* function that accepts pointers with some specific lifetime.
+//!
+//! So, what if we reverse the order of the two function types, like this:
+//!
+//!     fn(&'b int) <: for<'a> fn(&'a int)? (No)
+//!
+//! Does the subtyping relationship still hold?  The answer of course is
+//! no. In this case, the function accepts *only the lifetime `'b`*,
+//! so it is not reasonable to treat it as if it were a function that
+//! accepted any lifetime.
+//!
+//! What about these two examples:
+//!
+//!     for<'a,'b> fn(&'a int, &'b int) <: for<'a>    fn(&'a int, &'a int)? (Yes)
+//!     for<'a>    fn(&'a int, &'a int) <: for<'a,'b> fn(&'a int, &'b int)? (No)
+//!
+//! Here, it is true that functions which take two pointers with any two
+//! lifetimes can be treated as if they only accepted two pointers with
+//! the same lifetime, but not the reverse.
+//!
+//! ## The algorithm
+//!
+//! Here is the algorithm we use to perform the subtyping check:
+//!
+//! 1. Replace all bound regions in the subtype with new variables
+//! 2. Replace all bound regions in the supertype with skolemized
+//!    equivalents. A "skolemized" region is just a new fresh region
+//!    name.
+//! 3. Check that the parameter and return types match as normal
+//! 4. Ensure that no skolemized regions 'leak' into region variables
+//!    visible from "the outside"
+//!
+//! Let's walk through some examples and see how this algorithm plays out.
+//!
+//! #### First example
+//!
+//! We'll start with the first example, which was:
+//!
+//!     1. for<'a> fn(&'a T) <: for<'b> fn(&'b T)?        Yes: a -> b
+//!
+//! After steps 1 and 2 of the algorithm we will have replaced the types
+//! like so:
+//!
+//!     1. fn(&'A T) <: fn(&'x T)?
+//!
+//! Here the upper case `&A` indicates a *region variable*, that is, a
+//! region whose value is being inferred by the system. I also replaced
+//! `&b` with `&x`---I'll use letters late in the alphabet (`x`, `y`, `z`)
+//! to indicate skolemized region names. We can assume they don't appear
+//! elsewhere. Note that neither the sub- nor the supertype bind any
+//! region names anymore (as indicated by the absence of `<` and `>`).
+//!
+//! The next step is to check that the parameter types match. Because
+//! parameters are contravariant, this means that we check whether:
+//!
+//!     &'x T <: &'A T
+//!
+//! Region pointers are contravariant so this implies that
+//!
+//!     &A <= &x
+//!
+//! must hold, where `<=` is the subregion relationship. Processing
+//! *this* constraint simply adds a constraint into our graph that `&A <=
+//! &x` and is considered successful (it can, for example, be satisfied by
+//! choosing the value `&x` for `&A`).
+//!
+//! So far we have encountered no error, so the subtype check succeeds.
+//!
+//! #### The third example
+//!
+//! Now let's look first at the third example, which was:
+//!
+//!     3. fn(&'a T)    <: for<'b> fn(&'b T)?        No!
+//!
+//! After steps 1 and 2 of the algorithm we will have replaced the types
+//! like so:
+//!
+//!     3. fn(&'a T) <: fn(&'x T)?
+//!
+//! This looks pretty much the same as before, except that on the LHS
+//! `'a` was not bound, and hence was left as-is and not replaced with
+//! a variable. The next step is again to check that the parameter types
+//! match. This will ultimately require (as before) that `'a` <= `&x`
+//! must hold: but this does not hold. `'a` and `x` are both distinct
+//! free regions. So the subtype check fails.
+//!
+//! #### Checking for skolemization leaks
+//!
+//! You may be wondering about that mysterious last step in the algorithm.
+//! So far it has not been relevant. The purpose of that last step is to
+//! catch something like *this*:
+//!
+//!     for<'a> fn() -> fn(&'a T) <: fn() -> for<'b> fn(&'b T)?   No.
+//!
+//! Here the function types are the same but for where the binding occurs.
+//! The subtype returns a function that expects a value in precisely one
+//! region. The supertype returns a function that expects a value in any
+//! region. If we allow an instance of the subtype to be used where the
+//! supertype is expected, then, someone could call the fn and think that
+//! the return value has type `fn<b>(&'b T)` when it really has type
+//! `fn(&'a T)` (this is case #3, above). Bad.
+//!
+//! So let's step through what happens when we perform this subtype check.
+//! We first replace the bound regions in the subtype (the supertype has
+//! no bound regions). This gives us:
+//!
+//!     fn() -> fn(&'A T) <: fn() -> for<'b> fn(&'b T)?
+//!
+//! Now we compare the return types, which are covariant, and hence we have:
+//!
+//!     fn(&'A T) <: for<'b> fn(&'b T)?
+//!
+//! Here we skolemize the bound region in the supertype to yield:
+//!
+//!     fn(&'A T) <: fn(&'x T)?
+//!
+//! And then proceed to compare the argument types:
+//!
+//!     &'x T <: &'A T
+//!     'A <= 'x
+//!
+//! Finally, this is where it gets interesting!  This is where an error
+//! *should* be reported. But in fact this will not happen. The reason why
+//! is that `A` is a variable: we will infer that its value is the fresh
+//! region `x` and think that everything is happy. In fact, this behavior
+//! is *necessary*, it was key to the first example we walked through.
+//!
+//! The difference between this example and the first one is that the variable
+//! `A` already existed at the point where the skolemization occurred. In
+//! the first example, you had two functions:
+//!
+//!     for<'a> fn(&'a T) <: for<'b> fn(&'b T)
+//!
+//! and hence `&A` and `&x` were created "together". In general, the
+//! intention of the skolemized names is that they are supposed to be
+//! fresh names that could never be equal to anything from the outside.
+//! But when inference comes into play, we might not be respecting this
+//! rule.
+//!
+//! So the way we solve this is to add a fourth step that examines the
+//! constraints that refer to skolemized names. Basically, consider a
+//! non-directed version of the constraint graph. Let `Tainted(x)` be the
+//! set of all things reachable from a skolemized variable `x`.
+//! `Tainted(x)` should not contain any regions that existed before the
+//! step at which the skolemization was performed. So this case here
+//! would fail because `&x` was created alone, but is relatable to `&A`.
+//!
+//! ## Computing the LUB and GLB
+//!
+//! The paper I pointed you at is written for Haskell. It does not
+//! therefore consider subtyping and in particular does not consider
+//! LUB or GLB computation. We have to consider this. Here is the
+//! algorithm I implemented.
+//!
+//! First though, let's discuss what we are trying to compute in more
+//! detail. The LUB is basically the "common supertype" and the GLB is
+//! "common subtype"; one catch is that the LUB should be the
+//! *most-specific* common supertype and the GLB should be *most general*
+//! common subtype (as opposed to any common supertype or any common
+//! subtype).
+//!
+//! Anyway, to help clarify, here is a table containing some function
+//! pairs and their LUB/GLB (for conciseness, in this table, I'm just
+//! including the lifetimes here, not the rest of the types, and I'm
+//! writing `fn<>` instead of `for<> fn`):
+//!
+//! ```
+//! Type 1                Type 2                LUB                    GLB
+//! fn<'a>('a)            fn('X)                fn('X)                 fn<'a>('a)
+//! fn('a)                fn('X)                --                     fn<'a>('a)
+//! fn<'a,'b>('a, 'b)     fn<'x>('x, 'x)        fn<'a>('a, 'a)         fn<'a,'b>('a, 'b)
+//! fn<'a,'b>('a, 'b, 'a) fn<'x,'y>('x, 'y, 'y) fn<'a>('a, 'a, 'a)     fn<'a,'b,'c>('a,'b,'c)
+//! ```
+//!
+//! ### Conventions
+//!
+//! I use lower-case letters (e.g., `&a`) for bound regions and upper-case
+//! letters for free regions (`&A`).  Region variables written with a
+//! dollar-sign (e.g., `$a`).  I will try to remember to enumerate the
+//! bound-regions on the fn type as well (e.g., `for<'a> fn(&a)`).
+//!
+//! ### High-level summary
+//!
+//! Both the LUB and the GLB algorithms work in a similar fashion.  They
+//! begin by replacing all bound regions (on both sides) with fresh region
+//! inference variables.  Therefore, both functions are converted to types
+//! that contain only free regions.  We can then compute the LUB/GLB in a
+//! straightforward way, as described in `combine.rs`.  This results in an
+//! interim type T.  The algorithms then examine the regions that appear
+//! in T and try to, in some cases, replace them with bound regions to
+//! yield the final result.
+//!
+//! To decide whether to replace a region `R` that appears in `T` with a
+//! bound region, the algorithms make use of two bits of information.
+//! First is a set `V` that contains all region variables created as part
+//! of the LUB/GLB computation. `V` will contain the region variables
+//! created to replace the bound regions in the input types, but it also
+//! contains 'intermediate' variables created to represent the LUB/GLB of
+//! individual regions.  Basically, when asked to compute the LUB/GLB of a
+//! region variable with another region, the inferencer cannot oblige
+//! immediately since the values of that variables are not known.
+//! Therefore, it creates a new variable that is related to the two
+//! regions.  For example, the LUB of two variables `$x` and `$y` is a
+//! fresh variable `$z` that is constrained such that `$x <= $z` and `$y
+//! <= $z`.  So `V` will contain these intermediate variables as well.
+//!
+//! The other important factor in deciding how to replace a region in T is
+//! the function `Tainted($r)` which, for a region variable, identifies
+//! all regions that the region variable is related to in some way
+//! (`Tainted()` made an appearance in the subtype computation as well).
+//!
+//! ### LUB
+//!
+//! The LUB algorithm proceeds in three steps:
+//!
+//! 1. Replace all bound regions (on both sides) with fresh region
+//!    inference variables.
+//! 2. Compute the LUB "as normal", meaning compute the GLB of each
+//!    pair of argument types and the LUB of the return types and
+//!    so forth.  Combine those to a new function type `F`.
+//! 3. Replace each region `R` that appears in `F` as follows:
+//!    - Let `V` be the set of variables created during the LUB
+//!      computational steps 1 and 2, as described in the previous section.
+//!    - If `R` is not in `V`, replace `R` with itself.
+//!    - If `Tainted(R)` contains a region that is not in `V`,
+//!      replace `R` with itself.
+//!    - Otherwise, select the earliest variable in `Tainted(R)` that originates
+//!      from the left-hand side and replace `R` with the bound region that
+//!      this variable was a replacement for.
+//!
+//! So, let's work through the simplest example: `fn(&A)` and `for<'a> fn(&a)`.
+//! In this case, `&a` will be replaced with `$a` and the interim LUB type
+//! `fn($b)` will be computed, where `$b=GLB(&A,$a)`.  Therefore, `V =
+//! {$a, $b}` and `Tainted($b) = { $b, $a, &A }`.  When we go to replace
+//! `$b`, we find that since `&A \in Tainted($b)` is not a member of `V`,
+//! we leave `$b` as is.  When region inference happens, `$b` will be
+//! resolved to `&A`, as we wanted.
+//!
+//! Let's look at a more complex one: `fn(&a, &b)` and `fn(&x, &x)`.  In
+//! this case, we'll end up with a (pre-replacement) LUB type of `fn(&g,
+//! &h)` and a graph that looks like:
+//!
+//! ```
+//!      $a        $b     *--$x
+//!        \        \    /  /
+//!         \        $h-*  /
+//!          $g-----------*
+//! ```
+//!
+//! Here `$g` and `$h` are fresh variables that are created to represent
+//! the LUB/GLB of things requiring inference.  This means that `V` and
+//! `Tainted` will look like:
+//!
+//! ```
+//! V = {$a, $b, $g, $h, $x}
+//! Tainted($g) = Tainted($h) = { $a, $b, $h, $g, $x }
+//! ```
+//!
+//! Therefore we replace both `$g` and `$h` with `$a`, and end up
+//! with the type `fn(&a, &a)`.
+//!
+//! ### GLB
+//!
+//! The procedure for computing the GLB is similar.  The difference lies
+//! in computing the replacements for the various variables. For each
+//! region `R` that appears in the type `F`, we again compute `Tainted(R)`
+//! and examine the results:
+//!
+//! 1. If `R` is not in `V`, it is not replaced.
+//! 2. Else, if `Tainted(R)` contains only variables in `V`, and it
+//!    contains exactly one variable from the LHS and one variable from
+//!    the RHS, then `R` can be mapped to the bound version of the
+//!    variable from the LHS.
+//! 3. Else, if `Tainted(R)` contains no variable from the LHS and no
+//!    variable from the RHS, then `R` can be mapped to itself.
+//! 4. Else, `R` is mapped to a fresh bound variable.
+//!
+//! These rules are pretty complex.  Let's look at some examples to see
+//! how they play out.
+//!
+//! Our first example was `fn(&a)` and `fn(&X)`.  In this case, `&a` will
+//! be replaced with `$a` and we will ultimately compute a
+//! (pre-replacement) GLB type of `fn($g)` where `$g=LUB($a,&X)`.
+//! Therefore, `V={$a,$g}` and `Tainted($g)={$g,$a,&X}`.  To find the
+//! replacement for `$g` we consult the rules above:
+//! - Rule (1) does not apply because `$g \in V`
+//! - Rule (2) does not apply because `&X \in Tainted($g)`
+//! - Rule (3) does not apply because `$a \in Tainted($g)`
+//! - Hence, by rule (4), we replace `$g` with a fresh bound variable `&z`.
+//! So our final result is `fn(&z)`, which is correct.
+//!
+//! The next example is `fn(&A)` and `fn(&Z)`. In this case, we will again
+//! have a (pre-replacement) GLB of `fn(&g)`, where `$g = LUB(&A,&Z)`.
+//! Therefore, `V={$g}` and `Tainted($g) = {$g, &A, &Z}`.  In this case,
+//! by rule (3), `$g` is mapped to itself, and hence the result is
+//! `fn($g)`.  This result is correct (in this case, at least), but it is
+//! indicative of a case that *can* lead us into concluding that there is
+//! no GLB when in fact a GLB does exist.  See the section "Questionable
+//! Results" below for more details.
+//!
+//! The next example is `fn(&a, &b)` and `fn(&c, &c)`. In this case, as
+//! before, we'll end up with `F=fn($g, $h)` where `Tainted($g) =
+//! Tainted($h) = {$g, $h, $a, $b, $c}`.  Only rule (4) applies and hence
+//! we'll select fresh bound variables `y` and `z` and wind up with
+//! `fn(&y, &z)`.
+//!
+//! For the last example, let's consider what may seem trivial, but is
+//! not: `fn(&a, &a)` and `fn(&b, &b)`.  In this case, we'll get `F=fn($g,
+//! $h)` where `Tainted($g) = {$g, $a, $b}` and `Tainted($h) = {$h, $a,
+//! $b}`.  Both of these sets contain exactly one bound variable from each
+//! side, so we'll map them both to `&a`, resulting in `fn(&a, &a)`, which
+//! is the desired result.
+//!
+//! ### Shortcomings and correctness
+//!
+//! You may be wondering whether this algorithm is correct.  The answer is
+//! "sort of".  There are definitely cases where they fail to compute a
+//! result even though a correct result exists.  I believe, though, that
+//! if they succeed, then the result is valid, and I will attempt to
+//! convince you.  The basic argument is that the "pre-replacement" step
+//! computes a set of constraints.  The replacements, then, attempt to
+//! satisfy those constraints, using bound identifiers where needed.
+//!
+//! For now I will briefly go over the cases for LUB/GLB and identify
+//! their intent:
+//!
+//! - LUB:
+//!   - The region variables that are substituted in place of bound regions
+//!     are intended to collect constraints on those bound regions.
+//!   - If Tainted(R) contains only values in V, then this region is unconstrained
+//!     and can therefore be generalized, otherwise it cannot.
+//! - GLB:
+//!   - The region variables that are substituted in place of bound regions
+//!     are intended to collect constraints on those bound regions.
+//!   - If Tainted(R) contains exactly one variable from each side, and
+//!     only variables in V, that indicates that those two bound regions
+//!     must be equated.
+//!   - Otherwise, if Tainted(R) references any variables from left or right
+//!     side, then it is trying to combine a bound region with a free one or
+//!     multiple bound regions, so we need to select fresh bound regions.
+//!
+//! Sorry this is more of a shorthand to myself.  I will try to write up something
+//! more convincing in the future.
+//!
+//! #### Where are the algorithms wrong?
+//!
+//! - The pre-replacement computation can fail even though using a
+//!   bound-region would have succeeded.
+//! - We will compute GLB(fn(fn($a)), fn(fn($b))) as fn($c) where $c is the
+//!   GLB of $a and $b.  But if inference finds that $a and $b must be mapped
+//!   to regions without a GLB, then this is effectively a failure to compute
+//!   the GLB.  However, the result `fn<$c>(fn($c))` is a valid GLB.
diff --git a/src/librustc/middle/typeck/infer/higher_ranked/mod.rs b/src/librustc/middle/typeck/infer/higher_ranked/mod.rs
index 812aa5c555728..2f80a574bb18b 100644
--- a/src/librustc/middle/typeck/infer/higher_ranked/mod.rs
+++ b/src/librustc/middle/typeck/infer/higher_ranked/mod.rs
@@ -8,10 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
- * Helper routines for higher-ranked things. See the `doc` module at
- * the end of the file for details.
- */
+//! Helper routines for higher-ranked things. See the `doc` module at
+//! the end of the file for details.
 
 use middle::ty::{mod, Ty, replace_late_bound_regions};
 use middle::typeck::infer::{mod, combine, cres, InferCtxt};
diff --git a/src/librustc/middle/typeck/infer/lattice.rs b/src/librustc/middle/typeck/infer/lattice.rs
index 6e6c631f00749..daec959d11cd3 100644
--- a/src/librustc/middle/typeck/infer/lattice.rs
+++ b/src/librustc/middle/typeck/infer/lattice.rs
@@ -8,28 +8,26 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
- * # Lattice Variables
- *
- * This file contains generic code for operating on inference variables
- * that are characterized by an upper- and lower-bound.  The logic and
- * reasoning is explained in detail in the large comment in `infer.rs`.
- *
- * The code in here is defined quite generically so that it can be
- * applied both to type variables, which represent types being inferred,
- * and fn variables, which represent function types being inferred.
- * It may eventually be applied to their types as well, who knows.
- * In some cases, the functions are also generic with respect to the
- * operation on the lattice (GLB vs LUB).
- *
- * Although all the functions are generic, we generally write the
- * comments in a way that is specific to type variables and the LUB
- * operation.  It's just easier that way.
- *
- * In general all of the functions are defined parametrically
- * over a `LatticeValue`, which is a value defined with respect to
- * a lattice.
- */
+//! # Lattice Variables
+//!
+//! This file contains generic code for operating on inference variables
+//! that are characterized by an upper- and lower-bound.  The logic and
+//! reasoning is explained in detail in the large comment in `infer.rs`.
+//!
+//! The code in here is defined quite generically so that it can be
+//! applied both to type variables, which represent types being inferred,
+//! and fn variables, which represent function types being inferred.
+//! It may eventually be applied to their types as well, who knows.
+//! In some cases, the functions are also generic with respect to the
+//! operation on the lattice (GLB vs LUB).
+//!
+//! Although all the functions are generic, we generally write the
+//! comments in a way that is specific to type variables and the LUB
+//! operation.  It's just easier that way.
+//!
+//! In general all of the functions are defined parametrically
+//! over a `LatticeValue`, which is a value defined with respect to
+//! a lattice.
 
 use middle::ty::{TyVar};
 use middle::ty::{mod, Ty};
diff --git a/src/librustc/middle/typeck/infer/mod.rs b/src/librustc/middle/typeck/infer/mod.rs
index 93c11693091c2..c5845b143af89 100644
--- a/src/librustc/middle/typeck/infer/mod.rs
+++ b/src/librustc/middle/typeck/infer/mod.rs
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*! See doc.rs for documentation */
+//! See doc.rs for documentation
 
 #![allow(non_camel_case_types)]
 
@@ -305,6 +305,8 @@ pub fn new_infer_ctxt<'a, 'tcx>(tcx: &'a ty::ctxt<'tcx>)
     }
 }
 
+/// Computes the least upper-bound of `a` and `b`. If this is not possible, reports an error and
+/// returns ty::err.
 pub fn common_supertype<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>,
                                   origin: TypeOrigin,
                                   a_is_expected: bool,
@@ -312,11 +314,6 @@ pub fn common_supertype<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>,
                                   b: Ty<'tcx>)
                                   -> Ty<'tcx>
 {
-    /*!
-     * Computes the least upper-bound of `a` and `b`. If this is
-     * not possible, reports an error and returns ty::err.
-     */
-
     debug!("common_supertype({}, {})",
            a.repr(cx.tcx), b.repr(cx.tcx));
 
@@ -754,17 +751,13 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
             .collect()
     }
 
+    /// Given a set of generics defined on a type or impl, returns a substitution mapping each
+    /// type/region parameter to a fresh inference variable.
     pub fn fresh_substs_for_generics(&self,
                                      span: Span,
                                      generics: &ty::Generics<'tcx>)
                                      -> subst::Substs<'tcx>
     {
-        /*!
-         * Given a set of generics defined on a type or impl, returns
-         * a substitution mapping each type/region parameter to a
-         * fresh inference variable.
-         */
-
         let type_params =
             generics.types.map(
                 |_| self.next_ty_var());
@@ -774,18 +767,15 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
         subst::Substs::new(type_params, region_params)
     }
 
+    /// Given a set of generics defined on a trait, returns a substitution mapping each output
+    /// type/region parameter to a fresh inference variable, and mapping the self type to
+    /// `self_ty`.
     pub fn fresh_substs_for_trait(&self,
                                   span: Span,
                                   generics: &ty::Generics<'tcx>,
                                   self_ty: Ty<'tcx>)
                                   -> subst::Substs<'tcx>
     {
-        /*!
-         * Given a set of generics defined on a trait, returns a
-         * substitution mapping each output type/region parameter to a
-         * fresh inference variable, and mapping the self type to
-         * `self_ty`.
-         */
 
         assert!(generics.types.len(subst::SelfSpace) == 1);
         assert!(generics.types.len(subst::FnSpace) == 0);
diff --git a/src/librustc/middle/typeck/infer/region_inference/doc.rs b/src/librustc/middle/typeck/infer/region_inference/doc.rs
index 40b41deeb2b66..b4eac4c002677 100644
--- a/src/librustc/middle/typeck/infer/region_inference/doc.rs
+++ b/src/librustc/middle/typeck/infer/region_inference/doc.rs
@@ -8,371 +8,367 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-
-Region inference module.
-
-# Terminology
-
-Note that we use the terms region and lifetime interchangeably,
-though the term `lifetime` is preferred.
-
-# Introduction
-
-Region inference uses a somewhat more involved algorithm than type
-inference.  It is not the most efficient thing ever written though it
-seems to work well enough in practice (famous last words).  The reason
-that we use a different algorithm is because, unlike with types, it is
-impractical to hand-annotate with regions (in some cases, there aren't
-even the requisite syntactic forms).  So we have to get it right, and
-it's worth spending more time on a more involved analysis.  Moreover,
-regions are a simpler case than types: they don't have aggregate
-structure, for example.
-
-Unlike normal type inference, which is similar in spirit to H-M and thus
-works progressively, the region type inference works by accumulating
-constraints over the course of a function.  Finally, at the end of
-processing a function, we process and solve the constraints all at
-once.
-
-The constraints are always of one of three possible forms:
-
-- ConstrainVarSubVar(R_i, R_j) states that region variable R_i
-  must be a subregion of R_j
-- ConstrainRegSubVar(R, R_i) states that the concrete region R
-  (which must not be a variable) must be a subregion of the varibale R_i
-- ConstrainVarSubReg(R_i, R) is the inverse
-
-# Building up the constraints
-
-Variables and constraints are created using the following methods:
-
-- `new_region_var()` creates a new, unconstrained region variable;
-- `make_subregion(R_i, R_j)` states that R_i is a subregion of R_j
-- `lub_regions(R_i, R_j) -> R_k` returns a region R_k which is
-  the smallest region that is greater than both R_i and R_j
-- `glb_regions(R_i, R_j) -> R_k` returns a region R_k which is
-  the greatest region that is smaller than both R_i and R_j
-
-The actual region resolution algorithm is not entirely
-obvious, though it is also not overly complex.
-
-## Snapshotting
-
-It is also permitted to try (and rollback) changes to the graph.  This
-is done by invoking `start_snapshot()`, which returns a value.  Then
-later you can call `rollback_to()` which undoes the work.
-Alternatively, you can call `commit()` which ends all snapshots.
-Snapshots can be recursive---so you can start a snapshot when another
-is in progress, but only the root snapshot can "commit".
-
-# Resolving constraints
-
-The constraint resolution algorithm is not super complex but also not
-entirely obvious.  Here I describe the problem somewhat abstractly,
-then describe how the current code works.  There may be other, smarter
-ways of doing this with which I am unfamiliar and can't be bothered to
-research at the moment. - NDM
-
-## The problem
-
-Basically our input is a directed graph where nodes can be divided
-into two categories: region variables and concrete regions.  Each edge
-`R -> S` in the graph represents a constraint that the region `R` is a
-subregion of the region `S`.
-
-Region variable nodes can have arbitrary degree.  There is one region
-variable node per region variable.
-
-Each concrete region node is associated with some, well, concrete
-region: e.g., a free lifetime, or the region for a particular scope.
-Note that there may be more than one concrete region node for a
-particular region value.  Moreover, because of how the graph is built,
-we know that all concrete region nodes have either in-degree 1 or
-out-degree 1.
-
-Before resolution begins, we build up the constraints in a hashmap
-that maps `Constraint` keys to spans.  During resolution, we construct
-the actual `Graph` structure that we describe here.
-
-## Our current algorithm
-
-We divide region variables into two groups: Expanding and Contracting.
-Expanding region variables are those that have a concrete region
-predecessor (direct or indirect).  Contracting region variables are
-all others.
-
-We first resolve the values of Expanding region variables and then
-process Contracting ones.  We currently use an iterative, fixed-point
-procedure (but read on, I believe this could be replaced with a linear
-walk).  Basically we iterate over the edges in the graph, ensuring
-that, if the source of the edge has a value, then this value is a
-subregion of the target value.  If the target does not yet have a
-value, it takes the value from the source.  If the target already had
-a value, then the resulting value is Least Upper Bound of the old and
-new values. When we are done, each Expanding node will have the
-smallest region that it could possibly have and still satisfy the
-constraints.
-
-We next process the Contracting nodes.  Here we again iterate over the
-edges, only this time we move values from target to source (if the
-source is a Contracting node).  For each contracting node, we compute
-its value as the GLB of all its successors.  Basically contracting
-nodes ensure that there is overlap between their successors; we will
-ultimately infer the largest overlap possible.
-
-# The Region Hierarchy
-
-## Without closures
-
-Let's first consider the region hierarchy without thinking about
-closures, because they add a lot of complications. The region
-hierarchy *basically* mirrors the lexical structure of the code.
-There is a region for every piece of 'evaluation' that occurs, meaning
-every expression, block, and pattern (patterns are considered to
-"execute" by testing the value they are applied to and creating any
-relevant bindings).  So, for example:
-
-    fn foo(x: int, y: int) { // -+
-    //  +------------+       //  |
-    //  |      +-----+       //  |
-    //  |  +-+ +-+ +-+       //  |
-    //  |  | | | | | |       //  |
-    //  v  v v v v v v       //  |
-        let z = x + y;       //  |
-        ...                  //  |
-    }                        // -+
-
-    fn bar() { ... }
-
-In this example, there is a region for the fn body block as a whole,
-and then a subregion for the declaration of the local variable.
-Within that, there are sublifetimes for the assignment pattern and
-also the expression `x + y`. The expression itself has sublifetimes
-for evaluating `x` and `y`.
-
-## Function calls
-
-Function calls are a bit tricky. I will describe how we handle them
-*now* and then a bit about how we can improve them (Issue #6268).
-
-Consider a function call like `func(expr1, expr2)`, where `func`,
-`arg1`, and `arg2` are all arbitrary expressions. Currently,
-we construct a region hierarchy like:
-
-    +----------------+
-    |                |
-    +--+ +---+  +---+|
-    v  v v   v  v   vv
-    func(expr1, expr2)
-
-Here you can see that the call as a whole has a region and the
-function plus arguments are subregions of that. As a side-effect of
-this, we get a lot of spurious errors around nested calls, in
-particular when combined with `&mut` functions. For example, a call
-like this one
-
-    self.foo(self.bar())
-
-where both `foo` and `bar` are `&mut self` functions will always yield
-an error.
-
-Here is a more involved example (which is safe) so we can see what's
-going on:
-
-    struct Foo { f: uint, g: uint }
-    ...
-    fn add(p: &mut uint, v: uint) {
-        *p += v;
-    }
-    ...
-    fn inc(p: &mut uint) -> uint {
-        *p += 1; *p
-    }
-    fn weird() {
-        let mut x: Box<Foo> = box Foo { ... };
-        'a: add(&mut (*x).f,
-                'b: inc(&mut (*x).f)) // (..)
-    }
-
-The important part is the line marked `(..)` which contains a call to
-`add()`. The first argument is a mutable borrow of the field `f`.  The
-second argument also borrows the field `f`. Now, in the current borrow
-checker, the first borrow is given the lifetime of the call to
-`add()`, `'a`.  The second borrow is given the lifetime of `'b` of the
-call to `inc()`. Because `'b` is considered to be a sublifetime of
-`'a`, an error is reported since there are two co-existing mutable
-borrows of the same data.
-
-However, if we were to examine the lifetimes a bit more carefully, we
-can see that this error is unnecessary. Let's examine the lifetimes
-involved with `'a` in detail. We'll break apart all the steps involved
-in a call expression:
-
-    'a: {
-        'a_arg1: let a_temp1: ... = add;
-        'a_arg2: let a_temp2: &'a mut uint = &'a mut (*x).f;
-        'a_arg3: let a_temp3: uint = {
-            let b_temp1: ... = inc;
-            let b_temp2: &'b = &'b mut (*x).f;
-            'b_call: b_temp1(b_temp2)
-        };
-        'a_call: a_temp1(a_temp2, a_temp3) // (**)
-    }
-
-Here we see that the lifetime `'a` includes a number of substatements.
-In particular, there is this lifetime I've called `'a_call` that
-corresponds to the *actual execution of the function `add()`*, after
-all arguments have been evaluated. There is a corresponding lifetime
-`'b_call` for the execution of `inc()`. If we wanted to be precise
-about it, the lifetime of the two borrows should be `'a_call` and
-`'b_call` respectively, since the references that were created
-will not be dereferenced except during the execution itself.
-
-However, this model by itself is not sound. The reason is that
-while the two references that are created will never be used
-simultaneously, it is still true that the first reference is
-*created* before the second argument is evaluated, and so even though
-it will not be *dereferenced* during the evaluation of the second
-argument, it can still be *invalidated* by that evaluation. Consider
-this similar but unsound example:
-
-    struct Foo { f: uint, g: uint }
-    ...
-    fn add(p: &mut uint, v: uint) {
-        *p += v;
-    }
-    ...
-    fn consume(x: Box<Foo>) -> uint {
-        x.f + x.g
-    }
-    fn weird() {
-        let mut x: Box<Foo> = box Foo { ... };
-        'a: add(&mut (*x).f, consume(x)) // (..)
-    }
-
-In this case, the second argument to `add` actually consumes `x`, thus
-invalidating the first argument.
-
-So, for now, we exclude the `call` lifetimes from our model.
-Eventually I would like to include them, but we will have to make the
-borrow checker handle this situation correctly. In particular, if
-there is a reference created whose lifetime does not enclose
-the borrow expression, we must issue sufficient restrictions to ensure
-that the pointee remains valid.
-
-## Adding closures
-
-The other significant complication to the region hierarchy is
-closures. I will describe here how closures should work, though some
-of the work to implement this model is ongoing at the time of this
-writing.
-
-The body of closures are type-checked along with the function that
-creates them. However, unlike other expressions that appear within the
-function body, it is not entirely obvious when a closure body executes
-with respect to the other expressions. This is because the closure
-body will execute whenever the closure is called; however, we can
-never know precisely when the closure will be called, especially
-without some sort of alias analysis.
-
-However, we can place some sort of limits on when the closure
-executes.  In particular, the type of every closure `fn:'r K` includes
-a region bound `'r`. This bound indicates the maximum lifetime of that
-closure; once we exit that region, the closure cannot be called
-anymore. Therefore, we say that the lifetime of the closure body is a
-sublifetime of the closure bound, but the closure body itself is unordered
-with respect to other parts of the code.
-
-For example, consider the following fragment of code:
-
-    'a: {
-         let closure: fn:'a() = || 'b: {
-             'c: ...
-         };
-         'd: ...
-    }
-
-Here we have four lifetimes, `'a`, `'b`, `'c`, and `'d`. The closure
-`closure` is bounded by the lifetime `'a`. The lifetime `'b` is the
-lifetime of the closure body, and `'c` is some statement within the
-closure body. Finally, `'d` is a statement within the outer block that
-created the closure.
-
-We can say that the closure body `'b` is a sublifetime of `'a` due to
-the closure bound. By the usual lexical scoping conventions, the
-statement `'c` is clearly a sublifetime of `'b`, and `'d` is a
-sublifetime of `'d`. However, there is no ordering between `'c` and
-`'d` per se (this kind of ordering between statements is actually only
-an issue for dataflow; passes like the borrow checker must assume that
-closures could execute at any time from the moment they are created
-until they go out of scope).
-
-### Complications due to closure bound inference
-
-There is only one problem with the above model: in general, we do not
-actually *know* the closure bounds during region inference! In fact,
-closure bounds are almost always region variables! This is very tricky
-because the inference system implicitly assumes that we can do things
-like compute the LUB of two scoped lifetimes without needing to know
-the values of any variables.
-
-Here is an example to illustrate the problem:
-
-    fn identify<T>(x: T) -> T { x }
-
-    fn foo() { // 'foo is the function body
-      'a: {
-           let closure = identity(|| 'b: {
-               'c: ...
-           });
-           'd: closure();
-      }
-      'e: ...;
-    }
-
-In this example, the closure bound is not explicit. At compile time,
-we will create a region variable (let's call it `V0`) to represent the
-closure bound.
-
-The primary difficulty arises during the constraint propagation phase.
-Imagine there is some variable with incoming edges from `'c` and `'d`.
-This means that the value of the variable must be `LUB('c,
-'d)`. However, without knowing what the closure bound `V0` is, we
-can't compute the LUB of `'c` and `'d`! Any we don't know the closure
-bound until inference is done.
-
-The solution is to rely on the fixed point nature of inference.
-Basically, when we must compute `LUB('c, 'd)`, we just use the current
-value for `V0` as the closure's bound. If `V0`'s binding should
-change, then we will do another round of inference, and the result of
-`LUB('c, 'd)` will change.
-
-One minor implication of this is that the graph does not in fact track
-the full set of dependencies between edges. We cannot easily know
-whether the result of a LUB computation will change, since there may
-be indirect dependencies on other variables that are not reflected on
-the graph. Therefore, we must *always* iterate over all edges when
-doing the fixed point calculation, not just those adjacent to nodes
-whose values have changed.
-
-Were it not for this requirement, we could in fact avoid fixed-point
-iteration altogether. In that universe, we could instead first
-identify and remove strongly connected components (SCC) in the graph.
-Note that such components must consist solely of region variables; all
-of these variables can effectively be unified into a single variable.
-Once SCCs are removed, we are left with a DAG.  At this point, we
-could walk the DAG in topological order once to compute the expanding
-nodes, and again in reverse topological order to compute the
-contracting nodes. However, as I said, this does not work given the
-current treatment of closure bounds, but perhaps in the future we can
-address this problem somehow and make region inference somewhat more
-efficient. Note that this is solely a matter of performance, not
-expressiveness.
-
-### Skolemization
-
-For a discussion on skolemization and higher-ranked subtyping, please
-see the module `middle::typeck::infer::higher_ranked::doc`.
-
-*/
+//! Region inference module.
+//!
+//! # Terminology
+//!
+//! Note that we use the terms region and lifetime interchangeably,
+//! though the term `lifetime` is preferred.
+//!
+//! # Introduction
+//!
+//! Region inference uses a somewhat more involved algorithm than type
+//! inference.  It is not the most efficient thing ever written though it
+//! seems to work well enough in practice (famous last words).  The reason
+//! that we use a different algorithm is because, unlike with types, it is
+//! impractical to hand-annotate with regions (in some cases, there aren't
+//! even the requisite syntactic forms).  So we have to get it right, and
+//! it's worth spending more time on a more involved analysis.  Moreover,
+//! regions are a simpler case than types: they don't have aggregate
+//! structure, for example.
+//!
+//! Unlike normal type inference, which is similar in spirit to H-M and thus
+//! works progressively, the region type inference works by accumulating
+//! constraints over the course of a function.  Finally, at the end of
+//! processing a function, we process and solve the constraints all at
+//! once.
+//!
+//! The constraints are always of one of three possible forms:
+//!
+//! - ConstrainVarSubVar(R_i, R_j) states that region variable R_i
+//!   must be a subregion of R_j
+//! - ConstrainRegSubVar(R, R_i) states that the concrete region R
+//!   (which must not be a variable) must be a subregion of the variable R_i
+//! - ConstrainVarSubReg(R_i, R) is the inverse
+//!
+//! # Building up the constraints
+//!
+//! Variables and constraints are created using the following methods:
+//!
+//! - `new_region_var()` creates a new, unconstrained region variable;
+//! - `make_subregion(R_i, R_j)` states that R_i is a subregion of R_j
+//! - `lub_regions(R_i, R_j) -> R_k` returns a region R_k which is
+//!   the smallest region that is greater than both R_i and R_j
+//! - `glb_regions(R_i, R_j) -> R_k` returns a region R_k which is
+//!   the greatest region that is smaller than both R_i and R_j
+//!
+//! The actual region resolution algorithm is not entirely
+//! obvious, though it is also not overly complex.
+//!
+//! ## Snapshotting
+//!
+//! It is also permitted to try (and rollback) changes to the graph.  This
+//! is done by invoking `start_snapshot()`, which returns a value.  Then
+//! later you can call `rollback_to()` which undoes the work.
+//! Alternatively, you can call `commit()` which ends all snapshots.
+//! Snapshots can be recursive---so you can start a snapshot when another
+//! is in progress, but only the root snapshot can "commit".
+//!
+//! # Resolving constraints
+//!
+//! The constraint resolution algorithm is not super complex but also not
+//! entirely obvious.  Here I describe the problem somewhat abstractly,
+//! then describe how the current code works.  There may be other, smarter
+//! ways of doing this with which I am unfamiliar and can't be bothered to
+//! research at the moment. - NDM
+//!
+//! ## The problem
+//!
+//! Basically our input is a directed graph where nodes can be divided
+//! into two categories: region variables and concrete regions.  Each edge
+//! `R -> S` in the graph represents a constraint that the region `R` is a
+//! subregion of the region `S`.
+//!
+//! Region variable nodes can have arbitrary degree.  There is one region
+//! variable node per region variable.
+//!
+//! Each concrete region node is associated with some, well, concrete
+//! region: e.g., a free lifetime, or the region for a particular scope.
+//! Note that there may be more than one concrete region node for a
+//! particular region value.  Moreover, because of how the graph is built,
+//! we know that all concrete region nodes have either in-degree 1 or
+//! out-degree 1.
+//!
+//! Before resolution begins, we build up the constraints in a hashmap
+//! that maps `Constraint` keys to spans.  During resolution, we construct
+//! the actual `Graph` structure that we describe here.
+//!
+//! ## Our current algorithm
+//!
+//! We divide region variables into two groups: Expanding and Contracting.
+//! Expanding region variables are those that have a concrete region
+//! predecessor (direct or indirect).  Contracting region variables are
+//! all others.
+//!
+//! We first resolve the values of Expanding region variables and then
+//! process Contracting ones.  We currently use an iterative, fixed-point
+//! procedure (but read on, I believe this could be replaced with a linear
+//! walk).  Basically we iterate over the edges in the graph, ensuring
+//! that, if the source of the edge has a value, then this value is a
+//! subregion of the target value.  If the target does not yet have a
+//! value, it takes the value from the source.  If the target already had
+//! a value, then the resulting value is Least Upper Bound of the old and
+//! new values. When we are done, each Expanding node will have the
+//! smallest region that it could possibly have and still satisfy the
+//! constraints.
+//!
+//! We next process the Contracting nodes.  Here we again iterate over the
+//! edges, only this time we move values from target to source (if the
+//! source is a Contracting node).  For each contracting node, we compute
+//! its value as the GLB of all its successors.  Basically contracting
+//! nodes ensure that there is overlap between their successors; we will
+//! ultimately infer the largest overlap possible.
+//!
+//! # The Region Hierarchy
+//!
+//! ## Without closures
+//!
+//! Let's first consider the region hierarchy without thinking about
+//! closures, because they add a lot of complications. The region
+//! hierarchy *basically* mirrors the lexical structure of the code.
+//! There is a region for every piece of 'evaluation' that occurs, meaning
+//! every expression, block, and pattern (patterns are considered to
+//! "execute" by testing the value they are applied to and creating any
+//! relevant bindings).  So, for example:
+//!
+//!     fn foo(x: int, y: int) { // -+
+//!     //  +------------+       //  |
+//!     //  |      +-----+       //  |
+//!     //  |  +-+ +-+ +-+       //  |
+//!     //  |  | | | | | |       //  |
+//!     //  v  v v v v v v       //  |
+//!         let z = x + y;       //  |
+//!         ...                  //  |
+//!     }                        // -+
+//!
+//!     fn bar() { ... }
+//!
+//! In this example, there is a region for the fn body block as a whole,
+//! and then a subregion for the declaration of the local variable.
+//! Within that, there are sublifetimes for the assignment pattern and
+//! also the expression `x + y`. The expression itself has sublifetimes
+//! for evaluating `x` and `y`.
+//!
+//! ## Function calls
+//!
+//! Function calls are a bit tricky. I will describe how we handle them
+//! *now* and then a bit about how we can improve them (Issue #6268).
+//!
+//! Consider a function call like `func(expr1, expr2)`, where `func`,
+//! `arg1`, and `arg2` are all arbitrary expressions. Currently,
+//! we construct a region hierarchy like:
+//!
+//!     +----------------+
+//!     |                |
+//!     +--+ +---+  +---+|
+//!     v  v v   v  v   vv
+//!     func(expr1, expr2)
+//!
+//! Here you can see that the call as a whole has a region and the
+//! function plus arguments are subregions of that. As a side-effect of
+//! this, we get a lot of spurious errors around nested calls, in
+//! particular when combined with `&mut` functions. For example, a call
+//! like this one
+//!
+//!     self.foo(self.bar())
+//!
+//! where both `foo` and `bar` are `&mut self` functions will always yield
+//! an error.
+//!
+//! Here is a more involved example (which is safe) so we can see what's
+//! going on:
+//!
+//!     struct Foo { f: uint, g: uint }
+//!     ...
+//!     fn add(p: &mut uint, v: uint) {
+//!         *p += v;
+//!     }
+//!     ...
+//!     fn inc(p: &mut uint) -> uint {
+//!         *p += 1; *p
+//!     }
+//!     fn weird() {
+//!         let mut x: Box<Foo> = box Foo { ... };
+//!         'a: add(&mut (*x).f,
+//!                 'b: inc(&mut (*x).f)) // (..)
+//!     }
+//!
+//! The important part is the line marked `(..)` which contains a call to
+//! `add()`. The first argument is a mutable borrow of the field `f`.  The
+//! second argument also borrows the field `f`. Now, in the current borrow
+//! checker, the first borrow is given the lifetime of the call to
+//! `add()`, `'a`.  The second borrow is given the lifetime of `'b` of the
+//! call to `inc()`. Because `'b` is considered to be a sublifetime of
+//! `'a`, an error is reported since there are two co-existing mutable
+//! borrows of the same data.
+//!
+//! However, if we were to examine the lifetimes a bit more carefully, we
+//! can see that this error is unnecessary. Let's examine the lifetimes
+//! involved with `'a` in detail. We'll break apart all the steps involved
+//! in a call expression:
+//!
+//!     'a: {
+//!         'a_arg1: let a_temp1: ... = add;
+//!         'a_arg2: let a_temp2: &'a mut uint = &'a mut (*x).f;
+//!         'a_arg3: let a_temp3: uint = {
+//!             let b_temp1: ... = inc;
+//!             let b_temp2: &'b = &'b mut (*x).f;
+//!             'b_call: b_temp1(b_temp2)
+//!         };
+//!         'a_call: a_temp1(a_temp2, a_temp3) // (**)
+//!     }
+//!
+//! Here we see that the lifetime `'a` includes a number of substatements.
+//! In particular, there is this lifetime I've called `'a_call` that
+//! corresponds to the *actual execution of the function `add()`*, after
+//! all arguments have been evaluated. There is a corresponding lifetime
+//! `'b_call` for the execution of `inc()`. If we wanted to be precise
+//! about it, the lifetime of the two borrows should be `'a_call` and
+//! `'b_call` respectively, since the references that were created
+//! will not be dereferenced except during the execution itself.
+//!
+//! However, this model by itself is not sound. The reason is that
+//! while the two references that are created will never be used
+//! simultaneously, it is still true that the first reference is
+//! *created* before the second argument is evaluated, and so even though
+//! it will not be *dereferenced* during the evaluation of the second
+//! argument, it can still be *invalidated* by that evaluation. Consider
+//! this similar but unsound example:
+//!
+//!     struct Foo { f: uint, g: uint }
+//!     ...
+//!     fn add(p: &mut uint, v: uint) {
+//!         *p += v;
+//!     }
+//!     ...
+//!     fn consume(x: Box<Foo>) -> uint {
+//!         x.f + x.g
+//!     }
+//!     fn weird() {
+//!         let mut x: Box<Foo> = box Foo { ... };
+//!         'a: add(&mut (*x).f, consume(x)) // (..)
+//!     }
+//!
+//! In this case, the second argument to `add` actually consumes `x`, thus
+//! invalidating the first argument.
+//!
+//! So, for now, we exclude the `call` lifetimes from our model.
+//! Eventually I would like to include them, but we will have to make the
+//! borrow checker handle this situation correctly. In particular, if
+//! there is a reference created whose lifetime does not enclose
+//! the borrow expression, we must issue sufficient restrictions to ensure
+//! that the pointee remains valid.
+//!
+//! ## Adding closures
+//!
+//! The other significant complication to the region hierarchy is
+//! closures. I will describe here how closures should work, though some
+//! of the work to implement this model is ongoing at the time of this
+//! writing.
+//!
+//! The body of closures are type-checked along with the function that
+//! creates them. However, unlike other expressions that appear within the
+//! function body, it is not entirely obvious when a closure body executes
+//! with respect to the other expressions. This is because the closure
+//! body will execute whenever the closure is called; however, we can
+//! never know precisely when the closure will be called, especially
+//! without some sort of alias analysis.
+//!
+//! However, we can place some sort of limits on when the closure
+//! executes.  In particular, the type of every closure `fn:'r K` includes
+//! a region bound `'r`. This bound indicates the maximum lifetime of that
+//! closure; once we exit that region, the closure cannot be called
+//! anymore. Therefore, we say that the lifetime of the closure body is a
+//! sublifetime of the closure bound, but the closure body itself is unordered
+//! with respect to other parts of the code.
+//!
+//! For example, consider the following fragment of code:
+//!
+//!     'a: {
+//!          let closure: fn:'a() = || 'b: {
+//!              'c: ...
+//!          };
+//!          'd: ...
+//!     }
+//!
+//! Here we have four lifetimes, `'a`, `'b`, `'c`, and `'d`. The closure
+//! `closure` is bounded by the lifetime `'a`. The lifetime `'b` is the
+//! lifetime of the closure body, and `'c` is some statement within the
+//! closure body. Finally, `'d` is a statement within the outer block that
+//! created the closure.
+//!
+//! We can say that the closure body `'b` is a sublifetime of `'a` due to
+//! the closure bound. By the usual lexical scoping conventions, the
+//! statement `'c` is clearly a sublifetime of `'b`, and `'d` is a
+//! sublifetime of `'a`. However, there is no ordering between `'c` and
+//! `'d` per se (this kind of ordering between statements is actually only
+//! an issue for dataflow; passes like the borrow checker must assume that
+//! closures could execute at any time from the moment they are created
+//! until they go out of scope).
+//!
+//! ### Complications due to closure bound inference
+//!
+//! There is only one problem with the above model: in general, we do not
+//! actually *know* the closure bounds during region inference! In fact,
+//! closure bounds are almost always region variables! This is very tricky
+//! because the inference system implicitly assumes that we can do things
+//! like compute the LUB of two scoped lifetimes without needing to know
+//! the values of any variables.
+//!
+//! Here is an example to illustrate the problem:
+//!
+//!     fn identity<T>(x: T) -> T { x }
+//!
+//!     fn foo() { // 'foo is the function body
+//!       'a: {
+//!            let closure = identity(|| 'b: {
+//!                'c: ...
+//!            });
+//!            'd: closure();
+//!       }
+//!       'e: ...;
+//!     }
+//!
+//! In this example, the closure bound is not explicit. At compile time,
+//! we will create a region variable (let's call it `V0`) to represent the
+//! closure bound.
+//!
+//! The primary difficulty arises during the constraint propagation phase.
+//! Imagine there is some variable with incoming edges from `'c` and `'d`.
+//! This means that the value of the variable must be `LUB('c,
+//! 'd)`. However, without knowing what the closure bound `V0` is, we
+//! can't compute the LUB of `'c` and `'d`! And we don't know the closure
+//! bound until inference is done.
+//!
+//! The solution is to rely on the fixed point nature of inference.
+//! Basically, when we must compute `LUB('c, 'd)`, we just use the current
+//! value for `V0` as the closure's bound. If `V0`'s binding should
+//! change, then we will do another round of inference, and the result of
+//! `LUB('c, 'd)` will change.
+//!
+//! One minor implication of this is that the graph does not in fact track
+//! the full set of dependencies between edges. We cannot easily know
+//! whether the result of a LUB computation will change, since there may
+//! be indirect dependencies on other variables that are not reflected on
+//! the graph. Therefore, we must *always* iterate over all edges when
+//! doing the fixed point calculation, not just those adjacent to nodes
+//! whose values have changed.
+//!
+//! Were it not for this requirement, we could in fact avoid fixed-point
+//! iteration altogether. In that universe, we could instead first
+//! identify and remove strongly connected components (SCC) in the graph.
+//! Note that such components must consist solely of region variables; all
+//! of these variables can effectively be unified into a single variable.
+//! Once SCCs are removed, we are left with a DAG.  At this point, we
+//! could walk the DAG in topological order once to compute the expanding
+//! nodes, and again in reverse topological order to compute the
+//! contracting nodes. However, as I said, this does not work given the
+//! current treatment of closure bounds, but perhaps in the future we can
+//! address this problem somehow and make region inference somewhat more
+//! efficient. Note that this is solely a matter of performance, not
+//! expressiveness.
+//!
+//! ### Skolemization
+//!
+//! For a discussion on skolemization and higher-ranked subtyping, please
+//! see the module `middle::typeck::infer::higher_ranked::doc`.
diff --git a/src/librustc/middle/typeck/infer/region_inference/mod.rs b/src/librustc/middle/typeck/infer/region_inference/mod.rs
index 6a447d467cfce..01533cba7ab6d 100644
--- a/src/librustc/middle/typeck/infer/region_inference/mod.rs
+++ b/src/librustc/middle/typeck/infer/region_inference/mod.rs
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*! See doc.rs */
+//! See doc.rs
 
 pub use self::Constraint::*;
 pub use self::Verify::*;
@@ -597,15 +597,10 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
             .collect()
     }
 
+    /// Computes all regions that have been related to `r0` in any way since the mark `mark` was
+    /// made---`r0` itself will be the first entry. This is used when checking whether skolemized
+    /// regions are being improperly related to other regions.
     pub fn tainted(&self, mark: RegionMark, r0: Region) -> Vec<Region> {
-        /*!
-         * Computes all regions that have been related to `r0` in any
-         * way since the mark `mark` was made---`r0` itself will be
-         * the first entry. This is used when checking whether
-         * skolemized regions are being improperly related to other
-         * regions.
-         */
-
         debug!("tainted(mark={}, r0={})", mark, r0.repr(self.tcx));
         let _indenter = indenter();
 
@@ -783,16 +778,12 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
         }
     }
 
+    /// Computes a region that encloses both free region arguments. Guarantees that if the same two
+    /// regions are given as argument, in any order, a consistent result is returned.
     fn lub_free_regions(&self,
                         a: &FreeRegion,
                         b: &FreeRegion) -> ty::Region
     {
-        /*!
-         * Computes a region that encloses both free region arguments.
-         * Guarantee that if the same two regions are given as argument,
-         * in any order, a consistent result is returned.
-         */
-
         return match a.cmp(b) {
             Less => helper(self, a, b),
             Greater => helper(self, b, a),
@@ -884,16 +875,13 @@ impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
         }
     }
 
+    /// Computes a region that is enclosed by both free region arguments, if any. Guarantees that
+    /// if the same two regions are given as argument, in any order, a consistent result is
+    /// returned.
     fn glb_free_regions(&self,
                         a: &FreeRegion,
                         b: &FreeRegion) -> cres<'tcx, ty::Region>
     {
-        /*!
-         * Computes a region that is enclosed by both free region arguments,
-         * if any. Guarantees that if the same two regions are given as argument,
-         * in any order, a consistent result is returned.
-         */
-
         return match a.cmp(b) {
             Less => helper(self, a, b),
             Greater => helper(self, b, a),
diff --git a/src/librustc/middle/typeck/infer/skolemize.rs b/src/librustc/middle/typeck/infer/skolemize.rs
index 5907a2bb9b61d..62bf1d0126a59 100644
--- a/src/librustc/middle/typeck/infer/skolemize.rs
+++ b/src/librustc/middle/typeck/infer/skolemize.rs
@@ -8,37 +8,27 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
- * Skolemization is the process of replacing unknown variables with
- * fresh types. The idea is that the type, after skolemization,
- * contains no inference variables but instead contains either a value
- * for each variable or fresh "arbitrary" types wherever a variable
- * would have been.
- *
- * Skolemization is used primarily to get a good type for inserting
- * into a cache. The result summarizes what the type inferencer knows
- * "so far". The primary place it is used right now is in the trait
- * matching algorithm, which needs to be able to cache whether an
- * `impl` self type matches some other type X -- *without* affecting
- * `X`. That means if that if the type `X` is in fact an unbound type
- * variable, we want the match to be regarded as ambiguous, because
- * depending on what type that type variable is ultimately assigned,
- * the match may or may not succeed.
- *
- * Note that you should be careful not to allow the output of
- * skolemization to leak to the user in error messages or in any other
- * form. Skolemization is only really useful as an internal detail.
- *
- * __An important detail concerning regions.__ The skolemizer also
- * replaces *all* regions with 'static. The reason behind this is
- * that, in general, we do not take region relationships into account
- * when making type-overloaded decisions. This is important because of
- * the design of the region inferencer, which is not based on
- * unification but rather on accumulating and then solving a set of
- * constraints. In contrast, the type inferencer assigns a value to
- * each type variable only once, and it does so as soon as it can, so
- * it is reasonable to ask what the type inferencer knows "so far".
- */
+//! Skolemization is the process of replacing unknown variables with fresh types. The idea is that
+//! the type, after skolemization, contains no inference variables but instead contains either a
+//! value for each variable or fresh "arbitrary" types wherever a variable would have been.
+//!
+//! Skolemization is used primarily to get a good type for inserting into a cache. The result
+//! summarizes what the type inferencer knows "so far". The primary place it is used right now is
+//! in the trait matching algorithm, which needs to be able to cache whether an `impl` self type
+//! matches some other type X -- *without* affecting `X`. That means that if the type `X` is in
+//! fact an unbound type variable, we want the match to be regarded as ambiguous, because depending
+//! on what type that type variable is ultimately assigned, the match may or may not succeed.
+//!
+//! Note that you should be careful not to allow the output of skolemization to leak to the user in
+//! error messages or in any other form. Skolemization is only really useful as an internal detail.
+//!
+//! __An important detail concerning regions.__ The skolemizer also replaces *all* regions with
+//! 'static. The reason behind this is that, in general, we do not take region relationships into
+//! account when making type-overloaded decisions. This is important because of the design of the
+//! region inferencer, which is not based on unification but rather on accumulating and then
+//! solving a set of constraints. In contrast, the type inferencer assigns a value to each type
+//! variable only once, and it does so as soon as it can, so it is reasonable to ask what the type
+//! inferencer knows "so far".
 
 use middle::ty::{mod, Ty};
 use middle::ty_fold;
diff --git a/src/librustc/middle/typeck/infer/type_variable.rs b/src/librustc/middle/typeck/infer/type_variable.rs
index f7f7389602f82..3058f09a83a85 100644
--- a/src/librustc/middle/typeck/infer/type_variable.rs
+++ b/src/librustc/middle/typeck/infer/type_variable.rs
@@ -72,12 +72,10 @@ impl<'tcx> TypeVariableTable<'tcx> {
         self.values.get(vid.index).diverging
     }
 
+    /// Records that `a <: b`, `a :> b`, or `a == b`, depending on `dir`.
+    ///
+    /// Precondition: neither `a` nor `b` are known.
     pub fn relate_vars(&mut self, a: ty::TyVid, dir: RelationDir, b: ty::TyVid) {
-        /*!
-         * Records that `a <: b`, `a :> b`, or `a == b`, depending on `dir`.
-         *
-         * Precondition: neither `a` nor `b` are known.
-         */
 
         if a != b {
             self.relations(a).push((dir, b));
@@ -86,19 +84,15 @@ impl<'tcx> TypeVariableTable<'tcx> {
         }
     }
 
+    /// Instantiates `vid` with the type `ty` and then pushes an entry onto `stack` for each of the
+    /// relations of `vid` to other variables. The relations will have the form `(ty, dir, vid1)`
+    /// where `vid1` is some other variable id.
     pub fn instantiate_and_push(
         &mut self,
         vid: ty::TyVid,
         ty: Ty<'tcx>,
         stack: &mut Vec<(Ty<'tcx>, RelationDir, ty::TyVid)>)
     {
-        /*!
-         * Instantiates `vid` with the type `ty` and then pushes an
-         * entry onto `stack` for each of the relations of `vid` to
-         * other variables. The relations will have the form `(ty,
-         * dir, vid1)` where `vid1` is some other variable id.
-         */
-
         let old_value = {
             let value_ptr = &mut self.values.get_mut(vid.index).value;
             mem::replace(value_ptr, Known(ty))
diff --git a/src/librustc/middle/typeck/infer/unify.rs b/src/librustc/middle/typeck/infer/unify.rs
index fcf042b3f8b80..38f55cc3f467b 100644
--- a/src/librustc/middle/typeck/infer/unify.rs
+++ b/src/librustc/middle/typeck/infer/unify.rs
@@ -157,13 +157,9 @@ impl<'tcx, V:PartialEq+Clone+Repr<'tcx>, K:UnifyKey<'tcx, V>> UnificationTable<K
         k
     }
 
+    /// Find the root node for `vid`. This uses the standard union-find algorithm with path
+    /// compression: http://en.wikipedia.org/wiki/Disjoint-set_data_structure
     pub fn get(&mut self, tcx: &ty::ctxt, vid: K) -> Node<K,V> {
-        /*!
-         * Find the root node for `vid`. This uses the standard
-         * union-find algorithm with path compression:
-         * http://en.wikipedia.org/wiki/Disjoint-set_data_structure
-         */
-
         let index = vid.index();
         let value = (*self.values.get(index)).clone();
         match value {
@@ -188,16 +184,13 @@ impl<'tcx, V:PartialEq+Clone+Repr<'tcx>, K:UnifyKey<'tcx, V>> UnificationTable<K
         }
     }
 
+    /// Sets the value for `vid` to `new_value`. `vid` MUST be a root node! Also, we must be in the
+    /// middle of a snapshot.
     pub fn set(&mut self,
                tcx: &ty::ctxt<'tcx>,
                key: K,
                new_value: VarValue<K,V>)
     {
-        /*!
-         * Sets the value for `vid` to `new_value`. `vid` MUST be a
-         * root node! Also, we must be in the middle of a snapshot.
-         */
-
         assert!(self.is_root(&key));
 
         debug!("Updating variable {} to {}",
@@ -207,19 +200,15 @@ impl<'tcx, V:PartialEq+Clone+Repr<'tcx>, K:UnifyKey<'tcx, V>> UnificationTable<K
         self.values.set(key.index(), new_value);
     }
 
+    /// Either redirects node_a to node_b or vice versa, depending on the relative rank. Returns
+    /// the new root and rank. You should then update the value of the new root to something
+    /// suitable.
     pub fn unify(&mut self,
                  tcx: &ty::ctxt<'tcx>,
                  node_a: &Node<K,V>,
                  node_b: &Node<K,V>)
                  -> (K, uint)
     {
-        /*!
-         * Either redirects node_a to node_b or vice versa, depending
-         * on the relative rank. Returns the new root and rank.  You
-         * should then update the value of the new root to something
-         * suitable.
-         */
-
         debug!("unify(node_a(id={}, rank={}), node_b(id={}, rank={}))",
                node_a.key.repr(tcx),
                node_a.rank,
@@ -295,19 +284,15 @@ pub trait InferCtxtMethodsForSimplyUnifiableTypes<'tcx, V:SimplyUnifiable<'tcx>,
 impl<'a,'tcx,V:SimplyUnifiable<'tcx>,K:UnifyKey<'tcx, Option<V>>>
     InferCtxtMethodsForSimplyUnifiableTypes<'tcx, V, K> for InferCtxt<'a, 'tcx>
 {
+    /// Unifies two simple keys. Because simple keys do not have any subtyping relationships, if
+    /// both keys have already been associated with a value, then those two values must be the
+    /// same.
     fn simple_vars(&self,
                    a_is_expected: bool,
                    a_id: K,
                    b_id: K)
                    -> ures<'tcx>
     {
-        /*!
-         * Unifies two simple keys.  Because simple keys do
-         * not have any subtyping relationships, if both keys
-         * have already been associated with a value, then those two
-         * values must be the same.
-         */
-
         let tcx = self.tcx;
         let table = UnifyKey::unification_table(self);
         let node_a = table.borrow_mut().get(tcx, a_id);
@@ -341,19 +326,14 @@ impl<'a,'tcx,V:SimplyUnifiable<'tcx>,K:UnifyKey<'tcx, Option<V>>>
         return Ok(())
     }
 
+    /// Sets the value of the key `a_id` to `b`. Because simple keys do not have any subtyping
+    /// relationships, if `a_id` already has a value, it must be the same as `b`.
     fn simple_var_t(&self,
                     a_is_expected: bool,
                     a_id: K,
                     b: V)
                     -> ures<'tcx>
     {
-        /*!
-         * Sets the value of the key `a_id` to `b`.  Because
-         * simple keys do not have any subtyping relationships,
-         * if `a_id` already has a value, it must be the same as
-         * `b`.
-         */
-
         let tcx = self.tcx;
         let table = UnifyKey::unification_table(self);
         let node_a = table.borrow_mut().get(tcx, a_id);
diff --git a/src/librustc/middle/typeck/variance.rs b/src/librustc/middle/typeck/variance.rs
index 51b610dccce38..fa001f0434ffd 100644
--- a/src/librustc/middle/typeck/variance.rs
+++ b/src/librustc/middle/typeck/variance.rs
@@ -8,189 +8,186 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-
-This file infers the variance of type and lifetime parameters. The
-algorithm is taken from Section 4 of the paper "Taming the Wildcards:
-Combining Definition- and Use-Site Variance" published in PLDI'11 and
-written by Altidor et al., and hereafter referred to as The Paper.
-
-This inference is explicitly designed *not* to consider the uses of
-types within code. To determine the variance of type parameters
-defined on type `X`, we only consider the definition of the type `X`
-and the definitions of any types it references.
-
-We only infer variance for type parameters found on *types*: structs,
-enums, and traits. We do not infer variance for type parameters found
-on fns or impls. This is because those things are not type definitions
-and variance doesn't really make sense in that context.
-
-It is worth covering what variance means in each case. For structs and
-enums, I think it is fairly straightforward. The variance of the type
-or lifetime parameters defines whether `T<A>` is a subtype of `T<B>`
-(resp. `T<'a>` and `T<'b>`) based on the relationship of `A` and `B`
-(resp. `'a` and `'b`). (FIXME #3598 -- we do not currently make use of
-the variances we compute for type parameters.)
-
-### Variance on traits
-
-The meaning of variance for trait parameters is more subtle and worth
-expanding upon. There are in fact two uses of the variance values we
-compute.
-
-#### Trait variance and object types
-
-The first is for object types. Just as with structs and enums, we can
-decide the subtyping relationship between two object types `&Trait<A>`
-and `&Trait<B>` based on the relationship of `A` and `B`. Note that
-for object types we ignore the `Self` type parameter -- it is unknown,
-and the nature of dynamic dispatch ensures that we will always call a
-function that is expected the appropriate `Self` type. However, we
-must be careful with the other type parameters, or else we could end
-up calling a function that is expecting one type but provided another.
-
-To see what I mean, consider a trait like so:
-
-    trait ConvertTo<A> {
-        fn convertTo(&self) -> A;
-    }
-
-Intuitively, If we had one object `O=&ConvertTo<Object>` and another
-`S=&ConvertTo<String>`, then `S <: O` because `String <: Object`
-(presuming Java-like "string" and "object" types, my go to examples
-for subtyping). The actual algorithm would be to compare the
-(explicit) type parameters pairwise respecting their variance: here,
-the type parameter A is covariant (it appears only in a return
-position), and hence we require that `String <: Object`.
-
-You'll note though that we did not consider the binding for the
-(implicit) `Self` type parameter: in fact, it is unknown, so that's
-good. The reason we can ignore that parameter is precisely because we
-don't need to know its value until a call occurs, and at that time (as
-you said) the dynamic nature of virtual dispatch means the code we run
-will be correct for whatever value `Self` happens to be bound to for
-the particular object whose method we called. `Self` is thus different
-from `A`, because the caller requires that `A` be known in order to
-know the return type of the method `convertTo()`. (As an aside, we
-have rules preventing methods where `Self` appears outside of the
-receiver position from being called via an object.)
-
-#### Trait variance and vtable resolution
-
-But traits aren't only used with objects. They're also used when
-deciding whether a given impl satisfies a given trait bound. To set the
-scene here, imagine I had a function:
-
-    fn convertAll<A,T:ConvertTo<A>>(v: &[T]) {
-        ...
-    }
-
-Now imagine that I have an implementation of `ConvertTo` for `Object`:
-
-    impl ConvertTo<int> for Object { ... }
-
-And I want to call `convertAll` on an array of strings. Suppose
-further that for whatever reason I specifically supply the value of
-`String` for the type parameter `T`:
-
-    let mut vector = ~["string", ...];
-    convertAll::<int, String>(v);
-
-Is this legal? To put another way, can we apply the `impl` for
-`Object` to the type `String`? The answer is yes, but to see why
-we have to expand out what will happen:
-
-- `convertAll` will create a pointer to one of the entries in the
-  vector, which will have type `&String`
-- It will then call the impl of `convertTo()` that is intended
-  for use with objects. This has the type:
-
-      fn(self: &Object) -> int
-
-  It is ok to provide a value for `self` of type `&String` because
-  `&String <: &Object`.
-
-OK, so intuitively we want this to be legal, so let's bring this back
-to variance and see whether we are computing the correct result. We
-must first figure out how to phrase the question "is an impl for
-`Object,int` usable where an impl for `String,int` is expected?"
-
-Maybe it's helpful to think of a dictionary-passing implementation of
-type classes. In that case, `convertAll()` takes an implicit parameter
-representing the impl. In short, we *have* an impl of type:
-
-    V_O = ConvertTo<int> for Object
-
-and the function prototype expects an impl of type:
-
-    V_S = ConvertTo<int> for String
-
-As with any argument, this is legal if the type of the value given
-(`V_O`) is a subtype of the type expected (`V_S`). So is `V_O <: V_S`?
-The answer will depend on the variance of the various parameters. In
-this case, because the `Self` parameter is contravariant and `A` is
-covariant, it means that:
+//! This file infers the variance of type and lifetime parameters. The
+//! algorithm is taken from Section 4 of the paper "Taming the Wildcards:
+//! Combining Definition- and Use-Site Variance" published in PLDI'11 and
+//! written by Altidor et al., and hereafter referred to as The Paper.
+//!
+//! This inference is explicitly designed *not* to consider the uses of
+//! types within code. To determine the variance of type parameters
+//! defined on type `X`, we only consider the definition of the type `X`
+//! and the definitions of any types it references.
+//!
+//! We only infer variance for type parameters found on *types*: structs,
+//! enums, and traits. We do not infer variance for type parameters found
+//! on fns or impls. This is because those things are not type definitions
+//! and variance doesn't really make sense in that context.
+//!
+//! It is worth covering what variance means in each case. For structs and
+//! enums, I think it is fairly straightforward. The variance of the type
+//! or lifetime parameters defines whether `T<A>` is a subtype of `T<B>`
+//! (resp. `T<'a>` and `T<'b>`) based on the relationship of `A` and `B`
+//! (resp. `'a` and `'b`). (FIXME #3598 -- we do not currently make use of
+//! the variances we compute for type parameters.)
+//!
+//! ### Variance on traits
+//!
+//! The meaning of variance for trait parameters is more subtle and worth
+//! expanding upon. There are in fact two uses of the variance values we
+//! compute.
+//!
+//! #### Trait variance and object types
+//!
+//! The first is for object types. Just as with structs and enums, we can
+//! decide the subtyping relationship between two object types `&Trait<A>`
+//! and `&Trait<B>` based on the relationship of `A` and `B`. Note that
+//! for object types we ignore the `Self` type parameter -- it is unknown,
+//! and the nature of dynamic dispatch ensures that we will always call a
+//! function that is expecting the appropriate `Self` type. However, we
+//! must be careful with the other type parameters, or else we could end
+//! up calling a function that is expecting one type but provided another.
+//!
+//! To see what I mean, consider a trait like so:
+//!
+//!     trait ConvertTo<A> {
+//!         fn convertTo(&self) -> A;
+//!     }
+//!
+//! Intuitively, if we had one object `O=&ConvertTo<Object>` and another
+//! `S=&ConvertTo<String>`, then `S <: O` because `String <: Object`
+//! (presuming Java-like "string" and "object" types, my go to examples
+//! for subtyping). The actual algorithm would be to compare the
+//! (explicit) type parameters pairwise respecting their variance: here,
+//! the type parameter A is covariant (it appears only in a return
+//! position), and hence we require that `String <: Object`.
+//!
+//! You'll note though that we did not consider the binding for the
+//! (implicit) `Self` type parameter: in fact, it is unknown, so that's
+//! good. The reason we can ignore that parameter is precisely because we
+//! don't need to know its value until a call occurs, and at that time (as
+//! you said) the dynamic nature of virtual dispatch means the code we run
+//! will be correct for whatever value `Self` happens to be bound to for
+//! the particular object whose method we called. `Self` is thus different
+//! from `A`, because the caller requires that `A` be known in order to
+//! know the return type of the method `convertTo()`. (As an aside, we
+//! have rules preventing methods where `Self` appears outside of the
+//! receiver position from being called via an object.)
+//!
+//! #### Trait variance and vtable resolution
+//!
+//! But traits aren't only used with objects. They're also used when
+//! deciding whether a given impl satisfies a given trait bound. To set the
+//! scene here, imagine I had a function:
+//!
+//!     fn convertAll<A,T:ConvertTo<A>>(v: &[T]) {
+//!         ...
+//!     }
+//!
+//! Now imagine that I have an implementation of `ConvertTo` for `Object`:
+//!
+//!     impl ConvertTo<int> for Object { ... }
+//!
+//! And I want to call `convertAll` on an array of strings. Suppose
+//! further that for whatever reason I specifically supply the value of
+//! `String` for the type parameter `T`:
+//!
+//!     let mut vector = ~["string", ...];
+//!     convertAll::<int, String>(v);
+//!
+//! Is this legal? To put another way, can we apply the `impl` for
+//! `Object` to the type `String`? The answer is yes, but to see why
+//! we have to expand out what will happen:
+//!
+//! - `convertAll` will create a pointer to one of the entries in the
+//!   vector, which will have type `&String`
+//! - It will then call the impl of `convertTo()` that is intended
+//!   for use with objects. This has the type:
+//!
+//!       fn(self: &Object) -> int
+//!
+//!   It is ok to provide a value for `self` of type `&String` because
+//!   `&String <: &Object`.
+//!
+//! OK, so intuitively we want this to be legal, so let's bring this back
+//! to variance and see whether we are computing the correct result. We
+//! must first figure out how to phrase the question "is an impl for
+//! `Object,int` usable where an impl for `String,int` is expected?"
+//!
+//! Maybe it's helpful to think of a dictionary-passing implementation of
+//! type classes. In that case, `convertAll()` takes an implicit parameter
+//! representing the impl. In short, we *have* an impl of type:
+//!
+//!     V_O = ConvertTo<int> for Object
+//!
+//! and the function prototype expects an impl of type:
+//!
+//!     V_S = ConvertTo<int> for String
+//!
+//! As with any argument, this is legal if the type of the value given
+//! (`V_O`) is a subtype of the type expected (`V_S`). So is `V_O <: V_S`?
+//! The answer will depend on the variance of the various parameters. In
+//! this case, because the `Self` parameter is contravariant and `A` is
+//! covariant, it means that:
+//!
+//!     V_O <: V_S iff
+//!         int <: int
+//!         String <: Object
+//!
+//! These conditions are satisfied and so we are happy.
+//!
+//! ### The algorithm
+//!
+//! The basic idea is quite straightforward. We iterate over the types
+//! defined and, for each use of a type parameter X, accumulate a
+//! constraint indicating that the variance of X must be valid for the
+//! variance of that use site. We then iteratively refine the variance of
+//! X until all constraints are met. There is *always* a sol'n, because at
+//! the limit we can declare all type parameters to be invariant and all
+//! constraints will be satisfied.
+//!
+//! As a simple example, consider:
+//!
+//!     enum Option<A> { Some(A), None }
+//!     enum OptionalFn<B> { Some(|B|), None }
+//!     enum OptionalMap<C> { Some(|C| -> C), None }
+//!
+//! Here, we will generate the constraints:
+//!
+//!     1. V(A) <= +
+//!     2. V(B) <= -
+//!     3. V(C) <= +
+//!     4. V(C) <= -
+//!
+//! These indicate that (1) the variance of A must be at most covariant;
+//! (2) the variance of B must be at most contravariant; and (3, 4) the
+//! variance of C must be at most covariant *and* contravariant. All of these
+//! results are based on a variance lattice defined as follows:
+//!
+//!       *      Top (bivariant)
+//!    -     +
+//!       o      Bottom (invariant)
+//!
+//! Based on this lattice, the solution V(A)=+, V(B)=-, V(C)=o is the
+//! optimal solution. Note that there is always a naive solution which
+//! just declares all variables to be invariant.
+//!
+//! You may be wondering why fixed-point iteration is required. The reason
+//! is that the variance of a use site may itself be a function of the
+//! variance of other type parameters. In full generality, our constraints
+//! take the form:
+//!
+//!     V(X) <= Term
+//!     Term := + | - | * | o | V(X) | Term x Term
+//!
+//! Here the notation V(X) indicates the variance of a type/region
+//! parameter `X` with respect to its defining class. `Term x Term`
+//! represents the "variance transform" as defined in the paper:
+//!
+//!   If the variance of a type variable `X` in type expression `E` is `V2`
+//!   and the definition-site variance of the [corresponding] type parameter
+//!   of a class `C` is `V1`, then the variance of `X` in the type expression
+//!   `C<E>` is `V3 = V1.xform(V2)`.
 
-    V_O <: V_S iff
-        int <: int
-        String <: Object
-
-These conditions are satisfied and so we are happy.
-
-### The algorithm
-
-The basic idea is quite straightforward. We iterate over the types
-defined and, for each use of a type parameter X, accumulate a
-constraint indicating that the variance of X must be valid for the
-variance of that use site. We then iteratively refine the variance of
-X until all constraints are met. There is *always* a sol'n, because at
-the limit we can declare all type parameters to be invariant and all
-constraints will be satisfied.
-
-As a simple example, consider:
-
-    enum Option<A> { Some(A), None }
-    enum OptionalFn<B> { Some(|B|), None }
-    enum OptionalMap<C> { Some(|C| -> C), None }
-
-Here, we will generate the constraints:
-
-    1. V(A) <= +
-    2. V(B) <= -
-    3. V(C) <= +
-    4. V(C) <= -
-
-These indicate that (1) the variance of A must be at most covariant;
-(2) the variance of B must be at most contravariant; and (3, 4) the
-variance of C must be at most covariant *and* contravariant. All of these
-results are based on a variance lattice defined as follows:
-
-      *      Top (bivariant)
-   -     +
-      o      Bottom (invariant)
-
-Based on this lattice, the solution V(A)=+, V(B)=-, V(C)=o is the
-optimal solution. Note that there is always a naive solution which
-just declares all variables to be invariant.
-
-You may be wondering why fixed-point iteration is required. The reason
-is that the variance of a use site may itself be a function of the
-variance of other type parameters. In full generality, our constraints
-take the form:
-
-    V(X) <= Term
-    Term := + | - | * | o | V(X) | Term x Term
-
-Here the notation V(X) indicates the variance of a type/region
-parameter `X` with respect to its defining class. `Term x Term`
-represents the "variance transform" as defined in the paper:
-
-  If the variance of a type variable `X` in type expression `E` is `V2`
-  and the definition-site variance of the [corresponding] type parameter
-  of a class `C` is `V1`, then the variance of `X` in the type expression
-  `C<E>` is `V3 = V1.xform(V2)`.
-
-*/
 use self::VarianceTerm::*;
 use self::ParamKind::*;
 
@@ -632,6 +629,8 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
         return result;
     }
 
+    /// Returns a variance term representing the declared variance of the type/region parameter
+    /// with the given id.
     fn declared_variance(&self,
                          param_def_id: ast::DefId,
                          item_def_id: ast::DefId,
@@ -639,11 +638,6 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
                          space: ParamSpace,
                          index: uint)
                          -> VarianceTermPtr<'a> {
-        /*!
-         * Returns a variance term representing the declared variance of
-         * the type/region parameter with the given id.
-         */
-
         assert_eq!(param_def_id.krate, item_def_id.krate);
 
         if self.invariant_lang_items[kind as uint] == Some(item_def_id) {
diff --git a/src/librustc/plugin/mod.rs b/src/librustc/plugin/mod.rs
index a03ee471be681..8dd60880cdd56 100644
--- a/src/librustc/plugin/mod.rs
+++ b/src/librustc/plugin/mod.rs
@@ -8,54 +8,52 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
- * Infrastructure for compiler plugins.
- *
- * Plugins are Rust libraries which extend the behavior of `rustc`
- * in various ways.
- *
- * Plugin authors will use the `Registry` type re-exported by
- * this module, along with its methods.  The rest of the module
- * is for use by `rustc` itself.
- *
- * To define a plugin, build a dylib crate with a
- * `#[plugin_registrar]` function:
- *
- * ```rust,ignore
- * #![crate_name = "myplugin"]
- * #![crate_type = "dylib"]
- * #![feature(plugin_registrar)]
- *
- * extern crate rustc;
- *
- * use rustc::plugin::Registry;
- *
- * #[plugin_registrar]
- * pub fn plugin_registrar(reg: &mut Registry) {
- *     reg.register_macro("mymacro", expand_mymacro);
- * }
- *
- * fn expand_mymacro(...) {  // details elided
- * ```
- *
- * WARNING: We currently don't check that the registrar function
- * has the appropriate type!
- *
- * To use a plugin while compiling another crate:
- *
- * ```rust
- * #![feature(phase)]
- *
- * #[phase(plugin)]
- * extern crate myplugin;
- * ```
- *
- * If you also need the plugin crate available at runtime, use
- * `phase(plugin, link)`.
- *
- * See [the compiler plugin guide](../../guide-plugin.html)
- * for more examples.
- */
+//! Infrastructure for compiler plugins.
+//!
+//! Plugins are Rust libraries which extend the behavior of `rustc`
+//! in various ways.
+//!
+//! Plugin authors will use the `Registry` type re-exported by
+//! this module, along with its methods.  The rest of the module
+//! is for use by `rustc` itself.
+//!
+//! To define a plugin, build a dylib crate with a
+//! `#[plugin_registrar]` function:
+//!
+//! ```rust,ignore
+//! #![crate_name = "myplugin"]
+//! #![crate_type = "dylib"]
+//! #![feature(plugin_registrar)]
+//!
+//! extern crate rustc;
+//!
+//! use rustc::plugin::Registry;
+//!
+//! #[plugin_registrar]
+//! pub fn plugin_registrar(reg: &mut Registry) {
+//!     reg.register_macro("mymacro", expand_mymacro);
+//! }
+//!
+//! fn expand_mymacro(...) {  // details elided
+//! ```
+//!
+//! WARNING: We currently don't check that the registrar function
+//! has the appropriate type!
+//!
+//! To use a plugin while compiling another crate:
+//!
+//! ```rust
+//! #![feature(phase)]
+//!
+//! #[phase(plugin)]
+//! extern crate myplugin;
+//! ```
+//!
+//! If you also need the plugin crate available at runtime, use
+//! `phase(plugin, link)`.
+//!
+//! See [the compiler plugin guide](../../guide-plugin.html)
+//! for more examples.
 
 pub use self::registry::Registry;
 
diff --git a/src/librustc/util/common.rs b/src/librustc/util/common.rs
index 7973004d51519..ea252d9fd205c 100644
--- a/src/librustc/util/common.rs
+++ b/src/librustc/util/common.rs
@@ -122,24 +122,20 @@ pub fn block_query(b: &ast::Block, p: |&ast::Expr| -> bool) -> bool {
     return v.flag;
 }
 
-// K: Eq + Hash<S>, V, S, H: Hasher<S>
+/// K: Eq + Hash<S>, V, S, H: Hasher<S>
+///
+/// Determines whether there exists a path from `source` to `destination`.  The graph is defined by
+/// the `edges_map`, which maps from a node `S` to a list of its adjacent nodes `T`.
+///
+/// Efficiency note: This is implemented in an inefficient way because it is typically invoked on
+/// very small graphs. If the graphs become larger, a more efficient graph representation and
+/// algorithm would probably be advised.
 pub fn can_reach<S,H:Hasher<S>,T:Eq+Clone+Hash<S>>(
     edges_map: &HashMap<T,Vec<T>,H>,
     source: T,
     destination: T)
     -> bool
 {
-    /*!
-     * Determines whether there exists a path from `source` to
-     * `destination`.  The graph is defined by the `edges_map`, which
-     * maps from a node `S` to a list of its adjacent nodes `T`.
-     *
-     * Efficiency note: This is implemented in an inefficient way
-     * because it is typically invoked on very small graphs. If the graphs
-     * become larger, a more efficient graph representation and algorithm
-     * would probably be advised.
-     */
-
     if source == destination {
         return true;
     }
diff --git a/src/librustc/util/ppaux.rs b/src/librustc/util/ppaux.rs
index 761a1f66501ca..b739a97f734be 100644
--- a/src/librustc/util/ppaux.rs
+++ b/src/librustc/util/ppaux.rs
@@ -65,12 +65,9 @@ pub fn note_and_explain_region(cx: &ctxt,
     }
 }
 
+/// When a free region is associated with `item`, how should we describe the item in the error
+/// message.
 fn item_scope_tag(item: &ast::Item) -> &'static str {
-    /*!
-     * When a free region is associated with `item`, how should we describe
-     * the item in the error message.
-     */
-
     match item.node {
         ast::ItemImpl(..) => "impl",
         ast::ItemStruct(..) => "struct",
diff --git a/src/librustc/util/snapshot_vec.rs b/src/librustc/util/snapshot_vec.rs
index 91e67bbacc30f..64e67a1f4bf75 100644
--- a/src/librustc/util/snapshot_vec.rs
+++ b/src/librustc/util/snapshot_vec.rs
@@ -8,21 +8,16 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
- * A utility class for implementing "snapshottable" things; a
- * snapshottable data structure permits you to take a snapshot (via
- * `start_snapshot`) and then, after making some changes, elect either
- * to rollback to the start of the snapshot or commit those changes.
- *
- * This vector is intended to be used as part of an abstraction, not
- * serve as a complete abstraction on its own. As such, while it will
- * roll back most changes on its own, it also supports a `get_mut`
- * operation that gives you an abitrary mutable pointer into the
- * vector. To ensure that any changes you make this with this pointer
- * are rolled back, you must invoke `record` to record any changes you
- * make and also supplying a delegate capable of reversing those
- * changes.
- */
+//! A utility class for implementing "snapshottable" things; a snapshottable data structure permits
+//! you to take a snapshot (via `start_snapshot`) and then, after making some changes, elect either
+//! to rollback to the start of the snapshot or commit those changes.
+//!
+//! This vector is intended to be used as part of an abstraction, not serve as a complete
+//! abstraction on its own. As such, while it will roll back most changes on its own, it also
+//! supports a `get_mut` operation that gives you an arbitrary mutable pointer into the vector. To
+//! ensure that any changes you make with this pointer are rolled back, you must invoke `record`
+//! to record any changes you make and also supply a delegate capable of reversing those
+//! changes.
 use self::UndoLog::*;
 
 use std::kinds::marker;
@@ -98,23 +93,16 @@ impl<T,U,D:SnapshotVecDelegate<T,U>> SnapshotVec<T,U,D> {
         &self.values[index]
     }
 
+    /// Returns a mutable pointer into the vec; whatever changes you make here cannot be undone
+    /// automatically, so you should be sure to call `record()` with some sort of suitable undo
+    /// action.
     pub fn get_mut<'a>(&'a mut self, index: uint) -> &'a mut T {
-        /*!
-         * Returns a mutable pointer into the vec; whatever changes
-         * you make here cannot be undone automatically, so you should
-         * be sure call `record()` with some sort of suitable undo
-         * action.
-         */
-
         &mut self.values[index]
     }
 
+    /// Updates the element at the given index. The old value will be saved (and perhaps restored)
+    /// a snapshot is active.
     pub fn set(&mut self, index: uint, new_elem: T) {
-        /*!
-         * Updates the element at the given index. The old value will
-         * saved (and perhaps restored) if a snapshot is active.
-         */
-
         let old_elem = mem::replace(&mut self.values[index], new_elem);
         if self.in_snapshot() {
             self.undo_log.push(SetElem(index, old_elem));
diff --git a/src/librustc_trans/lib.rs b/src/librustc_trans/lib.rs
index f89580b768ea5..4186f479fcce6 100644
--- a/src/librustc_trans/lib.rs
+++ b/src/librustc_trans/lib.rs
@@ -8,15 +8,11 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-
-The Rust compiler.
-
-# Note
-
-This API is completely unstable and subject to change.
-
-*/
+//! The Rust compiler.
+//!
+//! # Note
+//!
+//! This API is completely unstable and subject to change.
 
 #![crate_name = "rustc_trans"]
 #![experimental]
diff --git a/src/librustc_trans/test.rs b/src/librustc_trans/test.rs
index 1e8c1fd14787d..41fbe85576933 100644
--- a/src/librustc_trans/test.rs
+++ b/src/librustc_trans/test.rs
@@ -8,11 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-
-# Standalone Tests for the Inference Module
-
-*/
+//! # Standalone Tests for the Inference Module
 
 use driver::diagnostic;
 use driver::diagnostic::Emitter;
@@ -537,12 +533,10 @@ fn glb_bound_static() {
     })
 }
 
+/// Test substituting a bound region into a function, which introduces another level of binding.
+/// This requires adjusting the Debruijn index.
 #[test]
 fn subst_ty_renumber_bound() {
-    /*!
-     * Test substituting a bound region into a function, which introduces another
-     * level of binding. This requires adjusting the Debruijn index.
-     */
 
     test_env(EMPTY_SOURCE_STR, errors(&[]), |env| {
         // Situation:
@@ -575,13 +569,10 @@ fn subst_ty_renumber_bound() {
     })
 }
 
+/// Test substituting a bound region into a function, which introduces another level of binding.
+/// This requires adjusting the Debruijn index.
 #[test]
 fn subst_ty_renumber_some_bounds() {
-    /*!
-     * Test substituting a bound region into a function, which introduces another
-     * level of binding. This requires adjusting the Debruijn index.
-     */
-
     test_env(EMPTY_SOURCE_STR, errors(&[]), |env| {
         // Situation:
         // Theta = [A -> &'a foo]
@@ -615,12 +606,9 @@ fn subst_ty_renumber_some_bounds() {
     })
 }
 
+/// Test that we correctly compute whether a type has escaping regions or not.
 #[test]
 fn escaping() {
-    /*!
-     * Test that we correctly compute whether a type has escaping
-     * regions or not.
-     */
 
     test_env(EMPTY_SOURCE_STR, errors(&[]), |env| {
         // Situation:
@@ -658,12 +646,10 @@ fn escaping() {
     })
 }
 
+/// Test applying a substitution where the value being substituted for an early-bound region is a
+/// late-bound region.
 #[test]
 fn subst_region_renumber_region() {
-    /*!
-     * Test applying a substitution where the value being substituted
-     * for an early-bound region is a late-bound region.
-     */
 
     test_env(EMPTY_SOURCE_STR, errors(&[]), |env| {
         let re_bound1 = env.re_late_bound_with_debruijn(1, ty::DebruijnIndex::new(1));
diff --git a/src/librustc_trans/trans/_match.rs b/src/librustc_trans/trans/_match.rs
index 381220d587cbc..d83eeadc7b96f 100644
--- a/src/librustc_trans/trans/_match.rs
+++ b/src/librustc_trans/trans/_match.rs
@@ -8,183 +8,179 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
- *
- * # Compilation of match statements
- *
- * I will endeavor to explain the code as best I can.  I have only a loose
- * understanding of some parts of it.
- *
- * ## Matching
- *
- * The basic state of the code is maintained in an array `m` of `Match`
- * objects.  Each `Match` describes some list of patterns, all of which must
- * match against the current list of values.  If those patterns match, then
- * the arm listed in the match is the correct arm.  A given arm may have
- * multiple corresponding match entries, one for each alternative that
- * remains.  As we proceed these sets of matches are adjusted by the various
- * `enter_XXX()` functions, each of which adjusts the set of options given
- * some information about the value which has been matched.
- *
- * So, initially, there is one value and N matches, each of which have one
- * constituent pattern.  N here is usually the number of arms but may be
- * greater, if some arms have multiple alternatives.  For example, here:
- *
- *     enum Foo { A, B(int), C(uint, uint) }
- *     match foo {
- *         A => ...,
- *         B(x) => ...,
- *         C(1u, 2) => ...,
- *         C(_) => ...
- *     }
- *
- * The value would be `foo`.  There would be four matches, each of which
- * contains one pattern (and, in one case, a guard).  We could collect the
- * various options and then compile the code for the case where `foo` is an
- * `A`, a `B`, and a `C`.  When we generate the code for `C`, we would (1)
- * drop the two matches that do not match a `C` and (2) expand the other two
- * into two patterns each.  In the first case, the two patterns would be `1u`
- * and `2`, and the in the second case the _ pattern would be expanded into
- * `_` and `_`.  The two values are of course the arguments to `C`.
- *
- * Here is a quick guide to the various functions:
- *
- * - `compile_submatch()`: The main workhouse.  It takes a list of values and
- *   a list of matches and finds the various possibilities that could occur.
- *
- * - `enter_XXX()`: modifies the list of matches based on some information
- *   about the value that has been matched.  For example,
- *   `enter_rec_or_struct()` adjusts the values given that a record or struct
- *   has been matched.  This is an infallible pattern, so *all* of the matches
- *   must be either wildcards or record/struct patterns.  `enter_opt()`
- *   handles the fallible cases, and it is correspondingly more complex.
- *
- * ## Bindings
- *
- * We store information about the bound variables for each arm as part of the
- * per-arm `ArmData` struct.  There is a mapping from identifiers to
- * `BindingInfo` structs.  These structs contain the mode/id/type of the
- * binding, but they also contain an LLVM value which points at an alloca
- * called `llmatch`. For by value bindings that are Copy, we also create
- * an extra alloca that we copy the matched value to so that any changes
- * we do to our copy is not reflected in the original and vice-versa.
- * We don't do this if it's a move since the original value can't be used
- * and thus allowing us to cheat in not creating an extra alloca.
- *
- * The `llmatch` binding always stores a pointer into the value being matched
- * which points at the data for the binding.  If the value being matched has
- * type `T`, then, `llmatch` will point at an alloca of type `T*` (and hence
- * `llmatch` has type `T**`).  So, if you have a pattern like:
- *
- *    let a: A = ...;
- *    let b: B = ...;
- *    match (a, b) { (ref c, d) => { ... } }
- *
- * For `c` and `d`, we would generate allocas of type `C*` and `D*`
- * respectively.  These are called the `llmatch`.  As we match, when we come
- * up against an identifier, we store the current pointer into the
- * corresponding alloca.
- *
- * Once a pattern is completely matched, and assuming that there is no guard
- * pattern, we will branch to a block that leads to the body itself.  For any
- * by-value bindings, this block will first load the ptr from `llmatch` (the
- * one of type `D*`) and then load a second time to get the actual value (the
- * one of type `D`). For by ref bindings, the value of the local variable is
- * simply the first alloca.
- *
- * So, for the example above, we would generate a setup kind of like this:
- *
- *        +-------+
- *        | Entry |
- *        +-------+
- *            |
- *        +--------------------------------------------+
- *        | llmatch_c = (addr of first half of tuple)  |
- *        | llmatch_d = (addr of second half of tuple) |
- *        +--------------------------------------------+
- *            |
- *        +--------------------------------------+
- *        | *llbinding_d = **llmatch_d           |
- *        +--------------------------------------+
- *
- * If there is a guard, the situation is slightly different, because we must
- * execute the guard code.  Moreover, we need to do so once for each of the
- * alternatives that lead to the arm, because if the guard fails, they may
- * have different points from which to continue the search. Therefore, in that
- * case, we generate code that looks more like:
- *
- *        +-------+
- *        | Entry |
- *        +-------+
- *            |
- *        +-------------------------------------------+
- *        | llmatch_c = (addr of first half of tuple) |
- *        | llmatch_d = (addr of first half of tuple) |
- *        +-------------------------------------------+
- *            |
- *        +-------------------------------------------------+
- *        | *llbinding_d = **llmatch_d                      |
- *        | check condition                                 |
- *        | if false { goto next case }                     |
- *        | if true { goto body }                           |
- *        +-------------------------------------------------+
- *
- * The handling for the cleanups is a bit... sensitive.  Basically, the body
- * is the one that invokes `add_clean()` for each binding.  During the guard
- * evaluation, we add temporary cleanups and revoke them after the guard is
- * evaluated (it could fail, after all). Note that guards and moves are
- * just plain incompatible.
- *
- * Some relevant helper functions that manage bindings:
- * - `create_bindings_map()`
- * - `insert_lllocals()`
- *
- *
- * ## Notes on vector pattern matching.
- *
- * Vector pattern matching is surprisingly tricky. The problem is that
- * the structure of the vector isn't fully known, and slice matches
- * can be done on subparts of it.
- *
- * The way that vector pattern matches are dealt with, then, is as
- * follows. First, we make the actual condition associated with a
- * vector pattern simply a vector length comparison. So the pattern
- * [1, .. x] gets the condition "vec len >= 1", and the pattern
- * [.. x] gets the condition "vec len >= 0". The problem here is that
- * having the condition "vec len >= 1" hold clearly does not mean that
- * only a pattern that has exactly that condition will match. This
- * means that it may well be the case that a condition holds, but none
- * of the patterns matching that condition match; to deal with this,
- * when doing vector length matches, we have match failures proceed to
- * the next condition to check.
- *
- * There are a couple more subtleties to deal with. While the "actual"
- * condition associated with vector length tests is simply a test on
- * the vector length, the actual vec_len Opt entry contains more
- * information used to restrict which matches are associated with it.
- * So that all matches in a submatch are matching against the same
- * values from inside the vector, they are split up by how many
- * elements they match at the front and at the back of the vector. In
- * order to make sure that arms are properly checked in order, even
- * with the overmatching conditions, each vec_len Opt entry is
- * associated with a range of matches.
- * Consider the following:
- *
- *   match &[1, 2, 3] {
- *       [1, 1, .. _] => 0,
- *       [1, 2, 2, .. _] => 1,
- *       [1, 2, 3, .. _] => 2,
- *       [1, 2, .. _] => 3,
- *       _ => 4
- *   }
- * The proper arm to match is arm 2, but arms 0 and 3 both have the
- * condition "len >= 2". If arm 3 was lumped in with arm 0, then the
- * wrong branch would be taken. Instead, vec_len Opts are associated
- * with a contiguous range of matches that have the same "shape".
- * This is sort of ugly and requires a bunch of special handling of
- * vec_len options.
- *
- */
+//! # Compilation of match statements
+//!
+//! I will endeavor to explain the code as best I can.  I have only a loose
+//! understanding of some parts of it.
+//!
+//! ## Matching
+//!
+//! The basic state of the code is maintained in an array `m` of `Match`
+//! objects.  Each `Match` describes some list of patterns, all of which must
+//! match against the current list of values.  If those patterns match, then
+//! the arm listed in the match is the correct arm.  A given arm may have
+//! multiple corresponding match entries, one for each alternative that
+//! remains.  As we proceed these sets of matches are adjusted by the various
+//! `enter_XXX()` functions, each of which adjusts the set of options given
+//! some information about the value which has been matched.
+//!
+//! So, initially, there is one value and N matches, each of which have one
+//! constituent pattern.  N here is usually the number of arms but may be
+//! greater, if some arms have multiple alternatives.  For example, here:
+//!
+//!     enum Foo { A, B(int), C(uint, uint) }
+//!     match foo {
+//!         A => ...,
+//!         B(x) => ...,
+//!         C(1u, 2) => ...,
+//!         C(_) => ...
+//!     }
+//!
+//! The value would be `foo`.  There would be four matches, each of which
+//! contains one pattern (and, in one case, a guard).  We could collect the
+//! various options and then compile the code for the case where `foo` is an
+//! `A`, a `B`, and a `C`.  When we generate the code for `C`, we would (1)
+//! drop the two matches that do not match a `C` and (2) expand the other two
+//! into two patterns each.  In the first case, the two patterns would be `1u`
+//! and `2`, and in the second case the _ pattern would be expanded into
+//! `_` and `_`.  The two values are of course the arguments to `C`.
+//!
+//! Here is a quick guide to the various functions:
+//!
+//! - `compile_submatch()`: The main workhorse.  It takes a list of values and
+//!   a list of matches and finds the various possibilities that could occur.
+//!
+//! - `enter_XXX()`: modifies the list of matches based on some information
+//!   about the value that has been matched.  For example,
+//!   `enter_rec_or_struct()` adjusts the values given that a record or struct
+//!   has been matched.  This is an infallible pattern, so *all* of the matches
+//!   must be either wildcards or record/struct patterns.  `enter_opt()`
+//!   handles the fallible cases, and it is correspondingly more complex.
+//!
+//! ## Bindings
+//!
+//! We store information about the bound variables for each arm as part of the
+//! per-arm `ArmData` struct.  There is a mapping from identifiers to
+//! `BindingInfo` structs.  These structs contain the mode/id/type of the
+//! binding, but they also contain an LLVM value which points at an alloca
+//! called `llmatch`. For by value bindings that are Copy, we also create
+//! an extra alloca that we copy the matched value to so that any changes
+//! we do to our copy are not reflected in the original and vice-versa.
+//! We don't do this if it's a move since the original value can't be used
+//! and thus allowing us to cheat in not creating an extra alloca.
+//!
+//! The `llmatch` binding always stores a pointer into the value being matched
+//! which points at the data for the binding.  If the value being matched has
+//! type `T`, then, `llmatch` will point at an alloca of type `T*` (and hence
+//! `llmatch` has type `T**`).  So, if you have a pattern like:
+//!
+//!    let a: A = ...;
+//!    let b: B = ...;
+//!    match (a, b) { (ref c, d) => { ... } }
+//!
+//! For `c` and `d`, we would generate allocas of type `C*` and `D*`
+//! respectively.  These are called the `llmatch`.  As we match, when we come
+//! up against an identifier, we store the current pointer into the
+//! corresponding alloca.
+//!
+//! Once a pattern is completely matched, and assuming that there is no guard
+//! pattern, we will branch to a block that leads to the body itself.  For any
+//! by-value bindings, this block will first load the ptr from `llmatch` (the
+//! one of type `D*`) and then load a second time to get the actual value (the
+//! one of type `D`). For by ref bindings, the value of the local variable is
+//! simply the first alloca.
+//!
+//! So, for the example above, we would generate a setup kind of like this:
+//!
+//!        +-------+
+//!        | Entry |
+//!        +-------+
+//!            |
+//!        +--------------------------------------------+
+//!        | llmatch_c = (addr of first half of tuple)  |
+//!        | llmatch_d = (addr of second half of tuple) |
+//!        +--------------------------------------------+
+//!            |
+//!        +--------------------------------------+
+//!        | *llbinding_d = **llmatch_d           |
+//!        +--------------------------------------+
+//!
+//! If there is a guard, the situation is slightly different, because we must
+//! execute the guard code.  Moreover, we need to do so once for each of the
+//! alternatives that lead to the arm, because if the guard fails, they may
+//! have different points from which to continue the search. Therefore, in that
+//! case, we generate code that looks more like:
+//!
+//!        +-------+
+//!        | Entry |
+//!        +-------+
+//!            |
+//!        +-------------------------------------------+
+//!        | llmatch_c = (addr of first half of tuple) |
+//!        | llmatch_d = (addr of first half of tuple) |
+//!        +-------------------------------------------+
+//!            |
+//!        +-------------------------------------------------+
+//!        | *llbinding_d = **llmatch_d                      |
+//!        | check condition                                 |
+//!        | if false { goto next case }                     |
+//!        | if true { goto body }                           |
+//!        +-------------------------------------------------+
+//!
+//! The handling for the cleanups is a bit... sensitive.  Basically, the body
+//! is the one that invokes `add_clean()` for each binding.  During the guard
+//! evaluation, we add temporary cleanups and revoke them after the guard is
+//! evaluated (it could fail, after all). Note that guards and moves are
+//! just plain incompatible.
+//!
+//! Some relevant helper functions that manage bindings:
+//! - `create_bindings_map()`
+//! - `insert_lllocals()`
+//!
+//!
+//! ## Notes on vector pattern matching.
+//!
+//! Vector pattern matching is surprisingly tricky. The problem is that
+//! the structure of the vector isn't fully known, and slice matches
+//! can be done on subparts of it.
+//!
+//! The way that vector pattern matches are dealt with, then, is as
+//! follows. First, we make the actual condition associated with a
+//! vector pattern simply a vector length comparison. So the pattern
+//! [1, .. x] gets the condition "vec len >= 1", and the pattern
+//! [.. x] gets the condition "vec len >= 0". The problem here is that
+//! having the condition "vec len >= 1" hold clearly does not mean that
+//! only a pattern that has exactly that condition will match. This
+//! means that it may well be the case that a condition holds, but none
+//! of the patterns matching that condition match; to deal with this,
+//! when doing vector length matches, we have match failures proceed to
+//! the next condition to check.
+//!
+//! There are a couple more subtleties to deal with. While the "actual"
+//! condition associated with vector length tests is simply a test on
+//! the vector length, the actual vec_len Opt entry contains more
+//! information used to restrict which matches are associated with it.
+//! So that all matches in a submatch are matching against the same
+//! values from inside the vector, they are split up by how many
+//! elements they match at the front and at the back of the vector. In
+//! order to make sure that arms are properly checked in order, even
+//! with the overmatching conditions, each vec_len Opt entry is
+//! associated with a range of matches.
+//! Consider the following:
+//!
+//!   match &[1, 2, 3] {
+//!       [1, 1, .. _] => 0,
+//!       [1, 2, 2, .. _] => 1,
+//!       [1, 2, 3, .. _] => 2,
+//!       [1, 2, .. _] => 3,
+//!       _ => 4
+//!   }
+//! The proper arm to match is arm 2, but arms 0 and 3 both have the
+//! condition "len >= 2". If arm 3 was lumped in with arm 0, then the
+//! wrong branch would be taken. Instead, vec_len Opts are associated
+//! with a contiguous range of matches that have the same "shape".
+//! This is sort of ugly and requires a bunch of special handling of
+//! vec_len options.
 
 pub use self::BranchKind::*;
 pub use self::OptResult::*;
@@ -620,12 +616,9 @@ fn extract_variant_args<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
     ExtractedBlock { vals: args, bcx: bcx }
 }
 
+/// Helper for converting from the ValueRef that we pass around in the match code, which is always
+/// an lvalue, into a Datum. Eventually we should just pass around a Datum and be done with it.
 fn match_datum<'tcx>(val: ValueRef, left_ty: Ty<'tcx>) -> Datum<'tcx, Lvalue> {
-    /*!
-     * Helper for converting from the ValueRef that we pass around in
-     * the match code, which is always an lvalue, into a Datum. Eventually
-     * we should just pass around a Datum and be done with it.
-     */
     Datum::new(val, left_ty, Lvalue)
 }
 
@@ -831,15 +824,11 @@ fn compare_values<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
     }
 }
 
+/// For each binding in `data.bindings_map`, adds an appropriate entry into the `fcx.lllocals` map
 fn insert_lllocals<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                                bindings_map: &BindingsMap<'tcx>,
                                cs: Option<cleanup::ScopeId>)
                                -> Block<'blk, 'tcx> {
-    /*!
-     * For each binding in `data.bindings_map`, adds an appropriate entry into
-     * the `fcx.lllocals` map
-     */
-
     for (&ident, &binding_info) in bindings_map.iter() {
         let llval = match binding_info.trmode {
             // By value mut binding for a copy type: load from the ptr
@@ -1416,13 +1405,11 @@ fn trans_match_inner<'blk, 'tcx>(scope_cx: Block<'blk, 'tcx>,
     return bcx;
 }
 
+/// Generates code for a local variable declaration like `let <pat>;` or `let <pat> =
+/// <opt_init_expr>`.
 pub fn store_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                local: &ast::Local)
                                -> Block<'blk, 'tcx> {
-    /*!
-     * Generates code for a local variable declaration like
-     * `let <pat>;` or `let <pat> = <opt_init_expr>`.
-     */
     let _icx = push_ctxt("match::store_local");
     let mut bcx = bcx;
     let tcx = bcx.tcx();
@@ -1482,24 +1469,21 @@ pub fn store_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
     }
 }
 
+/// Generates code for argument patterns like `fn foo(<pat>: T)`.
+/// Creates entries in the `lllocals` map for each of the bindings
+/// in `pat`.
+///
+/// # Arguments
+///
+/// - `pat` is the argument pattern
+/// - `llval` is a pointer to the argument value (in other words,
+///   if the argument type is `T`, then `llval` is a `T*`). In some
+///   cases, this code may zero out the memory `llval` points at.
 pub fn store_arg<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                              pat: &ast::Pat,
                              arg: Datum<'tcx, Rvalue>,
                              arg_scope: cleanup::ScopeId)
                              -> Block<'blk, 'tcx> {
-    /*!
-     * Generates code for argument patterns like `fn foo(<pat>: T)`.
-     * Creates entries in the `lllocals` map for each of the bindings
-     * in `pat`.
-     *
-     * # Arguments
-     *
-     * - `pat` is the argument pattern
-     * - `llval` is a pointer to the argument value (in other words,
-     *   if the argument type is `T`, then `llval` is a `T*`). In some
-     *   cases, this code may zero out the memory `llval` points at.
-     */
-
     let _icx = push_ctxt("match::store_arg");
 
     match simple_identifier(&*pat) {
@@ -1583,26 +1567,23 @@ fn mk_binding_alloca<'blk, 'tcx, A>(bcx: Block<'blk, 'tcx>,
     bcx
 }
 
+/// A simple version of the pattern matching code that only handles
+/// irrefutable patterns. This is used in let/argument patterns,
+/// not in match statements. Unifying this code with the code above
+/// sounds nice, but in practice it produces very inefficient code,
+/// since the match code is so much more general. In most cases,
+/// LLVM is able to optimize the code, but it causes longer compile
+/// times and makes the generated code nigh impossible to read.
+///
+/// # Arguments
+/// - bcx: starting basic block context
+/// - pat: the irrefutable pattern being matched.
+/// - val: the value being matched -- must be an lvalue (by ref, with cleanup)
 fn bind_irrefutable_pat<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                     pat: &ast::Pat,
                                     val: ValueRef,
                                     cleanup_scope: cleanup::ScopeId)
                                     -> Block<'blk, 'tcx> {
-    /*!
-     * A simple version of the pattern matching code that only handles
-     * irrefutable patterns. This is used in let/argument patterns,
-     * not in match statements. Unifying this code with the code above
-     * sounds nice, but in practice it produces very inefficient code,
-     * since the match code is so much more general. In most cases,
-     * LLVM is able to optimize the code, but it causes longer compile
-     * times and makes the generated code nigh impossible to read.
-     *
-     * # Arguments
-     * - bcx: starting basic block context
-     * - pat: the irrefutable pattern being matched.
-     * - val: the value being matched -- must be an lvalue (by ref, with cleanup)
-     */
-
     debug!("bind_irrefutable_pat(bcx={}, pat={})",
            bcx.to_str(),
            pat.repr(bcx.tcx()));
diff --git a/src/librustc_trans/trans/adt.rs b/src/librustc_trans/trans/adt.rs
index e7d1b9726a1b1..568805bee4047 100644
--- a/src/librustc_trans/trans/adt.rs
+++ b/src/librustc_trans/trans/adt.rs
@@ -8,40 +8,38 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
- * # Representation of Algebraic Data Types
- *
- * This module determines how to represent enums, structs, and tuples
- * based on their monomorphized types; it is responsible both for
- * choosing a representation and translating basic operations on
- * values of those types.  (Note: exporting the representations for
- * debuggers is handled in debuginfo.rs, not here.)
- *
- * Note that the interface treats everything as a general case of an
- * enum, so structs/tuples/etc. have one pseudo-variant with
- * discriminant 0; i.e., as if they were a univariant enum.
- *
- * Having everything in one place will enable improvements to data
- * structure representation; possibilities include:
- *
- * - User-specified alignment (e.g., cacheline-aligning parts of
- *   concurrently accessed data structures); LLVM can't represent this
- *   directly, so we'd have to insert padding fields in any structure
- *   that might contain one and adjust GEP indices accordingly.  See
- *   issue #4578.
- *
- * - Store nested enums' discriminants in the same word.  Rather, if
- *   some variants start with enums, and those enums representations
- *   have unused alignment padding between discriminant and body, the
- *   outer enum's discriminant can be stored there and those variants
- *   can start at offset 0.  Kind of fancy, and might need work to
- *   make copies of the inner enum type cooperate, but it could help
- *   with `Option` or `Result` wrapped around another enum.
- *
- * - Tagged pointers would be neat, but given that any type can be
- *   used unboxed and any field can have pointers (including mutable)
- *   taken to it, implementing them for Rust seems difficult.
- */
+//! # Representation of Algebraic Data Types
+//!
+//! This module determines how to represent enums, structs, and tuples
+//! based on their monomorphized types; it is responsible both for
+//! choosing a representation and translating basic operations on
+//! values of those types.  (Note: exporting the representations for
+//! debuggers is handled in debuginfo.rs, not here.)
+//!
+//! Note that the interface treats everything as a general case of an
+//! enum, so structs/tuples/etc. have one pseudo-variant with
+//! discriminant 0; i.e., as if they were a univariant enum.
+//!
+//! Having everything in one place will enable improvements to data
+//! structure representation; possibilities include:
+//!
+//! - User-specified alignment (e.g., cacheline-aligning parts of
+//!   concurrently accessed data structures); LLVM can't represent this
+//!   directly, so we'd have to insert padding fields in any structure
+//!   that might contain one and adjust GEP indices accordingly.  See
+//!   issue #4578.
+//!
+//! - Store nested enums' discriminants in the same word.  Rather, if
+//!   some variants start with enums, and those enums representations
+//!   have unused alignment padding between discriminant and body, the
+//!   outer enum's discriminant can be stored there and those variants
+//!   can start at offset 0.  Kind of fancy, and might need work to
+//!   make copies of the inner enum type cooperate, but it could help
+//!   with `Option` or `Result` wrapped around another enum.
+//!
+//! - Tagged pointers would be neat, but given that any type can be
+//!   used unboxed and any field can have pointers (including mutable)
+//!   taken to it, implementing them for Rust seems difficult.
 
 #![allow(unsigned_negation)]
 
diff --git a/src/librustc_trans/trans/asm.rs b/src/librustc_trans/trans/asm.rs
index 9b499b6d1a147..024df2a63adb5 100644
--- a/src/librustc_trans/trans/asm.rs
+++ b/src/librustc_trans/trans/asm.rs
@@ -8,9 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-# Translation of inline assembly.
-*/
+//! # Translation of inline assembly.
 
 use llvm;
 use trans::build::*;
diff --git a/src/librustc_trans/trans/base.rs b/src/librustc_trans/trans/base.rs
index 6fe5298393e7d..52e54a4a2613a 100644
--- a/src/librustc_trans/trans/base.rs
+++ b/src/librustc_trans/trans/base.rs
@@ -1050,14 +1050,11 @@ pub fn load_if_immediate<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
     return v;
 }
 
+/// Helper for loading values from memory. Does the necessary conversion if the in-memory type
+/// differs from the type used for SSA values. Also handles various special cases where the type
+/// gives us better information about what we are loading.
 pub fn load_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
                            ptr: ValueRef, t: Ty<'tcx>) -> ValueRef {
-    /*!
-     * Helper for loading values from memory. Does the necessary conversion if
-     * the in-memory type differs from the type used for SSA values. Also
-     * handles various special cases where the type gives us better information
-     * about what we are loading.
-     */
     if type_is_zero_size(cx.ccx(), t) {
         C_undef(type_of::type_of(cx.ccx(), t))
     } else if ty::type_is_bool(t) {
@@ -1071,11 +1068,9 @@ pub fn load_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
     }
 }
 
+/// Helper for storing values in memory. Does the necessary conversion if the in-memory type
+/// differs from the type used for SSA values.
 pub fn store_ty(cx: Block, v: ValueRef, dst: ValueRef, t: Ty) {
-    /*!
-     * Helper for storing values in memory. Does the necessary conversion if
-     * the in-memory type differs from the type used for SSA values.
-     */
     if ty::type_is_bool(t) {
         Store(cx, ZExt(cx, v, Type::i8(cx.ccx())), dst);
     } else {
diff --git a/src/librustc_trans/trans/callee.rs b/src/librustc_trans/trans/callee.rs
index 6d0f598044235..5d713526a3d6a 100644
--- a/src/librustc_trans/trans/callee.rs
+++ b/src/librustc_trans/trans/callee.rs
@@ -8,13 +8,11 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
- * Handles translation of callees as well as other call-related
- * things.  Callees are a superset of normal rust values and sometimes
- * have different representations.  In particular, top-level fn items
- * and methods are represented as just a fn ptr and not a full
- * closure.
- */
+//! Handles translation of callees as well as other call-related
+//! things.  Callees are a superset of normal rust values and sometimes
+//! have different representations.  In particular, top-level fn items
+//! and methods are represented as just a fn ptr and not a full
+//! closure.
 
 pub use self::AutorefArg::*;
 pub use self::CalleeData::*;
@@ -220,13 +218,9 @@ fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, expr: &ast::Expr)
     }
 }
 
+/// Translates a reference (with id `ref_id`) to the fn/method with id `def_id` into a function
+/// pointer. This may require monomorphization or inlining.
 pub fn trans_fn_ref(bcx: Block, def_id: ast::DefId, node: ExprOrMethodCall) -> ValueRef {
-    /*!
-     * Translates a reference (with id `ref_id`) to the fn/method
-     * with id `def_id` into a function pointer.  This may require
-     * monomorphization or inlining.
-     */
-
     let _icx = push_ctxt("trans_fn_ref");
 
     let substs = node_id_substs(bcx, node);
@@ -398,6 +392,17 @@ pub fn trans_unboxing_shim<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
     llfn
 }
 
+/// Translates a reference to a fn/method item, monomorphizing and
+/// inlining as it goes.
+///
+/// # Parameters
+///
+/// - `bcx`: the current block where the reference to the fn occurs
+/// - `def_id`: def id of the fn or method item being referenced
+/// - `node`: node id of the reference to the fn/method, if applicable.
+///   This parameter may be zero; but, if so, the resulting value may not
+///   have the right type, so it must be cast before being used.
+/// - `substs`: values for each of the fn/method's parameters
 pub fn trans_fn_ref_with_substs<'blk, 'tcx>(
     bcx: Block<'blk, 'tcx>,      //
     def_id: ast::DefId,          // def id of fn
@@ -405,20 +410,6 @@ pub fn trans_fn_ref_with_substs<'blk, 'tcx>(
     substs: subst::Substs<'tcx>) // vtables for the call
     -> ValueRef
 {
-    /*!
-     * Translates a reference to a fn/method item, monomorphizing and
-     * inlining as it goes.
-     *
-     * # Parameters
-     *
-     * - `bcx`: the current block where the reference to the fn occurs
-     * - `def_id`: def id of the fn or method item being referenced
-     * - `node`: node id of the reference to the fn/method, if applicable.
-     *   This parameter may be zero; but, if so, the resulting value may not
-     *   have the right type, so it must be cast before being used.
-     * - `substs`: values for each of the fn/method's parameters
-     */
-
     let _icx = push_ctxt("trans_fn_ref_with_substs");
     let ccx = bcx.ccx();
     let tcx = bcx.tcx();
@@ -668,6 +659,16 @@ pub fn trans_lang_call<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              dest)
 }
 
+/// This behemoth of a function translates function calls. Unfortunately, in order to generate more
+/// efficient LLVM output at -O0, it has quite a complex signature (refactoring this into two
+/// functions seems like a good idea).
+///
+/// In particular, for lang items, it is invoked with a dest of None, and in that case the return
+/// value contains the result of the fn. The lang item must not return a structural type or else
+/// all heck breaks loose.
+///
+/// For non-lang items, `dest` is always Some, and hence the result is written into memory
+/// somewhere. Nonetheless we return the actual return value of the function.
 pub fn trans_call_inner<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                         call_info: Option<NodeInfo>,
                                         callee_ty: Ty<'tcx>,
@@ -677,22 +678,6 @@ pub fn trans_call_inner<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                         args: CallArgs<'a, 'tcx>,
                                         dest: Option<expr::Dest>)
                                         -> Result<'blk, 'tcx> {
-    /*!
-     * This behemoth of a function translates function calls.
-     * Unfortunately, in order to generate more efficient LLVM
-     * output at -O0, it has quite a complex signature (refactoring
-     * this into two functions seems like a good idea).
-     *
-     * In particular, for lang items, it is invoked with a dest of
-     * None, and in that case the return value contains the result of
-     * the fn. The lang item must not return a structural type or else
-     * all heck breaks loose.
-     *
-     * For non-lang items, `dest` is always Some, and hence the result
-     * is written into memory somewhere. Nonetheless we return the
-     * actual return value of the function.
-     */
-
     // Introduce a temporary cleanup scope that will contain cleanups
     // for the arguments while they are being evaluated. The purpose
     // this cleanup is to ensure that, should a panic occur while
diff --git a/src/librustc_trans/trans/cleanup.rs b/src/librustc_trans/trans/cleanup.rs
index b0235be7497ea..d7da83ddb0d04 100644
--- a/src/librustc_trans/trans/cleanup.rs
+++ b/src/librustc_trans/trans/cleanup.rs
@@ -8,10 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
- * Code pertaining to cleanup of temporaries as well as execution of
- * drop glue. See discussion in `doc.rs` for a high-level summary.
- */
+//! Code pertaining to cleanup of temporaries as well as execution of
+//! drop glue. See discussion in `doc.rs` for a high-level summary.
 
 pub use self::ScopeId::*;
 pub use self::CleanupScopeKind::*;
@@ -114,12 +112,8 @@ pub enum ScopeId {
 }
 
 impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
+    /// Invoked when we start to trans the code contained within a new cleanup scope.
     fn push_ast_cleanup_scope(&self, debug_loc: NodeInfo) {
-        /*!
-         * Invoked when we start to trans the code contained
-         * within a new cleanup scope.
-         */
-
         debug!("push_ast_cleanup_scope({})",
                self.ccx.tcx().map.node_to_string(debug_loc.id));
 
@@ -189,16 +183,12 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
         CustomScopeIndex { index: index }
     }
 
+    /// Removes the cleanup scope for id `cleanup_scope`, which must be at the top of the cleanup
+    /// stack, and generates the code to do its cleanups for normal exit.
     fn pop_and_trans_ast_cleanup_scope(&self,
                                        bcx: Block<'blk, 'tcx>,
                                        cleanup_scope: ast::NodeId)
                                        -> Block<'blk, 'tcx> {
-        /*!
-         * Removes the cleanup scope for id `cleanup_scope`, which
-         * must be at the top of the cleanup stack, and generates the
-         * code to do its cleanups for normal exit.
-         */
-
         debug!("pop_and_trans_ast_cleanup_scope({})",
                self.ccx.tcx().map.node_to_string(cleanup_scope));
 
@@ -208,15 +198,11 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
         self.trans_scope_cleanups(bcx, &scope)
     }
 
+    /// Removes the loop cleanup scope for id `cleanup_scope`, which must be at the top of the
+    /// cleanup stack. Does not generate any cleanup code, since loop scopes should exit by
+    /// branching to a block generated by `normal_exit_block`.
     fn pop_loop_cleanup_scope(&self,
                               cleanup_scope: ast::NodeId) {
-        /*!
-         * Removes the loop cleanup scope for id `cleanup_scope`, which
-         * must be at the top of the cleanup stack. Does not generate
-         * any cleanup code, since loop scopes should exit by
-         * branching to a block generated by `normal_exit_block`.
-         */
-
         debug!("pop_loop_cleanup_scope({})",
                self.ccx.tcx().map.node_to_string(cleanup_scope));
 
@@ -225,29 +211,21 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
         let _ = self.pop_scope();
     }
 
+    /// Removes the top cleanup scope from the stack without executing its cleanups. The top
+    /// cleanup scope must be the temporary scope `custom_scope`.
     fn pop_custom_cleanup_scope(&self,
                                 custom_scope: CustomScopeIndex) {
-        /*!
-         * Removes the top cleanup scope from the stack without
-         * executing its cleanups. The top cleanup scope must
-         * be the temporary scope `custom_scope`.
-         */
-
         debug!("pop_custom_cleanup_scope({})", custom_scope.index);
         assert!(self.is_valid_to_pop_custom_scope(custom_scope));
         let _ = self.pop_scope();
     }
 
+    /// Removes the top cleanup scope from the stack, which must be a temporary scope, and
+    /// generates the code to do its cleanups for normal exit.
     fn pop_and_trans_custom_cleanup_scope(&self,
                                           bcx: Block<'blk, 'tcx>,
                                           custom_scope: CustomScopeIndex)
                                           -> Block<'blk, 'tcx> {
-        /*!
-         * Removes the top cleanup scope from the stack, which must be
-         * a temporary scope, and generates the code to do its
-         * cleanups for normal exit.
-         */
-
         debug!("pop_and_trans_custom_cleanup_scope({})", custom_scope);
         assert!(self.is_valid_to_pop_custom_scope(custom_scope));
 
@@ -255,11 +233,8 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
         self.trans_scope_cleanups(bcx, &scope)
     }
 
+    /// Returns the id of the top-most loop scope
     fn top_loop_scope(&self) -> ast::NodeId {
-        /*!
-         * Returns the id of the top-most loop scope
-         */
-
         for scope in self.scopes.borrow().iter().rev() {
             match scope.kind {
                 LoopScopeKind(id, _) => {
@@ -271,24 +246,17 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
         self.ccx.sess().bug("no loop scope found");
     }
 
+    /// Returns a block to branch to which will perform all pending cleanups and then
+    /// break/continue (depending on `exit`) out of the loop with id `cleanup_scope`
     fn normal_exit_block(&'blk self,
                          cleanup_scope: ast::NodeId,
                          exit: uint) -> BasicBlockRef {
-        /*!
-         * Returns a block to branch to which will perform all pending
-         * cleanups and then break/continue (depending on `exit`) out
-         * of the loop with id `cleanup_scope`
-         */
-
         self.trans_cleanups_to_exit_scope(LoopExit(cleanup_scope, exit))
     }
 
+    /// Returns a block to branch to which will perform all pending cleanups and then return from
+    /// this function
     fn return_exit_block(&'blk self) -> BasicBlockRef {
-        /*!
-         * Returns a block to branch to which will perform all pending
-         * cleanups and then return from this function
-         */
-
         self.trans_cleanups_to_exit_scope(ReturnExit)
     }
 
@@ -306,15 +274,11 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
         self.schedule_clean(cleanup_scope, drop as CleanupObj);
     }
 
+    /// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty`
     fn schedule_drop_mem(&self,
                          cleanup_scope: ScopeId,
                          val: ValueRef,
                          ty: Ty<'tcx>) {
-        /*!
-         * Schedules a (deep) drop of `val`, which is a pointer to an
-         * instance of `ty`
-         */
-
         if !ty::type_needs_drop(self.ccx.tcx(), ty) { return; }
         let drop = box DropValue {
             is_immediate: false,
@@ -332,15 +296,11 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
         self.schedule_clean(cleanup_scope, drop as CleanupObj);
     }
 
+    /// Schedules a (deep) drop and zero-ing of `val`, which is a pointer to an instance of `ty`
     fn schedule_drop_and_zero_mem(&self,
                                   cleanup_scope: ScopeId,
                                   val: ValueRef,
                                   ty: Ty<'tcx>) {
-        /*!
-         * Schedules a (deep) drop and zero-ing of `val`, which is a pointer
-         * to an instance of `ty`
-         */
-
         if !ty::type_needs_drop(self.ccx.tcx(), ty) { return; }
         let drop = box DropValue {
             is_immediate: false,
@@ -359,13 +319,11 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
         self.schedule_clean(cleanup_scope, drop as CleanupObj);
     }
 
+    /// Schedules a (deep) drop of `val`, which is an instance of `ty`
     fn schedule_drop_immediate(&self,
                                cleanup_scope: ScopeId,
                                val: ValueRef,
                                ty: Ty<'tcx>) {
-        /*!
-         * Schedules a (deep) drop of `val`, which is an instance of `ty`
-         */
 
         if !ty::type_needs_drop(self.ccx.tcx(), ty) { return; }
         let drop = box DropValue {
@@ -384,16 +342,12 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
         self.schedule_clean(cleanup_scope, drop as CleanupObj);
     }
 
+    /// Schedules a call to `free(val)`. Note that this is a shallow operation.
     fn schedule_free_value(&self,
                            cleanup_scope: ScopeId,
                            val: ValueRef,
                            heap: Heap,
                            content_ty: Ty<'tcx>) {
-        /*!
-         * Schedules a call to `free(val)`. Note that this is a shallow
-         * operation.
-         */
-
         let drop = box FreeValue { ptr: val, heap: heap, content_ty: content_ty };
 
         debug!("schedule_free_value({}, val={}, heap={})",
@@ -404,17 +358,13 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
         self.schedule_clean(cleanup_scope, drop as CleanupObj);
     }
 
+    /// Schedules a call to `free(val)`. Note that this is a shallow operation.
     fn schedule_free_slice(&self,
                            cleanup_scope: ScopeId,
                            val: ValueRef,
                            size: ValueRef,
                            align: ValueRef,
                            heap: Heap) {
-        /*!
-         * Schedules a call to `free(val)`. Note that this is a shallow
-         * operation.
-         */
-
         let drop = box FreeSlice { ptr: val, size: size, align: align, heap: heap };
 
         debug!("schedule_free_slice({}, val={}, heap={})",
@@ -434,15 +384,12 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
         }
     }
 
+    /// Schedules a cleanup to occur upon exit from `cleanup_scope`. If `cleanup_scope` is not
+    /// provided, then the cleanup is scheduled in the topmost scope, which must be a temporary
+    /// scope.
     fn schedule_clean_in_ast_scope(&self,
                                    cleanup_scope: ast::NodeId,
                                    cleanup: CleanupObj<'tcx>) {
-        /*!
-         * Schedules a cleanup to occur upon exit from `cleanup_scope`.
-         * If `cleanup_scope` is not provided, then the cleanup is scheduled
-         * in the topmost scope, which must be a temporary scope.
-         */
-
         debug!("schedule_clean_in_ast_scope(cleanup_scope={})",
                cleanup_scope);
 
@@ -462,14 +409,10 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
                     self.ccx.tcx().map.node_to_string(cleanup_scope)).as_slice());
     }
 
+    /// Schedules a cleanup to occur in the top-most scope, which must be a temporary scope.
     fn schedule_clean_in_custom_scope(&self,
                                       custom_scope: CustomScopeIndex,
                                       cleanup: CleanupObj<'tcx>) {
-        /*!
-         * Schedules a cleanup to occur in the top-most scope,
-         * which must be a temporary scope.
-         */
-
         debug!("schedule_clean_in_custom_scope(custom_scope={})",
                custom_scope.index);
 
@@ -481,22 +424,14 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
         scope.clear_cached_exits();
     }
 
+    /// Returns true if there are pending cleanups that should execute on panic.
     fn needs_invoke(&self) -> bool {
-        /*!
-         * Returns true if there are pending cleanups that should
-         * execute on panic.
-         */
-
         self.scopes.borrow().iter().rev().any(|s| s.needs_invoke())
     }
 
+    /// Returns a basic block to branch to in the event of a panic. This block will run the panic
+    /// cleanups and eventually invoke the LLVM `Resume` instruction.
     fn get_landing_pad(&'blk self) -> BasicBlockRef {
-        /*!
-         * Returns a basic block to branch to in the event of a panic.
-         * This block will run the panic cleanups and eventually
-         * invoke the LLVM `Resume` instruction.
-         */
-
         let _icx = base::push_ctxt("get_landing_pad");
 
         debug!("get_landing_pad");
@@ -529,10 +464,8 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
 }
 
 impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
+    /// Returns the id of the current top-most AST scope, if any.
     fn top_ast_scope(&self) -> Option<ast::NodeId> {
-        /*!
-         * Returns the id of the current top-most AST scope, if any.
-         */
         for scope in self.scopes.borrow().iter().rev() {
             match scope.kind {
                 CustomScopeKind | LoopScopeKind(..) => {}
@@ -559,10 +492,10 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx
             (*scopes)[custom_scope.index].kind.is_temp()
     }
 
+    /// Generates the cleanups for `scope` into `bcx`
     fn trans_scope_cleanups(&self, // cannot borrow self, will recurse
                             bcx: Block<'blk, 'tcx>,
                             scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx> {
-        /*! Generates the cleanups for `scope` into `bcx` */
 
         let mut bcx = bcx;
         if !bcx.unreachable.get() {
@@ -593,37 +526,31 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx
         f(self.scopes.borrow().last().unwrap())
     }
 
+    /// Used when the caller wishes to jump to an early exit, such as a return, break, continue, or
+    /// unwind. This function will generate all cleanups between the top of the stack and the exit
+    /// `label` and return a basic block that the caller can branch to.
+    ///
+    /// For example, if the current stack of cleanups were as follows:
+    ///
+    ///      AST 22
+    ///      Custom 1
+    ///      AST 23
+    ///      Loop 23
+    ///      Custom 2
+    ///      AST 24
+    ///
+    /// and the `label` specifies a break from `Loop 23`, then this function would generate a
+    /// series of basic blocks as follows:
+    ///
+    ///      Cleanup(AST 24) -> Cleanup(Custom 2) -> break_blk
+    ///
+    /// where `break_blk` is the block specified in `Loop 23` as the target for breaks. The return
+    /// value would be the first basic block in that sequence (`Cleanup(AST 24)`). The caller could
+    /// then branch to `Cleanup(AST 24)` and it will perform all cleanups and finally branch to the
+    /// `break_blk`.
     fn trans_cleanups_to_exit_scope(&'blk self,
                                     label: EarlyExitLabel)
                                     -> BasicBlockRef {
-        /*!
-         * Used when the caller wishes to jump to an early exit, such
-         * as a return, break, continue, or unwind. This function will
-         * generate all cleanups between the top of the stack and the
-         * exit `label` and return a basic block that the caller can
-         * branch to.
-         *
-         * For example, if the current stack of cleanups were as follows:
-         *
-         *      AST 22
-         *      Custom 1
-         *      AST 23
-         *      Loop 23
-         *      Custom 2
-         *      AST 24
-         *
-         * and the `label` specifies a break from `Loop 23`, then this
-         * function would generate a series of basic blocks as follows:
-         *
-         *      Cleanup(AST 24) -> Cleanup(Custom 2) -> break_blk
-         *
-         * where `break_blk` is the block specified in `Loop 23` as
-         * the target for breaks. The return value would be the first
-         * basic block in that sequence (`Cleanup(AST 24)`). The
-         * caller could then branch to `Cleanup(AST 24)` and it will
-         * perform all cleanups and finally branch to the `break_blk`.
-         */
-
         debug!("trans_cleanups_to_exit_scope label={} scopes={}",
                label, self.scopes_len());
 
@@ -756,20 +683,15 @@ impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx
         prev_llbb
     }
 
+    /// Creates a landing pad for the top scope, if one does not exist.  The landing pad will
+    /// perform all cleanups necessary for an unwind and then `resume` to continue error
+    /// propagation:
+    ///
+    ///     landing_pad -> ... cleanups ... -> [resume]
+    ///
+    /// (The cleanups and resume instruction are created by `trans_cleanups_to_exit_scope()`, not
+    /// in this function itself.)
     fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef {
-        /*!
-         * Creates a landing pad for the top scope, if one does not
-         * exist.  The landing pad will perform all cleanups necessary
-         * for an unwind and then `resume` to continue error
-         * propagation:
-         *
-         *     landing_pad -> ... cleanups ... -> [resume]
-         *
-         * (The cleanups and resume instruction are created by
-         * `trans_cleanups_to_exit_scope()`, not in this function
-         * itself.)
-         */
-
         let pad_bcx;
 
         debug!("get_or_create_landing_pad");
@@ -883,19 +805,15 @@ impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> {
                               cleanup_block: blk });
     }
 
+    /// True if this scope has cleanups that need unwinding
     fn needs_invoke(&self) -> bool {
-        /*! True if this scope has cleanups that need unwinding */
 
         self.cached_landing_pad.is_some() ||
             self.cleanups.iter().any(|c| c.must_unwind())
     }
 
+    /// Returns a suitable name to use for the basic block that handles this cleanup scope
     fn block_name(&self, prefix: &str) -> String {
-        /*!
-         * Returns a suitable name to use for the basic block that
-         * handles this cleanup scope
-         */
-
         match self.kind {
             CustomScopeKind => format!("{}_custom_", prefix),
             AstScopeKind(id) => format!("{}_ast_{}_", prefix, id),
@@ -930,14 +848,10 @@ impl<'blk, 'tcx> CleanupScopeKind<'blk, 'tcx> {
         }
     }
 
+    /// If this is a loop scope with id `id`, return the early exit block `exit`, else `None`
     fn early_exit_block(&self,
                         id: ast::NodeId,
                         exit: uint) -> Option<BasicBlockRef> {
-        /*!
-         * If this is a loop scope with id `id`, return the early
-         * exit block `exit`, else `None`
-         */
-
         match *self {
             LoopScopeKind(i, ref exits) if id == i => Some(exits[exit].llbb),
             _ => None,
diff --git a/src/librustc_trans/trans/closure.rs b/src/librustc_trans/trans/closure.rs
index ca955975dfb30..2f82b8286c2d5 100644
--- a/src/librustc_trans/trans/closure.rs
+++ b/src/librustc_trans/trans/closure.rs
@@ -386,6 +386,15 @@ impl<'a, 'tcx> ClosureEnv<'a, 'tcx> {
     }
 }
 
+/// Translates the body of a closure expression.
+///
+/// - `store`
+/// - `decl`
+/// - `body`
+/// - `id`: The id of the closure expression.
+/// - `cap_clause`: information about captured variables, if any.
+/// - `dest`: where to write the closure value, which must be a
+///   (fn ptr, env) pair
 pub fn trans_expr_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                  store: ty::TraitStore,
                                  decl: &ast::FnDecl,
@@ -393,19 +402,6 @@ pub fn trans_expr_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                  id: ast::NodeId,
                                  dest: expr::Dest)
                                  -> Block<'blk, 'tcx> {
-    /*!
-     *
-     * Translates the body of a closure expression.
-     *
-     * - `store`
-     * - `decl`
-     * - `body`
-     * - `id`: The id of the closure expression.
-     * - `cap_clause`: information about captured variables, if any.
-     * - `dest`: where to write the closure value, which must be a
-         (fn ptr, env) pair
-     */
-
     let _icx = push_ctxt("closure::trans_expr_fn");
 
     let dest_addr = match dest {
diff --git a/src/librustc_trans/trans/common.rs b/src/librustc_trans/trans/common.rs
index 235805a7c8308..febb33f6c54af 100644
--- a/src/librustc_trans/trans/common.rs
+++ b/src/librustc_trans/trans/common.rs
@@ -95,26 +95,19 @@ pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -
     }
 }
 
+/// Identify types which have size zero at runtime.
 pub fn type_is_zero_size<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool {
-    /*!
-     * Identify types which have size zero at runtime.
-     */
-
     use trans::machine::llsize_of_alloc;
     use trans::type_of::sizing_type_of;
     let llty = sizing_type_of(ccx, ty);
     llsize_of_alloc(ccx, llty) == 0
 }
 
+/// Identifies types which we declare to be equivalent to `void` in C for the purpose of function
+/// return types. These are `()`, bot, and uninhabited enums. Note that all such types are also
+/// zero-size, but not all zero-size types use a `void` return type (in order to aid with C ABI
+/// compatibility).
 pub fn return_type_is_void(ccx: &CrateContext, ty: Ty) -> bool {
-    /*!
-     * Identifies types which we declare to be equivalent to `void`
-     * in C for the purpose of function return types. These are
-     * `()`, bot, and uninhabited enums. Note that all such types
-     * are also zero-size, but not all zero-size types use a `void`
-     * return type (in order to aid with C ABI compatibility).
-     */
-
     ty::type_is_nil(ty) || ty::type_is_empty(ccx.tcx(), ty)
 }
 
@@ -768,19 +761,14 @@ pub fn expr_ty_adjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ex: &ast::Expr) -> T
     monomorphize_type(bcx, ty::expr_ty_adjusted(bcx.tcx(), ex))
 }
 
+/// Attempts to resolve an obligation. The result is a shallow vtable resolution -- meaning that we
+/// do not (necessarily) resolve all nested obligations on the impl. Note that type check should
+/// guarantee to us that all nested obligations *could be* resolved if we wanted to.
 pub fn fulfill_obligation<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                     span: Span,
                                     trait_ref: Rc<ty::TraitRef<'tcx>>)
                                     -> traits::Vtable<'tcx, ()>
 {
-    /*!
-     * Attempts to resolve an obligation. The result is a shallow
-     * vtable resolution -- meaning that we do not (necessarily) resolve
-     * all nested obligations on the impl. Note that type check should
-     * guarantee to us that all nested obligations *could be* resolved
-     * if we wanted to.
-     */
-
     let tcx = ccx.tcx();
 
     // Remove any references to regions; this helps improve caching.
diff --git a/src/librustc_trans/trans/datum.rs b/src/librustc_trans/trans/datum.rs
index 354a607220715..22f030be3d653 100644
--- a/src/librustc_trans/trans/datum.rs
+++ b/src/librustc_trans/trans/datum.rs
@@ -8,10 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
- * See the section on datums in `doc.rs` for an overview of what
- * Datums are and how they are intended to be used.
- */
+//! See the section on datums in `doc.rs` for an overview of what Datums are and how they are
+//! intended to be used.
 
 pub use self::Expr::*;
 pub use self::RvalueMode::*;
@@ -107,6 +105,10 @@ pub fn immediate_rvalue_bcx<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
 }
 
 
+/// Allocates temporary space on the stack using alloca() and returns a by-ref Datum pointing to
+/// it. The memory will be dropped upon exit from `scope`. The callback `populate` should
+/// initialize the memory. If `zero` is true, the space will be zeroed when it is allocated; this
+/// is not necessary unless `bcx` does not dominate the end of `scope`.
 pub fn lvalue_scratch_datum<'blk, 'tcx, A>(bcx: Block<'blk, 'tcx>,
                                            ty: Ty<'tcx>,
                                            name: &str,
@@ -116,15 +118,6 @@ pub fn lvalue_scratch_datum<'blk, 'tcx, A>(bcx: Block<'blk, 'tcx>,
                                            populate: |A, Block<'blk, 'tcx>, ValueRef|
                                                       -> Block<'blk, 'tcx>)
                                           -> DatumBlock<'blk, 'tcx, Lvalue> {
-    /*!
-     * Allocates temporary space on the stack using alloca() and
-     * returns a by-ref Datum pointing to it. The memory will be
-     * dropped upon exit from `scope`. The callback `populate` should
-     * initialize the memory. If `zero` is true, the space will be
-     * zeroed when it is allocated; this is not necessary unless `bcx`
-     * does not dominate the end of `scope`.
-     */
-
     let scratch = if zero {
         alloca_zeroed(bcx, ty, name)
     } else {
@@ -140,33 +133,24 @@ pub fn lvalue_scratch_datum<'blk, 'tcx, A>(bcx: Block<'blk, 'tcx>,
     DatumBlock::new(bcx, Datum::new(scratch, ty, Lvalue))
 }
 
+/// Allocates temporary space on the stack using alloca() and returns a by-ref Datum pointing to
+/// it. The memory is left uninitialized and no cleanup is scheduled — you must arrange any
+/// cleanups etc yourself! (Unlike `lvalue_scratch_datum`, this function takes no `zero` flag;
+/// the temporary is a raw rvalue slot, so initializing it is entirely the caller's
+/// responsibility.)
 pub fn rvalue_scratch_datum<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                         ty: Ty<'tcx>,
                                         name: &str)
                                         -> Datum<'tcx, Rvalue> {
-    /*!
-     * Allocates temporary space on the stack using alloca() and
-     * returns a by-ref Datum pointing to it.  If `zero` is true, the
-     * space will be zeroed when it is allocated; this is normally not
-     * necessary, but in the case of automatic rooting in match
-     * statements it is possible to have temporaries that may not get
-     * initialized if a certain arm is not taken, so we must zero
-     * them. You must arrange any cleanups etc yourself!
-     */
-
     let llty = type_of::type_of(bcx.ccx(), ty);
     let scratch = alloca(bcx, llty, name);
     Datum::new(scratch, ty, Rvalue::new(ByRef))
 }
 
+/// Indicates the "appropriate" mode for this value, which is either by ref or by value, depending
+/// on whether type is immediate or not.
 pub fn appropriate_rvalue_mode<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                          ty: Ty<'tcx>) -> RvalueMode {
-    /*!
-     * Indicates the "appropriate" mode for this value,
-     * which is either by ref or by value, depending
-     * on whether type is immediate or not.
-     */
-
     if type_is_immediate(ccx, ty) {
         ByValue
     } else {
@@ -234,17 +218,13 @@ impl KindOps for Rvalue {
 }
 
 impl KindOps for Lvalue {
+    /// If an lvalue is moved, we must zero out the memory in which it resides so as to cancel
+    /// cleanup. If an @T lvalue is copied, we must increment the reference count.
     fn post_store<'blk, 'tcx>(&self,
                               bcx: Block<'blk, 'tcx>,
                               val: ValueRef,
                               ty: Ty<'tcx>)
                               -> Block<'blk, 'tcx> {
-        /*!
-         * If an lvalue is moved, we must zero out the memory in which
-         * it resides so as to cancel cleanup. If an @T lvalue is
-         * copied, we must increment the reference count.
-         */
-
         if ty::type_needs_drop(bcx.tcx(), ty) {
             // cancel cleanup of affine values by zeroing out
             let () = zero_mem(bcx, val, ty);
@@ -288,31 +268,24 @@ impl KindOps for Expr {
 }
 
 impl<'tcx> Datum<'tcx, Rvalue> {
+    /// Schedules a cleanup for this datum in the given scope. That means that this datum is no
+    /// longer an rvalue datum; hence, this function consumes the datum and returns the contained
+    /// ValueRef.
     pub fn add_clean<'a>(self,
                          fcx: &FunctionContext<'a, 'tcx>,
                          scope: cleanup::ScopeId)
                          -> ValueRef {
-        /*!
-         * Schedules a cleanup for this datum in the given scope.
-         * That means that this datum is no longer an rvalue datum;
-         * hence, this function consumes the datum and returns the
-         * contained ValueRef.
-         */
-
         add_rvalue_clean(self.kind.mode, fcx, scope, self.val, self.ty);
         self.val
     }
 
+    /// Returns an lvalue datum (that is, a by ref datum with cleanup scheduled). If `self` is not
+    /// already an lvalue, cleanup will be scheduled in the temporary scope for `expr_id`.
     pub fn to_lvalue_datum_in_scope<'blk>(self,
                                           bcx: Block<'blk, 'tcx>,
                                           name: &str,
                                           scope: cleanup::ScopeId)
                                           -> DatumBlock<'blk, 'tcx, Lvalue> {
-        /*!
-         * Returns an lvalue datum (that is, a by ref datum with
-         * cleanup scheduled). If `self` is not already an lvalue,
-         * cleanup will be scheduled in the temporary scope for `expr_id`.
-         */
         let fcx = bcx.fcx;
 
         match self.kind.mode {
@@ -381,22 +354,16 @@ impl<'tcx> Datum<'tcx, Expr> {
         }
     }
 
+    /// Asserts that this datum *is* an lvalue and returns it.
     #[allow(dead_code)] // potentially useful
     pub fn assert_lvalue(self, bcx: Block) -> Datum<'tcx, Lvalue> {
-        /*!
-         * Asserts that this datum *is* an lvalue and returns it.
-         */
-
         self.match_kind(
             |d| d,
             |_| bcx.sess().bug("assert_lvalue given rvalue"))
     }
 
+    /// Asserts that this datum *is* an rvalue and returns it.
     pub fn assert_rvalue(self, bcx: Block) -> Datum<'tcx, Rvalue> {
-        /*!
-         * Asserts that this datum *is* an lvalue and returns it.
-         */
-
         self.match_kind(
             |_| bcx.sess().bug("assert_rvalue given lvalue"),
             |r| r)
@@ -418,14 +385,11 @@ impl<'tcx> Datum<'tcx, Expr> {
         }
     }
 
+    /// Arranges cleanup for `self` if it is an rvalue. Use when you are done working with a value
+    /// that may need drop.
     pub fn add_clean_if_rvalue<'blk>(self,
                                      bcx: Block<'blk, 'tcx>,
                                      expr_id: ast::NodeId) {
-        /*!
-         * Arranges cleanup for `self` if it is an rvalue. Use when
-         * you are done working with a value that may need drop.
-         */
-
         self.match_kind(
             |_| { /* Nothing to do, cleanup already arranged */ },
             |r| {
@@ -434,16 +398,12 @@ impl<'tcx> Datum<'tcx, Expr> {
             })
     }
 
+    /// Ensures that `self` will get cleaned up, if it is not an lvalue already.
     pub fn clean<'blk>(self,
                        bcx: Block<'blk, 'tcx>,
                        name: &'static str,
                        expr_id: ast::NodeId)
                        -> Block<'blk, 'tcx> {
-        /*!
-         * Ensures that `self` will get cleaned up, if it is not an lvalue
-         * already.
-         */
-
         self.to_lvalue_datum(bcx, name, expr_id).bcx
     }
 
@@ -464,15 +424,11 @@ impl<'tcx> Datum<'tcx, Expr> {
             })
     }
 
+    /// Ensures that we have an rvalue datum (that is, a datum with no cleanup scheduled).
     pub fn to_rvalue_datum<'blk>(self,
                                  bcx: Block<'blk, 'tcx>,
                                  name: &'static str)
                                  -> DatumBlock<'blk, 'tcx, Rvalue> {
-        /*!
-         * Ensures that we have an rvalue datum (that is, a datum with
-         * no cleanup scheduled).
-         */
-
         self.match_kind(
             |l| {
                 let mut bcx = bcx;
@@ -501,12 +457,9 @@ impl<'tcx> Datum<'tcx, Expr> {
  * from an array.
  */
 impl<'tcx> Datum<'tcx, Lvalue> {
+    /// Converts a datum into a by-ref value. The datum type must be one which is always passed by
+    /// reference.
     pub fn to_llref(self) -> ValueRef {
-        /*!
-         * Converts a datum into a by-ref value. The datum type must
-         * be one which is always passed by reference.
-         */
-
         self.val
     }
 
@@ -555,40 +508,30 @@ impl<'tcx, K: KindOps + fmt::Show> Datum<'tcx, K> {
         Datum { val: val, ty: ty, kind: kind.to_expr_kind() }
     }
 
+    /// Moves or copies this value into a new home, as appropriate depending on the type of the
+    /// datum. This method consumes the datum, since it would be incorrect to go on using the datum
+    /// if the value represented is affine (and hence the value is moved).
     pub fn store_to<'blk>(self,
                           bcx: Block<'blk, 'tcx>,
                           dst: ValueRef)
                           -> Block<'blk, 'tcx> {
-        /*!
-         * Moves or copies this value into a new home, as appropriate
-         * depending on the type of the datum. This method consumes
-         * the datum, since it would be incorrect to go on using the
-         * datum if the value represented is affine (and hence the value
-         * is moved).
-         */
-
         self.shallow_copy_raw(bcx, dst);
 
         self.kind.post_store(bcx, self.val, self.ty)
     }
 
+    /// Helper function that performs a shallow copy of this value into `dst`, which should be a
+    /// pointer to a memory location suitable for `self.ty`. `dst` should contain uninitialized
+    /// memory (either newly allocated, zeroed, or dropped).
+    ///
+    /// This function is private to datums because it leaves memory in an unstable state, where the
+    /// source value has been copied but not zeroed. Public methods are `store_to` (if you no
+    /// longer need the source value) or `shallow_copy` (if you wish the source value to remain
+    /// valid).
     fn shallow_copy_raw<'blk>(&self,
                               bcx: Block<'blk, 'tcx>,
                               dst: ValueRef)
                               -> Block<'blk, 'tcx> {
-        /*!
-         * Helper function that performs a shallow copy of this value
-         * into `dst`, which should be a pointer to a memory location
-         * suitable for `self.ty`. `dst` should contain uninitialized
-         * memory (either newly allocated, zeroed, or dropped).
-         *
-         * This function is private to datums because it leaves memory
-         * in an unstable state, where the source value has been
-         * copied but not zeroed. Public methods are `store_to`
-         * (if you no longer need the source value) or `shallow_copy`
-         * (if you wish the source value to remain valid).
-         */
-
         let _icx = push_ctxt("copy_to_no_check");
 
         if type_is_zero_size(bcx.ccx(), self.ty) {
@@ -604,17 +547,13 @@ impl<'tcx, K: KindOps + fmt::Show> Datum<'tcx, K> {
         return bcx;
     }
 
+    /// Copies the value into a new location. This function always preserves the existing datum as
+    /// a valid value. Therefore, it does not consume `self` and, also, cannot be applied to affine
+    /// values (since they must never be duplicated).
     pub fn shallow_copy<'blk>(&self,
                               bcx: Block<'blk, 'tcx>,
                               dst: ValueRef)
                               -> Block<'blk, 'tcx> {
-        /*!
-         * Copies the value into a new location. This function always
-         * preserves the existing datum as a valid value. Therefore,
-         * it does not consume `self` and, also, cannot be applied to
-         * affine values (since they must never be duplicated).
-         */
-
         assert!(!ty::type_moves_by_default(bcx.tcx(), self.ty));
         self.shallow_copy_raw(bcx, dst)
     }
@@ -627,23 +566,17 @@ impl<'tcx, K: KindOps + fmt::Show> Datum<'tcx, K> {
                 self.kind)
     }
 
+    /// See the `appropriate_rvalue_mode()` function.
     pub fn appropriate_rvalue_mode<'a>(&self, ccx: &CrateContext<'a, 'tcx>)
                                        -> RvalueMode {
-        /*! See the `appropriate_rvalue_mode()` function */
-
         appropriate_rvalue_mode(ccx, self.ty)
     }
 
+    /// Converts `self` into a by-value `ValueRef`. Consumes this datum (i.e., absolves you of
+    /// responsibility to cleanup the value). For this to work, the value must be something
+    /// scalar-ish (like an int or a pointer) which (1) does not require drop glue and (2) is
+    /// naturally passed around by value, and not by reference.
     pub fn to_llscalarish<'blk>(self, bcx: Block<'blk, 'tcx>) -> ValueRef {
-        /*!
-         * Converts `self` into a by-value `ValueRef`. Consumes this
-         * datum (i.e., absolves you of responsibility to cleanup the
-         * value). For this to work, the value must be something
-         * scalar-ish (like an int or a pointer) which (1) does not
-         * require drop glue and (2) is naturally passed around by
-         * value, and not by reference.
-         */
-
         assert!(!ty::type_needs_drop(bcx.tcx(), self.ty));
         assert!(self.appropriate_rvalue_mode(bcx.ccx()) == ByValue);
         if self.kind.is_by_ref() {
diff --git a/src/librustc_trans/trans/debuginfo.rs b/src/librustc_trans/trans/debuginfo.rs
index a3472e194cf58..c35de3209c61f 100644
--- a/src/librustc_trans/trans/debuginfo.rs
+++ b/src/librustc_trans/trans/debuginfo.rs
@@ -8,181 +8,180 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-# Debug Info Module
-
-This module serves the purpose of generating debug symbols. We use LLVM's
-[source level debugging](http://llvm.org/docs/SourceLevelDebugging.html)
-features for generating the debug information. The general principle is this:
-
-Given the right metadata in the LLVM IR, the LLVM code generator is able to
-create DWARF debug symbols for the given code. The
-[metadata](http://llvm.org/docs/LangRef.html#metadata-type) is structured much
-like DWARF *debugging information entries* (DIE), representing type information
-such as datatype layout, function signatures, block layout, variable location
-and scope information, etc. It is the purpose of this module to generate correct
-metadata and insert it into the LLVM IR.
-
-As the exact format of metadata trees may change between different LLVM
-versions, we now use LLVM
-[DIBuilder](http://llvm.org/docs/doxygen/html/classllvm_1_1DIBuilder.html) to
-create metadata where possible. This will hopefully ease the adaption of this
-module to future LLVM versions.
-
-The public API of the module is a set of functions that will insert the correct
-metadata into the LLVM IR when called with the right parameters. The module is
-thus driven from an outside client with functions like
-`debuginfo::create_local_var_metadata(bcx: block, local: &ast::local)`.
-
-Internally the module will try to reuse already created metadata by utilizing a
-cache. The way to get a shared metadata node when needed is thus to just call
-the corresponding function in this module:
-
-    let file_metadata = file_metadata(crate_context, path);
-
-The function will take care of probing the cache for an existing node for that
-exact file path.
-
-All private state used by the module is stored within either the
-CrateDebugContext struct (owned by the CrateContext) or the FunctionDebugContext
-(owned by the FunctionContext).
-
-This file consists of three conceptual sections:
-1. The public interface of the module
-2. Module-internal metadata creation functions
-3. Minor utility functions
-
-
-## Recursive Types
-
-Some kinds of types, such as structs and enums can be recursive. That means that
-the type definition of some type X refers to some other type which in turn
-(transitively) refers to X. This introduces cycles into the type referral graph.
-A naive algorithm doing an on-demand, depth-first traversal of this graph when
-describing types, can get trapped in an endless loop when it reaches such a
-cycle.
-
-For example, the following simple type for a singly-linked list...
-
-```
-struct List {
-    value: int,
-    tail: Option<Box<List>>,
-}
-```
-
-will generate the following callstack with a naive DFS algorithm:
-
-```
-describe(t = List)
-  describe(t = int)
-  describe(t = Option<Box<List>>)
-    describe(t = Box<List>)
-      describe(t = List) // at the beginning again...
-      ...
-```
-
-To break cycles like these, we use "forward declarations". That is, when the
-algorithm encounters a possibly recursive type (any struct or enum), it
-immediately creates a type description node and inserts it into the cache
-*before* describing the members of the type. This type description is just a
-stub (as type members are not described and added to it yet) but it allows the
-algorithm to already refer to the type. After the stub is inserted into the
-cache, the algorithm continues as before. If it now encounters a recursive
-reference, it will hit the cache and does not try to describe the type anew.
-
-This behaviour is encapsulated in the 'RecursiveTypeDescription' enum, which
-represents a kind of continuation, storing all state needed to continue
-traversal at the type members after the type has been registered with the cache.
-(This implementation approach might be a tad over-engineered and may change in
-the future)
-
-
-## Source Locations and Line Information
-
-In addition to data type descriptions the debugging information must also allow
-to map machine code locations back to source code locations in order to be useful.
-This functionality is also handled in this module. The following functions allow
-to control source mappings:
-
-+ set_source_location()
-+ clear_source_location()
-+ start_emitting_source_locations()
-
-`set_source_location()` allows to set the current source location. All IR
-instructions created after a call to this function will be linked to the given
-source location, until another location is specified with
-`set_source_location()` or the source location is cleared with
-`clear_source_location()`. In the later case, subsequent IR instruction will not
-be linked to any source location. As you can see, this is a stateful API
-(mimicking the one in LLVM), so be careful with source locations set by previous
-calls. It's probably best to not rely on any specific state being present at a
-given point in code.
-
-One topic that deserves some extra attention is *function prologues*. At the
-beginning of a function's machine code there are typically a few instructions
-for loading argument values into allocas and checking if there's enough stack
-space for the function to execute. This *prologue* is not visible in the source
-code and LLVM puts a special PROLOGUE END marker into the line table at the
-first non-prologue instruction of the function. In order to find out where the
-prologue ends, LLVM looks for the first instruction in the function body that is
-linked to a source location. So, when generating prologue instructions we have
-to make sure that we don't emit source location information until the 'real'
-function body begins. For this reason, source location emission is disabled by
-default for any new function being translated and is only activated after a call
-to the third function from the list above, `start_emitting_source_locations()`.
-This function should be called right before regularly starting to translate the
-top-level block of the given function.
-
-There is one exception to the above rule: `llvm.dbg.declare` instruction must be
-linked to the source location of the variable being declared. For function
-parameters these `llvm.dbg.declare` instructions typically occur in the middle
-of the prologue, however, they are ignored by LLVM's prologue detection. The
-`create_argument_metadata()` and related functions take care of linking the
-`llvm.dbg.declare` instructions to the correct source locations even while
-source location emission is still disabled, so there is no need to do anything
-special with source location handling here.
-
-## Unique Type Identification
-
-In order for link-time optimization to work properly, LLVM needs a unique type
-identifier that tells it across compilation units which types are the same as
-others. This type identifier is created by TypeMap::get_unique_type_id_of_type()
-using the following algorithm:
-
-(1) Primitive types have their name as ID
-(2) Structs, enums and traits have a multipart identifier
-
-    (1) The first part is the SVH (strict version hash) of the crate they were
-        originally defined in
-
-    (2) The second part is the ast::NodeId of the definition in their original
-        crate
-
-    (3) The final part is a concatenation of the type IDs of their concrete type
-        arguments if they are generic types.
-
-(3) Tuple-, pointer and function types are structurally identified, which means
-    that they are equivalent if their component types are equivalent (i.e. (int,
-    int) is the same regardless in which crate it is used).
-
-This algorithm also provides a stable ID for types that are defined in one crate
-but instantiated from metadata within another crate. We just have to take care
-to always map crate and node IDs back to the original crate context.
-
-As a side-effect these unique type IDs also help to solve a problem arising from
-lifetime parameters. Since lifetime parameters are completely omitted in
-debuginfo, more than one `Ty` instance may map to the same debuginfo type
-metadata, that is, some struct `Struct<'a>` may have N instantiations with
-different concrete substitutions for `'a`, and thus there will be N `Ty`
-instances for the type `Struct<'a>` even though it is not generic otherwise.
-Unfortunately this means that we cannot use `ty::type_id()` as cheap identifier
-for type metadata---we have done this in the past, but it led to unnecessary
-metadata duplication in the best case and LLVM assertions in the worst. However,
-the unique type ID as described above *can* be used as identifier. Since it is
-comparatively expensive to construct, though, `ty::type_id()` is still used
-additionally as an optimization for cases where the exact same type has been
-seen before (which is most of the time). */
+//! # Debug Info Module
+//!
+//! This module serves the purpose of generating debug symbols. We use LLVM's
+//! [source level debugging](http://llvm.org/docs/SourceLevelDebugging.html)
+//! features for generating the debug information. The general principle is this:
+//!
+//! Given the right metadata in the LLVM IR, the LLVM code generator is able to
+//! create DWARF debug symbols for the given code. The
+//! [metadata](http://llvm.org/docs/LangRef.html#metadata-type) is structured much
+//! like DWARF *debugging information entries* (DIE), representing type information
+//! such as datatype layout, function signatures, block layout, variable location
+//! and scope information, etc. It is the purpose of this module to generate correct
+//! metadata and insert it into the LLVM IR.
+//!
+//! As the exact format of metadata trees may change between different LLVM
+//! versions, we now use LLVM
+//! [DIBuilder](http://llvm.org/docs/doxygen/html/classllvm_1_1DIBuilder.html) to
+//! create metadata where possible. This will hopefully ease the adaptation of this
+//! module to future LLVM versions.
+//!
+//! The public API of the module is a set of functions that will insert the correct
+//! metadata into the LLVM IR when called with the right parameters. The module is
+//! thus driven from an outside client with functions like
+//! `debuginfo::create_local_var_metadata(bcx: block, local: &ast::local)`.
+//!
+//! Internally the module will try to reuse already created metadata by utilizing a
+//! cache. The way to get a shared metadata node when needed is thus to just call
+//! the corresponding function in this module:
+//!
+//!     let file_metadata = file_metadata(crate_context, path);
+//!
+//! The function will take care of probing the cache for an existing node for that
+//! exact file path.
+//!
+//! All private state used by the module is stored within either the
+//! CrateDebugContext struct (owned by the CrateContext) or the FunctionDebugContext
+//! (owned by the FunctionContext).
+//!
+//! This file consists of three conceptual sections:
+//! 1. The public interface of the module
+//! 2. Module-internal metadata creation functions
+//! 3. Minor utility functions
+//!
+//!
+//! ## Recursive Types
+//!
+//! Some kinds of types, such as structs and enums can be recursive. That means that
+//! the type definition of some type X refers to some other type which in turn
+//! (transitively) refers to X. This introduces cycles into the type referral graph.
+//! A naive algorithm doing an on-demand, depth-first traversal of this graph when
+//! describing types, can get trapped in an endless loop when it reaches such a
+//! cycle.
+//!
+//! For example, the following simple type for a singly-linked list...
+//!
+//! ```
+//! struct List {
+//!     value: int,
+//!     tail: Option<Box<List>>,
+//! }
+//! ```
+//!
+//! will generate the following callstack with a naive DFS algorithm:
+//!
+//! ```
+//! describe(t = List)
+//!   describe(t = int)
+//!   describe(t = Option<Box<List>>)
+//!     describe(t = Box<List>)
+//!       describe(t = List) // at the beginning again...
+//!       ...
+//! ```
+//!
+//! To break cycles like these, we use "forward declarations". That is, when the
+//! algorithm encounters a possibly recursive type (any struct or enum), it
+//! immediately creates a type description node and inserts it into the cache
+//! *before* describing the members of the type. This type description is just a
+//! stub (as type members are not described and added to it yet) but it allows the
+//! algorithm to already refer to the type. After the stub is inserted into the
+//! cache, the algorithm continues as before. If it now encounters a recursive
+//! reference, it will hit the cache and does not try to describe the type anew.
+//!
+//! This behaviour is encapsulated in the 'RecursiveTypeDescription' enum, which
+//! represents a kind of continuation, storing all state needed to continue
+//! traversal at the type members after the type has been registered with the cache.
+//! (This implementation approach might be a tad over-engineered and may change in
+//! the future)
+//!
+//!
+//! ## Source Locations and Line Information
+//!
+//! In addition to data type descriptions the debugging information must also allow
+//! to map machine code locations back to source code locations in order to be useful.
+//! This functionality is also handled in this module. The following functions allow
+//! to control source mappings:
+//!
+//! + set_source_location()
+//! + clear_source_location()
+//! + start_emitting_source_locations()
+//!
+//! `set_source_location()` allows to set the current source location. All IR
+//! instructions created after a call to this function will be linked to the given
+//! source location, until another location is specified with
+//! `set_source_location()` or the source location is cleared with
+//! `clear_source_location()`. In the latter case, subsequent IR instructions will not
+//! be linked to any source location. As you can see, this is a stateful API
+//! (mimicking the one in LLVM), so be careful with source locations set by previous
+//! calls. It's probably best to not rely on any specific state being present at a
+//! given point in code.
+//!
+//! One topic that deserves some extra attention is *function prologues*. At the
+//! beginning of a function's machine code there are typically a few instructions
+//! for loading argument values into allocas and checking if there's enough stack
+//! space for the function to execute. This *prologue* is not visible in the source
+//! code and LLVM puts a special PROLOGUE END marker into the line table at the
+//! first non-prologue instruction of the function. In order to find out where the
+//! prologue ends, LLVM looks for the first instruction in the function body that is
+//! linked to a source location. So, when generating prologue instructions we have
+//! to make sure that we don't emit source location information until the 'real'
+//! function body begins. For this reason, source location emission is disabled by
+//! default for any new function being translated and is only activated after a call
+//! to the third function from the list above, `start_emitting_source_locations()`.
+//! This function should be called right before regularly starting to translate the
+//! top-level block of the given function.
+//!
+//! There is one exception to the above rule: `llvm.dbg.declare` instruction must be
+//! linked to the source location of the variable being declared. For function
+//! parameters these `llvm.dbg.declare` instructions typically occur in the middle
+//! of the prologue, however, they are ignored by LLVM's prologue detection. The
+//! `create_argument_metadata()` and related functions take care of linking the
+//! `llvm.dbg.declare` instructions to the correct source locations even while
+//! source location emission is still disabled, so there is no need to do anything
+//! special with source location handling here.
+//!
+//! ## Unique Type Identification
+//!
+//! In order for link-time optimization to work properly, LLVM needs a unique type
+//! identifier that tells it across compilation units which types are the same as
+//! others. This type identifier is created by TypeMap::get_unique_type_id_of_type()
+//! using the following algorithm:
+//!
+//! (1) Primitive types have their name as ID
+//! (2) Structs, enums and traits have a multipart identifier
+//!
+//!     (1) The first part is the SVH (strict version hash) of the crate they were
+//!         originally defined in
+//!
+//!     (2) The second part is the ast::NodeId of the definition in their original
+//!         crate
+//!
+//!     (3) The final part is a concatenation of the type IDs of their concrete type
+//!         arguments if they are generic types.
+//!
+//! (3) Tuple-, pointer and function types are structurally identified, which means
+//!     that they are equivalent if their component types are equivalent (i.e. (int,
+//!     int) is the same regardless in which crate it is used).
+//!
+//! This algorithm also provides a stable ID for types that are defined in one crate
+//! but instantiated from metadata within another crate. We just have to take care
+//! to always map crate and node IDs back to the original crate context.
+//!
+//! As a side-effect these unique type IDs also help to solve a problem arising from
+//! lifetime parameters. Since lifetime parameters are completely omitted in
+//! debuginfo, more than one `Ty` instance may map to the same debuginfo type
+//! metadata, that is, some struct `Struct<'a>` may have N instantiations with
+//! different concrete substitutions for `'a`, and thus there will be N `Ty`
+//! instances for the type `Struct<'a>` even though it is not generic otherwise.
+//! Unfortunately this means that we cannot use `ty::type_id()` as cheap identifier
+//! for type metadata---we have done this in the past, but it led to unnecessary
+//! metadata duplication in the best case and LLVM assertions in the worst. However,
+//! the unique type ID as described above *can* be used as identifier. Since it is
+//! comparatively expensive to construct, though, `ty::type_id()` is still used
+//! additionally as an optimization for cases where the exact same type has been
+//! seen before (which is most of the time).
 use self::FunctionDebugContextRepr::*;
 use self::VariableAccess::*;
 use self::VariableKind::*;
diff --git a/src/librustc_trans/trans/doc.rs b/src/librustc_trans/trans/doc.rs
index a5281e582f136..c3ab8986372ad 100644
--- a/src/librustc_trans/trans/doc.rs
+++ b/src/librustc_trans/trans/doc.rs
@@ -8,230 +8,226 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-
-# Documentation for the trans module
-
-This module contains high-level summaries of how the various modules
-in trans work. It is a work in progress. For detailed comments,
-naturally, you can refer to the individual modules themselves.
-
-## The Expr module
-
-The expr module handles translation of expressions. The most general
-translation routine is `trans()`, which will translate an expression
-into a datum. `trans_into()` is also available, which will translate
-an expression and write the result directly into memory, sometimes
-avoiding the need for a temporary stack slot. Finally,
-`trans_to_lvalue()` is available if you'd like to ensure that the
-result has cleanup scheduled.
-
-Internally, each of these functions dispatches to various other
-expression functions depending on the kind of expression. We divide
-up expressions into:
-
-- **Datum expressions:** Those that most naturally yield values.
-  Examples would be `22`, `box x`, or `a + b` (when not overloaded).
-- **DPS expressions:** Those that most naturally write into a location
-  in memory. Examples would be `foo()` or `Point { x: 3, y: 4 }`.
-- **Statement expressions:** That that do not generate a meaningful
-  result. Examples would be `while { ... }` or `return 44`.
-
-## The Datum module
-
-A `Datum` encapsulates the result of evaluating a Rust expression.  It
-contains a `ValueRef` indicating the result, a `Ty` describing
-the Rust type, but also a *kind*. The kind indicates whether the datum
-has cleanup scheduled (lvalue) or not (rvalue) and -- in the case of
-rvalues -- whether or not the value is "by ref" or "by value".
-
-The datum API is designed to try and help you avoid memory errors like
-forgetting to arrange cleanup or duplicating a value. The type of the
-datum incorporates the kind, and thus reflects whether it has cleanup
-scheduled:
-
-- `Datum<Lvalue>` -- by ref, cleanup scheduled
-- `Datum<Rvalue>` -- by value or by ref, no cleanup scheduled
-- `Datum<Expr>` -- either `Datum<Lvalue>` or `Datum<Rvalue>`
-
-Rvalue and expr datums are noncopyable, and most of the methods on
-datums consume the datum itself (with some notable exceptions). This
-reflects the fact that datums may represent affine values which ought
-to be consumed exactly once, and if you were to try to (for example)
-store an affine value multiple times, you would be duplicating it,
-which would certainly be a bug.
-
-Some of the datum methods, however, are designed to work only on
-copyable values such as ints or pointers. Those methods may borrow the
-datum (`&self`) rather than consume it, but they always include
-assertions on the type of the value represented to check that this
-makes sense. An example is `shallow_copy()`, which duplicates
-a datum value.
-
-Translating an expression always yields a `Datum<Expr>` result, but
-the methods `to_[lr]value_datum()` can be used to coerce a
-`Datum<Expr>` into a `Datum<Lvalue>` or `Datum<Rvalue>` as
-needed. Coercing to an lvalue is fairly common, and generally occurs
-whenever it is necessary to inspect a value and pull out its
-subcomponents (for example, a match, or indexing expression). Coercing
-to an rvalue is more unusual; it occurs when moving values from place
-to place, such as in an assignment expression or parameter passing.
-
-### Lvalues in detail
-
-An lvalue datum is one for which cleanup has been scheduled. Lvalue
-datums are always located in memory, and thus the `ValueRef` for an
-LLVM value is always a pointer to the actual Rust value. This means
-that if the Datum has a Rust type of `int`, then the LLVM type of the
-`ValueRef` will be `int*` (pointer to int).
-
-Because lvalues already have cleanups scheduled, the memory must be
-zeroed to prevent the cleanup from taking place (presuming that the
-Rust type needs drop in the first place, otherwise it doesn't
-matter). The Datum code automatically performs this zeroing when the
-value is stored to a new location, for example.
-
-Lvalues usually result from evaluating lvalue expressions. For
-example, evaluating a local variable `x` yields an lvalue, as does a
-reference to a field like `x.f` or an index `x[i]`.
-
-Lvalue datums can also arise by *converting* an rvalue into an lvalue.
-This is done with the `to_lvalue_datum` method defined on
-`Datum<Expr>`. Basically this method just schedules cleanup if the
-datum is an rvalue, possibly storing the value into a stack slot first
-if needed. Converting rvalues into lvalues occurs in constructs like
-`&foo()` or `match foo() { ref x => ... }`, where the user is
-implicitly requesting a temporary.
-
-Somewhat surprisingly, not all lvalue expressions yield lvalue datums
-when trans'd. Ultimately the reason for this is to micro-optimize
-the resulting LLVM. For example, consider the following code:
-
-    fn foo() -> Box<int> { ... }
-    let x = *foo();
-
-The expression `*foo()` is an lvalue, but if you invoke `expr::trans`,
-it will return an rvalue datum. See `deref_once` in expr.rs for
-more details.
-
-### Rvalues in detail
-
-Rvalues datums are values with no cleanup scheduled. One must be
-careful with rvalue datums to ensure that cleanup is properly
-arranged, usually by converting to an lvalue datum or by invoking the
-`add_clean` method.
-
-### Scratch datums
-
-Sometimes you need some temporary scratch space.  The functions
-`[lr]value_scratch_datum()` can be used to get temporary stack
-space. As their name suggests, they yield lvalues and rvalues
-respectively. That is, the slot from `lvalue_scratch_datum` will have
-cleanup arranged, and the slot from `rvalue_scratch_datum` does not.
-
-## The Cleanup module
-
-The cleanup module tracks what values need to be cleaned up as scopes
-are exited, either via panic or just normal control flow. The basic
-idea is that the function context maintains a stack of cleanup scopes
-that are pushed/popped as we traverse the AST tree. There is typically
-at least one cleanup scope per AST node; some AST nodes may introduce
-additional temporary scopes.
-
-Cleanup items can be scheduled into any of the scopes on the stack.
-Typically, when a scope is popped, we will also generate the code for
-each of its cleanups at that time. This corresponds to a normal exit
-from a block (for example, an expression completing evaluation
-successfully without panic). However, it is also possible to pop a
-block *without* executing its cleanups; this is typically used to
-guard intermediate values that must be cleaned up on panic, but not
-if everything goes right. See the section on custom scopes below for
-more details.
-
-Cleanup scopes come in three kinds:
-- **AST scopes:** each AST node in a function body has a corresponding
-  AST scope. We push the AST scope when we start generate code for an AST
-  node and pop it once the AST node has been fully generated.
-- **Loop scopes:** loops have an additional cleanup scope. Cleanups are
-  never scheduled into loop scopes; instead, they are used to record the
-  basic blocks that we should branch to when a `continue` or `break` statement
-  is encountered.
-- **Custom scopes:** custom scopes are typically used to ensure cleanup
-  of intermediate values.
-
-### When to schedule cleanup
-
-Although the cleanup system is intended to *feel* fairly declarative,
-it's still important to time calls to `schedule_clean()` correctly.
-Basically, you should not schedule cleanup for memory until it has
-been initialized, because if an unwind should occur before the memory
-is fully initialized, then the cleanup will run and try to free or
-drop uninitialized memory. If the initialization itself produces
-byproducts that need to be freed, then you should use temporary custom
-scopes to ensure that those byproducts will get freed on unwind.  For
-example, an expression like `box foo()` will first allocate a box in the
-heap and then call `foo()` -- if `foo()` should panic, this box needs
-to be *shallowly* freed.
-
-### Long-distance jumps
-
-In addition to popping a scope, which corresponds to normal control
-flow exiting the scope, we may also *jump out* of a scope into some
-earlier scope on the stack. This can occur in response to a `return`,
-`break`, or `continue` statement, but also in response to panic. In
-any of these cases, we will generate a series of cleanup blocks for
-each of the scopes that is exited. So, if the stack contains scopes A
-... Z, and we break out of a loop whose corresponding cleanup scope is
-X, we would generate cleanup blocks for the cleanups in X, Y, and Z.
-After cleanup is done we would branch to the exit point for scope X.
-But if panic should occur, we would generate cleanups for all the
-scopes from A to Z and then resume the unwind process afterwards.
-
-To avoid generating tons of code, we cache the cleanup blocks that we
-create for breaks, returns, unwinds, and other jumps. Whenever a new
-cleanup is scheduled, though, we must clear these cached blocks. A
-possible improvement would be to keep the cached blocks but simply
-generate a new block which performs the additional cleanup and then
-branches to the existing cached blocks.
-
-### AST and loop cleanup scopes
-
-AST cleanup scopes are pushed when we begin and end processing an AST
-node. They are used to house cleanups related to rvalue temporary that
-get referenced (e.g., due to an expression like `&Foo()`). Whenever an
-AST scope is popped, we always trans all the cleanups, adding the cleanup
-code after the postdominator of the AST node.
-
-AST nodes that represent breakable loops also push a loop scope; the
-loop scope never has any actual cleanups, it's just used to point to
-the basic blocks where control should flow after a "continue" or
-"break" statement. Popping a loop scope never generates code.
-
-### Custom cleanup scopes
-
-Custom cleanup scopes are used for a variety of purposes. The most
-common though is to handle temporary byproducts, where cleanup only
-needs to occur on panic. The general strategy is to push a custom
-cleanup scope, schedule *shallow* cleanups into the custom scope, and
-then pop the custom scope (without transing the cleanups) when
-execution succeeds normally. This way the cleanups are only trans'd on
-unwind, and only up until the point where execution succeeded, at
-which time the complete value should be stored in an lvalue or some
-other place where normal cleanup applies.
-
-To spell it out, here is an example. Imagine an expression `box expr`.
-We would basically:
-
-1. Push a custom cleanup scope C.
-2. Allocate the box.
-3. Schedule a shallow free in the scope C.
-4. Trans `expr` into the box.
-5. Pop the scope C.
-6. Return the box as an rvalue.
-
-This way, if a panic occurs while transing `expr`, the custom
-cleanup scope C is pushed and hence the box will be freed. The trans
-code for `expr` itself is responsible for freeing any other byproducts
-that may be in play.
-
-*/
+//! # Documentation for the trans module
+//!
+//! This module contains high-level summaries of how the various modules
+//! in trans work. It is a work in progress. For detailed comments,
+//! naturally, you can refer to the individual modules themselves.
+//!
+//! ## The Expr module
+//!
+//! The expr module handles translation of expressions. The most general
+//! translation routine is `trans()`, which will translate an expression
+//! into a datum. `trans_into()` is also available, which will translate
+//! an expression and write the result directly into memory, sometimes
+//! avoiding the need for a temporary stack slot. Finally,
+//! `trans_to_lvalue()` is available if you'd like to ensure that the
+//! result has cleanup scheduled.
+//!
+//! Internally, each of these functions dispatches to various other
+//! expression functions depending on the kind of expression. We divide
+//! up expressions into:
+//!
+//! - **Datum expressions:** Those that most naturally yield values.
+//!   Examples would be `22`, `box x`, or `a + b` (when not overloaded).
+//! - **DPS expressions:** Those that most naturally write into a location
+//!   in memory. Examples would be `foo()` or `Point { x: 3, y: 4 }`.
+//! - **Statement expressions:** Those that do not generate a meaningful
+//!   result. Examples would be `while { ... }` or `return 44`.
+//!
+//! ## The Datum module
+//!
+//! A `Datum` encapsulates the result of evaluating a Rust expression.  It
+//! contains a `ValueRef` indicating the result, a `Ty` describing
+//! the Rust type, but also a *kind*. The kind indicates whether the datum
+//! has cleanup scheduled (lvalue) or not (rvalue) and -- in the case of
+//! rvalues -- whether or not the value is "by ref" or "by value".
+//!
+//! The datum API is designed to try and help you avoid memory errors like
+//! forgetting to arrange cleanup or duplicating a value. The type of the
+//! datum incorporates the kind, and thus reflects whether it has cleanup
+//! scheduled:
+//!
+//! - `Datum<Lvalue>` -- by ref, cleanup scheduled
+//! - `Datum<Rvalue>` -- by value or by ref, no cleanup scheduled
+//! - `Datum<Expr>` -- either `Datum<Lvalue>` or `Datum<Rvalue>`
+//!
+//! Rvalue and expr datums are noncopyable, and most of the methods on
+//! datums consume the datum itself (with some notable exceptions). This
+//! reflects the fact that datums may represent affine values which ought
+//! to be consumed exactly once, and if you were to try to (for example)
+//! store an affine value multiple times, you would be duplicating it,
+//! which would certainly be a bug.
+//!
+//! Some of the datum methods, however, are designed to work only on
+//! copyable values such as ints or pointers. Those methods may borrow the
+//! datum (`&self`) rather than consume it, but they always include
+//! assertions on the type of the value represented to check that this
+//! makes sense. An example is `shallow_copy()`, which duplicates
+//! a datum value.
+//!
+//! Translating an expression always yields a `Datum<Expr>` result, but
+//! the methods `to_[lr]value_datum()` can be used to coerce a
+//! `Datum<Expr>` into a `Datum<Lvalue>` or `Datum<Rvalue>` as
+//! needed. Coercing to an lvalue is fairly common, and generally occurs
+//! whenever it is necessary to inspect a value and pull out its
+//! subcomponents (for example, a match, or indexing expression). Coercing
+//! to an rvalue is more unusual; it occurs when moving values from place
+//! to place, such as in an assignment expression or parameter passing.
+//!
+//! ### Lvalues in detail
+//!
+//! An lvalue datum is one for which cleanup has been scheduled. Lvalue
+//! datums are always located in memory, and thus the `ValueRef` for an
+//! LLVM value is always a pointer to the actual Rust value. This means
+//! that if the Datum has a Rust type of `int`, then the LLVM type of the
+//! `ValueRef` will be `int*` (pointer to int).
+//!
+//! Because lvalues already have cleanups scheduled, the memory must be
+//! zeroed to prevent the cleanup from taking place (presuming that the
+//! Rust type needs drop in the first place, otherwise it doesn't
+//! matter). The Datum code automatically performs this zeroing when the
+//! value is stored to a new location, for example.
+//!
+//! Lvalues usually result from evaluating lvalue expressions. For
+//! example, evaluating a local variable `x` yields an lvalue, as does a
+//! reference to a field like `x.f` or an index `x[i]`.
+//!
+//! Lvalue datums can also arise by *converting* an rvalue into an lvalue.
+//! This is done with the `to_lvalue_datum` method defined on
+//! `Datum<Expr>`. Basically this method just schedules cleanup if the
+//! datum is an rvalue, possibly storing the value into a stack slot first
+//! if needed. Converting rvalues into lvalues occurs in constructs like
+//! `&foo()` or `match foo() { ref x => ... }`, where the user is
+//! implicitly requesting a temporary.
+//!
+//! Somewhat surprisingly, not all lvalue expressions yield lvalue datums
+//! when trans'd. Ultimately the reason for this is to micro-optimize
+//! the resulting LLVM. For example, consider the following code:
+//!
+//!     fn foo() -> Box<int> { ... }
+//!     let x = *foo();
+//!
+//! The expression `*foo()` is an lvalue, but if you invoke `expr::trans`,
+//! it will return an rvalue datum. See `deref_once` in expr.rs for
+//! more details.
+//!
+//! ### Rvalues in detail
+//!
+//! Rvalue datums are values with no cleanup scheduled. One must be
+//! careful with rvalue datums to ensure that cleanup is properly
+//! arranged, usually by converting to an lvalue datum or by invoking the
+//! `add_clean` method.
+//!
+//! ### Scratch datums
+//!
+//! Sometimes you need some temporary scratch space.  The functions
+//! `[lr]value_scratch_datum()` can be used to get temporary stack
+//! space. As their name suggests, they yield lvalues and rvalues
+//! respectively. That is, the slot from `lvalue_scratch_datum` will have
+//! cleanup arranged, and the slot from `rvalue_scratch_datum` does not.
+//!
+//! ## The Cleanup module
+//!
+//! The cleanup module tracks what values need to be cleaned up as scopes
+//! are exited, either via panic or just normal control flow. The basic
+//! idea is that the function context maintains a stack of cleanup scopes
+//! that are pushed/popped as we traverse the AST tree. There is typically
+//! at least one cleanup scope per AST node; some AST nodes may introduce
+//! additional temporary scopes.
+//!
+//! Cleanup items can be scheduled into any of the scopes on the stack.
+//! Typically, when a scope is popped, we will also generate the code for
+//! each of its cleanups at that time. This corresponds to a normal exit
+//! from a block (for example, an expression completing evaluation
+//! successfully without panic). However, it is also possible to pop a
+//! block *without* executing its cleanups; this is typically used to
+//! guard intermediate values that must be cleaned up on panic, but not
+//! if everything goes right. See the section on custom scopes below for
+//! more details.
+//!
+//! Cleanup scopes come in three kinds:
+//! - **AST scopes:** each AST node in a function body has a corresponding
+//!   AST scope. We push the AST scope when we start generating code for an AST
+//!   node and pop it once the AST node has been fully generated.
+//! - **Loop scopes:** loops have an additional cleanup scope. Cleanups are
+//!   never scheduled into loop scopes; instead, they are used to record the
+//!   basic blocks that we should branch to when a `continue` or `break` statement
+//!   is encountered.
+//! - **Custom scopes:** custom scopes are typically used to ensure cleanup
+//!   of intermediate values.
+//!
+//! ### When to schedule cleanup
+//!
+//! Although the cleanup system is intended to *feel* fairly declarative,
+//! it's still important to time calls to `schedule_clean()` correctly.
+//! Basically, you should not schedule cleanup for memory until it has
+//! been initialized, because if an unwind should occur before the memory
+//! is fully initialized, then the cleanup will run and try to free or
+//! drop uninitialized memory. If the initialization itself produces
+//! byproducts that need to be freed, then you should use temporary custom
+//! scopes to ensure that those byproducts will get freed on unwind.  For
+//! example, an expression like `box foo()` will first allocate a box in the
+//! heap and then call `foo()` -- if `foo()` should panic, this box needs
+//! to be *shallowly* freed.
+//!
+//! ### Long-distance jumps
+//!
+//! In addition to popping a scope, which corresponds to normal control
+//! flow exiting the scope, we may also *jump out* of a scope into some
+//! earlier scope on the stack. This can occur in response to a `return`,
+//! `break`, or `continue` statement, but also in response to panic. In
+//! any of these cases, we will generate a series of cleanup blocks for
+//! each of the scopes that is exited. So, if the stack contains scopes A
+//! ... Z, and we break out of a loop whose corresponding cleanup scope is
+//! X, we would generate cleanup blocks for the cleanups in X, Y, and Z.
+//! After cleanup is done we would branch to the exit point for scope X.
+//! But if panic should occur, we would generate cleanups for all the
+//! scopes from A to Z and then resume the unwind process afterwards.
+//!
+//! To avoid generating tons of code, we cache the cleanup blocks that we
+//! create for breaks, returns, unwinds, and other jumps. Whenever a new
+//! cleanup is scheduled, though, we must clear these cached blocks. A
+//! possible improvement would be to keep the cached blocks but simply
+//! generate a new block which performs the additional cleanup and then
+//! branches to the existing cached blocks.
+//!
+//! ### AST and loop cleanup scopes
+//!
+//! AST cleanup scopes are pushed when we begin and end processing an AST
+//! node. They are used to house cleanups related to rvalue temporaries that
+//! get referenced (e.g., due to an expression like `&Foo()`). Whenever an
+//! AST scope is popped, we always trans all the cleanups, adding the cleanup
+//! code after the postdominator of the AST node.
+//!
+//! AST nodes that represent breakable loops also push a loop scope; the
+//! loop scope never has any actual cleanups, it's just used to point to
+//! the basic blocks where control should flow after a "continue" or
+//! "break" statement. Popping a loop scope never generates code.
+//!
+//! ### Custom cleanup scopes
+//!
+//! Custom cleanup scopes are used for a variety of purposes. The most
+//! common though is to handle temporary byproducts, where cleanup only
+//! needs to occur on panic. The general strategy is to push a custom
+//! cleanup scope, schedule *shallow* cleanups into the custom scope, and
+//! then pop the custom scope (without transing the cleanups) when
+//! execution succeeds normally. This way the cleanups are only trans'd on
+//! unwind, and only up until the point where execution succeeded, at
+//! which time the complete value should be stored in an lvalue or some
+//! other place where normal cleanup applies.
+//!
+//! To spell it out, here is an example. Imagine an expression `box expr`.
+//! We would basically:
+//!
+//! 1. Push a custom cleanup scope C.
+//! 2. Allocate the box.
+//! 3. Schedule a shallow free in the scope C.
+//! 4. Trans `expr` into the box.
+//! 5. Pop the scope C.
+//! 6. Return the box as an rvalue.
+//!
+//! This way, if a panic occurs while transing `expr`, the custom
+//! cleanup scope C is pushed and hence the box will be freed. The trans
+//! code for `expr` itself is responsible for freeing any other byproducts
+//! that may be in play.
diff --git a/src/librustc_trans/trans/expr.rs b/src/librustc_trans/trans/expr.rs
index 482b318e37202..60809c8644d2d 100644
--- a/src/librustc_trans/trans/expr.rs
+++ b/src/librustc_trans/trans/expr.rs
@@ -8,28 +8,26 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
- * # Translation of Expressions
- *
- * Public entry points:
- *
- * - `trans_into(bcx, expr, dest) -> bcx`: evaluates an expression,
- *   storing the result into `dest`. This is the preferred form, if you
- *   can manage it.
- *
- * - `trans(bcx, expr) -> DatumBlock`: evaluates an expression, yielding
- *   `Datum` with the result. You can then store the datum, inspect
- *   the value, etc. This may introduce temporaries if the datum is a
- *   structural type.
- *
- * - `trans_to_lvalue(bcx, expr, "...") -> DatumBlock`: evaluates an
- *   expression and ensures that the result has a cleanup associated with it,
- *   creating a temporary stack slot if necessary.
- *
- * - `trans_local_var -> Datum`: looks up a local variable or upvar.
- *
- * See doc.rs for more comments.
- */
+//! # Translation of Expressions
+//!
+//! Public entry points:
+//!
+//! - `trans_into(bcx, expr, dest) -> bcx`: evaluates an expression,
+//!   storing the result into `dest`. This is the preferred form, if you
+//!   can manage it.
+//!
+//! - `trans(bcx, expr) -> DatumBlock`: evaluates an expression, yielding
+//!   `Datum` with the result. You can then store the datum, inspect
+//!   the value, etc. This may introduce temporaries if the datum is a
+//!   structural type.
+//!
+//! - `trans_to_lvalue(bcx, expr, "...") -> DatumBlock`: evaluates an
+//!   expression and ensures that the result has a cleanup associated with it,
+//!   creating a temporary stack slot if necessary.
+//!
+//! - `trans_local_var -> Datum`: looks up a local variable or upvar.
+//!
+//! See doc.rs for more comments.
 
 #![allow(non_camel_case_types)]
 
@@ -82,15 +80,12 @@ impl Dest {
     }
 }
 
+/// This function is equivalent to `trans(bcx, expr).store_to_dest(dest)` but it may generate
+/// better optimized LLVM code.
 pub fn trans_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                               expr: &ast::Expr,
                               dest: Dest)
                               -> Block<'blk, 'tcx> {
-    /*!
-     * This function is equivalent to `trans(bcx, expr).store_to_dest(dest)`
-     * but it may generate better optimized LLVM code.
-     */
-
     let mut bcx = bcx;
 
     if bcx.tcx().adjustments.borrow().contains_key(&expr.id) {
@@ -124,16 +119,12 @@ pub fn trans_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
     bcx.fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id)
 }
 
+/// Translates an expression, returning a datum (and new block) encapsulating the result. When
+/// possible, it is preferred to use `trans_into`, as that may avoid creating a temporary on the
+/// stack.
 pub fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                          expr: &ast::Expr)
                          -> DatumBlock<'blk, 'tcx, Expr> {
-    /*!
-     * Translates an expression, returning a datum (and new block)
-     * encapsulating the result. When possible, it is preferred to
-     * use `trans_into`, as that may avoid creating a temporary on
-     * the stack.
-     */
-
     debug!("trans(expr={})", bcx.expr_to_string(expr));
 
     let mut bcx = bcx;
@@ -157,15 +148,12 @@ pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
     GEPi(bcx, fat_ptr, &[0u, abi::FAT_PTR_ADDR])
 }
 
+/// Helper for trans that applies adjustments from `expr` to `datum`, which should be the unadjusted
+/// translation of `expr`.
 fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                  expr: &ast::Expr,
                                  datum: Datum<'tcx, Expr>)
                                  -> DatumBlock<'blk, 'tcx, Expr> {
-    /*!
-     * Helper for trans that apply adjustments from `expr` to `datum`,
-     * which should be the unadjusted translation of `expr`.
-     */
-
     let mut bcx = bcx;
     let mut datum = datum;
     let adjustment = match bcx.tcx().adjustments.borrow().get(&expr.id).cloned() {
@@ -480,34 +468,27 @@ fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
     }
 }
 
+/// Translates an expression in "lvalue" mode -- meaning that it returns a reference to the memory
+/// that the expr represents.
+///
+/// If this expression is an rvalue, this implies introducing a temporary.  In other words,
+/// something like `x().f` is translated into roughly the equivalent of
+///
+///   { tmp = x(); tmp.f }
 pub fn trans_to_lvalue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                    expr: &ast::Expr,
                                    name: &str)
                                    -> DatumBlock<'blk, 'tcx, Lvalue> {
-    /*!
-     * Translates an expression in "lvalue" mode -- meaning that it
-     * returns a reference to the memory that the expr represents.
-     *
-     * If this expression is an rvalue, this implies introducing a
-     * temporary.  In other words, something like `x().f` is
-     * translated into roughly the equivalent of
-     *
-     *   { tmp = x(); tmp.f }
-     */
-
     let mut bcx = bcx;
     let datum = unpack_datum!(bcx, trans(bcx, expr));
     return datum.to_lvalue_datum(bcx, name, expr.id);
 }
 
+/// A version of `trans` that ignores adjustments. You almost certainly do not want to call this
+/// directly.
 fn trans_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                 expr: &ast::Expr)
                                 -> DatumBlock<'blk, 'tcx, Expr> {
-    /*!
-     * A version of `trans` that ignores adjustments. You almost
-     * certainly do not want to call this directly.
-     */
-
     let mut bcx = bcx;
 
     debug!("trans_unadjusted(expr={})", bcx.expr_to_string(expr));
@@ -1218,14 +1199,10 @@ fn trans_def_fn_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
     DatumBlock::new(bcx, Datum::new(llfn, fn_ty, RvalueExpr(Rvalue::new(ByValue))))
 }
 
+/// Translates a reference to a local variable or argument. This always results in an lvalue datum.
 pub fn trans_local_var<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                    def: def::Def)
                                    -> Datum<'tcx, Lvalue> {
-    /*!
-     * Translates a reference to a local variable or argument.
-     * This always results in an lvalue datum.
-     */
-
     let _icx = push_ctxt("trans_local_var");
 
     match def {
@@ -1262,18 +1239,14 @@ pub fn trans_local_var<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
     }
 }
 
+/// Helper for enumerating the field types of structs, enums, or records. The optional node ID here
+/// is the node ID of the path identifying the enum variant in use. If none, this cannot possibly be
+/// an enum variant (so, if it is and `node_id_opt` is none, this function panics).
 pub fn with_field_tys<'tcx, R>(tcx: &ty::ctxt<'tcx>,
                                ty: Ty<'tcx>,
                                node_id_opt: Option<ast::NodeId>,
                                op: |ty::Disr, (&[ty::field<'tcx>])| -> R)
                                -> R {
-    /*!
-     * Helper for enumerating the field types of structs, enums, or records.
-     * The optional node ID here is the node ID of the path identifying the enum
-     * variant in use. If none, this cannot possibly an enum variant (so, if it
-     * is and `node_id_opt` is none, this function panics).
-     */
-
     match ty.sty {
         ty::ty_struct(did, ref substs) => {
             op(0, struct_fields(tcx, did, substs).as_slice())
@@ -2189,24 +2162,18 @@ fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
 
     return r;
 
+    /// We microoptimize derefs of owned pointers a bit here. Basically, the idea is to make the
+    /// deref of an rvalue result in an rvalue. This helps to avoid intermediate stack slots in the
+    /// resulting LLVM. The idea here is that, if the `Box<T>` pointer is an rvalue, then we can
+    /// schedule a *shallow* free of the `Box<T>` pointer, and then return a ByRef rvalue into the
+    /// pointer. Because the free is shallow, it is legit to return an rvalue, because we know that
+    /// the contents are not yet scheduled to be freed. The language rules ensure that the contents
+    /// will be used (or moved) before the free occurs.
     fn deref_owned_pointer<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                        expr: &ast::Expr,
                                        datum: Datum<'tcx, Expr>,
                                        content_ty: Ty<'tcx>)
                                        -> DatumBlock<'blk, 'tcx, Expr> {
-        /*!
-         * We microoptimize derefs of owned pointers a bit here.
-         * Basically, the idea is to make the deref of an rvalue
-         * result in an rvalue. This helps to avoid intermediate stack
-         * slots in the resulting LLVM. The idea here is that, if the
-         * `Box<T>` pointer is an rvalue, then we can schedule a *shallow*
-         * free of the `Box<T>` pointer, and then return a ByRef rvalue
-         * into the pointer. Because the free is shallow, it is legit
-         * to return an rvalue, because we know that the contents are
-         * not yet scheduled to be freed. The language rules ensure that the
-         * contents will be used (or moved) before the free occurs.
-         */
-
         match datum.kind {
             RvalueExpr(Rvalue { mode: ByRef }) => {
                 let scope = cleanup::temporary_scope(bcx.tcx(), expr.id);
diff --git a/src/librustc_trans/trans/foreign.rs b/src/librustc_trans/trans/foreign.rs
index 1f6aeacc86058..6f97f6453fd91 100644
--- a/src/librustc_trans/trans/foreign.rs
+++ b/src/librustc_trans/trans/foreign.rs
@@ -161,14 +161,10 @@ pub fn register_static(ccx: &CrateContext,
     }
 }
 
+/// Registers a foreign function found in a library. Just adds a LLVM global.
 pub fn register_foreign_item_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                           abi: Abi, fty: Ty<'tcx>,
                                           name: &str) -> ValueRef {
-    /*!
-     * Registers a foreign function found in a library.
-     * Just adds a LLVM global.
-     */
-
     debug!("register_foreign_item_fn(abi={}, \
             ty={}, \
             name={})",
@@ -201,6 +197,20 @@ pub fn register_foreign_item_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
     llfn
 }
 
+/// Prepares a call to a native function. This requires adapting
+/// from the Rust argument passing rules to the native rules.
+///
+/// # Parameters
+///
+/// - `callee_ty`: Rust type for the function we are calling
+/// - `llfn`: the function pointer we are calling
+/// - `llretptr`: where to store the return value of the function
+/// - `llargs_rust`: a list of the argument values, prepared
+///   as they would be if calling a Rust function
+/// - `passed_arg_tys`: Rust type for the arguments. Normally we
+///   can derive these from callee_ty but in the case of variadic
+///   functions passed_arg_tys will include the Rust type of all
+///   the arguments including the ones not specified in the fn's signature.
 pub fn trans_native_call<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                      callee_ty: Ty<'tcx>,
                                      llfn: ValueRef,
@@ -208,23 +218,6 @@ pub fn trans_native_call<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                      llargs_rust: &[ValueRef],
                                      passed_arg_tys: Vec<Ty<'tcx>>)
                                      -> Block<'blk, 'tcx> {
-    /*!
-     * Prepares a call to a native function. This requires adapting
-     * from the Rust argument passing rules to the native rules.
-     *
-     * # Parameters
-     *
-     * - `callee_ty`: Rust type for the function we are calling
-     * - `llfn`: the function pointer we are calling
-     * - `llretptr`: where to store the return value of the function
-     * - `llargs_rust`: a list of the argument values, prepared
-     *   as they would be if calling a Rust function
-     * - `passed_arg_tys`: Rust type for the arguments. Normally we
-     *   can derive these from callee_ty but in the case of variadic
-     *   functions passed_arg_tys will include the Rust type of all
-     *   the arguments including the ones not specified in the fn's signature.
-     */
-
     let ccx = bcx.ccx();
     let tcx = bcx.tcx();
 
@@ -832,17 +825,13 @@ pub fn link_name(i: &ast::ForeignItem) -> InternedString {
     }
 }
 
+/// The ForeignSignature is the LLVM types of the arguments/return type of a function. Note that
+/// these LLVM types are not quite the same as the LLVM types would be for a native Rust function
+/// because foreign functions just plain ignore modes. They also don't pass aggregate values by
+/// pointer like we do.
 fn foreign_signature<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                fn_sig: &ty::FnSig<'tcx>, arg_tys: &[Ty<'tcx>])
                                -> LlvmSignature {
-    /*!
-     * The ForeignSignature is the LLVM types of the arguments/return type
-     * of a function.  Note that these LLVM types are not quite the same
-     * as the LLVM types would be for a native Rust function because foreign
-     * functions just plain ignore modes.  They also don't pass aggregate
-     * values by pointer like we do.
-     */
-
     let llarg_tys = arg_tys.iter().map(|&arg| arg_type_of(ccx, arg)).collect();
     let (llret_ty, ret_def) = match fn_sig.output {
         ty::FnConverging(ret_ty) =>
diff --git a/src/librustc_trans/trans/meth.rs b/src/librustc_trans/trans/meth.rs
index 0ff7f3ee71cc6..06d916c1ea6f2 100644
--- a/src/librustc_trans/trans/meth.rs
+++ b/src/librustc_trans/trans/meth.rs
@@ -377,28 +377,21 @@ fn trans_monomorphized_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
     }
 }
 
+/// Creates a concatenated set of substitutions which includes those from the impl and those from
+/// the method.  There are some subtle complications here.  Statically, we have a list of type
+/// parameters like `[T0, T1, T2, M1, M2, M3]` where `Tn` are type parameters that appear on the
+/// receiver.  For example, if the receiver is a method parameter `A` with a bound like
+/// `trait<B,C,D>` then `Tn` would be `[B,C,D]`.
+///
+/// The weird part is that the type `A` might now be bound to any other type, such as `foo<X>`.
+/// In that case, the vector we want is: `[X, M1, M2, M3]`.  Therefore, what we do now is to slice
+/// off the method type parameters and append them to the type parameters from the type that the
+/// receiver is mapped to.
 fn combine_impl_and_methods_tps<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                             node: ExprOrMethodCall,
                                             rcvr_substs: subst::Substs<'tcx>)
                                             -> subst::Substs<'tcx>
 {
-    /*!
-     * Creates a concatenated set of substitutions which includes
-     * those from the impl and those from the method.  This are
-     * some subtle complications here.  Statically, we have a list
-     * of type parameters like `[T0, T1, T2, M1, M2, M3]` where
-     * `Tn` are type parameters that appear on the receiver.  For
-     * example, if the receiver is a method parameter `A` with a
-     * bound like `trait<B,C,D>` then `Tn` would be `[B,C,D]`.
-     *
-     * The weird part is that the type `A` might now be bound to
-     * any other type, such as `foo<X>`.  In that case, the vector
-     * we want is: `[X, M1, M2, M3]`.  Therefore, what we do now is
-     * to slice off the method type parameters and append them to
-     * the type parameters from the type that the receiver is
-     * mapped to.
-     */
-
     let ccx = bcx.ccx();
 
     let node_substs = node_id_substs(bcx, node);
@@ -422,21 +415,16 @@ fn combine_impl_and_methods_tps<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
     }
 }
 
+/// Create a method callee where the method is coming from a trait object (e.g., Box<Trait> type).
+/// In this case, we must pull the fn pointer out of the vtable that is packaged up with the
+/// object. Objects are represented as a pair, so we first evaluate the self expression and then
+/// extract the self data and vtable out of the pair.
 fn trans_trait_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                   method_ty: Ty<'tcx>,
                                   n_method: uint,
                                   self_expr: &ast::Expr,
                                   arg_cleanup_scope: cleanup::ScopeId)
                                   -> Callee<'blk, 'tcx> {
-    /*!
-     * Create a method callee where the method is coming from a trait
-     * object (e.g., Box<Trait> type).  In this case, we must pull the fn
-     * pointer out of the vtable that is packaged up with the object.
-     * Objects are represented as a pair, so we first evaluate the self
-     * expression and then extract the self data and vtable out of the
-     * pair.
-     */
-
     let _icx = push_ctxt("meth::trans_trait_callee");
     let mut bcx = bcx;
 
@@ -466,16 +454,13 @@ fn trans_trait_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
     trans_trait_callee_from_llval(bcx, method_ty, n_method, llval)
 }
 
+/// Same as `trans_trait_callee()` above, except that it is given a by-ref pointer to the object
+/// pair.
 pub fn trans_trait_callee_from_llval<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                                  callee_ty: Ty<'tcx>,
                                                  n_method: uint,
                                                  llpair: ValueRef)
                                                  -> Callee<'blk, 'tcx> {
-    /*!
-     * Same as `trans_trait_callee()` above, except that it is given
-     * a by-ref pointer to the object pair.
-     */
-
     let _icx = push_ctxt("meth::trans_trait_callee");
     let ccx = bcx.ccx();
 
@@ -731,19 +716,15 @@ fn emit_vtable_methods<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
     }).collect()
 }
 
+/// Generates the code to convert from a pointer (`Box<T>`, `&T`, etc) into an object
+/// (`Box<Trait>`, `&Trait`, etc). This means creating a pair where the first word is the vtable
+/// and the second word is the pointer.
 pub fn trans_trait_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                     datum: Datum<'tcx, Expr>,
                                     id: ast::NodeId,
                                     trait_ref: Rc<ty::TraitRef<'tcx>>,
                                     dest: expr::Dest)
                                     -> Block<'blk, 'tcx> {
-    /*!
-     * Generates the code to convert from a pointer (`Box<T>`, `&T`, etc)
-     * into an object (`Box<Trait>`, `&Trait`, etc). This means creating a
-     * pair where the first word is the vtable and the second word is
-     * the pointer.
-     */
-
     let mut bcx = bcx;
     let _icx = push_ctxt("meth::trans_trait_cast");
 
diff --git a/src/librustc_trans/trans/tvec.rs b/src/librustc_trans/trans/tvec.rs
index 8e986defb6a36..9aeb4cdb8a30a 100644
--- a/src/librustc_trans/trans/tvec.rs
+++ b/src/librustc_trans/trans/tvec.rs
@@ -134,17 +134,13 @@ pub fn trans_fixed_vstore<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
     };
 }
 
+/// &[...] allocates memory on the stack and writes the values into it, returning the vector (the
+/// caller must make the reference).  "..." is similar except that the memory can be statically
+/// allocated and we return a reference (strings are always by-ref).
 pub fn trans_slice_vec<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                    slice_expr: &ast::Expr,
                                    content_expr: &ast::Expr)
                                    -> DatumBlock<'blk, 'tcx, Expr> {
-    /*!
-     * &[...] allocates memory on the stack and writes the values into it,
-     * returning the vector (the caller must make the reference).  "..." is
-     * similar except that the memory can be statically allocated and we return
-     * a reference (strings are always by-ref).
-     */
-
     let fcx = bcx.fcx;
     let ccx = fcx.ccx;
     let mut bcx = bcx;
@@ -208,17 +204,13 @@ pub fn trans_slice_vec<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
     immediate_rvalue_bcx(bcx, llfixed, vec_ty).to_expr_datumblock()
 }
 
+/// Literal strings translate to slices into static memory.  This is different from
+/// trans_slice_vstore() above because it doesn't need to copy the content anywhere.
 pub fn trans_lit_str<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                  lit_expr: &ast::Expr,
                                  str_lit: InternedString,
                                  dest: Dest)
                                  -> Block<'blk, 'tcx> {
-    /*!
-     * Literal strings translate to slices into static memory.  This is
-     * different from trans_slice_vstore() above because it doesn't need to copy
-     * the content anywhere.
-     */
-
     debug!("trans_lit_str(lit_expr={}, dest={})",
            bcx.expr_to_string(lit_expr),
            dest.to_string(bcx.ccx()));
@@ -382,15 +374,12 @@ pub fn elements_required(bcx: Block, content_expr: &ast::Expr) -> uint {
     }
 }
 
+/// Converts a fixed-length vector into the slice pair. The vector should be stored in `llval`
+/// which should be by ref.
 pub fn get_fixed_base_and_len(bcx: Block,
                               llval: ValueRef,
                               vec_length: uint)
                               -> (ValueRef, ValueRef) {
-    /*!
-     * Converts a fixed-length vector into the slice pair.
-     * The vector should be stored in `llval` which should be by ref.
-     */
-
     let ccx = bcx.ccx();
 
     let base = expr::get_dataptr(bcx, llval);
@@ -406,18 +395,13 @@ fn get_slice_base_and_len(bcx: Block,
     (base, len)
 }
 
+/// Converts a vector into the slice pair.  The vector should be stored in `llval` which should be
+/// by-reference.  If you have a datum, you would probably prefer to call
+/// `Datum::get_base_and_len()` which will handle any conversions for you.
 pub fn get_base_and_len(bcx: Block,
                         llval: ValueRef,
                         vec_ty: Ty)
                         -> (ValueRef, ValueRef) {
-    /*!
-     * Converts a vector into the slice pair.  The vector should be
-     * stored in `llval` which should be by-reference.  If you have a
-     * datum, you would probably prefer to call
-     * `Datum::get_base_and_len()` which will handle any conversions
-     * for you.
-     */
-
     let ccx = bcx.ccx();
 
     match vec_ty.sty {
diff --git a/src/librustrt/c_str.rs b/src/librustrt/c_str.rs
index d62b1485db33a..261bd1b9f8cb8 100644
--- a/src/librustrt/c_str.rs
+++ b/src/librustrt/c_str.rs
@@ -8,68 +8,64 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-
-C-string manipulation and management
-
-This modules provides the basic methods for creating and manipulating
-null-terminated strings for use with FFI calls (back to C). Most C APIs require
-that the string being passed to them is null-terminated, and by default rust's
-string types are *not* null terminated.
-
-The other problem with translating Rust strings to C strings is that Rust
-strings can validly contain a null-byte in the middle of the string (0 is a
-valid Unicode codepoint). This means that not all Rust strings can actually be
-translated to C strings.
-
-# Creation of a C string
-
-A C string is managed through the `CString` type defined in this module. It
-"owns" the internal buffer of characters and will automatically deallocate the
-buffer when the string is dropped. The `ToCStr` trait is implemented for `&str`
-and `&[u8]`, but the conversions can fail due to some of the limitations
-explained above.
-
-This also means that currently whenever a C string is created, an allocation
-must be performed to place the data elsewhere (the lifetime of the C string is
-not tied to the lifetime of the original string/data buffer). If C strings are
-heavily used in applications, then caching may be advisable to prevent
-unnecessary amounts of allocations.
-
-Be carefull to remember that the memory is managed by C allocator API and not
-by Rust allocator API.
-That means that the CString pointers should be freed with C allocator API
-if you intend to do that on your own, as the behaviour if you free them with
-Rust's allocator API is not well defined
-
-An example of creating and using a C string would be:
-
-```rust
-extern crate libc;
-
-extern {
-    fn puts(s: *const libc::c_char);
-}
-
-fn main() {
-    let my_string = "Hello, world!";
-
-    // Allocate the C string with an explicit local that owns the string. The
-    // `c_buffer` pointer will be deallocated when `my_c_string` goes out of scope.
-    let my_c_string = my_string.to_c_str();
-    unsafe {
-        puts(my_c_string.as_ptr());
-    }
-
-    // Don't save/return the pointer to the C string, the `c_buffer` will be
-    // deallocated when this block returns!
-    my_string.with_c_str(|c_buffer| {
-        unsafe { puts(c_buffer); }
-    });
-}
-```
-
-*/
+//! C-string manipulation and management
+//!
+//! This module provides the basic methods for creating and manipulating
+//! null-terminated strings for use with FFI calls (back to C). Most C APIs require
+//! that the string being passed to them is null-terminated, and by default rust's
+//! string types are *not* null terminated.
+//!
+//! The other problem with translating Rust strings to C strings is that Rust
+//! strings can validly contain a null-byte in the middle of the string (0 is a
+//! valid Unicode codepoint). This means that not all Rust strings can actually be
+//! translated to C strings.
+//!
+//! # Creation of a C string
+//!
+//! A C string is managed through the `CString` type defined in this module. It
+//! "owns" the internal buffer of characters and will automatically deallocate the
+//! buffer when the string is dropped. The `ToCStr` trait is implemented for `&str`
+//! and `&[u8]`, but the conversions can fail due to some of the limitations
+//! explained above.
+//!
+//! This also means that currently whenever a C string is created, an allocation
+//! must be performed to place the data elsewhere (the lifetime of the C string is
+//! not tied to the lifetime of the original string/data buffer). If C strings are
+//! heavily used in applications, then caching may be advisable to prevent
+//! unnecessary amounts of allocations.
+//!
+//! Be careful to remember that the memory is managed by the C allocator API and not
+//! by the Rust allocator API.
+//! That means that the CString pointers should be freed with the C allocator API
+//! if you intend to do that on your own, as the behaviour if you free them with
+//! Rust's allocator API is not well defined.
+//!
+//! An example of creating and using a C string would be:
+//!
+//! ```rust
+//! extern crate libc;
+//!
+//! extern {
+//!     fn puts(s: *const libc::c_char);
+//! }
+//!
+//! fn main() {
+//!     let my_string = "Hello, world!";
+//!
+//!     // Allocate the C string with an explicit local that owns the string. The
+//!     // `c_buffer` pointer will be deallocated when `my_c_string` goes out of scope.
+//!     let my_c_string = my_string.to_c_str();
+//!     unsafe {
+//!         puts(my_c_string.as_ptr());
+//!     }
+//!
+//!     // Don't save/return the pointer to the C string, the `c_buffer` will be
+//!     // deallocated when this block returns!
+//!     my_string.with_c_str(|c_buffer| {
+//!         unsafe { puts(c_buffer); }
+//!     });
+//! }
+//! ```
 
 use collections::string::String;
 use collections::hash;
diff --git a/src/libserialize/json.rs b/src/libserialize/json.rs
index 4a2ca58fc9269..3c03dc35f3b29 100644
--- a/src/libserialize/json.rs
+++ b/src/libserialize/json.rs
@@ -14,185 +14,182 @@
 #![forbid(non_camel_case_types)]
 #![allow(missing_docs)]
 
-/*!
-JSON parsing and serialization
-
-# What is JSON?
-
-JSON (JavaScript Object Notation) is a way to write data in Javascript.
-Like XML, it allows to encode structured data in a text format that can be easily read by humans.
-Its simple syntax and native compatibility with JavaScript have made it a widely used format.
-
-Data types that can be encoded are JavaScript types (see the `Json` enum for more details):
-
-* `Boolean`: equivalent to rust's `bool`
-* `Number`: equivalent to rust's `f64`
-* `String`: equivalent to rust's `String`
-* `Array`: equivalent to rust's `Vec<T>`, but also allowing objects of different types in the same
-array
-* `Object`: equivalent to rust's `Treemap<String, json::Json>`
-* `Null`
-
-An object is a series of string keys mapping to values, in `"key": value` format.
-Arrays are enclosed in square brackets ([ ... ]) and objects in curly brackets ({ ... }).
-A simple JSON document encoding a person, his/her age, address and phone numbers could look like:
-
-```ignore
-{
-    "FirstName": "John",
-    "LastName": "Doe",
-    "Age": 43,
-    "Address": {
-        "Street": "Downing Street 10",
-        "City": "London",
-        "Country": "Great Britain"
-    },
-    "PhoneNumbers": [
-        "+44 1234567",
-        "+44 2345678"
-    ]
-}
-```
-
-# Rust Type-based Encoding and Decoding
-
-Rust provides a mechanism for low boilerplate encoding & decoding of values to and from JSON via
-the serialization API.
-To be able to encode a piece of data, it must implement the `serialize::Encodable` trait.
-To be able to decode a piece of data, it must implement the `serialize::Decodable` trait.
-The Rust compiler provides an annotation to automatically generate the code for these traits:
-`#[deriving(Decodable, Encodable)]`
-
-The JSON API provides an enum `json::Json` and a trait `ToJson` to encode objects.
-The `ToJson` trait provides a `to_json` method to convert an object into a `json::Json` value.
-A `json::Json` value can be encoded as a string or buffer using the functions described above.
-You can also use the `json::Encoder` object, which implements the `Encoder` trait.
-
-When using `ToJson` the `Encodable` trait implementation is not mandatory.
-
-# Examples of use
-
-## Using Autoserialization
-
-Create a struct called `TestStruct` and serialize and deserialize it to and from JSON using the
-serialization API, using the derived serialization code.
-
-```rust
-extern crate serialize;
-use serialize::json;
-
-// Automatically generate `Decodable` and `Encodable` trait implementations
-#[deriving(Decodable, Encodable)]
-pub struct TestStruct  {
-    data_int: u8,
-    data_str: String,
-    data_vector: Vec<u8>,
-}
-
-fn main() {
-    let object = TestStruct {
-        data_int: 1,
-        data_str: "toto".to_string(),
-        data_vector: vec![2,3,4,5],
-    };
-
-    // Serialize using `json::encode`
-    let encoded = json::encode(&object);
-
-    // Deserialize using `json::decode`
-    let decoded: TestStruct = json::decode(encoded.as_slice()).unwrap();
-}
-```
-
-## Using the `ToJson` trait
-
-The examples above use the `ToJson` trait to generate the JSON string, which is required
-for custom mappings.
-
-### Simple example of `ToJson` usage
-
-```rust
-extern crate serialize;
-use serialize::json::ToJson;
-use serialize::json;
-
-// A custom data structure
-struct ComplexNum {
-    a: f64,
-    b: f64,
-}
-
-// JSON value representation
-impl ToJson for ComplexNum {
-    fn to_json(&self) -> json::Json {
-        json::String(format!("{}+{}i", self.a, self.b))
-    }
-}
-
-// Only generate `Encodable` trait implementation
-#[deriving(Encodable)]
-pub struct ComplexNumRecord {
-    uid: u8,
-    dsc: String,
-    val: json::Json,
-}
-
-fn main() {
-    let num = ComplexNum { a: 0.0001, b: 12.539 };
-    let data: String = json::encode(&ComplexNumRecord{
-        uid: 1,
-        dsc: "test".to_string(),
-        val: num.to_json(),
-    });
-    println!("data: {}", data);
-    // data: {"uid":1,"dsc":"test","val":"0.0001+12.539j"};
-}
-```
-
-### Verbose example of `ToJson` usage
-
-```rust
-extern crate serialize;
-use std::collections::TreeMap;
-use serialize::json::ToJson;
-use serialize::json;
-
-// Only generate `Decodable` trait implementation
-#[deriving(Decodable)]
-pub struct TestStruct {
-    data_int: u8,
-    data_str: String,
-    data_vector: Vec<u8>,
-}
-
-// Specify encoding method manually
-impl ToJson for TestStruct {
-    fn to_json(&self) -> json::Json {
-        let mut d = TreeMap::new();
-        // All standard types implement `to_json()`, so use it
-        d.insert("data_int".to_string(), self.data_int.to_json());
-        d.insert("data_str".to_string(), self.data_str.to_json());
-        d.insert("data_vector".to_string(), self.data_vector.to_json());
-        json::Object(d)
-    }
-}
-
-fn main() {
-    // Serialize using `ToJson`
-    let input_data = TestStruct {
-        data_int: 1,
-        data_str: "toto".to_string(),
-        data_vector: vec![2,3,4,5],
-    };
-    let json_obj: json::Json = input_data.to_json();
-    let json_str: String = json_obj.to_string();
-
-    // Deserialize like before
-    let decoded: TestStruct = json::decode(json_str.as_slice()).unwrap();
-}
-```
-
-*/
+//! JSON parsing and serialization
+//!
+//! # What is JSON?
+//!
+//! JSON (JavaScript Object Notation) is a way to write data in Javascript.
+//! Like XML, it allows encoding structured data in a text format easily read by humans.
+//! Its simple syntax and native compatibility with JavaScript have made it a widely used format.
+//!
+//! Data types that can be encoded are JavaScript types (see the `Json` enum for more details):
+//!
+//! * `Boolean`: equivalent to rust's `bool`
+//! * `Number`: equivalent to rust's `f64`
+//! * `String`: equivalent to rust's `String`
+//! * `Array`: equivalent to rust's `Vec<T>`, but also allowing objects of different types in the
+//!   same array
+//! * `Object`: equivalent to rust's `Treemap<String, json::Json>`
+//! * `Null`
+//!
+//! An object is a series of string keys mapping to values, in `"key": value` format.
+//! Arrays are enclosed in square brackets ([ ... ]) and objects in curly brackets ({ ... }).
+//! A simple JSON document encoding a person's age, address and phone numbers could look like:
+//!
+//! ```ignore
+//! {
+//!     "FirstName": "John",
+//!     "LastName": "Doe",
+//!     "Age": 43,
+//!     "Address": {
+//!         "Street": "Downing Street 10",
+//!         "City": "London",
+//!         "Country": "Great Britain"
+//!     },
+//!     "PhoneNumbers": [
+//!         "+44 1234567",
+//!         "+44 2345678"
+//!     ]
+//! }
+//! ```
+//!
+//! # Rust Type-based Encoding and Decoding
+//!
+//! Rust provides a mechanism for low boilerplate encoding & decoding of values to and from JSON via
+//! the serialization API.
+//! To be able to encode a piece of data, it must implement the `serialize::Encodable` trait.
+//! To be able to decode a piece of data, it must implement the `serialize::Decodable` trait.
+//! The Rust compiler provides an annotation to automatically generate the code for these traits:
+//! `#[deriving(Decodable, Encodable)]`
+//!
+//! The JSON API provides an enum `json::Json` and a trait `ToJson` to encode objects.
+//! The `ToJson` trait provides a `to_json` method to convert an object into a `json::Json` value.
+//! A `json::Json` value can be encoded as a string or buffer using the functions described above.
+//! You can also use the `json::Encoder` object, which implements the `Encoder` trait.
+//!
+//! When using `ToJson` the `Encodable` trait implementation is not mandatory.
+//!
+//! # Examples of use
+//!
+//! ## Using Autoserialization
+//!
+//! Create a struct called `TestStruct` and serialize and deserialize it to and from JSON using the
+//! serialization API, using the derived serialization code.
+//!
+//! ```rust
+//! extern crate serialize;
+//! use serialize::json;
+//!
+//! // Automatically generate `Decodable` and `Encodable` trait implementations
+//! #[deriving(Decodable, Encodable)]
+//! pub struct TestStruct  {
+//!     data_int: u8,
+//!     data_str: String,
+//!     data_vector: Vec<u8>,
+//! }
+//!
+//! fn main() {
+//!     let object = TestStruct {
+//!         data_int: 1,
+//!         data_str: "toto".to_string(),
+//!         data_vector: vec![2,3,4,5],
+//!     };
+//!
+//!     // Serialize using `json::encode`
+//!     let encoded = json::encode(&object);
+//!
+//!     // Deserialize using `json::decode`
+//!     let decoded: TestStruct = json::decode(encoded.as_slice()).unwrap();
+//! }
+//! ```
+//!
+//! ## Using the `ToJson` trait
+//!
+//! The examples above use the `ToJson` trait to generate the JSON string, which is required
+//! for custom mappings.
+//!
+//! ### Simple example of `ToJson` usage
+//!
+//! ```rust
+//! extern crate serialize;
+//! use serialize::json::ToJson;
+//! use serialize::json;
+//!
+//! // A custom data structure
+//! struct ComplexNum {
+//!     a: f64,
+//!     b: f64,
+//! }
+//!
+//! // JSON value representation
+//! impl ToJson for ComplexNum {
+//!     fn to_json(&self) -> json::Json {
+//!         json::String(format!("{}+{}i", self.a, self.b))
+//!     }
+//! }
+//!
+//! // Only generate `Encodable` trait implementation
+//! #[deriving(Encodable)]
+//! pub struct ComplexNumRecord {
+//!     uid: u8,
+//!     dsc: String,
+//!     val: json::Json,
+//! }
+//!
+//! fn main() {
+//!     let num = ComplexNum { a: 0.0001, b: 12.539 };
+//!     let data: String = json::encode(&ComplexNumRecord{
+//!         uid: 1,
+//!         dsc: "test".to_string(),
+//!         val: num.to_json(),
+//!     });
+//!     println!("data: {}", data);
+//!     // data: {"uid":1,"dsc":"test","val":"0.0001+12.539j"};
+//! }
+//! ```
+//!
+//! ### Verbose example of `ToJson` usage
+//!
+//! ```rust
+//! extern crate serialize;
+//! use std::collections::TreeMap;
+//! use serialize::json::ToJson;
+//! use serialize::json;
+//!
+//! // Only generate `Decodable` trait implementation
+//! #[deriving(Decodable)]
+//! pub struct TestStruct {
+//!     data_int: u8,
+//!     data_str: String,
+//!     data_vector: Vec<u8>,
+//! }
+//!
+//! // Specify encoding method manually
+//! impl ToJson for TestStruct {
+//!     fn to_json(&self) -> json::Json {
+//!         let mut d = TreeMap::new();
+//!         // All standard types implement `to_json()`, so use it
+//!         d.insert("data_int".to_string(), self.data_int.to_json());
+//!         d.insert("data_str".to_string(), self.data_str.to_json());
+//!         d.insert("data_vector".to_string(), self.data_vector.to_json());
+//!         json::Object(d)
+//!     }
+//! }
+//!
+//! fn main() {
+//!     // Serialize using `ToJson`
+//!     let input_data = TestStruct {
+//!         data_int: 1,
+//!         data_str: "toto".to_string(),
+//!         data_vector: vec![2,3,4,5],
+//!     };
+//!     let json_obj: json::Json = input_data.to_json();
+//!     let json_str: String = json_obj.to_string();
+//!
+//!     // Deserialize like before
+//!     let decoded: TestStruct = json::decode(json_str.as_slice()).unwrap();
+//! }
+//! ```
 
 pub use self::JsonEvent::*;
 pub use self::StackElement::*;
diff --git a/src/libstd/dynamic_lib.rs b/src/libstd/dynamic_lib.rs
index 4b868f6a95b7c..3cd0c0eeaf290 100644
--- a/src/libstd/dynamic_lib.rs
+++ b/src/libstd/dynamic_lib.rs
@@ -8,13 +8,9 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-
-Dynamic library facilities.
-
-A simple wrapper over the platform's dynamic library facilities
-
-*/
+//! Dynamic library facilities.
+//!
+//! A simple wrapper over the platform's dynamic library facilities
 
 #![experimental]
 #![allow(missing_docs)]
diff --git a/src/libstd/fmt.rs b/src/libstd/fmt.rs
index c817e6a806bb7..62ca3483c21b3 100644
--- a/src/libstd/fmt.rs
+++ b/src/libstd/fmt.rs
@@ -10,392 +10,388 @@
 //
 // ignore-lexer-test FIXME #15679
 
-/*!
-
-Utilities for formatting and printing strings
-
-This module contains the runtime support for the `format!` syntax extension.
-This macro is implemented in the compiler to emit calls to this module in order
-to format arguments at runtime into strings and streams.
-
-The functions contained in this module should not normally be used in everyday
-use cases of `format!`. The assumptions made by these functions are unsafe for
-all inputs, and the compiler performs a large amount of validation on the
-arguments to `format!` in order to ensure safety at runtime. While it is
-possible to call these functions directly, it is not recommended to do so in the
-general case.
-
-## Usage
-
-The `format!` macro is intended to be familiar to those coming from C's
-printf/fprintf functions or Python's `str.format` function. In its current
-revision, the `format!` macro returns a `String` type which is the result of
-the formatting. In the future it will also be able to pass in a stream to
-format arguments directly while performing minimal allocations.
-
-Some examples of the `format!` extension are:
-
-```rust
-# fn main() {
-format!("Hello");                  // => "Hello"
-format!("Hello, {}!", "world");    // => "Hello, world!"
-format!("The number is {}", 1i);   // => "The number is 1"
-format!("{}", (3i, 4i));           // => "(3, 4)"
-format!("{value}", value=4i);      // => "4"
-format!("{} {}", 1i, 2u);          // => "1 2"
-# }
-```
-
-From these, you can see that the first argument is a format string. It is
-required by the compiler for this to be a string literal; it cannot be a
-variable passed in (in order to perform validity checking). The compiler will
-then parse the format string and determine if the list of arguments provided is
-suitable to pass to this format string.
-
-### Positional parameters
-
-Each formatting argument is allowed to specify which value argument it's
-referencing, and if omitted it is assumed to be "the next argument". For
-example, the format string `{} {} {}` would take three parameters, and they
-would be formatted in the same order as they're given. The format string
-`{2} {1} {0}`, however, would format arguments in reverse order.
-
-Things can get a little tricky once you start intermingling the two types of
-positional specifiers. The "next argument" specifier can be thought of as an
-iterator over the argument. Each time a "next argument" specifier is seen, the
-iterator advances. This leads to behavior like this:
-
-```rust
-format!("{1} {} {0} {}", 1i, 2i); // => "2 1 1 2"
-```
-
-The internal iterator over the argument has not been advanced by the time the
-first `{}` is seen, so it prints the first argument. Then upon reaching the
-second `{}`, the iterator has advanced forward to the second argument.
-Essentially, parameters which explicitly name their argument do not affect
-parameters which do not name an argument in terms of positional specifiers.
-
-A format string is required to use all of its arguments, otherwise it is a
-compile-time error. You may refer to the same argument more than once in the
-format string, although it must always be referred to with the same type.
-
-### Named parameters
-
-Rust itself does not have a Python-like equivalent of named parameters to a
-function, but the `format!` macro is a syntax extension which allows it to
-leverage named parameters. Named parameters are listed at the end of the
-argument list and have the syntax:
-
-```text
-identifier '=' expression
-```
-
-For example, the following `format!` expressions all use named argument:
-
-```rust
-# fn main() {
-format!("{argument}", argument = "test");   // => "test"
-format!("{name} {}", 1i, name = 2i);        // => "2 1"
-format!("{a} {c} {b}", a="a", b=(), c=3i);  // => "a 3 ()"
-# }
-```
-
-It is illegal to put positional parameters (those without names) after arguments
-which have names. Like with positional parameters, it is illegal to provide
-named parameters that are unused by the format string.
-
-### Argument types
-
-Each argument's type is dictated by the format string. It is a requirement that every argument is
-only ever referred to by one type. For example, this is an invalid format string:
-
-```text
-{0:x} {0:o}
-```
-
-This is invalid because the first argument is both referred to as a hexidecimal as well as an
-octal.
-
-There are various parameters which do require a particular type, however. Namely if the syntax
-`{:.*}` is used, then the number of characters to print precedes the actual object being formatted,
-and the number of characters must have the type `uint`. Although a `uint` can be printed with
-`{}`, it is illegal to reference an argument as such. For example this is another invalid
-format string:
-
-```text
-{:.*} {0}
-```
-
-### Formatting traits
-
-When requesting that an argument be formatted with a particular type, you are
-actually requesting that an argument ascribes to a particular trait. This allows
-multiple actual types to be formatted via `{:x}` (like `i8` as well as `int`).
-The current mapping of types to traits is:
-
-* *nothing* ⇒ `Show`
-* `o` ⇒ `Octal`
-* `x` ⇒ `LowerHex`
-* `X` ⇒ `UpperHex`
-* `p` ⇒ `Pointer`
-* `b` ⇒ `Binary`
-* `e` ⇒ `LowerExp`
-* `E` ⇒ `UpperExp`
-
-What this means is that any type of argument which implements the
-`std::fmt::Binary` trait can then be formatted with `{:b}`. Implementations are
-provided for these traits for a number of primitive types by the standard
-library as well. If no format is specified (as in `{}` or `{:6}`), then the
-format trait used is the `Show` trait. This is one of the more commonly
-implemented traits when formatting a custom type.
-
-When implementing a format trait for your own type, you will have to implement a
-method of the signature:
-
-```rust
-# use std::fmt;
-# struct Foo; // our custom type
-# impl fmt::Show for Foo {
-fn fmt(&self, f: &mut std::fmt::Formatter) -> fmt::Result {
-# write!(f, "testing, testing")
-# } }
-```
-
-Your type will be passed as `self` by-reference, and then the function should
-emit output into the `f.buf` stream. It is up to each format trait
-implementation to correctly adhere to the requested formatting parameters. The
-values of these parameters will be listed in the fields of the `Formatter`
-struct. In order to help with this, the `Formatter` struct also provides some
-helper methods.
-
-Additionally, the return value of this function is `fmt::Result` which is a
-typedef to `Result<(), IoError>` (also known as `IoResult<()>`). Formatting
-implementations should ensure that they return errors from `write!` correctly
-(propagating errors upward).
-
-An example of implementing the formatting traits would look
-like:
-
-```rust
-use std::fmt;
-use std::f64;
-use std::num::Float;
-
-struct Vector2D {
-    x: int,
-    y: int,
-}
-
-impl fmt::Show for Vector2D {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        // The `f` value implements the `Writer` trait, which is what the
-        // write! macro is expecting. Note that this formatting ignores the
-        // various flags provided to format strings.
-        write!(f, "({}, {})", self.x, self.y)
-    }
-}
-
-// Different traits allow different forms of output of a type. The meaning of
-// this format is to print the magnitude of a vector.
-impl fmt::Binary for Vector2D {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        let magnitude = (self.x * self.x + self.y * self.y) as f64;
-        let magnitude = magnitude.sqrt();
-
-        // Respect the formatting flags by using the helper method
-        // `pad_integral` on the Formatter object. See the method documentation
-        // for details, and the function `pad` can be used to pad strings.
-        let decimals = f.precision().unwrap_or(3);
-        let string = f64::to_str_exact(magnitude, decimals);
-        f.pad_integral(true, "", string.as_bytes())
-    }
-}
-
-fn main() {
-    let myvector = Vector2D { x: 3, y: 4 };
-
-    println!("{}", myvector);       // => "(3, 4)"
-    println!("{:10.3b}", myvector); // => "     5.000"
-}
-```
-
-### Related macros
-
-There are a number of related macros in the `format!` family. The ones that are
-currently implemented are:
-
-```ignore
-format!      // described above
-write!       // first argument is a &mut io::Writer, the destination
-writeln!     // same as write but appends a newline
-print!       // the format string is printed to the standard output
-println!     // same as print but appends a newline
-format_args! // described below.
-```
-
-#### `write!`
-
-This and `writeln` are two macros which are used to emit the format string to a
-specified stream. This is used to prevent intermediate allocations of format
-strings and instead directly write the output. Under the hood, this function is
-actually invoking the `write` function defined in this module. Example usage is:
-
-```rust
-# #![allow(unused_must_use)]
-use std::io;
-
-let mut w = Vec::new();
-write!(&mut w as &mut io::Writer, "Hello {}!", "world");
-```
-
-#### `print!`
-
-This and `println` emit their output to stdout. Similarly to the `write!` macro,
-the goal of these macros is to avoid intermediate allocations when printing
-output. Example usage is:
-
-```rust
-print!("Hello {}!", "world");
-println!("I have a newline {}", "character at the end");
-```
-
-#### `format_args!`
-This is a curious macro which is used to safely pass around
-an opaque object describing the format string. This object
-does not require any heap allocations to create, and it only
-references information on the stack. Under the hood, all of
-the related macros are implemented in terms of this. First
-off, some example usage is:
-
-```
-use std::fmt;
-use std::io;
-
-# #[allow(unused_must_use)]
-# fn main() {
-format_args!(fmt::format, "this returns {}", "String");
-
-let some_writer: &mut io::Writer = &mut io::stdout();
-format_args!(|args| { write!(some_writer, "{}", args) }, "print with a {}", "closure");
-
-fn my_fmt_fn(args: &fmt::Arguments) {
-    write!(&mut io::stdout(), "{}", args);
-}
-format_args!(my_fmt_fn, "or a {} too", "function");
-# }
-```
-
-The first argument of the `format_args!` macro is a function (or closure) which
-takes one argument of type `&fmt::Arguments`. This structure can then be
-passed to the `write` and `format` functions inside this module in order to
-process the format string. The goal of this macro is to even further prevent
-intermediate allocations when dealing formatting strings.
-
-For example, a logging library could use the standard formatting syntax, but it
-would internally pass around this structure until it has been determined where
-output should go to.
-
-It is unsafe to programmatically create an instance of `fmt::Arguments` because
-the operations performed when executing a format string require the compile-time
-checks provided by the compiler. The `format_args!` macro is the only method of
-safely creating these structures, but they can be unsafely created with the
-constructor provided.
-
-## Syntax
-
-The syntax for the formatting language used is drawn from other languages, so it
-should not be too alien. Arguments are formatted with python-like syntax,
-meaning that arguments are surrounded by `{}` instead of the C-like `%`. The
-actual grammar for the formatting syntax is:
-
-```text
-format_string := <text> [ format <text> ] *
-format := '{' [ argument ] [ ':' format_spec ] '}'
-argument := integer | identifier
-
-format_spec := [[fill]align][sign]['#'][0][width]['.' precision][type]
-fill := character
-align := '<' | '^' | '>'
-sign := '+' | '-'
-width := count
-precision := count | '*'
-type := identifier | ''
-count := parameter | integer
-parameter := integer '$'
-```
-
-## Formatting Parameters
-
-Each argument being formatted can be transformed by a number of formatting
-parameters (corresponding to `format_spec` in the syntax above). These
-parameters affect the string representation of what's being formatted. This
-syntax draws heavily from Python's, so it may seem a bit familiar.
-
-### Fill/Alignment
-
-The fill character is provided normally in conjunction with the `width`
-parameter. This indicates that if the value being formatted is smaller than
-`width` some extra characters will be printed around it. The extra characters
-are specified by `fill`, and the alignment can be one of two options:
-
-* `<` - the argument is left-aligned in `width` columns
-* `^` - the argument is center-aligned in `width` columns
-* `>` - the argument is right-aligned in `width` columns
-
-### Sign/#/0
-
-These can all be interpreted as flags for a particular formatter.
-
-* '+' - This is intended for numeric types and indicates that the sign should
-        always be printed. Positive signs are never printed by default, and the
-        negative sign is only printed by default for the `Signed` trait. This
-        flag indicates that the correct sign (+ or -) should always be printed.
-* '-' - Currently not used
-* '#' - This flag is indicates that the "alternate" form of printing should be
-        used. By default, this only applies to the integer formatting traits and
-        performs like:
-    * `x` - precedes the argument with a "0x"
-    * `X` - precedes the argument with a "0x"
-    * `t` - precedes the argument with a "0b"
-    * `o` - precedes the argument with a "0o"
-* '0' - This is used to indicate for integer formats that the padding should
-        both be done with a `0` character as well as be sign-aware. A format
-        like `{:08d}` would yield `00000001` for the integer `1`, while the same
-        format would yield `-0000001` for the integer `-1`. Notice that the
-        negative version has one fewer zero than the positive version.
-
-### Width
-
-This is a parameter for the "minimum width" that the format should take up. If
-the value's string does not fill up this many characters, then the padding
-specified by fill/alignment will be used to take up the required space.
-
-The default fill/alignment for non-numerics is a space and left-aligned. The
-defaults for numeric formatters is also a space but with right-alignment. If the
-'0' flag is specified for numerics, then the implicit fill character is '0'.
-
-The value for the width can also be provided as a `uint` in the list of
-parameters by using the `2$` syntax indicating that the second argument is a
-`uint` specifying the width.
-
-### Precision
-
-For non-numeric types, this can be considered a "maximum width". If the
-resulting string is longer than this width, then it is truncated down to this
-many characters and only those are emitted.
-
-For integral types, this has no meaning currently.
-
-For floating-point types, this indicates how many digits after the decimal point
-should be printed.
-
-## Escaping
-
-The literal characters `{` and `}` may be included in a string by preceding them
-with the same character. For example, the `{` character is escaped with `{{` and
-the `}` character is escaped with `}}`.
-
-*/
+//! Utilities for formatting and printing strings
+//!
+//! This module contains the runtime support for the `format!` syntax extension.
+//! This macro is implemented in the compiler to emit calls to this module in order
+//! to format arguments at runtime into strings and streams.
+//!
+//! The functions contained in this module should not normally be used in everyday
+//! use cases of `format!`. The assumptions made by these functions are unsafe for
+//! all inputs, and the compiler performs a large amount of validation on the
+//! arguments to `format!` in order to ensure safety at runtime. While it is
+//! possible to call these functions directly, it is not recommended to do so in the
+//! general case.
+//!
+//! ## Usage
+//!
+//! The `format!` macro is intended to be familiar to those coming from C's
+//! printf/fprintf functions or Python's `str.format` function. In its current
+//! revision, the `format!` macro returns a `String` type which is the result of
+//! the formatting. In the future it will also be able to pass in a stream to
+//! format arguments directly while performing minimal allocations.
+//!
+//! Some examples of the `format!` extension are:
+//!
+//! ```rust
+//! # fn main() {
+//! format!("Hello");                  // => "Hello"
+//! format!("Hello, {}!", "world");    // => "Hello, world!"
+//! format!("The number is {}", 1i);   // => "The number is 1"
+//! format!("{}", (3i, 4i));           // => "(3, 4)"
+//! format!("{value}", value=4i);      // => "4"
+//! format!("{} {}", 1i, 2u);          // => "1 2"
+//! # }
+//! ```
+//!
+//! From these, you can see that the first argument is a format string. It is
+//! required by the compiler for this to be a string literal; it cannot be a
+//! variable passed in (in order to perform validity checking). The compiler will
+//! then parse the format string and determine if the list of arguments provided is
+//! suitable to pass to this format string.
+//!
+//! ### Positional parameters
+//!
+//! Each formatting argument is allowed to specify which value argument it's
+//! referencing, and if omitted it is assumed to be "the next argument". For
+//! example, the format string `{} {} {}` would take three parameters, and they
+//! would be formatted in the same order as they're given. The format string
+//! `{2} {1} {0}`, however, would format arguments in reverse order.
+//!
+//! Things can get a little tricky once you start intermingling the two types of
+//! positional specifiers. The "next argument" specifier can be thought of as an
+//! iterator over the argument. Each time a "next argument" specifier is seen, the
+//! iterator advances. This leads to behavior like this:
+//!
+//! ```rust
+//! format!("{1} {} {0} {}", 1i, 2i); // => "2 1 1 2"
+//! ```
+//!
+//! The internal iterator over the argument has not been advanced by the time the
+//! first `{}` is seen, so it prints the first argument. Then upon reaching the
+//! second `{}`, the iterator has advanced forward to the second argument.
+//! Essentially, parameters which explicitly name their argument do not affect
+//! parameters which do not name an argument in terms of positional specifiers.
+//!
+//! A format string is required to use all of its arguments, otherwise it is a
+//! compile-time error. You may refer to the same argument more than once in the
+//! format string, although it must always be referred to with the same type.
+//!
+//! ### Named parameters
+//!
+//! Rust itself does not have a Python-like equivalent of named parameters to a
+//! function, but the `format!` macro is a syntax extension which allows it to
+//! leverage named parameters. Named parameters are listed at the end of the
+//! argument list and have the syntax:
+//!
+//! ```text
+//! identifier '=' expression
+//! ```
+//!
+//! For example, the following `format!` expressions all use named arguments:
+//!
+//! ```rust
+//! # fn main() {
+//! format!("{argument}", argument = "test");   // => "test"
+//! format!("{name} {}", 1i, name = 2i);        // => "2 1"
+//! format!("{a} {c} {b}", a="a", b=(), c=3i);  // => "a 3 ()"
+//! # }
+//! ```
+//!
+//! It is illegal to put positional parameters (those without names) after arguments
+//! which have names. Like with positional parameters, it is illegal to provide
+//! named parameters that are unused by the format string.
+//!
+//! ### Argument types
+//!
+//! Each argument's type is dictated by the format string. It is a requirement that every argument is
+//! only ever referred to by one type. For example, this is an invalid format string:
+//!
+//! ```text
+//! {0:x} {0:o}
+//! ```
+//!
+//! This is invalid because the first argument is both referred to as a hexadecimal as well as an
+//! octal.
+//!
+//! There are various parameters which do require a particular type, however. Namely if the syntax
+//! `{:.*}` is used, then the number of characters to print precedes the actual object being formatted,
+//! and the number of characters must have the type `uint`. Although a `uint` can be printed with
+//! `{}`, it is illegal to reference an argument as such. For example this is another invalid
+//! format string:
+//!
+//! ```text
+//! {:.*} {0}
+//! ```
+//!
+//! ### Formatting traits
+//!
+//! When requesting that an argument be formatted with a particular type, you are
+//! actually requesting that an argument ascribes to a particular trait. This allows
+//! multiple actual types to be formatted via `{:x}` (like `i8` as well as `int`).
+//! The current mapping of types to traits is:
+//!
+//! * *nothing* ⇒ `Show`
+//! * `o` ⇒ `Octal`
+//! * `x` ⇒ `LowerHex`
+//! * `X` ⇒ `UpperHex`
+//! * `p` ⇒ `Pointer`
+//! * `b` ⇒ `Binary`
+//! * `e` ⇒ `LowerExp`
+//! * `E` ⇒ `UpperExp`
+//!
+//! What this means is that any type of argument which implements the
+//! `std::fmt::Binary` trait can then be formatted with `{:b}`. Implementations are
+//! provided for these traits for a number of primitive types by the standard
+//! library as well. If no format is specified (as in `{}` or `{:6}`), then the
+//! format trait used is the `Show` trait. This is one of the more commonly
+//! implemented traits when formatting a custom type.
+//!
+//! When implementing a format trait for your own type, you will have to implement a
+//! method of the signature:
+//!
+//! ```rust
+//! # use std::fmt;
+//! # struct Foo; // our custom type
+//! # impl fmt::Show for Foo {
+//! fn fmt(&self, f: &mut std::fmt::Formatter) -> fmt::Result {
+//! # write!(f, "testing, testing")
+//! # } }
+//! ```
+//!
+//! Your type will be passed as `self` by-reference, and then the function should
+//! emit output into the `f.buf` stream. It is up to each format trait
+//! implementation to correctly adhere to the requested formatting parameters. The
+//! values of these parameters will be listed in the fields of the `Formatter`
+//! struct. In order to help with this, the `Formatter` struct also provides some
+//! helper methods.
+//!
+//! Additionally, the return value of this function is `fmt::Result` which is a
+//! typedef to `Result<(), IoError>` (also known as `IoResult<()>`). Formatting
+//! implementations should ensure that they return errors from `write!` correctly
+//! (propagating errors upward).
+//!
+//! An example of implementing the formatting traits would look
+//! like:
+//!
+//! ```rust
+//! use std::fmt;
+//! use std::f64;
+//! use std::num::Float;
+//!
+//! struct Vector2D {
+//!     x: int,
+//!     y: int,
+//! }
+//!
+//! impl fmt::Show for Vector2D {
+//!     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+//!         // The `f` value implements the `Writer` trait, which is what the
+//!         // write! macro is expecting. Note that this formatting ignores the
+//!         // various flags provided to format strings.
+//!         write!(f, "({}, {})", self.x, self.y)
+//!     }
+//! }
+//!
+//! // Different traits allow different forms of output of a type. The meaning of
+//! // this format is to print the magnitude of a vector.
+//! impl fmt::Binary for Vector2D {
+//!     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+//!         let magnitude = (self.x * self.x + self.y * self.y) as f64;
+//!         let magnitude = magnitude.sqrt();
+//!
+//!         // Respect the formatting flags by using the helper method
+//!         // `pad_integral` on the Formatter object. See the method documentation
+//!         // for details, and the function `pad` can be used to pad strings.
+//!         let decimals = f.precision().unwrap_or(3);
+//!         let string = f64::to_str_exact(magnitude, decimals);
+//!         f.pad_integral(true, "", string.as_bytes())
+//!     }
+//! }
+//!
+//! fn main() {
+//!     let myvector = Vector2D { x: 3, y: 4 };
+//!
+//!     println!("{}", myvector);       // => "(3, 4)"
+//!     println!("{:10.3b}", myvector); // => "     5.000"
+//! }
+//! ```
+//!
+//! ### Related macros
+//!
+//! There are a number of related macros in the `format!` family. The ones that are
+//! currently implemented are:
+//!
+//! ```ignore
+//! format!      // described above
+//! write!       // first argument is a &mut io::Writer, the destination
+//! writeln!     // same as write but appends a newline
+//! print!       // the format string is printed to the standard output
+//! println!     // same as print but appends a newline
+//! format_args! // described below.
+//! ```
+//!
+//! #### `write!`
+//!
+//! This and `writeln` are two macros which are used to emit the format string to a
+//! specified stream. This is used to prevent intermediate allocations of format
+//! strings and instead directly write the output. Under the hood, this function is
+//! actually invoking the `write` function defined in this module. Example usage is:
+//!
+//! ```rust
+//! # #![allow(unused_must_use)]
+//! use std::io;
+//!
+//! let mut w = Vec::new();
+//! write!(&mut w as &mut io::Writer, "Hello {}!", "world");
+//! ```
+//!
+//! #### `print!`
+//!
+//! This and `println` emit their output to stdout. Similarly to the `write!` macro,
+//! the goal of these macros is to avoid intermediate allocations when printing
+//! output. Example usage is:
+//!
+//! ```rust
+//! print!("Hello {}!", "world");
+//! println!("I have a newline {}", "character at the end");
+//! ```
+//!
+//! #### `format_args!`
+//! This is a curious macro which is used to safely pass around
+//! an opaque object describing the format string. This object
+//! does not require any heap allocations to create, and it only
+//! references information on the stack. Under the hood, all of
+//! the related macros are implemented in terms of this. First
+//! off, some example usage is:
+//!
+//! ```
+//! use std::fmt;
+//! use std::io;
+//!
+//! # #[allow(unused_must_use)]
+//! # fn main() {
+//! format_args!(fmt::format, "this returns {}", "String");
+//!
+//! let some_writer: &mut io::Writer = &mut io::stdout();
+//! format_args!(|args| { write!(some_writer, "{}", args) }, "print with a {}", "closure");
+//!
+//! fn my_fmt_fn(args: &fmt::Arguments) {
+//!     write!(&mut io::stdout(), "{}", args);
+//! }
+//! format_args!(my_fmt_fn, "or a {} too", "function");
+//! # }
+//! ```
+//!
+//! The first argument of the `format_args!` macro is a function (or closure) which
+//! takes one argument of type `&fmt::Arguments`. This structure can then be
+//! passed to the `write` and `format` functions inside this module in order to
+//! process the format string. The goal of this macro is to even further prevent
+//! intermediate allocations when dealing with formatting strings.
+//!
+//! For example, a logging library could use the standard formatting syntax, but it
+//! would internally pass around this structure until it has been determined where
+//! output should go to.
+//!
+//! It is unsafe to programmatically create an instance of `fmt::Arguments` because
+//! the operations performed when executing a format string require the compile-time
+//! checks provided by the compiler. The `format_args!` macro is the only method of
+//! safely creating these structures, but they can be unsafely created with the
+//! constructor provided.
+//!
+//! ## Syntax
+//!
+//! The syntax for the formatting language used is drawn from other languages, so it
+//! should not be too alien. Arguments are formatted with python-like syntax,
+//! meaning that arguments are surrounded by `{}` instead of the C-like `%`. The
+//! actual grammar for the formatting syntax is:
+//!
+//! ```text
+//! format_string := <text> [ format <text> ] *
+//! format := '{' [ argument ] [ ':' format_spec ] '}'
+//! argument := integer | identifier
+//!
+//! format_spec := [[fill]align][sign]['#'][0][width]['.' precision][type]
+//! fill := character
+//! align := '<' | '^' | '>'
+//! sign := '+' | '-'
+//! width := count
+//! precision := count | '*'
+//! type := identifier | ''
+//! count := parameter | integer
+//! parameter := integer '$'
+//! ```
+//!
+//! ## Formatting Parameters
+//!
+//! Each argument being formatted can be transformed by a number of formatting
+//! parameters (corresponding to `format_spec` in the syntax above). These
+//! parameters affect the string representation of what's being formatted. This
+//! syntax draws heavily from Python's, so it may seem a bit familiar.
+//!
+//! ### Fill/Alignment
+//!
+//! The fill character is provided normally in conjunction with the `width`
+//! parameter. This indicates that if the value being formatted is smaller than
+//! `width` some extra characters will be printed around it. The extra characters
+//! are specified by `fill`, and the alignment can be one of three options:
+//!
+//! * `<` - the argument is left-aligned in `width` columns
+//! * `^` - the argument is center-aligned in `width` columns
+//! * `>` - the argument is right-aligned in `width` columns
+//!
+//! ### Sign/#/0
+//!
+//! These can all be interpreted as flags for a particular formatter.
+//!
+//! * '+' - This is intended for numeric types and indicates that the sign should
+//!         always be printed. Positive signs are never printed by default, and the
+//!         negative sign is only printed by default for the `Signed` trait. This
+//!         flag indicates that the correct sign (+ or -) should always be printed.
+//! * '-' - Currently not used
+//! * '#' - This flag indicates that the "alternate" form of printing should be
+//!         used. By default, this only applies to the integer formatting traits and
+//!         performs like:
+//!     * `x` - precedes the argument with a "0x"
+//!     * `X` - precedes the argument with a "0x"
+//!     * `t` - precedes the argument with a "0b"
+//!     * `o` - precedes the argument with a "0o"
+//! * '0' - This is used to indicate for integer formats that the padding should
+//!         both be done with a `0` character as well as be sign-aware. A format
+//!         like `{:08d}` would yield `00000001` for the integer `1`, while the same
+//!         format would yield `-0000001` for the integer `-1`. Notice that the
+//!         negative version has one fewer zero than the positive version.
+//!
+//! ### Width
+//!
+//! This is a parameter for the "minimum width" that the format should take up. If
+//! the value's string does not fill up this many characters, then the padding
+//! specified by fill/alignment will be used to take up the required space.
+//!
+//! The default fill/alignment for non-numerics is a space and left-aligned. The
+//! defaults for numeric formatters is also a space but with right-alignment. If the
+//! '0' flag is specified for numerics, then the implicit fill character is '0'.
+//!
+//! The value for the width can also be provided as a `uint` in the list of
+//! parameters by using the `2$` syntax indicating that the second argument is a
+//! `uint` specifying the width.
+//!
+//! ### Precision
+//!
+//! For non-numeric types, this can be considered a "maximum width". If the
+//! resulting string is longer than this width, then it is truncated down to this
+//! many characters and only those are emitted.
+//!
+//! For integral types, this has no meaning currently.
+//!
+//! For floating-point types, this indicates how many digits after the decimal point
+//! should be printed.
+//!
+//! ## Escaping
+//!
+//! The literal characters `{` and `}` may be included in a string by preceding them
+//! with the same character. For example, the `{` character is escaped with `{{` and
+//! the `}` character is escaped with `}}`.
 
 #![experimental]
 
diff --git a/src/libstd/hash.rs b/src/libstd/hash.rs
index e4017ea5a47fa..ac68e1ef121fb 100644
--- a/src/libstd/hash.rs
+++ b/src/libstd/hash.rs
@@ -8,58 +8,56 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
- * Generic hashing support.
- *
- * This module provides a generic way to compute the hash of a value. The
- * simplest way to make a type hashable is to use `#[deriving(Hash)]`:
- *
- * # Example
- *
- * ```rust
- * use std::hash;
- * use std::hash::Hash;
- *
- * #[deriving(Hash)]
- * struct Person {
- *     id: uint,
- *     name: String,
- *     phone: u64,
- * }
- *
- * let person1 = Person { id: 5, name: "Janet".to_string(), phone: 555_666_7777 };
- * let person2 = Person { id: 5, name: "Bob".to_string(), phone: 555_666_7777 };
- *
- * assert!(hash::hash(&person1) != hash::hash(&person2));
- * ```
- *
- * If you need more control over how a value is hashed, you need to implement
- * the trait `Hash`:
- *
- * ```rust
- * use std::hash;
- * use std::hash::Hash;
- * use std::hash::sip::SipState;
- *
- * struct Person {
- *     id: uint,
- *     name: String,
- *     phone: u64,
- * }
- *
- * impl Hash for Person {
- *     fn hash(&self, state: &mut SipState) {
- *         self.id.hash(state);
- *         self.phone.hash(state);
- *     }
- * }
- *
- * let person1 = Person { id: 5, name: "Janet".to_string(), phone: 555_666_7777 };
- * let person2 = Person { id: 5, name: "Bob".to_string(), phone: 555_666_7777 };
- *
- * assert!(hash::hash(&person1) == hash::hash(&person2));
- * ```
- */
+//! Generic hashing support.
+//!
+//! This module provides a generic way to compute the hash of a value. The
+//! simplest way to make a type hashable is to use `#[deriving(Hash)]`:
+//!
+//! # Example
+//!
+//! ```rust
+//! use std::hash;
+//! use std::hash::Hash;
+//!
+//! #[deriving(Hash)]
+//! struct Person {
+//!     id: uint,
+//!     name: String,
+//!     phone: u64,
+//! }
+//!
+//! let person1 = Person { id: 5, name: "Janet".to_string(), phone: 555_666_7777 };
+//! let person2 = Person { id: 5, name: "Bob".to_string(), phone: 555_666_7777 };
+//!
+//! assert!(hash::hash(&person1) != hash::hash(&person2));
+//! ```
+//!
+//! If you need more control over how a value is hashed, you need to implement
+//! the trait `Hash`:
+//!
+//! ```rust
+//! use std::hash;
+//! use std::hash::Hash;
+//! use std::hash::sip::SipState;
+//!
+//! struct Person {
+//!     id: uint,
+//!     name: String,
+//!     phone: u64,
+//! }
+//!
+//! impl Hash for Person {
+//!     fn hash(&self, state: &mut SipState) {
+//!         self.id.hash(state);
+//!         self.phone.hash(state);
+//!     }
+//! }
+//!
+//! let person1 = Person { id: 5, name: "Janet".to_string(), phone: 555_666_7777 };
+//! let person2 = Person { id: 5, name: "Bob".to_string(), phone: 555_666_7777 };
+//!
+//! assert!(hash::hash(&person1) == hash::hash(&person2));
+//! ```
 
 #![experimental]
 
diff --git a/src/libstd/io/fs.rs b/src/libstd/io/fs.rs
index 6d29f3d25382c..da69cee69e650 100644
--- a/src/libstd/io/fs.rs
+++ b/src/libstd/io/fs.rs
@@ -10,47 +10,45 @@
 //
 // ignore-lexer-test FIXME #15679
 
-/*! Synchronous File I/O
-
-This module provides a set of functions and traits for working
-with regular files & directories on a filesystem.
-
-At the top-level of the module are a set of freestanding functions, associated
-with various filesystem operations. They all operate on `Path` objects.
-
-All operations in this module, including those as part of `File` et al
-block the task during execution. In the event of failure, all functions/methods
-will return an `IoResult` type with an `Err` value.
-
-Also included in this module is an implementation block on the `Path` object
-defined in `std::path::Path`. The impl adds useful methods about inspecting the
-metadata of a file. This includes getting the `stat` information, reading off
-particular bits of it, etc.
-
-# Example
-
-```rust
-# #![allow(unused_must_use)]
-use std::io::fs::PathExtensions;
-use std::io::{File, fs};
-
-let path = Path::new("foo.txt");
-
-// create the file, whether it exists or not
-let mut file = File::create(&path);
-file.write(b"foobar");
-# drop(file);
-
-// open the file in read-only mode
-let mut file = File::open(&path);
-file.read_to_end();
-
-println!("{}", path.stat().unwrap().size);
-# drop(file);
-fs::unlink(&path);
-```
-
-*/
+//! Synchronous File I/O
+//!
+//! This module provides a set of functions and traits for working
+//! with regular files & directories on a filesystem.
+//!
+//! At the top-level of the module are a set of freestanding functions, associated
+//! with various filesystem operations. They all operate on `Path` objects.
+//!
+//! All operations in this module, including those as part of `File` et al
+//! block the task during execution. In the event of failure, all functions/methods
+//! will return an `IoResult` type with an `Err` value.
+//!
+//! Also included in this module is an implementation block on the `Path` object
+//! defined in `std::path::Path`. The impl adds useful methods about inspecting the
+//! metadata of a file. This includes getting the `stat` information, reading off
+//! particular bits of it, etc.
+//!
+//! # Example
+//!
+//! ```rust
+//! # #![allow(unused_must_use)]
+//! use std::io::fs::PathExtensions;
+//! use std::io::{File, fs};
+//!
+//! let path = Path::new("foo.txt");
+//!
+//! // create the file, whether it exists or not
+//! let mut file = File::create(&path);
+//! file.write(b"foobar");
+//! # drop(file);
+//!
+//! // open the file in read-only mode
+//! let mut file = File::open(&path);
+//! file.read_to_end();
+//!
+//! println!("{}", path.stat().unwrap().size);
+//! # drop(file);
+//! fs::unlink(&path);
+//! ```
 
 use clone::Clone;
 use io::standard_error;
diff --git a/src/libstd/io/mod.rs b/src/libstd/io/mod.rs
index a25674030aeac..fc6ee58346dec 100644
--- a/src/libstd/io/mod.rs
+++ b/src/libstd/io/mod.rs
@@ -16,207 +16,205 @@
 //        error handling
 
 
-/*! I/O, including files, networking, timers, and processes
-
-`std::io` provides Rust's basic I/O types,
-for reading and writing to files, TCP, UDP,
-and other types of sockets and pipes,
-manipulating the file system, spawning processes.
-
-# Examples
-
-Some examples of obvious things you might want to do
-
-* Read lines from stdin
-
-    ```rust
-    use std::io;
-
-    for line in io::stdin().lines() {
-        print!("{}", line.unwrap());
-    }
-    ```
-
-* Read a complete file
-
-    ```rust
-    use std::io::File;
-
-    let contents = File::open(&Path::new("message.txt")).read_to_end();
-    ```
-
-* Write a line to a file
-
-    ```rust
-    # #![allow(unused_must_use)]
-    use std::io::File;
-
-    let mut file = File::create(&Path::new("message.txt"));
-    file.write(b"hello, file!\n");
-    # drop(file);
-    # ::std::io::fs::unlink(&Path::new("message.txt"));
-    ```
-
-* Iterate over the lines of a file
-
-    ```rust,no_run
-    use std::io::BufferedReader;
-    use std::io::File;
-
-    let path = Path::new("message.txt");
-    let mut file = BufferedReader::new(File::open(&path));
-    for line in file.lines() {
-        print!("{}", line.unwrap());
-    }
-    ```
-
-* Pull the lines of a file into a vector of strings
-
-    ```rust,no_run
-    use std::io::BufferedReader;
-    use std::io::File;
-
-    let path = Path::new("message.txt");
-    let mut file = BufferedReader::new(File::open(&path));
-    let lines: Vec<String> = file.lines().map(|x| x.unwrap()).collect();
-    ```
-
-* Make a simple TCP client connection and request
-
-    ```rust
-    # #![allow(unused_must_use)]
-    use std::io::TcpStream;
-
-    # // connection doesn't fail if a server is running on 8080
-    # // locally, we still want to be type checking this code, so lets
-    # // just stop it running (#11576)
-    # if false {
-    let mut socket = TcpStream::connect("127.0.0.1:8080").unwrap();
-    socket.write(b"GET / HTTP/1.0\n\n");
-    let response = socket.read_to_end();
-    # }
-    ```
-
-* Make a simple TCP server
-
-    ```rust
-    # fn main() { }
-    # fn foo() {
-    # #![allow(dead_code)]
-    use std::io::{TcpListener, TcpStream};
-    use std::io::{Acceptor, Listener};
-
-    let listener = TcpListener::bind("127.0.0.1:80");
-
-    // bind the listener to the specified address
-    let mut acceptor = listener.listen();
-
-    fn handle_client(mut stream: TcpStream) {
-        // ...
-    # &mut stream; // silence unused mutability/variable warning
-    }
-    // accept connections and process them, spawning a new tasks for each one
-    for stream in acceptor.incoming() {
-        match stream {
-            Err(e) => { /* connection failed */ }
-            Ok(stream) => spawn(proc() {
-                // connection succeeded
-                handle_client(stream)
-            })
-        }
-    }
-
-    // close the socket server
-    drop(acceptor);
-    # }
-    ```
-
-
-# Error Handling
-
-I/O is an area where nearly every operation can result in unexpected
-errors. Errors should be painfully visible when they happen, and handling them
-should be easy to work with. It should be convenient to handle specific I/O
-errors, and it should also be convenient to not deal with I/O errors.
-
-Rust's I/O employs a combination of techniques to reduce boilerplate
-while still providing feedback about errors. The basic strategy:
-
-* All I/O operations return `IoResult<T>` which is equivalent to
-  `Result<T, IoError>`. The `Result` type is defined in the `std::result`
-  module.
-* If the `Result` type goes unused, then the compiler will by default emit a
-  warning about the unused result. This is because `Result` has the
-  `#[must_use]` attribute.
-* Common traits are implemented for `IoResult`, e.g.
-  `impl<R: Reader> Reader for IoResult<R>`, so that error values do not have
-  to be 'unwrapped' before use.
-
-These features combine in the API to allow for expressions like
-`File::create(&Path::new("diary.txt")).write(b"Met a girl.\n")`
-without having to worry about whether "diary.txt" exists or whether
-the write succeeds. As written, if either `new` or `write_line`
-encounters an error then the result of the entire expression will
-be an error.
-
-If you wanted to handle the error though you might write:
-
-```rust
-# #![allow(unused_must_use)]
-use std::io::File;
-
-match File::create(&Path::new("diary.txt")).write(b"Met a girl.\n") {
-    Ok(()) => (), // succeeded
-    Err(e) => println!("failed to write to my diary: {}", e),
-}
-
-# ::std::io::fs::unlink(&Path::new("diary.txt"));
-```
-
-So what actually happens if `create` encounters an error?
-It's important to know that what `new` returns is not a `File`
-but an `IoResult<File>`.  If the file does not open, then `new` will simply
-return `Err(..)`. Because there is an implementation of `Writer` (the trait
-required ultimately required for types to implement `write_line`) there is no
-need to inspect or unwrap the `IoResult<File>` and we simply call `write_line`
-on it. If `new` returned an `Err(..)` then the followup call to `write_line`
-will also return an error.
-
-## `try!`
-
-Explicit pattern matching on `IoResult`s can get quite verbose, especially
-when performing many I/O operations. Some examples (like those above) are
-alleviated with extra methods implemented on `IoResult`, but others have more
-complex interdependencies among each I/O operation.
-
-The `try!` macro from `std::macros` is provided as a method of early-return
-inside `Result`-returning functions. It expands to an early-return on `Err`
-and otherwise unwraps the contained `Ok` value.
-
-If you wanted to read several `u32`s from a file and return their product:
-
-```rust
-use std::io::{File, IoResult};
-
-fn file_product(p: &Path) -> IoResult<u32> {
-    let mut f = File::open(p);
-    let x1 = try!(f.read_le_u32());
-    let x2 = try!(f.read_le_u32());
-
-    Ok(x1 * x2)
-}
-
-match file_product(&Path::new("numbers.bin")) {
-    Ok(x) => println!("{}", x),
-    Err(e) => println!("Failed to read numbers!")
-}
-```
-
-With `try!` in `file_product`, each `read_le_u32` need not be directly
-concerned with error handling; instead its caller is responsible for
-responding to errors that may occur while attempting to read the numbers.
-
-*/
+//! I/O, including files, networking, timers, and processes
+//!
+//! `std::io` provides Rust's basic I/O types,
+//! for reading and writing to files, TCP, UDP,
+//! and other types of sockets and pipes,
+//! manipulating the file system, spawning processes.
+//!
+//! # Examples
+//!
+//! Some examples of obvious things you might want to do
+//!
+//! * Read lines from stdin
+//!
+//!     ```rust
+//!     use std::io;
+//!
+//!     for line in io::stdin().lines() {
+//!         print!("{}", line.unwrap());
+//!     }
+//!     ```
+//!
+//! * Read a complete file
+//!
+//!     ```rust
+//!     use std::io::File;
+//!
+//!     let contents = File::open(&Path::new("message.txt")).read_to_end();
+//!     ```
+//!
+//! * Write a line to a file
+//!
+//!     ```rust
+//!     # #![allow(unused_must_use)]
+//!     use std::io::File;
+//!
+//!     let mut file = File::create(&Path::new("message.txt"));
+//!     file.write(b"hello, file!\n");
+//!     # drop(file);
+//!     # ::std::io::fs::unlink(&Path::new("message.txt"));
+//!     ```
+//!
+//! * Iterate over the lines of a file
+//!
+//!     ```rust,no_run
+//!     use std::io::BufferedReader;
+//!     use std::io::File;
+//!
+//!     let path = Path::new("message.txt");
+//!     let mut file = BufferedReader::new(File::open(&path));
+//!     for line in file.lines() {
+//!         print!("{}", line.unwrap());
+//!     }
+//!     ```
+//!
+//! * Pull the lines of a file into a vector of strings
+//!
+//!     ```rust,no_run
+//!     use std::io::BufferedReader;
+//!     use std::io::File;
+//!
+//!     let path = Path::new("message.txt");
+//!     let mut file = BufferedReader::new(File::open(&path));
+//!     let lines: Vec<String> = file.lines().map(|x| x.unwrap()).collect();
+//!     ```
+//!
+//! * Make a simple TCP client connection and request
+//!
+//!     ```rust
+//!     # #![allow(unused_must_use)]
+//!     use std::io::TcpStream;
+//!
+//!     # // connection doesn't fail if a server is running on 8080
+//!     # // locally, we still want to be type checking this code, so lets
+//!     # // just stop it running (#11576)
+//!     # if false {
+//!     let mut socket = TcpStream::connect("127.0.0.1:8080").unwrap();
+//!     socket.write(b"GET / HTTP/1.0\n\n");
+//!     let response = socket.read_to_end();
+//!     # }
+//!     ```
+//!
+//! * Make a simple TCP server
+//!
+//!     ```rust
+//!     # fn main() { }
+//!     # fn foo() {
+//!     # #![allow(dead_code)]
+//!     use std::io::{TcpListener, TcpStream};
+//!     use std::io::{Acceptor, Listener};
+//!
+//!     let listener = TcpListener::bind("127.0.0.1:80");
+//!
+//!     // bind the listener to the specified address
+//!     let mut acceptor = listener.listen();
+//!
+//!     fn handle_client(mut stream: TcpStream) {
+//!         // ...
+//!     # &mut stream; // silence unused mutability/variable warning
+//!     }
+//!     // accept connections and process them, spawning a new task for each one
+//!     for stream in acceptor.incoming() {
+//!         match stream {
+//!             Err(e) => { /* connection failed */ }
+//!             Ok(stream) => spawn(proc() {
+//!                 // connection succeeded
+//!                 handle_client(stream)
+//!             })
+//!         }
+//!     }
+//!
+//!     // close the socket server
+//!     drop(acceptor);
+//!     # }
+//!     ```
+//!
+//!
+//! # Error Handling
+//!
+//! I/O is an area where nearly every operation can result in unexpected
+//! errors. Errors should be painfully visible when they happen, and handling them
+//! should be easy to work with. It should be convenient to handle specific I/O
+//! errors, and it should also be convenient to not deal with I/O errors.
+//!
+//! Rust's I/O employs a combination of techniques to reduce boilerplate
+//! while still providing feedback about errors. The basic strategy:
+//!
+//! * All I/O operations return `IoResult<T>` which is equivalent to
+//!   `Result<T, IoError>`. The `Result` type is defined in the `std::result`
+//!   module.
+//! * If the `Result` type goes unused, then the compiler will by default emit a
+//!   warning about the unused result. This is because `Result` has the
+//!   `#[must_use]` attribute.
+//! * Common traits are implemented for `IoResult`, e.g.
+//!   `impl<R: Reader> Reader for IoResult<R>`, so that error values do not have
+//!   to be 'unwrapped' before use.
+//!
+//! These features combine in the API to allow for expressions like
+//! `File::create(&Path::new("diary.txt")).write(b"Met a girl.\n")`
+//! without having to worry about whether "diary.txt" exists or whether
+//! the write succeeds. As written, if either `new` or `write_line`
+//! encounters an error then the result of the entire expression will
+//! be an error.
+//!
+//! If you wanted to handle the error though you might write:
+//!
+//! ```rust
+//! # #![allow(unused_must_use)]
+//! use std::io::File;
+//!
+//! match File::create(&Path::new("diary.txt")).write(b"Met a girl.\n") {
+//!     Ok(()) => (), // succeeded
+//!     Err(e) => println!("failed to write to my diary: {}", e),
+//! }
+//!
+//! # ::std::io::fs::unlink(&Path::new("diary.txt"));
+//! ```
+//!
+//! So what actually happens if `create` encounters an error?
+//! It's important to know that what `new` returns is not a `File`
+//! but an `IoResult<File>`.  If the file does not open, then `new` will simply
+//! return `Err(..)`. Because there is an implementation of `Writer` (the trait
+//! ultimately required for types to implement `write_line`) there is no
+//! need to inspect or unwrap the `IoResult<File>` and we simply call `write_line`
+//! on it. If `new` returned an `Err(..)` then the followup call to `write_line`
+//! will also return an error.
+//!
+//! ## `try!`
+//!
+//! Explicit pattern matching on `IoResult`s can get quite verbose, especially
+//! when performing many I/O operations. Some examples (like those above) are
+//! alleviated with extra methods implemented on `IoResult`, but others have more
+//! complex interdependencies among each I/O operation.
+//!
+//! The `try!` macro from `std::macros` is provided as a method of early-return
+//! inside `Result`-returning functions. It expands to an early-return on `Err`
+//! and otherwise unwraps the contained `Ok` value.
+//!
+//! If you wanted to read several `u32`s from a file and return their product:
+//!
+//! ```rust
+//! use std::io::{File, IoResult};
+//!
+//! fn file_product(p: &Path) -> IoResult<u32> {
+//!     let mut f = File::open(p);
+//!     let x1 = try!(f.read_le_u32());
+//!     let x2 = try!(f.read_le_u32());
+//!
+//!     Ok(x1 * x2)
+//! }
+//!
+//! match file_product(&Path::new("numbers.bin")) {
+//!     Ok(x) => println!("{}", x),
+//!     Err(e) => println!("Failed to read numbers!")
+//! }
+//! ```
+//!
+//! With `try!` in `file_product`, each `read_le_u32` need not be directly
+//! concerned with error handling; instead its caller is responsible for
+//! responding to errors that may occur while attempting to read the numbers.
 
 #![experimental]
 #![deny(unused_must_use)]
diff --git a/src/libstd/io/net/addrinfo.rs b/src/libstd/io/net/addrinfo.rs
index d6a48fd39e623..7de7869213091 100644
--- a/src/libstd/io/net/addrinfo.rs
+++ b/src/libstd/io/net/addrinfo.rs
@@ -8,14 +8,10 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-
-Synchronous DNS Resolution
-
-Contains the functionality to perform DNS resolution in a style related to
-getaddrinfo()
-
-*/
+//! Synchronous DNS Resolution
+//!
+//! Contains the functionality to perform DNS resolution in a style related to
+//! `getaddrinfo()`
 
 #![allow(missing_docs)]
 
diff --git a/src/libstd/io/net/pipe.rs b/src/libstd/io/net/pipe.rs
index 2984fa5963147..ec997b71986cc 100644
--- a/src/libstd/io/net/pipe.rs
+++ b/src/libstd/io/net/pipe.rs
@@ -8,19 +8,15 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-
-Named pipes
-
-This module contains the ability to communicate over named pipes with
-synchronous I/O. On windows, this corresponds to talking over a Named Pipe,
-while on Unix it corresponds to UNIX domain sockets.
-
-These pipes are similar to TCP in the sense that you can have both a stream to a
-server and a server itself. The server provided accepts other `UnixStream`
-instances as clients.
-
-*/
+//! Named pipes
+//!
+//! This module contains the ability to communicate over named pipes with
+//! synchronous I/O. On windows, this corresponds to talking over a Named Pipe,
+//! while on Unix it corresponds to UNIX domain sockets.
+//!
+//! These pipes are similar to TCP in the sense that you can have both a stream to a
+//! server and a server itself. The server provided accepts other `UnixStream`
+//! instances as clients.
 
 #![allow(missing_docs)]
 
diff --git a/src/libstd/io/stdio.rs b/src/libstd/io/stdio.rs
index e6dd20f63fbbc..665000eae8837 100644
--- a/src/libstd/io/stdio.rs
+++ b/src/libstd/io/stdio.rs
@@ -8,24 +8,22 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*! Non-blocking access to stdin, stdout, and stderr.
-
-This module provides bindings to the local event loop's TTY interface, using it
-to offer synchronous but non-blocking versions of stdio. These handles can be
-inspected for information about terminal dimensions or for related information
-about the stream or terminal to which it is attached.
-
-# Example
-
-```rust
-# #![allow(unused_must_use)]
-use std::io;
-
-let mut out = io::stdout();
-out.write(b"Hello, world!");
-```
-
-*/
+//! Non-blocking access to stdin, stdout, and stderr.
+//!
+//! This module provides bindings to the local event loop's TTY interface, using it
+//! to offer synchronous but non-blocking versions of stdio. These handles can be
+//! inspected for information about terminal dimensions or for related information
+//! about the stream or terminal to which it is attached.
+//!
+//! # Example
+//!
+//! ```rust
+//! # #![allow(unused_must_use)]
+//! use std::io;
+//!
+//! let mut out = io::stdout();
+//! out.write(b"Hello, world!");
+//! ```
 
 use self::StdSource::*;
 
diff --git a/src/libstd/io/test.rs b/src/libstd/io/test.rs
index a153ead2a3843..af56735021e86 100644
--- a/src/libstd/io/test.rs
+++ b/src/libstd/io/test.rs
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*! Various utility functions useful for writing I/O tests */
+//! Various utility functions useful for writing I/O tests
 
 #![macro_escape]
 
@@ -95,17 +95,14 @@ pub fn raise_fd_limit() {
     unsafe { darwin_fd_limit::raise_fd_limit() }
 }
 
+/// darwin_fd_limit exists to work around an issue where launchctl on Mac OS X defaults the rlimit
+/// maxfiles to 256/unlimited. The default soft limit of 256 ends up being far too low for our
+/// multithreaded scheduler testing, depending on the number of cores available.
+///
+/// This fixes issue #7772.
 #[cfg(target_os="macos")]
 #[allow(non_camel_case_types)]
 mod darwin_fd_limit {
-    /*!
-     * darwin_fd_limit exists to work around an issue where launchctl on Mac OS X defaults the
-     * rlimit maxfiles to 256/unlimited. The default soft limit of 256 ends up being far too low
-     * for our multithreaded scheduler testing, depending on the number of cores available.
-     *
-     * This fixes issue #7772.
-     */
-
     use libc;
     type rlim_t = libc::uint64_t;
     #[repr(C)]
diff --git a/src/libstd/io/timer.rs b/src/libstd/io/timer.rs
index ec588f134784a..ad02b534d04c6 100644
--- a/src/libstd/io/timer.rs
+++ b/src/libstd/io/timer.rs
@@ -8,14 +8,10 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-
-Synchronous Timers
-
-This module exposes the functionality to create timers, block the current task,
-and create receivers which will receive notifications after a period of time.
-
-*/
+//! Synchronous Timers
+//!
+//! This module exposes the functionality to create timers, block the current task,
+//! and create receivers which will receive notifications after a period of time.
 
 // FIXME: These functions take Durations but only pass ms to the backend impls.
 
diff --git a/src/libstd/io/util.rs b/src/libstd/io/util.rs
index 8e0cd6608164a..393283ff64c5b 100644
--- a/src/libstd/io/util.rs
+++ b/src/libstd/io/util.rs
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*! Utility implementations of Reader and Writer */
+//! Utility implementations of Reader and Writer
 
 use prelude::*;
 use cmp;
diff --git a/src/libstd/os.rs b/src/libstd/os.rs
index 6b5ec983a8071..b9a103d3d9b2e 100644
--- a/src/libstd/os.rs
+++ b/src/libstd/os.rs
@@ -8,23 +8,19 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
- * Higher-level interfaces to libc::* functions and operating system services.
- *
- * In general these take and return rust types, use rust idioms (enums,
- * closures, vectors) rather than C idioms, and do more extensive safety
- * checks.
- *
- * This module is not meant to only contain 1:1 mappings to libc entries; any
- * os-interface code that is reasonably useful and broadly applicable can go
- * here. Including utility routines that merely build on other os code.
- *
- * We assume the general case is that users do not care, and do not want to
- * be made to care, which operating system they are on. While they may want
- * to special case various special cases -- and so we will not _hide_ the
- * facts of which OS the user is on -- they should be given the opportunity
- * to write OS-ignorant code by default.
- */
+//! Higher-level interfaces to libc::* functions and operating system services.
+//!
+//! In general these take and return rust types, use rust idioms (enums, closures, vectors) rather
+//! than C idioms, and do more extensive safety checks.
+//!
+//! This module is not meant to only contain 1:1 mappings to libc entries; any os-interface code
+//! that is reasonably useful and broadly applicable can go here. Including utility routines that
+//! merely build on other os code.
+//!
+//! We assume the general case is that users do not care, and do not want to be made to care, which
+//! operating system they are on. While they may want to special case various special cases -- and
+//! so we will not _hide_ the facts of which OS the user is on -- they should be given the
+//! opportunity to write OS-ignorant code by default.
 
 #![experimental]
 
diff --git a/src/libstd/path/mod.rs b/src/libstd/path/mod.rs
index 047fa51b92f60..b17106e811f62 100644
--- a/src/libstd/path/mod.rs
+++ b/src/libstd/path/mod.rs
@@ -8,62 +8,56 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-
-Cross-platform path support
-
-This module implements support for two flavors of paths. `PosixPath` represents
-a path on any unix-like system, whereas `WindowsPath` represents a path on
-Windows. This module also exposes a typedef `Path` which is equal to the
-appropriate platform-specific path variant.
-
-Both `PosixPath` and `WindowsPath` implement a trait `GenericPath`, which
-contains the set of methods that behave the same for both paths. They each also
-implement some methods that could not be expressed in `GenericPath`, yet behave
-identically for both path flavors, such as `.components()`.
-
-The three main design goals of this module are 1) to avoid unnecessary
-allocation, 2) to behave the same regardless of which flavor of path is being
-used, and 3) to support paths that cannot be represented in UTF-8 (as Linux has
-no restriction on paths beyond disallowing NUL).
-
-## Usage
-
-Usage of this module is fairly straightforward. Unless writing platform-specific
-code, `Path` should be used to refer to the platform-native path.
-
-Creation of a path is typically done with either `Path::new(some_str)` or
-`Path::new(some_vec)`. This path can be modified with `.push()` and
-`.pop()` (and other setters). The resulting Path can either be passed to another
-API that expects a path, or can be turned into a `&[u8]` with `.as_vec()` or a
-`Option<&str>` with `.as_str()`. Similarly, attributes of the path can be queried
-with methods such as `.filename()`. There are also methods that return a new
-path instead of modifying the receiver, such as `.join()` or `.dir_path()`.
-
-Paths are always kept in normalized form. This means that creating the path
-`Path::new("a/b/../c")` will return the path `a/c`. Similarly any attempt
-to mutate the path will always leave it in normalized form.
-
-When rendering a path to some form of output, there is a method `.display()`
-which is compatible with the `format!()` parameter `{}`. This will render the
-path as a string, replacing all non-utf8 sequences with the Replacement
-Character (U+FFFD). As such it is not suitable for passing to any API that
-actually operates on the path; it is only intended for display.
-
-## Example
-
-```rust
-use std::io::fs::PathExtensions;
-
-let mut path = Path::new("/tmp/path");
-println!("path: {}", path.display());
-path.set_filename("foo");
-path.push("bar");
-println!("new path: {}", path.display());
-println!("path exists: {}", path.exists());
-```
-
-*/
+//! Cross-platform path support
+//!
+//! This module implements support for two flavors of paths. `PosixPath` represents a path on any
+//! unix-like system, whereas `WindowsPath` represents a path on Windows. This module also exposes
+//! a typedef `Path` which is equal to the appropriate platform-specific path variant.
+//!
+//! Both `PosixPath` and `WindowsPath` implement a trait `GenericPath`, which contains the set of
+//! methods that behave the same for both paths. They each also implement some methods that could
+//! not be expressed in `GenericPath`, yet behave identically for both path flavors, such as
+//! `.components()`.
+//!
+//! The three main design goals of this module are 1) to avoid unnecessary allocation, 2) to behave
+//! the same regardless of which flavor of path is being used, and 3) to support paths that cannot
+//! be represented in UTF-8 (as Linux has no restriction on paths beyond disallowing NUL).
+//!
+//! ## Usage
+//!
+//! Usage of this module is fairly straightforward. Unless writing platform-specific code, `Path`
+//! should be used to refer to the platform-native path.
+//!
+//! Creation of a path is typically done with either `Path::new(some_str)` or
+//! `Path::new(some_vec)`. This path can be modified with `.push()` and `.pop()` (and other
+//! setters). The resulting Path can either be passed to another API that expects a path, or can be
+//! turned into a `&[u8]` with `.as_vec()` or a `Option<&str>` with `.as_str()`. Similarly,
+//! attributes of the path can be queried with methods such as `.filename()`. There are also
+//! methods that return a new path instead of modifying the receiver, such as `.join()` or
+//! `.dir_path()`.
+//!
+//! Paths are always kept in normalized form. This means that creating the path
+//! `Path::new("a/b/../c")` will return the path `a/c`. Similarly any attempt to mutate the path
+//! will always leave it in normalized form.
+//!
+//! When rendering a path to some form of output, there is a method `.display()` which is
+//! compatible with the `format!()` parameter `{}`. This will render the path as a string,
+//! replacing all non-utf8 sequences with the Replacement Character (U+FFFD). As such it is not
+//! suitable for passing to any API that actually operates on the path; it is only intended for
+//! display.
+//!
+//! ## Example
+//!
+//! ```rust
+//! use std::io::fs::PathExtensions;
+//!
+//! let mut path = Path::new("/tmp/path");
+//! println!("path: {}", path.display());
+//! path.set_filename("foo");
+//! path.push("bar");
+//! println!("new path: {}", path.display());
+//! println!("path exists: {}", path.exists());
+//! ```
 
 #![experimental]
 
diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs
index 872a545224184..5ecd3ff04f1cd 100644
--- a/src/libstd/rt/mod.rs
+++ b/src/libstd/rt/mod.rs
@@ -8,46 +8,38 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*! Runtime services, including the task scheduler and I/O dispatcher
-
-The `rt` module provides the private runtime infrastructure necessary
-to support core language features like the exchange and local heap,
-logging, local data and unwinding. It also implements the default task
-scheduler and task model. Initialization routines are provided for setting
-up runtime resources in common configurations, including that used by
-`rustc` when generating executables.
-
-It is intended that the features provided by `rt` can be factored in a
-way such that the core library can be built with different 'profiles'
-for different use cases, e.g. excluding the task scheduler. A number
-of runtime features though are critical to the functioning of the
-language and an implementation must be provided regardless of the
-execution environment.
-
-Of foremost importance is the global exchange heap, in the module
-`heap`. Very little practical Rust code can be written without
-access to the global heap. Unlike most of `rt` the global heap is
-truly a global resource and generally operates independently of the
-rest of the runtime.
-
-All other runtime features are task-local, including the local heap,
-local storage, logging and the stack unwinder.
-
-The relationship between `rt` and the rest of the core library is
-not entirely clear yet and some modules will be moving into or
-out of `rt` as development proceeds.
-
-Several modules in `core` are clients of `rt`:
-
-* `std::task` - The user-facing interface to the Rust task model.
-* `std::local_data` - The interface to local data.
-* `std::unstable::lang` - Miscellaneous lang items, some of which rely on `std::rt`.
-* `std::cleanup` - Local heap destruction.
-* `std::io` - In the future `std::io` will use an `rt` implementation.
-* `std::logging`
-* `std::comm`
-
-*/
+//! Runtime services, including the task scheduler and I/O dispatcher
+//!
+//! The `rt` module provides the private runtime infrastructure necessary to support core language
+//! features like the exchange and local heap, logging, local data and unwinding. It also
+//! implements the default task scheduler and task model. Initialization routines are provided for
+//! setting up runtime resources in common configurations, including that used by `rustc` when
+//! generating executables.
+//!
+//! It is intended that the features provided by `rt` can be factored in a way such that the core
+//! library can be built with different 'profiles' for different use cases, e.g. excluding the task
+//! scheduler. A number of runtime features though are critical to the functioning of the language
+//! and an implementation must be provided regardless of the execution environment.
+//!
+//! Of foremost importance is the global exchange heap, in the module `heap`. Very little practical
+//! Rust code can be written without access to the global heap. Unlike most of `rt` the global heap
+//! is truly a global resource and generally operates independently of the rest of the runtime.
+//!
+//! All other runtime features are task-local, including the local heap, local storage, logging and
+//! the stack unwinder.
+//!
+//! The relationship between `rt` and the rest of the core library is not entirely clear yet and
+//! some modules will be moving into or out of `rt` as development proceeds.
+//!
+//! Several modules in `core` are clients of `rt`:
+//!
+//! * `std::task` - The user-facing interface to the Rust task model.
+//! * `std::local_data` - The interface to local data.
+//! * `std::unstable::lang` - Miscellaneous lang items, some of which rely on `std::rt`.
+//! * `std::cleanup` - Local heap destruction.
+//! * `std::io` - In the future `std::io` will use an `rt` implementation.
+//! * `std::logging`
+//! * `std::comm`
 
 #![experimental]
 
diff --git a/src/libstd/sync/future.rs b/src/libstd/sync/future.rs
index d6f413a082854..f2f9351fd0d58 100644
--- a/src/libstd/sync/future.rs
+++ b/src/libstd/sync/future.rs
@@ -8,21 +8,19 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
- * A type representing values that may be computed concurrently and
- * operations for working with them.
- *
- * # Example
- *
- * ```rust
- * use std::sync::Future;
- * # fn fib(n: uint) -> uint {42};
- * # fn make_a_sandwich() {};
- * let mut delayed_fib = Future::spawn(proc() { fib(5000) });
- * make_a_sandwich();
- * println!("fib(5000) = {}", delayed_fib.get())
- * ```
- */
+//! A type representing values that may be computed concurrently and operations for working with
+//! them.
+//!
+//! # Example
+//!
+//! ```rust
+//! use std::sync::Future;
+//! # fn fib(n: uint) -> uint {42};
+//! # fn make_a_sandwich() {};
+//! let mut delayed_fib = Future::spawn(proc() { fib(5000) });
+//! make_a_sandwich();
+//! println!("fib(5000) = {}", delayed_fib.get())
+//! ```
 
 #![allow(missing_docs)]
 
diff --git a/src/libsyntax/ast.rs b/src/libsyntax/ast.rs
index 3d33774aa55e1..26c049d267dfb 100644
--- a/src/libsyntax/ast.rs
+++ b/src/libsyntax/ast.rs
@@ -276,11 +276,9 @@ impl PathParameters {
         }
     }
 
+    /// Returns the types that the user wrote. Note that these do not necessarily map to the type
+    /// parameters in the parenthesized case.
     pub fn types(&self) -> Vec<&P<Ty>> {
-        /*!
-         * Returns the types that the user wrote. Note that these do not
-         * necessarily map to the type parameters in the parenthesized case.
-         */
         match *self {
             AngleBracketedParameters(ref data) => {
                 data.types.iter().collect()
diff --git a/src/libsyntax/ast_util.rs b/src/libsyntax/ast_util.rs
index 043e79bffd9e9..4d35fbc143723 100644
--- a/src/libsyntax/ast_util.rs
+++ b/src/libsyntax/ast_util.rs
@@ -569,6 +569,7 @@ pub fn compute_id_range_for_inlined_item(item: &InlinedItem) -> IdRange {
     visitor.result
 }
 
+/// Computes the id range for a single fn body, ignoring nested items.
 pub fn compute_id_range_for_fn_body(fk: visit::FnKind,
                                     decl: &FnDecl,
                                     body: &Block,
@@ -576,11 +577,6 @@ pub fn compute_id_range_for_fn_body(fk: visit::FnKind,
                                     id: NodeId)
                                     -> IdRange
 {
-    /*!
-     * Computes the id range for a single fn body,
-     * ignoring nested items.
-     */
-
     let mut visitor = IdRangeComputingVisitor {
         result: IdRange::max()
     };
diff --git a/src/libsyntax/codemap.rs b/src/libsyntax/codemap.rs
index b019b31de5f3f..1c1e1acab1c57 100644
--- a/src/libsyntax/codemap.rs
+++ b/src/libsyntax/codemap.rs
@@ -10,18 +10,12 @@
 //
 // ignore-lexer-test FIXME #15679
 
-/*!
-
-The CodeMap tracks all the source code used within a single crate, mapping
-from integer byte positions to the original source code location. Each bit of
-source parsed during crate parsing (typically files, in-memory strings, or
-various bits of macro expansion) cover a continuous range of bytes in the
-CodeMap and are represented by FileMaps. Byte positions are stored in `spans`
-and used pervasively in the compiler. They are absolute positions within the
-CodeMap, which upon request can be converted to line and column information,
-source code snippets, etc.
-
-*/
+//! The CodeMap tracks all the source code used within a single crate, mapping from integer byte
+//! positions to the original source code location. Each bit of source parsed during crate parsing
+//! (typically files, in-memory strings, or various bits of macro expansion) cover a continuous
+//! range of bytes in the CodeMap and are represented by FileMaps. Byte positions are stored in
+//! `spans` and used pervasively in the compiler. They are absolute positions within the CodeMap,
+//! which upon request can be converted to line and column information, source code snippets, etc.
 
 pub use self::MacroFormat::*;
 
diff --git a/src/libsyntax/ext/deriving/decodable.rs b/src/libsyntax/ext/deriving/decodable.rs
index d0a0365838604..e3cf2b68752fd 100644
--- a/src/libsyntax/ext/deriving/decodable.rs
+++ b/src/libsyntax/ext/deriving/decodable.rs
@@ -8,10 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-The compiler code necessary for `#[deriving(Decodable)]`. See
-encodable.rs for more.
-*/
+//! The compiler code necessary for `#[deriving(Decodable)]`. See encodable.rs for more.
 
 use ast;
 use ast::{MetaItem, Item, Expr, MutMutable};
diff --git a/src/libsyntax/ext/deriving/generic/ty.rs b/src/libsyntax/ext/deriving/generic/ty.rs
index 700ada8b4ad8f..f285d2cc2ff3a 100644
--- a/src/libsyntax/ext/deriving/generic/ty.rs
+++ b/src/libsyntax/ext/deriving/generic/ty.rs
@@ -8,10 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-A mini version of ast::Ty, which is easier to use, and features an
-explicit `Self` type to use when specifying impls to be derived.
-*/
+//! A mini version of ast::Ty, which is easier to use, and features an explicit `Self` type to use
+//! when specifying impls to be derived.
 
 pub use self::PtrTy::*;
 pub use self::Ty::*;
diff --git a/src/libsyntax/ext/deriving/mod.rs b/src/libsyntax/ext/deriving/mod.rs
index b8cebd8ea201c..fccef47d1ea2c 100644
--- a/src/libsyntax/ext/deriving/mod.rs
+++ b/src/libsyntax/ext/deriving/mod.rs
@@ -8,15 +8,10 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-The compiler code necessary to implement the `#[deriving]` extensions.
-
-
-FIXME (#2810): hygiene. Search for "__" strings (in other files too).
-We also assume "extra" is the standard library, and "std" is the core
-library.
-
-*/
+//! The compiler code necessary to implement the `#[deriving]` extensions.
+//!
+//! FIXME (#2810): hygiene. Search for "__" strings (in other files too). We also assume "extra" is
+//! the standard library, and "std" is the core library.
 
 use ast::{Item, MetaItem, MetaList, MetaNameValue, MetaWord};
 use ext::base::ExtCtxt;
diff --git a/src/libsyntax/parse/obsolete.rs b/src/libsyntax/parse/obsolete.rs
index e2dee607c6925..86a96fc521642 100644
--- a/src/libsyntax/parse/obsolete.rs
+++ b/src/libsyntax/parse/obsolete.rs
@@ -8,14 +8,10 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-Support for parsing unsupported, old syntaxes, for the
-purpose of reporting errors. Parsing of these syntaxes
-is tested by compile-test/obsolete-syntax.rs.
-
-Obsolete syntax that becomes too hard to parse can be
-removed.
-*/
+//! Support for parsing unsupported, old syntaxes, for the purpose of reporting errors. Parsing of
+//! these syntaxes is tested by compile-test/obsolete-syntax.rs.
+//!
+//! Obsolete syntax that becomes too hard to parse can be removed.
 
 pub use self::ObsoleteSyntax::*;
 
diff --git a/src/libsyntax/parse/parser.rs b/src/libsyntax/parse/parser.rs
index c731a0005f884..b620799cc97e8 100644
--- a/src/libsyntax/parse/parser.rs
+++ b/src/libsyntax/parse/parser.rs
@@ -1963,11 +1963,9 @@ impl<'a> Parser<'a> {
         }
     }
 
+    /// Parses `lifetime_defs = [ lifetime_defs { ',' lifetime_defs } ]` where `lifetime_def  =
+    /// lifetime [':' lifetimes]`
     pub fn parse_lifetime_defs(&mut self) -> Vec<ast::LifetimeDef> {
-        /*!
-         * Parses `lifetime_defs = [ lifetime_defs { ',' lifetime_defs } ]`
-         * where `lifetime_def  = lifetime [':' lifetimes]`
-         */
 
         let mut res = Vec::new();
         loop {
@@ -2003,16 +2001,13 @@ impl<'a> Parser<'a> {
         }
     }
 
-    // matches lifetimes = ( lifetime ) | ( lifetime , lifetimes )
-    // actually, it matches the empty one too, but putting that in there
-    // messes up the grammar....
+    /// matches lifetimes = ( lifetime ) | ( lifetime , lifetimes ) actually, it matches the empty
+    /// one too, but putting that in there messes up the grammar....
+    ///
+    /// Parses zero or more comma separated lifetimes. Expects each lifetime to be followed by
+    /// either a comma or `>`.  Used when parsing type parameter lists, where we expect something
+    /// like `<'a, 'b, T>`.
     pub fn parse_lifetimes(&mut self, sep: token::Token) -> Vec<ast::Lifetime> {
-        /*!
-         * Parses zero or more comma separated lifetimes.
-         * Expects each lifetime to be followed by either
-         * a comma or `>`.  Used when parsing type parameter
-         * lists, where we expect something like `<'a, 'b, T>`.
-         */
 
         let mut res = Vec::new();
         loop {
diff --git a/src/libsyntax/visit.rs b/src/libsyntax/visit.rs
index 3f87dbc0740ec..84afa56b07d5e 100644
--- a/src/libsyntax/visit.rs
+++ b/src/libsyntax/visit.rs
@@ -92,14 +92,12 @@ pub trait Visitor<'v> {
     }
     fn visit_struct_field(&mut self, s: &'v StructField) { walk_struct_field(self, s) }
     fn visit_variant(&mut self, v: &'v Variant, g: &'v Generics) { walk_variant(self, v, g) }
+
+    /// Visits an optional reference to a lifetime. The `span` is the span of some surrounding
+    /// reference should opt_lifetime be None.
     fn visit_opt_lifetime_ref(&mut self,
                               _span: Span,
                               opt_lifetime: &'v Option<Lifetime>) {
-        /*!
-         * Visits an optional reference to a lifetime. The `span` is
-         * the span of some surrounding reference should opt_lifetime
-         * be None.
-         */
         match *opt_lifetime {
             Some(ref l) => self.visit_lifetime_ref(l),
             None => ()
diff --git a/src/libunicode/normalize.rs b/src/libunicode/normalize.rs
index ad36215c11bcb..962be3d5acdc9 100644
--- a/src/libunicode/normalize.rs
+++ b/src/libunicode/normalize.rs
@@ -8,10 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-  Functions for computing canonical and compatible decompositions
-  for Unicode characters.
-  */
+//! Functions for computing canonical and compatible decompositions for Unicode characters.
 
 use core::cmp::{Equal, Less, Greater};
 use core::option::{Option, Some, None};
diff --git a/src/libunicode/u_char.rs b/src/libunicode/u_char.rs
index 369336639a7fe..a73dac1a61866 100644
--- a/src/libunicode/u_char.rs
+++ b/src/libunicode/u_char.rs
@@ -8,12 +8,10 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
- * Unicode-intensive `char` methods.
- *
- * These methods implement functionality for `char` that requires knowledge of
- * Unicode definitions, including normalization, categorization, and display information.
- */
+//! Unicode-intensive `char` methods.
+//!
+//! These methods implement functionality for `char` that requires knowledge of
+//! Unicode definitions, including normalization, categorization, and display information.
 
 use core::option::Option;
 use tables::{derived_property, property, general_category, conversions, charwidth};
diff --git a/src/libunicode/u_str.rs b/src/libunicode/u_str.rs
index 03a50409d7e52..a5f7614257595 100644
--- a/src/libunicode/u_str.rs
+++ b/src/libunicode/u_str.rs
@@ -10,12 +10,10 @@
 //
 // ignore-lexer-test FIXME #15679
 
-/*!
- * Unicode-intensive string manipulations.
- *
- * This module provides functionality to `str` that requires the Unicode
- * methods provided by the UnicodeChar trait.
- */
+//! Unicode-intensive string manipulations.
+//!
+//! This module provides functionality to `str` that requires the Unicode methods provided by the
+//! UnicodeChar trait.
 
 use self::GraphemeState::*;
 use core::cmp;

From c064961016fd530b4bb00603f0fa04a1ac45dded Mon Sep 17 00:00:00 2001
From: Alex Crichton <alex@alexcrichton.com>
Date: Wed, 26 Nov 2014 11:17:23 -0800
Subject: [PATCH 38/40] More test fixes and rebase conflicts!

---
 src/libcollections/binary_heap.rs       |   2 +-
 src/libcollections/ring_buf.rs          |   2 +-
 src/librustc/middle/traits/util.rs      |  25 +--
 src/librustc/middle/typeck/check/mod.rs |   2 -
 src/librustc_trans/trans/datum.rs       |   2 +-
 src/libstd/fmt.rs                       | 207 ++++++++++++------------
 6 files changed, 124 insertions(+), 116 deletions(-)

diff --git a/src/libcollections/binary_heap.rs b/src/libcollections/binary_heap.rs
index d927a8a685afb..cbf45ee36a3d8 100644
--- a/src/libcollections/binary_heap.rs
+++ b/src/libcollections/binary_heap.rs
@@ -572,7 +572,7 @@ impl<'a, T> DoubleEndedIterator<&'a T> for Items<'a, T> {
     fn next_back(&mut self) -> Option<(&'a T)> { self.iter.next_back() }
 }
 
-impl<'a, T> ExactSize<&'a T> for Items<'a, T> {}
+impl<'a, T> ExactSizeIterator<&'a T> for Items<'a, T> {}
 
 /// An iterator that moves out of a `BinaryHeap`.
 pub struct MoveItems<T> {
diff --git a/src/libcollections/ring_buf.rs b/src/libcollections/ring_buf.rs
index e719a6c6da373..e11ba35367e2e 100644
--- a/src/libcollections/ring_buf.rs
+++ b/src/libcollections/ring_buf.rs
@@ -797,7 +797,7 @@ impl<T> DoubleEndedIterator<T> for MoveItems<T> {
 }
 
 
-impl<T> ExactSize<T> for MoveItems<T> {}
+impl<T> ExactSizeIterator<T> for MoveItems<T> {}
 
 impl<A: PartialEq> PartialEq for RingBuf<A> {
     fn eq(&self, other: &RingBuf<A>) -> bool {
diff --git a/src/librustc/middle/traits/util.rs b/src/librustc/middle/traits/util.rs
index b9e694ff4e2b2..cd7260b1812a3 100644
--- a/src/librustc/middle/traits/util.rs
+++ b/src/librustc/middle/traits/util.rs
@@ -42,18 +42,19 @@ pub fn supertraits<'cx, 'tcx>(tcx: &'cx ty::ctxt<'tcx>,
                               trait_ref: Rc<ty::TraitRef<'tcx>>)
                               -> Supertraits<'cx, 'tcx>
 {
-    /// Returns an iterator over the trait reference `T` and all of its supertrait references. May
-    /// contain duplicates. In general the ordering is not defined.
-    ///
-    /// Example:
-    ///
-    /// ```
-    /// trait Foo { ... }
-    /// trait Bar : Foo { ... }
-    /// trait Baz : Bar+Foo { ... }
-    /// ```
-    ///
-    /// `supertraits(Baz)` yields `[Baz, Bar, Foo, Foo]` in some order.
+    //! Returns an iterator over the trait reference `T` and all of its supertrait references. May
+    //! contain duplicates. In general the ordering is not defined.
+    //!
+    //! Example:
+    //!
+    //! ```
+    //! trait Foo { ... }
+    //! trait Bar : Foo { ... }
+    //! trait Baz : Bar+Foo { ... }
+    //! ```
+    //!
+    //! `supertraits(Baz)` yields `[Baz, Bar, Foo, Foo]` in some order.
+
     transitive_bounds(tcx, &[trait_ref])
 }
 
diff --git a/src/librustc/middle/typeck/check/mod.rs b/src/librustc/middle/typeck/check/mod.rs
index acacb1727faab..40a38d45fa078 100644
--- a/src/librustc/middle/typeck/check/mod.rs
+++ b/src/librustc/middle/typeck/check/mod.rs
@@ -1252,8 +1252,6 @@ fn compare_impl_method<'tcx>(tcx: &ty::ctxt<'tcx>,
                                                 -> bool
     {
 
-        */
-
         let trait_params = trait_generics.regions.get_slice(subst::FnSpace);
         let impl_params = impl_generics.regions.get_slice(subst::FnSpace);
 
diff --git a/src/librustc_trans/trans/datum.rs b/src/librustc_trans/trans/datum.rs
index 8b92b0e05a8c0..f0fd94958ee97 100644
--- a/src/librustc_trans/trans/datum.rs
+++ b/src/librustc_trans/trans/datum.rs
@@ -552,7 +552,7 @@ impl<'tcx, K: KindOps + fmt::Show> Datum<'tcx, K> {
                 self.kind)
     }
 
-    //! See the `appropriate_rvalue_mode()` function
+    /// See the `appropriate_rvalue_mode()` function
     pub fn appropriate_rvalue_mode<'a>(&self, ccx: &CrateContext<'a, 'tcx>)
                                        -> RvalueMode {
         appropriate_rvalue_mode(ccx, self.ty)
diff --git a/src/libstd/fmt.rs b/src/libstd/fmt.rs
index 62ca3483c21b3..7e1bfd704a927 100644
--- a/src/libstd/fmt.rs
+++ b/src/libstd/fmt.rs
@@ -13,15 +13,15 @@
 //! Utilities for formatting and printing strings
 //!
 //! This module contains the runtime support for the `format!` syntax extension.
-//! This macro is implemented in the compiler to emit calls to this module in order
-//! to format arguments at runtime into strings and streams.
+//! This macro is implemented in the compiler to emit calls to this module in
+//! order to format arguments at runtime into strings and streams.
 //!
-//! The functions contained in this module should not normally be used in everyday
-//! use cases of `format!`. The assumptions made by these functions are unsafe for
-//! all inputs, and the compiler performs a large amount of validation on the
-//! arguments to `format!` in order to ensure safety at runtime. While it is
-//! possible to call these functions directly, it is not recommended to do so in the
-//! general case.
+//! The functions contained in this module should not normally be used in
+//! everyday use cases of `format!`. The assumptions made by these functions are
+//! unsafe for all inputs, and the compiler performs a large amount of
+//! validation on the arguments to `format!` in order to ensure safety at
+//! runtime. While it is possible to call these functions directly, it is not
+//! recommended to do so in the general case.
 //!
 //! ## Usage
 //!
@@ -46,9 +46,9 @@
 //!
 //! From these, you can see that the first argument is a format string. It is
 //! required by the compiler for this to be a string literal; it cannot be a
-//! variable passed in (in order to perform validity checking). The compiler will
-//! then parse the format string and determine if the list of arguments provided is
-//! suitable to pass to this format string.
+//! variable passed in (in order to perform validity checking). The compiler
+//! will then parse the format string and determine if the list of arguments
+//! provided is suitable to pass to this format string.
 //!
 //! ### Positional parameters
 //!
@@ -60,16 +60,16 @@
 //!
 //! Things can get a little tricky once you start intermingling the two types of
 //! positional specifiers. The "next argument" specifier can be thought of as an
-//! iterator over the argument. Each time a "next argument" specifier is seen, the
-//! iterator advances. This leads to behavior like this:
+//! iterator over the argument. Each time a "next argument" specifier is seen,
+//! the iterator advances. This leads to behavior like this:
 //!
 //! ```rust
 //! format!("{1} {} {0} {}", 1i, 2i); // => "2 1 1 2"
 //! ```
 //!
-//! The internal iterator over the argument has not been advanced by the time the
-//! first `{}` is seen, so it prints the first argument. Then upon reaching the
-//! second `{}`, the iterator has advanced forward to the second argument.
+//! The internal iterator over the argument has not been advanced by the time
+//! the first `{}` is seen, so it prints the first argument. Then upon reaching
+//! the second `{}`, the iterator has advanced forward to the second argument.
 //! Essentially, parameters which explicitly name their argument do not affect
 //! parameters which do not name an argument in terms of positional specifiers.
 //!
@@ -98,27 +98,30 @@
 //! # }
 //! ```
 //!
-//! It is illegal to put positional parameters (those without names) after arguments
-//! which have names. Like with positional parameters, it is illegal to provide
-//! named parameters that are unused by the format string.
+//! It is illegal to put positional parameters (those without names) after
+//! arguments which have names. Like with positional parameters, it is illegal
+//! to provide named parameters that are unused by the format string.
 //!
 //! ### Argument types
 //!
-//! Each argument's type is dictated by the format string. It is a requirement that every argument is
-//! only ever referred to by one type. For example, this is an invalid format string:
+//! Each argument's type is dictated by the format string. It is a requirement
+//! that every argument is only ever referred to by one type. For example, this
+//! is an invalid format string:
 //!
 //! ```text
 //! {0:x} {0:o}
 //! ```
 //!
-//! This is invalid because the first argument is both referred to as a hexidecimal as well as an
+//! This is invalid because the first argument is both referred to as a
+//! hexadecimal as well as an
 //! octal.
 //!
-//! There are various parameters which do require a particular type, however. Namely if the syntax
-//! `{:.*}` is used, then the number of characters to print precedes the actual object being formatted,
-//! and the number of characters must have the type `uint`. Although a `uint` can be printed with
-//! `{}`, it is illegal to reference an argument as such. For example this is another invalid
-//! format string:
+//! There are various parameters which do require a particular type, however.
+//! Namely if the syntax `{:.*}` is used, then the number of characters to print
+//! precedes the actual object being formatted, and the number of characters
+//! must have the type `uint`. Although a `uint` can be printed with `{}`, it is
+//! illegal to reference an argument as such. For example this is another
+//! invalid format string:
 //!
 //! ```text
 //! {:.*} {0}
@@ -126,10 +129,10 @@
 //!
 //! ### Formatting traits
 //!
-//! When requesting that an argument be formatted with a particular type, you are
-//! actually requesting that an argument ascribes to a particular trait. This allows
-//! multiple actual types to be formatted via `{:x}` (like `i8` as well as `int`).
-//! The current mapping of types to traits is:
+//! When requesting that an argument be formatted with a particular type, you
+//! are actually requesting that an argument ascribes to a particular trait.
+//! This allows multiple actual types to be formatted via `{:x}` (like `i8` as
+//! well as `int`).  The current mapping of types to traits is:
 //!
 //! * *nothing* ⇒ `Show`
 //! * `o` ⇒ `Octal`
@@ -141,14 +144,14 @@
 //! * `E` ⇒ `UpperExp`
 //!
 //! What this means is that any type of argument which implements the
-//! `std::fmt::Binary` trait can then be formatted with `{:b}`. Implementations are
-//! provided for these traits for a number of primitive types by the standard
-//! library as well. If no format is specified (as in `{}` or `{:6}`), then the
-//! format trait used is the `Show` trait. This is one of the more commonly
-//! implemented traits when formatting a custom type.
+//! `std::fmt::Binary` trait can then be formatted with `{:b}`. Implementations
+//! are provided for these traits for a number of primitive types by the
+//! standard library as well. If no format is specified (as in `{}` or `{:6}`),
+//! then the format trait used is the `Show` trait. This is one of the more
+//! commonly implemented traits when formatting a custom type.
 //!
-//! When implementing a format trait for your own type, you will have to implement a
-//! method of the signature:
+//! When implementing a format trait for your own type, you will have to
+//! implement a method of the signature:
 //!
 //! ```rust
 //! # use std::fmt;
@@ -159,17 +162,17 @@
 //! # } }
 //! ```
 //!
-//! Your type will be passed as `self` by-reference, and then the function should
-//! emit output into the `f.buf` stream. It is up to each format trait
-//! implementation to correctly adhere to the requested formatting parameters. The
-//! values of these parameters will be listed in the fields of the `Formatter`
-//! struct. In order to help with this, the `Formatter` struct also provides some
-//! helper methods.
+//! Your type will be passed as `self` by-reference, and then the function
+//! should emit output into the `f.buf` stream. It is up to each format trait
+//! implementation to correctly adhere to the requested formatting parameters.
+//! The values of these parameters will be listed in the fields of the
+//! `Formatter` struct. In order to help with this, the `Formatter` struct also
+//! provides some helper methods.
 //!
 //! Additionally, the return value of this function is `fmt::Result` which is a
 //! typedef to `Result<(), IoError>` (also known as `IoResult<()>`). Formatting
-//! implementations should ensure that they return errors from `write!` correctly
-//! (propagating errors upward).
+//! implementations should ensure that they return errors from `write!`
+//! correctly (propagating errors upward).
 //!
 //! An example of implementing the formatting traits would look
 //! like:
@@ -193,8 +196,8 @@
 //!     }
 //! }
 //!
-//! // Different traits allow different forms of output of a type. The meaning of
-//! // this format is to print the magnitude of a vector.
+//! // Different traits allow different forms of output of a type. The meaning
+//! // of this format is to print the magnitude of a vector.
 //! impl fmt::Binary for Vector2D {
 //!     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
 //!         let magnitude = (self.x * self.x + self.y * self.y) as f64;
@@ -219,8 +222,8 @@
 //!
 //! ### Related macros
 //!
-//! There are a number of related macros in the `format!` family. The ones that are
-//! currently implemented are:
+//! There are a number of related macros in the `format!` family. The ones that
+//! are currently implemented are:
 //!
 //! ```ignore
 //! format!      // described above
@@ -233,10 +236,11 @@
 //!
 //! #### `write!`
 //!
-//! This and `writeln` are two macros which are used to emit the format string to a
-//! specified stream. This is used to prevent intermediate allocations of format
-//! strings and instead directly write the output. Under the hood, this function is
-//! actually invoking the `write` function defined in this module. Example usage is:
+//! This and `writeln` are two macros which are used to emit the format string
+//! to a specified stream. This is used to prevent intermediate allocations of
+//! format strings and instead directly write the output. Under the hood, this
+//! function is actually invoking the `write` function defined in this module.
+//! Example usage is:
 //!
 //! ```rust
 //! # #![allow(unused_must_use)]
@@ -248,9 +252,9 @@
 //!
 //! #### `print!`
 //!
-//! This and `println` emit their output to stdout. Similarly to the `write!` macro,
-//! the goal of these macros is to avoid intermediate allocations when printing
-//! output. Example usage is:
+//! This and `println` emit their output to stdout. Similarly to the `write!`
+//! macro, the goal of these macros is to avoid intermediate allocations when
+//! printing output. Example usage is:
 //!
 //! ```rust
 //! print!("Hello {}!", "world");
@@ -274,7 +278,8 @@
 //! format_args!(fmt::format, "this returns {}", "String");
 //!
 //! let some_writer: &mut io::Writer = &mut io::stdout();
-//! format_args!(|args| { write!(some_writer, "{}", args) }, "print with a {}", "closure");
+//! format_args!(|args| { write!(some_writer, "{}", args) },
+//!              "print with a {}", "closure");
 //!
 //! fn my_fmt_fn(args: &fmt::Arguments) {
 //!     write!(&mut io::stdout(), "{}", args);
@@ -283,28 +288,28 @@
 //! # }
 //! ```
 //!
-//! The first argument of the `format_args!` macro is a function (or closure) which
-//! takes one argument of type `&fmt::Arguments`. This structure can then be
-//! passed to the `write` and `format` functions inside this module in order to
-//! process the format string. The goal of this macro is to even further prevent
-//! intermediate allocations when dealing formatting strings.
+//! The first argument of the `format_args!` macro is a function (or closure)
+//! which takes one argument of type `&fmt::Arguments`. This structure can then
+//! be passed to the `write` and `format` functions inside this module in order
+//! to process the format string. The goal of this macro is to even further
+//! prevent intermediate allocations when dealing with formatting strings.
 //!
-//! For example, a logging library could use the standard formatting syntax, but it
-//! would internally pass around this structure until it has been determined where
-//! output should go to.
+//! For example, a logging library could use the standard formatting syntax, but
+//! it would internally pass around this structure until it has been determined
+//! where output should go.
 //!
-//! It is unsafe to programmatically create an instance of `fmt::Arguments` because
-//! the operations performed when executing a format string require the compile-time
-//! checks provided by the compiler. The `format_args!` macro is the only method of
-//! safely creating these structures, but they can be unsafely created with the
-//! constructor provided.
+//! It is unsafe to programmatically create an instance of `fmt::Arguments`
+//! because the operations performed when executing a format string require the
+//! compile-time checks provided by the compiler. The `format_args!` macro is
+//! the only method of safely creating these structures, but they can be
+//! unsafely created with the constructor provided.
 //!
 //! ## Syntax
 //!
-//! The syntax for the formatting language used is drawn from other languages, so it
-//! should not be too alien. Arguments are formatted with python-like syntax,
-//! meaning that arguments are surrounded by `{}` instead of the C-like `%`. The
-//! actual grammar for the formatting syntax is:
+//! The syntax for the formatting language used is drawn from other languages,
+//! so it should not be too alien. Arguments are formatted with python-like
+//! syntax, meaning that arguments are surrounded by `{}` instead of the C-like
+//! `%`. The actual grammar for the formatting syntax is:
 //!
 //! ```text
 //! format_string := <text> [ format <text> ] *
@@ -333,8 +338,9 @@
 //!
 //! The fill character is provided normally in conjunction with the `width`
 //! parameter. This indicates that if the value being formatted is smaller than
-//! `width` some extra characters will be printed around it. The extra characters
-//! are specified by `fill`, and the alignment can be one of two options:
+//! `width` some extra characters will be printed around it. The extra
+//! characters are specified by `fill`, and the alignment can be one of two
+//! options:
 //!
 //! * `<` - the argument is left-aligned in `width` columns
 //! * `^` - the argument is center-aligned in `width` columns
@@ -344,33 +350,36 @@
 //!
 //! These can all be interpreted as flags for a particular formatter.
 //!
-//! * '+' - This is intended for numeric types and indicates that the sign should
-//!         always be printed. Positive signs are never printed by default, and the
-//!         negative sign is only printed by default for the `Signed` trait. This
-//!         flag indicates that the correct sign (+ or -) should always be printed.
+//! * '+' - This is intended for numeric types and indicates that the sign
+//!         should always be printed. Positive signs are never printed by
+//!         default, and the negative sign is only printed by default for the
+//!         `Signed` trait. This flag indicates that the correct sign (+ or -)
+//!         should always be printed.
 //! * '-' - Currently not used
-//! * '#' - This flag is indicates that the "alternate" form of printing should be
-//!         used. By default, this only applies to the integer formatting traits and
-//!         performs like:
+//! * '#' - This flag indicates that the "alternate" form of printing should
+//!         be used. By default, this only applies to the integer formatting
+//!         traits and performs like:
 //!     * `x` - precedes the argument with a "0x"
 //!     * `X` - precedes the argument with a "0x"
 //!     * `t` - precedes the argument with a "0b"
 //!     * `o` - precedes the argument with a "0o"
 //! * '0' - This is used to indicate for integer formats that the padding should
 //!         both be done with a `0` character as well as be sign-aware. A format
-//!         like `{:08d}` would yield `00000001` for the integer `1`, while the same
-//!         format would yield `-0000001` for the integer `-1`. Notice that the
-//!         negative version has one fewer zero than the positive version.
+//!         like `{:08d}` would yield `00000001` for the integer `1`, while the
+//!         same format would yield `-0000001` for the integer `-1`. Notice that
+//!         the negative version has one fewer zero than the positive version.
 //!
 //! ### Width
 //!
-//! This is a parameter for the "minimum width" that the format should take up. If
-//! the value's string does not fill up this many characters, then the padding
-//! specified by fill/alignment will be used to take up the required space.
+//! This is a parameter for the "minimum width" that the format should take up.
+//! If the value's string does not fill up this many characters, then the
+//! padding specified by fill/alignment will be used to take up the required
+//! space.
 //!
 //! The default fill/alignment for non-numerics is a space and left-aligned. The
-//! defaults for numeric formatters is also a space but with right-alignment. If the
-//! '0' flag is specified for numerics, then the implicit fill character is '0'.
+//! defaults for numeric formatters is also a space but with right-alignment. If
+//! the '0' flag is specified for numerics, then the implicit fill character is
+//! '0'.
 //!
 //! The value for the width can also be provided as a `uint` in the list of
 //! parameters by using the `2$` syntax indicating that the second argument is a
@@ -379,19 +388,19 @@
 //! ### Precision
 //!
 //! For non-numeric types, this can be considered a "maximum width". If the
-//! resulting string is longer than this width, then it is truncated down to this
-//! many characters and only those are emitted.
+//! resulting string is longer than this width, then it is truncated down to
+//! this many characters and only those are emitted.
 //!
 //! For integral types, this has no meaning currently.
 //!
-//! For floating-point types, this indicates how many digits after the decimal point
-//! should be printed.
+//! For floating-point types, this indicates how many digits after the decimal
+//! point should be printed.
 //!
 //! ## Escaping
 //!
-//! The literal characters `{` and `}` may be included in a string by preceding them
-//! with the same character. For example, the `{` character is escaped with `{{` and
-//! the `}` character is escaped with `}}`.
+//! The literal characters `{` and `}` may be included in a string by preceding
+//! them with the same character. For example, the `{` character is escaped with
+//! `{{` and the `}` character is escaped with `}}`.
 
 #![experimental]
 

From 1ae0ed43cd2fe1ad62b5f70ab01a0a0b3cec7dd2 Mon Sep 17 00:00:00 2001
From: Niko Matsakis <niko@alum.mit.edu>
Date: Wed, 26 Nov 2014 20:39:45 -0500
Subject: [PATCH 39/40] Fix extraneous +Send

---
 src/libterm/lib.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/libterm/lib.rs b/src/libterm/lib.rs
index 611c10ea71a09..f41c93764647e 100644
--- a/src/libterm/lib.rs
+++ b/src/libterm/lib.rs
@@ -124,7 +124,7 @@ pub fn stderr() -> Option<Box<Terminal<WriterWrapper> + Send>> {
 #[cfg(windows)]
 /// Return a Terminal wrapping stderr, or None if a terminal couldn't be
 /// opened.
-pub fn stderr() -> Option<Box<Terminal<WriterWrapper> + Send> + Send> {
+pub fn stderr() -> Option<Box<Terminal<WriterWrapper> + Send>> {
     let ti = TerminfoTerminal::new(WriterWrapper {
         wrapped: box std::io::stderr() as Box<Writer + Send>,
     });

From dabc62d9cd478455b1792bd590eeb4d21cd84a60 Mon Sep 17 00:00:00 2001
From: Niko Matsakis <niko@alum.mit.edu>
Date: Wed, 26 Nov 2014 20:42:50 -0500
Subject: [PATCH 40/40] Fix diagnostic conflict

---
 src/librustc/diagnostics.rs           | 3 ++-
 src/librustc/middle/typeck/astconv.rs | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/librustc/diagnostics.rs b/src/librustc/diagnostics.rs
index 1873213fadf1d..81209763a0c5a 100644
--- a/src/librustc/diagnostics.rs
+++ b/src/librustc/diagnostics.rs
@@ -147,5 +147,6 @@ register_diagnostics!(
     E0168,
     E0169,
     E0170,
-    E0171
+    E0171,
+    E0172
 )
diff --git a/src/librustc/middle/typeck/astconv.rs b/src/librustc/middle/typeck/astconv.rs
index c10fb55c9bdd9..89c004fc64596 100644
--- a/src/librustc/middle/typeck/astconv.rs
+++ b/src/librustc/middle/typeck/astconv.rs
@@ -703,7 +703,7 @@ fn ast_ty_to_trait_ref<'tcx,AC,RS>(this: &AC,
                                                     path));
                 }
                 _ => {
-                    span_err!(this.tcx().sess, ty.span, E0170, "expected a reference to a trait");
+                    span_err!(this.tcx().sess, ty.span, E0172, "expected a reference to a trait");
                     Err(ErrorReported)
                 }
             }