diff --git a/configure b/configure
index 12782fa9fdb62..8f757a0715f8c 100755
--- a/configure
+++ b/configure
@@ -731,6 +731,7 @@ do
     make_dir $h/test/perf
     make_dir $h/test/pretty
     make_dir $h/test/debug-info
+    make_dir $h/test/codegen
     make_dir $h/test/doc-tutorial
     make_dir $h/test/doc-tutorial-ffi
     make_dir $h/test/doc-tutorial-macros
diff --git a/mk/tests.mk b/mk/tests.mk
index 6b6f515ce2b5a..7a5a5dc15c30e 100644
--- a/mk/tests.mk
+++ b/mk/tests.mk
@@ -246,6 +246,7 @@ check-stage$(1)-T-$(2)-H-$(3)-exec:     				\
         check-stage$(1)-T-$(2)-H-$(3)-crates-exec                      \
 	check-stage$(1)-T-$(2)-H-$(3)-bench-exec			\
 	check-stage$(1)-T-$(2)-H-$(3)-debuginfo-exec \
+	check-stage$(1)-T-$(2)-H-$(3)-codegen-exec \
 	check-stage$(1)-T-$(2)-H-$(3)-doc-exec \
 	check-stage$(1)-T-$(2)-H-$(3)-pretty-exec
 
@@ -430,6 +431,8 @@ CFAIL_RS := $(wildcard $(S)src/test/compile-fail/*.rs)
 BENCH_RS := $(wildcard $(S)src/test/bench/*.rs)
 PRETTY_RS := $(wildcard $(S)src/test/pretty/*.rs)
 DEBUGINFO_RS := $(wildcard $(S)src/test/debug-info/*.rs)
+CODEGEN_RS := $(wildcard $(S)src/test/codegen/*.rs)
+CODEGEN_CC := $(wildcard $(S)src/test/codegen/*.cc)
 
 # perf tests are the same as bench tests only they run under
 # a performance monitor.
@@ -443,6 +446,7 @@ BENCH_TESTS := $(BENCH_RS)
 PERF_TESTS := $(PERF_RS)
 PRETTY_TESTS := $(PRETTY_RS)
 DEBUGINFO_TESTS := $(DEBUGINFO_RS)
+CODEGEN_TESTS := $(CODEGEN_RS) $(CODEGEN_CC)
 
 CTEST_SRC_BASE_rpass = run-pass
 CTEST_BUILD_BASE_rpass = run-pass
@@ -479,10 +483,19 @@ CTEST_BUILD_BASE_debuginfo = debug-info
 CTEST_MODE_debuginfo = debug-info
 CTEST_RUNTOOL_debuginfo = $(CTEST_RUNTOOL)
 
+CTEST_SRC_BASE_codegen = codegen
+CTEST_BUILD_BASE_codegen = codegen
+CTEST_MODE_codegen = codegen
+CTEST_RUNTOOL_codegen = $(CTEST_RUNTOOL)
+
 ifeq ($(CFG_GDB),)
 CTEST_DISABLE_debuginfo = "no gdb found"
 endif
 
+ifeq ($(CFG_CLANG),)
+CTEST_DISABLE_codegen = "no clang found"
+endif
+
 ifeq ($(CFG_OSTYPE),apple-darwin)
 CTEST_DISABLE_debuginfo = "gdb on darwin needs root"
 endif
@@ -507,6 +520,8 @@ CTEST_COMMON_ARGS$(1)-T-$(2)-H-$(3) :=						\
 		--compile-lib-path $$(HLIB$(1)_H_$(3))				\
         --run-lib-path $$(TLIB$(1)_T_$(2)_H_$(3))			\
         --rustc-path $$(HBIN$(1)_H_$(3))/rustc$$(X_$(3))			\
+        --clang-path $(if $(CFG_CLANG),$(CFG_CLANG),clang) \
+        --llvm-bin-path $(CFG_LLVM_INST_DIR_$(CFG_BUILD_TRIPLE))/bin \
         --aux-base $$(S)src/test/auxiliary/                 \
         --stage-id stage$(1)-$(2)							\
         --target $(2)                                       \
@@ -522,6 +537,7 @@ CTEST_DEPS_cfail_$(1)-T-$(2)-H-$(3) = $$(CFAIL_TESTS)
 CTEST_DEPS_bench_$(1)-T-$(2)-H-$(3) = $$(BENCH_TESTS)
 CTEST_DEPS_perf_$(1)-T-$(2)-H-$(3) = $$(PERF_TESTS)
 CTEST_DEPS_debuginfo_$(1)-T-$(2)-H-$(3) = $$(DEBUGINFO_TESTS)
+CTEST_DEPS_codegen_$(1)-T-$(2)-H-$(3) = $$(CODEGEN_TESTS)
 
 endef
 
@@ -565,7 +581,7 @@ endif
 
 endef
 
-CTEST_NAMES = rpass rpass-full rfail cfail bench perf debuginfo
+CTEST_NAMES = rpass rpass-full rfail cfail bench perf debuginfo codegen
 
 $(foreach host,$(CFG_HOST_TRIPLES), \
  $(eval $(foreach target,$(CFG_TARGET_TRIPLES), \
@@ -674,6 +690,7 @@ TEST_GROUPS = \
 	bench \
 	perf \
 	debuginfo \
+	codegen \
 	doc \
 	$(foreach docname,$(DOC_TEST_NAMES),doc-$(docname)) \
 	pretty \
diff --git a/src/compiletest/common.rs b/src/compiletest/common.rs
index 38289f6274180..df00286c87f25 100644
--- a/src/compiletest/common.rs
+++ b/src/compiletest/common.rs
@@ -15,6 +15,7 @@ pub enum mode {
     mode_run_pass,
     mode_pretty,
     mode_debug_info,
+    mode_codegen
 }
 
 pub struct config {
@@ -27,6 +28,12 @@ pub struct config {
     // The rustc executable
     rustc_path: Path,
 
+    // The clang executable
+    clang_path: Option<Path>,
+
+    // The directory containing the LLVM binaries
+    llvm_bin_path: Option<Path>,
+
     // The directory containing the tests to run
     src_base: Path,
 
diff --git a/src/compiletest/compiletest.rs b/src/compiletest/compiletest.rs
index 7d9a7c3ea75dc..a411e714247ed 100644
--- a/src/compiletest/compiletest.rs
+++ b/src/compiletest/compiletest.rs
@@ -19,6 +19,7 @@ extern mod extra;
 use std::os;
 
 use extra::getopts;
+use extra::getopts::groups::{optopt, optflag, reqopt};
 use extra::test;
 
 use common::config;
@@ -27,6 +28,7 @@ use common::mode_run_fail;
 use common::mode_compile_fail;
 use common::mode_pretty;
 use common::mode_debug_info;
+use common::mode_codegen;
 use common::mode;
 use util::logv;
 
@@ -45,31 +47,54 @@ pub fn main() {
 }
 
 pub fn parse_config(args: ~[~str]) -> config {
-    let opts =
-        ~[getopts::reqopt("compile-lib-path"),
-          getopts::reqopt("run-lib-path"),
-          getopts::reqopt("rustc-path"), getopts::reqopt("src-base"),
-          getopts::reqopt("build-base"), getopts::reqopt("aux-base"),
-          getopts::reqopt("stage-id"),
-          getopts::reqopt("mode"), getopts::optflag("ignored"),
-          getopts::optopt("runtool"), getopts::optopt("rustcflags"),
-          getopts::optflag("verbose"),
-          getopts::optopt("logfile"),
-          getopts::optflag("jit"),
-          getopts::optflag("newrt"),
-          getopts::optopt("target"),
-          getopts::optopt("adb-path"),
-          getopts::optopt("adb-test-dir")
+
+    let groups : ~[getopts::groups::OptGroup] =
+        ~[reqopt("", "compile-lib-path", "path to host shared libraries", "PATH"),
+          reqopt("", "run-lib-path", "path to target shared libraries", "PATH"),
+          reqopt("", "rustc-path", "path to rustc to use for compiling", "PATH"),
+          optopt("", "clang-path", "path to clang executable for codegen tests", "PATH"),
+          optopt("", "llvm-bin-path", "path to directory holding llvm binaries", "DIR"),
+          reqopt("", "src-base", "directory to scan for test files", "PATH"),
+          reqopt("", "build-base", "directory to deposit test outputs", "PATH"),
+          reqopt("", "aux-base", "directory to find auxiliary test files", "PATH"),
+          reqopt("", "stage-id", "the target-stage identifier", "stageN-TARGET"),
+          reqopt("", "mode", "which sort of compile tests to run",
+                 "(compile-fail|run-fail|run-pass|pretty|debug-info|codegen)"),
+          optflag("", "ignored", "run tests marked as ignored / xfailed"),
+          optopt("", "runtool", "supervisor program to run tests under \
+                                 (eg. emulator, valgrind)", "PROGRAM"),
+          optopt("", "rustcflags", "flags to pass to rustc", "FLAGS"),
+          optflag("", "verbose", "run tests verbosely, showing all output"),
+          optopt("", "logfile", "file to log test execution to", "FILE"),
+          optflag("", "jit", "run tests under the JIT"),
+          optflag("", "newrt", "run tests on the new runtime / scheduler"),
+          optopt("", "target", "the target to build for", "TARGET"),
+          optopt("", "adb-path", "path to the android debugger", "PATH"),
+          optopt("", "adb-test-dir", "path to tests for the android debugger", "PATH"),
+          optflag("h", "help", "show this message"),
          ];
 
     assert!(!args.is_empty());
+    let argv0 = copy args[0];
     let args_ = args.tail();
+    if args.len() > 1 && (args[1] == ~"-h" || args[1] == ~"--help") {
+        let message = fmt!("Usage: %s [OPTIONS] [TESTNAME...]", argv0);
+        println(getopts::groups::usage(message, groups));
+        fail!()
+    }
+
     let matches =
-        &match getopts::getopts(args_, opts) {
+        &match getopts::groups::getopts(args_, groups) {
           Ok(m) => m,
           Err(f) => fail!(getopts::fail_str(f))
         };
 
+    if getopts::opt_present(matches, "h") || getopts::opt_present(matches, "help") {
+        let message = fmt!("Usage: %s [OPTIONS] [TESTNAME...]", argv0);
+        println(getopts::groups::usage(message, groups));
+        fail!()
+    }
+
     fn opt_path(m: &getopts::Matches, nm: &str) -> Path {
         Path(getopts::opt_str(m, nm))
     }
@@ -78,6 +103,8 @@ pub fn parse_config(args: ~[~str]) -> config {
         compile_lib_path: getopts::opt_str(matches, "compile-lib-path"),
         run_lib_path: getopts::opt_str(matches, "run-lib-path"),
         rustc_path: opt_path(matches, "rustc-path"),
+        clang_path: getopts::opt_maybe_str(matches, "clang-path").map(|s| Path(*s)),
+        llvm_bin_path: getopts::opt_maybe_str(matches, "llvm-bin-path").map(|s| Path(*s)),
         src_base: opt_path(matches, "src-base"),
         build_base: opt_path(matches, "build-base"),
         aux_base: opt_path(matches, "aux-base"),
@@ -159,6 +186,7 @@ pub fn str_mode(s: ~str) -> mode {
       ~"run-pass" => mode_run_pass,
       ~"pretty" => mode_pretty,
       ~"debug-info" => mode_debug_info,
+      ~"codegen" => mode_codegen,
       _ => fail!("invalid mode")
     }
 }
@@ -170,6 +198,7 @@ pub fn mode_str(mode: mode) -> ~str {
       mode_run_pass => ~"run-pass",
       mode_pretty => ~"pretty",
       mode_debug_info => ~"debug-info",
+      mode_codegen => ~"codegen",
     }
 }
 
@@ -187,8 +216,9 @@ pub fn test_opts(config: &config) -> test::TestOpts {
         logfile: copy config.logfile,
         run_tests: true,
         run_benchmarks: false,
-        save_results: None,
-        compare_results: None
+        ratchet_metrics: None,
+        ratchet_noise_percent: None,
+        save_metrics: None,
     }
 }
 
diff --git a/src/compiletest/runtest.rs b/src/compiletest/runtest.rs
index 91016ba91fa55..dee07c6de495d 100644
--- a/src/compiletest/runtest.rs
+++ b/src/compiletest/runtest.rs
@@ -39,7 +39,8 @@ pub fn run(config: config, testfile: ~str) {
       mode_run_fail => run_rfail_test(&config, &props, &testfile),
       mode_run_pass => run_rpass_test(&config, &props, &testfile),
       mode_pretty => run_pretty_test(&config, &props, &testfile),
-      mode_debug_info => run_debuginfo_test(&config, &props, &testfile)
+      mode_debug_info => run_debuginfo_test(&config, &props, &testfile),
+      mode_codegen => run_codegen_test(&config, &props, &testfile)
     }
 }
 
@@ -835,3 +836,118 @@ fn _arm_push_aux_shared_library(config: &config, testfile: &Path) {
         }
     }
 }
+
+// codegen tests (vs. clang)
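+//
+// Each codegen test is a foo.rs / foo.cc pair. The harness below:
+//   1. compiles foo.rs with --save-temps so rustc leaves its bitcode behind,
+//   2. compiles foo.cc with `clang -c -emit-llvm`,
+//   3. runs llvm-extract to pull the `test` function out of each bitcode
+//      file, and
+//   4. runs llvm-dis on both extracts, yielding .ll files that can then
+//      be compared.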
+
+fn make_o_name(config: &config, testfile: &Path) -> Path {
+    output_base_name(config, testfile).with_filetype("o")
+}
+
+fn append_suffix_to_stem(p: &Path, suffix: &str) -> Path {
+    if suffix.len() == 0 {
+        copy *p
+    } else {
+        let stem = p.filestem().get();
+        p.with_filestem(stem + "-" + suffix)
+    }
+}
+
+fn compile_test_and_save_bitcode(config: &config, props: &TestProps,
+                                 testfile: &Path) -> ProcRes {
+    let link_args = ~[~"-L", aux_output_dir_name(config, testfile).to_str()];
+    let llvm_args = ~[~"-c", ~"--lib", ~"--save-temps"];
+    let args = make_compile_args(config, props,
+                                 link_args + llvm_args,
+                                 make_o_name, testfile);
+    compose_and_run_compiler(config, props, testfile, args, None)
+}
+
+fn compile_cc_with_clang_and_save_bitcode(config: &config, _props: &TestProps,
+                                          testfile: &Path) -> ProcRes {
+    let bitcodefile = output_base_name(config, testfile).with_filetype("bc");
+    let bitcodefile = append_suffix_to_stem(&bitcodefile, "clang");
+    let ProcArgs = ProcArgs {
+        prog: config.clang_path.get_ref().to_str(),
+        args: ~[~"-c",
+                ~"-emit-llvm",
+                ~"-o", bitcodefile.to_str(),
+                testfile.with_filetype("cc").to_str() ]
+    };
+    compose_and_run(config, testfile, ProcArgs, ~[], "", None)
+}
+
+fn extract_function_from_bitcode(config: &config, _props: &TestProps,
+                                 fname: &str, testfile: &Path,
+                                 suffix: &str) -> ProcRes {
+    let bitcodefile = output_base_name(config, testfile).with_filetype("bc");
+    let bitcodefile = append_suffix_to_stem(&bitcodefile, suffix);
+    let extracted_bc = append_suffix_to_stem(&bitcodefile, "extract");
+    let ProcArgs = ProcArgs {
+        prog: config.llvm_bin_path.get_ref().push("llvm-extract").to_str(),
+        args: ~[~"-func=" + fname,
+                ~"-o=" + extracted_bc.to_str(),
+                bitcodefile.to_str() ]
+    };
+    compose_and_run(config, testfile, ProcArgs, ~[], "", None)
+}
+
+fn disassemble_extract(config: &config, _props: &TestProps,
+                       testfile: &Path, suffix: &str) -> ProcRes {
+    let bitcodefile = output_base_name(config, testfile).with_filetype("bc");
+    let bitcodefile = append_suffix_to_stem(&bitcodefile, suffix);
+    let extracted_bc = append_suffix_to_stem(&bitcodefile, "extract");
+    let extracted_ll = extracted_bc.with_filetype("ll");
+    let ProcArgs = ProcArgs {
+        prog: config.llvm_bin_path.get_ref().push("llvm-dis").to_str(),
+        args: ~[~"-o=" + extracted_ll.to_str(),
+                extracted_bc.to_str() ]
+    };
+    compose_and_run(config, testfile, ProcArgs, ~[], "", None)
+}
+
+fn run_codegen_test(config: &config, props: &TestProps, testfile: &Path) {
+
+    if config.llvm_bin_path.is_none() {
+        fatal(~"missing --llvm-bin-path");
+    }
+
+    if config.clang_path.is_none() {
+        fatal(~"missing --clang-path");
+    }
+
+    let mut ProcRes = compile_test_and_save_bitcode(config, props, testfile);
+    if ProcRes.status != 0 {
+        fatal_ProcRes(~"rustc compilation failed!", &ProcRes);
+    }
+
+    ProcRes = extract_function_from_bitcode(config, props, "test", testfile, "");
+    if ProcRes.status != 0 {
+        fatal_ProcRes(~"extracting 'test' function failed", &ProcRes);
+    }
+
+    ProcRes = disassemble_extract(config, props, testfile, "");
+    if ProcRes.status != 0 {
+        fatal_ProcRes(~"disassembling extract failed", &ProcRes);
+    }
+
+    let mut ProcRes = compile_cc_with_clang_and_save_bitcode(config, props, testfile);
+    if ProcRes.status != 0 {
+        fatal_ProcRes(~"clang compilation failed!", &ProcRes);
+    }
+
+    ProcRes = extract_function_from_bitcode(config, props, "test", testfile, "clang");
+    if ProcRes.status != 0 {
+        fatal_ProcRes(~"extracting 'test' function failed", &ProcRes);
+    }
+
+    ProcRes = disassemble_extract(config, props, testfile, "clang");
+    if ProcRes.status != 0 {
+        fatal_ProcRes(~"disassembling extract failed", &ProcRes);
+    }
+}
+
diff --git a/src/libextra/json.rs b/src/libextra/json.rs
index 06b6d0cb29e8a..2f17e4a741761 100644
--- a/src/libextra/json.rs
+++ b/src/libextra/json.rs
@@ -27,6 +27,7 @@ use std::to_str;
 use serialize::Encodable;
 use serialize;
 use sort::Sort;
+use treemap::TreeMap;
 
 /// Represents a json value
 pub enum Json {
@@ -1225,7 +1226,7 @@ impl Ord for Json {
 }
 
 /// A trait for converting values to JSON
-trait ToJson {
+pub trait ToJson {
     /// Converts the value of `self` to an instance of JSON
     fn to_json(&self) -> Json;
 }
@@ -1330,7 +1331,17 @@ impl<A:ToJson> ToJson for ~[A] {
     fn to_json(&self) -> Json { List(self.map(|elt| elt.to_json())) }
 }
 
-impl<A:ToJson + Copy> ToJson for HashMap<~str, A> {
+impl<A:ToJson> ToJson for HashMap<~str, A> {
+    fn to_json(&self) -> Json {
+        let mut d = HashMap::new();
+        for self.iter().advance |(key, value)| {
+            d.insert(copy *key, value.to_json());
+        }
+        Object(~d)
+    }
+}
+
+impl<A:ToJson> ToJson for TreeMap<~str, A> {
     fn to_json(&self) -> Json {
         let mut d = HashMap::new();
         for self.iter().advance |(key, value)| {
diff --git a/src/libextra/stats.rs b/src/libextra/stats.rs
index 5446515c1efad..b6a2deb166331 100644
--- a/src/libextra/stats.rs
+++ b/src/libextra/stats.rs
@@ -100,6 +100,7 @@ pub trait Stats {
 }
 
 /// Extracted collection of all the summary statistics of a sample set.
+#[deriving(Eq)]
 struct Summary {
     sum: f64,
     min: f64,
@@ -116,7 +117,9 @@ struct Summary {
 }
 
 impl Summary {
-    fn new(samples: &[f64]) -> Summary {
+
+    /// Construct a new summary of a sample set.
+    pub fn new(samples: &[f64]) -> Summary {
         Summary {
             sum: samples.sum(),
             min: samples.min(),
diff --git a/src/libextra/test.rs b/src/libextra/test.rs
index 1c6e2a25c01b7..98338b4eaedfb 100644
--- a/src/libextra/test.rs
+++ b/src/libextra/test.rs
@@ -17,24 +17,26 @@
 
 
 use getopts;
+use json::ToJson;
+use json;
+use serialize::Decodable;
 use sort;
 use stats::Stats;
+use stats;
 use term;
 use time::precise_time_ns;
+use treemap::TreeMap;
 
 use std::comm::{stream, SharedChan};
 use std::either;
 use std::io;
-use std::num;
-use std::option;
-use std::rand::RngUtil;
-use std::rand;
 use std::result;
 use std::task;
 use std::to_str::ToStr;
 use std::u64;
-use std::uint;
-use std::vec;
+use std::f64;
+use std::hashmap::HashMap;
+use std::os;
 
 
 // The name of a test. By convention this follows the rules for rust
@@ -87,6 +89,26 @@ pub struct TestDescAndFn {
     testfn: TestFn,
 }
 
+#[deriving(Encodable,Decodable,Eq)]
+pub struct Metric {
+    value: f64,
+    noise: f64
+}
+
+pub struct MetricMap(TreeMap<~str,Metric>);
+
+/// Analysis of a single change in metric
+#[deriving(Eq)]
+pub enum MetricChange {
+    LikelyNoise,
+    MetricAdded,
+    MetricRemoved,
+    Improvement(f64),
+    Regression(f64)
+}
+
+pub type MetricDiff = TreeMap<~str,MetricChange>;
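+
+// How the metric types above are used: the console runner folds each
+// benchmark's summary into a MetricMap, which `--save-metrics` writes out
+// as JSON and `--ratchet-metrics` compares (as a MetricDiff) against a
+// previously saved run, with `--ratchet-noise-percent` optionally
+// overriding each metric's recorded noise band.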
+
 // The default console test runner. It accepts the command line
 // arguments and a vector of test_descs.
 pub fn test_main(args: &[~str], tests: ~[TestDescAndFn]) {
@@ -127,8 +149,9 @@ pub struct TestOpts {
     run_ignored: bool,
     run_tests: bool,
     run_benchmarks: bool,
-    save_results: Option<Path>,
-    compare_results: Option<Path>,
+    ratchet_metrics: Option<Path>,
+    ratchet_noise_percent: Option<f64>,
+    save_metrics: Option<Path>,
     logfile: Option<Path>
 }
 
@@ -140,8 +163,9 @@ pub fn parse_opts(args: &[~str]) -> OptRes {
     let opts = ~[getopts::optflag("ignored"),
                  getopts::optflag("test"),
                  getopts::optflag("bench"),
-                 getopts::optopt("save"),
-                 getopts::optopt("diff"),
+                 getopts::optopt("save-metrics"),
+                 getopts::optopt("ratchet-metrics"),
+                 getopts::optopt("ratchet-noise-percent"),
                  getopts::optopt("logfile")];
     let matches =
         match getopts::getopts(args_, opts) {
@@ -151,8 +175,8 @@ pub fn parse_opts(args: &[~str]) -> OptRes {
 
     let filter =
         if matches.free.len() > 0 {
-            option::Some(copy (matches).free[0])
-        } else { option::None };
+            Some(copy (matches).free[0])
+        } else { None };
 
     let run_ignored = getopts::opt_present(&matches, "ignored");
 
@@ -163,19 +187,24 @@ pub fn parse_opts(args: &[~str]) -> OptRes {
     let run_tests = ! run_benchmarks ||
         getopts::opt_present(&matches, "test");
 
-    let save_results = getopts::opt_maybe_str(&matches, "save");
-    let save_results = save_results.map(|s| Path(*s));
+    let ratchet_metrics = getopts::opt_maybe_str(&matches, "ratchet-metrics");
+    let ratchet_metrics = ratchet_metrics.map(|s| Path(*s));
 
-    let compare_results = getopts::opt_maybe_str(&matches, "diff");
-    let compare_results = compare_results.map(|s| Path(*s));
+    let ratchet_noise_percent =
+        getopts::opt_maybe_str(&matches, "ratchet-noise-percent");
+    let ratchet_noise_percent = ratchet_noise_percent.map(|s| f64::from_str(*s).get());
+
+    let save_metrics = getopts::opt_maybe_str(&matches, "save-metrics");
+    let save_metrics = save_metrics.map(|s| Path(*s));
 
     let test_opts = TestOpts {
         filter: filter,
         run_ignored: run_ignored,
         run_tests: run_tests,
         run_benchmarks: run_benchmarks,
-        save_results: save_results,
-        compare_results: compare_results,
+        ratchet_metrics: ratchet_metrics,
+        ratchet_noise_percent: ratchet_noise_percent,
+        save_metrics: save_metrics,
         logfile: logfile
     };
 
@@ -184,7 +213,7 @@ pub fn parse_opts(args: &[~str]) -> OptRes {
 
 #[deriving(Eq)]
 pub struct BenchSamples {
-    ns_iter_samples: ~[f64],
+    ns_iter_summ: stats::Summary,
     mb_s: uint
 }
 
@@ -194,181 +223,288 @@ pub enum TestResult { TrOk, TrFailed, TrIgnored, TrBench(BenchSamples) }
 struct ConsoleTestState {
     out: @io::Writer,
     log_out: Option<@io::Writer>,
+    term: Option<term::Terminal>,
     use_color: bool,
     total: uint,
     passed: uint,
     failed: uint,
     ignored: uint,
     benchmarked: uint,
+    metrics: MetricMap,
     failures: ~[TestDesc]
 }
 
-// A simple console test runner
-pub fn run_tests_console(opts: &TestOpts,
-                         tests: ~[TestDescAndFn]) -> bool {
-    fn callback(event: &TestEvent, st: &mut ConsoleTestState) {
-        debug!("callback(event=%?)", event);
-        match copy *event {
-          TeFiltered(ref filtered_tests) => {
-            st.total = filtered_tests.len();
-            let noun = if st.total != 1 { ~"tests" } else { ~"test" };
-            st.out.write_line(fmt!("\nrunning %u %s", st.total, noun));
-          }
-          TeWait(ref test) => st.out.write_str(
-              fmt!("test %s ... ", test.name.to_str())),
-          TeResult(test, result) => {
-            match st.log_out {
-                Some(f) => write_log(f, copy result, &test),
-                None => ()
-            }
-            match result {
-              TrOk => {
-                st.passed += 1;
-                write_ok(st.out, st.use_color);
-                st.out.write_line("");
-              }
-              TrFailed => {
-                st.failed += 1;
-                write_failed(st.out, st.use_color);
-                st.out.write_line("");
-                st.failures.push(test);
-              }
-              TrIgnored => {
-                st.ignored += 1;
-                write_ignored(st.out, st.use_color);
-                st.out.write_line("");
-              }
-              TrBench(bs) => {
-                st.benchmarked += 1u;
-                write_bench(st.out, st.use_color);
-                st.out.write_line(fmt!(": %s",
-                                       fmt_bench_samples(&bs)));
-              }
-            }
-          }
+impl ConsoleTestState {
+    pub fn new(opts: &TestOpts) -> ConsoleTestState {
+        let log_out = match opts.logfile {
+            Some(ref path) => match io::file_writer(path,
+                                                    [io::Create,
+                                                     io::Truncate]) {
+                result::Ok(w) => Some(w),
+                result::Err(ref s) => {
+                    fail!("can't open output file: %s", *s)
+                }
+            },
+            None => None
+        };
+        let out = io::stdout();
+        let term = match term::Terminal::new(out) {
+            Err(_) => None,
+            Ok(t) => Some(t)
+        };
+        ConsoleTestState {
+            out: out,
+            log_out: log_out,
+            use_color: use_color(),
+            term: term,
+            total: 0u,
+            passed: 0u,
+            failed: 0u,
+            ignored: 0u,
+            benchmarked: 0u,
+            metrics: MetricMap::new(),
+            failures: ~[]
         }
     }
 
-    let log_out = match opts.logfile {
-        Some(ref path) => match io::file_writer(path,
-                                                [io::Create,
-                                                 io::Truncate]) {
-          result::Ok(w) => Some(w),
-          result::Err(ref s) => {
-              fail!("can't open output file: %s", *s)
-          }
-        },
-        None => None
-    };
+    pub fn write_ok(&self) {
+        self.write_pretty("ok", term::color::GREEN);
+    }
 
-    let st = @mut ConsoleTestState {
-        out: io::stdout(),
-        log_out: log_out,
-        use_color: use_color(),
-        total: 0u,
-        passed: 0u,
-        failed: 0u,
-        ignored: 0u,
-        benchmarked: 0u,
-        failures: ~[]
-    };
+    pub fn write_failed(&self) {
+        self.write_pretty("FAILED", term::color::RED);
+    }
 
-    run_tests(opts, tests, |x| callback(&x, st));
+    pub fn write_ignored(&self) {
+        self.write_pretty("ignored", term::color::YELLOW);
+    }
 
-    assert!(st.passed + st.failed +
-                 st.ignored + st.benchmarked == st.total);
-    let success = st.failed == 0u;
+    pub fn write_bench(&self) {
+        self.write_pretty("bench", term::color::CYAN);
+    }
 
-    if !success {
-        print_failures(st);
+
+    pub fn write_added(&self) {
+        self.write_pretty("added", term::color::GREEN);
     }
 
-    {
-      let st: &mut ConsoleTestState = st;
-      st.out.write_str(fmt!("\nresult: "));
-      if success {
-          // There's no parallelism at this point so it's safe to use color
-          write_ok(st.out, true);
-      } else {
-          write_failed(st.out, true);
-      }
-      st.out.write_str(fmt!(". %u passed; %u failed; %u ignored\n\n",
-                            st.passed, st.failed, st.ignored));
+    pub fn write_improved(&self) {
+        self.write_pretty("improved", term::color::GREEN);
     }
 
-    return success;
+    pub fn write_removed(&self) {
+        self.write_pretty("removed", term::color::YELLOW);
+    }
 
-    fn fmt_bench_samples(bs: &BenchSamples) -> ~str {
-        use stats::Stats;
-        if bs.mb_s != 0 {
-            fmt!("%u ns/iter (+/- %u) = %u MB/s",
-                 bs.ns_iter_samples.median() as uint,
-                 3 * (bs.ns_iter_samples.median_abs_dev() as uint),
-                 bs.mb_s)
-        } else {
-            fmt!("%u ns/iter (+/- %u)",
-                 bs.ns_iter_samples.median() as uint,
-                 3 * (bs.ns_iter_samples.median_abs_dev() as uint))
+    pub fn write_regressed(&self) {
+        self.write_pretty("regressed", term::color::RED);
+    }
+
+    pub fn write_pretty(&self,
+                        word: &str,
+                        color: term::color::Color) {
+        match self.term {
+            None => self.out.write_str(word),
+            Some(ref t) => {
+                if self.use_color {
+                    t.fg(color);
+                }
+                self.out.write_str(word);
+                if self.use_color {
+                    t.reset();
+                }
+            }
         }
     }
 
-    fn write_log(out: @io::Writer, result: TestResult, test: &TestDesc) {
-        out.write_line(fmt!("%s %s",
-                    match result {
-                        TrOk => ~"ok",
-                        TrFailed => ~"failed",
-                        TrIgnored => ~"ignored",
-                        TrBench(ref bs) => fmt_bench_samples(bs)
-                    }, test.name.to_str()));
+    pub fn write_run_start(&mut self, len: uint) {
+        self.total = len;
+        let noun = if len != 1 { &"tests" } else { &"test" };
+        self.out.write_line(fmt!("\nrunning %u %s", len, noun));
     }
 
-    fn write_ok(out: @io::Writer, use_color: bool) {
-        write_pretty(out, "ok", term::color::GREEN, use_color);
+    pub fn write_test_start(&self, test: &TestDesc) {
+        self.out.write_str(fmt!("test %s ... ", test.name.to_str()));
     }
 
-    fn write_failed(out: @io::Writer, use_color: bool) {
-        write_pretty(out, "FAILED", term::color::RED, use_color);
+    pub fn write_result(&self, result: &TestResult) {
+        match *result {
+            TrOk => self.write_ok(),
+            TrFailed => self.write_failed(),
+            TrIgnored => self.write_ignored(),
+            TrBench(ref bs) => {
+                self.write_bench();
+                self.out.write_str(": " + fmt_bench_samples(bs))
+            }
+        }
+        self.out.write_str(&"\n");
     }
 
-    fn write_ignored(out: @io::Writer, use_color: bool) {
-        write_pretty(out, "ignored", term::color::YELLOW, use_color);
+    pub fn write_log(&self, test: &TestDesc, result: &TestResult) {
+        match self.log_out {
+            None => (),
+            Some(out) => {
+                out.write_line(fmt!("%s %s",
+                                    match *result {
+                                        TrOk => ~"ok",
+                                        TrFailed => ~"failed",
+                                        TrIgnored => ~"ignored",
+                                        TrBench(ref bs) => fmt_bench_samples(bs)
+                                    }, test.name.to_str()));
+            }
+        }
     }
 
-    fn write_bench(out: @io::Writer, use_color: bool) {
-        write_pretty(out, "bench", term::color::CYAN, use_color);
+    pub fn write_failures(&self) {
+        self.out.write_line("\nfailures:");
+        let mut failures = ~[];
+        for self.failures.iter().advance() |f| {
+            failures.push(f.name.to_str());
+        }
+        sort::tim_sort(failures);
+        for failures.iter().advance |name| {
+            self.out.write_line(fmt!("    %s", name.to_str()));
+        }
     }
 
-    fn write_pretty(out: @io::Writer,
-                    word: &str,
-                    color: term::color::Color,
-                    use_color: bool) {
-        let t = term::Terminal::new(out);
-        match t {
-            Ok(term)  => {
-                if use_color {
-                    term.fg(color);
+    pub fn write_metric_diff(&self, diff: &MetricDiff) {
+        let mut noise = 0;
+        let mut improved = 0;
+        let mut regressed = 0;
+        let mut added = 0;
+        let mut removed = 0;
+
+        for diff.iter().advance() |(k, v)| {
+            match *v {
+                LikelyNoise => noise += 1,
+                MetricAdded => {
+                    added += 1;
+                    self.write_added();
+                    self.out.write_line(fmt!(": %s", *k));
                 }
-                out.write_str(word);
-                if use_color {
-                    term.reset();
+                MetricRemoved => {
+                    removed += 1;
+                    self.write_removed();
+                    self.out.write_line(fmt!(": %s", *k));
                 }
-            },
-            Err(_) => out.write_str(word)
+                Improvement(pct) => {
+                    improved += 1;
+                    self.out.write_str(*k);
+                    self.out.write_str(": ");
+                    self.write_improved();
+                    self.out.write_line(fmt!(" by %.2f%%", pct as float))
+                }
+                Regression(pct) => {
+                    regressed += 1;
+                    self.out.write_str(*k);
+                    self.out.write_str(": ");
+                    self.write_regressed();
+                    self.out.write_line(fmt!(" by %.2f%%", pct as float))
+                }
+            }
+        }
+        self.out.write_line(fmt!("result of ratchet: %u metrics added, %u removed, \
+                                  %u improved, %u regressed, %u noise",
+                                 added, removed, improved, regressed, noise));
+        if regressed == 0 {
+            self.out.write_line("updated ratchet file")
+        } else {
+            self.out.write_line("left ratchet file untouched")
+        }
+    }
+
+    pub fn write_run_finish(&self,
+                            ratchet_metrics: &Option<Path>,
+                            ratchet_pct: Option<f64>) -> bool {
+        assert!(self.passed + self.failed + self.ignored + self.benchmarked == self.total);
+
+        let ratchet_success = match *ratchet_metrics {
+            None => true,
+            Some(ref pth) => {
+                self.out.write_str(fmt!("\nusing metrics ratchet: %s\n", pth.to_str()));
+                match ratchet_pct {
+                    None => (),
+                    Some(pct) =>
+                    self.out.write_str(fmt!("with noise-tolerance forced to: %f%%\n",
+                                            pct as float))
+                }
+                let (diff, ok) = self.metrics.ratchet(pth, ratchet_pct);
+                self.write_metric_diff(&diff);
+                ok
+            }
+        };
+
+        let test_success = self.failed == 0u;
+        if !test_success {
+            self.write_failures();
+        }
+
+        let success = ratchet_success && test_success;
+
+        self.out.write_str("\ntest result: ");
+        if success {
+            // There's no parallelism at this point so it's safe to use color
+            self.write_ok();
+        } else {
+            self.write_failed();
         }
+        self.out.write_str(fmt!(". %u passed; %u failed; %u ignored; %u benchmarked\n\n",
+                                self.passed, self.failed, self.ignored, self.benchmarked));
+        return success;
     }
 }
 
-fn print_failures(st: &ConsoleTestState) {
-    st.out.write_line("\nfailures:");
-    let mut failures = ~[];
-    for uint::range(0, st.failures.len()) |i| {
-        let name = copy st.failures[i].name;
-        failures.push(name.to_str());
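+// The +/- printed by fmt_bench_samples is the max-min spread of the
+// winsorized summary, rather than the 3*MAD figure the old sampler
+// reported.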
+pub fn fmt_bench_samples(bs: &BenchSamples) -> ~str {
+    if bs.mb_s != 0 {
+        fmt!("%u ns/iter (+/- %u) = %u MB/s",
+             bs.ns_iter_summ.median as uint,
+             (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint,
+             bs.mb_s)
+    } else {
+        fmt!("%u ns/iter (+/- %u)",
+             bs.ns_iter_summ.median as uint,
+             (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint)
+    }
+}
+
+// A simple console test runner
+pub fn run_tests_console(opts: &TestOpts,
+                         tests: ~[TestDescAndFn]) -> bool {
+    fn callback(event: &TestEvent, st: &mut ConsoleTestState) {
+        debug!("callback(event=%?)", event);
+        match copy *event {
+            TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
+            TeWait(ref test) => st.write_test_start(test),
+            TeResult(test, result) => {
+                st.write_log(&test, &result);
+                st.write_result(&result);
+                match result {
+                    TrOk => st.passed += 1,
+                    TrIgnored => st.ignored += 1,
+                    TrBench(bs) => {
+                        st.metrics.insert_metric(test.name.to_str(),
+                                                 bs.ns_iter_summ.median,
+                                                 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
+                        st.benchmarked += 1
+                    }
+                    TrFailed => {
+                        st.failed += 1;
+                        st.failures.push(test);
+                    }
+                }
+            }
+        }
     }
-    sort::tim_sort(failures);
-    for failures.iter().advance |name| {
-        st.out.write_line(fmt!("    %s", name.to_str()));
+    let st = @mut ConsoleTestState::new(opts);
+    run_tests(opts, tests, |x| callback(&x, st));
+    match opts.save_metrics {
+        None => (),
+        Some(ref pth) => {
+            st.metrics.save(pth);
+            st.out.write_str(fmt!("\nmetrics saved to: %s", pth.to_str()));
+        }
     }
+    return st.write_run_finish(&opts.ratchet_metrics, opts.ratchet_noise_percent);
 }
 
 #[test]
@@ -390,17 +526,19 @@ fn should_sort_failures_before_printing_them() {
 
         let st = @ConsoleTestState {
             out: wr,
-            log_out: option::None,
+            log_out: None,
+            term: None,
             use_color: false,
             total: 0u,
             passed: 0u,
             failed: 0u,
             ignored: 0u,
             benchmarked: 0u,
+            metrics: MetricMap::new(),
             failures: ~[test_b, test_a]
         };
 
-        print_failures(st);
+        st.write_failures();
     };
 
     let apos = s.find_str("a").get();
@@ -503,15 +641,17 @@ pub fn filter_tests(
         filtered
     } else {
         let filter_str = match opts.filter {
-          option::Some(ref f) => copy *f,
-          option::None => ~""
+          Some(ref f) => copy *f,
+          None => ~""
         };
 
         fn filter_fn(test: TestDescAndFn, filter_str: &str) ->
             Option<TestDescAndFn> {
             if test.desc.name.to_str().contains(filter_str) {
-                return option::Some(test);
-            } else { return option::None; }
+                return Some(test);
+            } else {
+                return None;
+            }
         }
 
         filtered.consume_iter().filter_map(|x| filter_fn(x, filter_str)).collect()
@@ -605,6 +745,143 @@ fn calc_result(desc: &TestDesc, task_succeeded: bool) -> TestResult {
     }
 }
 
+
+impl ToJson for Metric {
+    fn to_json(&self) -> json::Json {
+        let mut map = ~HashMap::new();
+        map.insert(~"value", json::Number(self.value as float));
+        map.insert(~"noise", json::Number(self.noise as float));
+        json::Object(map)
+    }
+}
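+// Serialized via `MetricMap::save` below, a metrics file is therefore a
+// JSON object mapping each metric name to { "value": ..., "noise": ... }.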
+
+impl MetricMap {
+
+    fn new() -> MetricMap {
+        MetricMap(TreeMap::new())
+    }
+
+    /// Load a MetricMap from a file.
+    fn load(p: &Path) -> MetricMap {
+        assert!(os::path_exists(p));
+        let f = io::file_reader(p).get();
+        let mut decoder = json::Decoder(json::from_reader(f).get());
+        MetricMap(Decodable::decode(&mut decoder))
+    }
+
+    /// Write a MetricMap to a file.
+    pub fn save(&self, p: &Path) {
+        let f = io::file_writer(p, [io::Create, io::Truncate]).get();
+        json::to_pretty_writer(f, &self.to_json());
+    }
+
+    /// Compare against another MetricMap. If `noise_pct` is provided, it
+    /// is treated as a percentage of each old value and used as that
+    /// measurement's noise threshold. If `None`, each measurement's noise
+    /// threshold is independently chosen as the maximum of that
+    /// measurement's recorded noise quantity in either map.
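+    ///
+    /// Illustrative example (hypothetical numbers): if `old` maps
+    /// "runtime" to `Metric { value: 1000.0, noise: 10.0 }` and `self`
+    /// maps it to 1100.0, the 100.0 delta exceeds the noise band and,
+    /// since positive noise means smaller-is-better, the result is
+    /// `Regression(10.0)`, i.e. a 10% regression.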
+    pub fn compare_to_old(&self, old: &MetricMap,
+                          noise_pct: Option<f64>) -> MetricDiff {
+        let mut diff : MetricDiff = TreeMap::new();
+        for old.iter().advance |(k, vold)| {
+            let r = match self.find(k) {
+                None => MetricRemoved,
+                Some(v) => {
+                    let delta = (v.value - vold.value);
+                    let noise = match noise_pct {
+                        None => f64::max(vold.noise.abs(), v.noise.abs()),
+                        Some(pct) => vold.value * pct / 100.0
+                    };
+                    if delta.abs() < noise {
+                        LikelyNoise
+                    } else {
+                        let pct = delta.abs() / vold.value * 100.0;
+                        if vold.noise < 0.0 {
+                            // When 'noise' is negative, it means we want
+                            // to see deltas that go up over time, and can
+                            // only tolerate slight negative movement.
+                            if delta < 0.0 {
+                                Regression(pct)
+                            } else {
+                                Improvement(pct)
+                            }
+                        } else {
+                            // When 'noise' is positive, it means we want
+                            // to see deltas that go down over time, and
+                            // can only tolerate slight positive movements.
+                            if delta < 0.0 {
+                                Improvement(pct)
+                            } else {
+                                Regression(pct)
+                            }
+                        }
+                    }
+                }
+            };
+            diff.insert(copy *k, r);
+        }
+        for self.iter().advance |(k, _)| {
+            if !diff.contains_key(k) {
+                diff.insert(copy *k, MetricAdded);
+            }
+        }
+        diff
+    }
+
+    /// Insert a named `value` (+/- `noise`) metric into the map. The value
+    /// must be non-negative. The `noise` indicates the uncertainty of the
+    /// metric, which doubles as the "noise range" of acceptable
+    /// pairwise-regressions on this named value, when comparing from one
+    /// run to the next using `compare_to_old`.
+    ///
+    /// If `noise` is positive, then it means this metric is of a value
+    /// you want to see grow smaller, so a change larger than `noise` in the
+    /// positive direction represents a regression.
+    ///
+    /// If `noise` is negative, then it means this metric is of a value
+    /// you want to see grow larger, so a change larger than `noise` in the
+    /// negative direction represents a regression.
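+    ///
+    /// For example (hypothetical map `mm`): `mm.insert_metric("runtime",
+    /// 1000.0, 2.0)` records a smaller-is-better value, while
+    /// `mm.insert_metric("throughput", 50.0, -2.0)` records one that is
+    /// expected to grow, so a drop of more than 2.0 is a regression.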
+    pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
+        let m = Metric {
+            value: value,
+            noise: noise
+        };
+        self.insert(name.to_owned(), m);
+    }
+
+    /// Attempt to "ratchet" an external metric file. This involves loading
+    /// metrics from a metric file (if it exists), comparing against
+    /// the metrics in `self` using `compare_to_old`, and rewriting the
+    /// file to contain the metrics in `self` if none of the
+    /// `MetricChange`s are `Regression`. Returns the diff as well
+    /// as a boolean indicating whether the ratchet succeeded.
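+    ///
+    /// Sketch of the intended flow (path name illustrative): the console
+    /// runner calls `metrics.ratchet(&Path("ratchet.json"), pct)` after a
+    /// run (see `write_run_finish` above); the file is rewritten with the
+    /// current metrics only when the diff contains no `Regression`.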
+    pub fn ratchet(&self, p: &Path, pct: Option<f64>) -> (MetricDiff, bool) {
+        let old = if os::path_exists(p) {
+            MetricMap::load(p)
+        } else {
+            MetricMap::new()
+        };
+
+        let diff : MetricDiff = self.compare_to_old(&old, pct);
+        let ok = do diff.iter().all() |(_, v)| {
+            match *v {
+                Regression(_) => false,
+                _ => true
+            }
+        };
+
+        if ok {
+            debug!("rewriting file '%s' with updated metrics", p.to_str());
+            self.save(p);
+        }
+        return (diff, ok)
+    }
+}
+
+
+// Benchmarking
+
 impl BenchHarness {
     /// Callback for benchmark functions to run in their body.
     pub fn iter(&mut self, inner:&fn()) {
@@ -639,105 +916,72 @@ impl BenchHarness {
         f(self);
     }
 
-    // This is the Go benchmark algorithm. It produces a single
-    // datapoint and always tries to run for 1s.
-    pub fn go_bench(&mut self, f: &fn(&mut BenchHarness)) {
-
-        // Rounds a number down to the nearest power of 10.
-        fn round_down_10(n: u64) -> u64 {
-            let mut n = n;
-            let mut res = 1;
-            while n > 10 {
-                n = n / 10;
-                res *= 10;
-            }
-            res
-        }
-
-        // Rounds x up to a number of the form [1eX, 2eX, 5eX].
-        fn round_up(n: u64) -> u64 {
-            let base = round_down_10(n);
-            if n < (2 * base) {
-                2 * base
-            } else if n < (5 * base) {
-                5 * base
-            } else {
-                10 * base
-            }
-        }
+    // This is a more statistics-driven benchmark algorithm
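+    // than the old Go-style one: it takes 50 winsorized samples of
+    // ns-per-iter at n iterations and 50 more at 5*n, and returns the
+    // 5*n summary once a loop pass has taken at least 100ms, the median
+    // absolute deviation is under 1% of the median, and the n-iteration
+    // median does not exceed the 5*n median by more than one MAD;
+    // otherwise n doubles, up to a hard cap of 10 seconds of total
+    // benchmarking.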
+    pub fn auto_bench(&mut self, f: &fn(&mut BenchHarness)) -> stats::Summary {
 
         // Initial bench run to get ballpark figure.
         let mut n = 1_u64;
         self.bench_n(n, |x| f(x));
 
-        while n < 1_000_000_000 &&
-            self.ns_elapsed() < 1_000_000_000 {
-            let last = n;
-
-            // Try to estimate iter count for 1s falling back to 1bn
-            // iterations if first run took < 1ns.
-            if self.ns_per_iter() == 0 {
-                n = 1_000_000_000;
-            } else {
-                n = 1_000_000_000 / self.ns_per_iter();
-            }
-
-            n = u64::max(u64::min(n+n/2, 100*last), last+1);
-            n = round_up(n);
-            self.bench_n(n, |x| f(x));
+        // Try to estimate the iteration count needed for 1ms, falling
+        // back to 1 million iterations if the first run took < 1ns.
+        if self.ns_per_iter() == 0 {
+            n = 1_000_000;
+        } else {
+            n = 1_000_000 / self.ns_per_iter();
         }
-    }
 
-    // This is a more statistics-driven benchmark algorithm.
-    // It stops as quickly as 50ms, so long as the statistical
-    // properties are satisfactory. If those properties are
-    // not met, it may run as long as the Go algorithm.
-    pub fn auto_bench(&mut self, f: &fn(&mut BenchHarness)) -> ~[f64] {
+        let mut total_run = 0;
+        let samples : &mut [f64] = [0.0_f64, ..50];
+        loop {
+            let loop_start = precise_time_ns();
 
-        let mut rng = rand::rng();
-        let mut magnitude = 10;
-        let mut prev_madp = 0.0;
+            for samples.mut_iter().advance() |p| {
+                self.bench_n(n as u64, |x| f(x));
+                *p = self.ns_per_iter() as f64;
+            };
 
-        loop {
-            let n_samples = rng.gen_uint_range(50, 60);
-            let n_iter = rng.gen_uint_range(magnitude,
-                                            magnitude * 2);
+            stats::winsorize(samples, 5.0);
+            let summ = stats::Summary::new(samples);
 
-            let samples = do vec::from_fn(n_samples) |_| {
-                self.bench_n(n_iter as u64, |x| f(x));
-                self.ns_per_iter() as f64
+            for samples.mut_iter().advance() |p| {
+                self.bench_n(5 * n as u64, |x| f(x));
+                *p = self.ns_per_iter() as f64;
             };
 
-            // Eliminate outliers
-            let med = samples.median();
-            let mad = samples.median_abs_dev();
-            let samples = do samples.consume_iter().filter |f| {
-                num::abs(*f - med) <= 3.0 * mad
-            }.collect::<~[f64]>();
-
-            debug!("%u samples, median %f, MAD=%f, %u survived filter",
-                   n_samples, med as float, mad as float,
-                   samples.len());
-
-            if samples.len() != 0 {
-                // If we have _any_ cluster of signal...
-                let curr_madp = samples.median_abs_dev_pct();
-                if self.ns_elapsed() > 1_000_000 &&
-                    (curr_madp < 1.0 ||
-                     num::abs(curr_madp - prev_madp) < 0.1) {
-                    return samples;
-                }
-                prev_madp = curr_madp;
+            stats::winsorize(samples, 5.0);
+            let summ5 = stats::Summary::new(samples);
 
-                if n_iter > 20_000_000 ||
-                    self.ns_elapsed() > 20_000_000 {
-                    return samples;
-                }
+            debug!("%u samples, median %f, MAD=%f, MADP=%f",
+                   samples.len(),
+                   summ.median as float,
+                   summ.median_abs_dev as float,
+                   summ.median_abs_dev_pct as float);
+
+            let now = precise_time_ns();
+            let loop_run = now - loop_start;
+
+            // If we've run for 100ms and seem to have converged to a
+            // stable median, return the 5*n summary.
+            if loop_run > 100_000_000 &&
+                summ.median_abs_dev_pct < 1.0 &&
+                summ.median - summ5.median < summ5.median_abs_dev {
+                return summ5;
+            }
+
+            total_run += loop_run;
+            // Longest we ever run for is 10s.
+            if total_run > 10_000_000_000 {
+                return summ5;
             }
 
-            magnitude *= 2;
+            n *= 2;
         }
     }
 }
 
 pub mod bench {
@@ -752,13 +996,13 @@ pub mod bench {
             bytes: 0
         };
 
-        let ns_iter_samples = bs.auto_bench(f);
+        let ns_iter_summ = bs.auto_bench(f);
 
-        let iter_s = 1_000_000_000 / (ns_iter_samples.median() as u64);
+        let iter_s = 1_000_000_000 / (ns_iter_summ.median as u64);
         let mb_s = (bs.bytes * iter_s) / 1_000_000;
 
         BenchSamples {
-            ns_iter_samples: ns_iter_samples,
+            ns_iter_summ: ns_iter_summ,
             mb_s: mb_s as uint
         }
     }
@@ -768,13 +1012,16 @@ pub mod bench {
 mod tests {
     use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
                TestDesc, TestDescAndFn,
+               Metric, MetricMap, MetricAdded, MetricRemoved,
+               Improvement, Regression, LikelyNoise,
                StaticTestName, DynTestName, DynTestFn};
     use test::{TestOpts, run_test};
 
     use std::either;
     use std::comm::{stream, SharedChan};
-    use std::option;
     use std::vec;
+    use tempfile;
+    use std::os;
 
     #[test]
     pub fn do_not_run_ignored_tests() {
@@ -877,13 +1124,14 @@ mod tests {
         // unignored tests and flip the ignore flag on the rest to false
 
         let opts = TestOpts {
-            filter: option::None,
+            filter: None,
             run_ignored: true,
-            logfile: option::None,
+            logfile: None,
             run_tests: true,
             run_benchmarks: false,
-            save_results: option::None,
-            compare_results: option::None
+            ratchet_noise_percent: None,
+            ratchet_metrics: None,
+            save_metrics: None,
         };
 
         let tests = ~[
@@ -914,13 +1162,14 @@ mod tests {
     #[test]
     pub fn sort_tests() {
         let opts = TestOpts {
-            filter: option::None,
+            filter: None,
             run_ignored: false,
-            logfile: option::None,
+            logfile: None,
             run_tests: true,
             run_benchmarks: false,
-            save_results: option::None,
-            compare_results: option::None
+            ratchet_noise_percent: None,
+            ratchet_metrics: None,
+            save_metrics: None,
         };
 
         let names =
@@ -968,4 +1217,95 @@ mod tests {
             }
         }
     }
+
+    #[test]
+    pub fn test_metricmap_compare() {
+        let mut m1 = MetricMap::new();
+        let mut m2 = MetricMap::new();
+        m1.insert_metric("in-both-noise", 1000.0, 200.0);
+        m2.insert_metric("in-both-noise", 1100.0, 200.0);
+
+        m1.insert_metric("in-first-noise", 1000.0, 2.0);
+        m2.insert_metric("in-second-noise", 1000.0, 2.0);
+
+        m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
+        m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
+
+        m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
+        m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
+
+        m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
+        m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
+
+        m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
+        m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
+
+        let diff1 = m2.compare_to_old(&m1, None);
+
+        assert_eq!(*(diff1.find(&~"in-both-noise").get()), LikelyNoise);
+        assert_eq!(*(diff1.find(&~"in-first-noise").get()), MetricRemoved);
+        assert_eq!(*(diff1.find(&~"in-second-noise").get()), MetricAdded);
+        assert_eq!(*(diff1.find(&~"in-both-want-downwards-but-regressed").get()), Regression(100.0));
+        assert_eq!(*(diff1.find(&~"in-both-want-downwards-and-improved").get()), Improvement(50.0));
+        assert_eq!(*(diff1.find(&~"in-both-want-upwards-but-regressed").get()), Regression(50.0));
+        assert_eq!(*(diff1.find(&~"in-both-want-upwards-and-improved").get()), Improvement(100.0));
+        assert_eq!(diff1.len(), 7);
+
+        let diff2 = m2.compare_to_old(&m1, Some(200.0));
+
+        assert_eq!(*(diff2.find(&~"in-both-noise").get()), LikelyNoise);
+        assert_eq!(*(diff2.find(&~"in-first-noise").get()), MetricRemoved);
+        assert_eq!(*(diff2.find(&~"in-second-noise").get()), MetricAdded);
+        assert_eq!(*(diff2.find(&~"in-both-want-downwards-but-regressed").get()), LikelyNoise);
+        assert_eq!(*(diff2.find(&~"in-both-want-downwards-and-improved").get()), LikelyNoise);
+        assert_eq!(*(diff2.find(&~"in-both-want-upwards-but-regressed").get()), LikelyNoise);
+        assert_eq!(*(diff2.find(&~"in-both-want-upwards-and-improved").get()), LikelyNoise);
+        assert_eq!(diff2.len(), 7);
+    }
+
+    pub fn ratchet_test() {
+
+        let dpth = tempfile::mkdtemp(&os::tmpdir(),
+                                     "test-ratchet").expect("can't make tempdir for ratchet test");
+        let pth = dpth.push("ratchet.json");
+
+        let mut m1 = MetricMap::new();
+        m1.insert_metric("runtime", 1000.0, 2.0);
+        m1.insert_metric("throughput", 50.0, 2.0);
+
+        let mut m2 = MetricMap::new();
+        m2.insert_metric("runtime", 1100.0, 2.0);
+        m2.insert_metric("throughput", 50.0, 2.0);
+
+        m1.save(&pth);
+
+        // Ask for a ratchet that should fail to advance.
+        let (diff1, ok1) = m2.ratchet(&pth, None);
+        assert_eq!(ok1, false);
+        assert_eq!(diff1.len(), 2);
+        assert_eq!(*(diff1.find(&~"runtime").get()), Regression(10.0));
+        assert_eq!(*(diff1.find(&~"throughput").get()), LikelyNoise);
+
+        // Check that it was not rewritten.
+        let m3 = MetricMap::load(&pth);
+        assert_eq!(m3.len(), 2);
+        assert_eq!(*(m3.find(&~"runtime").get()), Metric { value: 1000.0, noise: 2.0 });
+        assert_eq!(*(m3.find(&~"throughput").get()), Metric { value: 50.0, noise: 2.0 });
+
+        // Ask for a ratchet with an explicit noise-percentage override,
+        // that should advance.
+        let (diff2, ok2) = m2.ratchet(&pth, Some(10.0));
+        assert_eq!(ok2, true);
+        assert_eq!(diff2.len(), 2);
+        assert_eq!(*(diff2.find(&~"runtime").get()), LikelyNoise);
+        assert_eq!(*(diff2.find(&~"throughput").get()), LikelyNoise);
+
+        // Check that it was rewritten.
+        let m4 = MetricMap::load(&pth);
+        assert_eq!(m4.len(), 2);
+        assert_eq!(*(m4.find(&~"runtime").get()), Metric { value: 1100.0, noise: 2.0 });
+        assert_eq!(*(m4.find(&~"throughput").get()), Metric { value: 50.0, noise: 2.0 });
+
+        os::remove_dir_recursive(&dpth);
+    }
 }
diff --git a/src/test/codegen/hello.cc b/src/test/codegen/hello.cc
new file mode 100644
index 0000000000000..01eae9b16bb6f
--- /dev/null
+++ b/src/test/codegen/hello.cc
@@ -0,0 +1,12 @@
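+// C++ half of the "hello" codegen test: `slice` mirrors the data-pointer
+// plus length shape of the string slice bound in hello.rs, and the
+// extern "C" `test` keeps the symbol unmangled so the harness can extract
+// it from the clang-generated bitcode.
+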
+#include <stddef.h>
+
+struct slice {
+  char const *p;
+  size_t len;
+};
+
+extern "C"
+void test() {
+  struct slice s = { .p = "hello",
+                     .len = 5 };
+}
diff --git a/src/test/codegen/hello.rs b/src/test/codegen/hello.rs
new file mode 100644
index 0000000000000..e7cd84f63f2b0
--- /dev/null
+++ b/src/test/codegen/hello.rs
@@ -0,0 +1,4 @@
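+// Rust half of the "hello" codegen test; #[no_mangle] keeps the symbol
+// name `test` so llvm-extract can find it in the bitcode rustc saves
+// with --save-temps.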
+#[no_mangle]
+fn test() {
+    let _x = "hello";
+}