From 77823c907b272836108175dee546fb846b925465 Mon Sep 17 00:00:00 2001
From: Graydon Hoare <graydon@mozilla.com>
Date: Thu, 11 Jul 2013 17:05:23 -0700
Subject: [PATCH 1/7] extra: add tests for test::MetricMap, MetricDiff,
 ratchet.

---
 src/libextra/test.rs | 111 +++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 106 insertions(+), 5 deletions(-)

diff --git a/src/libextra/test.rs b/src/libextra/test.rs
index 96ca429676850..98338b4eaedfb 100644
--- a/src/libextra/test.rs
+++ b/src/libextra/test.rs
@@ -98,6 +98,7 @@ pub struct Metric {
 pub struct MetricMap(TreeMap<~str,Metric>);
 
 /// Analysis of a single change in metric
+#[deriving(Eq)]
 pub enum MetricChange {
     LikelyNoise,
     MetricAdded,
@@ -774,8 +775,13 @@ impl MetricMap {
         json::to_pretty_writer(f, &self.to_json());
     }
 
-    /// Compare against another MetricMap
-    pub fn compare_to_old(&self, old: MetricMap,
+    /// Compare against another MetricMap. Optionally compare all
+    /// measurements in the maps using the provided `noise_pct` as a
+    /// percentage of each value to consider noise. If `None`, each
+    /// measurement's noise threshold is independently chosen as the
+    /// maximum of that measurement's recorded noise quantity in either
+    /// map.
+    pub fn compare_to_old(&self, old: &MetricMap,
                           noise_pct: Option<f64>) -> MetricDiff {
         let mut diff : MetricDiff = TreeMap::new();
         for old.iter().advance |(k, vold)| {
@@ -790,7 +796,7 @@ impl MetricMap {
                     if delta.abs() < noise {
                         LikelyNoise
                     } else {
-                        let pct = delta.abs() / v.value * 100.0;
+                        let pct = delta.abs() / vold.value * 100.0;
                         if vold.noise < 0.0 {
                             // When 'noise' is negative, it means we want
                             // to see deltas that go up over time, and can
@@ -857,7 +863,7 @@ impl MetricMap {
             MetricMap::new()
         };
 
-        let diff : MetricDiff = self.compare_to_old(old, pct);
+        let diff : MetricDiff = self.compare_to_old(&old, pct);
         let ok = do diff.iter().all() |(_, v)| {
             match *v {
                 Regression(_) => false,
@@ -1006,13 +1012,16 @@ pub mod bench {
 mod tests {
     use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
                TestDesc, TestDescAndFn,
+               Metric, MetricMap, MetricAdded, MetricRemoved,
+               Improvement, Regression, LikelyNoise,
                StaticTestName, DynTestName, DynTestFn};
     use test::{TestOpts, run_test};
 
     use std::either;
     use std::comm::{stream, SharedChan};
-    use std::option;
     use std::vec;
+    use tempfile;
+    use std::os;
 
     #[test]
     pub fn do_not_run_ignored_tests() {
@@ -1208,4 +1217,96 @@ mod tests {
             }
         }
     }
+
+    #[test]
+    pub fn test_metricmap_compare() {
+        let mut m1 = MetricMap::new();
+        let mut m2 = MetricMap::new();
+        m1.insert_metric("in-both-noise", 1000.0, 200.0);
+        m2.insert_metric("in-both-noise", 1100.0, 200.0);
+
+        m1.insert_metric("in-first-noise", 1000.0, 2.0);
+        m2.insert_metric("in-second-noise", 1000.0, 2.0);
+
+        m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
+        m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
+
+        m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
+        m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
+
+        m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
+        m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
+
+        m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
+        m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
+
+        let diff1 = m2.compare_to_old(&m1, None);
+
+        assert_eq!(*(diff1.find(&~"in-both-noise").get()), LikelyNoise);
+        assert_eq!(*(diff1.find(&~"in-first-noise").get()), MetricRemoved);
+        assert_eq!(*(diff1.find(&~"in-second-noise").get()), MetricAdded);
+        assert_eq!(*(diff1.find(&~"in-both-want-downwards-but-regressed").get()), Regression(100.0));
+        assert_eq!(*(diff1.find(&~"in-both-want-downwards-and-improved").get()), Improvement(50.0));
+        assert_eq!(*(diff1.find(&~"in-both-want-upwards-but-regressed").get()), Regression(50.0));
+        assert_eq!(*(diff1.find(&~"in-both-want-upwards-and-improved").get()), Improvement(100.0));
+        assert_eq!(diff1.len(), 7);
+
+        let diff2 = m2.compare_to_old(&m1, Some(200.0));
+
+        assert_eq!(*(diff2.find(&~"in-both-noise").get()), LikelyNoise);
+        assert_eq!(*(diff2.find(&~"in-first-noise").get()), MetricRemoved);
+        assert_eq!(*(diff2.find(&~"in-second-noise").get()), MetricAdded);
+        assert_eq!(*(diff2.find(&~"in-both-want-downwards-but-regressed").get()), LikelyNoise);
+        assert_eq!(*(diff2.find(&~"in-both-want-downwards-and-improved").get()), LikelyNoise);
+        assert_eq!(*(diff2.find(&~"in-both-want-upwards-but-regressed").get()), LikelyNoise);
+        assert_eq!(*(diff2.find(&~"in-both-want-upwards-and-improved").get()), LikelyNoise);
+        assert_eq!(diff2.len(), 7);
+    }
+
+    #[test]
+    pub fn ratchet_test() {
+
+        let dpth = tempfile::mkdtemp(&os::tmpdir(),
+                                     "test-ratchet").expect("missing test for ratchet");
+        let pth = dpth.push("ratchet.json");
+
+        let mut m1 = MetricMap::new();
+        m1.insert_metric("runtime", 1000.0, 2.0);
+        m1.insert_metric("throughput", 50.0, 2.0);
+
+        let mut m2 = MetricMap::new();
+        m2.insert_metric("runtime", 1100.0, 2.0);
+        m2.insert_metric("throughput", 50.0, 2.0);
+
+        m1.save(&pth);
+
+        // Ask for a ratchet that should fail to advance.
+        let (diff1, ok1) = m2.ratchet(&pth, None);
+        assert_eq!(ok1, false);
+        assert_eq!(diff1.len(), 2);
+        assert_eq!(*(diff1.find(&~"runtime").get()), Regression(10.0));
+        assert_eq!(*(diff1.find(&~"throughput").get()), LikelyNoise);
+
+        // Check that it was not rewritten.
+        let m3 = MetricMap::load(&pth);
+        assert_eq!(m3.len(), 2);
+        assert_eq!(*(m3.find(&~"runtime").get()), Metric { value: 1000.0, noise: 2.0 });
+        assert_eq!(*(m3.find(&~"throughput").get()), Metric { value: 50.0, noise: 2.0 });
+
+        // Ask for a ratchet with an explicit noise-percentage override,
+        // that should advance.
+        let (diff2, ok2) = m2.ratchet(&pth, Some(10.0));
+        assert_eq!(ok2, true);
+        assert_eq!(diff2.len(), 2);
+        assert_eq!(*(diff2.find(&~"runtime").get()), LikelyNoise);
+        assert_eq!(*(diff2.find(&~"throughput").get()), LikelyNoise);
+
+        // Check that it was rewritten.
+        let m4 = MetricMap::load(&pth);
+        assert_eq!(m4.len(), 2);
+        assert_eq!(*(m4.find(&~"runtime").get()), Metric { value: 1100.0, noise: 2.0 });
+        assert_eq!(*(m4.find(&~"throughput").get()), Metric { value: 50.0, noise: 2.0 });
+
+        os::remove_dir_recursive(&dpth);
+    }
 }
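
The tests above pin the comparison rule down precisely. As a reference for
readers, here is a minimal sketch of that rule in modern Rust (BTreeMap
standing in for 2013's TreeMap, with the <= and epsilon refinements from
later patches in this series folded in). The names come from the patch; the
bodies are illustrative, not the library's code.

    use std::collections::BTreeMap;

    #[derive(Debug, PartialEq)]
    enum MetricChange {
        LikelyNoise,
        MetricAdded,
        MetricRemoved,
        Improvement(f64),
        Regression(f64),
    }

    #[derive(Clone, Copy)]
    struct Metric {
        value: f64,
        noise: f64,
    }

    // Threshold: with None, the larger recorded noise of the two samples;
    // with Some(pct), pct percent of the old value. A negative recorded
    // noise flips the desired direction, so increases count as improvements.
    fn compare_to_old(
        new: &BTreeMap<String, Metric>,
        old: &BTreeMap<String, Metric>,
        noise_pct: Option<f64>,
    ) -> BTreeMap<String, MetricChange> {
        let mut diff = BTreeMap::new();
        for (k, vold) in old {
            let change = match new.get(k) {
                None => MetricChange::MetricRemoved,
                Some(v) => {
                    let delta = v.value - vold.value;
                    let noise = match noise_pct {
                        None => vold.noise.abs().max(v.noise.abs()),
                        Some(pct) => vold.value * pct / 100.0,
                    };
                    if delta.abs() <= noise {
                        MetricChange::LikelyNoise
                    } else {
                        let pct = delta.abs() / vold.value.max(f64::EPSILON) * 100.0;
                        // want-downwards (noise >= 0): a drop improves;
                        // want-upwards (noise < 0): a rise improves.
                        if (delta < 0.0) == (vold.noise >= 0.0) {
                            MetricChange::Improvement(pct)
                        } else {
                            MetricChange::Regression(pct)
                        }
                    }
                }
            };
            diff.insert(k.clone(), change);
        }
        for k in new.keys() {
            if !old.contains_key(k) {
                diff.insert(k.clone(), MetricChange::MetricAdded);
            }
        }
        diff
    }

    fn main() {
        let m = |value, noise| Metric { value, noise };
        let old = BTreeMap::from([("runtime".to_string(), m(1000.0, 10.0))]);
        let new = BTreeMap::from([("runtime".to_string(), m(2000.0, 10.0))]);
        // 1000 -> 2000 on a want-downwards metric: a 100% regression,
        // exactly the in-both-want-downwards-but-regressed case above.
        assert_eq!(
            compare_to_old(&new, &old, None)["runtime"],
            MetricChange::Regression(100.0)
        );
    }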

From f2f7fb3ae1f64517c0b30371e2457cac2e2c1248 Mon Sep 17 00:00:00 2001
From: Graydon Hoare <graydon@mozilla.com>
Date: Mon, 15 Jul 2013 18:50:32 -0700
Subject: [PATCH 2/7] extra: Add metrics functions to test.

---
 src/libextra/test.rs | 91 +++++++++++++++++++++++++++++++++++---------
 1 file changed, 72 insertions(+), 19 deletions(-)

diff --git a/src/libextra/test.rs b/src/libextra/test.rs
index 98338b4eaedfb..eef3df41b459e 100644
--- a/src/libextra/test.rs
+++ b/src/libextra/test.rs
@@ -64,7 +64,9 @@ impl ToStr for TestName {
 pub enum TestFn {
     StaticTestFn(extern fn()),
     StaticBenchFn(extern fn(&mut BenchHarness)),
+    StaticMetricFn(~fn(&mut MetricMap)),
     DynTestFn(~fn()),
+    DynMetricFn(~fn(&mut MetricMap)),
     DynBenchFn(~fn(&mut BenchHarness))
 }
 
@@ -95,6 +97,7 @@ pub struct Metric {
     noise: f64
 }
 
+#[deriving(Eq)]
 pub struct MetricMap(TreeMap<~str,Metric>);
 
 /// Analysis of a single change in metric
@@ -218,7 +221,13 @@ pub struct BenchSamples {
 }
 
 #[deriving(Eq)]
-pub enum TestResult { TrOk, TrFailed, TrIgnored, TrBench(BenchSamples) }
+pub enum TestResult {
+    TrOk,
+    TrFailed,
+    TrIgnored,
+    TrMetrics(MetricMap),
+    TrBench(BenchSamples)
+}
 
 struct ConsoleTestState {
     out: @io::Writer,
@@ -229,7 +238,7 @@ struct ConsoleTestState {
     passed: uint,
     failed: uint,
     ignored: uint,
-    benchmarked: uint,
+    measured: uint,
     metrics: MetricMap,
     failures: ~[TestDesc]
 }
@@ -261,7 +270,7 @@ impl ConsoleTestState {
             passed: 0u,
             failed: 0u,
             ignored: 0u,
-            benchmarked: 0u,
+            measured: 0u,
             metrics: MetricMap::new(),
             failures: ~[]
         }
@@ -279,11 +288,14 @@ impl ConsoleTestState {
         self.write_pretty("ignored", term::color::YELLOW);
     }
 
+    pub fn write_metric(&self) {
+        self.write_pretty("metric", term::color::CYAN);
+    }
+
     pub fn write_bench(&self) {
         self.write_pretty("bench", term::color::CYAN);
     }
 
-
     pub fn write_added(&self) {
         self.write_pretty("added", term::color::GREEN);
     }
@@ -332,6 +344,10 @@ impl ConsoleTestState {
             TrOk => self.write_ok(),
             TrFailed => self.write_failed(),
             TrIgnored => self.write_ignored(),
+            TrMetrics(ref mm) => {
+                self.write_metric();
+                self.out.write_str(": " + fmt_metrics(mm));
+            }
             TrBench(ref bs) => {
                 self.write_bench();
                 self.out.write_str(": " + fmt_bench_samples(bs))
@@ -349,6 +365,7 @@ impl ConsoleTestState {
                                         TrOk => ~"ok",
                                         TrFailed => ~"failed",
                                         TrIgnored => ~"ignored",
+                                        TrMetrics(ref mm) => fmt_metrics(mm),
                                         TrBench(ref bs) => fmt_bench_samples(bs)
                                     }, test.name.to_str()));
             }
@@ -416,7 +433,7 @@ impl ConsoleTestState {
     pub fn write_run_finish(&self,
                             ratchet_metrics: &Option<Path>,
                             ratchet_pct: Option<f64>) -> bool {
-        assert!(self.passed + self.failed + self.ignored + self.benchmarked == self.total);
+        assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
 
         let ratchet_success = match *ratchet_metrics {
             None => true,
@@ -448,12 +465,23 @@ impl ConsoleTestState {
         } else {
             self.write_failed();
         }
-        self.out.write_str(fmt!(". %u passed; %u failed; %u ignored, %u benchmarked\n\n",
-                                self.passed, self.failed, self.ignored, self.benchmarked));
+        self.out.write_str(fmt!(". %u passed; %u failed; %u ignored; %u measured\n\n",
+                                self.passed, self.failed, self.ignored, self.measured));
         return success;
     }
 }
 
+pub fn fmt_metrics(mm: &MetricMap) -> ~str {
+    use std::iterator::IteratorUtil;
+    let v : ~[~str] = mm.iter()
+        .transform(|(k,v)| fmt!("%s: %f (+/- %f)",
+                                *k,
+                                v.value as float,
+                                v.noise as float))
+        .collect();
+    v.connect(", ")
+}
+
 pub fn fmt_bench_samples(bs: &BenchSamples) -> ~str {
     if bs.mb_s != 0 {
         fmt!("%u ns/iter (+/- %u) = %u MB/s",
@@ -481,11 +509,19 @@ pub fn run_tests_console(opts: &TestOpts,
                 match result {
                     TrOk => st.passed += 1,
                     TrIgnored => st.ignored += 1,
+                    TrMetrics(mm) => {
+                        let tname = test.name.to_str();
+                        for mm.iter().advance() |(k,v)| {
+                            st.metrics.insert_metric(tname + "." + *k,
+                                                     v.value, v.noise);
+                        }
+                        st.measured += 1
+                    }
                     TrBench(bs) => {
                         st.metrics.insert_metric(test.name.to_str(),
                                                  bs.ns_iter_summ.median,
                                                  bs.ns_iter_summ.max - bs.ns_iter_summ.min);
-                        st.benchmarked += 1
+                        st.measured += 1
                     }
                     TrFailed => {
                         st.failed += 1;
@@ -533,7 +569,7 @@ fn should_sort_failures_before_printing_them() {
             passed: 0u,
             failed: 0u,
             ignored: 0u,
-            benchmarked: 0u,
+            measured: 0u,
             metrics: MetricMap::new(),
             failures: ~[test_b, test_a]
         };
@@ -565,11 +601,11 @@ fn run_tests(opts: &TestOpts,
 
     callback(TeFiltered(filtered_descs));
 
-    let (filtered_tests, filtered_benchs) =
+    let (filtered_tests, filtered_benchs_and_metrics) =
         do filtered_tests.partition |e| {
         match e.testfn {
             StaticTestFn(_) | DynTestFn(_) => true,
-            StaticBenchFn(_) | DynBenchFn(_) => false
+            _ => false
         }
     };
 
@@ -607,7 +643,8 @@ fn run_tests(opts: &TestOpts,
     }
 
     // All benchmarks run at the end, in serial.
-    for filtered_benchs.consume_iter().advance |b| {
+    // (this includes metric fns)
+    for filtered_benchs_and_metrics.consume_iter().advance |b| {
         callback(TeWait(copy b.desc));
         run_test(!opts.run_benchmarks, b, ch.clone());
         let (test, result) = p.recv();
@@ -730,6 +767,18 @@ pub fn run_test(force_ignore: bool,
             monitor_ch.send((desc, TrBench(bs)));
             return;
         }
+        DynMetricFn(f) => {
+            let mut mm = MetricMap::new();
+            f(&mut mm);
+            monitor_ch.send((desc, TrMetrics(mm)));
+            return;
+        }
+        StaticMetricFn(f) => {
+            let mut mm = MetricMap::new();
+            f(&mut mm);
+            monitor_ch.send((desc, TrMetrics(mm)));
+            return;
+        }
         DynTestFn(f) => run_test_inner(desc, monitor_ch, f),
         StaticTestFn(f) => run_test_inner(desc, monitor_ch, || f())
     }
@@ -757,12 +806,12 @@ impl ToJson for Metric {
 
 impl MetricMap {
 
-    fn new() -> MetricMap {
+    pub fn new() -> MetricMap {
         MetricMap(TreeMap::new())
     }
 
     /// Load a MetricMap from a file.
-    fn load(p: &Path) -> MetricMap {
+    pub fn load(p: &Path) -> MetricMap {
         assert!(os::path_exists(p));
         let f = io::file_reader(p).get();
         let mut decoder = json::Decoder(json::from_reader(f).get());
@@ -793,7 +842,7 @@ impl MetricMap {
                         None => f64::max(vold.noise.abs(), v.noise.abs()),
                         Some(pct) => vold.value * pct / 100.0
                     };
-                    if delta.abs() < noise {
+                    if delta.abs() <= noise {
                         LikelyNoise
                     } else {
                         let pct = delta.abs() / vold.value * 100.0;
@@ -1245,10 +1294,14 @@ mod tests {
         assert_eq!(*(diff1.find(&~"in-both-noise").get()), LikelyNoise);
         assert_eq!(*(diff1.find(&~"in-first-noise").get()), MetricRemoved);
         assert_eq!(*(diff1.find(&~"in-second-noise").get()), MetricAdded);
-        assert_eq!(*(diff1.find(&~"in-both-want-downwards-but-regressed").get()), Regression(100.0));
-        assert_eq!(*(diff1.find(&~"in-both-want-downwards-and-improved").get()), Improvement(50.0));
-        assert_eq!(*(diff1.find(&~"in-both-want-upwards-but-regressed").get()), Regression(50.0));
-        assert_eq!(*(diff1.find(&~"in-both-want-upwards-and-improved").get()), Improvement(100.0));
+        assert_eq!(*(diff1.find(&~"in-both-want-downwards-but-regressed").get()),
+                   Regression(100.0));
+        assert_eq!(*(diff1.find(&~"in-both-want-downwards-and-improved").get()),
+                   Improvement(50.0));
+        assert_eq!(*(diff1.find(&~"in-both-want-upwards-but-regressed").get()),
+                   Regression(50.0));
+        assert_eq!(*(diff1.find(&~"in-both-want-upwards-and-improved").get()),
+                   Improvement(100.0));
         assert_eq!(diff1.len(), 7);
 
         let diff2 = m2.compare_to_old(&m1, Some(200.0));
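
The console-side changes above are easiest to see end to end in a compact
modern-Rust sketch: fmt_metrics renders a MetricMap for the report line, and
the TrMetrics arm folds each test's metrics into the run-wide map under
testname.key. The types here are stand-ins for the ones in extra::test.

    use std::collections::BTreeMap;

    #[derive(Clone, Copy)]
    struct Metric {
        value: f64,
        noise: f64,
    }

    type MetricMap = BTreeMap<String, Metric>;

    // Render a MetricMap the way the reporter's fmt_metrics does:
    // "key: value (+/- noise)", comma-separated.
    fn fmt_metrics(mm: &MetricMap) -> String {
        mm.iter()
            .map(|(k, v)| format!("{}: {} (+/- {})", k, v.value, v.noise))
            .collect::<Vec<_>>()
            .join(", ")
    }

    // Fold one test's metrics into the run-wide map, namespaced by test
    // name, mirroring the TrMetrics arm of run_tests_console.
    fn merge_metrics(all: &mut MetricMap, test_name: &str, mm: &MetricMap) {
        for (k, v) in mm {
            all.insert(format!("{}.{}", test_name, k), *v);
        }
    }

    fn main() {
        let mut mm = MetricMap::new();
        mm.insert("ratio".to_string(), Metric { value: 1.2, noise: 0.001 });
        assert_eq!(fmt_metrics(&mm), "ratio: 1.2 (+/- 0.001)");

        let mut all = MetricMap::new();
        merge_metrics(&mut all, "codegen-test", &mm);
        assert!(all.contains_key("codegen-test.ratio"));
    }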

From bc4c89c10a02abda1c2c83b7fcde756c84913e5f Mon Sep 17 00:00:00 2001
From: Graydon Hoare <graydon@mozilla.com>
Date: Mon, 15 Jul 2013 18:51:20 -0700
Subject: [PATCH 3/7] compiletest: Add support for metrics and ratchet modes.

---
 src/compiletest/common.rs      |  9 ++++++++
 src/compiletest/compiletest.rs | 40 ++++++++++++++++++++++++++++------
 src/compiletest/runtest.rs     | 28 ++++++++++++++++++++++--
 3 files changed, 68 insertions(+), 9 deletions(-)

diff --git a/src/compiletest/common.rs b/src/compiletest/common.rs
index df00286c87f25..4add16fd7a95b 100644
--- a/src/compiletest/common.rs
+++ b/src/compiletest/common.rs
@@ -58,6 +58,15 @@ pub struct config {
     // Write out a parseable log of tests that were run
     logfile: Option<Path>,
 
+    // Write out a json file containing any metrics of the run
+    save_metrics: Option<Path>,
+
+    // Write and ratchet a metrics file
+    ratchet_metrics: Option<Path>,
+
+    // Percent change in metrics to consider noise
+    ratchet_noise_percent: Option<f64>,
+
     // A command line to prefix program execution with,
     // for running under valgrind
     runtool: Option<~str>,
diff --git a/src/compiletest/compiletest.rs b/src/compiletest/compiletest.rs
index a411e714247ed..39dc55b44f4ca 100644
--- a/src/compiletest/compiletest.rs
+++ b/src/compiletest/compiletest.rs
@@ -17,6 +17,7 @@
 extern mod extra;
 
 use std::os;
+use std::f64;
 
 use extra::getopts;
 use extra::getopts::groups::{optopt, optflag, reqopt};
@@ -66,6 +67,10 @@ pub fn parse_config(args: ~[~str]) -> config {
           optopt("", "rustcflags", "flags to pass to rustc", "FLAGS"),
           optflag("", "verbose", "run tests verbosely, showing all output"),
           optopt("", "logfile", "file to log test execution to", "FILE"),
+          optopt("", "save-metrics", "file to save metrics to", "FILE"),
+          optopt("", "ratchet-metrics", "file to ratchet metrics against", "FILE"),
+          optopt("", "ratchet-noise-percent",
+                 "percent change in metrics to consider noise", "N"),
           optflag("", "jit", "run tests under the JIT"),
           optflag("", "newrt", "run tests on the new runtime / scheduler"),
           optopt("", "target", "the target to build for", "TARGET"),
@@ -116,6 +121,13 @@ pub fn parse_config(args: ~[~str]) -> config {
                  Some(copy matches.free[0])
              } else { None },
         logfile: getopts::opt_maybe_str(matches, "logfile").map(|s| Path(*s)),
+        save_metrics: getopts::opt_maybe_str(matches, "save-metrics").map(|s| Path(*s)),
+        ratchet_metrics:
+            getopts::opt_maybe_str(matches, "ratchet-metrics").map(|s| Path(*s)),
+        ratchet_noise_percent:
+            getopts::opt_maybe_str(matches,
+                                   "ratchet-noise-percent").map(|s|
+                                                                f64::from_str(*s).get()),
         runtool: getopts::opt_maybe_str(matches, "runtool"),
         rustcflags: getopts::opt_maybe_str(matches, "rustcflags"),
         jit: getopts::opt_present(matches, "jit"),
@@ -215,10 +227,10 @@ pub fn test_opts(config: &config) -> test::TestOpts {
         run_ignored: config.run_ignored,
         logfile: copy config.logfile,
         run_tests: true,
-        run_benchmarks: false,
-        ratchet_metrics: None,
-        ratchet_noise_percent: None,
-        save_metrics: None,
+        run_benchmarks: true,
+        ratchet_metrics: copy config.ratchet_metrics,
+        ratchet_noise_percent: copy config.ratchet_noise_percent,
+        save_metrics: copy config.save_metrics,
     }
 }
 
@@ -231,7 +243,13 @@ pub fn make_tests(config: &config) -> ~[test::TestDescAndFn] {
         let file = copy *file;
         debug!("inspecting file %s", file.to_str());
         if is_test(config, file) {
-            tests.push(make_test(config, file))
+            let t = do make_test(config, file) {
+                match config.mode {
+                    mode_codegen => make_metrics_test_closure(config, file),
+                    _ => make_test_closure(config, file)
+                }
+            };
+            tests.push(t)
         }
     }
     tests
@@ -260,14 +278,15 @@ pub fn is_test(config: &config, testfile: &Path) -> bool {
     return valid;
 }
 
-pub fn make_test(config: &config, testfile: &Path) -> test::TestDescAndFn {
+pub fn make_test(config: &config, testfile: &Path,
+                 f: &fn()->test::TestFn) -> test::TestDescAndFn {
     test::TestDescAndFn {
         desc: test::TestDesc {
             name: make_test_name(config, testfile),
             ignore: header::is_test_ignored(config, testfile),
             should_fail: false
         },
-        testfn: make_test_closure(config, testfile),
+        testfn: f(),
     }
 }
 
@@ -291,3 +310,10 @@ pub fn make_test_closure(config: &config, testfile: &Path) -> test::TestFn {
     let testfile = Cell::new(testfile.to_str());
     test::DynTestFn(|| { runtest::run(config.take(), testfile.take()) })
 }
+
+pub fn make_metrics_test_closure(config: &config, testfile: &Path) -> test::TestFn {
+    use std::cell::Cell;
+    let config = Cell::new(copy *config);
+    let testfile = Cell::new(testfile.to_str());
+    test::DynMetricFn(|mm| { runtest::run_metrics(config.take(), testfile.take(), mm) })
+}
diff --git a/src/compiletest/runtest.rs b/src/compiletest/runtest.rs
index dee07c6de495d..a51ab8208566d 100644
--- a/src/compiletest/runtest.rs
+++ b/src/compiletest/runtest.rs
@@ -25,7 +25,14 @@ use std::os;
 use std::uint;
 use std::vec;
 
+use extra::test::MetricMap;
+
 pub fn run(config: config, testfile: ~str) {
+    let mut _mm = MetricMap::new();
+    run_metrics(config, testfile, &mut _mm);
+}
+
+pub fn run_metrics(config: config, testfile: ~str, mm: &mut MetricMap) {
     if config.verbose {
         // We're going to be dumping a lot of info. Start on a new line.
         io::stdout().write_str("\n\n");
@@ -40,7 +47,7 @@ pub fn run(config: config, testfile: ~str) {
       mode_run_pass => run_rpass_test(&config, &props, &testfile),
       mode_pretty => run_pretty_test(&config, &props, &testfile),
       mode_debug_info => run_debuginfo_test(&config, &props, &testfile),
-      mode_codegen => run_codegen_test(&config, &props, &testfile)
+      mode_codegen => run_codegen_test(&config, &props, &testfile, mm)
     }
 }
 
@@ -906,7 +913,14 @@ fn disassemble_extract(config: &config, _props: &TestProps,
 }
 
 
-fn run_codegen_test(config: &config, props: &TestProps, testfile: &Path) {
+fn count_extracted_lines(p: &Path) -> uint {
+    let x = io::read_whole_file_str(&p.with_filetype("ll")).get();
+    x.line_iter().len_()
+}
+
+
+fn run_codegen_test(config: &config, props: &TestProps,
+                    testfile: &Path, mm: &mut MetricMap) {
 
     if config.llvm_bin_path.is_none() {
         fatal(~"missing --llvm-bin-path");
@@ -947,7 +961,17 @@ fn run_codegen_test(config: &config, props: &TestProps, testfile: &Path) {
         fatal_ProcRes(~"disassembling extract failed", &ProcRes);
     }
 
+    let base = output_base_name(config, testfile);
+    let base_extract = append_suffix_to_stem(&base, "extract");
+
+    let base_clang = append_suffix_to_stem(&base, "clang");
+    let base_clang_extract = append_suffix_to_stem(&base_clang, "extract");
 
+    let base_lines = count_extracted_lines(&base_extract);
+    let clang_lines = count_extracted_lines(&base_clang_extract);
 
+    mm.insert_metric("clang-codegen-ratio",
+                     (base_lines as f64) / (clang_lines as f64),
+                     0.001);
 }
 

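With patch 3, the codegen mode reports a measurement instead of a pass/fail
verdict: the line count of rustc's disassembled .ll extract divided by
clang's. A rough modern-Rust sketch of that measurement (with_extension
standing in for the old with_filetype; names and paths are illustrative):

    use std::path::Path;

    // Stand-in for compiletest's count_extracted_lines: the size of the
    // disassembled LLVM IR extract, a crude proxy for codegen quality.
    #[allow(dead_code)]
    fn count_extracted_lines(p: &Path) -> std::io::Result<usize> {
        let ll = std::fs::read_to_string(p.with_extension("ll"))?;
        Ok(ll.lines().count())
    }

    // The recorded metric: rustc extract size relative to clang's. The
    // tiny noise bound (0.001) in the patch means nearly any movement in
    // this ratio is significant to the ratchet.
    fn clang_codegen_ratio(rustc_lines: usize, clang_lines: usize) -> f64 {
        rustc_lines as f64 / clang_lines as f64
    }

    fn main() {
        // 120 lines of rustc IR against 100 of clang IR: ratio 1.2.
        assert_eq!(clang_codegen_ratio(120, 100), 1.2);
    }
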
From cdce33a421c793b8969ad521388f79e738a54d65 Mon Sep 17 00:00:00 2001
From: Graydon Hoare <graydon@mozilla.com>
Date: Mon, 15 Jul 2013 18:52:08 -0700
Subject: [PATCH 4/7] Add configure and make machinery to activate perf metrics
 and ratchets.

---
 configure   |  1 +
 mk/tests.mk | 19 +++++++++++++++++++
 2 files changed, 20 insertions(+)

diff --git a/configure b/configure
index 8f757a0715f8c..7138c395513cc 100755
--- a/configure
+++ b/configure
@@ -372,6 +372,7 @@ opt optimize 1 "build optimized rust code"
 opt optimize-cxx 1 "build optimized C++ code"
 opt optimize-llvm 1 "build optimized LLVM"
 opt debug 0 "build with extra debug fun"
+opt ratchet-bench 0 "ratchet benchmarks"
 opt fast-make 0 "use .gitmodules as timestamp for submodule deps"
 opt manage-submodules 1 "let the build manage the git submodules"
 opt mingw-cross 0 "cross-compile for win32 using mingw"
diff --git a/mk/tests.mk b/mk/tests.mk
index 7a5a5dc15c30e..625e2ea14619e 100644
--- a/mk/tests.mk
+++ b/mk/tests.mk
@@ -60,6 +60,21 @@ endif
 TEST_LOG_FILE=tmp/check-stage$(1)-T-$(2)-H-$(3)-$(4).log
 TEST_OK_FILE=tmp/check-stage$(1)-T-$(2)-H-$(3)-$(4).ok
 
+TEST_RATCHET_FILE=tmp/check-stage$(1)-T-$(2)-H-$(3)-$(4)-metrics.json
+TEST_RATCHET_NOISE_PERCENT=10.0
+
+# Whether to ratchet or merely save benchmarks
+ifdef CFG_RATCHET_BENCH
+CRATE_TEST_BENCH_ARGS=\
+  --test --bench \
+  --ratchet-metrics $(call TEST_RATCHET_FILE,$(1),$(2),$(3),$(4)) \
+  --ratchet-noise-percent $(TEST_RATCHET_NOISE_PERCENT)
+else
+CRATE_TEST_BENCH_ARGS=\
+  --test --bench \
+  --save-metrics $(call TEST_RATCHET_FILE,$(1),$(2),$(3),$(4))
+endif
+
 define DEF_TARGET_COMMANDS
 
 ifdef CFG_UNIXY_$(1)
@@ -359,11 +374,14 @@ $(foreach host,$(CFG_HOST_TRIPLES), \
 define DEF_TEST_CRATE_RULES
 check-stage$(1)-T-$(2)-H-$(3)-$(4)-exec: $$(call TEST_OK_FILE,$(1),$(2),$(3),$(4))
 
+check-stage$(1)-T-$(2)-H-$(3)-$(4)-exec: $$(call TEST_OK_FILE,$(1),$(2),$(3),$(4))
+
 $$(call TEST_OK_FILE,$(1),$(2),$(3),$(4)): \
 		$(3)/stage$(1)/test/$(4)test-$(2)$$(X_$(2))
 	@$$(call E, run: $$<)
 	$$(Q)$$(call CFG_RUN_TEST_$(2),$$<,$(2),$(3)) $$(TESTARGS)	\
 	--logfile $$(call TEST_LOG_FILE,$(1),$(2),$(3),$(4)) \
+	$$(call CRATE_TEST_BENCH_ARGS,$(1),$(2),$(3),$(4)) \
 	&& touch $$@
 endef
 
@@ -552,6 +570,7 @@ CTEST_ARGS$(1)-T-$(2)-H-$(3)-$(4) := \
         $$(CTEST_COMMON_ARGS$(1)-T-$(2)-H-$(3))	\
         --src-base $$(S)src/test/$$(CTEST_SRC_BASE_$(4))/ \
         --build-base $(3)/test/$$(CTEST_BUILD_BASE_$(4))/ \
+        --ratchet-metrics $(call TEST_RATCHET_FILE,$(1),$(2),$(3),$(4)) \
         --mode $$(CTEST_MODE_$(4)) \
 	$$(CTEST_RUNTOOL_$(4))
 

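The make conditional above reduces to one decision: ratchet against the
saved baseline (failing the run on regressions past the noise bound) when
CFG_RATCHET_BENCH is set, or merely save metrics otherwise. Expressed as
the argument list the test runner ends up seeing (an illustrative helper,
not part of the build; the flag names are the runner flags from this
series):

    fn crate_test_bench_args(ratchet: bool, metrics_file: &str) -> Vec<String> {
        let mut args = vec!["--test".to_string(), "--bench".to_string()];
        if ratchet {
            args.push("--ratchet-metrics".to_string());
            args.push(metrics_file.to_string());
            args.push("--ratchet-noise-percent".to_string());
            args.push("10.0".to_string()); // TEST_RATCHET_NOISE_PERCENT
        } else {
            args.push("--save-metrics".to_string());
            args.push(metrics_file.to_string());
        }
        args
    }

    fn main() {
        let args = crate_test_bench_args(true, "tmp/check-metrics.json");
        assert!(args.windows(2).any(|w| w[0] == "--ratchet-noise-percent"
                                     && w[1] == "10.0"));
    }
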
From 8e58a272cc440ed90b15043125138e8f1aabba98 Mon Sep 17 00:00:00 2001
From: Graydon Hoare <graydon@mozilla.com>
Date: Mon, 15 Jul 2013 20:34:11 -0700
Subject: [PATCH 5/7] extra: reduce bench loop max time to 3s.

---
 src/libextra/test.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/libextra/test.rs b/src/libextra/test.rs
index eef3df41b459e..cebf518f09b02 100644
--- a/src/libextra/test.rs
+++ b/src/libextra/test.rs
@@ -1019,8 +1019,8 @@ impl BenchHarness {
             }
 
             total_run += loop_run;
-            // Longest we ever run for is 10s.
-            if total_run > 10_000_000_000 {
+            // Longest we ever run for is 3s.
+            if total_run > 3_000_000_000 {
                 return summ5;
             }
 

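For reference, after this change the timing loop's stopping rule has the
shape sketched below in modern Rust; only the wall-clock cap is shown, and
the convergence checks on the summary statistics are omitted:

    use std::time::{Duration, Instant};

    fn run_batches_capped<F: FnMut()>(mut run_batch: F) -> u32 {
        let cap = Duration::from_secs(3); // was 10s before this patch
        let start = Instant::now();
        let mut batches = 0;
        loop {
            run_batch(); // the real harness times this batch and updates summ5
            batches += 1;
            // ...a convergence check on the stats would return early here...
            if start.elapsed() > cap {
                return batches; // budget exhausted; report what we have
            }
        }
    }

    fn main() {
        let n = run_batches_capped(|| std::thread::sleep(Duration::from_millis(100)));
        println!("{} batches inside the 3s budget", n);
    }
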
From 4bf6b84a4d23606394fb7176b4f2f216dff06e5b Mon Sep 17 00:00:00 2001
From: Graydon Hoare <graydon@mozilla.com>
Date: Wed, 17 Jul 2013 11:52:21 -0700
Subject: [PATCH 6/7] make: turn off --bench when running under valgrind

---
 mk/tests.mk | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/mk/tests.mk b/mk/tests.mk
index 625e2ea14619e..770e72804913a 100644
--- a/mk/tests.mk
+++ b/mk/tests.mk
@@ -34,9 +34,12 @@ ifdef CHECK_XFAILS
   TESTARGS += --ignored
 endif
 
+CTEST_BENCH = --bench
+
 # Arguments to the cfail/rfail/rpass/bench tests
 ifdef CFG_VALGRIND
   CTEST_RUNTOOL = --runtool "$(CFG_VALGRIND)"
+  CTEST_BENCH =
 endif
 
 # Arguments to the perf tests
@@ -66,12 +69,12 @@ TEST_RATCHET_NOISE_PERCENT=10.0
 # Whether to ratchet or merely save benchmarks
 ifdef CFG_RATCHET_BENCH
 CRATE_TEST_BENCH_ARGS=\
-  --test --bench \
+  --test $(CTEST_BENCH) \
   --ratchet-metrics $(call TEST_RATCHET_FILE,$(1),$(2),$(3),$(4)) \
   --ratchet-noise-percent $(TEST_RATCHET_NOISE_PERCENT)
 else
 CRATE_TEST_BENCH_ARGS=\
-  --test --bench \
+  --test $(CTEST_BENCH) \
   --save-metrics $(call TEST_RATCHET_FILE,$(1),$(2),$(3),$(4))
 endif
 

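Seen from the runner's side, the toggle amounts to the check sketched
below; the environment-variable convention is purely an assumption of this
sketch, since the real gate lives in mk/tests.mk as shown above:

    use std::env;

    // Benchmarks under valgrind are too slow and too noisy to be worth
    // timing, so a wrapped run drops them and keeps only functional tests.
    fn benches_enabled() -> bool {
        env::var_os("CFG_VALGRIND").is_none() // hypothetical variable name
    }

    fn main() {
        println!("run benchmarks: {}", benches_enabled());
    }
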
From 6d78a367b1f5721624c7f8b66b1796303f0b6f45 Mon Sep 17 00:00:00 2001
From: Graydon Hoare <graydon@mozilla.com>
Date: Wed, 17 Jul 2013 12:28:48 -0700
Subject: [PATCH 7/7] extra: avoid possible divide-by-zero conditions in test.

---
 src/libextra/test.rs | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/src/libextra/test.rs b/src/libextra/test.rs
index cebf518f09b02..deef1fc36138d 100644
--- a/src/libextra/test.rs
+++ b/src/libextra/test.rs
@@ -845,7 +845,7 @@ impl MetricMap {
                     if delta.abs() <= noise {
                         LikelyNoise
                     } else {
-                        let pct = delta.abs() / vold.value * 100.0;
+                        let pct = delta.abs() / (vold.value).max(&f64::epsilon) * 100.0;
                         if vold.noise < 0.0 {
                             // When 'noise' is negative, it means we want
                             // to see deltas that go up over time, and can
@@ -954,7 +954,7 @@ impl BenchHarness {
         if self.iterations == 0 {
             0
         } else {
-            self.ns_elapsed() / self.iterations
+            self.ns_elapsed() / self.iterations.max(&1)
         }
     }
 
@@ -977,7 +977,7 @@ impl BenchHarness {
         if self.ns_per_iter() == 0 {
             n = 1_000_000;
         } else {
-            n = 1_000_000 / self.ns_per_iter();
+            n = 1_000_000 / self.ns_per_iter().max(&1);
         }
 
         let mut total_run = 0;
@@ -1047,7 +1047,8 @@ pub mod bench {
 
         let ns_iter_summ = bs.auto_bench(f);
 
-        let iter_s = 1_000_000_000 / (ns_iter_summ.median as u64);
+        let ns_iter = (ns_iter_summ.median as u64).max(&1);
+        let iter_s = 1_000_000_000 / ns_iter;
         let mb_s = (bs.bytes * iter_s) / 1_000_000;
 
         BenchSamples {
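
The three guards are easier to audit side by side. In outline, in modern
Rust (where max on primitives takes values rather than the old
Orderable-style references), every denominator is clamped away from zero
before dividing:

    fn pct_change(delta: f64, old_value: f64) -> f64 {
        delta.abs() / old_value.max(f64::EPSILON) * 100.0
    }

    fn ns_per_iter(ns_elapsed: u64, iterations: u64) -> u64 {
        // max(1) is redundant after the zero check; it mirrors the
        // belt-and-braces style of the patch itself.
        if iterations == 0 { 0 } else { ns_elapsed / iterations.max(1) }
    }

    fn iters_per_second(median_ns_per_iter: u64) -> u64 {
        1_000_000_000 / median_ns_per_iter.max(1)
    }

    fn main() {
        assert!(pct_change(10.0, 0.0).is_finite()); // zero baseline no longer yields inf
        assert_eq!(ns_per_iter(100, 0), 0);
        assert_eq!(iters_per_second(0), 1_000_000_000); // 0ns median no longer divides by zero
    }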