Add more interesting metrics

Raise the cpu_time and real_time thresholds in bm_diff.py from 5% to 10%, and start
flagging per-iteration lock, allocation, write, and atomic (CAS/add) counters. Also
reorder the arguments run_microbenchmark.py passes to bm_diff.py so the counters
output precedes the opt output for both the new and old runs.
diff --git a/tools/profiling/microbenchmarks/bm_diff.py b/tools/profiling/microbenchmarks/bm_diff.py
index 4dbcc76..c9e7213 100755
--- a/tools/profiling/microbenchmarks/bm_diff.py
+++ b/tools/profiling/microbenchmarks/bm_diff.py
@@ -25,8 +25,13 @@
   return lambda n, o: abs((n-o)/o - 1) > pct/100
 
 _INTERESTING = (
-  ('cpu_time', min_change(5)),
-  ('real_time', min_change(5)),
+  ('cpu_time', min_change(10)),
+  ('real_time', min_change(10)),
+  ('locks_per_iteration', min_change(5)),
+  ('allocs_per_iteration', min_change(5)),
+  ('writes_per_iteration', min_change(5)),
+  ('atm_cas_per_iteration', min_change(1)),
+  ('atm_add_per_iteration', min_change(5)),
 )
 
 for bm in sorted(new.keys()):
@@ -44,4 +49,3 @@
         hdr = True
       print '   %s changed %r --> %r' % (fld, o[fld], n[fld])
   sys.exit(0)
-
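
For context, the thresholding these tuples drive can be sketched in isolation. The
snippet below is a minimal approximation, not the tool itself: it measures relative
change as |new - old| / old with a float divisor (the patch keeps the file's existing
lambda unchanged), and the benchmark name and values are purely illustrative.

def min_change(pct):
  # Flag a field once it has moved by more than pct percent relative to
  # the old value (illustrative formula; the tool keeps its own lambda).
  return lambda n, o: abs(n - o) / abs(o) > pct / 100.0

_INTERESTING = (
  ('cpu_time', min_change(10)),
  ('real_time', min_change(10)),
  ('locks_per_iteration', min_change(5)),
  ('allocs_per_iteration', min_change(5)),
  ('writes_per_iteration', min_change(5)),
  ('atm_cas_per_iteration', min_change(1)),
  ('atm_add_per_iteration', min_change(5)),
)

# Hypothetical old/new results keyed by benchmark name.
old = {'bm_example': {'cpu_time': 100.0, 'atm_cas_per_iteration': 4.0}}
new = {'bm_example': {'cpu_time': 120.0, 'atm_cas_per_iteration': 4.0}}

for bm in sorted(new.keys()):
  if bm not in old:
    continue
  n, o = new[bm], old[bm]
  for fld, changed in _INTERESTING:
    if fld in n and fld in o and changed(n[fld], o[fld]):
      # cpu_time moved 20% (> 10%) and is reported; atm_cas did not move.
      print('   %s changed %r --> %r' % (fld, o[fld], n[fld]))
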
diff --git a/tools/run_tests/run_microbenchmark.py b/tools/run_tests/run_microbenchmark.py
index 137e0be..f41bde1 100755
--- a/tools/run_tests/run_microbenchmark.py
+++ b/tools/run_tests/run_microbenchmark.py
@@ -255,10 +255,10 @@
       subprocess.check_call(['git', 'checkout', where_am_i])
     for bm_name in comparables:
       diff = subprocess.check_output(['tools/profiling/microbenchmarks/bm_diff.py',
-                                      '%s.opt.json' % bm_name,
                                       '%s.counters.json' % bm_name,
-                                      '%s.old.opt.json' % bm_name,
-                                      '%s.old.counters.json' % bm_name]).strip()
+                                      '%s.opt.json' % bm_name,
+                                      '%s.old.counters.json' % bm_name,
+                                      '%s.old.opt.json' % bm_name]).strip()
       if diff:
         heading('Performance diff: %s' % bm_name)
         text(diff)
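
The caller-side hunk only swaps the positional order so that, for both the new and the
old run, the counters-build JSON is passed ahead of the opt-build JSON. One plausible
way such a pair could be folded into a single per-benchmark dict is sketched below; it
assumes google-benchmark's JSON layout (a top-level 'benchmarks' list of named entries)
and is not necessarily how bm_diff.py merges them.

import json

def load(filename):
  # Index one results file by benchmark name (assumed layout).
  with open(filename) as f:
    return {b['name']: b for b in json.loads(f.read())['benchmarks']}

def merge(counters_file, opt_file):
  # Start from the counters build (per-iteration counters), then let the
  # opt build overwrite shared fields, on the assumption that its
  # uninstrumented timings are the ones worth diffing. The real tool may differ.
  merged = load(counters_file)
  for name, entry in load(opt_file).items():
    merged.setdefault(name, {}).update(entry)
  return merged

# 'bm_example' is a placeholder benchmark name.
new = merge('bm_example.counters.json', 'bm_example.opt.json')
old = merge('bm_example.old.counters.json', 'bm_example.old.opt.json')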