Fix EINTR forever

Retrying on EINTR around each file read was not enough: a signal could
still interrupt some other syscall in the pipeline and abort the run.
Hoist the retry into a generic eintr_be_gone() helper and wrap the
entire pipeline (now factored into finalize()) in it.
diff --git a/tools/profiling/microbenchmarks/bm_diff.py b/tools/profiling/microbenchmarks/bm_diff.py
index ba0c225..f1b6ef1 100755
--- a/tools/profiling/microbenchmarks/bm_diff.py
+++ b/tools/profiling/microbenchmarks/bm_diff.py
@@ -192,52 +192,66 @@
     return [self.final[f] if f in self.final else '' for f in flds]
 
 
-def read_file(filename):
+def eintr_be_gone(fn):
+  """Run fn until it doesn't stop because of EINTR"""
   while True:
     try:
-      with open(filename) as f:
-        return f.read()
+      return fn()
     except IOError, e:
       if e.errno != errno.EINTR:
         raise
 
+
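+# EINTR retries now happen once at the top level in eintr_be_gone, so read_json no longer loops itself.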
 def read_json(filename):
-  return json.loads(read_file(filename))
+  with open(filename) as f:
+    return json.load(f)
 
-benchmarks = collections.defaultdict(Benchmark)
 
-for bm in args.benchmarks:
-  for loop in range(0, args.loops):
-    js_new_ctr = read_json('%s.counters.new.%d.json' % (bm, loop))
-    js_new_opt = read_json('%s.opt.new.%d.json' % (bm, loop))
-    js_old_ctr = read_json('%s.counters.old.%d.json' % (bm, loop))
-    js_old_opt = read_json('%s.opt.old.%d.json' % (bm, loop))
+def finalize():
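+  """Read all benchmark JSON, diff old vs new samples, and report the result."""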
+  benchmarks = collections.defaultdict(Benchmark)
 
-    for row in bm_json.expand_json(js_new_ctr, js_new_opt):
-      print row
-      name = row['cpp_name']
-      if name.endswith('_mean') or name.endswith('_stddev'): continue
-      benchmarks[name].add_sample(row, True)
-    for row in bm_json.expand_json(js_old_ctr, js_old_opt):
-      print row
-      name = row['cpp_name']
-      if name.endswith('_mean') or name.endswith('_stddev'): continue
-      benchmarks[name].add_sample(row, False)
+  for bm in args.benchmarks:
+    for loop in range(0, args.loops):
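+      # Each loop wrote counters and opt JSON for both the old and new binaries.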
+      js_new_ctr = read_json('%s.counters.new.%d.json' % (bm, loop))
+      js_new_opt = read_json('%s.opt.new.%d.json' % (bm, loop))
+      js_old_ctr = read_json('%s.counters.old.%d.json' % (bm, loop))
+      js_old_opt = read_json('%s.opt.old.%d.json' % (bm, loop))
 
-really_interesting = set()
-for name, bm in benchmarks.items():
-  print name
-  really_interesting.update(bm.process())
-fields = [f for f in args.track if f in really_interesting]
+      for row in bm_json.expand_json(js_new_ctr, js_new_opt):
+        print row
+        name = row['cpp_name']
+        if name.endswith('_mean') or name.endswith('_stddev'): continue
+        benchmarks[name].add_sample(row, True)
+      for row in bm_json.expand_json(js_old_ctr, js_old_opt):
+        print row
+        name = row['cpp_name']
+        if name.endswith('_mean') or name.endswith('_stddev'): continue
+        benchmarks[name].add_sample(row, False)
 
-headers = ['Benchmark'] + fields
-rows = []
-for name in sorted(benchmarks.keys()):
-  if benchmarks[name].skip(): continue
-  rows.append([name] + benchmarks[name].row(fields))
-if rows:
-  text = 'Performance differences noted:\n' + tabulate.tabulate(rows, headers=headers, floatfmt='+.2f')
-else:
-  text = 'No significant performance differences'
-comment_on_pr.comment_on_pr('```\n%s\n```' % text)
-print text
+  really_interesting = set()
+  for name, bm in benchmarks.items():
+    print name
+    really_interesting.update(bm.process())
+  fields = [f for f in args.track if f in really_interesting]
+
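+  # Render the diff table and post it to the PR as a fenced code block.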
+  headers = ['Benchmark'] + fields
+  rows = []
+  for name in sorted(benchmarks.keys()):
+    if benchmarks[name].skip(): continue
+    rows.append([name] + benchmarks[name].row(fields))
+  if rows:
+    text = 'Performance differences noted:\n' + tabulate.tabulate(rows, headers=headers, floatfmt='+.2f')
+  else:
+    text = 'No significant performance differences'
+  comment_on_pr.comment_on_pr('```\n%s\n```' % text)
+  print text
+
+
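+# Retry the whole run if any syscall along the way is interrupted by a signal.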
+eintr_be_gone(finalize)
+
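For reference, here is a minimal, self-contained sketch of the same retry pattern, runnable on Python 3 (the script above is Python 2). `flaky_read` and its call counter are hypothetical stand-ins for a syscall that a signal interrupts twice before succeeding; they are not part of bm_diff.py:

```python
import errno


def eintr_be_gone(fn):
  """Retry fn until it completes without being interrupted by EINTR."""
  while True:
    try:
      return fn()
    except OSError as e:  # on Python 3, IOError is an alias of OSError
      if e.errno != errno.EINTR:
        raise


calls = {'n': 0}


def flaky_read():
  # Hypothetical stand-in: raises EINTR twice, then succeeds.
  calls['n'] += 1
  if calls['n'] < 3:
    raise OSError(errno.EINTR, 'Interrupted system call')
  return 'payload'


assert eintr_be_gone(flaky_read) == 'payload'
assert calls['n'] == 3  # two EINTR retries plus the successful call
```

Note that on Python 3.5+ (PEP 475) the interpreter itself retries most syscalls interrupted by a signal whose handler returns normally, so EINTR rarely reaches user code there; the wrapper matters chiefly on Python 2, which this script targets.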