graphite: Separate out configuration from the statsd classes.

The statsd classes are now created through an instance of the new Statsd
class, which sets up the defaults once instead of requiring them to be
specified over and over. This keeps the interface essentially compatible
with the existing usage in autotest, but allows chromite to configure
things differently, and it avoids side effects from importing the module
or relying on global state.
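
As a rough sketch (hypothetical names and values, not part of this change),
a client that wants its own configuration can now build one explicitly:

    from autotest_lib.client.common_lib.cros.graphite import stats

    _statsd = stats.Statsd(es=my_es_backend, host='statsd.example.com',
                           port=8125, prefix='my_server')
    _statsd.Counter('rpc').increment()

autotest itself keeps the old defaults through the new autotest_stats
module, which builds one such instance from global_config.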

BUG=chromium:446291
TEST=Ran unit tests, ran stats_es_functionaltest.py, ran the
stats_mock_unittest, ran a butterfly-paladin tryjob with --hwtest; tested by
fdeng.
DEPLOY=apache,scheduler,host-scheduler

Change-Id: I1071813db197c0e5e035b4d8db615030386f1c1c
Reviewed-on: https://chromium-review.googlesource.com/246428
Reviewed-by: Fang Deng <fdeng@chromium.org>
Reviewed-by: Dan Shi <dshi@chromium.org>
Commit-Queue: Gabe Black <gabeblack@chromium.org>
Tested-by: Gabe Black <gabeblack@chromium.org>
diff --git a/client/common_lib/cros/dev_server.py b/client/common_lib/cros/dev_server.py
index 8e08354..9ed4335 100644
--- a/client/common_lib/cros/dev_server.py
+++ b/client/common_lib/cros/dev_server.py
@@ -18,7 +18,7 @@
 from autotest_lib.client.common_lib import global_config
 from autotest_lib.client.common_lib import utils
 from autotest_lib.client.common_lib.cros import retry
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 # TODO(cmasone): redo this class using requests module; http://crosbug.com/30107
 
 
@@ -238,7 +238,7 @@
         try:
             result_dict = json.load(cStringIO.StringIO(make_call()))
             free_disk = result_dict['free_disk']
-            stats.Gauge(server_name).send('free_disk', free_disk)
+            autotest_stats.Gauge(server_name).send('free_disk', free_disk)
 
             skip_devserver_health_check = CONFIG.get_config_value('CROS',
                                               'skip_devserver_health_check',
@@ -248,19 +248,21 @@
             elif (free_disk < DevServer._MIN_FREE_DISK_SPACE_GB):
                 logging.error('Devserver check_health failed. Free disk space '
                               'is low. Only %dGB is available.', free_disk)
-                stats.Counter(server_name +
-                              '.devserver_not_healthy').increment()
+                autotest_stats.Counter(server_name +
+                                       '.devserver_not_healthy').increment()
                 return False
 
             # This counter indicates the load of a devserver. By comparing the
             # value of this counter for all devservers, we can evaluate the
             # load balancing across all devservers.
-            stats.Counter(server_name + '.devserver_healthy').increment()
+            autotest_stats.Counter(server_name +
+                                   '.devserver_healthy').increment()
             return True
         except Exception as e:
             logging.error('Devserver call failed: "%s", timeout: %s seconds,'
                           ' Error: %s', call, timeout_min * 60, e)
-            stats.Counter(server_name + '.devserver_not_healthy').increment()
+            autotest_stats.Counter(server_name +
+                                   '.devserver_not_healthy').increment()
             return False
 
 
@@ -358,8 +360,8 @@
             logging.warning("Can't 'import requests' to connect to dev server.")
             return ''
 
-        stats.Counter('CrashServer.symbolicate_dump').increment()
-        timer = stats.Timer('CrashServer.symbolicate_dump')
+        autotest_stats.Counter('CrashServer.symbolicate_dump').increment()
+        timer = autotest_stats.Timer('CrashServer.symbolicate_dump')
         timer.start()
         # Symbolicate minidump.
         call = self.build_call('symbolicate_dump',
@@ -616,8 +618,8 @@
                     'stage_artifacts_count', server_name, artifacts)
             metadata = self.create_metadata(server_name, image, artifacts,
                                             files)
-            stats.Counter(counter_key, metadata=metadata).increment()
-            timer = stats.Timer(timer_key, metadata=metadata)
+            autotest_stats.Counter(counter_key, metadata=metadata).increment()
+            timer = autotest_stats.Timer(timer_key, metadata=metadata)
             timer.start()
         try:
             self.call_and_wait(call_name='stage',
@@ -633,7 +635,8 @@
             if artifacts:
                 timeout_key = self.create_stats_str(
                         'stage_artifacts_timeout', server_name, artifacts)
-                stats.Counter(timeout_key, metadata=metadata).increment()
+                autotest_stats.Counter(timeout_key,
+                                       metadata=metadata).increment()
             raise DevServerException(
                     'stage_artifacts timed out: %s' % staging_info)
 
@@ -686,7 +689,7 @@
         counter_key = self.create_stats_str(
                     'trigger_download_count', server_name, artifacts_list)
         metadata = self.create_metadata(server_name, image, artifacts_list)
-        stats.Counter(counter_key, metadata=metadata).increment()
+        autotest_stats.Counter(counter_key, metadata=metadata).increment()
         try:
             response = self.call_and_wait(call_name='stage',
                                           archive_url=archive_url,
@@ -698,7 +701,7 @@
             logging.error('trigger_download timed out for %s.', image)
             timeout_key = self.create_stats_str(
                     'trigger_download_timeout', server_name, artifacts_list)
-            stats.Counter(timeout_key, metadata=metadata).increment()
+            autotest_stats.Counter(timeout_key, metadata=metadata).increment()
             raise DevServerException(
                     'trigger_download timed out for %s.' % image)
         was_successful = response == 'Success'
@@ -759,7 +762,7 @@
             timeout_key = self.create_stats_str(
                     'finish_download_timeout', server_name, artifacts_list)
             metadata = self.create_metadata(server_name, image, artifacts_list)
-            stats.Counter(timeout_key, metadata=metadata).increment()
+            autotest_stats.Counter(timeout_key, metadata=metadata).increment()
             raise DevServerException(
                     'finish_download timed out for %s.' % image)
 
diff --git a/client/common_lib/cros/graphite/autotest_stats.py b/client/common_lib/cros/graphite/autotest_stats.py
new file mode 100644
index 0000000..ae63d54
--- /dev/null
+++ b/client/common_lib/cros/graphite/autotest_stats.py
@@ -0,0 +1,62 @@
+# Copyright 2015 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import common
+from autotest_lib.client.common_lib import global_config
+from autotest_lib.client.common_lib.cros.graphite import autotest_es
+from autotest_lib.client.common_lib.cros.graphite import es_utils
+from autotest_lib.client.common_lib.cros.graphite import stats
+
+
+# Pylint locally complains about "No value passed for parameter 'key'" here
+# pylint: disable=E1120
+# If one has their hostname listed including a domain, i.e. |milleral.mtv|,
+# then this will show up on Graphite as milleral/mtv/<stats>.  This seems
+# silly, so let's replace '.'s with '_'s to disambiguate Graphite folders
+# from FQDN hostnames.
+AUTOTEST_SERVER = global_config.global_config.get_config_value(
+        'SERVER', 'hostname', default='localhost').replace('.', '_')
+STATSD_SERVER = global_config.global_config.get_config_value('CROS',
+        'STATSD_SERVER', default='')
+STATSD_PORT = global_config.global_config.get_config_value('CROS',
+        'STATSD_PORT', type=int, default=0)
+
+_default_es = es_utils.ESMetadata(use_http=autotest_es.ES_USE_HTTP,
+                                  host=autotest_es.METADATA_ES_SERVER,
+                                  port=autotest_es.ES_PORT,
+                                  index=autotest_es.INDEX_METADATA,
+                                  udp_port=autotest_es.ES_UDP_PORT)
+_statsd = stats.Statsd(es=_default_es, host=STATSD_SERVER, port=STATSD_PORT,
+                       prefix=AUTOTEST_SERVER)
+
+
+def _es_init(original):
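+    """Wrap a stats class so an 'es' keyword can override its backend."""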
+    class _Derived(original):
+        def __init__(self, *args, **kwargs):
+            es = kwargs.pop('es', None)
+            super(_Derived, self).__init__(*args, **kwargs)
+            if es:
+                self.es = es
+    return _Derived
+
+
+@_es_init
+class Average(_statsd.Average):
+    """Wrapper around _statsd.Average"""
+
+@_es_init
+class Counter(_statsd.Counter):
+    """Wrapper around _statsd.Counter"""
+
+@_es_init
+class Gauge(_statsd.Gauge):
+    """Wrapper around _statsd.Gauge"""
+
+@_es_init
+class Timer(_statsd.Timer):
+    """Wrapper around _statsd.Timer"""
+
+@_es_init
+class Raw(_statsd.Raw):
+    """Wrapper around _statd.Raw"""
diff --git a/client/common_lib/cros/graphite/es_test_utils.py b/client/common_lib/cros/graphite/es_test_utils.py
index 42154b1..6d2dafd 100644
--- a/client/common_lib/cros/graphite/es_test_utils.py
+++ b/client/common_lib/cros/graphite/es_test_utils.py
@@ -13,16 +13,16 @@
 import elasticsearch
 
 from autotest_lib.client.common_lib.cros.graphite import es_utils
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 
 
 # Defines methods in the stats class that can take in metadata.
 TARGET_TO_STATS_CLASS = {
-    'timer': stats.Timer,
-    'gauge': stats.Gauge,
-    'raw': stats.Raw,
-    'average': stats.Average,
-    'counter': stats.Counter,
+    'timer': autotest_stats.Timer,
+    'gauge': autotest_stats.Gauge,
+    'raw': autotest_stats.Raw,
+    'average': autotest_stats.Average,
+    'counter': autotest_stats.Counter,
 }
 
 # Maps target type to method to trigger sending of metadata.
diff --git a/client/common_lib/cros/graphite/stats.py b/client/common_lib/cros/graphite/stats.py
index 56ff289..b3b426e 100644
--- a/client/common_lib/cros/graphite/stats.py
+++ b/client/common_lib/cros/graphite/stats.py
@@ -6,10 +6,6 @@
 
 import logging
 
-import common
-from autotest_lib.client.common_lib import global_config
-from autotest_lib.client.common_lib.cros.graphite import autotest_es
-from autotest_lib.client.common_lib.cros.graphite import es_utils
 
 try:
     import statsd
@@ -17,188 +13,175 @@
     import statsd_mock as statsd
 
 
-# Pylint locally complains about "No value passed for parameter 'key'" here
-# pylint: disable=E1120
-# If one has their hostname listed including a domain, ie. |milleral.mtv|,
-# then this will show up on Graphite as milleral/mtv/<stats>.  This seems
-# silly, so let's replace '.'s with '_'s to disambiguate Graphite folders
-# from FQDN hostnames.
-AUTOTEST_SERVER = global_config.global_config.get_config_value(
-        'SERVER', 'hostname', default='localhost').replace('.', '_')
-STATSD_SERVER = global_config.global_config.get_config_value('CROS',
-        'STATSD_SERVER', default='')
-STATSD_PORT = global_config.global_config.get_config_value('CROS',
-        'STATSD_PORT', type=int, default=0)
-
 # This is _type for all metadata logged to elasticsearch from here.
 STATS_ES_TYPE = 'stats_metadata'
 
 
-def _prepend_server(name, bare=False):
-    """
-    Since many people run their own local AFE, stats from a local setup
-    shouldn't get mixed into stats from prod.  Therefore, this function
-    exists to prepend the name of the local server to the stats if |name|
-    doesn't start with the server name, so that each person has their own
-    "folder" of stats that they can look at.
-
-    However, this functionality might not always be wanted, so we allow
-    one to pass in |bare=True| to force us to not prepend the local
-    server name. (I'm not sure when one would use this, but I don't see why
-    I should disallow it...)
-
-    >>> AUTOTEST_SERVER = 'potato_nyc'
-    >>> _prepend_server('rpc.create_job', bare=False)
-    'potato_nyc.rpc.create_job'
-    >>> _prepend_server('rpc.create_job', bare=True)
-    'rpc.create_job'
-
-    @param name The name to append to the server name if it doesn't start
-                with the server name.
-    @param bare If True, |name| will be returned un-altered.
-    @return A string to use as the stat name.
-
-    """
-    if not bare and not name.startswith(AUTOTEST_SERVER):
-        name = '%s.%s' % (AUTOTEST_SERVER, name)
-    return name
-
-
 # statsd logs details about what it's sending at the DEBUG level, and I
 # really don't want to see tons of stats in the logs, so all of these are
 # silenced by setting the logging level for all of statsd to WARNING.
 logging.getLogger('statsd').setLevel(logging.WARNING)
 
 
-# In case someone uses statsd off of site-packages instead of here
-# let's still override the defaults in case one starts using clients
-# from statsd instead of from here.  It can't hurt?
-statsd.Connection.set_defaults(host=STATSD_SERVER, port=STATSD_PORT)
+def _prepend_init(_es, _conn, _prefix):
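+    """Build a decorator binding the given es, connection, and prefix."""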
+    def wrapper(original):
+        """Decorator to override __init__."""
+
+        class _Derived(original):
+            def __init__(self, name, connection=None, bare=False,
+                         metadata=None):
+                name = self._add_prefix(name, _prefix, bare)
+                conn = connection if connection else _conn
+                super(_Derived, self).__init__(name, conn)
+                self.metadata = metadata
+                self.es = _es
+
+            def _add_prefix(self, name, prefix, bare=False):
+                """
+                Since many people run their own local AFE, stats from a local
+                setup shouldn't get mixed into stats from prod.  Therefore,
+                this function exists to add a prefix, nominally the name of
+                the local server, if |name| doesn't already start with the
+                server name, so that each person has their own "folder" of
+                stats that they can look at.
+
+                However, this functionality might not always be wanted, so we
+                allow one to pass in |bare=True| to force us to not prepend
+                the local server name. (I'm not sure when one would use this,
+                but I don't see why I should disallow it...)
+
+                >>> prefix = 'potato_nyc'
+                >>> self._add_prefix('rpc.create_job', prefix, bare=False)
+                'potato_nyc.rpc.create_job'
+                >>> self._add_prefix('rpc.create_job', prefix, bare=True)
+                'rpc.create_job'
+
+                @param name The name to prepend the prefix to if it doesn't
+                            already start with it.
+                @param prefix The prefix to prepend, nominally the local
+                              server name.
+                @param bare If True, |name| will be returned unaltered.
+                @return A string to use as the stat name.
+
+                """
+                if not bare and not name.startswith(prefix):
+                    name = '%s.%s' % (prefix, name)
+                return name
+
+        return _Derived
+    return wrapper
 
 
-# This is the connection that we're going to reuse for every client that gets
-# created.  This should maximally reduce overhead of stats logging.
-_conn = statsd.Connection(host=STATSD_SERVER, port=STATSD_PORT)
+class Statsd(object):
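+    """Holds one statsd configuration and the wrapper classes built on it."""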
+    def __init__(self, es, host, port, prefix):
+        # This is the connection that we're going to reuse for every client
+        # that gets created. This should maximally reduce overhead of stats
+        # logging.
+        self.conn = statsd.Connection(host=host, port=port)
+
+        @_prepend_init(es, self.conn, prefix)
+        class Average(statsd.Average):
+            """Wrapper around statsd.Average."""
+
+            def send(self, subname, value):
+                """Sends time-series data to graphite and metadata (if any)
+                to es.
+
+                @param subname: The subname to report the data to (i.e.
+                    'daisy.reboot')
+                @param value: Value to be sent.
+                """
+                statsd.Average.send(self, subname, value)
+                self.es.post(type_str=STATS_ES_TYPE, metadata=self.metadata,
+                             subname=subname, value=value)
+
+        self.Average = Average
+
+        @_prepend_init(es, self.conn, prefix)
+        class Counter(statsd.Counter):
+            """Wrapper around statsd.Counter."""
+
+            def _send(self, subname, value):
+                """Sends time-series data to graphite and metadata (if any)
+                to es.
+
+                @param subname: The subname to report the data to (i.e.
+                    'daisy.reboot')
+                @param value: Value to be sent.
+                """
+                statsd.Counter._send(self, subname, value)
+                self.es.post(type_str=STATS_ES_TYPE, metadata=self.metadata,
+                             subname=subname, value=value)
+
+        self.Counter = Counter
+
+        @_prepend_init(es, self.conn, prefix)
+        class Gauge(statsd.Gauge):
+            """Wrapper around statsd.Gauge."""
+
+            def send(self, subname, value):
+                """Sends time-series data to graphite and metadata (if any)
+                to es.
+
+                @param subname: The subname to report the data to (i.e.
+                    'daisy.reboot')
+                @param value: Value to be sent.
+                """
+                statsd.Gauge.send(self, subname, value)
+                self.es.post(type_str=STATS_ES_TYPE, metadata=self.metadata,
+                             subname=subname, value=value)
+
+        self.Gauge = Gauge
+
+        @_prepend_init(es, self.conn, prefix)
+        class Timer(statsd.Timer):
+            """Wrapper around statsd.Timer."""
+
+            # Override so an empty subname does not implicitly append 'total'.
+            def stop(self, subname=''):
+                statsd.Timer.stop(self, subname)
 
 
-# We now need to wrap around the stats in statsd so that the server name gets
-# automagically prepended.
+            def send(self, subname, value):
+                """Sends time-series data to graphite and metadata (if any)
+                to es.
 
-# I was tempted to do this as just factory functions, ie.
-#   def Average(name, bare): return statsd.Average(_prepended(name, bare))
-# but then we'd have things that look like a class and wrap a class but
-# is not a class and that feels confusing. And
-#   Average = _prepend_to_stat(statsd.Average)
-# just feels like too much magic, so we're left with lots of mini-classes.
+                @param subname: The subname to report the data to (i.e.
+                    'daisy.reboot')
+                @param value: Value to be sent.
+                """
+                statsd.Timer.send(self, subname, value)
+                self.es.post(type_str=STATS_ES_TYPE, metadata=self.metadata,
+                             subname=self.name, value=value)
 
 
-def _prepend_init(original):
-    """Decorator to override __init__."""
-
-    class _Derived(original):
-        default_es = es_utils.ESMetadata(use_http=autotest_es.ES_USE_HTTP,
-                                         host=autotest_es.METADATA_ES_SERVER,
-                                         port=autotest_es.ES_PORT,
-                                         index=autotest_es.INDEX_METADATA,
-                                         udp_port=autotest_es.ES_UDP_PORT)
-        def __init__(self, name, es=default_es, connection=None,
-                     bare=False, metadata=None):
-            conn = connection or _conn
-            super(_Derived, self).__init__(_prepend_server(name, bare), conn)
-            self.metadata = metadata
-            self.es = es
-
-    return _Derived
+            def __enter__(self):
+                self.start()
+                return self
 
 
-@_prepend_init
-class Average(statsd.Average):
-    """Wrapper around statsd.Average."""
+            def __exit__(self, exn_type, exn_value, traceback):
+                if exn_type is None:
+                    self.stop()
 
-    def send(self, subname, value):
-        """Sends time-series data to graphite and metadata (if any) to es.
+        self.Timer = Timer
 
-        @param subname: The subname to report the data to (i.e. 'daisy.reboot')
-        @param value: Value to be sent.
-        """
-        statsd.Average.send(self, subname, value)
-        self.es.post(type_str=STATS_ES_TYPE, metadata=self.metadata,
-                     subname=subname, value=value)
+        @_prepend_init(es, self.conn, prefix)
+        class Raw(statsd.Raw):
+            """Wrapper around statsd.Raw."""
 
+            def send(self, subname, value, timestamp=None):
+                """Sends time-series data to graphite and metadata (if any)
+                to es.
 
-@_prepend_init
-class Counter(statsd.Counter):
-    """Wrapper around statsd.Counter."""
+                The datapoint we send is pretty much unchanged (it will not
+                be aggregated).
 
-    def _send(self, subname, value):
-        """Sends time-series data to graphite and metadata (if any) to es.
+                @param subname: The subname to report the data to (i.e.
+                    'daisy.reboot')
+                @param value: Value to be sent.
+                @param timestamp: Time associated with when this stat was sent.
+                """
+                statsd.Raw.send(self, subname, value, timestamp)
+                self.es.post(type_str=STATS_ES_TYPE, metadata=self.metadata,
+                             subname=subname, value=value, timestamp=timestamp)
 
-        @param subname: The subname to report the data to (i.e. 'daisy.reboot')
-        @param value: Value to be sent.
-        """
-        statsd.Counter._send(self, subname, value)
-        self.es.post(type_str=STATS_ES_TYPE, metadata=self.metadata,
-                     subname=subname, value=value)
-
-
-@_prepend_init
-class Gauge(statsd.Gauge):
-    """Wrapper around statsd.Gauge."""
-
-    def send(self, subname, value):
-        """Sends time-series data to graphite and metadata (if any) to es.
-
-        @param subname: The subname to report the data to (i.e. 'daisy.reboot')
-        @param value: Value to be sent.
-        """
-        statsd.Gauge.send(self, subname, value)
-        self.es.post(type_str=STATS_ES_TYPE, metadata=self.metadata,
-                     subname=subname, value=value)
-
-
-@_prepend_init
-class Timer(statsd.Timer):
-    """Wrapper around statsd.Timer."""
-
-    # To override subname to not implicitly append 'total'.
-    def stop(self, subname=''):
-        statsd.Timer.stop(self, subname)
-
-
-    def send(self, subname, value):
-        """Sends time-series data to graphite and metadata (if any) to es.
-
-        @param subname: The subname to report the data to (i.e. 'daisy.reboot')
-        @param value: Value to be sent.
-        """
-        statsd.Timer.send(self, subname, value)
-        self.es.post(type_str=STATS_ES_TYPE, metadata=self.metadata,
-                     subname=self.name, value=value)
-
-
-    def __enter__(self):
-        self.start()
-        return self
-
-
-    def __exit__(self, exn_type, exn_value, traceback):
-        if exn_type is None:
-            self.stop()
-
-
-@_prepend_init
-class Raw(statsd.Raw):
-    """Wrapper around statsd.Raw."""
-
-    def send(self, subname, value, timestamp=None):
-        """Sends time-series data to graphite and metadata (if any) to es.
-
-        The datapoint we send is pretty much unchanged (will not be aggregated)
-
-        @param subname: The subname to report the data to (i.e. 'daisy.reboot')
-        @param value: Value to be sent.
-        @param timestamp: Time associated with when this stat was sent.
-        """
-        statsd.Raw.send(self, subname, value, timestamp)
-        self.es.post(type_str=STATS_ES_TYPE, metadata=self.metadata,
-                     subname=subname, value=value, timestamp=timestamp)
+        self.Raw = Raw
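
For reference, here is a trimmed-down sketch (stand-in classes, hypothetical
names) of the shape Statsd now has: configuration is captured by the
instance, and the wrapper classes are manufactured inside __init__, so
importing stats.py opens no connection and leaves no global state behind:

class _Connection(object):
    """Stand-in for statsd.Connection."""
    def __init__(self, host, port):
        self.host, self.port = host, port


class _BaseCounter(object):
    """Stand-in for statsd.Counter."""
    def __init__(self, name, connection):
        self.name, self.connection = name, connection


class Statsd(object):
    def __init__(self, host, port, prefix):
        # One connection shared by every stat class this instance builds.
        conn = _Connection(host=host, port=port)

        class Counter(_BaseCounter):
            """Counter bound to this instance's connection and prefix."""
            def __init__(self, name, connection=None, bare=False):
                if not bare and not name.startswith(prefix):
                    name = '%s.%s' % (prefix, name)
                super(Counter, self).__init__(name, connection or conn)

        self.Counter = Counter


prod = Statsd(host='statsd-prod', port=8125, prefix='potato_nyc')
print(prod.Counter('rpc.create_job').name)  # potato_nyc.rpc.create_job
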
diff --git a/client/common_lib/hosts/base_classes.py b/client/common_lib/hosts/base_classes.py
index 8752bb2..12d8eef 100644
--- a/client/common_lib/hosts/base_classes.py
+++ b/client/common_lib/hosts/base_classes.py
@@ -19,7 +19,7 @@
 
 from autotest_lib.client.common_lib import global_config, error, utils
 from autotest_lib.client.common_lib import host_protections
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.client.bin import partition
 
 
@@ -221,9 +221,9 @@
         implementation based entirely on wait_up and wait_down. """
         key_string = 'Reboot.%s' % dargs.get('board')
 
-        total_reboot_timer = stats.Timer('%s.total' % key_string,
+        total_reboot_timer = autotest_stats.Timer('%s.total' % key_string,
                 metadata=self.construct_host_metadata('reboot_total'))
-        wait_down_timer = stats.Timer('%s.wait_down' % key_string,
+        wait_down_timer = autotest_stats.Timer('%s.wait_down' % key_string,
                 metadata=self.construct_host_metadata('reboot_down'))
 
         total_reboot_timer.start()
@@ -235,7 +235,7 @@
                 self.record("ABORT", None, "reboot.verify", "shut down failed")
             raise error.AutoservShutdownError("Host did not shut down")
         wait_down_timer.stop()
-        wait_up_timer = stats.Timer('%s.wait_up' % key_string,
+        wait_up_timer = autotest_stats.Timer('%s.wait_up' % key_string,
                 metadata=self.construct_host_metadata('reboot_up'))
         wait_up_timer.start()
         if self.wait_up(timeout):
diff --git a/contrib/db_optimize.py b/contrib/db_optimize.py
index 8a40400..830999f 100755
--- a/contrib/db_optimize.py
+++ b/contrib/db_optimize.py
@@ -14,13 +14,13 @@
 import subprocess
 
 import common
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.frontend import database_settings_helper
 from autotest_lib.scheduler import email_manager
 
 
 STATS_KEY = 'db_optimize.%s' % socket.gethostname()
-timer = stats.Timer(STATS_KEY)
+timer = autotest_stats.Timer(STATS_KEY)
 
 @timer.decorate
 def main_without_exception_handling():
diff --git a/frontend/afe/json_rpc/serviceHandler.py b/frontend/afe/json_rpc/serviceHandler.py
index 2ecb000..52e38f8 100644
--- a/frontend/afe/json_rpc/serviceHandler.py
+++ b/frontend/afe/json_rpc/serviceHandler.py
@@ -32,7 +32,7 @@
     from json import encoder
     json_encoder = encoder.JSONEncoder()
 
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 
 
 json_decoder = decoder.JSONDecoder()
@@ -103,11 +103,11 @@
         except KeyError:
             raise BadServiceRequest(request)
 
-        stats.Counter('rpc').increment(methName)
+        autotest_stats.Counter('rpc').increment(methName)
 
         metadata = request.copy()
         metadata['_type'] = 'rpc'
-        timer = stats.Timer('rpc', metadata=metadata)
+        timer = autotest_stats.Timer('rpc', metadata=metadata)
 
         try:
             timer.start()
diff --git a/frontend/afe/rpc_interface.py b/frontend/afe/rpc_interface.py
index 7a058db..4b2e90d 100644
--- a/frontend/afe/rpc_interface.py
+++ b/frontend/afe/rpc_interface.py
@@ -35,7 +35,7 @@
 from django.db.models import Count
 import common
 from autotest_lib.client.common_lib import priorities
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.frontend.afe import models, model_logic, model_attributes
 from autotest_lib.frontend.afe import control_file, rpc_utils
 from autotest_lib.frontend.afe import site_rpc_interface
@@ -45,7 +45,7 @@
 from autotest_lib.server.cros.dynamic_suite import tools
 
 
-_timer = stats.Timer('rpc_interface')
+_timer = autotest_stats.Timer('rpc_interface')
 
 def get_parameterized_autoupdate_image_url(job):
     """Get the parameterized autoupdate image url from a parameterized job."""
diff --git a/frontend/afe/site_rpc_interface.py b/frontend/afe/site_rpc_interface.py
index dc097b3..334340c 100644
--- a/frontend/afe/site_rpc_interface.py
+++ b/frontend/afe/site_rpc_interface.py
@@ -18,7 +18,7 @@
 from autotest_lib.client.common_lib import priorities
 from autotest_lib.client.common_lib import time_utils
 from autotest_lib.client.common_lib.cros import dev_server
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.frontend.afe import rpc_utils
 from autotest_lib.server import utils
 from autotest_lib.server.cros.dynamic_suite import constants
@@ -62,9 +62,10 @@
     @return the contents of the desired control file.
     """
     getter = control_file_getter.DevServerGetter.create(build, ds)
-    timer = stats.Timer('control_files.parse.%s.%s' %
-                        (ds.get_server_name(ds.url()).replace('.', '_'),
-                            suite_name.rsplit('.')[-1]))
+    timer = autotest_stats.Timer('control_files.parse.%s.%s' %
+                                 (ds.get_server_name(ds.url()
+                                                     ).replace('.', '_'),
+                                  suite_name.rsplit('.')[-1]))
     # Get the control file for the suite.
     try:
         with timer:
@@ -102,8 +103,8 @@
     # components to be downloaded in the background.
     ds = dev_server.ImageServer.resolve(build)
     timings[constants.DOWNLOAD_STARTED_TIME] = formatted_now()
-    timer = stats.Timer('control_files.stage.%s' % (
-                    ds.get_server_name(ds.url()).replace('.', '_')))
+    timer = autotest_stats.Timer('control_files.stage.%s' % (
+            ds.get_server_name(ds.url()).replace('.', '_')))
     try:
         with timer:
             ds.stage_artifacts(build, ['test_suites'])
@@ -373,7 +374,7 @@
     # A NOT IN query with 5000 ids took about 30ms in tests made.
     # These numbers seem low enough to outweigh the disadvantages of the
     # solutions described above.
-    timer = stats.Timer('shard_heartbeat')
+    timer = autotest_stats.Timer('shard_heartbeat')
     with timer:
         shard_obj = rpc_utils.retrieve_shard(shard_hostname=shard_hostname)
         rpc_utils.persist_records_sent_from_shard(shard_obj, jobs, hqes)
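
The shard_heartbeat timing above relies on the context-manager support the
Timer wrapper gained in stats.py. A minimal sketch (stand-in Timer, not the
wrapped statsd.Timer) of that behavior: a clean exit reports the elapsed
time, while a block that raises is deliberately left unreported:

import time


class Timer(object):
    def __init__(self, name):
        self.name = name

    def start(self):
        self._start = time.time()

    def stop(self, subname=''):
        stat = '%s.%s' % (self.name, subname) if subname else self.name
        print('%s: %.3fs' % (stat, time.time() - self._start))

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exn_type, exn_value, traceback):
        if exn_type is None:  # only successful runs get reported
            self.stop()


with Timer('shard_heartbeat'):
    time.sleep(0.01)
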
diff --git a/scheduler/drone_manager.py b/scheduler/drone_manager.py
index a4895b1..065949d 100644
--- a/scheduler/drone_manager.py
+++ b/scheduler/drone_manager.py
@@ -9,7 +9,7 @@
 
 import common
 from autotest_lib.client.common_lib import error, global_config, utils
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.scheduler import email_manager, drone_utility, drones
 from autotest_lib.scheduler import scheduler_config
 from autotest_lib.scheduler import thread_lib
@@ -153,7 +153,7 @@
     # about a drone hitting process limit is sent.
     NOTIFY_INTERVAL = 60 * 60 * 24 # one day
     _STATS_KEY = 'drone_manager'
-    _timer = stats.Timer(_STATS_KEY)
+    _timer = autotest_stats.Timer(_STATS_KEY)
 
 
     def __init__(self):
@@ -408,7 +408,7 @@
                 info = self._registered_pidfile_info[pidfile_id]
                 if info.num_processes is not None:
                     drone.active_processes += info.num_processes
-        stats.Gauge(self._STATS_KEY).send(
+        autotest_stats.Gauge(self._STATS_KEY).send(
                 '%s.%s' % (drone.hostname.replace('.', '_'),
                            'active_processes'), drone.active_processes)
 
diff --git a/scheduler/drone_utility.py b/scheduler/drone_utility.py
index dfb4905..8485d93 100755
--- a/scheduler/drone_utility.py
+++ b/scheduler/drone_utility.py
@@ -18,7 +18,7 @@
 from autotest_lib.client.common_lib import utils, global_config, error
 from autotest_lib.client.common_lib import logging_manager
 from autotest_lib.client.common_lib.cros import retry
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.scheduler import drone_logging_config
 from autotest_lib.scheduler import email_manager, scheduler_config
 from autotest_lib.server import hosts, subcommand
@@ -32,7 +32,7 @@
 _TRANSFER_FAILED_FILE = '.transfer_failed'
 
 _STATS_KEY = 'drone_utility'
-timer = stats.Timer(_STATS_KEY)
+timer = autotest_stats.Timer(_STATS_KEY)
 
 class _MethodCall(object):
     def __init__(self, method, args, kwargs):
@@ -224,16 +224,17 @@
             the processes to kill.
         """
         kill_proc_key = 'kill_processes'
-        stats.Gauge(_STATS_KEY).send('%s.%s' % (kill_proc_key, 'net'),
-                                     len(process_list))
+        autotest_stats.Gauge(_STATS_KEY).send('%s.%s' % (kill_proc_key, 'net'),
+                                              len(process_list))
         try:
             logging.info('List of process to be killed: %s', process_list)
             sig_counts = utils.nuke_pids(
                             [-process.pid for process in process_list],
                             signal_queue=(signal.SIGKILL,))
             for name, count in sig_counts.iteritems():
-                stats.Gauge(_STATS_KEY).send('%s.%s' % (kill_proc_key, name),
-                                             count)
+                autotest_stats.Gauge(_STATS_KEY).send('%s.%s' %
+                                                      (kill_proc_key, name),
+                                                      count)
         except error.AutoservRunError as e:
             self._warn('Error occurred when killing processes. Error: %s' % e)
 
@@ -523,8 +524,8 @@
         calls = parse_input()
     args = _parse_args(sys.argv[1:])
     if args.call_time is not None:
-        stats.Gauge(_STATS_KEY).send('invocation_overhead',
-                                     time.time() - args.call_time)
+        autotest_stats.Gauge(_STATS_KEY).send('invocation_overhead',
+                                              time.time() - args.call_time)
 
     drone_utility = DroneUtility()
     return_value = drone_utility.execute_calls(calls)
diff --git a/scheduler/drones.py b/scheduler/drones.py
index edca060..d3d3562 100644
--- a/scheduler/drones.py
+++ b/scheduler/drones.py
@@ -9,7 +9,7 @@
 from autotest_lib.scheduler import drone_utility, email_manager
 from autotest_lib.client.bin import local_host
 from autotest_lib.client.common_lib import error, global_config, utils
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 
 
 AUTOTEST_INSTALL_DIR = global_config.global_config.get_config_value('SCHEDULER',
@@ -96,7 +96,7 @@
 
 
     def _execute_calls(self, calls):
-        stats.Gauge('drone_execute_call_count').send(
+        autotest_stats.Gauge('drone_execute_call_count').send(
                     self.hostname.replace('.', '_'), len(calls))
         return_message = self._execute_calls_impl(calls)
         for warning in return_message['warnings']:
diff --git a/scheduler/host_scheduler.py b/scheduler/host_scheduler.py
index 719b2f2..c670355 100755
--- a/scheduler/host_scheduler.py
+++ b/scheduler/host_scheduler.py
@@ -63,7 +63,7 @@
 from autotest_lib.frontend import setup_django_environment
 
 from autotest_lib.client.common_lib import global_config
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.scheduler import email_manager
 from autotest_lib.scheduler import query_managers
 from autotest_lib.scheduler import rdb_lib
@@ -102,7 +102,7 @@
     """
 
 
-    _timer = stats.Timer('suite_recorder')
+    _timer = autotest_stats.Timer('suite_recorder')
 
 
     def __init__(self, job_query_manager):
@@ -189,7 +189,7 @@
     """
 
 
-    _timer = stats.Timer('base_host_scheduler')
+    _timer = autotest_stats.Timer('base_host_scheduler')
     host_assignment = collections.namedtuple('host_assignment', ['host', 'job'])
 
 
@@ -289,7 +289,7 @@
 class HostScheduler(BaseHostScheduler):
     """A scheduler capable managing host acquisition for new jobs."""
 
-    _timer = stats.Timer('host_scheduler')
+    _timer = autotest_stats.Timer('host_scheduler')
 
 
     def __init__(self):
@@ -334,9 +334,11 @@
             self.schedule_host_job(acquisition.host, acquisition.job)
             self._record_host_assignment(acquisition.host, acquisition.job)
             new_jobs_with_hosts += 1
-        stats.Gauge(key).send('new_jobs_with_hosts', new_jobs_with_hosts)
-        stats.Gauge(key).send('new_jobs_without_hosts',
-                              len(unverified_host_jobs) - new_jobs_with_hosts)
+        autotest_stats.Gauge(key).send('new_jobs_with_hosts',
+                                       new_jobs_with_hosts)
+        autotest_stats.Gauge(key).send('new_jobs_without_hosts',
+                                       len(unverified_host_jobs) -
+                                       new_jobs_with_hosts)
 
 
     @_timer.decorate
diff --git a/scheduler/monitor_db.py b/scheduler/monitor_db.py
index 3be9eb0..59af937 100755
--- a/scheduler/monitor_db.py
+++ b/scheduler/monitor_db.py
@@ -21,7 +21,7 @@
 
 from autotest_lib.client.common_lib import global_config
 from autotest_lib.client.common_lib import utils
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.frontend.afe import models, rpc_utils
 from autotest_lib.scheduler import agent_task, drone_manager
 from autotest_lib.scheduler import email_manager, gc_stats, host_scheduler
@@ -317,7 +317,7 @@
         major step begins so we can try to figure out where we are using most
         of the tick time.
         """
-        timer = stats.Timer('scheduler.tick')
+        timer = autotest_stats.Timer('scheduler.tick')
         self._log_tick_msg('Calling new tick, starting garbage collection().')
         self._garbage_collection()
         self._log_tick_msg('Calling _drone_manager.trigger_refresh().')
@@ -483,7 +483,7 @@
         status_list = ','.join("'%s'" % status for status in statuses)
         queue_entries = scheduler_models.HostQueueEntry.fetch(
                 where='status IN (%s)' % status_list)
-        stats.Gauge('scheduler.jobs_per_tick').send(
+        autotest_stats.Gauge('scheduler.jobs_per_tick').send(
                 'running', len(queue_entries))
 
         agent_tasks = []
@@ -801,7 +801,7 @@
                 host_jobs.append(queue_entry)
                 new_jobs_need_hosts = new_jobs_need_hosts + 1
 
-        stats.Gauge(key).send('new_hostless_jobs', new_hostless_jobs)
+        autotest_stats.Gauge(key).send('new_hostless_jobs', new_hostless_jobs)
         if not host_jobs:
             return
         if not _inline_host_acquisition:
@@ -816,9 +816,11 @@
             self._schedule_host_job(host_assignment.host, host_assignment.job)
             new_jobs_with_hosts = new_jobs_with_hosts + 1
 
-        stats.Gauge(key).send('new_jobs_with_hosts', new_jobs_with_hosts)
-        stats.Gauge(key).send('new_jobs_without_hosts',
-                              new_jobs_need_hosts - new_jobs_with_hosts)
+        autotest_stats.Gauge(key).send('new_jobs_with_hosts',
+                                       new_jobs_with_hosts)
+        autotest_stats.Gauge(key).send('new_jobs_without_hosts',
+                                       new_jobs_need_hosts -
+                                       new_jobs_with_hosts)
 
 
     def _schedule_running_host_queue_entries(self):
@@ -1008,9 +1010,9 @@
                 num_finished_this_cycle += agent.task.num_processes
                 self._log_extra_msg("Agent finished")
                 self.remove_agent(agent)
-        stats.Gauge('scheduler.jobs_per_tick').send(
+        autotest_stats.Gauge('scheduler.jobs_per_tick').send(
                 'agents_started', num_started_this_cycle)
-        stats.Gauge('scheduler.jobs_per_tick').send(
+        autotest_stats.Gauge('scheduler.jobs_per_tick').send(
                 'agents_finished', num_finished_this_cycle)
         logging.info('%d running processes. %d added this cycle.',
                      _drone_manager.total_running_processes(),
diff --git a/scheduler/monitor_db_cleanup.py b/scheduler/monitor_db_cleanup.py
index 3114cf4..8136933 100644
--- a/scheduler/monitor_db_cleanup.py
+++ b/scheduler/monitor_db_cleanup.py
@@ -8,7 +8,7 @@
 from autotest_lib.frontend.afe import models
 from autotest_lib.scheduler import email_manager, scheduler_config
 from autotest_lib.client.common_lib import host_protections
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 
 class PeriodicCleanup(object):
 
@@ -43,7 +43,7 @@
     """User cleanup that is controlled by the global config variable
        clean_interval_minutes in the SCHEDULER section.
     """
-    timer = stats.Timer('monitor_db_cleanup.user_cleanup')
+    timer = autotest_stats.Timer('monitor_db_cleanup.user_cleanup')
 
 
     def __init__(self, db, clean_interval_minutes):
@@ -219,7 +219,7 @@
     """Cleanup that runs at the startup of monitor_db and every subsequent
        twenty four hours.
     """
-    timer = stats.Timer('monitor_db_cleanup.twentyfourhour_cleanup')
+    timer = autotest_stats.Timer('monitor_db_cleanup.twentyfourhour_cleanup')
 
 
     def __init__(self, db, run_at_initialize=True):
diff --git a/scheduler/postjob_task.py b/scheduler/postjob_task.py
index de42bea..8c7c204 100644
--- a/scheduler/postjob_task.py
+++ b/scheduler/postjob_task.py
@@ -10,7 +10,7 @@
 
 import os
 
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.frontend.afe import models, model_attributes
 from autotest_lib.scheduler import agent_task, drones, drone_manager
 from autotest_lib.scheduler import email_manager, pidfile_monitor
@@ -122,15 +122,17 @@
     @classmethod
     def _increment_running_processes(cls):
         cls._num_running_processes += 1
-        stats.Gauge('scheduler').send('%s.num_running_processes' % cls.__name__,
-                                      cls._num_running_processes)
+        autotest_stats.Gauge('scheduler').send(
+                '%s.num_running_processes' % cls.__name__,
+                cls._num_running_processes)
 
 
     @classmethod
     def _decrement_running_processes(cls):
         cls._num_running_processes -= 1
-        stats.Gauge('scheduler').send('%s.num_running_processes' % cls.__name__,
-                                      cls._num_running_processes)
+        autotest_stats.Gauge('scheduler').send(
+                '%s.num_running_processes' % cls.__name__,
+                cls._num_running_processes)
 
 
     @classmethod
diff --git a/scheduler/query_managers.py b/scheduler/query_managers.py
index 9cf0cdf..488490c 100644
--- a/scheduler/query_managers.py
+++ b/scheduler/query_managers.py
@@ -12,7 +12,7 @@
 
 import common
 
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.frontend import setup_django_environment
 from autotest_lib.frontend.afe import models
 from autotest_lib.server.cros.dynamic_suite import constants
@@ -20,7 +20,7 @@
 from autotest_lib.scheduler import scheduler_lib
 
 
-_job_timer = stats.Timer('scheduler.job_query_manager')
+_job_timer = autotest_stats.Timer('scheduler.job_query_manager')
 class AFEJobQueryManager(object):
     """Query manager for AFE Jobs."""
 
@@ -210,7 +210,7 @@
         return dict((keyval.job_id, int(keyval.value)) for keyval in query)
 
 
-_host_timer = stats.Timer('scheduler.host_query_manager')
+_host_timer = autotest_stats.Timer('scheduler.host_query_manager')
 class AFEHostQueryManager(object):
     """Query manager for AFE Hosts."""
 
diff --git a/scheduler/rdb.py b/scheduler/rdb.py
index b7fa002..23a124b 100644
--- a/scheduler/rdb.py
+++ b/scheduler/rdb.py
@@ -12,7 +12,7 @@
 from django.core import exceptions as django_exceptions
 from django.db.models import fields
 from django.db.models import Q
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.frontend.afe import models
 from autotest_lib.scheduler import rdb_cache_manager
 from autotest_lib.scheduler import rdb_hosts
@@ -21,7 +21,7 @@
 from autotest_lib.server import utils
 
 
-_timer = stats.Timer(rdb_utils.RDB_STATS_KEY)
+_timer = autotest_stats.Timer(rdb_utils.RDB_STATS_KEY)
 _is_master = not utils.is_shard()
 
 
@@ -414,10 +414,10 @@
         logging.debug('Host acquisition stats: distinct requests: %s, leased '
                       'hosts: %s, unsatisfied requests: %s', distinct_requests,
                       self.leased_hosts_count, self.unsatisfied_requests)
-        stats.Gauge(rdb_utils.RDB_STATS_KEY).send('leased_hosts',
-                                                  self.leased_hosts_count)
-        stats.Gauge(rdb_utils.RDB_STATS_KEY).send('unsatisfied_requests',
-                                                  self.unsatisfied_requests)
+        autotest_stats.Gauge(rdb_utils.RDB_STATS_KEY).send(
+                'leased_hosts', self.leased_hosts_count)
+        autotest_stats.Gauge(rdb_utils.RDB_STATS_KEY).send(
+                'unsatisfied_requests', self.unsatisfied_requests)
 
 
     @_timer.decorate
diff --git a/scheduler/rdb_cache_manager.py b/scheduler/rdb_cache_manager.py
index 8ad1627..cd3cc17 100644
--- a/scheduler/rdb_cache_manager.py
+++ b/scheduler/rdb_cache_manager.py
@@ -81,7 +81,7 @@
 import logging
 
 import common
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.client.common_lib.global_config import global_config
 from autotest_lib.scheduler import rdb_utils
 
@@ -233,9 +233,10 @@
         staleness = self.mean_staleness()
         logging.debug('Cache stats: hit ratio: %.2f%%, '
                       'avg staleness per line: %.2f%%.', hit_ratio, staleness)
-        stats.Gauge(rdb_utils.RDB_STATS_KEY).send('cache.hit_ratio', hit_ratio)
-        stats.Gauge(rdb_utils.RDB_STATS_KEY).send('cache.stale_entries',
-                                                  staleness)
+        autotest_stats.Gauge(rdb_utils.RDB_STATS_KEY).send(
+                'cache.hit_ratio', hit_ratio)
+        autotest_stats.Gauge(rdb_utils.RDB_STATS_KEY).send(
+                'cache.stale_entries', staleness)
 
 
     @classmethod
diff --git a/scheduler/rdb_utils.py b/scheduler/rdb_utils.py
index 6e29d7b..5e07342 100644
--- a/scheduler/rdb_utils.py
+++ b/scheduler/rdb_utils.py
@@ -10,7 +10,7 @@
 import collections
 
 import common
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.client.common_lib import priorities
 
 RDB_STATS_KEY = 'rdb'
@@ -99,7 +99,7 @@
     get_rest requests, it will not be fulfilled anyway.
     """
 
-    _gauge = stats.Gauge(RDB_STATS_KEY)
+    _gauge = autotest_stats.Gauge(RDB_STATS_KEY)
 
 
     def __init__(self, host_requests):
diff --git a/scheduler/scheduler_models.py b/scheduler/scheduler_models.py
index 0757ee3..0fb17c2 100644
--- a/scheduler/scheduler_models.py
+++ b/scheduler/scheduler_models.py
@@ -24,7 +24,7 @@
 from autotest_lib.client.common_lib import time_utils
 from autotest_lib.client.common_lib import utils
 from autotest_lib.client.common_lib.cros.graphite import autotest_es
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.frontend.afe import models, model_attributes
 from autotest_lib.scheduler import drone_manager, email_manager
 from autotest_lib.scheduler import rdb_lib
@@ -389,7 +389,7 @@
     _fields = ('id', 'hostname', 'locked', 'synch_id', 'status',
                'invalid', 'protection', 'locked_by_id', 'lock_time', 'dirty',
                'leased', 'shard_id')
-    _timer = stats.Timer("scheduler_models.Host")
+    _timer = autotest_stats.Timer("scheduler_models.Host")
 
 
     @_timer.decorate
@@ -466,7 +466,7 @@
     _fields = ('id', 'job_id', 'host_id', 'status', 'meta_host',
                'active', 'complete', 'deleted', 'execution_subdir',
                'atomic_group_id', 'aborted', 'started_on', 'finished_on')
-    _timer = stats.Timer('scheduler_models.HostQueueEntry')
+    _timer = autotest_stats.Timer('scheduler_models.HostQueueEntry')
 
 
     def __init__(self, id=None, row=None, **kwargs):
@@ -929,7 +929,7 @@
                'parse_failed_repair', 'max_runtime_hrs', 'drone_set_id',
                'parameterized_job_id', 'max_runtime_mins', 'parent_job_id',
                'test_retry', 'run_reset', 'timeout_mins', 'shard_id')
-    _timer = stats.Timer("scheduler_models.Job")
+    _timer = autotest_stats.Timer("scheduler_models.Job")
 
     # This does not need to be a column in the DB.  The delays are likely to
     # be configured short.  If the scheduler is stopped and restarted in
diff --git a/scheduler/shard/shard_client.py b/scheduler/shard/shard_client.py
index 2e76b3b..66a3792 100755
--- a/scheduler/shard/shard_client.py
+++ b/scheduler/shard/shard_client.py
@@ -16,7 +16,7 @@
 from autotest_lib.frontend import setup_django_environment
 from autotest_lib.client.common_lib import error
 from autotest_lib.client.common_lib import global_config
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.frontend.afe import models
 from autotest_lib.scheduler import email_manager
 from autotest_lib.scheduler import scheduler_lib
@@ -86,7 +86,7 @@
 RPC_DELAY_SEC = 5
 
 STATS_KEY = 'shard_client.%s' % socket.gethostname()
-timer = stats.Timer(STATS_KEY)
+timer = autotest_stats.Timer(STATS_KEY)
 _heartbeat_client = None
 
 
@@ -121,9 +121,9 @@
         hosts_serialized = heartbeat_response['hosts']
         jobs_serialized = heartbeat_response['jobs']
 
-        stats.Gauge(STATS_KEY).send(
+        autotest_stats.Gauge(STATS_KEY).send(
             'hosts_received', len(hosts_serialized))
-        stats.Gauge(STATS_KEY).send(
+        autotest_stats.Gauge(STATS_KEY).send(
             'jobs_received', len(jobs_serialized))
 
         for host in hosts_serialized:
@@ -248,10 +248,10 @@
         logging.info("Performing heartbeat.")
 
         packet = self._heartbeat_packet()
-        stats.Gauge(STATS_KEY).send(
+        autotest_stats.Gauge(STATS_KEY).send(
                 'heartbeat.request_size', len(str(packet)))
         response = self.afe.run(HEARTBEAT_AFE_ENDPOINT, **packet)
-        stats.Gauge(STATS_KEY).send(
+        autotest_stats.Gauge(STATS_KEY).send(
                 'heartbeat.response_size', len(str(response)))
 
         self._mark_jobs_as_uploaded([job['id'] for job in packet['jobs']])
@@ -326,13 +326,13 @@
 
 def main():
     try:
-        stats.Counter(STATS_KEY + 'starts').increment()
+        autotest_stats.Counter(STATS_KEY + 'starts').increment()
         main_without_exception_handling()
     except Exception as e:
         message = 'Uncaught exception; terminating shard_client.'
         email_manager.manager.log_stacktrace(message)
         logging.exception(message)
-        stats.Counter(STATS_KEY + 'uncaught_exceptions').increment()
+        autotest_stats.Counter(STATS_KEY + 'uncaught_exceptions').increment()
         raise
     finally:
         email_manager.manager.send_queued_emails()
diff --git a/scheduler/site_drone_manager.py b/scheduler/site_drone_manager.py
index 95ab106..1f7db3a 100644
--- a/scheduler/site_drone_manager.py
+++ b/scheduler/site_drone_manager.py
@@ -4,7 +4,7 @@
 import logging
 
 from autotest_lib.client.common_lib import global_config, error
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.scheduler import drones, scheduler_config
 
 HOSTS_JOB_SUBDIR = 'hosts/'
@@ -16,7 +16,7 @@
 class SiteDroneManager(object):
 
 
-    _timer = stats.Timer('drone_manager')
+    _timer = autotest_stats.Timer('drone_manager')
 
 
     def copy_to_results_repository(self, process, source_path,
diff --git a/scheduler/site_monitor_db.py b/scheduler/site_monitor_db.py
index 1c53859..322bb01 100644
--- a/scheduler/site_monitor_db.py
+++ b/scheduler/site_monitor_db.py
@@ -8,7 +8,7 @@
 import logging
 
 from autotest_lib.client.common_lib import global_config
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.frontend.afe import models
 from autotest_lib.scheduler import email_manager
 from autotest_lib.scheduler import scheduler_config, scheduler_models
@@ -94,7 +94,7 @@
     DEFAULT_REQUESTED_BY_USER_ID = 1
 
 
-    _timer = stats.Timer('scheduler')
+    _timer = autotest_stats.Timer('scheduler')
 
 
     @_timer.decorate
diff --git a/scheduler/thread_lib.py b/scheduler/thread_lib.py
index 6f9e5d0..80c2030 100644
--- a/scheduler/thread_lib.py
+++ b/scheduler/thread_lib.py
@@ -26,7 +26,7 @@
 import logging
 
 import common
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 
 
 class DroneTaskQueueException(Exception):
@@ -40,7 +40,7 @@
     def run(self):
         """Wrapper around the thread's run method."""
         try:
-            with stats.Timer(self.name):
+            with autotest_stats.Timer(self.name):
                 super(ExceptionRememberingThread, self).run()
         except Exception as self.err:
             logging.error('%s raised an exception that will be re-raised by '
@@ -63,7 +63,7 @@
 
     def start(self):
         """Create and start a new timer."""
-        self.timer = stats.Timer(self.name)
+        self.timer = autotest_stats.Timer(self.name)
         self.timer.start()
 
 
diff --git a/server/autoserv b/server/autoserv
index a357910..24ec567 100755
--- a/server/autoserv
+++ b/server/autoserv
@@ -58,7 +58,7 @@
 from autotest_lib.server import utils as server_utils
 from autotest_lib.site_utils import job_overhead
 from autotest_lib.client.common_lib import pidfile, logging_manager
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 
 def log_alarm(signum, frame):
     logging.error("Received SIGALARM. Ignoring and continuing on.")
@@ -380,8 +380,8 @@
                 try:
                     afe = frontend.AFE()
                     board = server_utils.get_board_from_afe(machines[0], afe)
-                    timer = stats.Timer('autoserv_run_time.%s.%s' %
-                                        (board, test_name))
+                    timer = autotest_stats.Timer('autoserv_run_time.%s.%s' %
+                                                 (board, test_name))
                     timer.start()
                 except (urllib2.HTTPError, urllib2.URLError):
                     # Ignore error if RPC failed to get board
diff --git a/server/autotest.py b/server/autotest.py
index fdd6a70..10f8376 100644
--- a/server/autotest.py
+++ b/server/autotest.py
@@ -6,7 +6,7 @@
 from autotest_lib.client.common_lib import base_job, error, autotemp
 from autotest_lib.client.common_lib import global_config, packages
 from autotest_lib.client.common_lib import utils as client_utils
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 
 AUTOTEST_SVN = 'svn://test.kernel.org/autotest/trunk/client'
 AUTOTEST_HTTP = 'http://test.kernel.org/svn/autotest/trunk/client'
@@ -940,7 +940,7 @@
             pass
 
         # Copy all dirs in default to results_dir
-        timer = stats.Timer('collect_client_job_results')
+        timer = autotest_stats.Timer('collect_client_job_results')
         timer.start()
         try:
             self.host.get_file(self.client_results_dir + '/',
diff --git a/server/control_segments/cleanup b/server/control_segments/cleanup
index cb6b637..d8b585c 100644
--- a/server/control_segments/cleanup
+++ b/server/control_segments/cleanup
@@ -2,7 +2,7 @@
 import os
 
 from autotest_lib.client.common_lib import error
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.server.cros import provision
 
 
@@ -17,7 +17,7 @@
         job.record('START', None, 'cleanup')
         host = hosts.create_host(machine, initialize=False, auto_monitor=False,
                                  try_lab_servo=True)
-        timer = stats.Timer('cleanup_time')
+        timer = autotest_stats.Timer('cleanup_time')
         timer.start()
 
         # Try to save /var/log files. If the dut is not sshable, try to restart
diff --git a/server/control_segments/reset b/server/control_segments/reset
index c472e8e..ebe01d1 100644
--- a/server/control_segments/reset
+++ b/server/control_segments/reset
@@ -1,6 +1,6 @@
 import sys
 
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.server.cros import provision
 
 
@@ -15,7 +15,7 @@
     try:
         job.record('START', None, 'reset')
         host = hosts.create_host(machine, initialize=False, auto_monitor=False)
-        timer = stats.Timer('reset_time')
+        timer = autotest_stats.Timer('reset_time')
         timer.start()
 
         # Assume cleanup always runs first.
diff --git a/server/control_segments/verify b/server/control_segments/verify
index ae54783..268a5e7 100644
--- a/server/control_segments/verify
+++ b/server/control_segments/verify
@@ -1,4 +1,4 @@
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.server.cros import provision
 
 
@@ -16,7 +16,7 @@
         # servo update if needed.
         host = hosts.create_host(machine, initialize=False, auto_monitor=False,
                                  try_lab_servo=True)
-        timer = stats.Timer('verify_time')
+        timer = autotest_stats.Timer('verify_time')
         timer.start()
 
         host.verify()
diff --git a/server/crashcollect.py b/server/crashcollect.py
index b82a036..3294eec 100644
--- a/server/crashcollect.py
+++ b/server/crashcollect.py
@@ -1,7 +1,7 @@
 import os, time, logging, shutil
 
 from autotest_lib.client.common_lib import global_config
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.client.cros import constants
 from autotest_lib.server import utils
 
@@ -15,7 +15,7 @@
     lambda host, test_start_time: None)
 
 
-_timer = stats.Timer('crash_collection')
+_timer = autotest_stats.Timer('crash_collection')
 
 @_timer.decorate
 def get_crashdumps(host, test_start_time):
@@ -81,7 +81,7 @@
     logging.info("Waiting %s hours for %s to come up (%s)",
                  hours_to_wait, host.hostname, current_time)
     if not host.wait_up(timeout=hours_to_wait * 3600):
-        stats.Counter('collect_crashinfo_timeout').increment()
+        autotest_stats.Counter('collect_crashinfo_timeout').increment()
         logging.warning("%s down, unable to collect crash info",
                         host.hostname)
         return False
diff --git a/server/hosts/cros_host.py b/server/hosts/cros_host.py
index 2beecf9..8acabab 100644
--- a/server/hosts/cros_host.py
+++ b/server/hosts/cros_host.py
@@ -22,7 +22,7 @@
 from autotest_lib.client.common_lib.cros import dev_server
 from autotest_lib.client.common_lib.cros import retry
 from autotest_lib.client.common_lib.cros.graphite import autotest_es
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.client.cros import constants as client_constants
 from autotest_lib.client.cros import cros_ui
 from autotest_lib.client.cros.audio import cras_utils
@@ -480,7 +480,7 @@
                 'branch': branch,
                 'devserver': devserver.replace('.', '_'),
             }
-            stats.Gauge('verify_job_repo_url').send(
+            autotest_stats.Gauge('verify_job_repo_url').send(
                 '%(board)s.%(build_type)s.%(branch)s.%(devserver)s' % stats_key,
                 stage_time)
 
@@ -945,7 +945,7 @@
                               % usb_boot_timeout)
         logging.info('Downloading image to USB, then booting from it. Usb boot '
                      'timeout = %s', usb_boot_timeout)
-        timer = stats.Timer(usb_boot_timer_key)
+        timer = autotest_stats.Timer(usb_boot_timer_key)
         timer.start()
         self.servo.install_recovery_image(image_url)
         if not self.wait_up(timeout=usb_boot_timeout):
@@ -956,7 +956,7 @@
 
         install_timer_key = ('servo_install.install_timeout_%s'
                              % install_timeout)
-        timer = stats.Timer(install_timer_key)
+        timer = autotest_stats.Timer(install_timer_key)
         timer.start()
         logging.info('Installing image through chromeos-install.')
         self.run('chromeos-install --yes --lab_preserve_logs=%s' %
@@ -1153,35 +1153,35 @@
                 repair_func()
                 self.try_collect_crashlogs()
                 self.check_device()
-                stats.Counter(
+                autotest_stats.Counter(
                         '%s.SUCCEEDED' % repair_func.__name__).increment()
                 if board:
-                    stats.Counter(
+                    autotest_stats.Counter(
                         '%s.%s.SUCCEEDED' % (repair_func.__name__,
                                              board)).increment()
                 return
             except error.AutoservRepairMethodNA as e:
-                stats.Counter(
+                autotest_stats.Counter(
                         '%s.RepairNA' % repair_func.__name__).increment()
                 if board:
-                    stats.Counter(
+                    autotest_stats.Counter(
                         '%s.%s.RepairNA' % (repair_func.__name__,
                                             board)).increment()
                 logging.warning('Repair function NA: %s', e)
                 errors.append(str(e))
             except Exception as e:
-                stats.Counter(
+                autotest_stats.Counter(
                         '%s.FAILED' % repair_func.__name__).increment()
                 if board:
-                    stats.Counter(
+                    autotest_stats.Counter(
                         '%s.%s.FAILED' % (repair_func.__name__,
                                           board)).increment()
                 logging.warning('Failed to repair device: %s', e)
                 errors.append(str(e))
 
-        stats.Counter('Full_Repair_Failed').increment()
+        autotest_stats.Counter('Full_Repair_Failed').increment()
         if board:
-            stats.Counter(
+            autotest_stats.Counter(
                 'Full_Repair_Failed.%s' % board).increment()
         raise error.AutoservRepairTotalFailure(
                 'All attempts at repairing the device failed:\n%s' %
diff --git a/server/hosts/servo_host.py b/server/hosts/servo_host.py
index 7711e6e..c0f2a9b 100644
--- a/server/hosts/servo_host.py
+++ b/server/hosts/servo_host.py
@@ -21,7 +21,7 @@
 from autotest_lib.client.common_lib.cros import autoupdater
 from autotest_lib.client.common_lib.cros import dev_server
 from autotest_lib.client.common_lib.cros import retry
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.client.common_lib.cros.network import ping_runner
 from autotest_lib.server import site_utils as server_site_utils
 from autotest_lib.server.cros.servo import servo
@@ -83,7 +83,7 @@
     INITIALIZE_SERVO_TIMEOUT_SECS = 30
 
     _MAX_POWER_CYCLE_ATTEMPTS = 3
-    _timer = stats.Timer('servo_host')
+    _timer = autotest_stats.Timer('servo_host')
 
 
     def _initialize(self, servo_host='localhost', servo_port=9999,
@@ -454,7 +454,8 @@
                 updater.trigger_update()
             except autoupdater.RootFSUpdateError as e:
                 trigger_download_status = 'failed with %s' % str(e)
-                stats.Counter('servo_host.RootFSUpdateError').increment()
+                autotest_stats.Counter(
+                        'servo_host.RootFSUpdateError').increment()
             else:
                 trigger_download_status = 'passed'
             logging.info('Triggered download and update %s for %s, '
@@ -583,17 +584,18 @@
             try:
                 repair_func()
                 self.verify()
-                stats.Counter(counter_prefix + 'SUCCEEDED').increment()
+                autotest_stats.Counter(counter_prefix + 'SUCCEEDED').increment()
                 return
             except ServoHostRepairMethodNA as e:
                 logging.warning('Repair method NA: %s', e)
-                stats.Counter(counter_prefix + 'RepairNA').increment()
+                autotest_stats.Counter(counter_prefix + 'RepairNA').increment()
                 errors.append(str(e))
             except Exception as e:
                 logging.warning('Failed to repair servo: %s', e)
-                stats.Counter(counter_prefix + 'FAILED').increment()
+                autotest_stats.Counter(counter_prefix + 'FAILED').increment()
                 errors.append(str(e))
-        stats.Counter('servo_host_repair.Full_Repair_Failed').increment()
+        autotest_stats.Counter(
+                'servo_host_repair.Full_Repair_Failed').increment()
         raise ServoHostRepairTotalFailure(
                 'All attempts at repairing the servo failed:\n%s' %
                 '\n'.join(errors))
diff --git a/site_utils/abort_suite.py b/site_utils/abort_suite.py
index 1921294..3f23377 100755
--- a/site_utils/abort_suite.py
+++ b/site_utils/abort_suite.py
@@ -31,14 +31,14 @@
 from datetime import datetime
 
 import common
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.server import frontend
 from autotest_lib.server import utils
 
 
 LOG_NAME_TEMPLATE = 'abort_suite-%s.log'
 SUITE_JOB_NAME_TEMPLATE = '%s-test_suites/control.%s'
-_timer = stats.Timer('abort_suites')
+_timer = autotest_stats.Timer('abort_suites')
 
 
 def parse_args():
diff --git a/site_utils/collect_host_stats.py b/site_utils/collect_host_stats.py
index d88bc17..af5ebf7 100755
--- a/site_utils/collect_host_stats.py
+++ b/site_utils/collect_host_stats.py
@@ -17,7 +17,7 @@
 import common
 from autotest_lib.client.common_lib import mail
 from autotest_lib.client.common_lib import time_utils
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.site_utils import host_history
 from autotest_lib.site_utils import host_history_utils
 from autotest_lib.site_utils import host_label_utils
@@ -79,12 +79,14 @@
         print 'Machine utilization rate  = %-4.2f%%' % (100*mur)
         print 'Machine availability rate = %-4.2f%%' % (100*mar)
 
-    stats.Gauge('machine_utilization_rate').send('%s_hours.%s.%s' %
-                                                 (span, board, pool), mur)
-    stats.Gauge('machine_availability_rate').send('%s_hours.%s.%s' %
-                                                  (span, board, pool), mar)
-    stats.Gauge('machine_idle_rate').send('%s_hours.%s.%s' %
-                                          (span, board, pool), mir)
+    autotest_stats.Gauge('machine_utilization_rate').send('%s_hours.%s.%s' %
+                                                          (span, board, pool),
+                                                          mur)
+    autotest_stats.Gauge('machine_availability_rate').send('%s_hours.%s.%s' %
+                                                           (span, board, pool),
+                                                           mar)
+    autotest_stats.Gauge('machine_idle_rate').send('%s_hours.%s.%s' %
+                                                   (span, board, pool), mir)
 
 
 def main():
diff --git a/site_utils/collect_suite_time_stats.py b/site_utils/collect_suite_time_stats.py
index 9d75f2c..e786d9d 100755
--- a/site_utils/collect_suite_time_stats.py
+++ b/site_utils/collect_suite_time_stats.py
@@ -66,7 +66,7 @@
 from autotest_lib.client.common_lib import host_queue_entry_states
 from autotest_lib.client.common_lib import time_utils
 from autotest_lib.client.common_lib.cros.graphite import autotest_es
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.frontend import setup_django_environment
 from autotest_lib.frontend.afe import models
 from autotest_lib.frontend.tko import models as tko_models
@@ -400,9 +400,9 @@
                 key = utils.get_data_key(
                         'suite_time_stats', suite_name, hit['build'],
                         hit['board'])
-                stats.Timer(key).send('suite_runtime', suite_runtime)
+                autotest_stats.Timer(key).send('suite_runtime', suite_runtime)
                 for stat, val in suite_stats.iteritems():
-                    stats.Timer(key).send(stat, val)
+                    autotest_stats.Timer(key).send(stat, val)
         except Exception as e:
             print('ERROR: Exception is raised while processing suite %s' % (
                     suite_job_id))
diff --git a/site_utils/gs_offloader.py b/site_utils/gs_offloader.py
index d488c0e..4debf09 100755
--- a/site_utils/gs_offloader.py
+++ b/site_utils/gs_offloader.py
@@ -35,7 +35,7 @@
 
 import job_directories
 from autotest_lib.client.common_lib import global_config
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.scheduler import email_manager
 from chromite.lib import parallel
 
@@ -45,7 +45,7 @@
 
 STATS_KEY = 'gs_offloader.%s' % socket.gethostname()
 
-timer = stats.Timer(STATS_KEY)
+timer = autotest_stats.Timer(STATS_KEY)
 
 # Nice setting for process, the higher the number the lower the priority.
 NICENESS = 10
@@ -171,7 +171,7 @@
 
     """
     try:
-      counter = stats.Counter(STATS_KEY)
+      counter = autotest_stats.Counter(STATS_KEY)
       counter.increment('jobs_offload_started')
 
       error = False
@@ -189,8 +189,8 @@
         kibibytes_transferred = get_directory_size_kibibytes(dir_entry)
 
         counter.increment('kibibytes_transferred_total', kibibytes_transferred)
-        stats.Gauge(STATS_KEY).send(
-            'kibibytes_transferred', kibibytes_transferred)
+        autotest_stats.Gauge(STATS_KEY).send(
+                'kibibytes_transferred', kibibytes_transferred)
         counter.increment('jobs_offloaded')
         shutil.rmtree(dir_entry)
       else:
diff --git a/site_utils/run_suite.py b/site_utils/run_suite.py
index b275720..302bde9 100755
--- a/site_utils/run_suite.py
+++ b/site_utils/run_suite.py
@@ -48,7 +48,7 @@
 from autotest_lib.client.common_lib import global_config, enum
 from autotest_lib.client.common_lib import priorities
 from autotest_lib.client.common_lib import time_utils
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.client.common_lib.cros import retry
 from autotest_lib.frontend.afe.json_rpc import proxy
 from autotest_lib.server import utils
@@ -458,25 +458,25 @@
         # value the member artifact_end_time is set to None.
         if self.download_start_time:
             if self.payload_end_time:
-                stats.Timer(data_key).send('payload_download_time',
-                    (self.payload_end_time -
-                     self.download_start_time).total_seconds())
+                autotest_stats.Timer(data_key).send('payload_download_time',
+                        (self.payload_end_time -
+                         self.download_start_time).total_seconds())
 
             if self.artifact_end_time:
-                stats.Timer(data_key).send('artifact_download_time',
-                    (self.artifact_end_time -
-                     self.download_start_time).total_seconds())
+                autotest_stats.Timer(data_key).send('artifact_download_time',
+                        (self.artifact_end_time -
+                         self.download_start_time).total_seconds())
 
         if self.tests_end_time:
             if self.suite_start_time:
-                stats.Timer(data_key).send('suite_run_time',
-                    (self.tests_end_time -
-                     self.suite_start_time).total_seconds())
+                autotest_stats.Timer(data_key).send('suite_run_time',
+                        (self.tests_end_time -
+                         self.suite_start_time).total_seconds())
 
             if self.tests_start_time:
-                stats.Timer(data_key).send('tests_run_time',
-                    (self.tests_end_time -
-                     self.tests_start_time).total_seconds())
+                autotest_stats.Timer(data_key).send('tests_run_time',
+                        (self.tests_end_time -
+                         self.tests_start_time).total_seconds())
 
 
 _DEFAULT_AUTOTEST_INSTANCE = CONFIG.get_config_value(
@@ -1381,7 +1381,8 @@
 
     logging.info('Will return from run_suite with status: %s',
                   RETURN_CODES.get_string(code))
-    stats.Counter('run_suite.%s' % RETURN_CODES.get_string(code)).increment()
+    autotest_stats.Counter('run_suite.%s' %
+                           RETURN_CODES.get_string(code)).increment()
     return code
 
 
diff --git a/site_utils/stats/apacheinfo.py b/site_utils/stats/apacheinfo.py
index 1a430fe..0b0d6bd 100644
--- a/site_utils/stats/apacheinfo.py
+++ b/site_utils/stats/apacheinfo.py
@@ -8,7 +8,7 @@
 
 import common
 import requests
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.site_utils.stats import registry
 
 
@@ -35,5 +35,5 @@
     m = re.search("(\d+) requests/sec", page)
     if m:
         val = int(m.groups(0)[0])
-        stat = stats.Gauge(server, bare=True)
+        stat = autotest_stats.Gauge(server, bare=True)
         stat.send('requests_per_sec', val)
diff --git a/site_utils/stats/devserverinfo.py b/site_utils/stats/devserverinfo.py
index 4b2338f..e57389c 100644
--- a/site_utils/stats/devserverinfo.py
+++ b/site_utils/stats/devserverinfo.py
@@ -4,7 +4,7 @@
 
 
 import common
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.site_utils.lib import infra
 from autotest_lib.site_utils.stats import registry
 
@@ -17,7 +17,7 @@
     @param server: The AFE server.
     """
     out = infra.execute_command(server, 'ps -C devserver.py| wc -l')
-    stat = stats.Gauge(server, bare=True)
+    stat = autotest_stats.Gauge(server, bare=True)
     # ps prints out a header for the columns also, so we subtract one to report
     # about only the data.
     stat.send('num_devserver_processes', int(out.strip())-1)
diff --git a/site_utils/stats/hostinfo.py b/site_utils/stats/hostinfo.py
index f9711d6..b1edb13 100644
--- a/site_utils/stats/hostinfo.py
+++ b/site_utils/stats/hostinfo.py
@@ -4,7 +4,7 @@
 
 
 import common
-from autotest_lib.client.common_lib.cros.graphite import stats
+from autotest_lib.client.common_lib.cros.graphite import autotest_stats
 from autotest_lib.site_utils.lib import infra
 from autotest_lib.site_utils.stats import registry
 
@@ -17,7 +17,7 @@
     @param server: The AFE server.
     """
     out = infra.execute_command(server, 'ss -o state time-wait | wc -l')
-    stat = stats.Gauge(server, bare=True)
+    stat = autotest_stats.Gauge(server, bare=True)
     # ss prints out a header for the columns also, so we subtract one to report
     # about only the data.
     stat.send('time_wait_sockets', int(out.strip())-1)