Add the tools directory to the git export.
diff --git a/tools/README.md b/tools/README.md
new file mode 100644
index 0000000..7cee610
--- /dev/null
+++ b/tools/README.md
@@ -0,0 +1,4 @@
+buildgen: contains the template renderer for our build system.
+dockerfile: contains all of the Dockerfiles used to test gRPC.
+gce_setup: contains boilerplate for running the Dockerfiles under GCE.
+run_tests: contains python scripts to properly run the tests in parallel.
diff --git a/tools/buildgen/bunch.py b/tools/buildgen/bunch.py
new file mode 100755
index 0000000..e859d53
--- /dev/null
+++ b/tools/buildgen/bunch.py
@@ -0,0 +1,42 @@
+"""Allows dot-accessible dictionaries."""
+
+
+class Bunch(dict):
+
+  def __init__(self, d):
+    dict.__init__(self, d)
+    self.__dict__.update(d)
+
+
+# Recursively converts lists and dicts into Bunches; other values pass through
+def to_bunch(var):
+  if isinstance(var, list):
+    return [to_bunch(i) for i in var]
+  if isinstance(var, dict):
+    ret = {}
+    for k, v in var.items():
+      if isinstance(v, (list, dict)):
+        v = to_bunch(v)
+      ret[k] = v
+    return Bunch(ret)
+  else:
+    return var
+
+
+# Recursively merges the JSON object 'add' into the JSON object 'dst' in place
+def merge_json(dst, add):
+  if isinstance(dst, dict) and isinstance(add, dict):
+    for k, v in add.items():
+      if k in dst:
+        merge_json(dst[k], v)
+      else:
+        dst[k] = v
+  elif isinstance(dst, list) and isinstance(add, list):
+    dst.extend(add)
+  else:
+    raise Exception('Tried to merge incompatible objects %r, %r' % (dst, add))
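+
+# Illustrative usage (the dict shapes are just examples):
+#   d = to_bunch({'libs': [{'name': 'grpc'}]})
+#   d.libs[0].name  # -> 'grpc'
+#   merge_json({'a': [1]}, {'a': [2], 'b': 3})  # dst becomes {'a': [1, 2], 'b': 3}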
diff --git a/tools/buildgen/generate_projects.sh b/tools/buildgen/generate_projects.sh
new file mode 100755
index 0000000..f4e367d
--- /dev/null
+++ b/tools/buildgen/generate_projects.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+set -ex
+
+if [ "x$TEST" == "x" ] ; then
+  TEST=false
+fi
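+
+# With TEST=true, each template is rendered to a temp file and diffed
+# against the checked-in output instead of overwriting it, e.g.:
+#   TEST=true tools/buildgen/generate_projects.sh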
+
+
+cd `dirname $0`/..
+mako_renderer=tools/buildgen/mako_renderer.py
+gen_build_json=test/core/end2end/gen_build_json.py
+
+end2end_test_build=`mktemp`
+$gen_build_json > $end2end_test_build
+
+global_plugins=`find ./tools/buildgen/plugins -name '*.py' |
+  sort | grep -v __init__ |
+  while read p ; do echo -n "-p $p " ; done`
+
+for dir in . ; do
+  local_plugins=`find $dir/templates -name '*.py' |
+    sort | grep -v __init__ |
+    while read p ; do echo -n "-p $p " ; done`
+
+  plugins="$global_plugins $local_plugins"
+
+  find -L $dir/templates -type f -and -name '*.template' | while read file ; do
+    out=${dir}/${file#$dir/templates/}  # strip templates dir prefix
+    out=${out%.*}  # strip template extension
+    json_files="build.json $end2end_test_build"
+    data=`for i in $json_files; do echo -n "-d $i "; done`
+    if [ $TEST == true ] ; then
+      actual_out=$out
+      out=`mktemp`
+    else
+      g4 open $out || true
+    fi
+    $mako_renderer $plugins $data -o $out $file
+    if [ $TEST == true ] ; then
+      diff -q $out $actual_out
+      rm $out
+    fi
+  done
+done
+
+rm $end2end_test_build
+
diff --git a/tools/buildgen/mako_renderer.py b/tools/buildgen/mako_renderer.py
new file mode 100755
index 0000000..29c7cf0
--- /dev/null
+++ b/tools/buildgen/mako_renderer.py
@@ -0,0 +1,109 @@
+#!/usr/bin/python
+
+"""Simple Mako renderer.
+
+Just a wrapper around the mako rendering library.
+
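+Example invocation (file names illustrative):
+
+  mako_renderer.py -p plugin.py -d build.json -o out_file foo.template
+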
+"""
+
+import getopt
+import imp
+import os
+import sys
+
+
+from mako.lookup import TemplateLookup
+from mako.runtime import Context
+from mako.template import Template
+import simplejson
+import bunch
+
+
+# Imports a plugin module from the python file at the given path
+def import_plugin(name):
+  _, base_ex = os.path.split(name)
+  base, _ = os.path.splitext(base_ex)
+
+  with open(name, 'r') as plugin_file:
+    plugin_code = plugin_file.read()
+  plugin_module = imp.new_module(base)
+  exec plugin_code in plugin_module.__dict__
+  return plugin_module
+
+
+def out(msg):
+  print >> sys.stderr, msg
+
+
+def showhelp():
+  out('mako_renderer.py [-o out] [-m cache] [-d dict] [-d dict...] [-p plugin] template')
+
+
+def main(argv):
+  got_input = False
+  module_directory = None
+  dictionary = {}
+  json_dict = {}
+  got_output = False
+  output_file = sys.stdout
+  plugins = []
+
+  try:
+    opts, args = getopt.getopt(argv, 'hm:d:o:p:')
+  except getopt.GetoptError:
+    out('Unknown option')
+    showhelp()
+    sys.exit(2)
+
+  for opt, arg in opts:
+    if opt == '-h':
+      out('Displaying showhelp')
+      showhelp()
+      sys.exit()
+    elif opt == '-o':
+      if got_output:
+        out('Got more than one output')
+        showhelp()
+        sys.exit(3)
+      got_output = True
+      output_file = open(arg, 'w')
+    elif opt == '-m':
+      if module_directory is not None:
+        out('Got more than one cache directory')
+        showhelp()
+        sys.exit(4)
+      module_directory = arg
+    elif opt == '-d':
+      dict_file = open(arg, 'r')
+      bunch.merge_json(json_dict, simplejson.loads(dict_file.read()))
+      dict_file.close()
+    elif opt == '-p':
+      plugins.append(import_plugin(arg))
+
+  for plugin in plugins:
+    plugin.mako_plugin(json_dict)
+
+  for k, v in json_dict.items():
+    dictionary[k] = bunch.to_bunch(v)
+
+  ctx = Context(output_file, **dictionary)
+
+  for arg in args:
+    got_input = True
+    template = Template(filename=arg,
+                        module_directory=module_directory,
+                        lookup=TemplateLookup(directories=['.']))
+    template.render_context(ctx)
+
+  if not got_input:
+    out('Got nothing to do')
+    showhelp()
+
+  output_file.close()
+
+if __name__ == '__main__':
+  main(sys.argv[1:])
diff --git a/tools/buildgen/plugins/expand_filegroups.py b/tools/buildgen/plugins/expand_filegroups.py
new file mode 100755
index 0000000..108debe
--- /dev/null
+++ b/tools/buildgen/plugins/expand_filegroups.py
@@ -0,0 +1,49 @@
+"""Buildgen expand filegroups plugin.
+
+This takes the list of libs from our json dictionary,
+and expands any and all filegroups.
+
+"""
+
+
+def excluded(filename, exclude_res):
+  for r in exclude_res:
+    if r.search(filename):
+      return True
+  return False
+
+
+def mako_plugin(dictionary):
+  """The exported plugin code for expand_filegroups.
+
+  The list of libs in the build.json file can contain "filegroups" tags.
+  These refer to the filegroups in the root object. We will expand and
+  merge filegroups on the src, headers and public_headers properties.
+
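+  For example (names illustrative), a lib with "filegroups": ["base"] gets
+  the src, headers and public_headers of the root-level filegroup named
+  "base" appended to its own lists.
+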
+  """
+  libs = dictionary.get('libs', [])
+  filegroups_list = dictionary.get('filegroups', [])
+  filegroups = {}
+
+  for fg in filegroups_list:
+    filegroups[fg['name']] = fg
+
+  for lib in libs:
+    for fg_name in lib.get('filegroups', []):
+      fg = filegroups[fg_name]
+
+      src = lib.get('src', [])
+      src.extend(fg.get('src', []))
+      lib['src'] = src
+
+      headers = lib.get('headers', [])
+      headers.extend(fg.get('headers', []))
+      lib['headers'] = headers
+
+      public_headers = lib.get('public_headers', [])
+      public_headers.extend(fg.get('public_headers', []))
+      lib['public_headers'] = public_headers
diff --git a/tools/buildgen/plugins/list_protos.py b/tools/buildgen/plugins/list_protos.py
new file mode 100755
index 0000000..c5a09dd
--- /dev/null
+++ b/tools/buildgen/plugins/list_protos.py
@@ -0,0 +1,44 @@
+"""Buildgen .proto files list plugin.
+
+This parses the list of targets from the json build file, and creates
+a list called "protos" that contains all of the proto file names.
+
+"""
+
+
+import re
+
+
+def mako_plugin(dictionary):
+  """The exported plugin code for list_protos.
+
+  Some project generators may want to get the full list of unique .proto files
+  that are being included in a project. This code extracts all files referenced
+  in any library or target that ends in .proto, and builds and exports that as
+  a list called "protos".
+
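+  For example, a src entry "foo/bar.proto" contributes "foo/bar" to the
+  exported list.
+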
+  """
+
+  libs = dictionary.get('libs', [])
+  targets = dictionary.get('targets', [])
+
+  proto_re = re.compile('(.*)\\.proto$')
+
+  protos = set()
+  for lib in libs:
+    for src in lib.get('src', []):
+      m = proto_re.match(src)
+      if m:
+        protos.add(m.group(1))
+  for tgt in targets:
+    for src in tgt.get('src', []):
+      m = proto_re.match(src)
+      if m:
+        protos.add(m.group(1))
+
+  protos = sorted(protos)
+
+  dictionary['protos'] = protos
diff --git a/tools/dockerfile/grpc_base/Dockerfile b/tools/dockerfile/grpc_base/Dockerfile
new file mode 100644
index 0000000..76e585a
--- /dev/null
+++ b/tools/dockerfile/grpc_base/Dockerfile
@@ -0,0 +1,57 @@
+# Base Dockerfile for gRPC dev images
+FROM debian:latest
+
+# Install the core build tools and useful utilities (including git).
+RUN apt-get update && apt-get install -y \
+  autoconf \
+  autotools-dev \
+  build-essential \
+  bzip2 \
+  curl \
+  gcc \
+  git \
+  libc6 \
+  libc6-dbg \
+  libc6-dev \
+  libevent-dev \
+  libtool \
+  make \
+  python-dev \
+  python-setuptools \
+  strace \
+  telnet \
+  unzip \
+  wget \
+  zip && apt-get clean
+
+# Install useful python modules
+RUN easy_install -U pip
+RUN pip install -U crcmod  # makes downloads from cloud storage faster
+
+# Install GCloud
+RUN wget https://dl.google.com/dl/cloudsdk/release/google-cloud-sdk.zip \
+  && unzip google-cloud-sdk.zip && rm google-cloud-sdk.zip
+ENV CLOUD_SDK /google-cloud-sdk
+RUN $CLOUD_SDK/install.sh --usage-reporting=true --path-update=true --bash-completion=true --rc-path=/.bashrc --disable-installation-options
+ENV PATH $CLOUD_SDK/bin:$PATH
+
+# Install gcompute-tools to allow access to private git-on-borg repos
+RUN git clone https://gerrit.googlesource.com/gcompute-tools /var/local/git/gcompute-tools
+
+# Start the daemon that allows access to private git-on-borg repos
+RUN /var/local/git/gcompute-tools/git-cookie-authdaemon
+
+# Install the grpc-tools scripts dir from git
+RUN git clone https://team.googlesource.com/one-platform-grpc-team/grpc-tools /var/local/git/grpc-tools
+
+# Install the grpc-protobuf dir that has the protoc patch
+RUN git clone https://team.googlesource.com/one-platform-grpc-team/protobuf /var/local/git/protobuf
+
+# Install the patched version of protoc
+RUN cd /var/local/git/protobuf && \
+  ./autogen.sh && \
+  ./configure --prefix=/usr && \
+  make && make check && make install && make clean
+
+# Define the default command.
+CMD ["bash"]
diff --git a/tools/dockerfile/grpc_base/README.md b/tools/dockerfile/grpc_base/README.md
new file mode 100644
index 0000000..4745141
--- /dev/null
+++ b/tools/dockerfile/grpc_base/README.md
@@ -0,0 +1,11 @@
+Base GRPC Dockerfile
+====================
+
+Dockerfile for creating the base gRPC development Docker instance.
+For now, this assumes that the development will be done on GCE instances, with source code on Git-on-Borg.
+
+As of 2014/09/29, it includes
+- git
+- some useful tools such as curl, emacs, strace and telnet
+- the gerrit-compute-tools, which install the script that allows access to gerrit on git-on-borg
+- a patched version of protoc, to allow protos with stream tags to work
diff --git a/tools/dockerfile/grpc_cxx/Dockerfile b/tools/dockerfile/grpc_cxx/Dockerfile
new file mode 100644
index 0000000..cf38e97
--- /dev/null
+++ b/tools/dockerfile/grpc_cxx/Dockerfile
@@ -0,0 +1,15 @@
+# Dockerfile for gRPC C++
+FROM grpc/base
+
+# Start the daemon that allows access to the protected git-on-borg repos
+RUN /var/local/git/gcompute-tools/git-cookie-authdaemon
+
+RUN git clone https://team.googlesource.com/one-platform-grpc-team/grpc /var/local/git/grpc
+RUN cd /var/local/git/grpc \
+  && git pull --recurse-submodules \
+  && git submodule update --init --recursive
+
+RUN make install -C /var/local/git/grpc
+
+# Define the default command.
+CMD ["bash"]
\ No newline at end of file
diff --git a/tools/dockerfile/grpc_java/Dockerfile b/tools/dockerfile/grpc_java/Dockerfile
new file mode 100644
index 0000000..f7156c7
--- /dev/null
+++ b/tools/dockerfile/grpc_java/Dockerfile
@@ -0,0 +1,16 @@
+# Dockerfile for the gRPC Java dev image
+FROM grpc/java_base
+
+# Start the daemon that allows access to private git-on-borg repos
+RUN /var/local/git/gcompute-tools/git-cookie-authdaemon
+
+RUN cd /var/local/git/grpc-java/lib/okhttp && \
+  mvn -pl okhttp -am install
+RUN cd /var/local/git/grpc-java/lib/netty && \
+  mvn -pl codec-http2 -am -DskipTests install
+RUN cd /var/local/git/grpc-java && \
+  protoc --version > ver.txt && \
+  mvn install
+
+# Specify the default command such that the interop server runs on its known testing port
+CMD ["/var/local/git/grpc-java/run-test-server.sh", "--transport=NETTY_TLS", "--grpc_version=2", "--port=8030"]
diff --git a/tools/dockerfile/grpc_java/README.md b/tools/dockerfile/grpc_java/README.md
new file mode 100644
index 0000000..2da2393
--- /dev/null
+++ b/tools/dockerfile/grpc_java/README.md
@@ -0,0 +1,9 @@
+GRPC Java Dockerfile
+====================
+
+Dockerfile for creating the Java development image
+
+As of 2014/12 this
+ - is based on the gRPC Java base
+ - pulls from gRPC Java on git-on-borg
+ - installs it and runs the tests
\ No newline at end of file
diff --git a/tools/dockerfile/grpc_java_base/Dockerfile b/tools/dockerfile/grpc_java_base/Dockerfile
new file mode 100644
index 0000000..fe8edeb
--- /dev/null
+++ b/tools/dockerfile/grpc_java_base/Dockerfile
@@ -0,0 +1,25 @@
+# Base Dockerfile for the gRPC Java dev image
+FROM grpc/base
+
+RUN apt-get update && apt-get -y install java7-jdk
+
+# Install maven
+RUN wget http://mirror.olnevhost.net/pub/apache/maven/binaries/apache-maven-3.2.1-bin.tar.gz && \
+  tar xvf apache-maven-3.2.1-bin.tar.gz -C /var/local
+
+ENV JAVA_HOME /usr/lib/jvm/java-7-openjdk-amd64
+ENV M2_HOME /var/local/apache-maven-3.2.1
+ENV PATH $PATH:$JAVA_HOME/bin:$M2_HOME/bin
+ENV LD_LIBRARY_PATH /usr/local/lib
+
+# Start the daemon that allows access to the protected git-on-borg repos
+RUN /var/local/git/gcompute-tools/git-cookie-authdaemon
+
+RUN git clone --recursive https://team.googlesource.com/one-platform-grpc-team/grpc-java /var/local/git/grpc-java
+
+RUN cd /var/local/git/grpc-java/lib/okhttp && \
+  mvn -pl okhttp -am validate
+RUN cd /var/local/git/grpc-java/lib/netty && \
+  mvn -pl codec-http2 -am validate
+RUN cd /var/local/git/grpc-java && \
+  mvn validate
\ No newline at end of file
diff --git a/tools/dockerfile/grpc_java_base/README.md b/tools/dockerfile/grpc_java_base/README.md
new file mode 100644
index 0000000..446287f
--- /dev/null
+++ b/tools/dockerfile/grpc_java_base/README.md
@@ -0,0 +1,9 @@
+GRPC Java Base Dockerfile
+=========================
+
+Dockerfile for creating the Java gRPC development Docker instance.
+
+As of 2014/12 this
+ - installs tools and dependencies needed to build gRPC Java
+ - does not install gRPC Java itself; a separate Dockerfile that depends on
+   this one will do that.
\ No newline at end of file
diff --git a/tools/dockerfile/grpc_php/Dockerfile b/tools/dockerfile/grpc_php/Dockerfile
new file mode 100644
index 0000000..0e50af7
--- /dev/null
+++ b/tools/dockerfile/grpc_php/Dockerfile
@@ -0,0 +1,18 @@
+# Dockerfile for gRPC PHP
+FROM grpc/php_base
+
+# Start the daemon that allows access to the protected git-on-borg repos
+RUN /var/local/git/gcompute-tools/git-cookie-authdaemon
+
+RUN cd /var/local/git/grpc \
+  && git pull --recurse-submodules \
+  && git submodule update --init --recursive
+
+RUN make install_c -j12 -C /var/local/git/grpc
+
+RUN cd /var/local/git/grpc/src/php/ext/grpc && git pull && phpize
+
+# Build the grpc PHP extension
+RUN cd /var/local/git/grpc/src/php/ext/grpc \
+  && ./configure \
+  && make
\ No newline at end of file
diff --git a/tools/dockerfile/grpc_php/README.md b/tools/dockerfile/grpc_php/README.md
new file mode 100644
index 0000000..a37389f
--- /dev/null
+++ b/tools/dockerfile/grpc_php/README.md
@@ -0,0 +1,10 @@
+GRPC PHP Dockerfile
+===================
+
+Dockerfile for creating the PHP development instances
+
+As of 2014/10 this
+- is based on the GRPC PHP base
+- adds a pull of the HEAD GRPC PHP source from git-on-borg
+- builds it
+- runs the tests, i.e., the image won't be created if the tests don't pass
diff --git a/tools/dockerfile/grpc_php_base/Dockerfile b/tools/dockerfile/grpc_php_base/Dockerfile
new file mode 100644
index 0000000..8ec90f4
--- /dev/null
+++ b/tools/dockerfile/grpc_php_base/Dockerfile
@@ -0,0 +1,84 @@
+# Base Dockerfile for gRPC PHP.
+#
+# Includes PHP installation dependencies, things that are unlikely to vary.
+FROM grpc/base
+
+# Install RVM dependencies and other packages
+RUN apt-get update && apt-get install -y \
+    autoconf \
+    automake \
+    bison \
+    curl \
+    g++ \
+    gawk \
+    gcc \
+    groff \
+    libc6-dev \
+    libffi-dev \
+    libgdbm-dev \
+    libncurses5-dev \
+    libreadline6-dev \
+    libsqlite3-dev \
+    libssl-dev \
+    libtool \
+    libyaml-dev \
+    make \
+    patch \
+    procps \
+# TODO(mlumish): Uncomment these lines when building against them works
+#    php5-common \
+#    php5-cli \
+#    php5-dev \
+#    php-pear \
+    pkg-config \
+    sqlite3 \
+    zlib1g-dev
+
+# Install the version of PHP gRPC is tested against
+ENV DEBIAN_FRONTEND noninteractive
+RUN apt-get update && apt-get install -y libxml2 libxml2-dev  # used by PHP
+RUN cd /var/local \
+  && curl -o php-5.5.17.tar.gz http://php.net/distributions/php-5.5.17.tar.gz \
+  && tar -xf php-5.5.17.tar.gz \
+  && cd php-5.5.17 \
+  && ./configure --with-zlib=/usr --with-libxml-dir=ext/libxml \
+  && make && make install
+
+# Start the daemon that allows access to the protected git-on-borg repos
+RUN /var/local/git/gcompute-tools/git-cookie-authdaemon
+
+# Download the patched PHP protobuf so that PHP gRPC clients can be generated
+# from proto3 schemas.
+RUN git clone https://team.googlesource.com/one-platform-grpc-team/grpc-php-protobuf-php /var/local/git/protobuf-php
+
+# Install ruby (via RVM) as ruby tools are dependencies for building Protobuf
+# PHP extensions.
+RUN gpg --keyserver hkp://keys.gnupg.net --recv-keys D39DC0E3  # Needed for RVM
+RUN \curl -sSL https://get.rvm.io | bash -s stable --ruby
+ENV PATH /usr/local/rvm/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+
+# ronn: a ruby tool used to convert markdown to man pages, used during the
+# install of Protobuf extensions
+#
+# rake: a ruby version of make used to build the PHP Protobuf extension
+RUN rvm all do gem install ronn rake
+
+# Install the patched PHP protobuf so that PHP gRPC clients can be generated
+# from proto3 schemas.
+RUN cd /var/local/git/protobuf-php \
+  && rvm all do rake pear:package version=1.0 \
+  && pear install Protobuf-1.0.tgz
+
+# Install PHPUnit, used to run the PHP unit tests
+RUN wget https://phar.phpunit.de/phpunit.phar \
+  && chmod +x phpunit.phar \
+  && mv phpunit.phar /usr/local/bin/phpunit
+
+RUN git clone https://team.googlesource.com/one-platform-grpc-team/grpc /var/local/git/grpc
+RUN cd /var/local/git/grpc \
+  && git submodule update --init --recursive
+
+RUN make static_c shared_c -j12 -C /var/local/git/grpc
+
+# Define the default command.
+CMD ["bash"]
diff --git a/tools/dockerfile/grpc_php_base/README.md b/tools/dockerfile/grpc_php_base/README.md
new file mode 100644
index 0000000..d93778e
--- /dev/null
+++ b/tools/dockerfile/grpc_php_base/README.md
@@ -0,0 +1,9 @@
+GRPC PHP Base Dockerfile
+========================
+
+Dockerfile for creating the PHP gRPC development Docker instance.
+
+As of 2014/10 this
+- installs tools and dependencies needed to build gRPC PHP
+- does not install gRPC PHP itself; a separate Dockerfile that depends on
+  this one will do that
diff --git a/tools/dockerfile/grpc_ruby/Dockerfile b/tools/dockerfile/grpc_ruby/Dockerfile
new file mode 100644
index 0000000..2092571
--- /dev/null
+++ b/tools/dockerfile/grpc_ruby/Dockerfile
@@ -0,0 +1,21 @@
+# Dockerfile for gRPC Ruby
+FROM grpc/ruby_base
+
+RUN cd /var/local/git/grpc \
+  && git pull --recurse-submodules \
+  && git submodule update --init --recursive
+
+RUN make install_c -C /var/local/git/grpc
+
+# Install the grpc gem locally with its dependencies and build the extension.
+RUN /bin/bash -l -c 'cd /var/local/git/beefcake && bundle && gem build beefcake.gemspec && gem install beefcake'
+RUN /bin/bash -l -c 'cd /var/local/git/grpc/src/ruby && bundle && rake compile:grpc && gem build grpc.gemspec && gem install grpc'
+
+# TODO: add a command to run the unit tests once the bug below is fixed
+# - the tests fail due to an error in the C threading library: they fail with
+#   'ruby: __pthread_mutex_cond_lock_adjust for unknown reasons' at the end of a test case
+# - however, the interop server and client run OK, so this bug can be investigated
+# RUN /bin/bash -l -c 'cd /var/local/git/grpc/src/ruby && bundle && rake'
+
+# Specify the default command such that the interop server runs on its known testing port
+CMD ["/bin/bash", "-l", "-c", "ruby /var/local/git/grpc/src/ruby/bin/interop/interop_server.rb --port 8060"]
diff --git a/tools/dockerfile/grpc_ruby/README.md b/tools/dockerfile/grpc_ruby/README.md
new file mode 100644
index 0000000..51fb2f5
--- /dev/null
+++ b/tools/dockerfile/grpc_ruby/README.md
@@ -0,0 +1,10 @@
+GRPC Ruby Dockerfile
+====================
+
+Dockerfile for creating the Ruby development instances
+
+As of 2014/10 this
+- is based on the GRPC Ruby base
+- adds a pull of the HEAD gRPC Ruby source from git-on-borg
+- builds it
+- runs the tests, i.e., the image won't be created if the tests don't pass
diff --git a/tools/dockerfile/grpc_ruby_base/Dockerfile b/tools/dockerfile/grpc_ruby_base/Dockerfile
new file mode 100644
index 0000000..ad14e43
--- /dev/null
+++ b/tools/dockerfile/grpc_ruby_base/Dockerfile
@@ -0,0 +1,59 @@
+# Base Dockerfile for gRPC Ruby.
+#
+# Includes Ruby installation dependencies, things that are unlikely to vary.
+FROM grpc/base
+
+# Allows 'source' to work
+RUN rm /bin/sh && ln -s /bin/bash /bin/sh
+
+# Install RVM dependencies
+RUN apt-get update && apt-get install -y \
+    autoconf \
+    automake \
+    bison \
+    curl \
+    g++ \
+    gawk \
+    gcc \
+    libc6-dev \
+    libffi-dev \
+    libgdbm-dev \
+    libncurses5-dev \
+    libreadline6-dev \
+    libsqlite3-dev \
+    libssl-dev \
+    libtool \
+    libyaml-dev \
+    make \
+    patch \
+    pkg-config \
+    procps \
+    sqlite3 \
+    zlib1g-dev
+
+
+# Start the daemon that allows access to the protected git-on-borg repos
+RUN /var/local/git/gcompute-tools/git-cookie-authdaemon
+
+# Download the patched Ruby protobuf (beefcake) so that Ruby gRPC clients can
+# be generated from proto3 schemas.
+RUN git clone https://team.googlesource.com/one-platform-grpc-team/grpc-ruby-beefcake \
+              /var/local/git/beefcake
+
+# Install RVM, used to install ruby
+RUN gpg --keyserver hkp://keys.gnupg.net --recv-keys D39DC0E3  # Needed for RVM
+RUN /bin/bash -l -c "curl -L get.rvm.io | bash -s stable"
+
+# Install Ruby 2.1
+RUN /bin/bash -l -c "rvm install ruby-2.1"
+RUN /bin/bash -l -c "rvm use --default ruby-2.1"
+RUN /bin/bash -l -c "echo 'gem: --no-ri --no-rdoc' > ~/.gemrc"
+RUN /bin/bash -l -c "echo 'source /home/grpc_ruby/.rvm/scripts/rvm' >> ~/.bashrc"
+RUN /bin/bash -l -c "echo 'rvm --default use ruby-2.1' >> ~/.bashrc"
+RUN /bin/bash -l -c "gem install bundler --no-ri --no-rdoc"
+
+RUN git clone https://team.googlesource.com/one-platform-grpc-team/grpc /var/local/git/grpc
+RUN cd /var/local/git/grpc \
+  && git submodule update --init --recursive
+
+RUN make static_c shared_c -C /var/local/git/grpc
\ No newline at end of file
diff --git a/tools/dockerfile/grpc_ruby_base/README.md b/tools/dockerfile/grpc_ruby_base/README.md
new file mode 100644
index 0000000..acf1e50
--- /dev/null
+++ b/tools/dockerfile/grpc_ruby_base/README.md
@@ -0,0 +1,9 @@
+GRPC Ruby Base Dockerfile
+=========================
+
+Dockerfile for creating the Ruby gRPC development Docker instance.
+
+As of 2014/10 this
+- installs tools and dependencies needed to build gRPC Ruby
+- does not install gRPC Ruby itself; a separate Dockerfile that depends on
+  this one will do that
diff --git a/tools/gce_setup/README.md b/tools/gce_setup/README.md
new file mode 100644
index 0000000..253e94d
--- /dev/null
+++ b/tools/gce_setup/README.md
@@ -0,0 +1,48 @@
+GCE images for GRPC
+===================
+
+This directory contains a number of shell scripts used for setting up GCE
+images and instances for developing and testing gRPC.
+
+
+
+Goal
+----
+
+- provide a script to create a GCE image that has everything needed to try
+  out gRPC on GCE
+- provide another script that creates a new GCE instance from the latest image
+
+- additional scripts may be added in the future
+
+
+Usage
+------
+
+    # Minimal usage (see the scripts themselves for options)
+
+    $ create_grpc_dev_image.sh  # creates a grpc GCE image
+    $ ...
+    $ new_grpc_dev_instance.sh  # creates an instance using the latest grpc GCE image
+
+
+Requirements
+------------
+
+Install [Google Cloud SDK](https://developers.google.com/cloud/sdk/)
+
+Contents
+--------
+
+Library scripts that contain bash functions used in the other scripts:
+- shared_setup_funcs.sh  # funcs used in create_grpc_dev_image and new_grpc_dev_instance
+- gcutil_extras.sh  # wrappers for common tasks that use gcutil
+- build_grpc_dist.sh  # funcs for building the GRPC library and tests into a debian dist
+
+GCE [startup scripts](https://developers.google.com/compute/docs/howtos/startupscript)
+- *_on_startup.sh
+
+Main scripts (as of 2014/09/04)
+- create_grpc_dev_instance.sh
+- new_grpc_dev_instance.sh
+
diff --git a/tools/gce_setup/compute_extras.sh b/tools/gce_setup/compute_extras.sh
new file mode 100755
index 0000000..e0def1a
--- /dev/null
+++ b/tools/gce_setup/compute_extras.sh
@@ -0,0 +1,259 @@
+#!/bin/bash
+
+# Shared bash funcs that combine common gcloud compute actions into single commands
+
+# remove_instance removes a named instance
+#
+# remove_instance <project> <instance_name> [<zone>="us-central1-b"]
+remove_instance() {
+  local project=$1
+  [[ -n $project ]] || {
+    echo "$FUNCNAME: missing arg: project" 1>&2
+    return 1
+  }
+  local an_instance=$2
+  [[ -n $an_instance ]] || {
+    echo "$FUNCNAME: missing arg: an_instance" 1>&2
+    return 1
+  }
+  local zone=$3
+  [[ -n $zone ]] || zone="us-central1-b"
+
+  gcloud --project $project --quiet \
+    compute instances delete $an_instance  --zone=$zone
+}
+
+# has_instance checks if a project contains a named instance
+#
+# has_instance <project> <instance_name>
+has_instance() {
+  local project=$1
+  [[ -n $project ]] || {
+    echo "$FUNCNAME: missing arg: project" 1>&2
+    return 1
+  }
+  local checked_instance=$2
+  [[ -n $checked_instance ]] || {
+    echo "$FUNCNAME: missing arg: checked_instance" 1>&2
+    return 1
+  }
+
+  instances=$(gcloud --project $project compute instances list \
+    | sed -e 's/ \+/ /g' | cut -d' ' -f 1)
+  for i in $instances
+  do
+    if [[ $i == $checked_instance ]]
+    then
+      return 0
+    fi
+  done
+
+  return 1
+}
+
+# find_network_ip finds the ip address of an instance if it is present in the project.
+#
+# find_network_ip <project> <instance_name>
+find_network_ip() {
+  local project=$1
+  [[ -n $project ]] || {
+    echo "$FUNCNAME: missing arg: project" 1>&2
+    return 1
+  }
+  local checked_instance=$2
+  [[ -n $checked_instance ]] || {
+    echo "$FUNCNAME: missing arg: checked_instance" 1>&2
+    return 1
+  }
+
+  has_instance $project $checked_instance || return 1
+  gcloud --project $project compute instances list \
+    | grep -e "$checked_instance\s" | sed -e 's/ \+/ /g' | cut -d' ' -f 4
+}
+
+# delete_disks deletes a bunch of disks matching a pattern
+#
+# delete_disks <project> <disk_pattern>
+delete_disks() {
+  local project=$1
+  [[ -n $project ]] || {
+    echo "$FUNCNAME: missing arg: project" 1>&2
+    return 1
+  }
+  local disk_pattern=$2
+  [[ -n $disk_pattern ]] || {
+    echo "$FUNCNAME: missing arg: disk_pattern" 1>&2
+    return 1
+  }
+
+  trash_disks=$(gcloud --project=$project compute disks list \
+    | sed -e 's/ \+/ /g' | cut -d' ' -f 1 | grep $disk_pattern)
+  [[ -n $trash_disks ]] && gcloud --project $project \
+    --quiet compute disks delete $trash_disks
+}
+
+# has_firewall checks if a project contains a named firewall
+#
+# has_firewall <project> <checked_firewall>
+has_firewall() {
+  local project=$1
+  [[ -n $project ]] || {
+    echo "$FUNCNAME: missing arg: project" 1>&2
+    return 1
+  }
+  local checked_firewall=$2
+  [[ -n $checked_firewall ]] || {
+    echo "$FUNCNAME: missing arg: checked_firewall" 1>&2
+    return 1
+  }
+
+  instances=$(gcloud --project $project compute firewall-rules list \
+    | sed -e 's/ \+/ /g' | cut -d' ' -f 1)
+  for i in $instances
+  do
+    if [[ $i == $checked_firewall ]]
+    then
+      return 0
+    fi
+  done
+
+  return 1
+}
+
+# remove_firewall removes a named firewall from a project.
+#
+# remove_firewall <project> <checked_firewall>
+remove_firewall() {
+  local project=$1
+  [[ -n $project ]] || {
+    echo "$FUNCNAME: missing arg: project" 1>&2
+    return 1
+  }
+  local a_firewall=$2
+  [[ -n $a_firewall ]] || {
+    echo "$FUNCNAME: missing arg: a_firewall" 1>&2
+    return 1
+  }
+
+  gcloud --project $project --quiet compute firewall-rules delete $a_firewall
+}
+
+# has_network checks if a project contains a named network
+#
+# has_network <project> <checked_network>
+has_network() {
+  local project=$1
+  [[ -n $project ]] || {
+    echo "$FUNCNAME: missing arg: project" 1>&2
+    return 1
+  }
+  local checked_network=$2
+  [[ -n $checked_network ]] || {
+    echo "$FUNCNAME: missing arg: checked_network" 1>&2
+    return 1
+  }
+
+  instances=$(gcloud --project $project compute networks list \
+    | sed -e 's/ \+/ /g' | cut -d' ' -f 1)
+  for i in $instances
+  do
+    if [[ $i == $checked_network ]]
+    then
+      return 0
+    fi
+  done
+
+  return 1
+}
+
+# maybe_setup_dev_network adds a network with the given name, with firewalls
+# useful for development
+#
+# - All machines can be accessed internally and externally over SSH (port 22)
+# - All machines can access one another over the internal network
+# - All machines can be accessed externally via ports 80, 443, 8080 and 8443
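+#
+# maybe_setup_dev_network <name> <project>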
+maybe_setup_dev_network() {
+  local name=$1
+  [[ -n $name ]] || {
+    echo "$FUNCNAME: missing arg: network name" 1>&2
+    return 1
+  }
+
+  local project=$2
+  [[ -n $project ]] || {
+    echo "$FUNCNAME: missing arg: project" 1>&2
+    return 1
+  }
+
+  has_network $project $name || {
+    echo "creating network '$name'" 1>&2
+    gcloud compute --project $project networks create $name || return 1
+  }
+
+  # allow ssh connections to instances on the network
+  has_firewall $project "$name-ssh" || {
+    echo "adding firewall '$name-ssh'" 1>&2
+    gcloud compute --project $project firewall-rules create "$name-ssh" \
+      --network $name  \
+      --allow tcp:22 || return 1;
+  }
+
+  # allow instances on the network to connect to each other internally
+  has_firewall $project "$name-internal" || {
+    echo "adding firewall '$name-internal'" 1>&2
+    gcloud compute --project $project firewall-rules create "$name-internal" \
+      --network $name  \
+      --source-ranges 10.0.0.0/16 --allow tcp udp icmp || return 1;
+  }
+
+  # allow instances on the network to be connected to from external ips on
+  # specific ports
+  has_firewall $project "$name-external" || {
+    echo "adding firewall '$name-external'" 1>&2
+    gcloud compute --project $project firewall-rules create "$name-external" \
+      --network $name  \
+      --allow tcp:80 tcp:8080 tcp:443 tcp:8443 || return 1;
+  }
+}
+
+# maybe_remove_dev_network removes a network set up by maybe_setup_dev_network
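+#
+# maybe_remove_dev_network <name> <project>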
+maybe_remove_dev_network() {
+  local name=$1
+  [[ -n $name ]] || {
+    echo "$FUNCNAME: missing arg: network name" 1>&2
+    return 1
+  }
+
+  local project=$2
+  [[ -n $project ]] || {
+    echo "$FUNCNAME: missing arg: project" 1>&2
+    return 1
+  }
+
+  has_network $project $name || {
+    echo "network $name is not present"
+    return 0
+  }
+  for i in $(gcloud compute firewall-rules list \
+    | grep "$name-" | cut -d' ' -f 1)
+  do
+    gcloud compute --quiet firewall-rules delete $i || return 1;
+  done
+  gcloud compute --quiet networks delete $name
+}
+
+# find_named_ip finds the external ip address for a given name.
+#
+# find_named_ip <named-ip-address>
+find_named_ip() {
+  local name=$1
+  [[ -n $name ]] || { echo "$FUNCNAME: missing arg: name" 1>&2; return 1; }
+  [[ $name == 'none' ]] && return 0;
+
+  gcloud compute addresses list | sed -e 's/ \+/ /g' \
+    | grep $name | cut -d' ' -f 3
+}
diff --git a/tools/gce_setup/grpc_docker.sh b/tools/gce_setup/grpc_docker.sh
new file mode 100755
index 0000000..094b97b
--- /dev/null
+++ b/tools/gce_setup/grpc_docker.sh
@@ -0,0 +1,511 @@
+#!/bin/bash
+#
+# Contains funcs that help maintain GRPC's Docker images.
+#
+# Most funcs rely on a special-purpose GCE instance to build the docker
+# images and store them in a GCS-backed docker repository.
+#
+# The GCE instance
+# - should be based on the container-optimized GCE instance
+# [https://cloud.google.com/compute/docs/containers].
+# - should be running google/docker-registry image
+# [https://registry.hub.docker.com/u/google/docker-registry/], so that images
+# can be saved to GCS
+# - should have the GCE support scripts from this directory installed on it.
+#
+# The expected workflow is
+# - start a grpc docker GCE instance
+#  * on startup, some of the docker images will be regenerated automatically
+# - use grpc_update_image to update images via that instance
+
+# Pushes a dockerfile dir to cloud storage.
+#
+# docker_dir is expected to be the parent directory of a number of
+# directories, each of which specifies a Dockerfile.
+#
+# grpc_push_dockerfiles path/to/docker_parent_dir gs://bucket/path/to/gcs/parent
+grpc_push_dockerfiles() {
+  local docker_dir=$1
+  [[ -n $docker_dir ]] || {
+    echo "$FUNCNAME: missing arg: docker_dir" 1>&2
+    return 1
+  }
+
+  local gs_root_uri=$2
+  [[ -n $gs_root_uri ]] || {
+    echo "$FUNCNAME: missing arg: gs_root_uri" 1>&2
+    return 1
+  }
+
+  find $docker_dir -name '*~' -o -name '#*#' -exec rm -fv {} \; || {
+    echo "$FUNCNAME: failed: cleanup of tmp files in $docker_dir" 1>&2
+    return 1
+  }
+  gsutil cp -R $docker_dir $gs_root_uri || {
+    echo "$FUNCNAME: failed: cp $docker_dir -> $gs_root_uri" 1>&2
+    return 1
+  }
+}
+
+# Adds the user to docker group on a GCE instance, and restarts the docker
+# daemon
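+#
+# grpc_add_docker_user <host> [<project>] [<zone>]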
+grpc_add_docker_user() {
+  local host=$1
+  [[ -n $host ]] || {
+    echo "$FUNCNAME: missing arg: host" 1>&2
+    return 1
+  }
+
+  local project=$2
+  local project_opt=''
+  [[ -n $project ]] && project_opt=" --project $project"
+
+  local zone=$3
+  local zone_opt=''
+  [[ -n $zone ]] && zone_opt=" --zone $zone"
+
+
+  local func_lib="/var/local/startup_scripts/shared_startup_funcs.sh"
+  local ssh_cmd="source $func_lib && grpc_docker_add_docker_group"
+  gcloud compute $project_opt ssh $zone_opt $host --command "$ssh_cmd"
+}
+
+# Updates a docker image specified in a local dockerfile via the docker
+# container GCE instance.
+#
+# the docker container GCE instance
+# - should have been setup using ./new_grpc_docker_instance
+# - so will have /var/local/startup_scripts/shared_startup_funcs.sh, a copy of
+#   ./shared_startup_funcs.sh
+#
+# grpc_update_image gs://bucket/path/to/dockerfile_parent \
+#   image_label path/to/docker_dir docker_gce_instance [project] [zone]
+grpc_update_image() {
+  local gs_root_uri=$1
+  [[ -n $gs_root_uri ]] || {
+    echo "$FUNCNAME: missing arg: gs_root_uri" 1>&2
+    return 1
+  }
+
+  local image_label=$2
+  [[ -n $image_label ]] || {
+    echo "$FUNCNAME: missing arg: host" 1>&2
+    return 1
+  }
+
+  local docker_dir=$3
+  [[ -n $docker_dir ]] || {
+    echo "$FUNCNAME: missing arg: docker_dir" 1>&2
+    return 1
+  }
+  [[ -d $docker_dir ]] || {
+    echo "could find directory $docker_dir" 1>&2
+    return 1
+  }
+  local docker_parent_dir=$(dirname $docker_dir)
+  local gce_docker_dir="/var/local/dockerfile/$(basename $docker_dir)"
+
+  local host=$4
+  [[ -n $host ]] || {
+    echo "$FUNCNAME: missing arg: host" 1>&2
+    return 1
+  }
+
+  local project=$5
+  local project_opt=''
+  [[ -n $project ]] && project_opt=" --project $project"
+
+  local zone=$6
+  local zone_opt=''
+  [[ -n $zone ]] && zone_opt=" --zone $zone"
+
+  local func_lib="/var/local/startup_scripts/shared_startup_funcs.sh"
+  local ssh_cmd="source $func_lib"
+  ssh_cmd+=" && grpc_dockerfile_refresh $image_label $gce_docker_dir"
+
+  grpc_push_dockerfiles $docker_parent_dir $gs_root_uri || return 1
+  gcloud compute $project_opt ssh $zone_opt $host --command "$ssh_cmd"
+}
+
+# gce_has_instance checks if a project contains a named instance
+#
+# gce_has_instance <project> <instance_name>
+gce_has_instance() {
+  local project=$1
+  [[ -n $project ]] || { echo "$FUNCNAME: missing arg: project" 1>&2; return 1; }
+  local checked_instance=$2
+  [[ -n $checked_instance ]] || {
+    echo "$FUNCNAME: missing arg: checked_instance" 1>&2
+    return 1
+  }
+
+  instances=$(gcloud --project $project compute instances list \
+    | sed -e 's/ \+/ /g' | cut -d' ' -f 1)
+  for i in $instances
+  do
+    if [[ $i == $checked_instance ]]
+    then
+      return 0
+    fi
+  done
+
+  echo "instance '$checked_instance' not found in compute project $project" 1>&2
+  return 1
+}
+
+# gce_find_internal_ip finds the ip address of an instance if it is present in
+# the project.
+#
+# gce_find_internal_ip <project> <instance_name>
+gce_find_internal_ip() {
+  local project=$1
+  [[ -n $project ]] || { echo "$FUNCNAME: missing arg: project" 1>&2; return 1; }
+  local checked_instance=$2
+  [[ -n $checked_instance ]] || {
+    echo "$FUNCNAME: missing arg: checked_instance" 1>&2
+    return 1
+  }
+
+  gce_has_instance $project $checked_instance || return 1
+  gcloud --project $project compute instances list \
+    | grep -e "$checked_instance\s" \
+    | sed -e 's/ \+/ /g' | cut -d' ' -f 4
+}
+
+# sets the vars grpc_zone and grpc_project
+#
+# to be used in funcs that want to set the zone and project and potentially
+# override them with flags:
+#
+# grpc_zone
+# - is set to the value gcloud config value for compute/zone if that's present
+# - it defaults to asia-east1-a
+# - it can be overridden by passing -z <other value>
+#
+# grpc_project
+# - is set to the value gcloud config value for project if that's present
+# - it defaults to stoked-keyword-656 (the grpc cloud testing project)
+# - it can be overridden by passing -p <other value>
+grpc_set_project_and_zone() {
+  dry_run=0
+  grpc_zone=$(gcloud config list compute/zone --format text \
+    | sed -e 's/ \+/ /g' | cut -d' ' -f 2)
+  # pick a known zone as a default
+  [[ $grpc_zone == 'None' ]] && grpc_zone='asia-east1-a'
+
+  grpc_project=$(gcloud config list project --format text \
+    | sed -e 's/ \+/ /g' | cut -d' ' -f 2)
+  # pick a known project as a default
+  [[ $grpc_project == 'None' ]] && grpc_project='stoked-keyword-656'
+
+  # see if -p or -z is used to override the project or zone
+  local OPTIND
+  local OPTARG
+  local arg_func
+  while getopts :p:z:f:n name
+  do
+    case $name in
+      f)   declare -F $OPTARG >> /dev/null && {
+          arg_func=$OPTARG;
+        } || {
+          echo "-f: arg_func value: $OPTARG is not defined"
+          return 2
+        }
+        ;;
+      n)   dry_run=1 ;;
+      p)   grpc_project=$OPTARG ;;
+      z)   grpc_zone=$OPTARG ;;
+      :)   [[ $OPTARG == 'f' ]] && {
+          echo "-f: no arg_func value provided" 1>&2
+          return 2
+        } || {
+          # ignore -p or -z without args, just use the defaults
+          continue
+        }
+        ;;
+      \?)  echo "-$OPTARG: unknown flag; it's ignored" 1>&2;  continue ;;
+    esac
+  done
+  shift $((OPTIND-1))
+  [[ -n $arg_func ]] && $arg_func "$@"
+}
+
+# construct the flags to be passed to the binary running the test client
+#
+# call-seq:
+#   flags=$(grpc_interop_test_flags <server_ip> <server_port> <test_case>)
+#   [[ -n $flags ]] || return 1
+grpc_interop_test_flags() {
+  [[ -n $1 ]] && {  # server_ip
+    local server_ip=$1
+    shift
+  } || {
+    echo "$FUNCNAME: missing arg: server_ip" 1>&2
+    return 1
+  }
+  [[ -n $1 ]] && {  # port
+    local port=$1
+    shift
+  } || {
+    echo "$FUNCNAME: missing arg: port" 1>&2
+    return 1
+  }
+  [[ -n $1 ]] && {  # test_case
+    local test_case=$1
+    shift
+  } || {
+    echo "$FUNCNAME: missing arg: test_case" 1>&2
+    return 1
+  }
+  echo "--server_host=$server_ip --server_port=$port --test_case=$test_case"
+}
+
+# checks the positional args and assigns them to variables visible in the caller
+#
+# these are the positional args passed to grpc_interop_test after option flags
+# are removed
+#
+# five args are expected, in order
+# - test_case
+# - host <the gce docker instance on which to run the test>
+# - client to run
+# - server_host <the gce docker instance on which the test server is running>
+# - server type
+grpc_interop_test_args() {
+  [[ -n $1 ]] && {  # test_case
+    test_case=$1
+    shift
+  } || {
+    echo "$FUNCNAME: missing arg: test_case" 1>&2
+    return 1
+  }
+
+  [[ -n $1 ]] && {  # host
+    host=$1
+    shift
+  } || {
+    echo "$FUNCNAME: missing arg: host" 1>&2
+    return 1
+  }
+
+  [[ -n $1 ]] && {  # client_type
+    case $1 in
+      cxx|go|java|nodejs|php|python|ruby)
+        grpc_gen_test_cmd="grpc_interop_gen_$1_cmd"
+        declare -F $grpc_gen_test_cmd >> /dev/null || {
+          echo "-f: test_func for $1 => $grpc_gen_test_cmd is not defined" 1>&2
+          return 2
+        }
+        shift
+        ;;
+      *)
+        echo "bad client_type: $1" 1>&2
+        return 1
+        ;;
+    esac
+  } || {
+    echo "$FUNCNAME: missing arg: client_type" 1>&2
+    return 1
+  }
+
+  [[ -n $1 ]] && {  # grpc_server
+    grpc_server=$1
+    shift
+  } || {
+    echo "$FUNCNAME: missing arg: grpc_server" 1>&2
+    return 1
+  }
+
+  [[ -n $1 ]] && {  # server_type
+    case $1 in
+      cxx)    grpc_port=8010 ;;
+      go)     grpc_port=8020 ;;
+      java)   grpc_port=8030 ;;
+      nodejs) grpc_port=8040 ;;
+      python) grpc_port=8050 ;;
+      ruby)   grpc_port=8060 ;;
+      *) echo "bad server_type: $1" 1>&2; return 1 ;;
+    esac
+    shift
+  } || {
+    echo "$FUNCNAME: missing arg: server_type" 1>&2
+    return 1
+  }
+}
+
+grpc_launch_server_args() {
+  [[ -n $1 ]] && {  # host
+    host=$1
+    shift
+  } || {
+    echo "$FUNCNAME: missing arg: host" 1>&2
+    return 1
+  }
+
+  [[ -n $1 ]] && {  # server_type
+    case $1 in
+      cxx)    grpc_port=8010 ;;
+      go)     grpc_port=8020 ;;
+      java)   grpc_port=8030 ;;
+      nodejs) grpc_port=8040 ;;
+      python) grpc_port=8050 ;;
+      ruby)   grpc_port=8060 ;;
+      *) echo "bad server_type: $1" 1>&2; return 1 ;;
+    esac
+    docker_label="grpc/$1"
+    docker_name="grpc_interop_$1"
+    shift
+  } || {
+    echo "$FUNCNAME: missing arg: server_type" 1>&2
+    return 1
+  }
+}
+
+# Launches a server on a docker instance.
+#
+# call-seq;
+#   grpc_launch_server <server_name> <server_type>
+#
+# Runs the server_type on a GCE instance running docker with server_name
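+#
+# e.g. (instance name illustrative):
+#   grpc_launch_server grpc-docker-server java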
+grpc_launch_server() {
+  # declare vars local so that they don't pollute the shell environment
+  # where this func is used.
+  local grpc_zone grpc_project dry_run  # set by grpc_set_project_and_zone
+  # set by grpc_launch_server_args
+  local docker_label docker_name host grpc_port
+
+  # set the project zone and check that all necessary args are provided
+  grpc_set_project_and_zone -f grpc_launch_server_args "$@" || return 1
+  gce_has_instance $grpc_project $host || return 1;
+
+  cmd="sudo docker run -d --name $docker_name"
+  cmd+=" -p $grpc_port:$grpc_port $docker_label"
+  local project_opt="--project $grpc_project"
+  local zone_opt="--zone $grpc_zone"
+  local ssh_cmd="bash -l -c \"$cmd\""
+  echo "will run:"
+  echo "  $ssh_cmd"
+  echo "on $host"
+  [[ $dry_run == 1 ]] && return 0  # don't run the command on a dry run
+  gcloud compute $project_opt ssh $zone_opt $host --command "$ssh_cmd"
+}
+
+# Runs a test command on a docker instance.
+#
+# call-seq:
+#   grpc_interop_test <test_name> <host> <client_type> \
+#                     <server_host> <server_type>
+#
+# N.B:  server_name defaults to 'grpc-docker-server'
+#
+# requirements:
+#   host is a GCE instance running docker with access to the gRPC docker images
+#   server_name is a GCE docker instance running the gRPC server in docker
+#   test_name is one of the named gRPC tests [http://go/grpc_interop_tests]
+#   client_type is one of [cxx,go,java,php,python,ruby]
+#   server_type is one of [cxx,go,java,python,ruby]
+#
+# it assumes:
+#   that each grpc implementation has a docker image named grpc/<imp>, e.g., grpc/java
+#   a test is run using $ docker run 'path/to/interop_test_bin --flags'
+#   the required images are available on <host>
+#
+#   server_name [default:grpc-docker-server] is an instance that runs the
+#   <server_type> server on the standard test port for the <server_type>
+#
+# each server_type runs its tests on a standard test port as follows:
+#   cxx:    8010
+#   go:     8020
+#   java:   8030
+#   nodejs: 8040
+#   python: 8050
+#   ruby:   8060
+#
+# each client_type should have an associated bash func:
+#   grpc_interop_gen_<client_type>_cmd
+# the func provides the dockerized command for running client_type's test.
+# If no such func is available, tests for that client type cannot be run.
+#
+# the flags for running a test are the same:
+#
+# --server_host=<svr_addr>  --server_port=<svr_port> --test_case=<...>
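+#
+# e.g. (instance names illustrative):
+#   grpc_interop_test empty_unary grpc-docker-client ruby grpc-docker-server java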
+grpc_interop_test() {
+  # declare vars local so that they don't pollute the shell environment
+  # where this func is used.
+
+  local grpc_zone grpc_project dry_run  # set by grpc_set_project_and_zone
+  # set by grpc_interop_test_args
+  local test_case host grpc_gen_test_cmd grpc_server grpc_port
+
+  # set the project zone and check that all necessary args are provided
+  grpc_set_project_and_zone -f grpc_interop_test_args "$@" || return 1
+  gce_has_instance $grpc_project $host || return 1;
+
+  local addr=$(gce_find_internal_ip $grpc_project $grpc_server)
+  [[ -n $addr ]] || return 1
+  local flags=$(grpc_interop_test_flags $addr $grpc_port $test_case)
+  [[ -n $flags ]] || return 1
+  cmd=$($grpc_gen_test_cmd $flags)
+  [[ -n $cmd ]] || return 1
+
+  local project_opt="--project $grpc_project"
+  local zone_opt="--zone $grpc_zone"
+  local ssh_cmd="bash -l -c \"$cmd\""
+  echo "will run:"
+  echo "  $ssh_cmd"
+  echo "on $host"
+  [[ $dry_run == 1 ]] && return 0  # don't run the command on a dry run
+  gcloud compute $project_opt ssh $zone_opt $host --command "$ssh_cmd"
+}
+
+# constructs the full dockerized ruby interop test cmd.
+#
+# call-seq:
+#   flags= .... # generic flags to include the command
+#   cmd=$($grpc_gen_test_cmd $flags)
+grpc_interop_gen_ruby_cmd() {
+  local cmd_prefix="sudo docker run grpc/ruby bin/bash -l -c"
+  local test_script="/var/local/git/grpc/src/ruby/bin/interop/interop_client.rb"
+  local the_cmd="$cmd_prefix 'ruby $test_script $@'"
+  echo $the_cmd
+}
+
+# constructs the full dockerized java interop test cmd.
+#
+# call-seq:
+#   flags= .... # generic flags to include the command
+#   cmd=$($grpc_gen_test_cmd $flags)
+grpc_interop_gen_java_cmd() {
+    local cmd_prefix="sudo docker run grpc/java";
+    local test_script="/var/local/git/grpc-java/run-test-client.sh";
+    test_script+=" --transport=NETTY_TLS --grpc_version=2"
+    local the_cmd="$cmd_prefix $test_script $@";
+    echo $the_cmd
+}
+
+# constructs the full dockerized php interop test cmd.
+#
+# TODO(mlumish): update this to use the script once that's on git-on-borg
+#
+# call-seq:
+#   flags= .... # generic flags to include the command
+#   cmd=$($grpc_gen_test_cmd $flags)
+grpc_interop_gen_php_cmd() {
+    local cmd_prefix="sudo docker run grpc/php bin/bash -l -c";
+    local test_script="cd /var/local/git/grpc/src/php/tests/interop";
+    test_script+=" && php -d extension_dir=../../ext/grpc/modules/";
+    test_script+=" -d extension=grpc.so interop_client.php";
+    local the_cmd="$cmd_prefix '$test_script $@ 1>&2'";
+    echo $the_cmd
+}
+
+
+# TODO(grpc-team): add grpc_interop_gen_xxx_cmd for python|cxx|nodejs|go
diff --git a/tools/gce_setup/new_grpc_docker_builder.sh b/tools/gce_setup/new_grpc_docker_builder.sh
new file mode 100755
index 0000000..9a3988f
--- /dev/null
+++ b/tools/gce_setup/new_grpc_docker_builder.sh
@@ -0,0 +1,153 @@
+#!/bin/bash
+
+# Triggers the build of a GCE 'grpc-docker' instance.
+#
+# Usage:
+# /path/to/new_grpc_docker_builder.sh \
+#   [--project <cloud-project-id> | -p<cloud-project-id>] \
+#   [--instance <instance-to-create> | -i<instance-to-create>] \
+#   [--address <named_cloud_static_ip> | -a<named_cloud_static_ip>]
+#
+# To create a new docker builder instance:
+# $ /path/to/new_grpc_docker_builder.sh -pmy-project -imy-instance -amy-ip
+#
+# See main() for the full list of flags
+
+function this_dir() {
+  SCRIPT_PATH="${BASH_SOURCE[0]}";
+  if ([ -h "${SCRIPT_PATH}" ]) then
+    while([ -h "${SCRIPT_PATH}" ]) do SCRIPT_PATH=`readlink "${SCRIPT_PATH}"`; done
+  fi
+  pushd . > /dev/null
+  cd `dirname ${SCRIPT_PATH}` > /dev/null
+  SCRIPT_PATH=`pwd`;
+  popd  > /dev/null
+  echo $SCRIPT_PATH
+}
+
+source $(this_dir)/compute_extras.sh
+source $(this_dir)/grpc_docker.sh
+
+cp_startup_script() {
+  local script_dir=$1
+  [[ -n $script_dir ]] || { echo "missing arg: script_dir" 1>&2; return 1; }
+
+  local gs_script_root=$2
+  [[ -n $gs_script_root ]] || { echo "missing arg: gs_script_root" 1>&2; return 1; }
+
+  local script_path=$3
+  [[ -n $script_path ]] || { echo "missing arg: script_path" 1>&2; return 1; }
+
+  local startup_script=$script_dir/$script_path
+  local gs_startup_uri=$gs_script_root/$script_path
+  gsutil cp $startup_script $gs_startup_uri
+}
+
+# add_instance adds a generic instance that runs
+# new_grpc_docker_builder_on_startup.sh on startup
+add_instance() {
+  local project=$1
+  [[ -n $project ]] || { echo "missing arg: project" 1>&2; return 1; }
+  local gs_admin_root=$2
+  [[ -n $gs_admin_root ]] || { echo "missing arg: gs_admin_root" 1>&2; return 1; }
+  local instance=$3
+  [[ -n $instance ]] || { echo "missing arg: instance" 1>&2; return 1; }
+  local zone=$4
+  [[ -n $zone ]] || { echo "missing arg: zone" 1>&2; return 1; }
+  local address=$5
+  [[ -n $address ]] || { echo "missing arg: address" 1>&2; return 1; }
+
+  local script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+  local gs_script_root="$gs_admin_root/startup"
+
+  local on_startup=new_grpc_docker_builder_on_startup.sh
+  local gs_on_startup=$gs_script_root/$on_startup
+  cp_startup_script $script_dir $gs_script_root $on_startup || {
+    echo "Could not save script to $gs_on_startup" 1>&2
+    return 1
+  }
+  startup_md="startup-script-url=$gs_on_startup"
+
+  local shared_startup=shared_startup_funcs.sh
+  local gs_shared_startup=$gs_script_root/$shared_startup
+  cp_startup_script $script_dir $gs_script_root $shared_startup || {
+    echo "Could not save script to $gs_shared_startup" 1>&2
+    return 1
+  }
+  startup_md+=" shared_startup_script_url=$gs_shared_startup"
+
+  local docker_dir=$(this_dir)/../dockerfile
+  grpc_push_dockerfiles $docker_dir $gs_admin_root || return 1;
+  startup_md+=" gs_dockerfile_root=$gs_admin_root/dockerfile"
+  startup_md+=" gs_docker_reg=$gs_admin_root/docker_images"
+
+  local address_flag=""
+  local the_address=$(find_named_ip $address)
+  [[ -n $the_address ]] && address_flag="--address $the_address"
+  local the_image='container-vm-v20140925'
+  local scopes='compute-rw storage-full'
+  scopes+=' https://www.googleapis.com/auth/gerritcodereview'
+  gcloud --project $project compute instances create $instance \
+    $address_flag \
+    --image $the_image \
+    --image-project google-containers \
+    --metadata $startup_md  \
+    --machine-type='n1-standard-1' \
+    --scopes $scopes \
+    --tags grpc testing \
+    --zone $zone \
+    --boot-disk-size 500GB
+}
+
+main() {
+    local INSTANCE_NAME="grpc-docker-builder"
+    local PROJECT="stoked-keyword-656"
+    local GS_ADMIN_ROOT="gs://tmp-grpc-dev/admin"
+    local ZONE='asia-east1-a'
+    local ADDRESS_NAME='grpc-php-dev-static-1'  # use 'none' if no static ip is needed
+
+    # Parse the options
+    opts=`getopt -o a::p::g::i::z:: --long address_name::,project::,gs_admin_root::,instance_name::,zone:: -n $0 -- "$@"`
+    eval set -- "$opts"
+    while true ; do
+      case "$1" in
+        -p|--project)
+          case "$2" in
+            "") shift 2  ;;
+             *) PROJECT=$2; shift 2  ;;
+          esac ;;
+        -a|--address_name)
+          case $2 in
+            "") shift 2 ;;
+            *) ADDRESS_NAME=$2; shift 2 ;;
+          esac ;;
+        -g|--gs_admin_root)
+          case "$2" in
+            "") shift 2  ;;
+            *) GS_ADMIN_ROOT=$2; shift 2  ;;
+          esac ;;
+        -i|--instance_name)
+          case "$2" in
+            "") shift 2  ;;
+            *) INSTANCE_NAME=$2; shift 2  ;;
+          esac ;;
+        -z|--zone)
+          case "$2" in
+            "") shift 2  ;;
+            *) ZONE=$2; shift 2  ;;
+          esac ;;
+        --) shift ; break ;;
+        *) echo "Internal error!" ; exit 1 ;;
+      esac
+    done
+
+    # remove the instance if it already exists, then verify it is gone
+    has_instance $PROJECT $INSTANCE_NAME && remove_instance $PROJECT $INSTANCE_NAME $ZONE
+    has_instance $PROJECT $INSTANCE_NAME && { echo "$INSTANCE_NAME already exists" 1>&2; return 1; }
+
+    # N.B. the quotes around the args are necessary to allow cmds with spaces
+    add_instance $PROJECT $GS_ADMIN_ROOT $INSTANCE_NAME $ZONE $ADDRESS_NAME
+}
+
+set -x
+main "$@"
diff --git a/tools/gce_setup/new_grpc_docker_builder_on_startup.sh b/tools/gce_setup/new_grpc_docker_builder_on_startup.sh
new file mode 100755
index 0000000..87e8aac
--- /dev/null
+++ b/tools/gce_setup/new_grpc_docker_builder_on_startup.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+# Startup script that initializes a grpc-dev GCE machine.
+#
+# A grpc-docker GCE machine is based on a docker container image.
+#
+# On startup, it copies the grpc dockerfiles to a local directory, and updates
+# its address so that the docker containers within it have git-on-borg access.
+
+# _load_metadata curls a metadata url
+_load_metadata() {
+  local metadata_root=http://metadata/computeMetadata/v1
+  local uri=$1
+  [[ -n $uri ]] || { echo "missing arg: uri" >&2; return 1; }
+
+  if [[ $uri =~ ^'attributes/' ]]
+  then
+    for a in $(curl -H "X-Google-Metadata-Request: True" $metadata_root/instance/attributes/)
+    do
+      [[ $uri =~ "/$a"$ ]] && { curl $metadata_root/instance/$uri -H "X-Google-Metadata-Request: True"; return; }
+    done
+  fi
+
+  # if the uri is a full request uri
+  [[ $uri =~ ^$metadata_root ]] && { curl $uri -H "X-Google-Metadata-Request: True"; return; }
+}
+
+_source_gs_script() {
+  local script_attr=$1
+  [[ -n $script_attr ]] || { echo "missing arg: script_attr" >&2; return 1; }
+
+  local gs_uri=$(_load_metadata "attributes/$script_attr")
+  [[ -n $gs_uri ]] || { echo "missing metadata: $script_attr" >&2; return 1; }
+
+  local out_dir='/var/local/startup_scripts'
+  local script_path=$out_dir/$(basename $gs_uri)
+  mkdir -p $out_dir
+  gsutil cp $gs_uri $script_path || {
+    echo "could not cp $gs_uri -> $script_path"
+    return 1
+  }
+  chmod a+rwx $out_dir $script_path
+  source $script_path
+}
+
+main() {
+    local script_attr='shared_startup_script_url'
+    _source_gs_script $script_attr || {
+      echo "halting, script 'attributes/$script_attr' could not be sourced"
+      return 1
+    }
+    grpc_dockerfile_pull
+    chmod -R a+rw /var/local/dockerfile
+
+    # Install git and emacs
+    apt-get update && apt-get install -y git emacs || return 1
+
+    # Enable access to git repos on git-on-borg
+    local git_root='/var/local/git'
+    install_gob_daemon $git_root/gerrit-gcompute-tools || return 1
+
+    # Startup the docker registry
+    grpc_docker_launch_registry && grpc_docker_pull_known
+
+    # Add a sentinel file to indicate that startup has completed.
+    local sentinel_file=/var/log/GRPC_DOCKER_IS_UP
+    touch $sentinel_file
+}
+
+set -x
+main "$@"
diff --git a/tools/gce_setup/shared_startup_funcs.sh b/tools/gce_setup/shared_startup_funcs.sh
new file mode 100755
index 0000000..8bd62e2
--- /dev/null
+++ b/tools/gce_setup/shared_startup_funcs.sh
@@ -0,0 +1,432 @@
+#!/bin/bash
+# Contains common funcs shared by instance startup scripts.
+#
+# The funcs assume that the code is being run on a GCE instance during instance
+# startup.
+
+function die() {
+  local msg="$0 failed"
+  if [[ -n $1 ]]
+  then
+    msg=$1
+  fi
+  echo $msg
+  exit 1
+}
+
+# umount_by_disk_id umounts a disk given its disk_id.
+umount_by_disk_id() {
+  local disk_id=$1
+  [[ -n $disk_id ]] || { echo "missing arg: disk_id" >&2; return 1; }
+
+  # Unmount the disk first
+  sudo umount /dev/disk/by-id/google-$disk_id || { echo "Could not unmount /dev/disk/by-id/google-$disk_id" >&2; return 1; }
+}
+
+# check_metadata confirms that the result of curling a metadata url does not
+# contain 'Error 404'
+check_metadata() {
+  local curl_output=$1
+  [[ -n $curl_output ]] || { echo "missing arg: curl_output" >&2; return 1; }
+
+  if [[ $curl_output =~ "Error 404" ]]
+  then
+    return 1
+  fi
+
+  return 0
+}
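+
+# Typical use, as in the helpers below:
+#   full_host=$(load_metadata "http://metadata/computeMetadata/v1/instance/hostname")
+#   check_metadata $full_host || return 1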
+
+# name_this_instance determines the current instance name.
+name_this_instance() {
+  local the_full_host_name
+  the_full_host_name=$(load_metadata "http://metadata/computeMetadata/v1/instance/hostname")
+  check_metadata $the_full_host_name || return 1
+  local the_instance
+  the_instance=$(echo $the_full_host_name | cut -d . -f 1 -) || {
+    echo "could not get the instance name from $the_full_host_name" >&2
+    return 1
+  }
+
+  echo $the_instance
+}
+
+# delete_this_instance deletes this GCE instance. (the instance will shut down
+# as a result of running this cmd)
+delete_this_instance() {
+  local the_full_zone
+  the_full_zone=$(load_metadata "http://metadata/computeMetadata/v1/instance/zone")
+  check_metadata $the_full_zone || return 1
+  local the_zone
+  the_zone=$(echo $the_full_zone | cut -d / -f 4 -) || { echo "could not get zone from $the_full_zone" >&2; return 1; }
+
+  local the_full_host_name
+  the_full_host_name=$(load_metadata "http://metadata/computeMetadata/v1/instance/hostname")
+  check_metadata $the_full_host_name || return 1
+  local the_instance
+  the_instance=$(echo $the_full_host_name | cut -d . -f 1 -) || { echo "could not get the instance name from $the_full_host_name" >&2; return 1; }
+
+  echo "using gcloud compute instances delete to remove: ${the_instance}"
+  gcloud compute --quiet instances delete --delete-disks boot --zone $the_zone $the_instance
+}
+
+# save_image_info updates the 'images' release info file on GCS.
+save_image_info() {
+  local image_id=$1
+  [[ -n $image_id ]] || { echo "missing arg: image_id" >&2; return 1; }
+
+  local repo_gs_uri=$2
+  [[ -n $repo_gs_uri ]] || { echo "missing arg: repo_gs_uri" >&2; return 1; }
+
+  local sentinel="/tmp/$image_id.txt"
+  echo $image_id > $sentinel || { echo "could not create /tmp/$image_id.txt" >&2; return 1; }
+
+  local gs_sentinel="$repo_gs_uri/images/info/LATEST"
+  gsutil cp $sentinel $gs_sentinel  || { echo "failed to update $gs_sentinel" >&2; return 1; }
+}
+
+# creates an image, getting the name and cloud storage uri from the supplied
+# instance metadata.
+create_image() {
+  local image_id
+  image_id=$(load_metadata "attributes/image_id")
+  [[ -n $image_id ]] || { echo "missing metadata: image_id" >&2; return 1; }
+
+  local repo_gs_uri
+  repo_gs_uri=$(load_metadata "attributes/repo_gs_uri")
+  [[ -n $repo_gs_uri ]] || { echo "missing metadata: repo_gs_uri" >&2; return 1; }
+
+  local the_project
+  the_project=$(load_metadata "http://metadata/computeMetadata/v1/project/project-id")
+  check_metadata $the_project || return 1
+
+  sudo gcimagebundle -d /dev/sda -o /tmp/ --log_file=/tmp/$image_id.log || { echo "image creation failed" >&2; return 1; }
+  image_path=$(ls /tmp/*.tar.gz)
+  image_gs_uri="$repo_gs_uri/images/$image_id.tar.gz"
+
+  # copy the image to cloud storage
+  gsutil cp $image_path $image_gs_uri || { echo "failed to save image to $image_gs_uri" >&2; return 1; }
+  gcloud compute --project=$the_project images create \
+    $image_id --source-uri $image_gs_uri || { echo "failed to register $image_gs_uri as $image_id" >&2; return 1; }
+
+  save_image_info $image_id $repo_gs_uri
+}
+
+# load_metadata curls a metadata url
+load_metadata() {
+  local metadata_root=http://metadata/computeMetadata/v1
+  local uri=$1
+  [[ -n $uri ]] || { echo "missing arg: uri" >&2; return 1; }
+
+  if [[ $uri =~ ^'attributes/' ]]
+  then
+    for a in $(curl -H "X-Google-Metadata-Request: True" $metadata_root/instance/attributes/)
+    do
+      [[ $uri =~ "/$a"$ ]] && { curl $metadata_root/instance/$uri -H "X-Google-Metadata-Request: True"; return; }
+    done
+  fi
+
+  # if the uri is a full request uri
+  [[ $uri =~ ^$metadata_root ]] && { curl $uri -H "X-Google-Metadata-Request: True"; return; }
+}
+
+install_python_module() {
+  local mod=$1
+  [[ -n $mod ]] || { echo "missing arg: mod" >&2; return 1; }
+
+  echo '------------------------------------'
+  echo "Installing: $mod"
+  echo '------------------------------------'
+  echo
+  install_with_apt_get gcc python-dev python-setuptools
+  sudo easy_install -U pip
+  sudo pip uninstall -y $mod
+  sudo pip install -U $mod
+}
+
+install_with_apt_get() {
+  local pkgs=$@
+  echo '---------------------------'
+  echo "Installing: $pkgs"
+  echo '---------------------------'
+  echo
+  sudo apt-get install -y $pkgs
+}
+
+# pulls code from a git repo @HEAD to a local directory, removing the current version if present.
+setup_git_dir() {
+  local git_http_repo=$1
+  [[ -n $git_http_repo ]] || { echo "missing arg: git_http_repo" >&2; return 1; }
+
+  local git_dir=$2
+  [[ -n $git_dir ]] || { echo "missing arg: git_dir" >&2; return 1; }
+
+  if [[ -e $git_dir ]]
+  then
+    rm -fR $git_dir || { echo "could not remove existing repo at $git_dir" >&2; return 1; }
+  fi
+
+  local git_user
+  git_user=$(load_metadata "http://metadata/computeMetadata/v1/instance/service-accounts/default/email")
+  check_metadata $git_user || return 1
+  urlsafe_git_user=$(echo $git_user | sed -e s/@/%40/g) || return 1
+
+  local access_token=$(load_metadata "http://metadata/computeMetadata/v1/instance/service-accounts/default/token?alt=text")
+  check_metadata $access_token || return 1
+  local git_pwd=$(echo $access_token | cut -d' ' -f 2) || return 1
+
+  git clone https://$urlsafe_git_user:$git_pwd@$git_http_repo $git_dir
+}
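+
+# Sample invocation (the repo url and target dir are hypothetical):
+#   setup_git_dir "team.googlesource.com/some-repo" /var/local/git/some-repo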
+
+# network_copy copies a file to another gce instance.
+network_copy() {
+  local the_node=$1
+  [[ -n $the_node ]] || { echo "missing arg: the_node" >&2; return 1; }
+
+  local src=$2
+  [[ -n $src ]] || { echo "missing arg: src" >&2; return 1; }
+
+  local dst=$3
+  [[ -n $dst ]] || { echo "missing arg: dst" >&2; return 1; }
+
+  gcloud compute copy-files --zone=us-central1-b $src $the_node:$dst
+}
+
+# gcs_copy copies a file to a location beneath a root gcs object path.
+gcs_copy() {
+  local gce_root=$1
+  [[ -n $gce_root ]] || { echo "missing arg: gce_root" >&2; return 1; }
+
+  local src=$2
+  [[ -n $src ]] || { echo "missing arg: src" >&2; return 1; }
+
+  local dst=$3
+  [[ -n $dst ]] || { echo "missing arg: dst" >&2; return 1; }
+
+  gsutil cp $src $gce_root/$dst
+}
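+
+# Sample invocation (the bucket and paths are hypothetical):
+#   gcs_copy gs://my-bucket /tmp/image.tar.gz images/image.tar.gz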
+
+# find_named_ip finds the external ip address for a given name.
+find_named_ip() {
+  local name=$1
+  [[ -n $name ]] || { echo "missing arg: name" >&2; return 1; }
+
+  gcloud compute addresses list | sed -e 's/ \+/ /g' | grep $name | cut -d' ' -f 3
+}
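+
+# Sample invocation (the address name is hypothetical):
+#   ip=$(find_named_ip my-reserved-address)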
+
+# update_address_to updates this instance's ip address to the reserved ip address with the given name
+update_address_to() {
+  local name=$1
+  [[ -n $name ]] || { echo "missing arg: name" >&2; return 1; }
+
+  named_ip=$(find_named_ip $name)
+  [[ -n $named_ip ]] || { echo "did not find an address corresponding to $name" >&2; return 1; }
+
+  local the_full_zone
+  the_full_zone=$(load_metadata "http://metadata/computeMetadata/v1/instance/zone")
+  check_metadata $the_full_zone || return 1
+  local the_zone
+  the_zone=$(echo $the_full_zone | cut -d / -f 4 -) || {
+    echo "could not get zone from $the_full_zone" >&2
+    return 1
+  }
+
+  local the_full_host_name
+  the_full_host_name=$(load_metadata "http://metadata/computeMetadata/v1/instance/hostname")
+  check_metadata $the_full_host_name || return 1
+  local the_instance
+  the_instance=$(echo $the_full_host_name | cut -d . -f 1 -) || {
+    echo "could not determine the instance from $the_full_host_name" >&2
+    return 1
+  }
+
+  gcloud compute instances delete-access-config --zone $the_zone $the_instance || {
+    echo "could not delete the access config for $the_instance" >&2
+    return 1
+  }
+  gcloud compute instances add-access-config --zone $the_zone $the_instance --address $named_ip || {
+    echo "could not update the access config for $the_instance to $named_ip" >&2
+    return 1
+  }
+}
+
+# Allows instances to check out repos on git-on-borg.
+#
+install_gob_daemon() {
+  local gob_dir=$1
+  [[ -n $gob_dir ]] || { echo "missing args: gob_dir" >&2; return 1;  }
+
+  local gob_repo=$2
+  [[ -n $gob_repo ]] || gob_repo='https://gerrit.googlesource.com/gcompute-tools/'
+
+  if [[ -e $gob_dir ]]
+  then
+    rm -frv $gob_dir || {
+      echo "could not remove existing git repo at $gob_dir" >&2
+      return 1
+    }
+  fi
+
+  git clone $gob_repo $gob_dir || { echo "failed to pull gerrit cookie repo" >&2; return 1; }
+  local startup_script=/etc/profile.d/gob_cookie_daemon.sh
+
+  cat <<EOF >> $startup_script
+#!/bin/bash
+
+$gob_dir/git-cookie-authdaemon
+
+EOF
+
+  chmod 755 $startup_script
+  $startup_script
+}
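+
+# Sample invocation, as used by the builder startup script:
+#   install_gob_daemon /var/local/git/gerrit-gcompute-tools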
+
+# grpc_docker_add_docker_group
+#
+# Adds a docker group, restarts docker, relaunches the docker registry
+grpc_docker_add_docker_group() {
+  [[ -f /var/log/GRPC_DOCKER_IS_UP ]] || {
+    echo "missing file /var/log/GRPC_DOCKER_IS_UP; either wrong machine or still starting up" >&2;
+    return 1
+  }
+  sudo groupadd docker
+
+  local user=$(id -un)
+  [[ -n ${user} ]] || { echo 'could not determine the user' >&2; return 1; }
+  sudo gpasswd -a ${user} docker
+  sudo service docker restart || return 1;
+  grpc_docker_launch_registry
+}
+
+# grpc_dockerfile_pull <local_docker_parent_dir>
+#
+# requires: attributes/gs_dockerfile_root is set to cloud storage directory
+# containing the dockerfile directory
+grpc_dockerfile_pull() {
+  local dockerfile_parent=$1
+  [[ -n $dockerfile_parent ]] || dockerfile_parent='/var/local'
+
+  local gs_dockerfile_root=$(load_metadata "attributes/gs_dockerfile_root")
+  [[ -n $gs_dockerfile_root ]] || { echo "missing metadata: gs_dockerfile_root" >&2; return 1; }
+
+  mkdir -p $dockerfile_parent
+  gsutil cp -R $gs_dockerfile_root $dockerfile_parent || {
+    echo "Did not copy docker files from $gs_dockerfile_root -> $dockerfile_parent"
+    return 1
+  }
+}
+
+# grpc_docker_launch_registry
+#
+# requires: attributes/gs_docker_reg is set to the cloud storage directory to
+# use to store docker images
+grpc_docker_launch_registry() {
+  local gs_docker_reg=$(load_metadata "attributes/gs_docker_reg")
+  [[ -n $gs_docker_reg ]] || { echo "missing metadata: gs_docker_reg" >&2; return 1; }
+
+  local gs_bucket=$(echo $gs_docker_reg | sed -r 's|gs://([^/]*).*|\1|')
+  [[ -n $gs_bucket ]] || {
+    echo "could not determine cloud storage bucket from $gs_docker_reg" >&2;
+    return 1
+  }
+
+  local storage_path_env=''
+  local image_path=$(echo $gs_docker_reg | sed -r 's|gs://[^/]*(.*)|\1|g' | sed -e 's:/$::g')
+  [[ -n $image_path ]] && {
+    storage_path_env="-e STORAGE_PATH=$image_path"
+  }
+
+  sudo docker run -d -e GCS_BUCKET=$gs_bucket $storage_path_env -p 5000:5000 google/docker-registry
+  # wait a couple of minutes max, for the registry to come up
+  local is_up=0
+  for i in {1..24}
+  do
+    local secs=`expr $i \* 5`
+    echo "is docker registry up? waited for $secs secs ..."
+    wget -q localhost:5000 && {
+      echo 'docker registry is up!'
+      is_up=1
+      break
+    }
+    sleep 5
+  done
+
+  [[ $is_up == 0 ]] && {
+    echo "docker registry not available after 120 seconds"; return 1;
+  } || return 0
+}
+
+# grpc_docker_pull_known
+#
+# This pulls a set of known docker images from a private docker registry to
+# the local image cache. It re-tags the images so that FROM clauses in
+# Dockerfiles built on the docker instance can find the images OK.
+#
+# optional: $1 is the address of a grpc docker registry; the default is 0.0.0.0:5000
+grpc_docker_pull_known() {
+  local addr=$1
+  [[ -n $addr ]] || addr="0.0.0.0:5000"
+  local known="base cxx php_base php ruby_base ruby java_base java"
+  echo "... pulling docker images for '$known'"
+  for i in $known
+  do
+    sudo docker pull ${addr}/grpc/$i \
+      && sudo docker tag ${addr}/grpc/$i grpc/$i || {
+      # log and continue
+      echo "docker op error:  could not pull ${addr}/grpc/$i"
+    }
+  done
+}
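+
+# Sample invocations:
+#   grpc_docker_pull_known                 # use the default registry address
+#   grpc_docker_pull_known "0.0.0.0:5000"  # pass the registry address explicitly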
+
+# grpc_dockerfile_install <image_label> <dockerfile_dir> [cache=yes]
+#
+# requires: $1 is the label to apply to the docker image
+# requires: $2 is a local directory containing a Dockerfile
+# requires: there is a docker registry running on 5000, e.g., grpc_docker_launch_registry was run
+#
+# grpc_dockerfile_install "grpc/image" /var/local/dockerfile/grpc_image
+grpc_dockerfile_install() {
+  local image_label=$1
+  [[ -n $image_label ]] || { echo "missing arg: image_label" >&2; return 1; }
+  local docker_img_url=0.0.0.0:5000/$image_label
+
+  local dockerfile_dir=$2
+  [[ -n $dockerfile_dir ]] || { echo "missing arg: dockerfile_dir" >&2; return 1; }
+
+  local cache_opt='--no-cache'
+  local cache=$3
+  [[ $cache == "cache=yes" ]] && { cache_opt=''; }
+  [[ $cache == "cache=1" ]] && { cache_opt=''; }
+  [[ $cache == "cache=true" ]] && { cache_opt=''; }
+
+  [[ -d $dockerfile_dir ]] || { echo "not a valid dir: $dockerfile_dir"; return 1; }
+
+  # TODO(temiola): maybe make cache/no-cache a func option?
+  sudo docker build $cache_opt -t $image_label $dockerfile_dir || {
+    echo "docker op error: build of $image_label <- $dockerfile_dir"
+    return 1
+  }
+  sudo docker tag $image_label $docker_img_url || {
+    echo "docker op error: tag of $docker_img_url"
+    return 1
+  }
+  sudo docker push $docker_img_url || {
+    echo "docker op error: push of $docker_img_url"
+    return 1
+  }
+}
+
+# grpc_dockerfile_refresh
+#
+# requires: $1 is the label to apply to the docker image
+# requires: $2 is a local directory containing a Dockerfile
+# requires: there is a docker registry running on 5000, e.g., grpc_docker_launch_registry was run
+#
+# invokes grpc_dockerfile_pull to refresh the dockerfiles from cloud storage, then grpc_dockerfile_install
+#
+# grpc_dockerfile_refresh "grpc/mylabel" /var/local/dockerfile/dir_containing_my_dockerfile
+grpc_dockerfile_refresh() {
+  grpc_dockerfile_pull || return 1
+  grpc_dockerfile_install "$@"
+}
diff --git a/tools/run_tests/jobset.py b/tools/run_tests/jobset.py
new file mode 100755
index 0000000..0890cc5
--- /dev/null
+++ b/tools/run_tests/jobset.py
@@ -0,0 +1,88 @@
+"""Run a group of subprocesses and then finish."""
+
+import multiprocessing
+import random
+import subprocess
+import sys
+import threading
+
+# multiplicative factor used to oversubscribe CPU cores
+# (many tests sleep for a long time)
+_OVERSUBSCRIBE = 32
+_active_jobs = threading.Semaphore(
+    multiprocessing.cpu_count() * _OVERSUBSCRIBE)
+_output_lock = threading.Lock()
+
+
+def shuffle_iterable(it):
+  """Yield the values of it in a semi-random order."""
+  # take a random sampling from the passed-in iterable: we keep an element
+  # with probability 1/p and rapidly increase p as we take elements - this
+  # gives us a somewhat random set of values before we've seen all of them,
+  # but starts producing values without having to compute ALL of them at
+  # once, allowing tests to start a little earlier
+  nextit = []
+  p = 1
+  for val in it:
+    if random.randint(0, p) == 0:
+      p *= 2
+      yield val
+    else:
+      nextit.append(val)
+  # after taking a random sampling, we shuffle the rest of the elements and
+  # yield them
+  random.shuffle(nextit)
+  for val in nextit:
+    yield val
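+
+# For illustration: iterating shuffle_iterable(range(1000)) yields all 1000
+# values exactly once, in a randomized order, with the first values emitted
+# before the whole input has been iterated.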
+
+
+class Jobset(object):
+  """Manages one run of jobs."""
+
+  def __init__(self, cmdlines):
+    self._cmdlines = shuffle_iterable(cmdlines)
+    self._failures = 0
+
+  def _run_thread(self, cmdline):
+    try:
+      # start the process
+      p = subprocess.Popen(args=cmdline,
+                           stderr=subprocess.STDOUT,
+                           stdout=subprocess.PIPE)
+      stdout, _ = p.communicate()
+      # log output (under a lock)
+      _output_lock.acquire()
+      try:
+        if p.returncode != 0:
+          sys.stdout.write('\x1b[0G\x1b[2K\x1b[31mFAILED\x1b[0m: %s'
+                           ' [ret=%d]\n'
+                           '%s\n' % (
+                               ' '.join(cmdline), p.returncode,
+                               stdout))
+          self._failures += 1
+        else:
+          sys.stdout.write('\x1b[0G\x1b[2K\x1b[32mPASSED\x1b[0m: %s' %
+                           ' '.join(cmdline))
+        sys.stdout.flush()
+      finally:
+        _output_lock.release()
+    finally:
+      _active_jobs.release()
+
+  def run(self):
+    threads = []
+    for cmdline in self._cmdlines:
+      # cap number of active jobs - release in _run_thread
+      _active_jobs.acquire()
+      t = threading.Thread(target=self._run_thread,
+                           args=[cmdline])
+      t.start()
+      threads.append(t)
+    for thread in threads:
+      thread.join()
+    return self._failures == 0
+
+
+def run(cmdlines):
+  return Jobset(cmdlines).run()
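+
+# Sample use (the test binaries named here are hypothetical):
+#   if not run([['bins/dbg/foo_test'], ['bins/opt/bar_test']]):
+#     sys.exit(1)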
+
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
new file mode 100755
index 0000000..ee61f33
--- /dev/null
+++ b/tools/run_tests/run_tests.py
@@ -0,0 +1,49 @@
+#!/usr/bin/python
+"""Run tests in parallel."""
+
+import argparse
+import glob
+import itertools
+import multiprocessing
+import sys
+
+import jobset
+
+# flags required for make for each configuration
+_CONFIGS = ['dbg', 'opt', 'tsan', 'msan', 'asan']
+
+# parse command line
+argp = argparse.ArgumentParser(description='Run grpc tests.')
+argp.add_argument('-c', '--config',
+                  choices=['all'] + _CONFIGS,
+                  nargs='+',
+                  default=['all'])
+argp.add_argument('-t', '--test-filter', nargs='*', default=['*'])
+argp.add_argument('-n', '--runs_per_test', default=1, type=int)
+args = argp.parse_args()
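+
+# e.g., to run the dbg and opt tests twice each (flags as defined above):
+#   tools/run_tests/run_tests.py --config dbg opt --runs_per_test 2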
+
+# grab config
+configs = [cfg
+           for cfg in itertools.chain.from_iterable(
+               _CONFIGS if x == 'all' else [x]
+               for x in args.config)]
+filters = args.test_filter
+runs_per_test = args.runs_per_test
+
+# build latest, sharing cpu between the various makes
+if not jobset.run(
+    ['make',
+     '-j', '%d' % max(multiprocessing.cpu_count() / len(configs), 1),
+     'buildtests_c',
+     'CONFIG=%s' % cfg]
+    for cfg in configs):
+  sys.exit(1)
+
+# run all the tests, exiting non-zero if any fail
+if not jobset.run([x]
+                  for x in itertools.chain.from_iterable(
+                      itertools.chain.from_iterable(itertools.repeat(
+                          glob.glob('bins/%s/%s_test' % (config, filt)),
+                          runs_per_test))
+                      for config in configs
+                      for filt in filters)):
+  sys.exit(1)