Merge pull request #6440 from makdharma/bugfixes

RouteGuide example now works when moving between tabs. Fixes issue #6404.

diff --git a/BUILD b/BUILD
index b4b10b5..1da1650 100644
--- a/BUILD
+++ b/BUILD
@@ -463,6 +463,7 @@
     "include/grpc/grpc.h",
     "include/grpc/status.h",
     "include/grpc/impl/codegen/byte_buffer.h",
+    "include/grpc/impl/codegen/byte_buffer_reader.h",
     "include/grpc/impl/codegen/compression_types.h",
     "include/grpc/impl/codegen/connectivity_state.h",
     "include/grpc/impl/codegen/grpc_types.h",
@@ -774,6 +775,7 @@
     "include/grpc/grpc.h",
     "include/grpc/status.h",
     "include/grpc/impl/codegen/byte_buffer.h",
+    "include/grpc/impl/codegen/byte_buffer_reader.h",
     "include/grpc/impl/codegen/compression_types.h",
     "include/grpc/impl/codegen/connectivity_state.h",
     "include/grpc/impl/codegen/grpc_types.h",
@@ -946,6 +948,7 @@
     "include/grpc++/impl/codegen/sync_stream.h",
     "include/grpc++/impl/codegen/time.h",
     "include/grpc/impl/codegen/byte_buffer.h",
+    "include/grpc/impl/codegen/byte_buffer_reader.h",
     "include/grpc/impl/codegen/compression_types.h",
     "include/grpc/impl/codegen/connectivity_state.h",
     "include/grpc/impl/codegen/grpc_types.h",
@@ -1091,6 +1094,7 @@
     "include/grpc++/impl/codegen/sync_stream.h",
     "include/grpc++/impl/codegen/time.h",
     "include/grpc/impl/codegen/byte_buffer.h",
+    "include/grpc/impl/codegen/byte_buffer_reader.h",
     "include/grpc/impl/codegen/compression_types.h",
     "include/grpc/impl/codegen/connectivity_state.h",
     "include/grpc/impl/codegen/grpc_types.h",
@@ -1480,6 +1484,7 @@
     "include/grpc/grpc.h",
     "include/grpc/status.h",
     "include/grpc/impl/codegen/byte_buffer.h",
+    "include/grpc/impl/codegen/byte_buffer_reader.h",
     "include/grpc/impl/codegen/compression_types.h",
     "include/grpc/impl/codegen/connectivity_state.h",
     "include/grpc/impl/codegen/grpc_types.h",
diff --git a/Makefile b/Makefile
index 922e0b0..ffaf770 100644
--- a/Makefile
+++ b/Makefile
@@ -407,7 +407,7 @@
 Q = @
 endif
 
-VERSION = 0.14.0-dev
+VERSION = 0.15.0-dev
 
 CPPFLAGS_NO_ARCH += $(addprefix -I, $(INCLUDES)) $(addprefix -D, $(DEFINES))
 CPPFLAGS += $(CPPFLAGS_NO_ARCH) $(ARCH_FLAGS)
@@ -2650,6 +2650,7 @@
     include/grpc/grpc.h \
     include/grpc/status.h \
     include/grpc/impl/codegen/byte_buffer.h \
+    include/grpc/impl/codegen/byte_buffer_reader.h \
     include/grpc/impl/codegen/compression_types.h \
     include/grpc/impl/codegen/connectivity_state.h \
     include/grpc/impl/codegen/grpc_types.h \
@@ -2970,6 +2971,7 @@
     include/grpc/grpc.h \
     include/grpc/status.h \
     include/grpc/impl/codegen/byte_buffer.h \
+    include/grpc/impl/codegen/byte_buffer_reader.h \
     include/grpc/impl/codegen/compression_types.h \
     include/grpc/impl/codegen/connectivity_state.h \
     include/grpc/impl/codegen/grpc_types.h \
@@ -3256,6 +3258,7 @@
     include/grpc++/impl/codegen/sync_stream.h \
     include/grpc++/impl/codegen/time.h \
     include/grpc/impl/codegen/byte_buffer.h \
+    include/grpc/impl/codegen/byte_buffer_reader.h \
     include/grpc/impl/codegen/compression_types.h \
     include/grpc/impl/codegen/connectivity_state.h \
     include/grpc/impl/codegen/grpc_types.h \
@@ -3559,6 +3562,7 @@
     include/grpc++/impl/codegen/sync_stream.h \
     include/grpc++/impl/codegen/time.h \
     include/grpc/impl/codegen/byte_buffer.h \
+    include/grpc/impl/codegen/byte_buffer_reader.h \
     include/grpc/impl/codegen/compression_types.h \
     include/grpc/impl/codegen/connectivity_state.h \
     include/grpc/impl/codegen/grpc_types.h \
diff --git a/build.yaml b/build.yaml
index 441752d..5e47c08 100644
--- a/build.yaml
+++ b/build.yaml
@@ -7,7 +7,7 @@
   '#3': Use "-preN" suffixes to identify pre-release versions
   '#4': Per-language overrides are possible with (eg) ruby_version tag here
   '#5': See the expand_version.py for all the quirks here
-  version: 0.14.0-dev
+  version: 0.15.0-dev
 filegroups:
 - name: census
   public_headers:
@@ -351,6 +351,7 @@
 - name: grpc_codegen
   public_headers:
   - include/grpc/impl/codegen/byte_buffer.h
+  - include/grpc/impl/codegen/byte_buffer_reader.h
   - include/grpc/impl/codegen/compression_types.h
   - include/grpc/impl/codegen/connectivity_state.h
   - include/grpc/impl/codegen/grpc_types.h
diff --git a/composer.json b/composer.json
index 97b1a5c..b77a59e 100644
--- a/composer.json
+++ b/composer.json
@@ -2,7 +2,7 @@
   "name": "grpc/grpc",
   "type": "library",
   "description": "gRPC library for PHP",
-  "version": "0.14.0",
+  "version": "0.15.0",
   "keywords": ["rpc"],
   "homepage": "http://grpc.io",
   "license": "BSD-3-Clause",
diff --git a/examples/cpp/helloworld/Makefile b/examples/cpp/helloworld/Makefile
index 470b835..58a82db 100644
--- a/examples/cpp/helloworld/Makefile
+++ b/examples/cpp/helloworld/Makefile
@@ -41,7 +41,7 @@
 
 vpath %.proto $(PROTOS_PATH)
 
-all: system-check greeter_client greeter_server greeter_async_client greeter_async_server
+all: system-check greeter_client greeter_server greeter_async_client greeter_async_client2 greeter_async_server
 
 greeter_client: helloworld.pb.o helloworld.grpc.pb.o greeter_client.o
 	$(CXX) $^ $(LDFLAGS) -o $@
@@ -52,6 +52,9 @@
 greeter_async_client: helloworld.pb.o helloworld.grpc.pb.o greeter_async_client.o
 	$(CXX) $^ $(LDFLAGS) -o $@
 
+greeter_async_client2: helloworld.pb.o helloworld.grpc.pb.o greeter_async_client2.o
+	$(CXX) $^ $(LDFLAGS) -o $@
+
 greeter_async_server: helloworld.pb.o helloworld.grpc.pb.o greeter_async_server.o
 	$(CXX) $^ $(LDFLAGS) -o $@
 
@@ -64,7 +67,7 @@
 	$(PROTOC) -I $(PROTOS_PATH) --cpp_out=. $<
 
 clean:
-	rm -f *.o *.pb.cc *.pb.h greeter_client greeter_server greeter_async_client greeter_async_server
+	rm -f *.o *.pb.cc *.pb.h greeter_client greeter_server greeter_async_client greeter_async_client2 greeter_async_server
 
 
 # The following is to test your system and ensure a smoother experience.
diff --git a/examples/cpp/helloworld/greeter_async_client.cc b/examples/cpp/helloworld/greeter_async_client.cc
index 3501426..c1f5eb5 100644
--- a/examples/cpp/helloworld/greeter_async_client.cc
+++ b/examples/cpp/helloworld/greeter_async_client.cc
@@ -53,7 +53,7 @@
   explicit GreeterClient(std::shared_ptr<Channel> channel)
       : stub_(Greeter::NewStub(channel)) {}
 
-  // Assambles the client's payload, sends it and presents the response back
+  // Assembles the client's payload, sends it and presents the response back
   // from the server.
   std::string SayHello(const std::string& user) {
     // Data we are sending to the server.
@@ -74,9 +74,9 @@
     // Storage for the status of the RPC upon completion.
     Status status;
 
-    // stub_->AsyncSayHello() perform the RPC call, returning an instance we
-    // store in "rpc". Because we are using the asynchronous API, we need the
-    // hold on to the "rpc" instance in order to get updates on the ongoig RPC.
+    // stub_->AsyncSayHello() performs the RPC call, returning an instance we
+    // store in "rpc". Because we are using the asynchronous API, we need to
+    // hold on to the "rpc" instance in order to get updates on the ongoing RPC.
     std::unique_ptr<ClientAsyncResponseReader<HelloReply> > rpc(
         stub_->AsyncSayHello(&context, request, &cq));
 
diff --git a/examples/cpp/helloworld/greeter_async_client2.cc b/examples/cpp/helloworld/greeter_async_client2.cc
new file mode 100644
index 0000000..0902376
--- /dev/null
+++ b/examples/cpp/helloworld/greeter_async_client2.cc
@@ -0,0 +1,153 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <iostream>
+#include <memory>
+#include <string>
+
+#include <grpc++/grpc++.h>
+#include <thread>
+
+#include "helloworld.grpc.pb.h"
+
+using grpc::Channel;
+using grpc::ClientAsyncResponseReader;
+using grpc::ClientContext;
+using grpc::CompletionQueue;
+using grpc::Status;
+using helloworld::HelloRequest;
+using helloworld::HelloReply;
+using helloworld::Greeter;
+
+class GreeterClient {
+  public:
+    explicit GreeterClient(std::shared_ptr<Channel> channel)
+            : stub_(Greeter::NewStub(channel)) {}
+
+    // Assembles the client's payload and sends it to the server.
+    void SayHello(const std::string& user) {
+        // Data we are sending to the server.
+        HelloRequest request;
+        request.set_name(user);
+
+        // Call object to store rpc data
+        AsyncClientCall* call = new AsyncClientCall;
+
+        // stub_->AsyncSayHello() performs the RPC call, returning an instance to
+        // store in "call". Because we are using the asynchronous API, we need to
+        // hold on to the "call" instance in order to get updates on the ongoing RPC.
+        call->response_reader = stub_->AsyncSayHello(&call->context, request, &cq_);
+
+
+        // Request that, upon completion of the RPC, "reply" be updated with the
+        // server's response; "status" with the indication of whether the operation
+        // was successful. Tag the request with the memory address of the call object.
+        call->response_reader->Finish(&call->reply, &call->status, (void*)call);
+
+    }
+
+    // Loop while listening for completed responses.
+    // Prints out the response from the server.
+    void AsyncCompleteRpc() {
+        void* got_tag;
+        bool ok = false;
+
+        // Block until the next result is available in the completion queue "cq".
+        while (cq_.Next(&got_tag, &ok)) {
+            // The tag in this example is the memory location of the call object
+            AsyncClientCall* call = static_cast<AsyncClientCall*>(got_tag);
+
+            // Verify that the request was completed successfully. Note that "ok"
+            // corresponds solely to the request for updates introduced by Finish().
+            GPR_ASSERT(ok);
+
+            if (call->status.ok())
+                std::cout << "Greeter received: " << call->reply.message() << std::endl;
+            else
+                std::cout << "RPC failed" << std::endl;
+
+            // Once we're complete, deallocate the call object.
+            delete call;
+        }
+    }
+
+  private:
+
+    // struct for keeping state and data information
+    struct AsyncClientCall {
+        // Container for the data we expect from the server.
+        HelloReply reply;
+
+        // Context for the client. It could be used to convey extra information to
+        // the server and/or tweak certain RPC behaviors.
+        ClientContext context;
+
+        // Storage for the status of the RPC upon completion.
+        Status status;
+
+
+        std::unique_ptr<ClientAsyncResponseReader<HelloReply>> response_reader;
+    };
+
+    // Out of the passed in Channel comes the stub, stored here, our view of the
+    // server's exposed services.
+    std::unique_ptr<Greeter::Stub> stub_;
+
+    // The producer-consumer queue we use to communicate asynchronously with the
+    // gRPC runtime.
+    CompletionQueue cq_;
+};
+
+int main(int argc, char** argv) {
+
+
+    // Instantiate the client. It requires a channel, out of which the actual RPCs
+    // are created. This channel models a connection to an endpoint (in this case,
+    // localhost at port 50051). We indicate that the channel isn't authenticated
+    // (use of InsecureChannelCredentials()).
+    GreeterClient greeter(grpc::CreateChannel(
+            "localhost:50051", grpc::InsecureChannelCredentials()));
+
+    // Spawn reader thread that loops indefinitely
+    std::thread thread_ = std::thread(&GreeterClient::AsyncCompleteRpc, &greeter);
+
+    for (int i = 0; i < 100; i++) {
+        std::string user("world " + std::to_string(i));
+        greeter.SayHello(user);  // The actual RPC call!
+    }
+
+    std::cout << "Press control-c to quit" << std::endl << std::endl;
+    thread_.join();  //blocks forever
+
+    return 0;
+}
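
As written, AsyncCompleteRpc() loops until the process is killed, so thread_.join() never returns. A minimal clean-shutdown sketch (not part of this pull request), assuming a hypothetical Shutdown() helper is added to the GreeterClient class above:

    // Hypothetical addition to GreeterClient; cq_ is the CompletionQueue member above.
    void Shutdown() { cq_.Shutdown(); }

    // Hypothetical end of main(): completions already in flight still drain through
    // cq_.Next(), which then returns false and ends the AsyncCompleteRpc() loop.
    greeter.Shutdown();
    thread_.join();  // now returns instead of blocking forever
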
diff --git a/examples/python/helloworld/helloworld_pb2.py b/examples/python/helloworld/helloworld_pb2.py
index 1b2674e..1ee80e4 100644
--- a/examples/python/helloworld/helloworld_pb2.py
+++ b/examples/python/helloworld/helloworld_pb2.py
@@ -1,6 +1,8 @@
 # Generated by the protocol buffer compiler.  DO NOT EDIT!
 # source: helloworld.proto
 
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
 from google.protobuf import descriptor as _descriptor
 from google.protobuf import message as _message
 from google.protobuf import reflection as _reflection
@@ -17,7 +19,7 @@
   name='helloworld.proto',
   package='helloworld',
   syntax='proto3',
-  serialized_pb=b'\n\x10helloworld.proto\x12\nhelloworld\"\x1c\n\x0cHelloRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1d\n\nHelloReply\x12\x0f\n\x07message\x18\x01 \x01(\t2I\n\x07Greeter\x12>\n\x08SayHello\x12\x18.helloworld.HelloRequest\x1a\x16.helloworld.HelloReply\"\x00\x42\x18\n\x10io.grpc.examples\xa2\x02\x03HLWb\x06proto3'
+  serialized_pb=_b('\n\x10helloworld.proto\x12\nhelloworld\"\x1c\n\x0cHelloRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1d\n\nHelloReply\x12\x0f\n\x07message\x18\x01 \x01(\t2I\n\x07Greeter\x12>\n\x08SayHello\x12\x18.helloworld.HelloRequest\x1a\x16.helloworld.HelloReply\"\x00\x42\x36\n\x1bio.grpc.examples.helloworldB\x0fHelloWorldProtoP\x01\xa2\x02\x03HLWb\x06proto3')
 )
 _sym_db.RegisterFileDescriptor(DESCRIPTOR)
 
@@ -34,7 +36,7 @@
     _descriptor.FieldDescriptor(
       name='name', full_name='helloworld.HelloRequest.name', index=0,
       number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=b"".decode('utf-8'),
+      has_default_value=False, default_value=_b("").decode('utf-8'),
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       options=None),
@@ -65,7 +67,7 @@
     _descriptor.FieldDescriptor(
       name='message', full_name='helloworld.HelloReply.message', index=0,
       number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=b"".decode('utf-8'),
+      has_default_value=False, default_value=_b("").decode('utf-8'),
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       options=None),
@@ -104,69 +106,28 @@
 
 
 DESCRIPTOR.has_options = True
-DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\020io.grpc.examples\242\002\003HLW')
+DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.grpc.examples.helloworldB\017HelloWorldProtoP\001\242\002\003HLW'))
 import abc
+import six
 from grpc.beta import implementations as beta_implementations
-from grpc.early_adopter import implementations as early_adopter_implementations
-from grpc.framework.alpha import utilities as alpha_utilities
+from grpc.beta import interfaces as beta_interfaces
 from grpc.framework.common import cardinality
 from grpc.framework.interfaces.face import utilities as face_utilities
-class EarlyAdopterGreeterServicer(object):
-  """<fill me in later!>"""
-  __metaclass__ = abc.ABCMeta
-  @abc.abstractmethod
-  def SayHello(self, request, context):
-    raise NotImplementedError()
-class EarlyAdopterGreeterServer(object):
-  """<fill me in later!>"""
-  __metaclass__ = abc.ABCMeta
-  @abc.abstractmethod
-  def start(self):
-    raise NotImplementedError()
-  @abc.abstractmethod
-  def stop(self):
-    raise NotImplementedError()
-class EarlyAdopterGreeterStub(object):
-  """<fill me in later!>"""
-  __metaclass__ = abc.ABCMeta
-  @abc.abstractmethod
-  def SayHello(self, request):
-    raise NotImplementedError()
-  SayHello.async = None
-def early_adopter_create_Greeter_server(servicer, port, private_key=None, certificate_chain=None):
-  import helloworld_pb2
-  import helloworld_pb2
-  method_service_descriptions = {
-    "SayHello": alpha_utilities.unary_unary_service_description(
-      servicer.SayHello,
-      helloworld_pb2.HelloRequest.FromString,
-      helloworld_pb2.HelloReply.SerializeToString,
-    ),
-  }
-  return early_adopter_implementations.server("helloworld.Greeter", method_service_descriptions, port, private_key=private_key, certificate_chain=certificate_chain)
-def early_adopter_create_Greeter_stub(host, port, metadata_transformer=None, secure=False, root_certificates=None, private_key=None, certificate_chain=None, server_host_override=None):
-  import helloworld_pb2
-  import helloworld_pb2
-  method_invocation_descriptions = {
-    "SayHello": alpha_utilities.unary_unary_invocation_description(
-      helloworld_pb2.HelloRequest.SerializeToString,
-      helloworld_pb2.HelloReply.FromString,
-    ),
-  }
-  return early_adopter_implementations.stub("helloworld.Greeter", method_invocation_descriptions, host, port, metadata_transformer=metadata_transformer, secure=secure, root_certificates=root_certificates, private_key=private_key, certificate_chain=certificate_chain, server_host_override=server_host_override)
 
 class BetaGreeterServicer(object):
-  """<fill me in later!>"""
-  __metaclass__ = abc.ABCMeta
-  @abc.abstractmethod
+  """The greeting service definition.
+  """
   def SayHello(self, request, context):
-    raise NotImplementedError()
+    """Sends a greeting
+    """
+    context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
 
 class BetaGreeterStub(object):
-  """The interface to which stubs will conform."""
-  __metaclass__ = abc.ABCMeta
-  @abc.abstractmethod
+  """The greeting service definition.
+  """
   def SayHello(self, request, timeout):
+    """Sends a greeting
+    """
     raise NotImplementedError()
   SayHello.future = None
 
diff --git a/examples/python/route_guide/route_guide_pb2.py b/examples/python/route_guide/route_guide_pb2.py
index d4d9f8d..81d5d07 100644
--- a/examples/python/route_guide/route_guide_pb2.py
+++ b/examples/python/route_guide/route_guide_pb2.py
@@ -1,6 +1,8 @@
 # Generated by the protocol buffer compiler.  DO NOT EDIT!
 # source: route_guide.proto
 
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
 from google.protobuf import descriptor as _descriptor
 from google.protobuf import message as _message
 from google.protobuf import reflection as _reflection
@@ -17,7 +19,7 @@
   name='route_guide.proto',
   package='routeguide',
   syntax='proto3',
-  serialized_pb=b'\n\x11route_guide.proto\x12\nrouteguide\",\n\x05Point\x12\x10\n\x08latitude\x18\x01 \x01(\x05\x12\x11\n\tlongitude\x18\x02 \x01(\x05\"I\n\tRectangle\x12\x1d\n\x02lo\x18\x01 \x01(\x0b\x32\x11.routeguide.Point\x12\x1d\n\x02hi\x18\x02 \x01(\x0b\x32\x11.routeguide.Point\"<\n\x07\x46\x65\x61ture\x12\x0c\n\x04name\x18\x01 \x01(\t\x12#\n\x08location\x18\x02 \x01(\x0b\x32\x11.routeguide.Point\"A\n\tRouteNote\x12#\n\x08location\x18\x01 \x01(\x0b\x32\x11.routeguide.Point\x12\x0f\n\x07message\x18\x02 \x01(\t\"b\n\x0cRouteSummary\x12\x13\n\x0bpoint_count\x18\x01 \x01(\x05\x12\x15\n\rfeature_count\x18\x02 \x01(\x05\x12\x10\n\x08\x64istance\x18\x03 \x01(\x05\x12\x14\n\x0c\x65lapsed_time\x18\x04 \x01(\x05\x32\x85\x02\n\nRouteGuide\x12\x36\n\nGetFeature\x12\x11.routeguide.Point\x1a\x13.routeguide.Feature\"\x00\x12>\n\x0cListFeatures\x12\x15.routeguide.Rectangle\x1a\x13.routeguide.Feature\"\x00\x30\x01\x12>\n\x0bRecordRoute\x12\x11.routeguide.Point\x1a\x18.routeguide.RouteSummary\"\x00(\x01\x12?\n\tRouteChat\x12\x15.routeguide.RouteNote\x1a\x15.routeguide.RouteNote\"\x00(\x01\x30\x01\x42\x0f\n\x07\x65x.grpc\xa2\x02\x03RTGb\x06proto3'
+  serialized_pb=_b('\n\x11route_guide.proto\x12\nrouteguide\",\n\x05Point\x12\x10\n\x08latitude\x18\x01 \x01(\x05\x12\x11\n\tlongitude\x18\x02 \x01(\x05\"I\n\tRectangle\x12\x1d\n\x02lo\x18\x01 \x01(\x0b\x32\x11.routeguide.Point\x12\x1d\n\x02hi\x18\x02 \x01(\x0b\x32\x11.routeguide.Point\"<\n\x07\x46\x65\x61ture\x12\x0c\n\x04name\x18\x01 \x01(\t\x12#\n\x08location\x18\x02 \x01(\x0b\x32\x11.routeguide.Point\"A\n\tRouteNote\x12#\n\x08location\x18\x01 \x01(\x0b\x32\x11.routeguide.Point\x12\x0f\n\x07message\x18\x02 \x01(\t\"b\n\x0cRouteSummary\x12\x13\n\x0bpoint_count\x18\x01 \x01(\x05\x12\x15\n\rfeature_count\x18\x02 \x01(\x05\x12\x10\n\x08\x64istance\x18\x03 \x01(\x05\x12\x14\n\x0c\x65lapsed_time\x18\x04 \x01(\x05\x32\x85\x02\n\nRouteGuide\x12\x36\n\nGetFeature\x12\x11.routeguide.Point\x1a\x13.routeguide.Feature\"\x00\x12>\n\x0cListFeatures\x12\x15.routeguide.Rectangle\x1a\x13.routeguide.Feature\"\x00\x30\x01\x12>\n\x0bRecordRoute\x12\x11.routeguide.Point\x1a\x18.routeguide.RouteSummary\"\x00(\x01\x12?\n\tRouteChat\x12\x15.routeguide.RouteNote\x1a\x15.routeguide.RouteNote\"\x00(\x01\x30\x01\x42\x36\n\x1bio.grpc.examples.routeguideB\x0fRouteGuideProtoP\x01\xa2\x02\x03RTGb\x06proto3')
 )
 _sym_db.RegisterFileDescriptor(DESCRIPTOR)
 
@@ -110,7 +112,7 @@
     _descriptor.FieldDescriptor(
       name='name', full_name='routeguide.Feature.name', index=0,
       number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=b"".decode('utf-8'),
+      has_default_value=False, default_value=_b("").decode('utf-8'),
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       options=None),
@@ -155,7 +157,7 @@
     _descriptor.FieldDescriptor(
       name='message', full_name='routeguide.RouteNote.message', index=1,
       number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=b"".decode('utf-8'),
+      has_default_value=False, default_value=_b("").decode('utf-8'),
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       options=None),
@@ -274,149 +276,86 @@
 
 
 DESCRIPTOR.has_options = True
-DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\007ex.grpc\242\002\003RTG')
+DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.grpc.examples.routeguideB\017RouteGuideProtoP\001\242\002\003RTG'))
 import abc
+import six
 from grpc.beta import implementations as beta_implementations
-from grpc.early_adopter import implementations as early_adopter_implementations
-from grpc.framework.alpha import utilities as alpha_utilities
+from grpc.beta import interfaces as beta_interfaces
 from grpc.framework.common import cardinality
 from grpc.framework.interfaces.face import utilities as face_utilities
-class EarlyAdopterRouteGuideServicer(object):
-  """<fill me in later!>"""
-  __metaclass__ = abc.ABCMeta
-  @abc.abstractmethod
-  def GetFeature(self, request, context):
-    raise NotImplementedError()
-  @abc.abstractmethod
-  def ListFeatures(self, request, context):
-    raise NotImplementedError()
-  @abc.abstractmethod
-  def RecordRoute(self, request_iterator, context):
-    raise NotImplementedError()
-  @abc.abstractmethod
-  def RouteChat(self, request_iterator, context):
-    raise NotImplementedError()
-class EarlyAdopterRouteGuideServer(object):
-  """<fill me in later!>"""
-  __metaclass__ = abc.ABCMeta
-  @abc.abstractmethod
-  def start(self):
-    raise NotImplementedError()
-  @abc.abstractmethod
-  def stop(self):
-    raise NotImplementedError()
-class EarlyAdopterRouteGuideStub(object):
-  """<fill me in later!>"""
-  __metaclass__ = abc.ABCMeta
-  @abc.abstractmethod
-  def GetFeature(self, request):
-    raise NotImplementedError()
-  GetFeature.async = None
-  @abc.abstractmethod
-  def ListFeatures(self, request):
-    raise NotImplementedError()
-  ListFeatures.async = None
-  @abc.abstractmethod
-  def RecordRoute(self, request_iterator):
-    raise NotImplementedError()
-  RecordRoute.async = None
-  @abc.abstractmethod
-  def RouteChat(self, request_iterator):
-    raise NotImplementedError()
-  RouteChat.async = None
-def early_adopter_create_RouteGuide_server(servicer, port, private_key=None, certificate_chain=None):
-  import route_guide_pb2
-  import route_guide_pb2
-  import route_guide_pb2
-  import route_guide_pb2
-  import route_guide_pb2
-  import route_guide_pb2
-  import route_guide_pb2
-  import route_guide_pb2
-  method_service_descriptions = {
-    "GetFeature": alpha_utilities.unary_unary_service_description(
-      servicer.GetFeature,
-      route_guide_pb2.Point.FromString,
-      route_guide_pb2.Feature.SerializeToString,
-    ),
-    "ListFeatures": alpha_utilities.unary_stream_service_description(
-      servicer.ListFeatures,
-      route_guide_pb2.Rectangle.FromString,
-      route_guide_pb2.Feature.SerializeToString,
-    ),
-    "RecordRoute": alpha_utilities.stream_unary_service_description(
-      servicer.RecordRoute,
-      route_guide_pb2.Point.FromString,
-      route_guide_pb2.RouteSummary.SerializeToString,
-    ),
-    "RouteChat": alpha_utilities.stream_stream_service_description(
-      servicer.RouteChat,
-      route_guide_pb2.RouteNote.FromString,
-      route_guide_pb2.RouteNote.SerializeToString,
-    ),
-  }
-  return early_adopter_implementations.server("routeguide.RouteGuide", method_service_descriptions, port, private_key=private_key, certificate_chain=certificate_chain)
-def early_adopter_create_RouteGuide_stub(host, port, metadata_transformer=None, secure=False, root_certificates=None, private_key=None, certificate_chain=None, server_host_override=None):
-  import route_guide_pb2
-  import route_guide_pb2
-  import route_guide_pb2
-  import route_guide_pb2
-  import route_guide_pb2
-  import route_guide_pb2
-  import route_guide_pb2
-  import route_guide_pb2
-  method_invocation_descriptions = {
-    "GetFeature": alpha_utilities.unary_unary_invocation_description(
-      route_guide_pb2.Point.SerializeToString,
-      route_guide_pb2.Feature.FromString,
-    ),
-    "ListFeatures": alpha_utilities.unary_stream_invocation_description(
-      route_guide_pb2.Rectangle.SerializeToString,
-      route_guide_pb2.Feature.FromString,
-    ),
-    "RecordRoute": alpha_utilities.stream_unary_invocation_description(
-      route_guide_pb2.Point.SerializeToString,
-      route_guide_pb2.RouteSummary.FromString,
-    ),
-    "RouteChat": alpha_utilities.stream_stream_invocation_description(
-      route_guide_pb2.RouteNote.SerializeToString,
-      route_guide_pb2.RouteNote.FromString,
-    ),
-  }
-  return early_adopter_implementations.stub("routeguide.RouteGuide", method_invocation_descriptions, host, port, metadata_transformer=metadata_transformer, secure=secure, root_certificates=root_certificates, private_key=private_key, certificate_chain=certificate_chain, server_host_override=server_host_override)
 
 class BetaRouteGuideServicer(object):
-  """<fill me in later!>"""
-  __metaclass__ = abc.ABCMeta
-  @abc.abstractmethod
+  """Interface exported by the server.
+  """
   def GetFeature(self, request, context):
-    raise NotImplementedError()
-  @abc.abstractmethod
+    """A simple RPC.
+
+    Obtains the feature at a given position.
+
+    A feature with an empty name is returned if there's no feature at the given
+    position.
+    """
+    context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
   def ListFeatures(self, request, context):
-    raise NotImplementedError()
-  @abc.abstractmethod
+    """A server-to-client streaming RPC.
+
+    Obtains the Features available within the given Rectangle.  Results are
+    streamed rather than returned at once (e.g. in a response message with a
+    repeated field), as the rectangle may cover a large area and contain a
+    huge number of features.
+    """
+    context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
   def RecordRoute(self, request_iterator, context):
-    raise NotImplementedError()
-  @abc.abstractmethod
+    """A client-to-server streaming RPC.
+
+    Accepts a stream of Points on a route being traversed, returning a
+    RouteSummary when traversal is completed.
+    """
+    context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
   def RouteChat(self, request_iterator, context):
-    raise NotImplementedError()
+    """A Bidirectional streaming RPC.
+
+    Accepts a stream of RouteNotes sent while a route is being traversed,
+    while receiving other RouteNotes (e.g. from other users).
+    """
+    context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
 
 class BetaRouteGuideStub(object):
-  """The interface to which stubs will conform."""
-  __metaclass__ = abc.ABCMeta
-  @abc.abstractmethod
+  """Interface exported by the server.
+  """
   def GetFeature(self, request, timeout):
+    """A simple RPC.
+
+    Obtains the feature at a given position.
+
+    A feature with an empty name is returned if there's no feature at the given
+    position.
+    """
     raise NotImplementedError()
   GetFeature.future = None
-  @abc.abstractmethod
   def ListFeatures(self, request, timeout):
+    """A server-to-client streaming RPC.
+
+    Obtains the Features available within the given Rectangle.  Results are
+    streamed rather than returned at once (e.g. in a response message with a
+    repeated field), as the rectangle may cover a large area and contain a
+    huge number of features.
+    """
     raise NotImplementedError()
-  @abc.abstractmethod
   def RecordRoute(self, request_iterator, timeout):
+    """A client-to-server streaming RPC.
+
+    Accepts a stream of Points on a route being traversed, returning a
+    RouteSummary when traversal is completed.
+    """
     raise NotImplementedError()
   RecordRoute.future = None
-  @abc.abstractmethod
   def RouteChat(self, request_iterator, timeout):
+    """A Bidirectional streaming RPC.
+
+    Accepts a stream of RouteNotes sent while a route is being traversed,
+    while receiving other RouteNotes (e.g. from other users).
+    """
     raise NotImplementedError()
 
 def beta_create_RouteGuide_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
diff --git a/gRPC.podspec b/gRPC.podspec
index 77d35bd..569f89b 100644
--- a/gRPC.podspec
+++ b/gRPC.podspec
@@ -305,6 +305,7 @@
                       'include/grpc/grpc.h',
                       'include/grpc/status.h',
                       'include/grpc/impl/codegen/byte_buffer.h',
+                      'include/grpc/impl/codegen/byte_buffer_reader.h',
                       'include/grpc/impl/codegen/compression_types.h',
                       'include/grpc/impl/codegen/connectivity_state.h',
                       'include/grpc/impl/codegen/grpc_types.h',
diff --git a/grpc.gemspec b/grpc.gemspec
index e68cd81..475fc99 100755
--- a/grpc.gemspec
+++ b/grpc.gemspec
@@ -149,6 +149,7 @@
   s.files += %w( include/grpc/grpc.h )
   s.files += %w( include/grpc/status.h )
   s.files += %w( include/grpc/impl/codegen/byte_buffer.h )
+  s.files += %w( include/grpc/impl/codegen/byte_buffer_reader.h )
   s.files += %w( include/grpc/impl/codegen/compression_types.h )
   s.files += %w( include/grpc/impl/codegen/connectivity_state.h )
   s.files += %w( include/grpc/impl/codegen/grpc_types.h )
diff --git a/include/grpc++/impl/codegen/core_codegen_interface.h b/include/grpc++/impl/codegen/core_codegen_interface.h
index 16424ba..aa9013c 100644
--- a/include/grpc++/impl/codegen/core_codegen_interface.h
+++ b/include/grpc++/impl/codegen/core_codegen_interface.h
@@ -49,18 +49,6 @@
 /// \warning This interface should be considered internal and private.
 class CoreCodegenInterface {
  public:
-  // Serialize the msg into a buffer created inside the function. The caller
-  // should destroy the returned buffer when done with it. If serialization
-  // fails,
-  // false is returned and buffer is left unchanged.
-  virtual Status SerializeProto(const grpc::protobuf::Message& msg,
-                                grpc_byte_buffer** buffer) = 0;
-
-  // The caller keeps ownership of buffer and msg.
-  virtual Status DeserializeProto(grpc_byte_buffer* buffer,
-                                  grpc::protobuf::Message* msg,
-                                  int max_message_size) = 0;
-
   /// Upon a failed assertion, log the error.
   virtual void assert_fail(const char* failed_assertion) = 0;
 
@@ -76,9 +64,29 @@
   virtual void gpr_free(void* p) = 0;
 
   virtual void grpc_byte_buffer_destroy(grpc_byte_buffer* bb) = 0;
+
+  virtual void grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader,
+                                            grpc_byte_buffer* buffer) = 0;
+  virtual void grpc_byte_buffer_reader_destroy(
+      grpc_byte_buffer_reader* reader) = 0;
+  virtual int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader* reader,
+                                           gpr_slice* slice) = 0;
+
+  virtual grpc_byte_buffer* grpc_raw_byte_buffer_create(gpr_slice* slice,
+                                                        size_t nslices) = 0;
+
+  virtual gpr_slice gpr_slice_malloc(size_t length) = 0;
+  virtual void gpr_slice_unref(gpr_slice slice) = 0;
+  virtual gpr_slice gpr_slice_split_tail(gpr_slice* s, size_t split) = 0;
+  virtual void gpr_slice_buffer_add(gpr_slice_buffer* sb, gpr_slice slice) = 0;
+  virtual void gpr_slice_buffer_pop(gpr_slice_buffer* sb) = 0;
+
   virtual void grpc_metadata_array_init(grpc_metadata_array* array) = 0;
   virtual void grpc_metadata_array_destroy(grpc_metadata_array* array) = 0;
 
+  virtual const Status& ok() = 0;
+  virtual const Status& cancelled() = 0;
+
   virtual gpr_timespec gpr_inf_future(gpr_clock_type type) = 0;
 };
 
diff --git a/include/grpc++/impl/codegen/proto_utils.h b/include/grpc++/impl/codegen/proto_utils.h
index 2aaa3c3..d044ddc 100644
--- a/include/grpc++/impl/codegen/proto_utils.h
+++ b/include/grpc++/impl/codegen/proto_utils.h
@@ -41,26 +41,179 @@
 #include <grpc++/impl/codegen/serialization_traits.h>
 #include <grpc++/impl/codegen/status.h>
 #include <grpc/impl/codegen/byte_buffer.h>
+#include <grpc/impl/codegen/byte_buffer_reader.h>
 #include <grpc/impl/codegen/log.h>
+#include <grpc/impl/codegen/slice.h>
 
 namespace grpc {
 
 extern CoreCodegenInterface* g_core_codegen_interface;
 
+namespace {
+
+const int kGrpcBufferWriterMaxBufferLength = 8192;
+
+class GrpcBufferWriter GRPC_FINAL
+    : public ::grpc::protobuf::io::ZeroCopyOutputStream {
+ public:
+  explicit GrpcBufferWriter(grpc_byte_buffer** bp, int block_size)
+      : block_size_(block_size), byte_count_(0), have_backup_(false) {
+    *bp = g_core_codegen_interface->grpc_raw_byte_buffer_create(NULL, 0);
+    slice_buffer_ = &(*bp)->data.raw.slice_buffer;
+  }
+
+  ~GrpcBufferWriter() GRPC_OVERRIDE {
+    if (have_backup_) {
+      g_core_codegen_interface->gpr_slice_unref(backup_slice_);
+    }
+  }
+
+  bool Next(void** data, int* size) GRPC_OVERRIDE {
+    if (have_backup_) {
+      slice_ = backup_slice_;
+      have_backup_ = false;
+    } else {
+      slice_ = g_core_codegen_interface->gpr_slice_malloc(block_size_);
+    }
+    *data = GPR_SLICE_START_PTR(slice_);
+    // On win x64, int is only 32bit
+    GPR_CODEGEN_ASSERT(GPR_SLICE_LENGTH(slice_) <= INT_MAX);
+    byte_count_ += * size = (int)GPR_SLICE_LENGTH(slice_);
+    g_core_codegen_interface->gpr_slice_buffer_add(slice_buffer_, slice_);
+    return true;
+  }
+
+  void BackUp(int count) GRPC_OVERRIDE {
+    g_core_codegen_interface->gpr_slice_buffer_pop(slice_buffer_);
+    if (count == block_size_) {
+      backup_slice_ = slice_;
+    } else {
+      backup_slice_ = g_core_codegen_interface->gpr_slice_split_tail(
+          &slice_, GPR_SLICE_LENGTH(slice_) - count);
+      g_core_codegen_interface->gpr_slice_buffer_add(slice_buffer_, slice_);
+    }
+    have_backup_ = true;
+    byte_count_ -= count;
+  }
+
+  grpc::protobuf::int64 ByteCount() const GRPC_OVERRIDE { return byte_count_; }
+
+ private:
+  const int block_size_;
+  int64_t byte_count_;
+  gpr_slice_buffer* slice_buffer_;
+  bool have_backup_;
+  gpr_slice backup_slice_;
+  gpr_slice slice_;
+};
+
+class GrpcBufferReader GRPC_FINAL
+    : public ::grpc::protobuf::io::ZeroCopyInputStream {
+ public:
+  explicit GrpcBufferReader(grpc_byte_buffer* buffer)
+      : byte_count_(0), backup_count_(0) {
+    g_core_codegen_interface->grpc_byte_buffer_reader_init(&reader_, buffer);
+  }
+  ~GrpcBufferReader() GRPC_OVERRIDE {
+    g_core_codegen_interface->grpc_byte_buffer_reader_destroy(&reader_);
+  }
+
+  bool Next(const void** data, int* size) GRPC_OVERRIDE {
+    if (backup_count_ > 0) {
+      *data = GPR_SLICE_START_PTR(slice_) + GPR_SLICE_LENGTH(slice_) -
+              backup_count_;
+      GPR_CODEGEN_ASSERT(backup_count_ <= INT_MAX);
+      *size = (int)backup_count_;
+      backup_count_ = 0;
+      return true;
+    }
+    if (!g_core_codegen_interface->grpc_byte_buffer_reader_next(&reader_,
+                                                                &slice_)) {
+      return false;
+    }
+    g_core_codegen_interface->gpr_slice_unref(slice_);
+    *data = GPR_SLICE_START_PTR(slice_);
+    // On win x64, int is only 32bit
+    GPR_CODEGEN_ASSERT(GPR_SLICE_LENGTH(slice_) <= INT_MAX);
+    byte_count_ += * size = (int)GPR_SLICE_LENGTH(slice_);
+    return true;
+  }
+
+  void BackUp(int count) GRPC_OVERRIDE { backup_count_ = count; }
+
+  bool Skip(int count) GRPC_OVERRIDE {
+    const void* data;
+    int size;
+    while (Next(&data, &size)) {
+      if (size >= count) {
+        BackUp(size - count);
+        return true;
+      }
+      // size < count;
+      count -= size;
+    }
+    // error or we have too large count;
+    return false;
+  }
+
+  grpc::protobuf::int64 ByteCount() const GRPC_OVERRIDE {
+    return byte_count_ - backup_count_;
+  }
+
+ private:
+  int64_t byte_count_;
+  int64_t backup_count_;
+  grpc_byte_buffer_reader reader_;
+  gpr_slice slice_;
+};
+}  // namespace
+
 template <class T>
 class SerializationTraits<T, typename std::enable_if<std::is_base_of<
                                  grpc::protobuf::Message, T>::value>::type> {
  public:
   static Status Serialize(const grpc::protobuf::Message& msg,
-                          grpc_byte_buffer** buffer, bool* own_buffer) {
+                          grpc_byte_buffer** bp, bool* own_buffer) {
     *own_buffer = true;
-    return g_core_codegen_interface->SerializeProto(msg, buffer);
+    int byte_size = msg.ByteSize();
+    if (byte_size <= kGrpcBufferWriterMaxBufferLength) {
+      gpr_slice slice = g_core_codegen_interface->gpr_slice_malloc(byte_size);
+      GPR_CODEGEN_ASSERT(
+          GPR_SLICE_END_PTR(slice) ==
+          msg.SerializeWithCachedSizesToArray(GPR_SLICE_START_PTR(slice)));
+      *bp = g_core_codegen_interface->grpc_raw_byte_buffer_create(&slice, 1);
+      g_core_codegen_interface->gpr_slice_unref(slice);
+      return g_core_codegen_interface->ok();
+    } else {
+      GrpcBufferWriter writer(bp, kGrpcBufferWriterMaxBufferLength);
+      return msg.SerializeToZeroCopyStream(&writer)
+                 ? g_core_codegen_interface->ok()
+                 : Status(StatusCode::INTERNAL, "Failed to serialize message");
+    }
   }
+
   static Status Deserialize(grpc_byte_buffer* buffer,
                             grpc::protobuf::Message* msg,
                             int max_message_size) {
-    return g_core_codegen_interface->DeserializeProto(buffer, msg,
-                                                      max_message_size);
+    if (buffer == nullptr) {
+      return Status(StatusCode::INTERNAL, "No payload");
+    }
+    Status result = g_core_codegen_interface->ok();
+    {
+      GrpcBufferReader reader(buffer);
+      ::grpc::protobuf::io::CodedInputStream decoder(&reader);
+      if (max_message_size > 0) {
+        decoder.SetTotalBytesLimit(max_message_size, max_message_size);
+      }
+      if (!msg->ParseFromCodedStream(&decoder)) {
+        result = Status(StatusCode::INTERNAL, msg->InitializationErrorString());
+      }
+      if (!decoder.ConsumedEntireMessage()) {
+        result = Status(StatusCode::INTERNAL, "Did not read entire message");
+      }
+    }
+    g_core_codegen_interface->grpc_byte_buffer_destroy(buffer);
+    return result;
   }
 };
 
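The rewritten proto_utils.h does protobuf serialization entirely through the CoreCodegenInterface hooks added above, instead of delegating to SerializeProto/DeserializeProto. A minimal sketch (not part of this change) of how generated code exercises these traits, assuming helloworld's HelloRequest message and a program that already links and initializes grpc++ so the core codegen hooks are installed:

    #include <grpc++/grpc++.h>
    #include <grpc++/impl/codegen/proto_utils.h>
    #include "helloworld.pb.h"

    grpc::Status RoundTrip(const helloworld::HelloRequest& in,
                           helloworld::HelloRequest* out) {
      grpc_byte_buffer* bb = nullptr;
      bool own_buffer = false;
      // Messages up to kGrpcBufferWriterMaxBufferLength take the single-slice
      // fast path; larger ones stream through GrpcBufferWriter.
      grpc::Status s =
          grpc::SerializationTraits<helloworld::HelloRequest>::Serialize(
              in, &bb, &own_buffer);
      if (!s.ok()) return s;
      // Deserialize() consumes and destroys the byte buffer; a non-positive
      // max_message_size means no explicit limit is applied.
      return grpc::SerializationTraits<helloworld::HelloRequest>::Deserialize(
          bb, out, -1);
    }
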
diff --git a/include/grpc/byte_buffer_reader.h b/include/grpc/byte_buffer_reader.h
index 9a1c617..e95bf2f 100644
--- a/include/grpc/byte_buffer_reader.h
+++ b/include/grpc/byte_buffer_reader.h
@@ -34,25 +34,6 @@
 #ifndef GRPC_BYTE_BUFFER_READER_H
 #define GRPC_BYTE_BUFFER_READER_H
 
-#include <grpc/byte_buffer.h>
-#include <grpc/grpc.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct grpc_byte_buffer_reader {
-  grpc_byte_buffer *buffer_in;
-  grpc_byte_buffer *buffer_out;
-  /* Different current objects correspond to different types of byte buffers */
-  union {
-    /* Index into a slice buffer's array of slices */
-    unsigned index;
-  } current;
-};
-
-#ifdef __cplusplus
-}
-#endif
+#include <grpc/impl/codegen/byte_buffer_reader.h>
 
 #endif /* GRPC_BYTE_BUFFER_READER_H */
diff --git a/include/grpc/impl/codegen/byte_buffer_reader.h b/include/grpc/impl/codegen/byte_buffer_reader.h
new file mode 100644
index 0000000..10c3829
--- /dev/null
+++ b/include/grpc/impl/codegen/byte_buffer_reader.h
@@ -0,0 +1,57 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_IMPL_CODEGEN_BYTE_BUFFER_READER_H
+#define GRPC_IMPL_CODEGEN_BYTE_BUFFER_READER_H
+
+#include <grpc/impl/codegen/byte_buffer.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct grpc_byte_buffer_reader {
+  grpc_byte_buffer *buffer_in;
+  grpc_byte_buffer *buffer_out;
+  /* Different current objects correspond to different types of byte buffers */
+  union {
+    /* Index into a slice buffer's array of slices */
+    unsigned index;
+  } current;
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_IMPL_CODEGEN_BYTE_BUFFER_READER_H */
diff --git a/include/grpc/impl/codegen/grpc_types.h b/include/grpc/impl/codegen/grpc_types.h
index 4c73730..7b20cc1 100644
--- a/include/grpc/impl/codegen/grpc_types.h
+++ b/include/grpc/impl/codegen/grpc_types.h
@@ -307,7 +307,9 @@
   GRPC_OP_RECV_STATUS_ON_CLIENT,
   /** Receive close on the server: one and only one must be made on the
       server.
-      This op completes after the close has been received by the server. */
+      This op completes after the close has been received by the server.
+      This operation always succeeds, meaning ops paired with this operation
+      will also appear to succeed, even though they may not have. */
   GRPC_OP_RECV_CLOSE_ON_SERVER
 } grpc_op_type;
 
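The new wording above documents a subtle point: the batch carrying GRPC_OP_RECV_CLOSE_ON_SERVER reports success regardless of how the call ended. A minimal core-API sketch (not part of this change) of what that means on the server side; `call` and `cq` are assumed to come from grpc_server_request_call():

    #include <grpc/grpc.h>
    #include <grpc/support/time.h>
    #include <string.h>

    void WaitForClientClose(grpc_call* call, grpc_completion_queue* cq) {
      int cancelled = 0;
      grpc_op op;
      memset(&op, 0, sizeof(op));
      op.op = GRPC_OP_RECV_CLOSE_ON_SERVER;
      op.data.recv_close_on_server.cancelled = &cancelled;

      grpc_call_start_batch(call, &op, 1, /*tag=*/&op, NULL);
      grpc_event ev = grpc_completion_queue_next(
          cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);

      // Because this op "always succeeds", ev.success says nothing useful here;
      // the cancelled flag is what reports whether the client cancelled the call.
      (void)ev;
      if (cancelled) {
        /* the client cancelled or the call terminated abnormally */
      }
    }
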
diff --git a/package.json b/package.json
index 5ed7f36..54a44ca 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "grpc",
-  "version": "0.14.0-dev",
+  "version": "0.15.0-dev",
   "author": "Google Inc.",
   "description": "gRPC Library for Node",
   "homepage": "http://www.grpc.io/",
diff --git a/package.xml b/package.xml
index ffb1c56..feb2717 100644
--- a/package.xml
+++ b/package.xml
@@ -13,8 +13,8 @@
  <date>2016-04-19</date>
  <time>16:06:07</time>
  <version>
-  <release>0.14.0</release>
-  <api>0.14.0</api>
+  <release>0.15.0</release>
+  <api>0.15.0</api>
  </version>
  <stability>
   <release>beta</release>
@@ -156,6 +156,7 @@
     <file baseinstalldir="/" name="include/grpc/grpc.h" role="src" />
     <file baseinstalldir="/" name="include/grpc/status.h" role="src" />
     <file baseinstalldir="/" name="include/grpc/impl/codegen/byte_buffer.h" role="src" />
+    <file baseinstalldir="/" name="include/grpc/impl/codegen/byte_buffer_reader.h" role="src" />
     <file baseinstalldir="/" name="include/grpc/impl/codegen/compression_types.h" role="src" />
     <file baseinstalldir="/" name="include/grpc/impl/codegen/connectivity_state.h" role="src" />
     <file baseinstalldir="/" name="include/grpc/impl/codegen/grpc_types.h" role="src" />
@@ -1013,8 +1014,8 @@
   </release>
   <release>
    <version>
-    <release>0.14.0</release>
-    <api>0.14.0</api>
+    <release>0.15.0</release>
+    <api>0.15.0</api>
    </version>
    <stability>
     <release>beta</release>
diff --git a/setup.py b/setup.py
index cd0d3a1..5cd2612 100644
--- a/setup.py
+++ b/setup.py
@@ -236,6 +236,8 @@
     'ext_modules': CYTHON_EXTENSION_MODULES,
     'packages': list(PACKAGES),
     'package_dir': PACKAGE_DIRECTORIES,
+    # TODO(atash): Figure out why auditwheel doesn't like namespace packages.
+    #'namespace_packages': ['grpc'],
     'package_data': PACKAGE_DATA,
     'install_requires': INSTALL_REQUIRES,
     'setup_requires': SETUP_REQUIRES,
diff --git a/src/compiler/python_generator.cc b/src/compiler/python_generator.cc
index 59137e1..8e76e6d 100644
--- a/src/compiler/python_generator.cc
+++ b/src/compiler/python_generator.cc
@@ -182,18 +182,40 @@
   return true;
 }
 
+// Get all comments (leading, leading_detached, trailing) and print them as a
+// docstring. Any leading space of a line will be removed, but the line wrapping
+// will not be changed.
+template <typename DescriptorType>
+static void PrintAllComments(const DescriptorType* desc, Printer* printer) {
+  std::vector<grpc::string> comments;
+  grpc_generator::GetComment(desc, grpc_generator::COMMENTTYPE_LEADING_DETACHED,
+                             &comments);
+  grpc_generator::GetComment(desc, grpc_generator::COMMENTTYPE_LEADING,
+                             &comments);
+  grpc_generator::GetComment(desc, grpc_generator::COMMENTTYPE_TRAILING,
+                             &comments);
+  if (comments.empty()) {
+    return;
+  }
+  printer->Print("\"\"\"");
+  for (auto it = comments.begin(); it != comments.end(); ++it) {
+    size_t start_pos = it->find_first_not_of(' ');
+    if (start_pos != grpc::string::npos) {
+      printer->Print(it->c_str() + start_pos);
+    }
+    printer->Print("\n");
+  }
+  printer->Print("\"\"\"\n");
+}
+
 bool PrintBetaServicer(const ServiceDescriptor* service,
                        Printer* out) {
-  grpc::string doc = "<fill me in later!>";
-  map<grpc::string, grpc::string> dict = ListToDict({
-        "Service", service->name(),
-        "Documentation", doc,
-      });
   out->Print("\n");
-  out->Print(dict, "class Beta$Service$Servicer(object):\n");
+  out->Print("class Beta$Service$Servicer(object):\n", "Service",
+             service->name());
   {
     IndentScope raii_class_indent(out);
-    out->Print(dict, "\"\"\"$Documentation$\"\"\"\n");
+    PrintAllComments(service, out);
     for (int i = 0; i < service->method_count(); ++i) {
       auto meth = service->method(i);
       grpc::string arg_name = meth->client_streaming() ?
@@ -202,6 +224,7 @@
                  "Method", meth->name(), "ArgName", arg_name);
       {
         IndentScope raii_method_indent(out);
+        PrintAllComments(meth, out);
         out->Print("context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)\n");
       }
     }
@@ -211,16 +234,11 @@
 
 bool PrintBetaStub(const ServiceDescriptor* service,
                    Printer* out) {
-  grpc::string doc = "The interface to which stubs will conform.";
-  map<grpc::string, grpc::string> dict = ListToDict({
-        "Service", service->name(),
-        "Documentation", doc,
-      });
   out->Print("\n");
-  out->Print(dict, "class Beta$Service$Stub(object):\n");
+  out->Print("class Beta$Service$Stub(object):\n", "Service", service->name());
   {
     IndentScope raii_class_indent(out);
-    out->Print(dict, "\"\"\"$Documentation$\"\"\"\n");
+    PrintAllComments(service, out);
     for (int i = 0; i < service->method_count(); ++i) {
       const MethodDescriptor* meth = service->method(i);
       grpc::string arg_name = meth->client_streaming() ?
@@ -229,6 +247,7 @@
       out->Print(methdict, "def $Method$(self, $ArgName$, timeout):\n");
       {
         IndentScope raii_method_indent(out);
+        PrintAllComments(meth, out);
         out->Print("raise NotImplementedError()\n");
       }
       if (!meth->server_streaming()) {
diff --git a/src/compiler/ruby_generator.cc b/src/compiler/ruby_generator.cc
index 5ac56ad..936a186 100644
--- a/src/compiler/ruby_generator.cc
+++ b/src/compiler/ruby_generator.cc
@@ -98,8 +98,8 @@
   out->Print("self.marshal_class_method = :encode\n");
   out->Print("self.unmarshal_class_method = :decode\n");
   std::map<grpc::string, grpc::string> pkg_vars =
-      ListToDict({"service.name", service->name(), "pkg.name", package, });
-  out->Print(pkg_vars, "self.service_name = '$pkg.name$.$service.name$'\n");
+      ListToDict({"service_full_name", service->full_name()});
+  out->Print(pkg_vars, "self.service_name = '$service_full_name$'\n");
   out->Print("\n");
   for (int i = 0; i < service->method_count(); ++i) {
     PrintMethod(service->method(i), package, out);
diff --git a/src/core/ext/client_config/resolver_registry.c b/src/core/ext/client_config/resolver_registry.c
index 07f29bc..e7a4abd 100644
--- a/src/core/ext/client_config/resolver_registry.c
+++ b/src/core/ext/client_config/resolver_registry.c
@@ -47,7 +47,6 @@
 static char *g_default_resolver_prefix;
 
 void grpc_resolver_registry_init(const char *default_resolver_prefix) {
-  g_number_of_resolvers = 0;
   g_default_resolver_prefix = gpr_strdup(default_resolver_prefix);
 }
 
@@ -57,6 +56,13 @@
     grpc_resolver_factory_unref(g_all_of_the_resolvers[i]);
   }
   gpr_free(g_default_resolver_prefix);
+  // FIXME(ctiller): this should live in grpc_resolver_registry_init,
+  // however that would have the client_config plugin call this AFTER we start
+  // registering resolvers from third party plugins, and so they'd never show
+  // up.
+  // We likely need some kind of dependency system for plugins.... what form
+  // that takes is TBD.
+  g_number_of_resolvers = 0;
 }
 
 void grpc_register_resolver_type(grpc_resolver_factory *factory) {
diff --git a/src/core/ext/lb_policy/round_robin/round_robin.c b/src/core/ext/lb_policy/round_robin/round_robin.c
index 3f6051b..dcdc0c6 100644
--- a/src/core/ext/lb_policy/round_robin/round_robin.c
+++ b/src/core/ext/lb_policy/round_robin/round_robin.c
@@ -306,8 +306,10 @@
   size_t i;
   p->started_picking = 1;
 
-  gpr_log(GPR_DEBUG, "LB_POLICY: p=%p num_subchannels=%d", p,
-          p->num_subchannels);
+  if (grpc_lb_round_robin_trace) {
+    gpr_log(GPR_DEBUG, "LB_POLICY: p=%p num_subchannels=%d", p,
+            p->num_subchannels);
+  }
 
   for (i = 0; i < p->num_subchannels; i++) {
     subchannel_data *sd = p->subchannels[i];
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
index 8c85937..5363322 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
@@ -783,7 +783,8 @@
     grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
     uint32_t goaway_error, gpr_slice goaway_text) {
   char *msg = gpr_dump_slice(goaway_text, GPR_DUMP_HEX | GPR_DUMP_ASCII);
-  gpr_log(GPR_DEBUG, "got goaway [%d]: %s", goaway_error, msg);
+  GRPC_CHTTP2_IF_TRACING(
+      gpr_log(GPR_DEBUG, "got goaway [%d]: %s", goaway_error, msg));
   gpr_free(msg);
   gpr_slice_unref(goaway_text);
   transport_global->seen_goaway = 1;
diff --git a/src/core/ext/transport/chttp2/transport/hpack_encoder.c b/src/core/ext/transport/chttp2/transport/hpack_encoder.c
index 555027c..ebeee37 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_encoder.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_encoder.c
@@ -63,6 +63,8 @@
 /* don't consider adding anything bigger than this to the hpack table */
 #define MAX_DECODER_SPACE_USAGE 512
 
+extern int grpc_http_trace;
+
 typedef struct {
   int is_first_frame;
   /* number of bytes in 'output' when we started the frame - used to calculate
@@ -532,7 +534,9 @@
     }
   }
   c->advertise_table_size_change = 1;
-  gpr_log(GPR_DEBUG, "set max table size from encoder to %d", max_table_size);
+  if (grpc_http_trace) {
+    gpr_log(GPR_DEBUG, "set max table size from encoder to %d", max_table_size);
+  }
 }
 
 void grpc_chttp2_encode_header(grpc_chttp2_hpack_compressor *c,
diff --git a/src/core/ext/transport/chttp2/transport/hpack_table.c b/src/core/ext/transport/chttp2/transport/hpack_table.c
index 4d64506..295f31c 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_table.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_table.c
@@ -253,7 +253,9 @@
   if (tbl->max_bytes == max_bytes) {
     return;
   }
-  gpr_log(GPR_DEBUG, "Update hpack parser max size to %d", max_bytes);
+  if (grpc_http_trace) {
+    gpr_log(GPR_DEBUG, "Update hpack parser max size to %d", max_bytes);
+  }
   while (tbl->mem_used > max_bytes) {
     evict1(tbl);
   }
diff --git a/src/core/lib/channel/channel_args.h b/src/core/lib/channel/channel_args.h
index 0a51780..23c7b7b 100644
--- a/src/core/lib/channel/channel_args.h
+++ b/src/core/lib/channel/channel_args.h
@@ -56,10 +56,6 @@
 /** Destroy arguments created by \a grpc_channel_args_copy */
 void grpc_channel_args_destroy(grpc_channel_args *a);
 
-/** Reads census_enabled settings from channel args. Returns 1 if census_enabled
- * is specified in channel args, otherwise returns 0. */
-int grpc_channel_args_is_census_enabled(const grpc_channel_args *a);
-
 /** Returns the compression algorithm set in \a a. */
 grpc_compression_algorithm grpc_channel_args_get_compression_algorithm(
     const grpc_channel_args *a);
diff --git a/src/core/lib/surface/version.c b/src/core/lib/surface/version.c
index fe954cb..aca76d2 100644
--- a/src/core/lib/surface/version.c
+++ b/src/core/lib/surface/version.c
@@ -36,4 +36,4 @@
 
 #include <grpc/grpc.h>
 
-const char *grpc_version_string(void) { return "0.14.0-dev"; }
+const char *grpc_version_string(void) { return "0.15.0-dev"; }
diff --git a/src/cpp/common/core_codegen.cc b/src/cpp/common/core_codegen.cc
index 33a8f75..8e8d42e 100644
--- a/src/cpp/common/core_codegen.cc
+++ b/src/cpp/common/core_codegen.cc
@@ -48,124 +48,6 @@
 
 #include "src/core/lib/profiling/timers.h"
 
-namespace {
-
-const int kGrpcBufferWriterMaxBufferLength = 8192;
-
-class GrpcBufferWriter GRPC_FINAL
-    : public ::grpc::protobuf::io::ZeroCopyOutputStream {
- public:
-  explicit GrpcBufferWriter(grpc_byte_buffer** bp, int block_size)
-      : block_size_(block_size), byte_count_(0), have_backup_(false) {
-    *bp = grpc_raw_byte_buffer_create(NULL, 0);
-    slice_buffer_ = &(*bp)->data.raw.slice_buffer;
-  }
-
-  ~GrpcBufferWriter() GRPC_OVERRIDE {
-    if (have_backup_) {
-      gpr_slice_unref(backup_slice_);
-    }
-  }
-
-  bool Next(void** data, int* size) GRPC_OVERRIDE {
-    if (have_backup_) {
-      slice_ = backup_slice_;
-      have_backup_ = false;
-    } else {
-      slice_ = gpr_slice_malloc(block_size_);
-    }
-    *data = GPR_SLICE_START_PTR(slice_);
-    // On win x64, int is only 32bit
-    GPR_ASSERT(GPR_SLICE_LENGTH(slice_) <= INT_MAX);
-    byte_count_ += * size = (int)GPR_SLICE_LENGTH(slice_);
-    gpr_slice_buffer_add(slice_buffer_, slice_);
-    return true;
-  }
-
-  void BackUp(int count) GRPC_OVERRIDE {
-    gpr_slice_buffer_pop(slice_buffer_);
-    if (count == block_size_) {
-      backup_slice_ = slice_;
-    } else {
-      backup_slice_ =
-          gpr_slice_split_tail(&slice_, GPR_SLICE_LENGTH(slice_) - count);
-      gpr_slice_buffer_add(slice_buffer_, slice_);
-    }
-    have_backup_ = true;
-    byte_count_ -= count;
-  }
-
-  grpc::protobuf::int64 ByteCount() const GRPC_OVERRIDE { return byte_count_; }
-
- private:
-  const int block_size_;
-  int64_t byte_count_;
-  gpr_slice_buffer* slice_buffer_;
-  bool have_backup_;
-  gpr_slice backup_slice_;
-  gpr_slice slice_;
-};
-
-class GrpcBufferReader GRPC_FINAL
-    : public ::grpc::protobuf::io::ZeroCopyInputStream {
- public:
-  explicit GrpcBufferReader(grpc_byte_buffer* buffer)
-      : byte_count_(0), backup_count_(0) {
-    grpc_byte_buffer_reader_init(&reader_, buffer);
-  }
-  ~GrpcBufferReader() GRPC_OVERRIDE {
-    grpc_byte_buffer_reader_destroy(&reader_);
-  }
-
-  bool Next(const void** data, int* size) GRPC_OVERRIDE {
-    if (backup_count_ > 0) {
-      *data = GPR_SLICE_START_PTR(slice_) + GPR_SLICE_LENGTH(slice_) -
-              backup_count_;
-      GPR_ASSERT(backup_count_ <= INT_MAX);
-      *size = (int)backup_count_;
-      backup_count_ = 0;
-      return true;
-    }
-    if (!grpc_byte_buffer_reader_next(&reader_, &slice_)) {
-      return false;
-    }
-    gpr_slice_unref(slice_);
-    *data = GPR_SLICE_START_PTR(slice_);
-    // On win x64, int is only 32bit
-    GPR_ASSERT(GPR_SLICE_LENGTH(slice_) <= INT_MAX);
-    byte_count_ += * size = (int)GPR_SLICE_LENGTH(slice_);
-    return true;
-  }
-
-  void BackUp(int count) GRPC_OVERRIDE { backup_count_ = count; }
-
-  bool Skip(int count) GRPC_OVERRIDE {
-    const void* data;
-    int size;
-    while (Next(&data, &size)) {
-      if (size >= count) {
-        BackUp(size - count);
-        return true;
-      }
-      // size < count;
-      count -= size;
-    }
-    // error or we have too large count;
-    return false;
-  }
-
-  grpc::protobuf::int64 ByteCount() const GRPC_OVERRIDE {
-    return byte_count_ - backup_count_;
-  }
-
- private:
-  int64_t byte_count_;
-  int64_t backup_count_;
-  grpc_byte_buffer_reader reader_;
-  gpr_slice slice_;
-};
-}  // namespace
-
 namespace grpc {
 
 grpc_completion_queue* CoreCodegen::grpc_completion_queue_create(
@@ -192,6 +74,44 @@
   ::grpc_byte_buffer_destroy(bb);
 }
 
+void CoreCodegen::grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader,
+                                               grpc_byte_buffer* buffer) {
+  ::grpc_byte_buffer_reader_init(reader, buffer);
+}
+
+void CoreCodegen::grpc_byte_buffer_reader_destroy(
+    grpc_byte_buffer_reader* reader) {
+  ::grpc_byte_buffer_reader_destroy(reader);
+}
+
+int CoreCodegen::grpc_byte_buffer_reader_next(grpc_byte_buffer_reader* reader,
+                                              gpr_slice* slice) {
+  return ::grpc_byte_buffer_reader_next(reader, slice);
+}
+
+grpc_byte_buffer* CoreCodegen::grpc_raw_byte_buffer_create(gpr_slice* slice,
+                                                           size_t nslices) {
+  return ::grpc_raw_byte_buffer_create(slice, nslices);
+}
+
+gpr_slice CoreCodegen::gpr_slice_malloc(size_t length) {
+  return ::gpr_slice_malloc(length);
+}
+
+void CoreCodegen::gpr_slice_unref(gpr_slice slice) { ::gpr_slice_unref(slice); }
+
+gpr_slice CoreCodegen::gpr_slice_split_tail(gpr_slice* s, size_t split) {
+  return ::gpr_slice_split_tail(s, split);
+}
+
+void CoreCodegen::gpr_slice_buffer_add(gpr_slice_buffer* sb, gpr_slice slice) {
+  ::gpr_slice_buffer_add(sb, slice);
+}
+
+void CoreCodegen::gpr_slice_buffer_pop(gpr_slice_buffer* sb) {
+  ::gpr_slice_buffer_pop(sb);
+}
+
 void CoreCodegen::grpc_metadata_array_init(grpc_metadata_array* array) {
   ::grpc_metadata_array_init(array);
 }
@@ -200,6 +120,10 @@
   ::grpc_metadata_array_destroy(array);
 }
 
+const Status& CoreCodegen::ok() { return grpc::Status::OK; }
+
+const Status& CoreCodegen::cancelled() { return grpc::Status::CANCELLED; }
+
 gpr_timespec CoreCodegen::gpr_inf_future(gpr_clock_type type) {
   return ::gpr_inf_future(type);
 }
@@ -209,48 +133,4 @@
   abort();
 }
 
-Status CoreCodegen::SerializeProto(const grpc::protobuf::Message& msg,
-                                   grpc_byte_buffer** bp) {
-  GPR_TIMER_SCOPE("SerializeProto", 0);
-  int byte_size = msg.ByteSize();
-  if (byte_size <= kGrpcBufferWriterMaxBufferLength) {
-    gpr_slice slice = gpr_slice_malloc(byte_size);
-    GPR_ASSERT(GPR_SLICE_END_PTR(slice) ==
-               msg.SerializeWithCachedSizesToArray(GPR_SLICE_START_PTR(slice)));
-    *bp = grpc_raw_byte_buffer_create(&slice, 1);
-    gpr_slice_unref(slice);
-    return Status::OK;
-  } else {
-    GrpcBufferWriter writer(bp, kGrpcBufferWriterMaxBufferLength);
-    return msg.SerializeToZeroCopyStream(&writer)
-               ? Status::OK
-               : Status(StatusCode::INTERNAL, "Failed to serialize message");
-  }
-}
-
-Status CoreCodegen::DeserializeProto(grpc_byte_buffer* buffer,
-                                     grpc::protobuf::Message* msg,
-                                     int max_message_size) {
-  GPR_TIMER_SCOPE("DeserializeProto", 0);
-  if (buffer == nullptr) {
-    return Status(StatusCode::INTERNAL, "No payload");
-  }
-  Status result = Status::OK;
-  {
-    GrpcBufferReader reader(buffer);
-    ::grpc::protobuf::io::CodedInputStream decoder(&reader);
-    if (max_message_size > 0) {
-      decoder.SetTotalBytesLimit(max_message_size, max_message_size);
-    }
-    if (!msg->ParseFromCodedStream(&decoder)) {
-      result = Status(StatusCode::INTERNAL, msg->InitializationErrorString());
-    }
-    if (!decoder.ConsumedEntireMessage()) {
-      result = Status(StatusCode::INTERNAL, "Did not read entire message");
-    }
-  }
-  grpc_byte_buffer_destroy(buffer);
-  return result;
-}
-
 }  // namespace grpc
diff --git a/src/cpp/common/core_codegen.h b/src/cpp/common/core_codegen.h
index e15cb4c..656b11e 100644
--- a/src/cpp/common/core_codegen.h
+++ b/src/cpp/common/core_codegen.h
@@ -42,13 +42,6 @@
 /// Implementation of the core codegen interface.
 class CoreCodegen : public CoreCodegenInterface {
  private:
-  Status SerializeProto(const grpc::protobuf::Message& msg,
-                        grpc_byte_buffer** bp) override;
-
-  Status DeserializeProto(grpc_byte_buffer* buffer,
-                          grpc::protobuf::Message* msg,
-                          int max_message_size) override;
-
   grpc_completion_queue* grpc_completion_queue_create(void* reserved) override;
   void grpc_completion_queue_destroy(grpc_completion_queue* cq) override;
   grpc_event grpc_completion_queue_pluck(grpc_completion_queue* cq, void* tag,
@@ -60,11 +53,30 @@
 
   void grpc_byte_buffer_destroy(grpc_byte_buffer* bb) override;
 
+  void grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader,
+                                    grpc_byte_buffer* buffer) override;
+  void grpc_byte_buffer_reader_destroy(
+      grpc_byte_buffer_reader* reader) override;
+  int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader* reader,
+                                   gpr_slice* slice) override;
+
+  grpc_byte_buffer* grpc_raw_byte_buffer_create(gpr_slice* slice,
+                                                size_t nslices) override;
+
+  gpr_slice gpr_slice_malloc(size_t length) override;
+  void gpr_slice_unref(gpr_slice slice) override;
+  gpr_slice gpr_slice_split_tail(gpr_slice* s, size_t split) override;
+  void gpr_slice_buffer_add(gpr_slice_buffer* sb, gpr_slice slice) override;
+  void gpr_slice_buffer_pop(gpr_slice_buffer* sb) override;
+
   void grpc_metadata_array_init(grpc_metadata_array* array) override;
   void grpc_metadata_array_destroy(grpc_metadata_array* array) override;
 
   gpr_timespec gpr_inf_future(gpr_clock_type type) override;
 
+  virtual const Status& ok() override;
+  virtual const Status& cancelled() override;
+
   void assert_fail(const char* failed_assertion) override;
 };
 
diff --git a/src/csharp/Grpc.Core.Tests/ClientServerTest.cs b/src/csharp/Grpc.Core.Tests/ClientServerTest.cs
index 6c13a4f..d92addb 100644
--- a/src/csharp/Grpc.Core.Tests/ClientServerTest.cs
+++ b/src/csharp/Grpc.Core.Tests/ClientServerTest.cs
@@ -167,6 +167,37 @@
         }
 
         [Test]
+        public async Task ServerStreamingCall_EndOfStreamIsIdempotent()
+        {
+            helper.ServerStreamingHandler = new ServerStreamingServerMethod<string, string>(async (request, responseStream, context) =>
+            {
+            });
+
+            var call = Calls.AsyncServerStreamingCall(helper.CreateServerStreamingCall(), "");
+
+            Assert.IsFalse(await call.ResponseStream.MoveNext());
+            Assert.IsFalse(await call.ResponseStream.MoveNext());
+        }
+
+        [Test]
+        public async Task ServerStreamingCall_ErrorCanBeAwaitedTwice()
+        {
+            helper.ServerStreamingHandler = new ServerStreamingServerMethod<string, string>(async (request, responseStream, context) =>
+            {
+                context.Status = new Status(StatusCode.InvalidArgument, "");
+            });
+
+            var call = Calls.AsyncServerStreamingCall(helper.CreateServerStreamingCall(), "");
+
+            var ex = Assert.ThrowsAsync<RpcException>(async () => await call.ResponseStream.MoveNext());
+            Assert.AreEqual(StatusCode.InvalidArgument, ex.Status.StatusCode);
+
+            // attempting MoveNext again should result in throwing the same exception.
+            var ex2 = Assert.ThrowsAsync<RpcException>(async () => await call.ResponseStream.MoveNext());
+            Assert.AreEqual(StatusCode.InvalidArgument, ex2.Status.StatusCode);
+        }
+
+        [Test]
         public async Task DuplexStreamingCall()
         {
             helper.DuplexStreamingHandler = new DuplexStreamingServerMethod<string, string>(async (requestStream, responseStream, context) =>
@@ -209,6 +240,38 @@
         }
 
         [Test]
+        public async Task ClientStreamingCall_ServerSideReadAfterCancelNotificationReturnsNull()
+        {
+            var handlerStartedBarrier = new TaskCompletionSource<object>();
+            var cancelNotificationReceivedBarrier = new TaskCompletionSource<object>();
+            var successTcs = new TaskCompletionSource<string>();
+
+            helper.ClientStreamingHandler = new ClientStreamingServerMethod<string, string>(async (requestStream, context) =>
+            {
+                handlerStartedBarrier.SetResult(null);
+
+                // wait for cancellation to be delivered.
+                context.CancellationToken.Register(() => cancelNotificationReceivedBarrier.SetResult(null));
+                await cancelNotificationReceivedBarrier.Task;
+
+                var moveNextResult = await requestStream.MoveNext();
+                successTcs.SetResult(!moveNextResult ? "SUCCESS" : "FAIL");
+                return "";
+            });
+
+            var cts = new CancellationTokenSource();
+            var call = Calls.AsyncClientStreamingCall(helper.CreateClientStreamingCall(new CallOptions(cancellationToken: cts.Token)));
+
+            await handlerStartedBarrier.Task;
+            cts.Cancel();
+
+            var ex = Assert.ThrowsAsync<RpcException>(async () => await call.ResponseAsync);
+            Assert.AreEqual(StatusCode.Cancelled, ex.Status.StatusCode);
+
+            Assert.AreEqual("SUCCESS", await successTcs.Task);
+        }
+
+        [Test]
         public async Task AsyncUnaryCall_EchoMetadata()
         {
             helper.UnaryHandler = new UnaryServerMethod<string, string>(async (request, context) =>
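The two server-streaming tests added above pin down how the response stream behaves once it has finished: reaching end-of-stream and hitting an error are both idempotent. A small consumption sketch (illustrative only; DrainAsync is a hypothetical helper, responseStream stands for any IAsyncStreamReader<string> such as call.ResponseStream from Calls.AsyncServerStreamingCall, and the usual System, System.Threading.Tasks and Grpc.Core usings are assumed):

    // Illustrative sketch, not part of this change.
    async Task DrainAsync(IAsyncStreamReader<string> responseStream)
    {
        while (await responseStream.MoveNext())
        {
            Console.WriteLine(responseStream.Current);
        }
        // With this change, awaiting MoveNext() again after end-of-stream simply
        // returns false again; a failed call rethrows the same RpcException.
        bool again = await responseStream.MoveNext();
    }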
diff --git a/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj b/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj
index 0cd059c..47131fc 100644
--- a/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj
+++ b/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj
@@ -84,6 +84,8 @@
     <Compile Include="SanityTest.cs" />
     <Compile Include="HalfcloseTest.cs" />
     <Compile Include="NUnitMain.cs" />
+    <Compile Include="Internal\FakeNativeCall.cs" />
+    <Compile Include="Internal\AsyncCallServerTest.cs" />
   </ItemGroup>
   <Import Project="$(MSBuildBinPath)\Microsoft.CSharp.targets" />
   <ItemGroup>
diff --git a/src/csharp/Grpc.Core.Tests/Internal/AsyncCallServerTest.cs b/src/csharp/Grpc.Core.Tests/Internal/AsyncCallServerTest.cs
new file mode 100644
index 0000000..0e20476
--- /dev/null
+++ b/src/csharp/Grpc.Core.Tests/Internal/AsyncCallServerTest.cs
@@ -0,0 +1,191 @@
+#region Copyright notice and license
+
+// Copyright 2015, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#endregion
+
+using System;
+using System.Collections.Generic;
+using System.Runtime.InteropServices;
+using System.Threading.Tasks;
+
+using Grpc.Core.Internal;
+using NUnit.Framework;
+
+namespace Grpc.Core.Internal.Tests
+{
+    /// <summary>
+    /// Uses a fake native call to test how the <c>AsyncCallServer</c> wrapping code interacts with the C core in different situations.
+    /// </summary>
+    public class AsyncCallServerTest
+    {
+        Server server;
+        FakeNativeCall fakeCall;
+        AsyncCallServer<string, string> asyncCallServer;
+
+        [SetUp]
+        public void Init()
+        {
+            var environment = GrpcEnvironment.AddRef();
+
+            // Create a fake server just so we have an instance to refer to.
+            // The server won't actually be used at all.
+            server = new Server()
+            {
+                Ports = { { "localhost", 0, ServerCredentials.Insecure } }
+            };
+            server.Start();
+
+            fakeCall = new FakeNativeCall();
+            asyncCallServer = new AsyncCallServer<string, string>(
+                Marshallers.StringMarshaller.Serializer, Marshallers.StringMarshaller.Deserializer,
+                environment,
+                server);
+            asyncCallServer.InitializeForTesting(fakeCall);
+        }
+
+        [TearDown]
+        public void Cleanup()
+        {
+            server.ShutdownAsync().Wait();
+            GrpcEnvironment.Release();
+        }
+
+        [Test]
+        public void CancelNotificationAfterStartDisposes()
+        {
+            var finishedTask = asyncCallServer.ServerSideCallAsync();
+            fakeCall.ReceivedCloseOnServerHandler(true, cancelled: true);
+            AssertFinished(asyncCallServer, fakeCall, finishedTask);
+        }
+
+        [Test]
+        public void CancelNotificationAfterStartDisposesAfterPendingReadFinishes()
+        {
+            var finishedTask = asyncCallServer.ServerSideCallAsync();
+            var requestStream = new ServerRequestStream<string, string>(asyncCallServer);
+
+            var moveNextTask = requestStream.MoveNext();
+
+            fakeCall.ReceivedCloseOnServerHandler(true, cancelled: true);
+            fakeCall.ReceivedMessageHandler(true, null);
+            Assert.IsFalse(moveNextTask.Result);
+
+            AssertFinished(asyncCallServer, fakeCall, finishedTask);
+        }
+
+        [Test]
+        public void ReadAfterCancelNotificationCanSucceed()
+        {
+            var finishedTask = asyncCallServer.ServerSideCallAsync();
+            var requestStream = new ServerRequestStream<string, string>(asyncCallServer);
+
+            fakeCall.ReceivedCloseOnServerHandler(true, cancelled: true);
+
+            // Check that starting a read after cancel notification has been processed is legal.
+            var moveNextTask = requestStream.MoveNext();
+            Assert.IsFalse(moveNextTask.Result);
+
+            AssertFinished(asyncCallServer, fakeCall, finishedTask);
+        }
+
+        [Test]
+        public void ReadCompletionFailureClosesRequestStream()
+        {
+            var finishedTask = asyncCallServer.ServerSideCallAsync();
+            var requestStream = new ServerRequestStream<string, string>(asyncCallServer);
+
+            // if a read completion's success==false, the request stream will silently finish
+            // and we rely on C core cancelling the call.
+            var moveNextTask = requestStream.MoveNext();
+            fakeCall.ReceivedMessageHandler(false, null);
+            Assert.IsFalse(moveNextTask.Result);
+
+            fakeCall.ReceivedCloseOnServerHandler(true, cancelled: true);
+            AssertFinished(asyncCallServer, fakeCall, finishedTask);
+        }
+
+        [Test]
+        public void WriteAfterCancelNotificationFails()
+        {
+            var finishedTask = asyncCallServer.ServerSideCallAsync();
+            var requestStream = new ServerRequestStream<string, string>(asyncCallServer);
+            var responseStream = new ServerResponseStream<string, string>(asyncCallServer);
+
+            fakeCall.ReceivedCloseOnServerHandler(true, cancelled: true);
+
+            // TODO(jtattermusch): should we throw a different exception type instead?
+            Assert.Throws(typeof(InvalidOperationException), () => responseStream.WriteAsync("request1"));
+            AssertFinished(asyncCallServer, fakeCall, finishedTask);
+        }
+
+        [Test]
+        public void WriteCompletionFailureThrows()
+        {
+            var finishedTask = asyncCallServer.ServerSideCallAsync();
+            var responseStream = new ServerResponseStream<string, string>(asyncCallServer);
+
+            var writeTask = responseStream.WriteAsync("request1");
+            fakeCall.SendCompletionHandler(false);
+            // TODO(jtattermusch): should we throw a different exception type instead?
+            Assert.ThrowsAsync(typeof(InvalidOperationException), async () => await writeTask);
+
+            fakeCall.ReceivedCloseOnServerHandler(true, cancelled: true);
+            AssertFinished(asyncCallServer, fakeCall, finishedTask);
+        }
+
+        [Test]
+        public void WriteAndWriteStatusCanRunConcurrently()
+        {
+            var finishedTask = asyncCallServer.ServerSideCallAsync();
+            var responseStream = new ServerResponseStream<string, string>(asyncCallServer);
+
+            var writeTask = responseStream.WriteAsync("request1");
+            var writeStatusTask = asyncCallServer.SendStatusFromServerAsync(Status.DefaultSuccess, new Metadata(), null);
+
+            fakeCall.SendCompletionHandler(true);
+            fakeCall.SendStatusFromServerHandler(true);
+
+            Assert.DoesNotThrowAsync(async () => await writeTask);
+            Assert.DoesNotThrowAsync(async () => await writeStatusTask);
+
+            fakeCall.ReceivedCloseOnServerHandler(true, cancelled: true);
+
+            AssertFinished(asyncCallServer, fakeCall, finishedTask);
+        }
+
+        static void AssertFinished(AsyncCallServer<string, string> asyncCallServer, FakeNativeCall fakeCall, Task finishedTask)
+        {
+            Assert.IsTrue(fakeCall.IsDisposed);
+            Assert.IsTrue(finishedTask.IsCompleted);
+            Assert.DoesNotThrow(() => finishedTask.Wait());
+        }
+    }
+}
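Every test in this new fixture follows the same shape: start an operation through the wrapper, then invoke the handler that FakeNativeCall captured, simulating the C core completing the batch. Condensed, using the fixture's own names (this is an illustration of the pattern, not an additional test):

    var responseStream = new ServerResponseStream<string, string>(asyncCallServer);
    var writeTask = responseStream.WriteAsync("response");   // captures fakeCall.SendCompletionHandler
    fakeCall.SendCompletionHandler(true);                     // pretend the C core finished the batch
    Assert.DoesNotThrowAsync(async () => await writeTask);    // the pending write now completes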
diff --git a/src/csharp/Grpc.Core.Tests/Internal/AsyncCallTest.cs b/src/csharp/Grpc.Core.Tests/Internal/AsyncCallTest.cs
index a678e4d..abe9d4a 100644
--- a/src/csharp/Grpc.Core.Tests/Internal/AsyncCallTest.cs
+++ b/src/csharp/Grpc.Core.Tests/Internal/AsyncCallTest.cs
@@ -32,6 +32,7 @@
 #endregion
 
 using System;
+using System.Collections.Generic;
 using System.Runtime.InteropServices;
 using System.Threading.Tasks;
 
@@ -40,6 +41,9 @@
 
 namespace Grpc.Core.Internal.Tests
 {
+    /// <summary>
+    /// Uses a fake native call to test how the <c>AsyncCall</c> wrapping code interacts with the C core in different situations.
+    /// </summary>
     public class AsyncCallTest
     {
         Channel channel;
@@ -75,8 +79,8 @@
         public void AsyncUnary_StreamingOperationsNotAllowed()
         {
             asyncCall.UnaryCallAsync("request1");
-            Assert.Throws(typeof(InvalidOperationException),
-                () => asyncCall.StartReadMessage((x,y) => {}));
+            Assert.ThrowsAsync(typeof(InvalidOperationException),
+                async () => await asyncCall.ReadMessageAsync());
             Assert.Throws(typeof(InvalidOperationException),
                 () => asyncCall.StartSendMessage("abc", new WriteFlags(), (x,y) => {}));
         }
@@ -119,6 +123,14 @@
         }
 
         [Test]
+        public void ClientStreaming_StreamingReadNotAllowed()
+        {
+            asyncCall.ClientStreamingCallAsync();
+            Assert.ThrowsAsync(typeof(InvalidOperationException),
+                async () => await asyncCall.ReadMessageAsync());
+        }
+
+        [Test]
         public void ClientStreaming_NoRequest_Success()
         {
             var resultTask = asyncCall.ClientStreamingCallAsync();
@@ -142,6 +154,283 @@
             AssertUnaryResponseError(asyncCall, fakeCall, resultTask, StatusCode.InvalidArgument);
         }
 
+        [Test]
+        public void ClientStreaming_MoreRequests_Success()
+        {
+            var resultTask = asyncCall.ClientStreamingCallAsync();
+            var requestStream = new ClientRequestStream<string, string>(asyncCall);
+
+            var writeTask = requestStream.WriteAsync("request1");
+            fakeCall.SendCompletionHandler(true);
+            writeTask.Wait();
+
+            var writeTask2 = requestStream.WriteAsync("request2");
+            fakeCall.SendCompletionHandler(true);
+            writeTask2.Wait();
+
+            var completeTask = requestStream.CompleteAsync();
+            fakeCall.SendCompletionHandler(true);
+            completeTask.Wait();
+
+            fakeCall.UnaryResponseClientHandler(true,
+                new ClientSideStatus(Status.DefaultSuccess, new Metadata()),
+                CreateResponsePayload(),
+                new Metadata());
+
+            AssertUnaryResponseSuccess(asyncCall, fakeCall, resultTask);
+        }
+
+        [Test]
+        public void ClientStreaming_WriteFailure()
+        {
+            var resultTask = asyncCall.ClientStreamingCallAsync();
+            var requestStream = new ClientRequestStream<string, string>(asyncCall);
+
+            var writeTask = requestStream.WriteAsync("request1");
+            fakeCall.SendCompletionHandler(false);
+            Assert.ThrowsAsync(typeof(InvalidOperationException), async () => await writeTask);
+
+            fakeCall.UnaryResponseClientHandler(true,
+                CreateClientSideStatus(StatusCode.Internal),
+                CreateResponsePayload(),
+                new Metadata());
+
+            AssertUnaryResponseError(asyncCall, fakeCall, resultTask, StatusCode.Internal);
+        }
+
+        [Test]
+        public void ClientStreaming_WriteAfterReceivingStatusFails()
+        {
+            var resultTask = asyncCall.ClientStreamingCallAsync();
+            var requestStream = new ClientRequestStream<string, string>(asyncCall);
+
+            fakeCall.UnaryResponseClientHandler(true,
+                new ClientSideStatus(Status.DefaultSuccess, new Metadata()),
+                CreateResponsePayload(),
+                new Metadata());
+
+            AssertUnaryResponseSuccess(asyncCall, fakeCall, resultTask);
+            Assert.Throws(typeof(InvalidOperationException), () => requestStream.WriteAsync("request1"));
+        }
+
+        [Test]
+        public void ClientStreaming_CompleteAfterReceivingStatusSucceeds()
+        {
+            var resultTask = asyncCall.ClientStreamingCallAsync();
+            var requestStream = new ClientRequestStream<string, string>(asyncCall);
+
+            fakeCall.UnaryResponseClientHandler(true,
+                new ClientSideStatus(Status.DefaultSuccess, new Metadata()),
+                CreateResponsePayload(),
+                new Metadata());
+
+            AssertUnaryResponseSuccess(asyncCall, fakeCall, resultTask);
+            Assert.DoesNotThrowAsync(async () => await requestStream.CompleteAsync());
+        }
+
+        [Test]
+        public void ClientStreaming_WriteAfterCancellationRequestFails()
+        {
+            var resultTask = asyncCall.ClientStreamingCallAsync();
+            var requestStream = new ClientRequestStream<string, string>(asyncCall);
+
+            asyncCall.Cancel();
+            Assert.IsTrue(fakeCall.IsCancelled);
+
+            Assert.Throws(typeof(OperationCanceledException), () => requestStream.WriteAsync("request1"));
+
+            fakeCall.UnaryResponseClientHandler(true,
+                CreateClientSideStatus(StatusCode.Cancelled),
+                CreateResponsePayload(),
+                new Metadata());
+
+            AssertUnaryResponseError(asyncCall, fakeCall, resultTask, StatusCode.Cancelled);
+        }
+
+        [Test]
+        public void ServerStreaming_StreamingSendNotAllowed()
+        {
+            asyncCall.StartServerStreamingCall("request1");
+            Assert.Throws(typeof(InvalidOperationException),
+                () => asyncCall.StartSendMessage("abc", new WriteFlags(), (x,y) => {}));
+        }
+
+        [Test]
+        public void ServerStreaming_NoResponse_Success1()
+        {
+            asyncCall.StartServerStreamingCall("request1");
+            var responseStream = new ClientResponseStream<string, string>(asyncCall);
+            var readTask = responseStream.MoveNext();
+
+            fakeCall.ReceivedResponseHeadersHandler(true, new Metadata());
+            Assert.AreEqual(0, asyncCall.ResponseHeadersAsync.Result.Count);
+
+            fakeCall.ReceivedMessageHandler(true, null);
+            fakeCall.ReceivedStatusOnClientHandler(true, new ClientSideStatus(Status.DefaultSuccess, new Metadata()));
+
+            AssertStreamingResponseSuccess(asyncCall, fakeCall, readTask);
+        }
+
+        [Test]
+        public void ServerStreaming_NoResponse_Success2()
+        {
+            asyncCall.StartServerStreamingCall("request1");
+            var responseStream = new ClientResponseStream<string, string>(asyncCall);
+            var readTask = responseStream.MoveNext();
+
+            // try alternative order of completions
+            fakeCall.ReceivedStatusOnClientHandler(true, new ClientSideStatus(Status.DefaultSuccess, new Metadata()));
+            fakeCall.ReceivedMessageHandler(true, null);
+
+            AssertStreamingResponseSuccess(asyncCall, fakeCall, readTask);
+        }
+
+        [Test]
+        public void ServerStreaming_NoResponse_ReadFailure()
+        {
+            asyncCall.StartServerStreamingCall("request1");
+            var responseStream = new ClientResponseStream<string, string>(asyncCall);
+            var readTask = responseStream.MoveNext();
+
+            fakeCall.ReceivedMessageHandler(false, null);  // after a failed read, we rely on C core to deliver appropriate status code.
+            fakeCall.ReceivedStatusOnClientHandler(true, CreateClientSideStatus(StatusCode.Internal));
+
+            AssertStreamingResponseError(asyncCall, fakeCall, readTask, StatusCode.Internal);
+        }
+
+        [Test]
+        public void ServerStreaming_MoreResponses_Success()
+        {
+            asyncCall.StartServerStreamingCall("request1");
+            var responseStream = new ClientResponseStream<string, string>(asyncCall);
+
+            var readTask1 = responseStream.MoveNext();
+            fakeCall.ReceivedMessageHandler(true, CreateResponsePayload());
+            Assert.IsTrue(readTask1.Result);
+            Assert.AreEqual("response1", responseStream.Current);
+
+            var readTask2 = responseStream.MoveNext();
+            fakeCall.ReceivedMessageHandler(true, CreateResponsePayload());
+            Assert.IsTrue(readTask2.Result);
+            Assert.AreEqual("response1", responseStream.Current);
+
+            var readTask3 = responseStream.MoveNext();
+            fakeCall.ReceivedStatusOnClientHandler(true, new ClientSideStatus(Status.DefaultSuccess, new Metadata()));
+            fakeCall.ReceivedMessageHandler(true, null);
+
+            AssertStreamingResponseSuccess(asyncCall, fakeCall, readTask3);
+        }
+
+        [Test]
+        public void DuplexStreaming_NoRequestNoResponse_Success()
+        {
+            asyncCall.StartDuplexStreamingCall();
+            var requestStream = new ClientRequestStream<string, string>(asyncCall);
+            var responseStream = new ClientResponseStream<string, string>(asyncCall);
+
+            var writeTask1 = requestStream.CompleteAsync();
+            fakeCall.SendCompletionHandler(true);
+            Assert.DoesNotThrowAsync(async () => await writeTask1);
+
+            var readTask = responseStream.MoveNext();
+            fakeCall.ReceivedMessageHandler(true, null);
+            fakeCall.ReceivedStatusOnClientHandler(true, new ClientSideStatus(Status.DefaultSuccess, new Metadata()));
+
+            AssertStreamingResponseSuccess(asyncCall, fakeCall, readTask);
+        }
+
+        [Test]
+        public void DuplexStreaming_WriteAfterReceivingStatusFails()
+        {
+            asyncCall.StartDuplexStreamingCall();
+            var requestStream = new ClientRequestStream<string, string>(asyncCall);
+            var responseStream = new ClientResponseStream<string, string>(asyncCall);
+
+            var readTask = responseStream.MoveNext();
+            fakeCall.ReceivedMessageHandler(true, null);
+            fakeCall.ReceivedStatusOnClientHandler(true, new ClientSideStatus(Status.DefaultSuccess, new Metadata()));
+
+            AssertStreamingResponseSuccess(asyncCall, fakeCall, readTask);
+
+            Assert.ThrowsAsync(typeof(InvalidOperationException), async () => await requestStream.WriteAsync("request1"));
+        }
+
+        [Test]
+        public void DuplexStreaming_CompleteAfterReceivingStatusFails()
+        {
+            asyncCall.StartDuplexStreamingCall();
+            var requestStream = new ClientRequestStream<string, string>(asyncCall);
+            var responseStream = new ClientResponseStream<string, string>(asyncCall);
+
+            var readTask = responseStream.MoveNext();
+            fakeCall.ReceivedMessageHandler(true, null);
+            fakeCall.ReceivedStatusOnClientHandler(true, new ClientSideStatus(Status.DefaultSuccess, new Metadata()));
+
+            AssertStreamingResponseSuccess(asyncCall, fakeCall, readTask);
+
+            Assert.DoesNotThrowAsync(async () => await requestStream.CompleteAsync());
+        }
+
+        [Test]
+        public void DuplexStreaming_WriteAfterCancellationRequestFails()
+        {
+            asyncCall.StartDuplexStreamingCall();
+            var requestStream = new ClientRequestStream<string, string>(asyncCall);
+            var responseStream = new ClientResponseStream<string, string>(asyncCall);
+
+            asyncCall.Cancel();
+            Assert.IsTrue(fakeCall.IsCancelled);
+            Assert.Throws(typeof(OperationCanceledException), () => requestStream.WriteAsync("request1"));
+
+            var readTask = responseStream.MoveNext();
+            fakeCall.ReceivedMessageHandler(true, null);
+            fakeCall.ReceivedStatusOnClientHandler(true, CreateClientSideStatus(StatusCode.Cancelled));
+
+            AssertStreamingResponseError(asyncCall, fakeCall, readTask, StatusCode.Cancelled);
+        }
+
+        [Test]
+        public void DuplexStreaming_ReadAfterCancellationRequestCanSucceed()
+        {
+            asyncCall.StartDuplexStreamingCall();
+            var responseStream = new ClientResponseStream<string, string>(asyncCall);
+
+            asyncCall.Cancel();
+            Assert.IsTrue(fakeCall.IsCancelled);
+
+            var readTask1 = responseStream.MoveNext();
+            fakeCall.ReceivedMessageHandler(true, CreateResponsePayload());
+            Assert.IsTrue(readTask1.Result);
+            Assert.AreEqual("response1", responseStream.Current);
+
+            var readTask2 = responseStream.MoveNext();
+            fakeCall.ReceivedMessageHandler(true, null);
+            fakeCall.ReceivedStatusOnClientHandler(true, CreateClientSideStatus(StatusCode.Cancelled));
+
+            AssertStreamingResponseError(asyncCall, fakeCall, readTask2, StatusCode.Cancelled);
+        }
+
+        [Test]
+        public void DuplexStreaming_ReadStartedBeforeCancellationRequestCanSucceed()
+        {
+            asyncCall.StartDuplexStreamingCall();
+            var responseStream = new ClientResponseStream<string, string>(asyncCall);
+
+            var readTask1 = responseStream.MoveNext();  // initiate the read before cancel request
+            asyncCall.Cancel();
+            Assert.IsTrue(fakeCall.IsCancelled);
+
+            fakeCall.ReceivedMessageHandler(true, CreateResponsePayload());
+            Assert.IsTrue(readTask1.Result);
+            Assert.AreEqual("response1", responseStream.Current);
+
+            var readTask2 = responseStream.MoveNext();
+            fakeCall.ReceivedMessageHandler(true, null);
+            fakeCall.ReceivedStatusOnClientHandler(true, CreateClientSideStatus(StatusCode.Cancelled));
+
+            AssertStreamingResponseError(asyncCall, fakeCall, readTask2, StatusCode.Cancelled);
+        }
+
         ClientSideStatus CreateClientSideStatus(StatusCode statusCode)
         {
             return new ClientSideStatus(new Status(statusCode, ""), new Metadata());
@@ -163,6 +452,16 @@
             Assert.AreEqual("response1", resultTask.Result);
         }
 
+        static void AssertStreamingResponseSuccess(AsyncCall<string, string> asyncCall, FakeNativeCall fakeCall, Task<bool> moveNextTask)
+        {
+            Assert.IsTrue(moveNextTask.IsCompleted);
+            Assert.IsTrue(fakeCall.IsDisposed);
+
+            Assert.IsFalse(moveNextTask.Result);
+            Assert.AreEqual(Status.DefaultSuccess, asyncCall.GetStatus());
+            Assert.AreEqual(0, asyncCall.GetTrailers().Count);
+        }
+
         static void AssertUnaryResponseError(AsyncCall<string, string> asyncCall, FakeNativeCall fakeCall, Task<string> resultTask, StatusCode expectedStatusCode)
         {
             Assert.IsTrue(resultTask.IsCompleted);
@@ -175,135 +474,15 @@
             Assert.AreEqual(0, asyncCall.GetTrailers().Count);
         }
 
-        internal class FakeNativeCall : INativeCall
+        static void AssertStreamingResponseError(AsyncCall<string, string> asyncCall, FakeNativeCall fakeCall, Task<bool> moveNextTask, StatusCode expectedStatusCode)
         {
-            public UnaryResponseClientHandler UnaryResponseClientHandler
-            {
-                get;
-                set;
-            }
+            Assert.IsTrue(moveNextTask.IsCompleted);
+            Assert.IsTrue(fakeCall.IsDisposed);
 
-            public ReceivedStatusOnClientHandler ReceivedStatusOnClientHandler
-            {
-                get;
-                set;
-            }
-
-            public ReceivedMessageHandler ReceivedMessageHandler
-            {
-                get;
-                set;
-            }
-
-            public ReceivedResponseHeadersHandler ReceivedResponseHeadersHandler
-            {
-                get;
-                set;
-            }
-
-            public SendCompletionHandler SendCompletionHandler
-            {
-                get;
-                set;
-            }
-
-            public ReceivedCloseOnServerHandler ReceivedCloseOnServerHandler
-            {
-                get;
-                set;
-            }
-
-            public bool IsCancelled
-            {
-                get;
-                set;
-            }
-
-            public bool IsDisposed
-            {
-                get;
-                set;
-            }
-
-            public void Cancel()
-            {
-                IsCancelled = true;
-            }
-
-            public void CancelWithStatus(Status status)
-            {
-                IsCancelled = true;
-            }
-
-            public string GetPeer()
-            {
-                return "PEER";
-            }
-
-            public void StartUnary(UnaryResponseClientHandler callback, byte[] payload, MetadataArraySafeHandle metadataArray, WriteFlags writeFlags)
-            {
-                UnaryResponseClientHandler = callback;
-            }
-
-            public void StartUnary(BatchContextSafeHandle ctx, byte[] payload, MetadataArraySafeHandle metadataArray, WriteFlags writeFlags)
-            {
-                throw new NotImplementedException();
-            }
-
-            public void StartClientStreaming(UnaryResponseClientHandler callback, MetadataArraySafeHandle metadataArray)
-            {
-                UnaryResponseClientHandler = callback;
-            }
-
-            public void StartServerStreaming(ReceivedStatusOnClientHandler callback, byte[] payload, MetadataArraySafeHandle metadataArray, WriteFlags writeFlags)
-            {
-                ReceivedStatusOnClientHandler = callback;
-            }
-
-            public void StartDuplexStreaming(ReceivedStatusOnClientHandler callback, MetadataArraySafeHandle metadataArray)
-            {
-                ReceivedStatusOnClientHandler = callback;
-            }
-
-            public void StartReceiveMessage(ReceivedMessageHandler callback)
-            {
-                ReceivedMessageHandler = callback;
-            }
-
-            public void StartReceiveInitialMetadata(ReceivedResponseHeadersHandler callback)
-            {
-                ReceivedResponseHeadersHandler = callback;
-            }
-
-            public void StartSendInitialMetadata(SendCompletionHandler callback, MetadataArraySafeHandle metadataArray)
-            {
-                SendCompletionHandler = callback;
-            }
-
-            public void StartSendMessage(SendCompletionHandler callback, byte[] payload, WriteFlags writeFlags, bool sendEmptyInitialMetadata)
-            {
-                SendCompletionHandler = callback;
-            }
-
-            public void StartSendCloseFromClient(SendCompletionHandler callback)
-            {
-                SendCompletionHandler = callback;
-            }
-
-            public void StartSendStatusFromServer(SendCompletionHandler callback, Status status, MetadataArraySafeHandle metadataArray, bool sendEmptyInitialMetadata)
-            {
-                SendCompletionHandler = callback;
-            }
-
-            public void StartServerSide(ReceivedCloseOnServerHandler callback)
-            {
-                ReceivedCloseOnServerHandler = callback;
-            }
-
-            public void Dispose()
-            {
-                IsDisposed = true;
-            }
+            var ex = Assert.ThrowsAsync<RpcException>(async () => await moveNextTask);
+            Assert.AreEqual(expectedStatusCode, ex.Status.StatusCode);
+            Assert.AreEqual(expectedStatusCode, asyncCall.GetStatus().StatusCode);
+            Assert.AreEqual(0, asyncCall.GetTrailers().Count);
         }
     }
 }
diff --git a/src/csharp/Grpc.Core.Tests/Internal/FakeNativeCall.cs b/src/csharp/Grpc.Core.Tests/Internal/FakeNativeCall.cs
new file mode 100644
index 0000000..909112a
--- /dev/null
+++ b/src/csharp/Grpc.Core.Tests/Internal/FakeNativeCall.cs
@@ -0,0 +1,184 @@
+#region Copyright notice and license
+
+// Copyright 2015, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#endregion
+
+using System;
+using System.Collections.Generic;
+using System.Runtime.InteropServices;
+using System.Threading.Tasks;
+
+using Grpc.Core.Internal;
+using NUnit.Framework;
+
+namespace Grpc.Core.Internal.Tests
+{
+    /// <summary>
+    /// For testing purposes.
+    /// </summary>
+    internal class FakeNativeCall : INativeCall
+    {
+        public UnaryResponseClientHandler UnaryResponseClientHandler
+        {
+            get;
+            set;
+        }
+
+        public ReceivedStatusOnClientHandler ReceivedStatusOnClientHandler
+        {
+            get;
+            set;
+        }
+
+        public ReceivedMessageHandler ReceivedMessageHandler
+        {
+            get;
+            set;
+        }
+
+        public ReceivedResponseHeadersHandler ReceivedResponseHeadersHandler
+        {
+            get;
+            set;
+        }
+
+        public SendCompletionHandler SendCompletionHandler
+        {
+            get;
+            set;
+        }
+
+        public SendCompletionHandler SendStatusFromServerHandler
+        {
+            get;
+            set;
+        }
+
+        public ReceivedCloseOnServerHandler ReceivedCloseOnServerHandler
+        {
+            get;
+            set;
+        }
+
+        public bool IsCancelled
+        {
+            get;
+            set;
+        }
+
+        public bool IsDisposed
+        {
+            get;
+            set;
+        }
+
+        public void Cancel()
+        {
+            IsCancelled = true;
+        }
+
+        public void CancelWithStatus(Status status)
+        {
+            IsCancelled = true;
+        }
+
+        public string GetPeer()
+        {
+            return "PEER";
+        }
+
+        public void StartUnary(UnaryResponseClientHandler callback, byte[] payload, MetadataArraySafeHandle metadataArray, WriteFlags writeFlags)
+        {
+            UnaryResponseClientHandler = callback;
+        }
+
+        public void StartUnary(BatchContextSafeHandle ctx, byte[] payload, MetadataArraySafeHandle metadataArray, WriteFlags writeFlags)
+        {
+            throw new NotImplementedException();
+        }
+
+        public void StartClientStreaming(UnaryResponseClientHandler callback, MetadataArraySafeHandle metadataArray)
+        {
+            UnaryResponseClientHandler = callback;
+        }
+
+        public void StartServerStreaming(ReceivedStatusOnClientHandler callback, byte[] payload, MetadataArraySafeHandle metadataArray, WriteFlags writeFlags)
+        {
+            ReceivedStatusOnClientHandler = callback;
+        }
+
+        public void StartDuplexStreaming(ReceivedStatusOnClientHandler callback, MetadataArraySafeHandle metadataArray)
+        {
+            ReceivedStatusOnClientHandler = callback;
+        }
+
+        public void StartReceiveMessage(ReceivedMessageHandler callback)
+        {
+            ReceivedMessageHandler = callback;
+        }
+
+        public void StartReceiveInitialMetadata(ReceivedResponseHeadersHandler callback)
+        {
+            ReceivedResponseHeadersHandler = callback;
+        }
+
+        public void StartSendInitialMetadata(SendCompletionHandler callback, MetadataArraySafeHandle metadataArray)
+        {
+            SendCompletionHandler = callback;
+        }
+
+        public void StartSendMessage(SendCompletionHandler callback, byte[] payload, WriteFlags writeFlags, bool sendEmptyInitialMetadata)
+        {
+            SendCompletionHandler = callback;
+        }
+
+        public void StartSendCloseFromClient(SendCompletionHandler callback)
+        {
+            SendCompletionHandler = callback;
+        }
+
+        public void StartSendStatusFromServer(SendCompletionHandler callback, Status status, MetadataArraySafeHandle metadataArray, bool sendEmptyInitialMetadata,
+            byte[] optionalPayload, WriteFlags writeFlags)
+        {
+            SendStatusFromServerHandler = callback;
+        }
+
+        public void StartServerSide(ReceivedCloseOnServerHandler callback)
+        {
+            ReceivedCloseOnServerHandler = callback;
+        }
+
+        public void Dispose()
+        {
+            IsDisposed = true;
+        }
+    }
+}
diff --git a/src/csharp/Grpc.Core/Internal/AsyncCall.cs b/src/csharp/Grpc.Core/Internal/AsyncCall.cs
index 50ba617..f522174 100644
--- a/src/csharp/Grpc.Core/Internal/AsyncCall.cs
+++ b/src/csharp/Grpc.Core/Internal/AsyncCall.cs
@@ -241,11 +241,10 @@
 
         /// <summary>
         /// Receives a streaming response. Only one pending read action is allowed at any given time.
-        /// completionDelegate is called when the operation finishes.
         /// </summary>
-        public void StartReadMessage(AsyncCompletionDelegate<TResponse> completionDelegate)
+        public Task<TResponse> ReadMessageAsync()
         {
-            StartReadMessageInternal(completionDelegate);
+            return ReadMessageInternalAsync();
         }
 
         /// <summary>
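Replacing the completion-delegate StartReadMessage with a Task-returning ReadMessageAsync lets callers serialize reads with a plain await. A rough sketch of the resulting loop (illustrative only; with the string marshaller used elsewhere in this change, end-of-stream surfaces as a null message):

    // Sketch only: awaiting each read before starting the next one preserves the
    // "only one pending read at a time" rule without any extra bookkeeping.
    string msg;
    while ((msg = await asyncCall.ReadMessageAsync()) != null)
    {
        HandleMessage(msg);  // hypothetical consumer, not part of this change
    }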
diff --git a/src/csharp/Grpc.Core/Internal/AsyncCallBase.cs b/src/csharp/Grpc.Core/Internal/AsyncCallBase.cs
index ccd047f..42234dc 100644
--- a/src/csharp/Grpc.Core/Internal/AsyncCallBase.cs
+++ b/src/csharp/Grpc.Core/Internal/AsyncCallBase.cs
@@ -68,7 +68,8 @@
         protected bool cancelRequested;
 
         protected AsyncCompletionDelegate<object> sendCompletionDelegate;  // Completion of a pending send or sendclose if not null.
-        protected AsyncCompletionDelegate<TRead> readCompletionDelegate;  // Completion of a pending send or sendclose if not null.
+        protected TaskCompletionSource<TRead> streamingReadTcs;  // Completion of a pending streaming read if not null.
+        protected TaskCompletionSource<object> sendStatusFromServerTcs;
 
         protected bool readingDone;  // True if last read (i.e. read with null payload) was already received.
         protected bool halfcloseRequested;  // True if send close have been initiated.
@@ -150,15 +151,25 @@
         /// Initiates reading a message. Only one read operation can be active at a time.
         /// completionDelegate is invoked upon completion.
         /// </summary>
-        protected void StartReadMessageInternal(AsyncCompletionDelegate<TRead> completionDelegate)
+        protected Task<TRead> ReadMessageInternalAsync()
         {
             lock (myLock)
             {
-                GrpcPreconditions.CheckNotNull(completionDelegate, "Completion delegate cannot be null");
-                CheckReadingAllowed();
+                GrpcPreconditions.CheckState(started);
+                if (readingDone)
+                {
+                    // The last read (i.e. one that returns null or throws an exception) is idempotent
+                    // and maintains its state, so repeated reads return the same result.
+                    GrpcPreconditions.CheckState(streamingReadTcs != null, "Call does not support streaming reads.");
+                    return streamingReadTcs.Task;
+                }
+
+                GrpcPreconditions.CheckState(streamingReadTcs == null, "Only one read can be pending at a time");
+                GrpcPreconditions.CheckState(!disposed);
 
                 call.StartReceiveMessage(HandleReadFinished);
-                readCompletionDelegate = completionDelegate;
+                streamingReadTcs = new TaskCompletionSource<TRead>();
+                return streamingReadTcs.Task;
             }
         }
 
@@ -213,15 +224,6 @@
             GrpcPreconditions.CheckState(sendCompletionDelegate == null, "Only one write can be pending at a time");
         }
 
-        protected virtual void CheckReadingAllowed()
-        {
-            GrpcPreconditions.CheckState(started);
-            GrpcPreconditions.CheckState(!disposed);
-
-            GrpcPreconditions.CheckState(!readingDone, "Stream has already been closed.");
-            GrpcPreconditions.CheckState(readCompletionDelegate == null, "Only one read can be pending at a time");
-        }
-
         protected void CheckNotCancelled()
         {
             if (cancelRequested)
@@ -322,22 +324,18 @@
         /// </summary>
         protected void HandleSendStatusFromServerFinished(bool success)
         {
-            AsyncCompletionDelegate<object> origCompletionDelegate = null;
             lock (myLock)
             {
-                origCompletionDelegate = sendCompletionDelegate;
-                sendCompletionDelegate = null;
-
                 ReleaseResourcesIfPossible();
             }
 
             if (!success)
             {
-                FireCompletion(origCompletionDelegate, null, new InvalidOperationException("Error sending status from server."));
+                sendStatusFromServerTcs.SetException(new InvalidOperationException("Error sending status from server."));
             }
             else
             {
-                FireCompletion(origCompletionDelegate, null, null);
+                sendStatusFromServerTcs.SetResult(null);
             }
         }
 
@@ -346,15 +344,17 @@
         /// </summary>
         protected void HandleReadFinished(bool success, byte[] receivedMessage)
         {
+            // If success == false, the received message will be null. In that case we
+            // treat this completion as the last read and rely on the C core to handle the
+            // failed read (e.g. deliver the appropriate status code on the client side).
+
             TRead msg = default(TRead);
             var deserializeException = (success && receivedMessage != null) ? TryDeserialize(receivedMessage, out msg) : null;
 
-            AsyncCompletionDelegate<TRead> origCompletionDelegate = null;
+            TaskCompletionSource<TRead> origTcs = null;
             lock (myLock)
             {
-                origCompletionDelegate = readCompletionDelegate;
-                readCompletionDelegate = null;
-
+                origTcs = streamingReadTcs;
                 if (receivedMessage == null)
                 {
                     // This was the last read.
@@ -364,20 +364,25 @@
                 if (deserializeException != null && IsClient)
                 {
                     readingDone = true;
+
+                    // TODO(jtattermusch): it might be too late to set the status
                     CancelWithStatus(DeserializeResponseFailureStatus);
                 }
 
+                if (!readingDone)
+                {
+                    streamingReadTcs = null;
+                }
+
                 ReleaseResourcesIfPossible();
             }
 
-            // TODO: handle the case when success==false
-
             if (deserializeException != null && !IsClient)
             {
-                FireCompletion(origCompletionDelegate, default(TRead), new IOException("Failed to deserialize request message.", deserializeException));
+                origTcs.SetException(new IOException("Failed to deserialize request message.", deserializeException));
                 return;
             }
-            FireCompletion(origCompletionDelegate, msg, null);
+            origTcs.SetResult(msg);
         }
     }
 }
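The streamingReadTcs field introduced above is a TaskCompletionSource bridge: the method that starts a read creates the source and returns its Task, and the native completion handler later resolves it. The same bridge, stripped of gRPC specifics, looks roughly like this (self-contained, illustrative sketch only):

    using System;
    using System.Threading.Tasks;

    // gRPC-free illustration of the TaskCompletionSource bridge used above.
    class CallbackToTaskBridge
    {
        TaskCompletionSource<string> pendingRead;

        public Task<string> ReadAsync(Action<Action<bool, string>> startNativeRead)
        {
            if (pendingRead != null)
            {
                throw new InvalidOperationException("Only one read can be pending at a time");
            }
            pendingRead = new TaskCompletionSource<string>();
            startNativeRead(HandleReadFinished);   // the "native" layer calls back later
            return pendingRead.Task;
        }

        void HandleReadFinished(bool success, string message)
        {
            var tcs = pendingRead;
            pendingRead = null;
            if (!success)
            {
                tcs.SetException(new InvalidOperationException("Read failed."));
                return;
            }
            tcs.SetResult(message);
        }
    }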
diff --git a/src/csharp/Grpc.Core/Internal/AsyncCallServer.cs b/src/csharp/Grpc.Core/Internal/AsyncCallServer.cs
index bea2b36..b1566b4 100644
--- a/src/csharp/Grpc.Core/Internal/AsyncCallServer.cs
+++ b/src/csharp/Grpc.Core/Internal/AsyncCallServer.cs
@@ -65,6 +65,15 @@
         }
 
         /// <summary>
+        /// Only for testing purposes.
+        /// </summary>
+        public void InitializeForTesting(INativeCall call)
+        {
+            server.AddCallReference(this);
+            InitializeInternal(call);
+        }
+
+        /// <summary>
         /// Starts a server side call.
         /// </summary>
         public Task ServerSideCallAsync()
@@ -91,11 +100,10 @@
 
         /// <summary>
         /// Receives a streaming request. Only one pending read action is allowed at any given time.
-        /// completionDelegate is called when the operation finishes.
         /// </summary>
-        public void StartReadMessage(AsyncCompletionDelegate<TRequest> completionDelegate)
+        public Task<TRequest> ReadMessageAsync()
         {
-            StartReadMessageInternal(completionDelegate);
+            return ReadMessageInternalAsync();
         }
 
         /// <summary>
@@ -128,24 +136,33 @@
         }
 
         /// <summary>
-        /// Sends call result status, also indicating server is done with streaming responses.
-        /// Only one pending send action is allowed at any given time.
-        /// completionDelegate is called when the operation finishes.
+        /// Sends call result status, indicating we are done with writes.
+        /// Sending a status different from StatusCode.OK will also implicitly cancel the call.
         /// </summary>
-        public void StartSendStatusFromServer(Status status, Metadata trailers, AsyncCompletionDelegate<object> completionDelegate)
+        public Task SendStatusFromServerAsync(Status status, Metadata trailers, Tuple<TResponse, WriteFlags> optionalWrite)
         {
+            byte[] payload = optionalWrite != null ? UnsafeSerialize(optionalWrite.Item1) : null;
+            var writeFlags = optionalWrite != null ? optionalWrite.Item2 : default(WriteFlags);
+
             lock (myLock)
             {
-                GrpcPreconditions.CheckNotNull(completionDelegate, "Completion delegate cannot be null");
-                CheckSendingAllowed(allowFinished: false);
+                GrpcPreconditions.CheckState(started);
+                GrpcPreconditions.CheckState(!disposed);
+                GrpcPreconditions.CheckState(!halfcloseRequested, "Can only send status from server once.");
 
                 using (var metadataArray = MetadataArraySafeHandle.Create(trailers))
                 {
-                    call.StartSendStatusFromServer(HandleSendStatusFromServerFinished, status, metadataArray, !initialMetadataSent);
+                    call.StartSendStatusFromServer(HandleSendStatusFromServerFinished, status, metadataArray, !initialMetadataSent,
+                        payload, writeFlags);
                 }
                 halfcloseRequested = true;
-                readingDone = true;
-                sendCompletionDelegate = completionDelegate;
+                initialMetadataSent = true;
+                sendStatusFromServerTcs = new TaskCompletionSource<object>();
+                if (optionalWrite != null)
+                {
+                    streamingWritesCounter++;
+                }
+                return sendStatusFromServerTcs.Task;
             }
         }
 
@@ -174,12 +191,6 @@
             get { return false; }
         }
 
-        protected override void CheckReadingAllowed()
-        {
-            base.CheckReadingAllowed();
-            GrpcPreconditions.CheckArgument(!cancelRequested);
-        }
-
         protected override void OnAfterReleaseResources()
         {
             server.RemoveCallReference(this);
@@ -190,12 +201,21 @@
         /// </summary>
         private void HandleFinishedServerside(bool success, bool cancelled)
         {
+            // NOTE: because this event is a result of a batch containing GRPC_OP_RECV_CLOSE_ON_SERVER,
+            // success will always be set to true.
             lock (myLock)
             {
                 finished = true;
+                if (streamingReadTcs == null)
+                {
+                    // if there's no pending read, setting readingDone=true allows disposal now.
+                    // if there is a pending read, we will dispose once that read finishes.
+                    readingDone = true;
+                    streamingReadTcs = new TaskCompletionSource<TRequest>();
+                    streamingReadTcs.SetResult(default(TRequest));
+                }
                 ReleaseResourcesIfPossible();
             }
-            // TODO(jtattermusch): handle error
 
             if (cancelled)
             {
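
The AsyncCallServer changes above replace the completion-delegate read API (StartReadMessage) with a Task-returning ReadMessageAsync backed by a TaskCompletionSource (streamingReadTcs). The following is a minimal, self-contained sketch of that pattern only; it is not the library's actual internals, and the names TcsBackedReader and HandleReadFinished are hypothetical.

    using System;
    using System.IO;
    using System.Threading.Tasks;

    // Illustration of a TaskCompletionSource-backed read; not gRPC's real code.
    public class TcsBackedReader
    {
        TaskCompletionSource<string> readTcs;

        // Caller side: await the next message instead of passing a completion delegate.
        public Task<string> ReadMessageAsync()
        {
            readTcs = new TaskCompletionSource<string>();
            // a real implementation would start the native read here
            return readTcs.Task;
        }

        // Callback side: complete the pending task with the result or an error.
        public void HandleReadFinished(bool success, string payload)
        {
            var origTcs = readTcs;
            readTcs = null;
            if (!success)
            {
                origTcs.SetException(new IOException("Failed to read message."));
                return;
            }
            origTcs.SetResult(payload);  // a null payload signals end of stream
        }
    }
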
diff --git a/src/csharp/Grpc.Core/Internal/CallSafeHandle.cs b/src/csharp/Grpc.Core/Internal/CallSafeHandle.cs
index 500653b..244b97d 100644
--- a/src/csharp/Grpc.Core/Internal/CallSafeHandle.cs
+++ b/src/csharp/Grpc.Core/Internal/CallSafeHandle.cs
@@ -135,13 +135,16 @@
             }
         }
 
-        public void StartSendStatusFromServer(SendCompletionHandler callback, Status status, MetadataArraySafeHandle metadataArray, bool sendEmptyInitialMetadata)
+        public void StartSendStatusFromServer(SendCompletionHandler callback, Status status, MetadataArraySafeHandle metadataArray, bool sendEmptyInitialMetadata,
+            byte[] optionalPayload, WriteFlags writeFlags)
         {
             using (completionQueue.NewScope())
             {
                 var ctx = BatchContextSafeHandle.Create();
+                var optionalPayloadLength = optionalPayload != null ? new UIntPtr((ulong)optionalPayload.Length) : UIntPtr.Zero;
                 completionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success));
-                Native.grpcsharp_call_send_status_from_server(this, ctx, status.StatusCode, status.Detail, metadataArray, sendEmptyInitialMetadata).CheckOk();
+                Native.grpcsharp_call_send_status_from_server(this, ctx, status.StatusCode, status.Detail, metadataArray, sendEmptyInitialMetadata,
+                    optionalPayload, optionalPayloadLength, writeFlags).CheckOk();
             }
         }
 
diff --git a/src/csharp/Grpc.Core/Internal/ClientResponseStream.cs b/src/csharp/Grpc.Core/Internal/ClientResponseStream.cs
index d6e34a0..ad9423f 100644
--- a/src/csharp/Grpc.Core/Internal/ClientResponseStream.cs
+++ b/src/csharp/Grpc.Core/Internal/ClientResponseStream.cs
@@ -68,9 +68,7 @@
             {
                 throw new InvalidOperationException("Cancellation of individual reads is not supported.");
             }
-            var taskSource = new AsyncCompletionTaskSource<TResponse>();
-            call.StartReadMessage(taskSource.CompletionDelegate);
-            var result = await taskSource.Task.ConfigureAwait(false);
+            var result = await call.ReadMessageAsync().ConfigureAwait(false);
             this.current = result;
 
             if (result == null)
diff --git a/src/csharp/Grpc.Core/Internal/INativeCall.cs b/src/csharp/Grpc.Core/Internal/INativeCall.cs
index cbef599..cd3719c 100644
--- a/src/csharp/Grpc.Core/Internal/INativeCall.cs
+++ b/src/csharp/Grpc.Core/Internal/INativeCall.cs
@@ -78,7 +78,7 @@
 
         void StartSendCloseFromClient(SendCompletionHandler callback);
 
-        void StartSendStatusFromServer(SendCompletionHandler callback, Grpc.Core.Status status, MetadataArraySafeHandle metadataArray, bool sendEmptyInitialMetadata);
+        void StartSendStatusFromServer(SendCompletionHandler callback, Grpc.Core.Status status, MetadataArraySafeHandle metadataArray, bool sendEmptyInitialMetadata, byte[] optionalPayload, Grpc.Core.WriteFlags writeFlags);
 
         void StartServerSide(ReceivedCloseOnServerHandler callback);
     }
diff --git a/src/csharp/Grpc.Core/Internal/NativeMethods.cs b/src/csharp/Grpc.Core/Internal/NativeMethods.cs
index 9ee0ba3..c277c73 100644
--- a/src/csharp/Grpc.Core/Internal/NativeMethods.cs
+++ b/src/csharp/Grpc.Core/Internal/NativeMethods.cs
@@ -421,20 +421,21 @@
             public delegate GRPCCallError grpcsharp_call_cancel_delegate(CallSafeHandle call);
             public delegate GRPCCallError grpcsharp_call_cancel_with_status_delegate(CallSafeHandle call, StatusCode status, string description);
             public delegate GRPCCallError grpcsharp_call_start_unary_delegate(CallSafeHandle call,
-                BatchContextSafeHandle ctx, byte[] send_buffer, UIntPtr send_buffer_len, MetadataArraySafeHandle metadataArray, WriteFlags writeFlags);
+                BatchContextSafeHandle ctx, byte[] sendBuffer, UIntPtr sendBufferLen, MetadataArraySafeHandle metadataArray, WriteFlags writeFlags);
             public delegate GRPCCallError grpcsharp_call_start_client_streaming_delegate(CallSafeHandle call,
                 BatchContextSafeHandle ctx, MetadataArraySafeHandle metadataArray);
             public delegate GRPCCallError grpcsharp_call_start_server_streaming_delegate(CallSafeHandle call,
-                BatchContextSafeHandle ctx, byte[] send_buffer, UIntPtr send_buffer_len,
+                BatchContextSafeHandle ctx, byte[] sendBuffer, UIntPtr sendBufferLen,
                 MetadataArraySafeHandle metadataArray, WriteFlags writeFlags);
             public delegate GRPCCallError grpcsharp_call_start_duplex_streaming_delegate(CallSafeHandle call,
                 BatchContextSafeHandle ctx, MetadataArraySafeHandle metadataArray);
             public delegate GRPCCallError grpcsharp_call_send_message_delegate(CallSafeHandle call,
-                BatchContextSafeHandle ctx, byte[] send_buffer, UIntPtr send_buffer_len, WriteFlags writeFlags, bool sendEmptyInitialMetadata);
+                BatchContextSafeHandle ctx, byte[] sendBuffer, UIntPtr sendBufferLen, WriteFlags writeFlags, bool sendEmptyInitialMetadata);
             public delegate GRPCCallError grpcsharp_call_send_close_from_client_delegate(CallSafeHandle call,
                 BatchContextSafeHandle ctx);
             public delegate GRPCCallError grpcsharp_call_send_status_from_server_delegate(CallSafeHandle call,
-                BatchContextSafeHandle ctx, StatusCode statusCode, string statusMessage, MetadataArraySafeHandle metadataArray, bool sendEmptyInitialMetadata);
+                BatchContextSafeHandle ctx, StatusCode statusCode, string statusMessage, MetadataArraySafeHandle metadataArray, bool sendEmptyInitialMetadata,
+                byte[] optionalSendBuffer, UIntPtr optionalSendBufferLen, WriteFlags writeFlags);
             public delegate GRPCCallError grpcsharp_call_recv_message_delegate(CallSafeHandle call,
                 BatchContextSafeHandle ctx);
             public delegate GRPCCallError grpcsharp_call_recv_initial_metadata_delegate(CallSafeHandle call,
@@ -593,7 +594,7 @@
 
             [DllImport("grpc_csharp_ext.dll")]
             public static extern GRPCCallError grpcsharp_call_start_unary(CallSafeHandle call,
-                BatchContextSafeHandle ctx, byte[] send_buffer, UIntPtr send_buffer_len, MetadataArraySafeHandle metadataArray, WriteFlags writeFlags);
+                BatchContextSafeHandle ctx, byte[] sendBuffer, UIntPtr sendBufferLen, MetadataArraySafeHandle metadataArray, WriteFlags writeFlags);
 
             [DllImport("grpc_csharp_ext.dll")]
             public static extern GRPCCallError grpcsharp_call_start_client_streaming(CallSafeHandle call,
@@ -601,7 +602,7 @@
 
             [DllImport("grpc_csharp_ext.dll")]
             public static extern GRPCCallError grpcsharp_call_start_server_streaming(CallSafeHandle call,
-                BatchContextSafeHandle ctx, byte[] send_buffer, UIntPtr send_buffer_len,
+                BatchContextSafeHandle ctx, byte[] sendBuffer, UIntPtr sendBufferLen,
                 MetadataArraySafeHandle metadataArray, WriteFlags writeFlags);
 
             [DllImport("grpc_csharp_ext.dll")]
@@ -610,7 +611,7 @@
 
             [DllImport("grpc_csharp_ext.dll")]
             public static extern GRPCCallError grpcsharp_call_send_message(CallSafeHandle call,
-                BatchContextSafeHandle ctx, byte[] send_buffer, UIntPtr send_buffer_len, WriteFlags writeFlags, bool sendEmptyInitialMetadata);
+                BatchContextSafeHandle ctx, byte[] sendBuffer, UIntPtr sendBufferLen, WriteFlags writeFlags, bool sendEmptyInitialMetadata);
 
             [DllImport("grpc_csharp_ext.dll")]
             public static extern GRPCCallError grpcsharp_call_send_close_from_client(CallSafeHandle call,
@@ -618,7 +619,8 @@
 
             [DllImport("grpc_csharp_ext.dll")]
             public static extern GRPCCallError grpcsharp_call_send_status_from_server(CallSafeHandle call,
-                BatchContextSafeHandle ctx, StatusCode statusCode, string statusMessage, MetadataArraySafeHandle metadataArray, bool sendEmptyInitialMetadata);
+                BatchContextSafeHandle ctx, StatusCode statusCode, string statusMessage, MetadataArraySafeHandle metadataArray, bool sendEmptyInitialMetadata,
+                byte[] optionalSendBuffer, UIntPtr optionalSendBufferLen, WriteFlags writeFlags);
 
             [DllImport("grpc_csharp_ext.dll")]
             public static extern GRPCCallError grpcsharp_call_recv_message(CallSafeHandle call,
diff --git a/src/csharp/Grpc.Core/Internal/ServerCallHandler.cs b/src/csharp/Grpc.Core/Internal/ServerCallHandler.cs
index 1f83e51..85b7a4b 100644
--- a/src/csharp/Grpc.Core/Internal/ServerCallHandler.cs
+++ b/src/csharp/Grpc.Core/Internal/ServerCallHandler.cs
@@ -75,29 +75,32 @@
             var responseStream = new ServerResponseStream<TRequest, TResponse>(asyncCall);
 
             Status status;
+            Tuple<TResponse, WriteFlags> responseTuple = null;
             var context = HandlerUtils.NewContext(newRpc, asyncCall.Peer, responseStream, asyncCall.CancellationToken);
             try
             {
                 GrpcPreconditions.CheckArgument(await requestStream.MoveNext().ConfigureAwait(false));
                 var request = requestStream.Current;
-                // TODO(jtattermusch): we need to read the full stream so that native callhandle gets deallocated.
-                GrpcPreconditions.CheckArgument(!await requestStream.MoveNext().ConfigureAwait(false));
-                var result = await handler(request, context).ConfigureAwait(false);
+                var response = await handler(request, context).ConfigureAwait(false);
                 status = context.Status;
-                await responseStream.WriteAsync(result).ConfigureAwait(false);
+                responseTuple = Tuple.Create(response, HandlerUtils.GetWriteFlags(context.WriteOptions));
             } 
             catch (Exception e)
             {
-                Logger.Error(e, "Exception occured in handler.");
+                if (!(e is RpcException))
+                {
+                    Logger.Warning(e, "Exception occurred in handler.");
+                }
                 status = HandlerUtils.StatusFromException(e);
             }
             try
             {
-                await responseStream.WriteStatusAsync(status, context.ResponseTrailers).ConfigureAwait(false);
+                await asyncCall.SendStatusFromServerAsync(status, context.ResponseTrailers, responseTuple).ConfigureAwait(false);
             }
-            catch (OperationCanceledException)
+            catch (Exception)
             {
-                // Call has been already cancelled.
+                asyncCall.Cancel();
+                throw;
             }
             await finishedTask.ConfigureAwait(false);
         }
@@ -136,24 +139,26 @@
             {
                 GrpcPreconditions.CheckArgument(await requestStream.MoveNext().ConfigureAwait(false));
                 var request = requestStream.Current;
-                // TODO(jtattermusch): we need to read the full stream so that native callhandle gets deallocated.
-                GrpcPreconditions.CheckArgument(!await requestStream.MoveNext().ConfigureAwait(false));
                 await handler(request, responseStream, context).ConfigureAwait(false);
                 status = context.Status;
             }
             catch (Exception e)
             {
-                Logger.Error(e, "Exception occured in handler.");
+                if (!(e is RpcException))
+                {
+                    Logger.Warning(e, "Exception occurred in handler.");
+                }
                 status = HandlerUtils.StatusFromException(e);
             }
 
             try
             {
-                await responseStream.WriteStatusAsync(status, context.ResponseTrailers).ConfigureAwait(false);
+                await asyncCall.SendStatusFromServerAsync(status, context.ResponseTrailers, null).ConfigureAwait(false);
             }
-            catch (OperationCanceledException)
+            catch (Exception)
             {
-                // Call has been already cancelled.
+                asyncCall.Cancel();
+                throw;
             }
             await finishedTask.ConfigureAwait(false);
         }
@@ -187,33 +192,31 @@
             var responseStream = new ServerResponseStream<TRequest, TResponse>(asyncCall);
 
             Status status;
+            Tuple<TResponse, WriteFlags> responseTuple = null;
             var context = HandlerUtils.NewContext(newRpc, asyncCall.Peer, responseStream, asyncCall.CancellationToken);
             try
             {
-                var result = await handler(requestStream, context).ConfigureAwait(false);
+                var response = await handler(requestStream, context).ConfigureAwait(false);
                 status = context.Status;
-                try
-                {
-                    await responseStream.WriteAsync(result).ConfigureAwait(false);
-                }
-                catch (OperationCanceledException)
-                {
-                    status = Status.DefaultCancelled;
-                }
+                responseTuple = Tuple.Create(response, HandlerUtils.GetWriteFlags(context.WriteOptions));
             }
             catch (Exception e)
             {
-                Logger.Error(e, "Exception occured in handler.");
+                if (!(e is RpcException))
+                {
+                    Logger.Warning(e, "Exception occurred in handler.");
+                }
                 status = HandlerUtils.StatusFromException(e);
             }
 
             try
             {
-                await responseStream.WriteStatusAsync(status, context.ResponseTrailers).ConfigureAwait(false);
+                await asyncCall.SendStatusFromServerAsync(status, context.ResponseTrailers, responseTuple).ConfigureAwait(false);
             }
-            catch (OperationCanceledException)
+            catch (Exception)
             {
-                // Call has been already cancelled.
+                asyncCall.Cancel();
+                throw;
             }
             await finishedTask.ConfigureAwait(false);
         }
@@ -255,16 +258,20 @@
             }
             catch (Exception e)
             {
-                Logger.Error(e, "Exception occured in handler.");
+                if (!(e is RpcException))
+                {
+                    Logger.Warning(e, "Exception occurred in handler.");
+                }
                 status = HandlerUtils.StatusFromException(e);
             }
             try
             {
-                await responseStream.WriteStatusAsync(status, context.ResponseTrailers).ConfigureAwait(false);
+                await asyncCall.SendStatusFromServerAsync(status, context.ResponseTrailers, null).ConfigureAwait(false);
             }
-            catch (OperationCanceledException)
+            catch (Exception)
             {
-                // Call has been already cancelled.
+                asyncCall.Cancel();
+                throw;
             }
             await finishedTask.ConfigureAwait(false);
         }
@@ -282,9 +289,7 @@
             
             asyncCall.Initialize(newRpc.Call);
             var finishedTask = asyncCall.ServerSideCallAsync();
-            var responseStream = new ServerResponseStream<byte[], byte[]>(asyncCall);
-
-            await responseStream.WriteStatusAsync(new Status(StatusCode.Unimplemented, ""), Metadata.Empty).ConfigureAwait(false);
+            await asyncCall.SendStatusFromServerAsync(new Status(StatusCode.Unimplemented, ""), Metadata.Empty, null).ConfigureAwait(false);
             await finishedTask.ConfigureAwait(false);
         }
     }
@@ -300,10 +305,14 @@
                 return rpcException.Status;
             }
 
-            // TODO(jtattermusch): what is the right status code here?
             return new Status(StatusCode.Unknown, "Exception was thrown by handler.");
         }
 
+        public static WriteFlags GetWriteFlags(WriteOptions writeOptions)
+        {
+            return writeOptions != null ? writeOptions.Flags : default(WriteFlags);
+        }
+
         public static ServerCallContext NewContext<TRequest, TResponse>(ServerRpcNew newRpc, string peer, ServerResponseStream<TRequest, TResponse> serverResponseStream, CancellationToken cancellationToken)
             where TRequest : class
             where TResponse : class
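
The ServerCallHandler changes above stop logging RpcException as an error: an RpcException thrown by a handler keeps its status (HandlerUtils.StatusFromException returns rpcException.Status), while any other exception is logged as a warning and surfaced as StatusCode.Unknown. Below is a hedged sketch of a handler relying on that behaviour; the request and response types are plain strings purely to keep the example self-contained, not the generated protobuf messages.

    using System.Threading.Tasks;
    using Grpc.Core;

    public static class EchoHandlerSketch
    {
        public static Task<string> Echo(string request, ServerCallContext context)
        {
            if (string.IsNullOrEmpty(request))
            {
                // Propagated as-is: the server sends InvalidArgument and logs no warning.
                throw new RpcException(new Status(StatusCode.InvalidArgument, "Message must not be empty."));
            }
            // Any other exception thrown here is logged as a warning and mapped to StatusCode.Unknown.
            return Task.FromResult(request);
        }
    }
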
diff --git a/src/csharp/Grpc.Core/Internal/ServerRequestStream.cs b/src/csharp/Grpc.Core/Internal/ServerRequestStream.cs
index e7be82c..d76030d 100644
--- a/src/csharp/Grpc.Core/Internal/ServerRequestStream.cs
+++ b/src/csharp/Grpc.Core/Internal/ServerRequestStream.cs
@@ -68,9 +68,7 @@
             {
                 throw new InvalidOperationException("Cancellation of individual reads is not supported.");
             }
-            var taskSource = new AsyncCompletionTaskSource<TRequest>();
-            call.StartReadMessage(taskSource.CompletionDelegate);
-            var result = await taskSource.Task.ConfigureAwait(false);
+            var result = await call.ReadMessageAsync().ConfigureAwait(false);
             this.current = result;
             return result != null;
         }
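
Both ServerRequestStream above and ClientResponseStream earlier treat a null result from ReadMessageAsync() as end of stream and translate it into MoveNext() returning false. A small self-contained sketch of that convention follows; the class name NullTerminatedStreamSketch is hypothetical and the delegate stands in for the real call object.

    using System;
    using System.Threading.Tasks;

    // Illustrative only: a null message means end of stream, surfaced as MoveNext() == false.
    public class NullTerminatedStreamSketch<T> where T : class
    {
        readonly Func<Task<T>> readMessageAsync;
        T current;

        public NullTerminatedStreamSketch(Func<Task<T>> readMessageAsync)
        {
            this.readMessageAsync = readMessageAsync;
        }

        public T Current
        {
            get { return current; }
        }

        public async Task<bool> MoveNext()
        {
            current = await readMessageAsync().ConfigureAwait(false);
            return current != null;
        }
    }
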
diff --git a/src/csharp/Grpc.Core/Internal/ServerResponseStream.cs b/src/csharp/Grpc.Core/Internal/ServerResponseStream.cs
index 03e39ef..ecfee0b 100644
--- a/src/csharp/Grpc.Core/Internal/ServerResponseStream.cs
+++ b/src/csharp/Grpc.Core/Internal/ServerResponseStream.cs
@@ -57,13 +57,6 @@
             return taskSource.Task;
         }
 
-        public Task WriteStatusAsync(Status status, Metadata trailers)
-        {
-            var taskSource = new AsyncCompletionTaskSource<object>();
-            call.StartSendStatusFromServer(status, trailers, taskSource.CompletionDelegate);
-            return taskSource.Task;
-        }
-
         public Task WriteResponseHeadersAsync(Metadata responseHeaders)
         {
             var taskSource = new AsyncCompletionTaskSource<object>();
diff --git a/src/csharp/Grpc.Core/VersionInfo.cs b/src/csharp/Grpc.Core/VersionInfo.cs
index f7a9cb9..e160934 100644
--- a/src/csharp/Grpc.Core/VersionInfo.cs
+++ b/src/csharp/Grpc.Core/VersionInfo.cs
@@ -48,11 +48,11 @@
         /// <summary>
         /// Current <c>AssemblyFileVersion</c> of gRPC C# assemblies
         /// </summary>
-        public const string CurrentAssemblyFileVersion = "0.14.0.0";
+        public const string CurrentAssemblyFileVersion = "0.15.0.0";
 
         /// <summary>
         /// Current version of gRPC C#
         /// </summary>
-        public const string CurrentVersion = "0.14.0-dev";
+        public const string CurrentVersion = "0.15.0-dev";
     }
 }
diff --git a/src/csharp/build_packages.bat b/src/csharp/build_packages.bat
index 9a60be2..7520b0f 100644
--- a/src/csharp/build_packages.bat
+++ b/src/csharp/build_packages.bat
@@ -1,7 +1,7 @@
 @rem Builds gRPC NuGet packages
 
 @rem Current package versions
-set VERSION=0.14.0-dev
+set VERSION=0.15.0-dev
 set PROTOBUF_VERSION=3.0.0-beta2
 
 @rem Packages that depend on prerelease packages (like Google.Protobuf) need to have prerelease suffix as well.
diff --git a/src/csharp/ext/grpc_csharp_ext.c b/src/csharp/ext/grpc_csharp_ext.c
index aeef8a7..5b8ff9b 100644
--- a/src/csharp/ext/grpc_csharp_ext.c
+++ b/src/csharp/ext/grpc_csharp_ext.c
@@ -715,10 +715,11 @@
 GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_send_status_from_server(
     grpc_call *call, grpcsharp_batch_context *ctx, grpc_status_code status_code,
     const char *status_details, grpc_metadata_array *trailing_metadata,
-    int32_t send_empty_initial_metadata) {
+    int32_t send_empty_initial_metadata, const char* optional_send_buffer,
+    size_t optional_send_buffer_len, uint32_t write_flags) {
   /* TODO: don't use magic number */
-  grpc_op ops[2];
-  size_t nops = send_empty_initial_metadata ? 2 : 1;
+  grpc_op ops[3];
+  size_t nops = 1;
   ops[0].op = GRPC_OP_SEND_STATUS_FROM_SERVER;
   ops[0].data.send_status_from_server.status = status_code;
   ops[0].data.send_status_from_server.status_details =
@@ -731,12 +732,23 @@
       ctx->send_status_from_server.trailing_metadata.metadata;
   ops[0].flags = 0;
   ops[0].reserved = NULL;
-  ops[1].op = GRPC_OP_SEND_INITIAL_METADATA;
-  ops[1].data.send_initial_metadata.count = 0;
-  ops[1].data.send_initial_metadata.metadata = NULL;
-  ops[1].flags = 0;
-  ops[1].reserved = NULL;
-
+  if (optional_send_buffer) {
+    ops[nops].op = GRPC_OP_SEND_MESSAGE;
+    ctx->send_message = string_to_byte_buffer(optional_send_buffer,
+                                              optional_send_buffer_len);
+    ops[nops].data.send_message = ctx->send_message;
+    ops[nops].flags = write_flags;
+    ops[nops].reserved = NULL;
+    nops++;
+  }
+  if (send_empty_initial_metadata) {
+    ops[nops].op = GRPC_OP_SEND_INITIAL_METADATA;
+    ops[nops].data.send_initial_metadata.count = 0;
+    ops[nops].data.send_initial_metadata.metadata = NULL;
+    ops[nops].flags = 0;
+    ops[nops].reserved = NULL;
+    nops++;
+  }
   return grpc_call_start_batch(call, ops, nops, ctx, NULL);
 }
 
diff --git a/src/csharp/tests.json b/src/csharp/tests.json
index f733352..f6af340 100644
--- a/src/csharp/tests.json
+++ b/src/csharp/tests.json
@@ -1,5 +1,6 @@
 {
   "Grpc.Core.Tests": [
+    "Grpc.Core.Internal.Tests.AsyncCallServerTest",
     "Grpc.Core.Internal.Tests.AsyncCallTest",
     "Grpc.Core.Internal.Tests.ChannelArgsSafeHandleTest",
     "Grpc.Core.Internal.Tests.CompletionQueueEventTest",
diff --git a/src/node/tools/package.json b/src/node/tools/package.json
index d98ed0b..efdfa81 100644
--- a/src/node/tools/package.json
+++ b/src/node/tools/package.json
@@ -1,6 +1,6 @@
 {
   "name": "grpc-tools",
-  "version": "0.14.0-dev",
+  "version": "0.15.0-dev",
   "author": "Google Inc.",
   "description": "Tools for developing with gRPC on Node.js",
   "homepage": "http://www.grpc.io/",
diff --git a/src/proto/grpc/testing/echo.proto b/src/proto/grpc/testing/echo.proto
index 0eef53a..c596aab 100644
--- a/src/proto/grpc/testing/echo.proto
+++ b/src/proto/grpc/testing/echo.proto
@@ -45,3 +45,7 @@
 service UnimplementedService {
   rpc Unimplemented(EchoRequest) returns (EchoResponse);
 }
+
+// A service without any RPCs defined, to test coverage.
+service NoRpcService {
+}
diff --git a/src/python/grpcio/README.rst b/src/python/grpcio/README.rst
index cb3f6b8..afc4fe6 100644
--- a/src/python/grpcio/README.rst
+++ b/src/python/grpcio/README.rst
@@ -48,6 +48,7 @@
   $ export REPO_ROOT=grpc  # REPO_ROOT can be any directory of your choice
   $ git clone https://github.com/grpc/grpc.git $REPO_ROOT
   $ cd $REPO_ROOT
+  $ git submodule update --init
 
   # For the next two commands do `sudo pip install` if you get permission-denied errors
   $ pip install -rrequirements.txt
diff --git a/src/python/grpcio/grpc/__init__.py b/src/python/grpcio/grpc/__init__.py
index 7086519..b844a14 100644
--- a/src/python/grpcio/grpc/__init__.py
+++ b/src/python/grpcio/grpc/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2015, Google Inc.
+# Copyright 2015-2016, Google Inc.
 # All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
@@ -27,4 +27,5 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+__import__('pkg_resources').declare_namespace(__name__)
 
diff --git a/src/python/grpcio/grpc/beta/implementations.py b/src/python/grpcio/grpc/beta/implementations.py
index 742e94d..822f593 100644
--- a/src/python/grpcio/grpc/beta/implementations.py
+++ b/src/python/grpcio/grpc/beta/implementations.py
@@ -1,4 +1,4 @@
-# Copyright 2015, Google Inc.
+# Copyright 2015-2016, Google Inc.
 # All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
@@ -188,12 +188,13 @@
   Args:
     host: The name of the remote host to which to connect.
     port: The port of the remote host to which to connect.
+      If None, only the 'host' part will be used.
 
   Returns:
     A Channel to the remote host through which RPCs may be conducted.
   """
   intermediary_low_channel = _intermediary_low.Channel(
-      '%s:%d' % (host, port), None)
+      '%s:%d' % (host, port) if port else host, None)
   return Channel(intermediary_low_channel._internal, intermediary_low_channel)  # pylint: disable=protected-access
 
 
@@ -203,13 +204,15 @@
   Args:
     host: The name of the remote host to which to connect.
     port: The port of the remote host to which to connect.
+      If None, only the 'host' part will be used.
     channel_credentials: A ChannelCredentials.
 
   Returns:
     A secure Channel to the remote host through which RPCs may be conducted.
   """
   intermediary_low_channel = _intermediary_low.Channel(
-      '%s:%d' % (host, port), channel_credentials._low_credentials)
+      '%s:%d' % (host, port) if port else host,
+      channel_credentials._low_credentials)
   return Channel(intermediary_low_channel._internal, intermediary_low_channel)  # pylint: disable=protected-access
 
 
diff --git a/src/python/grpcio/grpc_version.py b/src/python/grpcio/grpc_version.py
index 873b4e2..0c13104 100644
--- a/src/python/grpcio/grpc_version.py
+++ b/src/python/grpcio/grpc_version.py
@@ -29,4 +29,4 @@
 
 # AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio/grpc_version.py.template`!!!
 
-VERSION='0.14.0.dev0'
+VERSION='0.15.0.dev0'
diff --git a/src/python/grpcio/tests/stress/client.py b/src/python/grpcio/tests/stress/client.py
index a733741..e2e0167 100644
--- a/src/python/grpcio/tests/stress/client.py
+++ b/src/python/grpcio/tests/stress/client.py
@@ -117,7 +117,10 @@
   for runner in runners:
     runner.start()
   try:
-    raise exception_queue.get(block=True, timeout=args.test_duration_secs)
+    timeout_secs = args.test_duration_secs
+    if timeout_secs < 0:
+      timeout_secs = None
+    raise exception_queue.get(block=True, timeout=timeout_secs)
   except Queue.Empty:
     # No exceptions thrown, success
     pass
diff --git a/src/ruby/.rubocop.yml b/src/ruby/.rubocop.yml
index d13ce42..34bb477 100644
--- a/src/ruby/.rubocop.yml
+++ b/src/ruby/.rubocop.yml
@@ -11,10 +11,10 @@
     - 'pb/test/**/*'
 
 Metrics/CyclomaticComplexity:
-  Max: 8
+  Max: 9
 
 Metrics/PerceivedComplexity:
-  Max: 8
+  Max: 9
 
 Metrics/ClassLength:
   Max: 250
diff --git a/src/ruby/ext/grpc/extconf.rb b/src/ruby/ext/grpc/extconf.rb
index 07f7bb9..6d65db8 100644
--- a/src/ruby/ext/grpc/extconf.rb
+++ b/src/ruby/ext/grpc/extconf.rb
@@ -78,9 +78,11 @@
 grpc_lib_dir = File.join(output_dir, 'libs', grpc_config)
 ENV['BUILDDIR'] = output_dir
 
-puts 'Building internal gRPC into ' + grpc_lib_dir
-system("make -j -C #{grpc_root} #{grpc_lib_dir}/libgrpc.a CONFIG=#{grpc_config}")
-exit 1 unless $? == 0
+unless windows
+  puts 'Building internal gRPC into ' + grpc_lib_dir
+  system("make -j -C #{grpc_root} #{grpc_lib_dir}/libgrpc.a CONFIG=#{grpc_config}")
+  exit 1 unless $? == 0
+end
 
 $CFLAGS << ' -I' + File.join(grpc_root, 'include')
 $LDFLAGS << ' ' + File.join(grpc_lib_dir, 'libgrpc.a') unless windows
diff --git a/src/ruby/ext/grpc/rb_byte_buffer.c b/src/ruby/ext/grpc/rb_byte_buffer.c
index cba910d..1172691 100644
--- a/src/ruby/ext/grpc/rb_byte_buffer.c
+++ b/src/ruby/ext/grpc/rb_byte_buffer.c
@@ -32,11 +32,10 @@
  */
 
 #include <ruby/ruby.h>
+
 #include "rb_grpc_imports.generated.h"
 #include "rb_byte_buffer.h"
 
-#include <ruby/ruby.h>
-
 #include <grpc/grpc.h>
 #include <grpc/byte_buffer_reader.h>
 #include <grpc/support/slice.h>
diff --git a/src/ruby/ext/grpc/rb_call.c b/src/ruby/ext/grpc/rb_call.c
index 48c49a2..1b06273 100644
--- a/src/ruby/ext/grpc/rb_call.c
+++ b/src/ruby/ext/grpc/rb_call.c
@@ -32,11 +32,10 @@
  */
 
 #include <ruby/ruby.h>
+
 #include "rb_grpc_imports.generated.h"
 #include "rb_call.h"
 
-#include <ruby/ruby.h>
-
 #include <grpc/grpc.h>
 #include <grpc/support/alloc.h>
 
diff --git a/src/ruby/ext/grpc/rb_call_credentials.c b/src/ruby/ext/grpc/rb_call_credentials.c
index 38bf1f7..79ca5b3 100644
--- a/src/ruby/ext/grpc/rb_call_credentials.c
+++ b/src/ruby/ext/grpc/rb_call_credentials.c
@@ -32,10 +32,10 @@
  */
 
 #include <ruby/ruby.h>
+
 #include "rb_grpc_imports.generated.h"
 #include "rb_call_credentials.h"
 
-#include <ruby/ruby.h>
 #include <ruby/thread.h>
 
 #include <grpc/grpc.h>
@@ -86,11 +86,11 @@
       rb_funcall(exception_object, rb_intern("backtrace"), 0),
       rb_intern("join"),
       1, rb_str_new2("\n\tfrom "));
-  VALUE exception_info = rb_funcall(exception_object, rb_intern("to_s"), 0);
+  VALUE rb_exception_info = rb_funcall(exception_object, rb_intern("to_s"), 0);
   const char *exception_classname = rb_obj_classname(exception_object);
   (void)args;
   gpr_log(GPR_INFO, "Call credentials callback failed: %s: %s\n%s",
-          exception_classname, StringValueCStr(exception_info),
+          exception_classname, StringValueCStr(rb_exception_info),
           StringValueCStr(backtrace));
   rb_hash_aset(result, rb_str_new2("metadata"), Qnil);
   /* Currently only gives the exception class name. It should be possible get
diff --git a/src/ruby/ext/grpc/rb_channel.c b/src/ruby/ext/grpc/rb_channel.c
index 984afad..013321f 100644
--- a/src/ruby/ext/grpc/rb_channel.c
+++ b/src/ruby/ext/grpc/rb_channel.c
@@ -32,11 +32,10 @@
  */
 
 #include <ruby/ruby.h>
+
 #include "rb_grpc_imports.generated.h"
 #include "rb_channel.h"
 
-#include <ruby/ruby.h>
-
 #include <grpc/grpc.h>
 #include <grpc/grpc_security.h>
 #include <grpc/support/alloc.h>
diff --git a/src/ruby/ext/grpc/rb_channel_args.c b/src/ruby/ext/grpc/rb_channel_args.c
index 2ffb8f4..87c0e0a 100644
--- a/src/ruby/ext/grpc/rb_channel_args.c
+++ b/src/ruby/ext/grpc/rb_channel_args.c
@@ -32,11 +32,10 @@
  */
 
 #include <ruby/ruby.h>
+
 #include "rb_grpc_imports.generated.h"
 #include "rb_channel_args.h"
 
-#include <ruby/ruby.h>
-
 #include <grpc/grpc.h>
 
 #include "rb_grpc.h"
diff --git a/src/ruby/ext/grpc/rb_channel_credentials.c b/src/ruby/ext/grpc/rb_channel_credentials.c
index 09bd309..cbb2388 100644
--- a/src/ruby/ext/grpc/rb_channel_credentials.c
+++ b/src/ruby/ext/grpc/rb_channel_credentials.c
@@ -31,14 +31,13 @@
  *
  */
 
+#include <ruby/ruby.h>
+
 #include <string.h>
 
-#include <ruby/ruby.h>
 #include "rb_grpc_imports.generated.h"
 #include "rb_channel_credentials.h"
 
-#include <ruby/ruby.h>
-
 #include <grpc/grpc.h>
 #include <grpc/grpc_security.h>
 #include <grpc/support/alloc.h>
diff --git a/src/ruby/ext/grpc/rb_completion_queue.c b/src/ruby/ext/grpc/rb_completion_queue.c
index 2a2eee1..4bb615f 100644
--- a/src/ruby/ext/grpc/rb_completion_queue.c
+++ b/src/ruby/ext/grpc/rb_completion_queue.c
@@ -32,10 +32,10 @@
  */
 
 #include <ruby/ruby.h>
+
 #include "rb_grpc_imports.generated.h"
 #include "rb_completion_queue.h"
 
-#include <ruby/ruby.h>
 #include <ruby/thread.h>
 
 #include <grpc/grpc.h>
diff --git a/src/ruby/ext/grpc/rb_event_thread.c b/src/ruby/ext/grpc/rb_event_thread.c
index 2649a10..9e85bbc 100644
--- a/src/ruby/ext/grpc/rb_event_thread.c
+++ b/src/ruby/ext/grpc/rb_event_thread.c
@@ -32,12 +32,12 @@
  */
 
 #include <ruby/ruby.h>
+
 #include "rb_grpc_imports.generated.h"
 #include "rb_event_thread.h"
 
 #include <stdbool.h>
 
-#include <ruby/ruby.h>
 #include <ruby/thread.h>
 #include <grpc/support/alloc.h>
 #include <grpc/support/sync.h>
diff --git a/src/ruby/ext/grpc/rb_grpc.c b/src/ruby/ext/grpc/rb_grpc.c
index acb47b0..06a07ac 100644
--- a/src/ruby/ext/grpc/rb_grpc.c
+++ b/src/ruby/ext/grpc/rb_grpc.c
@@ -32,11 +32,11 @@
  */
 
 #include <ruby/ruby.h>
+
 #include "rb_grpc_imports.generated.h"
 #include "rb_grpc.h"
 
 #include <math.h>
-#include <ruby/ruby.h>
 #include <ruby/vm.h>
 #include <sys/time.h>
 
diff --git a/src/ruby/ext/grpc/rb_server.c b/src/ruby/ext/grpc/rb_server.c
index 96e60c6..2b3acaa 100644
--- a/src/ruby/ext/grpc/rb_server.c
+++ b/src/ruby/ext/grpc/rb_server.c
@@ -32,11 +32,10 @@
  */
 
 #include <ruby/ruby.h>
+
 #include "rb_grpc_imports.generated.h"
 #include "rb_server.h"
 
-#include <ruby/ruby.h>
-
 #include <grpc/grpc.h>
 #include <grpc/grpc_security.h>
 #include "rb_call.h"
diff --git a/src/ruby/ext/grpc/rb_server_credentials.c b/src/ruby/ext/grpc/rb_server_credentials.c
index b2d7280..3b0fb6c 100644
--- a/src/ruby/ext/grpc/rb_server_credentials.c
+++ b/src/ruby/ext/grpc/rb_server_credentials.c
@@ -32,11 +32,10 @@
  */
 
 #include <ruby/ruby.h>
+
 #include "rb_grpc_imports.generated.h"
 #include "rb_server_credentials.h"
 
-#include <ruby/ruby.h>
-
 #include <grpc/grpc.h>
 #include <grpc/grpc_security.h>
 
diff --git a/src/ruby/lib/grpc/generic/rpc_server.rb b/src/ruby/lib/grpc/generic/rpc_server.rb
index 7f3a38a..a0f4071 100644
--- a/src/ruby/lib/grpc/generic/rpc_server.rb
+++ b/src/ruby/lib/grpc/generic/rpc_server.rb
@@ -332,15 +332,13 @@
     # the current thread to terminate it.
     def run_till_terminated
       GRPC.trap_signals
-      stopped = false
       t = Thread.new do
         run
-        stopped = true
       end
+      t.abort_on_exception = true
       wait_till_running
-      loop do
+      until running_state == :stopped
         sleep SIGNAL_CHECK_PERIOD
-        break if stopped
         break unless GRPC.handle_signals
       end
       stop
@@ -416,7 +414,7 @@
       GRPC.logger.warn("NOT AVAILABLE: too many jobs_waiting: #{an_rpc}")
       noop = proc { |x| x }
       c = ActiveCall.new(an_rpc.call, @cq, noop, noop, an_rpc.deadline)
-      c.send_status(StatusCodes::RESOURCE_EXHAUSTED, '')
+      c.send_status(GRPC::Core::StatusCodes::RESOURCE_EXHAUSTED, '')
       nil
     end
 
@@ -427,7 +425,7 @@
       GRPC.logger.warn("UNIMPLEMENTED: #{an_rpc}")
       noop = proc { |x| x }
       c = ActiveCall.new(an_rpc.call, @cq, noop, noop, an_rpc.deadline)
-      c.send_status(StatusCodes::UNIMPLEMENTED, '')
+      c.send_status(GRPC::Core::StatusCodes::UNIMPLEMENTED, '')
       nil
     end
 
@@ -443,7 +441,12 @@
           unless active_call.nil?
             @pool.schedule(active_call) do |ac|
               c, mth = ac
-              rpc_descs[mth].run_server_method(c, rpc_handlers[mth])
+              begin
+                rpc_descs[mth].run_server_method(c, rpc_handlers[mth])
+              rescue StandardError
+                c.send_status(GRPC::Core::StatusCodes::INTERNAL,
+                              'Server handler failed')
+              end
             end
           end
         rescue Core::CallError, RuntimeError => e
diff --git a/src/ruby/lib/grpc/version.rb b/src/ruby/lib/grpc/version.rb
index 67c6a5d..01c8c5a 100644
--- a/src/ruby/lib/grpc/version.rb
+++ b/src/ruby/lib/grpc/version.rb
@@ -29,5 +29,5 @@
 
 # GRPC contains the General RPC module.
 module GRPC
-  VERSION = '0.14.0.dev'
+  VERSION = '0.15.0.dev'
 end
diff --git a/src/ruby/tools/version.rb b/src/ruby/tools/version.rb
index 12ad21b..dca7fd7 100644
--- a/src/ruby/tools/version.rb
+++ b/src/ruby/tools/version.rb
@@ -29,6 +29,6 @@
 
 module GRPC
   module Tools
-    VERSION = '0.14.0.dev'
+    VERSION = '0.15.0.dev'
   end
 end
diff --git a/templates/tools/distrib/python/grpcio_tools/grpc_version.py.template b/templates/tools/distrib/python/grpcio_tools/grpc_version.py.template
new file mode 100644
index 0000000..200905d
--- /dev/null
+++ b/templates/tools/distrib/python/grpcio_tools/grpc_version.py.template
@@ -0,0 +1,34 @@
+%YAML 1.2
+--- |
+  # Copyright 2015, Google Inc.
+  # All rights reserved.
+  #
+  # Redistribution and use in source and binary forms, with or without
+  # modification, are permitted provided that the following conditions are
+  # met:
+  #
+  #     * Redistributions of source code must retain the above copyright
+  # notice, this list of conditions and the following disclaimer.
+  #     * Redistributions in binary form must reproduce the above
+  # copyright notice, this list of conditions and the following disclaimer
+  # in the documentation and/or other materials provided with the
+  # distribution.
+  #     * Neither the name of Google Inc. nor the names of its
+  # contributors may be used to endorse or promote products derived from
+  # this software without specific prior written permission.
+  #
+  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+  # AUTO-GENERATED FROM `$REPO_ROOT/templates/tools/distrib/python/grpcio_tools/grpc_version.py.template`!!!
+
+  VERSION='${settings.python_version.pep440()}'
diff --git a/templates/tools/dockerfile/node_deps.include b/templates/tools/dockerfile/node_deps.include
index 7d37d67..7855fbf 100644
--- a/templates/tools/dockerfile/node_deps.include
+++ b/templates/tools/dockerfile/node_deps.include
@@ -4,4 +4,8 @@
 # Install nvm
 RUN touch .profile
 RUN curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.25.4/install.sh | bash
+# Install all versions of node that we want to test
 RUN /bin/bash -l -c "nvm install 0.12 && npm config set cache /tmp/npm-cache"
+RUN /bin/bash -l -c "nvm install 4 && npm config set cache /tmp/npm-cache"
+RUN /bin/bash -l -c "nvm install 5 && npm config set cache /tmp/npm-cache"
+RUN /bin/bash -l -c "nvm alias default 4"
\ No newline at end of file
diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_python/Dockerfile.template b/templates/tools/dockerfile/stress_test/grpc_interop_stress_python/Dockerfile.template
new file mode 100644
index 0000000..27e9eee
--- /dev/null
+++ b/templates/tools/dockerfile/stress_test/grpc_interop_stress_python/Dockerfile.template
@@ -0,0 +1,45 @@
+%YAML 1.2
+--- |
+  # Copyright 2016, Google Inc.
+  # All rights reserved.
+  #
+  # Redistribution and use in source and binary forms, with or without
+  # modification, are permitted provided that the following conditions are
+  # met:
+  #
+  #     * Redistributions of source code must retain the above copyright
+  # notice, this list of conditions and the following disclaimer.
+  #     * Redistributions in binary form must reproduce the above
+  # copyright notice, this list of conditions and the following disclaimer
+  # in the documentation and/or other materials provided with the
+  # distribution.
+  #     * Neither the name of Google Inc. nor the names of its
+  # contributors may be used to endorse or promote products derived from
+  # this software without specific prior written permission.
+  #
+  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  
+  FROM debian:jessie
+  
+  <%include file="../../apt_get_basic.include"/>
+  <%include file="../../ccache_setup.include"/>
+  <%include file="../../cxx_deps.include"/>
+  <%include file="../../gcp_api_libraries.include"/>
+  <%include file="../../python_deps.include"/>
+
+  RUN pip install coverage
+  RUN pip install oauth2client
+
+  # Define the default command.
+  CMD ["bash"]
+  
diff --git a/templates/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile.template b/templates/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile.template
new file mode 100644
index 0000000..8b933aa
--- /dev/null
+++ b/templates/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile.template
@@ -0,0 +1,41 @@
+%YAML 1.2
+--- |
+  # Copyright 2015, Google Inc.
+  # All rights reserved.
+  #
+  # Redistribution and use in source and binary forms, with or without
+  # modification, are permitted provided that the following conditions are
+  # met:
+  #
+  #     * Redistributions of source code must retain the above copyright
+  # notice, this list of conditions and the following disclaimer.
+  #     * Redistributions in binary form must reproduce the above
+  # copyright notice, this list of conditions and the following disclaimer
+  # in the documentation and/or other materials provided with the
+  # distribution.
+  #     * Neither the name of Google Inc. nor the names of its
+  # contributors may be used to endorse or promote products derived from
+  # this software without specific prior written permission.
+  #
+  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  
+  FROM debian:jessie
+  
+  <%include file="../../apt_get_basic.include"/>
+  <%include file="../../ccache_setup.include"/>
+  <%include file="../../cxx_deps.include"/>
+  <%include file="../../gcp_api_libraries.include"/>
+  <%include file="../../ruby_deps.include"/>
+  # Define the default command.
+  CMD ["bash"]
+  
diff --git a/templates/tools/dockerfile/test/sanity/Dockerfile.template b/templates/tools/dockerfile/test/sanity/Dockerfile.template
index 8d6f52d..8e2140e 100644
--- a/templates/tools/dockerfile/test/sanity/Dockerfile.template
+++ b/templates/tools/dockerfile/test/sanity/Dockerfile.template
@@ -43,7 +43,17 @@
         python-virtualenv ${"\\"}
         python-lxml
   RUN pip install simplejson mako
-
+  
+  #======================================
+  # More sanity test dependencies (bazel)
+  RUN echo "deb http://httpredir.debian.org/debian jessie-backports main" > \
+    /etc/apt/sources.list.d/backports.list
+  RUN apt-get update
+  RUN apt-get -t jessie-backports install -y openjdk-8-jdk
+  RUN git clone https://github.com/bazelbuild/bazel.git /bazel
+  RUN cd /bazel && ./compile.sh
+  RUN ln -s /bazel/output/bazel /bin/
+  
   #===================
   # Docker "inception"
   # Note this is quite the ugly hack.
diff --git a/test/core/end2end/fixtures/h2_census.c b/test/core/end2end/fixtures/h2_census.c
index ff2f028..e46b39e 100644
--- a/test/core/end2end/fixtures/h2_census.c
+++ b/test/core/end2end/fixtures/h2_census.c
@@ -111,7 +111,7 @@
 
 /* All test configurations */
 static grpc_end2end_test_config configs[] = {
-    {"chttp2/fullstack", FEATURE_MASK_SUPPORTS_DELAYED_CONNECTION,
+    {"chttp2/fullstack+census", FEATURE_MASK_SUPPORTS_DELAYED_CONNECTION,
      chttp2_create_fixture_fullstack, chttp2_init_client_fullstack,
      chttp2_init_server_fullstack, chttp2_tear_down_fullstack},
 };
diff --git a/test/core/surface/public_headers_must_be_c89.c b/test/core/surface/public_headers_must_be_c89.c
index 0eede6c..3eeb55d 100644
--- a/test/core/surface/public_headers_must_be_c89.c
+++ b/test/core/surface/public_headers_must_be_c89.c
@@ -41,6 +41,7 @@
 #include <grpc/impl/codegen/alloc.h>
 #include <grpc/impl/codegen/atm.h>
 #include <grpc/impl/codegen/byte_buffer.h>
+#include <grpc/impl/codegen/byte_buffer_reader.h>
 #include <grpc/impl/codegen/compression_types.h>
 #include <grpc/impl/codegen/connectivity_state.h>
 #include <grpc/impl/codegen/grpc_types.h>
diff --git a/test/cpp/end2end/async_end2end_test.cc b/test/cpp/end2end/async_end2end_test.cc
index 7e4d604..0232a9f 100644
--- a/test/cpp/end2end/async_end2end_test.cc
+++ b/test/cpp/end2end/async_end2end_test.cc
@@ -51,6 +51,7 @@
 #include "test/core/util/port.h"
 #include "test/core/util/test_config.h"
 #include "test/cpp/util/string_ref_helper.h"
+#include "test/cpp/util/test_credentials_provider.h"
 
 #ifdef GPR_POSIX_SOCKET
 #include "src/core/lib/iomgr/ev_posix.h"
@@ -58,6 +59,7 @@
 
 using grpc::testing::EchoRequest;
 using grpc::testing::EchoResponse;
+using grpc::testing::kTlsCredentialsType;
 using std::chrono::system_clock;
 
 GPR_TLS_DECL(g_is_async_end2end_test);
@@ -197,20 +199,37 @@
   bool spin_;
 };
 
-class AsyncEnd2endTest : public ::testing::TestWithParam<bool> {
+class TestScenario {
+ public:
+  TestScenario(bool non_block, const grpc::string& creds_type,
+               const grpc::string& content)
+      : disable_blocking(non_block),
+        credentials_type(creds_type),
+        message_content(content) {}
+  void Log() const {
+    gpr_log(GPR_INFO,
+            "Scenario: disable_blocking %d, credentials %s, message size %d",
+            disable_blocking, credentials_type.c_str(), static_cast<int>(message_content.size()));
+  }
+  bool disable_blocking;
+  const grpc::string credentials_type;
+  const grpc::string message_content;
+};
+
+class AsyncEnd2endTest : public ::testing::TestWithParam<TestScenario> {
  protected:
-  AsyncEnd2endTest() {}
+  AsyncEnd2endTest() { GetParam().Log(); }
 
   void SetUp() GRPC_OVERRIDE {
-    poll_overrider_.reset(new PollingOverrider(!GetParam()));
+    poll_overrider_.reset(new PollingOverrider(!GetParam().disable_blocking));
 
     int port = grpc_pick_unused_port_or_die();
     server_address_ << "localhost:" << port;
 
     // Setup server
     ServerBuilder builder;
-    builder.AddListeningPort(server_address_.str(),
-                             grpc::InsecureServerCredentials());
+    auto server_creds = GetServerCredentials(GetParam().credentials_type);
+    builder.AddListeningPort(server_address_.str(), server_creds);
     builder.RegisterService(&service_);
     cq_ = builder.AddCompletionQueue();
     server_ = builder.BuildAndStart();
@@ -230,8 +249,11 @@
   }
 
   void ResetStub() {
+    ChannelArguments args;
+    auto channel_creds =
+        GetChannelCredentials(GetParam().credentials_type, &args);
     std::shared_ptr<Channel> channel =
-        CreateChannel(server_address_.str(), InsecureChannelCredentials());
+        CreateCustomChannel(server_address_.str(), channel_creds, args);
     stub_ = grpc::testing::EchoTestService::NewStub(channel);
   }
 
@@ -247,22 +269,23 @@
       ServerContext srv_ctx;
       grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
 
-      send_request.set_message("Hello");
+      send_request.set_message(GetParam().message_content);
       std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
           stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
 
       service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
                            cq_.get(), tag(2));
 
-      Verifier(GetParam()).Expect(2, true).Verify(cq_.get());
+      Verifier(GetParam().disable_blocking).Expect(2, true).Verify(cq_.get());
       EXPECT_EQ(send_request.message(), recv_request.message());
 
       send_response.set_message(recv_request.message());
       response_writer.Finish(send_response, Status::OK, tag(3));
-      Verifier(GetParam()).Expect(3, true).Verify(cq_.get());
-
       response_reader->Finish(&recv_response, &recv_status, tag(4));
-      Verifier(GetParam()).Expect(4, true).Verify(cq_.get());
+      Verifier(GetParam().disable_blocking)
+          .Expect(3, true)
+          .Expect(4, true)
+          .Verify(cq_.get());
 
       EXPECT_EQ(send_response.message(), recv_response.message());
       EXPECT_TRUE(recv_status.ok());
@@ -302,7 +325,7 @@
   ServerContext srv_ctx;
   grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
 
-  send_request.set_message("Hello");
+  send_request.set_message(GetParam().message_content);
   std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
       stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
 
@@ -310,23 +333,22 @@
       std::chrono::system_clock::now());
   std::chrono::system_clock::time_point time_limit(
       std::chrono::system_clock::now() + std::chrono::seconds(10));
-  Verifier(GetParam()).Verify(cq_.get(), time_now);
-  Verifier(GetParam()).Verify(cq_.get(), time_now);
+  Verifier(GetParam().disable_blocking).Verify(cq_.get(), time_now);
+  Verifier(GetParam().disable_blocking).Verify(cq_.get(), time_now);
 
   service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
                        cq_.get(), tag(2));
 
-  Verifier(GetParam()).Expect(2, true).Verify(cq_.get(), time_limit);
+  Verifier(GetParam().disable_blocking)
+      .Expect(2, true)
+      .Verify(cq_.get(), time_limit);
   EXPECT_EQ(send_request.message(), recv_request.message());
 
   send_response.set_message(recv_request.message());
   response_writer.Finish(send_response, Status::OK, tag(3));
-  Verifier(GetParam())
-      .Expect(3, true)
-      .Verify(cq_.get(), std::chrono::system_clock::time_point::max());
-
   response_reader->Finish(&recv_response, &recv_status, tag(4));
-  Verifier(GetParam())
+  Verifier(GetParam().disable_blocking)
+      .Expect(3, true)
       .Expect(4, true)
       .Verify(cq_.get(), std::chrono::system_clock::time_point::max());
 
@@ -347,41 +369,48 @@
   ServerContext srv_ctx;
   ServerAsyncReader<EchoResponse, EchoRequest> srv_stream(&srv_ctx);
 
-  send_request.set_message("Hello");
+  send_request.set_message(GetParam().message_content);
   std::unique_ptr<ClientAsyncWriter<EchoRequest>> cli_stream(
       stub_->AsyncRequestStream(&cli_ctx, &recv_response, cq_.get(), tag(1)));
 
   service_.RequestRequestStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
                                 tag(2));
 
-  Verifier(GetParam()).Expect(2, true).Expect(1, true).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking)
+      .Expect(2, true)
+      .Expect(1, true)
+      .Verify(cq_.get());
 
   cli_stream->Write(send_request, tag(3));
-  Verifier(GetParam()).Expect(3, true).Verify(cq_.get());
-
   srv_stream.Read(&recv_request, tag(4));
-  Verifier(GetParam()).Expect(4, true).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking)
+      .Expect(3, true)
+      .Expect(4, true)
+      .Verify(cq_.get());
   EXPECT_EQ(send_request.message(), recv_request.message());
 
   cli_stream->Write(send_request, tag(5));
-  Verifier(GetParam()).Expect(5, true).Verify(cq_.get());
-
   srv_stream.Read(&recv_request, tag(6));
-  Verifier(GetParam()).Expect(6, true).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking)
+      .Expect(5, true)
+      .Expect(6, true)
+      .Verify(cq_.get());
 
   EXPECT_EQ(send_request.message(), recv_request.message());
   cli_stream->WritesDone(tag(7));
-  Verifier(GetParam()).Expect(7, true).Verify(cq_.get());
-
   srv_stream.Read(&recv_request, tag(8));
-  Verifier(GetParam()).Expect(8, false).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking)
+      .Expect(7, true)
+      .Expect(8, false)
+      .Verify(cq_.get());
 
   send_response.set_message(recv_request.message());
   srv_stream.Finish(send_response, Status::OK, tag(9));
-  Verifier(GetParam()).Expect(9, true).Verify(cq_.get());
-
   cli_stream->Finish(&recv_status, tag(10));
-  Verifier(GetParam()).Expect(10, true).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking)
+      .Expect(9, true)
+      .Expect(10, true)
+      .Verify(cq_.get());
 
   EXPECT_EQ(send_response.message(), recv_response.message());
   EXPECT_TRUE(recv_status.ok());
@@ -400,39 +429,45 @@
   ServerContext srv_ctx;
   ServerAsyncWriter<EchoResponse> srv_stream(&srv_ctx);
 
-  send_request.set_message("Hello");
+  send_request.set_message(GetParam().message_content);
   std::unique_ptr<ClientAsyncReader<EchoResponse>> cli_stream(
       stub_->AsyncResponseStream(&cli_ctx, send_request, cq_.get(), tag(1)));
 
   service_.RequestResponseStream(&srv_ctx, &recv_request, &srv_stream,
                                  cq_.get(), cq_.get(), tag(2));
 
-  Verifier(GetParam()).Expect(1, true).Expect(2, true).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking)
+      .Expect(1, true)
+      .Expect(2, true)
+      .Verify(cq_.get());
   EXPECT_EQ(send_request.message(), recv_request.message());
 
   send_response.set_message(recv_request.message());
   srv_stream.Write(send_response, tag(3));
-  Verifier(GetParam()).Expect(3, true).Verify(cq_.get());
-
   cli_stream->Read(&recv_response, tag(4));
-  Verifier(GetParam()).Expect(4, true).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking)
+      .Expect(3, true)
+      .Expect(4, true)
+      .Verify(cq_.get());
   EXPECT_EQ(send_response.message(), recv_response.message());
 
   srv_stream.Write(send_response, tag(5));
-  Verifier(GetParam()).Expect(5, true).Verify(cq_.get());
-
   cli_stream->Read(&recv_response, tag(6));
-  Verifier(GetParam()).Expect(6, true).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking)
+      .Expect(5, true)
+      .Expect(6, true)
+      .Verify(cq_.get());
   EXPECT_EQ(send_response.message(), recv_response.message());
 
   srv_stream.Finish(Status::OK, tag(7));
-  Verifier(GetParam()).Expect(7, true).Verify(cq_.get());
-
   cli_stream->Read(&recv_response, tag(8));
-  Verifier(GetParam()).Expect(8, false).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking)
+      .Expect(7, true)
+      .Expect(8, false)
+      .Verify(cq_.get());
 
   cli_stream->Finish(&recv_status, tag(9));
-  Verifier(GetParam()).Expect(9, true).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking).Expect(9, true).Verify(cq_.get());
 
   EXPECT_TRUE(recv_status.ok());
 }
@@ -450,41 +485,48 @@
   ServerContext srv_ctx;
   ServerAsyncReaderWriter<EchoResponse, EchoRequest> srv_stream(&srv_ctx);
 
-  send_request.set_message("Hello");
+  send_request.set_message(GetParam().message_content);
   std::unique_ptr<ClientAsyncReaderWriter<EchoRequest, EchoResponse>>
       cli_stream(stub_->AsyncBidiStream(&cli_ctx, cq_.get(), tag(1)));
 
   service_.RequestBidiStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
                              tag(2));
 
-  Verifier(GetParam()).Expect(1, true).Expect(2, true).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking)
+      .Expect(1, true)
+      .Expect(2, true)
+      .Verify(cq_.get());
 
   cli_stream->Write(send_request, tag(3));
-  Verifier(GetParam()).Expect(3, true).Verify(cq_.get());
-
   srv_stream.Read(&recv_request, tag(4));
-  Verifier(GetParam()).Expect(4, true).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking)
+      .Expect(3, true)
+      .Expect(4, true)
+      .Verify(cq_.get());
   EXPECT_EQ(send_request.message(), recv_request.message());
 
   send_response.set_message(recv_request.message());
   srv_stream.Write(send_response, tag(5));
-  Verifier(GetParam()).Expect(5, true).Verify(cq_.get());
-
   cli_stream->Read(&recv_response, tag(6));
-  Verifier(GetParam()).Expect(6, true).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking)
+      .Expect(5, true)
+      .Expect(6, true)
+      .Verify(cq_.get());
   EXPECT_EQ(send_response.message(), recv_response.message());
 
   cli_stream->WritesDone(tag(7));
-  Verifier(GetParam()).Expect(7, true).Verify(cq_.get());
-
   srv_stream.Read(&recv_request, tag(8));
-  Verifier(GetParam()).Expect(8, false).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking)
+      .Expect(7, true)
+      .Expect(8, false)
+      .Verify(cq_.get());
 
   srv_stream.Finish(Status::OK, tag(9));
-  Verifier(GetParam()).Expect(9, true).Verify(cq_.get());
-
   cli_stream->Finish(&recv_status, tag(10));
-  Verifier(GetParam()).Expect(10, true).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking)
+      .Expect(9, true)
+      .Expect(10, true)
+      .Verify(cq_.get());
 
   EXPECT_TRUE(recv_status.ok());
 }
@@ -503,7 +545,7 @@
   ServerContext srv_ctx;
   grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
 
-  send_request.set_message("Hello");
+  send_request.set_message(GetParam().message_content);
   std::pair<grpc::string, grpc::string> meta1("key1", "val1");
   std::pair<grpc::string, grpc::string> meta2("key2", "val2");
   std::pair<grpc::string, grpc::string> meta3("g.r.d-bin", "xyz");
@@ -516,7 +558,7 @@
 
   service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
                        cq_.get(), tag(2));
-  Verifier(GetParam()).Expect(2, true).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking).Expect(2, true).Verify(cq_.get());
   EXPECT_EQ(send_request.message(), recv_request.message());
   auto client_initial_metadata = srv_ctx.client_metadata();
   EXPECT_EQ(meta1.second,
@@ -529,11 +571,11 @@
 
   send_response.set_message(recv_request.message());
   response_writer.Finish(send_response, Status::OK, tag(3));
-
-  Verifier(GetParam()).Expect(3, true).Verify(cq_.get());
-
   response_reader->Finish(&recv_response, &recv_status, tag(4));
-  Verifier(GetParam()).Expect(4, true).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking)
+      .Expect(3, true)
+      .Expect(4, true)
+      .Verify(cq_.get());
 
   EXPECT_EQ(send_response.message(), recv_response.message());
   EXPECT_TRUE(recv_status.ok());
@@ -552,7 +594,7 @@
   ServerContext srv_ctx;
   grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
 
-  send_request.set_message("Hello");
+  send_request.set_message(GetParam().message_content);
   std::pair<grpc::string, grpc::string> meta1("key1", "val1");
   std::pair<grpc::string, grpc::string> meta2("key2", "val2");
 
@@ -561,15 +603,15 @@
 
   service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
                        cq_.get(), tag(2));
-  Verifier(GetParam()).Expect(2, true).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking).Expect(2, true).Verify(cq_.get());
   EXPECT_EQ(send_request.message(), recv_request.message());
   srv_ctx.AddInitialMetadata(meta1.first, meta1.second);
   srv_ctx.AddInitialMetadata(meta2.first, meta2.second);
   response_writer.SendInitialMetadata(tag(3));
-  Verifier(GetParam()).Expect(3, true).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking).Expect(3, true).Verify(cq_.get());
 
   response_reader->ReadInitialMetadata(tag(4));
-  Verifier(GetParam()).Expect(4, true).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking).Expect(4, true).Verify(cq_.get());
   auto server_initial_metadata = cli_ctx.GetServerInitialMetadata();
   EXPECT_EQ(meta1.second,
             ToString(server_initial_metadata.find(meta1.first)->second));
@@ -579,10 +621,11 @@
 
   send_response.set_message(recv_request.message());
   response_writer.Finish(send_response, Status::OK, tag(5));
-  Verifier(GetParam()).Expect(5, true).Verify(cq_.get());
-
   response_reader->Finish(&recv_response, &recv_status, tag(6));
-  Verifier(GetParam()).Expect(6, true).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking)
+      .Expect(5, true)
+      .Expect(6, true)
+      .Verify(cq_.get());
 
   EXPECT_EQ(send_response.message(), recv_response.message());
   EXPECT_TRUE(recv_status.ok());
@@ -601,7 +644,7 @@
   ServerContext srv_ctx;
   grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
 
-  send_request.set_message("Hello");
+  send_request.set_message(GetParam().message_content);
   std::pair<grpc::string, grpc::string> meta1("key1", "val1");
   std::pair<grpc::string, grpc::string> meta2("key2", "val2");
 
@@ -610,20 +653,22 @@
 
   service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
                        cq_.get(), tag(2));
-  Verifier(GetParam()).Expect(2, true).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking).Expect(2, true).Verify(cq_.get());
   EXPECT_EQ(send_request.message(), recv_request.message());
   response_writer.SendInitialMetadata(tag(3));
-  Verifier(GetParam()).Expect(3, true).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking).Expect(3, true).Verify(cq_.get());
 
   send_response.set_message(recv_request.message());
   srv_ctx.AddTrailingMetadata(meta1.first, meta1.second);
   srv_ctx.AddTrailingMetadata(meta2.first, meta2.second);
   response_writer.Finish(send_response, Status::OK, tag(4));
-
-  Verifier(GetParam()).Expect(4, true).Verify(cq_.get());
-
   response_reader->Finish(&recv_response, &recv_status, tag(5));
-  Verifier(GetParam()).Expect(5, true).Verify(cq_.get());
+
+  Verifier(GetParam().disable_blocking)
+      .Expect(4, true)
+      .Expect(5, true)
+      .Verify(cq_.get());
+
   EXPECT_EQ(send_response.message(), recv_response.message());
   EXPECT_TRUE(recv_status.ok());
   auto server_trailing_metadata = cli_ctx.GetServerTrailingMetadata();
@@ -647,7 +692,7 @@
   ServerContext srv_ctx;
   grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
 
-  send_request.set_message("Hello");
+  send_request.set_message(GetParam().message_content);
   std::pair<grpc::string, grpc::string> meta1("key1", "val1");
   std::pair<grpc::string, grpc::string> meta2(
       "key2-bin",
@@ -671,7 +716,7 @@
 
   service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
                        cq_.get(), tag(2));
-  Verifier(GetParam()).Expect(2, true).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking).Expect(2, true).Verify(cq_.get());
   EXPECT_EQ(send_request.message(), recv_request.message());
   auto client_initial_metadata = srv_ctx.client_metadata();
   EXPECT_EQ(meta1.second,
@@ -683,9 +728,9 @@
   srv_ctx.AddInitialMetadata(meta3.first, meta3.second);
   srv_ctx.AddInitialMetadata(meta4.first, meta4.second);
   response_writer.SendInitialMetadata(tag(3));
-  Verifier(GetParam()).Expect(3, true).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking).Expect(3, true).Verify(cq_.get());
   response_reader->ReadInitialMetadata(tag(4));
-  Verifier(GetParam()).Expect(4, true).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking).Expect(4, true).Verify(cq_.get());
   auto server_initial_metadata = cli_ctx.GetServerInitialMetadata();
   EXPECT_EQ(meta3.second,
             ToString(server_initial_metadata.find(meta3.first)->second));
@@ -697,11 +742,13 @@
   srv_ctx.AddTrailingMetadata(meta5.first, meta5.second);
   srv_ctx.AddTrailingMetadata(meta6.first, meta6.second);
   response_writer.Finish(send_response, Status::OK, tag(5));
-
-  Verifier(GetParam()).Expect(5, true).Verify(cq_.get());
-
   response_reader->Finish(&recv_response, &recv_status, tag(6));
-  Verifier(GetParam()).Expect(6, true).Verify(cq_.get());
+
+  Verifier(GetParam().disable_blocking)
+      .Expect(5, true)
+      .Expect(6, true)
+      .Verify(cq_.get());
+
   EXPECT_EQ(send_response.message(), recv_response.message());
   EXPECT_TRUE(recv_status.ok());
   auto server_trailing_metadata = cli_ctx.GetServerTrailingMetadata();
@@ -726,7 +773,7 @@
   ServerContext srv_ctx;
   grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
 
-  send_request.set_message("Hello");
+  send_request.set_message(GetParam().message_content);
   std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
       stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
 
@@ -734,15 +781,15 @@
   service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
                        cq_.get(), tag(2));
 
-  Verifier(GetParam()).Expect(2, true).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking).Expect(2, true).Verify(cq_.get());
   EXPECT_EQ(send_request.message(), recv_request.message());
 
   cli_ctx.TryCancel();
-  Verifier(GetParam()).Expect(5, true).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking).Expect(5, true).Verify(cq_.get());
   EXPECT_TRUE(srv_ctx.IsCancelled());
 
   response_reader->Finish(&recv_response, &recv_status, tag(4));
-  Verifier(GetParam()).Expect(4, false).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking).Expect(4, false).Verify(cq_.get());
 
   EXPECT_EQ(StatusCode::CANCELLED, recv_status.error_code());
 }
@@ -761,7 +808,7 @@
   ServerContext srv_ctx;
   grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
 
-  send_request.set_message("Hello");
+  send_request.set_message(GetParam().message_content);
   std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
       stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
 
@@ -769,25 +816,29 @@
   service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
                        cq_.get(), tag(2));
 
-  Verifier(GetParam()).Expect(2, true).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking).Expect(2, true).Verify(cq_.get());
   EXPECT_EQ(send_request.message(), recv_request.message());
 
   send_response.set_message(recv_request.message());
   response_writer.Finish(send_response, Status::OK, tag(3));
-  Verifier(GetParam()).Expect(3, true).Verify(cq_.get());
-  Verifier(GetParam()).Expect(5, true).Verify(cq_.get());
-  EXPECT_FALSE(srv_ctx.IsCancelled());
-
   response_reader->Finish(&recv_response, &recv_status, tag(4));
-  Verifier(GetParam()).Expect(4, true).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking)
+      .Expect(3, true)
+      .Expect(4, true)
+      .Expect(5, true)
+      .Verify(cq_.get());
+  EXPECT_FALSE(srv_ctx.IsCancelled());
 
   EXPECT_EQ(send_response.message(), recv_response.message());
   EXPECT_TRUE(recv_status.ok());
 }
 
 TEST_P(AsyncEnd2endTest, UnimplementedRpc) {
+  ChannelArguments args;
+  auto channel_creds =
+      GetChannelCredentials(GetParam().credentials_type, &args);
   std::shared_ptr<Channel> channel =
-      CreateChannel(server_address_.str(), InsecureChannelCredentials());
+      CreateCustomChannel(server_address_.str(), channel_creds, args);
   std::unique_ptr<grpc::testing::UnimplementedService::Stub> stub;
   stub = grpc::testing::UnimplementedService::NewStub(channel);
   EchoRequest send_request;
@@ -795,12 +846,12 @@
   Status recv_status;
 
   ClientContext cli_ctx;
-  send_request.set_message("Hello");
+  send_request.set_message(GetParam().message_content);
   std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
       stub->AsyncUnimplemented(&cli_ctx, send_request, cq_.get()));
 
   response_reader->Finish(&recv_response, &recv_status, tag(4));
-  Verifier(GetParam()).Expect(4, false).Verify(cq_.get());
+  Verifier(GetParam().disable_blocking).Expect(4, false).Verify(cq_.get());
 
   EXPECT_EQ(StatusCode::UNIMPLEMENTED, recv_status.error_code());
   EXPECT_EQ("", recv_status.error_message());
@@ -847,23 +898,25 @@
     // Initiate the 'RequestStream' call on client
     std::unique_ptr<ClientAsyncWriter<EchoRequest>> cli_stream(
         stub_->AsyncRequestStream(&cli_ctx, &recv_response, cq_.get(), tag(1)));
-    Verifier(GetParam()).Expect(1, true).Verify(cq_.get());
+    Verifier(GetParam().disable_blocking).Expect(1, true).Verify(cq_.get());
 
     // On the server, request to be notified of 'RequestStream' calls
     // and receive the 'RequestStream' call just made by the client
     srv_ctx.AsyncNotifyWhenDone(tag(11));
     service_.RequestRequestStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
                                   tag(2));
-    Verifier(GetParam()).Expect(2, true).Verify(cq_.get());
+    Verifier(GetParam().disable_blocking).Expect(2, true).Verify(cq_.get());
 
     // Client sends 3 messages (tags 3, 4 and 5)
     for (int tag_idx = 3; tag_idx <= 5; tag_idx++) {
       send_request.set_message("Ping " + std::to_string(tag_idx));
       cli_stream->Write(send_request, tag(tag_idx));
-      Verifier(GetParam()).Expect(tag_idx, true).Verify(cq_.get());
+      Verifier(GetParam().disable_blocking)
+          .Expect(tag_idx, true)
+          .Verify(cq_.get());
     }
     cli_stream->WritesDone(tag(6));
-    Verifier(GetParam()).Expect(6, true).Verify(cq_.get());
+    Verifier(GetParam().disable_blocking).Expect(6, true).Verify(cq_.get());
 
     bool expected_server_cq_result = true;
     bool ignore_cq_result = false;
@@ -871,7 +924,7 @@
 
     if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
       srv_ctx.TryCancel();
-      Verifier(GetParam()).Expect(11, true).Verify(cq_.get());
+      Verifier(GetParam().disable_blocking).Expect(11, true).Verify(cq_.get());
       EXPECT_TRUE(srv_ctx.IsCancelled());
 
       // Since cancellation is done before server reads any results, we know
@@ -881,7 +934,7 @@
 
     std::thread* server_try_cancel_thd = NULL;
 
-    auto verif = Verifier(GetParam());
+    auto verif = Verifier(GetParam().disable_blocking);
 
     if (server_try_cancel == CANCEL_DURING_PROCESSING) {
       server_try_cancel_thd =
@@ -939,13 +992,13 @@
    // Server sends the final message and cancelled status (but the RPC is
    // already cancelled at this point, so we expect the operation to fail)
     srv_stream.Finish(send_response, Status::CANCELLED, tag(9));
-    Verifier(GetParam()).Expect(9, false).Verify(cq_.get());
+    Verifier(GetParam().disable_blocking).Expect(9, false).Verify(cq_.get());
 
     // Client will see the cancellation
     cli_stream->Finish(&recv_status, tag(10));
     // TODO(sreek): The expectation here should be true. This is a bug (github
     // issue #4972)
-    Verifier(GetParam()).Expect(10, false).Verify(cq_.get());
+    Verifier(GetParam().disable_blocking).Expect(10, false).Verify(cq_.get());
     EXPECT_FALSE(recv_status.ok());
     EXPECT_EQ(::grpc::StatusCode::CANCELLED, recv_status.error_code());
   }
@@ -979,13 +1032,13 @@
     // Initiate the 'ResponseStream' call on the client
     std::unique_ptr<ClientAsyncReader<EchoResponse>> cli_stream(
         stub_->AsyncResponseStream(&cli_ctx, send_request, cq_.get(), tag(1)));
-    Verifier(GetParam()).Expect(1, true).Verify(cq_.get());
+    Verifier(GetParam().disable_blocking).Expect(1, true).Verify(cq_.get());
     // On the server, request to be notified of 'ResponseStream' calls and
     // receive the call just made by the client
     srv_ctx.AsyncNotifyWhenDone(tag(11));
     service_.RequestResponseStream(&srv_ctx, &recv_request, &srv_stream,
                                    cq_.get(), cq_.get(), tag(2));
-    Verifier(GetParam()).Expect(2, true).Verify(cq_.get());
+    Verifier(GetParam().disable_blocking).Expect(2, true).Verify(cq_.get());
     EXPECT_EQ(send_request.message(), recv_request.message());
 
     bool expected_cq_result = true;
@@ -994,7 +1047,7 @@
 
     if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
       srv_ctx.TryCancel();
-      Verifier(GetParam()).Expect(11, true).Verify(cq_.get());
+      Verifier(GetParam().disable_blocking).Expect(11, true).Verify(cq_.get());
       EXPECT_TRUE(srv_ctx.IsCancelled());
 
       // We know for sure that all cq results will be false from this point
@@ -1004,7 +1057,7 @@
 
     std::thread* server_try_cancel_thd = NULL;
 
-    auto verif = Verifier(GetParam());
+    auto verif = Verifier(GetParam().disable_blocking);
 
     if (server_try_cancel == CANCEL_DURING_PROCESSING) {
       server_try_cancel_thd =
@@ -1064,7 +1117,7 @@
     // Client attempts to read the three messages from the server
     for (int tag_idx = 6; tag_idx <= 8; tag_idx++) {
       cli_stream->Read(&recv_response, tag(tag_idx));
-      Verifier(GetParam())
+      Verifier(GetParam().disable_blocking)
           .Expect(tag_idx, expected_cq_result)
           .Verify(cq_.get(), ignore_cq_result);
     }
@@ -1075,11 +1128,11 @@
 
     // Server finishes the stream (but the RPC is already cancelled)
     srv_stream.Finish(Status::CANCELLED, tag(9));
-    Verifier(GetParam()).Expect(9, false).Verify(cq_.get());
+    Verifier(GetParam().disable_blocking).Expect(9, false).Verify(cq_.get());
 
     // Client will see the cancellation
     cli_stream->Finish(&recv_status, tag(10));
-    Verifier(GetParam()).Expect(10, true).Verify(cq_.get());
+    Verifier(GetParam().disable_blocking).Expect(10, true).Verify(cq_.get());
     EXPECT_FALSE(recv_status.ok());
     EXPECT_EQ(::grpc::StatusCode::CANCELLED, recv_status.error_code());
   }
@@ -1114,19 +1167,19 @@
     // Initiate the call from the client side
     std::unique_ptr<ClientAsyncReaderWriter<EchoRequest, EchoResponse>>
         cli_stream(stub_->AsyncBidiStream(&cli_ctx, cq_.get(), tag(1)));
-    Verifier(GetParam()).Expect(1, true).Verify(cq_.get());
+    Verifier(GetParam().disable_blocking).Expect(1, true).Verify(cq_.get());
 
     // On the server, request to be notified of the 'BidiStream' call and
     // receive the call just made by the client
     srv_ctx.AsyncNotifyWhenDone(tag(11));
     service_.RequestBidiStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
                                tag(2));
-    Verifier(GetParam()).Expect(2, true).Verify(cq_.get());
+    Verifier(GetParam().disable_blocking).Expect(2, true).Verify(cq_.get());
 
     // Client sends the first and the only message
     send_request.set_message("Ping");
     cli_stream->Write(send_request, tag(3));
-    Verifier(GetParam()).Expect(3, true).Verify(cq_.get());
+    Verifier(GetParam().disable_blocking).Expect(3, true).Verify(cq_.get());
 
     bool expected_cq_result = true;
     bool ignore_cq_result = false;
@@ -1134,7 +1187,7 @@
 
     if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
       srv_ctx.TryCancel();
-      Verifier(GetParam()).Expect(11, true).Verify(cq_.get());
+      Verifier(GetParam().disable_blocking).Expect(11, true).Verify(cq_.get());
       EXPECT_TRUE(srv_ctx.IsCancelled());
 
       // We know for sure that all cq results will be false from this point
@@ -1144,7 +1197,7 @@
 
     std::thread* server_try_cancel_thd = NULL;
 
-    auto verif = Verifier(GetParam());
+    auto verif = Verifier(GetParam().disable_blocking);
 
     if (server_try_cancel == CANCEL_DURING_PROCESSING) {
       server_try_cancel_thd =
@@ -1244,10 +1297,10 @@
     // know that cq results are supposed to return false on server.
 
     srv_stream.Finish(Status::CANCELLED, tag(9));
-    Verifier(GetParam()).Expect(9, false).Verify(cq_.get());
+    Verifier(GetParam().disable_blocking).Expect(9, false).Verify(cq_.get());
 
     cli_stream->Finish(&recv_status, tag(10));
-    Verifier(GetParam()).Expect(10, true).Verify(cq_.get());
+    Verifier(GetParam().disable_blocking).Expect(10, true).Verify(cq_.get());
     EXPECT_FALSE(recv_status.ok());
     EXPECT_EQ(grpc::StatusCode::CANCELLED, recv_status.error_code());
   }
@@ -1289,11 +1342,48 @@
   TestBidiStreamingServerCancel(CANCEL_AFTER_PROCESSING);
 }
 
+std::vector<TestScenario> CreateTestScenarios(bool test_disable_blocking,
+                                              bool test_secure,
+                                              int test_big_limit) {
+  std::vector<TestScenario> scenarios;
+  std::vector<grpc::string> credentials_types;
+  std::vector<grpc::string> messages;
+
+  credentials_types.push_back(kInsecureCredentialsType);
+  auto sec_list = GetSecureCredentialsTypeList();
+  for (auto sec = sec_list.begin(); sec != sec_list.end(); sec++) {
+    credentials_types.push_back(*sec);
+  }
+
+  messages.push_back("Hello");
+  for (int sz = 1; sz < test_big_limit; sz *= 2) {
+    grpc::string big_msg;
+    for (int i = 0; i < sz * 1024; i++) {
+      char c = 'a' + (i % 26);
+      big_msg += c;
+    }
+    messages.push_back(big_msg);
+  }
+
+  for (auto cred = credentials_types.begin(); cred != credentials_types.end();
+       ++cred) {
+    for (auto msg = messages.begin(); msg != messages.end(); msg++) {
+      scenarios.push_back(TestScenario(false, *cred, *msg));
+      if (test_disable_blocking) {
+        scenarios.push_back(TestScenario(true, *cred, *msg));
+      }
+    }
+  }
+  return scenarios;
+}
+
 INSTANTIATE_TEST_CASE_P(AsyncEnd2end, AsyncEnd2endTest,
-                        ::testing::Values(false, true));
+                        ::testing::ValuesIn(CreateTestScenarios(true, true,
+                                                                1024)));
 INSTANTIATE_TEST_CASE_P(AsyncEnd2endServerTryCancel,
                         AsyncEnd2endServerTryCancelTest,
-                        ::testing::Values(false));
+                        ::testing::ValuesIn(CreateTestScenarios(false, false,
+                                                                0)));
 
 }  // namespace
 }  // namespace testing
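
The new CreateTestScenarios helper above enumerates the cross product of credential types, payload sizes, and blocking modes. Its payload loop doubles sz from 1 up to (but not including) test_big_limit, so the AsyncEnd2end instantiation with test_big_limit = 1024 runs every test with "Hello" plus 1 KiB through 512 KiB messages. A small Python sketch of the same enumeration, included only to make that count concrete; it is not part of the patch:

    # Mirrors the message-size loop in CreateTestScenarios: payload sizes double
    # from 1 KiB up to (but excluding) test_big_limit KiB.
    def message_sizes(test_big_limit=1024):
        messages = ['Hello']
        sz = 1
        while sz < test_big_limit:
            messages.append(''.join(chr(ord('a') + i % 26) for i in range(sz * 1024)))
            sz *= 2
        return messages

    print(len(message_sizes()))  # 11 payloads: "Hello" plus 1 KiB .. 512 KiB
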
diff --git a/test/distrib/csharp/DistribTest/DistribTest.csproj b/test/distrib/csharp/DistribTest/DistribTest.csproj
index 7605495..1acb34d 100644
--- a/test/distrib/csharp/DistribTest/DistribTest.csproj
+++ b/test/distrib/csharp/DistribTest/DistribTest.csproj
@@ -113,12 +113,12 @@
     <None Include="packages.config" />
   </ItemGroup>
   <Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
-  <Import Project="..\packages\grpc.native.csharp.__GRPC_NUGET_VERSION__\build\portable-net45+netcore45+wpa81+wp8\grpc.native.csharp.targets" Condition="Exists('..\packages\grpc.native.csharp.__GRPC_NUGET_VERSION__\build\portable-net45+netcore45+wpa81+wp8\grpc.native.csharp.targets')" />
+  <Import Project="..\packages\Grpc.Core.__GRPC_NUGET_VERSION__\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.__GRPC_NUGET_VERSION__\build\net45\Grpc.Core.targets')" />
   <Target Name="EnsureNuGetPackageBuildImports" BeforeTargets="PrepareForBuild">
     <PropertyGroup>
       <ErrorText>This project references NuGet package(s) that are missing on this computer. Enable NuGet Package Restore to download them.  For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.</ErrorText>
     </PropertyGroup>
-    <Error Condition="!Exists('..\packages\grpc.native.csharp.__GRPC_NUGET_VERSION__\build\portable-net45+netcore45+wpa81+wp8\grpc.native.csharp.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\grpc.native.csharp.__GRPC_NUGET_VERSION__\build\portable-net45+netcore45+wpa81+wp8\grpc.native.csharp.targets'))" />
+    <Error Condition="!Exists('..\packages\Grpc.Core.__GRPC_NUGET_VERSION__\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.__GRPC_NUGET_VERSION__\build\net45\Grpc.Core.targets'))" />
     <Error Condition="!Exists('..\packages\Microsoft.Bcl.Build.1.0.21\build\Microsoft.Bcl.Build.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Microsoft.Bcl.Build.1.0.21\build\Microsoft.Bcl.Build.targets'))" />
   </Target>
   <Import Project="..\packages\Microsoft.Bcl.Build.1.0.21\build\Microsoft.Bcl.Build.targets" Condition="Exists('..\packages\Microsoft.Bcl.Build.1.0.21\build\Microsoft.Bcl.Build.targets')" />
@@ -129,4 +129,4 @@
   <Target Name="AfterBuild">
   </Target>
   -->
-</Project>
\ No newline at end of file
+</Project>
diff --git a/test/distrib/csharp/DistribTest/packages.config b/test/distrib/csharp/DistribTest/packages.config
index aca09f6..6963019 100644
--- a/test/distrib/csharp/DistribTest/packages.config
+++ b/test/distrib/csharp/DistribTest/packages.config
@@ -6,11 +6,10 @@
   <package id="Grpc" version="__GRPC_NUGET_VERSION__" targetFramework="net45" />
   <package id="Grpc.Auth" version="__GRPC_NUGET_VERSION__" targetFramework="net45" />
   <package id="Grpc.Core" version="__GRPC_NUGET_VERSION__" targetFramework="net45" />
-  <package id="grpc.native.csharp" version="__GRPC_NUGET_VERSION__" targetFramework="net45" />
   <package id="Ix-Async" version="1.2.3" targetFramework="net45" />
   <package id="Microsoft.Bcl" version="1.1.10" targetFramework="net45" />
   <package id="Microsoft.Bcl.Async" version="1.0.168" targetFramework="net45" />
   <package id="Microsoft.Bcl.Build" version="1.0.21" targetFramework="net45" />
   <package id="Microsoft.Net.Http" version="2.2.29" targetFramework="net45" />
   <package id="Newtonsoft.Json" version="7.0.1" targetFramework="net45" />
-</packages>
\ No newline at end of file
+</packages>
diff --git a/test/distrib/python/run_distrib_test.sh b/test/distrib/python/run_distrib_test.sh
index 79893af..6196e54 100755
--- a/test/distrib/python/run_distrib_test.sh
+++ b/test/distrib/python/run_distrib_test.sh
@@ -33,8 +33,8 @@
 cd $(dirname $0)
 
 # Pick up the source dist archive whatever its version is
-SDIST_ARCHIVE=$EXTERNAL_GIT_ROOT/input_artifacts/grpcio-*.tar.gz
-BDIST_DIR="file://$EXTERNAL_GIT_ROOT/input_artifacts"
+BDIST_ARCHIVES=$EXTERNAL_GIT_ROOT/input_artifacts/grpcio-*.whl
+TOOLS_BDIST_ARCHIVES=$EXTERNAL_GIT_ROOT/input_artifacts/grpcio_tools-*.whl
 
 if [ ! -f ${SDIST_ARCHIVE} ]
 then
@@ -42,17 +42,22 @@
   exit 1
 fi
 
-PIP=pip2
-which $PIP || PIP=pip
 PYTHON=python2
+PIP=pip2
 which $PYTHON || PYTHON=python
+which $PIP || PIP=pip
 
 # TODO(jtattermusch): this shouldn't be required
-$PIP install --upgrade six
+${PIP} install --upgrade six pip
 
-GRPC_PYTHON_BINARIES_REPOSITORY="${BDIST_DIR}" \
-    $PIP install \
-    ${SDIST_ARCHIVE}
+# At least one of the bdist packages has to succeed (whichever one matches the
+# test machine, anyway).
+for bdist in ${BDIST_ARCHIVES} ${TOOLS_BDIST_ARCHIVES}; do
+  ($PYTHON -m pip install $bdist) || true
+done
+
+# TODO(jtattermusch): add a .proto file to the distribtest, generate python
+# code from it and then use the generated code from distribtest.py
+$PYTHON -m grpc.protoc.compiler
 
 $PYTHON distribtest.py
-
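
Because each wheel install in the loop above is wrapped in '|| true', an empty input_artifacts directory would only surface later when distribtest.py runs. A minimal pre-flight sketch in Python, assuming the same EXTERNAL_GIT_ROOT layout and glob patterns the script uses; the helper itself is hypothetical and not part of this patch:

    # Hypothetical guard: fail early if no grpcio/grpcio_tools wheels are
    # present in input_artifacts before running the tolerant install loop.
    import glob
    import os
    import sys

    root = os.environ.get('EXTERNAL_GIT_ROOT', '.')
    patterns = ['grpcio-*.whl', 'grpcio_tools-*.whl']
    wheels = [w for p in patterns
              for w in glob.glob(os.path.join(root, 'input_artifacts', p))]
    if not wheels:
        sys.exit('no grpcio or grpcio_tools wheels found in input_artifacts')
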
diff --git a/tools/distrib/check_include_guards.py b/tools/distrib/check_include_guards.py
index 897a899..6c160c6 100755
--- a/tools/distrib/check_include_guards.py
+++ b/tools/distrib/check_include_guards.py
@@ -31,6 +31,7 @@
 
 import argparse
 import os
+import os.path
 import re
 import sys
 import subprocess
@@ -187,6 +188,8 @@
 try:
   filename_list = subprocess.check_output(FILE_LIST_COMMAND,
                                           shell=True).splitlines()
+  # Filter out non-existent files (i.e., files removed or renamed)
+  filename_list = (f for f in filename_list if os.path.isfile(f))
 except subprocess.CalledProcessError:
   sys.exit(0)
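
The added filter keeps check_include_guards.py from failing on paths that git reports but that no longer exist in the working tree. Stripped of the surrounding script, the pattern is just the following; FILE_LIST_COMMAND here is a placeholder, the real command is defined elsewhere in the script:

    # Sketch of the filtering step: keep only paths that still exist on disk,
    # so files removed or renamed since the listing are silently skipped.
    import os.path
    import subprocess

    FILE_LIST_COMMAND = 'git ls-files'  # placeholder for the script's real command
    filename_list = subprocess.check_output(FILE_LIST_COMMAND,
                                            shell=True).splitlines()
    filename_list = [f for f in filename_list if os.path.isfile(f)]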
 
diff --git a/tools/distrib/python/bazel_deps.sh b/tools/distrib/python/bazel_deps.sh
new file mode 100755
index 0000000..de3ee07
--- /dev/null
+++ b/tools/distrib/python/bazel_deps.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+cd $(dirname $0)/../../../
+
+# First check if bazel is installed on the machine. If it is, then we don't need
+# to invoke the docker bazel.
+if [ "bazel version" ]
+then
+  cd third_party/protobuf
+  bazel query 'deps('$1')'
+else
+  docker build -t bazel `realpath ./tools/dockerfile/bazel/`
+  docker run -v "`realpath .`:/src/grpc/"          \
+    -w /src/grpc/third_party/protobuf              \
+    bazel                                          \
+    bazel query 'deps('$1')'
+fi
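
This script is meant to be driven from the Python tooling; check_grpcio_tools.py further down imports make_grpcio_tools and calls its get_deps helper. A rough sketch of how such a wrapper could shell out to bazel_deps.sh; the wrapper and the query target shown are assumptions, since make_grpcio_tools.py itself is not part of this diff:

    # Hypothetical wrapper around bazel_deps.sh; the real one lives in
    # make_grpcio_tools.py, which this diff does not include.
    import subprocess

    BAZEL_DEPS = 'tools/distrib/python/bazel_deps.sh'
    BAZEL_DEPS_PROTOC_LIB_QUERY = '//:protoc_lib'  # assumed query target

    def get_deps(query):
        # Returns the raw output of bazel query 'deps(<query>)' as produced
        # by bazel_deps.sh, either locally or inside the docker container.
        return subprocess.check_output([BAZEL_DEPS, query])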
diff --git a/tools/distrib/python/check_grpcio_tools.py b/tools/distrib/python/check_grpcio_tools.py
new file mode 100755
index 0000000..baf2ff4
--- /dev/null
+++ b/tools/distrib/python/check_grpcio_tools.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import cStringIO
+
+import make_grpcio_tools as make
+
+OUT_OF_DATE_MESSAGE = """file {} is out of date
+
+Have you called tools/distrib/python/make_grpcio_tools.py since upgrading protobuf?"""
+
+check_protoc_lib_deps_content = make.get_deps(make.BAZEL_DEPS_PROTOC_LIB_QUERY)
+
+with open(make.GRPC_PYTHON_PROTOC_LIB_DEPS, 'r') as protoc_lib_deps_file:
+  if protoc_lib_deps_file.read() != check_protoc_lib_deps_content:
+    print(OUT_OF_DATE_MESSAGE.format(make.GRPC_PYTHON_PROTOC_LIB_DEPS))
+    raise SystemExit(1)
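
The check only detects drift between protoc_lib_deps.py and the protobuf submodule; regenerating the file is left to make_grpcio_tools.py. Assuming that module exposes the same names imported above, the write side would look roughly like this (a sketch, not the actual regeneration script):

    # Hypothetical regeneration sketch: recompute the deps content and rewrite
    # the file so the comparison in check_grpcio_tools.py passes again.
    import make_grpcio_tools as make

    content = make.get_deps(make.BAZEL_DEPS_PROTOC_LIB_QUERY)
    with open(make.GRPC_PYTHON_PROTOC_LIB_DEPS, 'w') as deps_file:
        deps_file.write(content)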
diff --git a/tools/distrib/python/grpcio_tools/.gitignore b/tools/distrib/python/grpcio_tools/.gitignore
new file mode 100644
index 0000000..979704d
--- /dev/null
+++ b/tools/distrib/python/grpcio_tools/.gitignore
@@ -0,0 +1,7 @@
+build/
+protobuf/
+grpc_plugin/
+grpc_root/
+*.c
+*.cpp
+*.egg-info
diff --git a/tools/distrib/python/grpcio_tools/MANIFEST.in b/tools/distrib/python/grpcio_tools/MANIFEST.in
new file mode 100644
index 0000000..e6ab312
--- /dev/null
+++ b/tools/distrib/python/grpcio_tools/MANIFEST.in
@@ -0,0 +1,5 @@
+include protoc_deps.py
+include protoc_lib_deps.py
+graft grpc
+graft grpc_root
+graft third_party
diff --git a/tools/distrib/python/grpcio_tools/grpc/__init__.py b/tools/distrib/python/grpcio_tools/grpc/__init__.py
new file mode 100644
index 0000000..70ac5ed
--- /dev/null
+++ b/tools/distrib/python/grpcio_tools/grpc/__init__.py
@@ -0,0 +1,30 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+__import__('pkg_resources').declare_namespace(__name__)
diff --git a/tools/distrib/python/grpcio_tools/grpc/protoc/__init__.py b/tools/distrib/python/grpcio_tools/grpc/protoc/__init__.py
new file mode 100644
index 0000000..d5ad73a
--- /dev/null
+++ b/tools/distrib/python/grpcio_tools/grpc/protoc/__init__.py
@@ -0,0 +1,29 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/tools/distrib/python/grpcio_tools/grpc/protoc/compiler.py b/tools/distrib/python/grpcio_tools/grpc/protoc/compiler.py
new file mode 100644
index 0000000..caafc54
--- /dev/null
+++ b/tools/distrib/python/grpcio_tools/grpc/protoc/compiler.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+
+from grpc.protoc import protoc_compiler
+
+
+if __name__ == '__main__':
+  protoc_compiler.run_main(sys.argv)
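
Since compiler.py simply forwards sys.argv to run_main, the bundled protoc can also be invoked programmatically. A small usage sketch, assuming a foo.proto in the current directory; the --python_out and --grpc_python_out flags match the generators registered in main.cc below:

    # Hypothetical usage: run the bundled protoc with both the stock Python
    # generator and the gRPC Python plugin over foo.proto.
    from grpc.protoc import protoc_compiler

    protoc_compiler.run_main(
        ['protoc', '-I.', '--python_out=.', '--grpc_python_out=.', 'foo.proto'])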
diff --git a/tools/distrib/python/grpcio_tools/grpc/protoc/main.cc b/tools/distrib/python/grpcio_tools/grpc/protoc/main.cc
new file mode 100644
index 0000000..c9936a3
--- /dev/null
+++ b/tools/distrib/python/grpcio_tools/grpc/protoc/main.cc
@@ -0,0 +1,54 @@
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <google/protobuf/compiler/command_line_interface.h>
+#include <google/protobuf/compiler/python/python_generator.h>
+
+#include "src/compiler/python_generator.h"
+
+#include "grpc/protoc/main.h"
+
+int protoc_main(int argc, char* argv[]) {
+  google::protobuf::compiler::CommandLineInterface cli;
+  cli.AllowPlugins("protoc-");
+
+  // Proto2 Python
+  google::protobuf::compiler::python::Generator py_generator;
+  cli.RegisterGenerator("--python_out", &py_generator,
+                        "Generate Python source file.");
+
+  // gRPC Python
+  grpc_python_generator::GeneratorConfiguration grpc_py_config;
+  grpc_py_config.beta_package_root = "grpc.beta";
+  grpc_python_generator::PythonGrpcGenerator grpc_py_generator(grpc_py_config);
+  cli.RegisterGenerator("--grpc_python_out", &grpc_py_generator,
+                        "Generate Python source file.");
+
+  return cli.Run(argc, argv);
+}
diff --git a/tools/distrib/python/grpcio_tools/grpc/protoc/main.h b/tools/distrib/python/grpcio_tools/grpc/protoc/main.h
new file mode 100644
index 0000000..ea2860f
--- /dev/null
+++ b/tools/distrib/python/grpcio_tools/grpc/protoc/main.h
@@ -0,0 +1,33 @@
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// We declare `protoc_main` here since we want access to it from Cython as an
+// extern but *without* triggering a dllimport declspec when on Windows.
+int protoc_main(int argc, char *argv[]);
diff --git a/tools/distrib/python/grpcio_tools/grpc/protoc/protoc_compiler.pyx b/tools/distrib/python/grpcio_tools/grpc/protoc/protoc_compiler.pyx
new file mode 100644
index 0000000..af15f3d
--- /dev/null
+++ b/tools/distrib/python/grpcio_tools/grpc/protoc/protoc_compiler.pyx
@@ -0,0 +1,39 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from libc cimport stdlib
+
+cdef extern from "grpc/protoc/main.h":
+  int protoc_main(int argc, char *argv[])
+
+def run_main(list args not None):
+  cdef char **argv = <char **>stdlib.malloc(len(args)*sizeof(char *))
+  for i in range(len(args)):
+    argv[i] = args[i]
+  return protoc_main(len(args), argv)
diff --git a/tools/distrib/python/grpcio_tools/grpc_version.py b/tools/distrib/python/grpcio_tools/grpc_version.py
new file mode 100644
index 0000000..1267d0e
--- /dev/null
+++ b/tools/distrib/python/grpcio_tools/grpc_version.py
@@ -0,0 +1,32 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# AUTO-GENERATED FROM `$REPO_ROOT/templates/tools/distrib/python/grpcio_tools/grpc_version.py.template`!!!
+
+VERSION='0.15.0.dev0'
diff --git a/tools/distrib/python/grpcio_tools/protoc_lib_deps.py b/tools/distrib/python/grpcio_tools/protoc_lib_deps.py
new file mode 100644
index 0000000..9f31172
--- /dev/null
+++ b/tools/distrib/python/grpcio_tools/protoc_lib_deps.py
@@ -0,0 +1,32 @@
+
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# AUTO-GENERATED BY make_grpcio_tools.py!
+CC_FILES=['google/protobuf/compiler/zip_writer.cc', 'google/protobuf/compiler/subprocess.cc', 'google/protobuf/compiler/ruby/ruby_generator.cc', 'google/protobuf/compiler/python/python_generator.cc', 'google/protobuf/compiler/plugin.pb.cc', 'google/protobuf/compiler/plugin.cc', 'google/protobuf/compiler/objectivec/objectivec_primitive_field.cc', 'google/protobuf/compiler/objectivec/objectivec_oneof.cc', 'google/protobuf/compiler/objectivec/objectivec_message_field.cc', 'google/protobuf/compiler/objectivec/objectivec_message.cc', 'google/protobuf/compiler/objectivec/objectivec_map_field.cc', 'google/protobuf/compiler/objectivec/objectivec_helpers.cc', 'google/protobuf/compiler/objectivec/objectivec_generator.cc', 'google/protobuf/compiler/objectivec/objectivec_file.cc', 'google/protobuf/compiler/objectivec/objectivec_field.cc', 'google/protobuf/compiler/objectivec/objectivec_extension.cc', 'google/protobuf/compiler/objectivec/objectivec_enum_field.cc', 'google/protobuf/compiler/objectivec/objectivec_enum.cc', 'google/protobuf/compiler/js/js_generator.cc', 'google/protobuf/compiler/javanano/javanano_primitive_field.cc', 'google/protobuf/compiler/javanano/javanano_message_field.cc', 'google/protobuf/compiler/javanano/javanano_message.cc', 'google/protobuf/compiler/javanano/javanano_map_field.cc', 'google/protobuf/compiler/javanano/javanano_helpers.cc', 'google/protobuf/compiler/javanano/javanano_generator.cc', 'google/protobuf/compiler/javanano/javanano_file.cc', 'google/protobuf/compiler/javanano/javanano_field.cc', 'google/protobuf/compiler/javanano/javanano_extension.cc', 'google/protobuf/compiler/javanano/javanano_enum_field.cc', 'google/protobuf/compiler/javanano/javanano_enum.cc', 'google/protobuf/compiler/java/java_string_field_lite.cc', 'google/protobuf/compiler/java/java_string_field.cc', 'google/protobuf/compiler/java/java_shared_code_generator.cc', 'google/protobuf/compiler/java/java_service.cc', 'google/protobuf/compiler/java/java_primitive_field_lite.cc', 'google/protobuf/compiler/java/java_primitive_field.cc', 'google/protobuf/compiler/java/java_name_resolver.cc', 'google/protobuf/compiler/java/java_message_lite.cc', 'google/protobuf/compiler/java/java_message_field_lite.cc', 'google/protobuf/compiler/java/java_message_field.cc', 'google/protobuf/compiler/java/java_message_builder_lite.cc', 'google/protobuf/compiler/java/java_message_builder.cc', 'google/protobuf/compiler/java/java_message.cc', 'google/protobuf/compiler/java/java_map_field_lite.cc', 'google/protobuf/compiler/java/java_map_field.cc', 'google/protobuf/compiler/java/java_lazy_message_field_lite.cc', 'google/protobuf/compiler/java/java_lazy_message_field.cc', 'google/protobuf/compiler/java/java_helpers.cc', 'google/protobuf/compiler/java/java_generator_factory.cc', 'google/protobuf/compiler/java/java_generator.cc', 'google/protobuf/compiler/java/java_file.cc', 'google/protobuf/compiler/java/java_field.cc', 'google/protobuf/compiler/java/java_extension.cc', 'google/protobuf/compiler/java/java_enum_lite.cc', 'google/protobuf/compiler/java/java_enum_field_lite.cc', 'google/protobuf/compiler/java/java_enum_field.cc', 'google/protobuf/compiler/java/java_enum.cc', 'google/protobuf/compiler/java/java_doc_comment.cc', 'google/protobuf/compiler/java/java_context.cc', 'google/protobuf/compiler/csharp/csharp_wrapper_field.cc', 'google/protobuf/compiler/csharp/csharp_source_generator_base.cc', 'google/protobuf/compiler/csharp/csharp_repeated_primitive_field.cc', 
'google/protobuf/compiler/csharp/csharp_repeated_message_field.cc', 'google/protobuf/compiler/csharp/csharp_repeated_enum_field.cc', 'google/protobuf/compiler/csharp/csharp_reflection_class.cc', 'google/protobuf/compiler/csharp/csharp_primitive_field.cc', 'google/protobuf/compiler/csharp/csharp_message_field.cc', 'google/protobuf/compiler/csharp/csharp_message.cc', 'google/protobuf/compiler/csharp/csharp_map_field.cc', 'google/protobuf/compiler/csharp/csharp_helpers.cc', 'google/protobuf/compiler/csharp/csharp_generator.cc', 'google/protobuf/compiler/csharp/csharp_field_base.cc', 'google/protobuf/compiler/csharp/csharp_enum_field.cc', 'google/protobuf/compiler/csharp/csharp_enum.cc', 'google/protobuf/compiler/csharp/csharp_doc_comment.cc', 'google/protobuf/compiler/cpp/cpp_string_field.cc', 'google/protobuf/compiler/cpp/cpp_service.cc', 'google/protobuf/compiler/cpp/cpp_primitive_field.cc', 'google/protobuf/compiler/cpp/cpp_message_field.cc', 'google/protobuf/compiler/cpp/cpp_message.cc', 'google/protobuf/compiler/cpp/cpp_map_field.cc', 'google/protobuf/compiler/cpp/cpp_helpers.cc', 'google/protobuf/compiler/cpp/cpp_generator.cc', 'google/protobuf/compiler/cpp/cpp_file.cc', 'google/protobuf/compiler/cpp/cpp_field.cc', 'google/protobuf/compiler/cpp/cpp_extension.cc', 'google/protobuf/compiler/cpp/cpp_enum_field.cc', 'google/protobuf/compiler/cpp/cpp_enum.cc', 'google/protobuf/compiler/command_line_interface.cc', 'google/protobuf/compiler/code_generator.cc', 'google/protobuf/wrappers.pb.cc', 'google/protobuf/wire_format.cc', 'google/protobuf/util/type_resolver_util.cc', 'google/protobuf/util/time_util.cc', 'google/protobuf/util/message_differencer.cc', 'google/protobuf/util/json_util.cc', 'google/protobuf/util/internal/utility.cc', 'google/protobuf/util/internal/type_info_test_helper.cc', 'google/protobuf/util/internal/type_info.cc', 'google/protobuf/util/internal/protostream_objectwriter.cc', 'google/protobuf/util/internal/protostream_objectsource.cc', 'google/protobuf/util/internal/proto_writer.cc', 'google/protobuf/util/internal/object_writer.cc', 'google/protobuf/util/internal/json_stream_parser.cc', 'google/protobuf/util/internal/json_objectwriter.cc', 'google/protobuf/util/internal/json_escaping.cc', 'google/protobuf/util/internal/field_mask_utility.cc', 'google/protobuf/util/internal/error_listener.cc', 'google/protobuf/util/internal/default_value_objectwriter.cc', 'google/protobuf/util/internal/datapiece.cc', 'google/protobuf/util/field_mask_util.cc', 'google/protobuf/util/field_comparator.cc', 'google/protobuf/unknown_field_set.cc', 'google/protobuf/type.pb.cc', 'google/protobuf/timestamp.pb.cc', 'google/protobuf/text_format.cc', 'google/protobuf/stubs/substitute.cc', 'google/protobuf/stubs/mathlimits.cc', 'google/protobuf/struct.pb.cc', 'google/protobuf/source_context.pb.cc', 'google/protobuf/service.cc', 'google/protobuf/reflection_ops.cc', 'google/protobuf/message.cc', 'google/protobuf/map_field.cc', 'google/protobuf/io/zero_copy_stream_impl.cc', 'google/protobuf/io/tokenizer.cc', 'google/protobuf/io/strtod.cc', 'google/protobuf/io/printer.cc', 'google/protobuf/io/gzip_stream.cc', 'google/protobuf/generated_message_reflection.cc', 'google/protobuf/field_mask.pb.cc', 'google/protobuf/extension_set_heavy.cc', 'google/protobuf/empty.pb.cc', 'google/protobuf/dynamic_message.cc', 'google/protobuf/duration.pb.cc', 'google/protobuf/descriptor_database.cc', 'google/protobuf/descriptor.pb.cc', 'google/protobuf/descriptor.cc', 'google/protobuf/compiler/parser.cc', 
'google/protobuf/compiler/importer.cc', 'google/protobuf/api.pb.cc', 'google/protobuf/any.pb.cc', 'google/protobuf/any.cc', 'google/protobuf/wire_format_lite.cc', 'google/protobuf/stubs/time.cc', 'google/protobuf/stubs/strutil.cc', 'google/protobuf/stubs/structurally_valid.cc', 'google/protobuf/stubs/stringprintf.cc', 'google/protobuf/stubs/stringpiece.cc', 'google/protobuf/stubs/statusor.cc', 'google/protobuf/stubs/status.cc', 'google/protobuf/stubs/once.cc', 'google/protobuf/stubs/int128.cc', 'google/protobuf/stubs/common.cc', 'google/protobuf/stubs/bytestream.cc', 'google/protobuf/stubs/atomicops_internals_x86_msvc.cc', 'google/protobuf/stubs/atomicops_internals_x86_gcc.cc', 'google/protobuf/repeated_field.cc', 'google/protobuf/message_lite.cc', 'google/protobuf/io/zero_copy_stream_impl_lite.cc', 'google/protobuf/io/zero_copy_stream.cc', 'google/protobuf/io/coded_stream.cc', 'google/protobuf/generated_message_util.cc', 'google/protobuf/extension_set.cc', 'google/protobuf/arenastring.cc', 'google/protobuf/arena.cc']
diff --git a/tools/distrib/python/grpcio_tools/setup.py b/tools/distrib/python/grpcio_tools/setup.py
new file mode 100644
index 0000000..0281c01
--- /dev/null
+++ b/tools/distrib/python/grpcio_tools/setup.py
@@ -0,0 +1,84 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from distutils import extension
+import os
+import os.path
+import sys
+
+import setuptools
+from setuptools.command import build_ext
+
+# TODO(atash) add flag to disable Cython use
+
+os.chdir(os.path.dirname(os.path.abspath(__file__)))
+sys.path.insert(0, os.path.abspath('.'))
+
+import protoc_lib_deps
+import grpc_version
+
+def protoc_ext_module():
+  plugin_sources = [
+      'grpc/protoc/main.cc',
+      'grpc_root/src/compiler/python_generator.cc'] + [
+      os.path.join('third_party/protobuf/src', cc_file)
+      for cc_file in protoc_lib_deps.CC_FILES]
+  plugin_ext = extension.Extension(
+      name='grpc.protoc.protoc_compiler',
+      sources=['grpc/protoc/protoc_compiler.pyx'] + plugin_sources,
+      include_dirs=[
+          '.',
+          'grpc_root',
+          'grpc_root/include',
+          'third_party/protobuf/src',
+      ],
+      language='c++',
+      define_macros=[('HAVE_PTHREAD', 1)],
+      extra_compile_args=['-lpthread', '-frtti', '-std=c++11'],
+  )
+  return plugin_ext
+
+def maybe_cythonize(exts):
+  from Cython import Build
+  return Build.cythonize(exts)
+
+setuptools.setup(
+  name='grpcio_tools',
+  version=grpc_version.VERSION,
+  license='',
+  ext_modules=maybe_cythonize([
+      protoc_ext_module(),
+  ]),
+  packages=setuptools.find_packages('.'),
+  # TODO(atash): Figure out why auditwheel doesn't like namespace packages.
+  #namespace_packages=['grpc'],
+  install_requires=[
+    'protobuf>=3.0.0a3',
+  ],
+)
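
A minimal illustrative sketch (not part of the patch) of how the generated protoc_lib_deps.CC_FILES list feeds the extension sources assembled by protoc_ext_module() above; it assumes the working directory is tools/distrib/python/grpcio_tools and the vendored trees are already in place.

# Illustrative only: check that every generated protobuf source path resolves
# to a real file under the vendored tree, mirroring how protoc_ext_module()
# builds its `sources` list.
import os
import protoc_lib_deps  # generated by make_grpcio_tools.py

PROTOBUF_ROOT = 'third_party/protobuf/src'
sources = ['grpc/protoc/main.cc', 'grpc_root/src/compiler/python_generator.cc']
sources += [os.path.join(PROTOBUF_ROOT, cc) for cc in protoc_lib_deps.CC_FILES]

missing = [path for path in sources if not os.path.exists(path)]
if missing:
    raise SystemExit('missing compiler sources: %s' % missing[:5])
print('%d sources ready for the grpc.protoc.protoc_compiler extension' % len(sources))
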
diff --git a/tools/distrib/python/make_grpcio_tools.py b/tools/distrib/python/make_grpcio_tools.py
new file mode 100755
index 0000000..50fbdbb
--- /dev/null
+++ b/tools/distrib/python/make_grpcio_tools.py
@@ -0,0 +1,140 @@
+#!/usr/bin/env python
+
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import os.path
+import shutil
+import subprocess
+import sys
+import traceback
+
+DEPS_FILE_CONTENT="""
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# AUTO-GENERATED BY make_grpcio_tools.py!
+CC_FILES={}
+"""
+
+# Bazel query result prefix for expected source files in protobuf.
+PROTOBUF_CC_PREFIX = '//:src/'
+
+GRPC_ROOT = os.path.abspath(
+    os.path.join(os.path.dirname(os.path.abspath(__file__)),
+                 '..', '..', '..'))
+
+GRPC_PYTHON_ROOT = os.path.join(GRPC_ROOT, 'tools/distrib/python/grpcio_tools')
+
+GRPC_PROTOBUF = os.path.join(GRPC_ROOT, 'third_party/protobuf/src')
+GRPC_PROTOC_PLUGINS = os.path.join(GRPC_ROOT, 'src/compiler')
+GRPC_PYTHON_PROTOBUF = os.path.join(GRPC_PYTHON_ROOT,
+                                    'third_party/protobuf/src')
+GRPC_PYTHON_PROTOC_PLUGINS = os.path.join(GRPC_PYTHON_ROOT,
+                                          'grpc_root/src/compiler')
+GRPC_PYTHON_PROTOC_LIB_DEPS = os.path.join(GRPC_PYTHON_ROOT,
+                                           'protoc_lib_deps.py')
+
+GRPC_INCLUDE = os.path.join(GRPC_ROOT, 'include')
+GRPC_PYTHON_INCLUDE = os.path.join(GRPC_PYTHON_ROOT, 'grpc_root/include')
+
+BAZEL_DEPS = os.path.join(GRPC_ROOT, 'tools/distrib/python/bazel_deps.sh')
+BAZEL_DEPS_PROTOC_LIB_QUERY = '//:protoc_lib'
+
+
+def get_deps(query):
+  """Write the result of the bazel query `query` against protobuf to
+     `out_file`."""
+  output = subprocess.check_output([BAZEL_DEPS, query])
+  output = output.splitlines()
+  cc_files = [
+      name for name in output
+      if name.endswith('.cc') and name.startswith(PROTOBUF_CC_PREFIX)]
+  cc_files = [cc_file[len(PROTOBUF_CC_PREFIX):] for cc_file in cc_files]
+  deps_file_content = DEPS_FILE_CONTENT.format(cc_files)
+  return deps_file_content
+
+
+def main():
+  os.chdir(GRPC_ROOT)
+
+  for tree in [GRPC_PYTHON_PROTOBUF,
+               GRPC_PYTHON_PROTOC_PLUGINS,
+               GRPC_PYTHON_INCLUDE]:
+    try:
+      shutil.rmtree(tree)
+    except Exception as _:
+      pass
+  shutil.copytree(GRPC_PROTOBUF, GRPC_PYTHON_PROTOBUF)
+  shutil.copytree(GRPC_PROTOC_PLUGINS, GRPC_PYTHON_PROTOC_PLUGINS)
+  shutil.copytree(GRPC_INCLUDE, GRPC_PYTHON_INCLUDE)
+
+  try:
+    protoc_lib_deps_content = get_deps(BAZEL_DEPS_PROTOC_LIB_QUERY)
+  except Exception as error:
+    # Allow this script to succeed even when the dependencies could not be
+    # fetched: without a successful bazel run we assume the dependencies
+    # already in source control are 'good enough'.
+    sys.stderr.write("Got non-fatal error:\n")
+    traceback.print_exc(file=sys.stderr)
+    return
+  # If we successfully got the dependencies, truncate and rewrite the deps file.
+  with open(GRPC_PYTHON_PROTOC_LIB_DEPS, 'w') as deps_file:
+    deps_file.write(protoc_lib_deps_content)
+
+if __name__ == '__main__':
+  main()
+
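
A small standalone sketch of the filtering that get_deps() above applies to `bazel query` output; the sample labels below are made up for illustration.

# Illustrative only: reduce bazel query labels with the '//:src/' prefix to the
# relative .cc paths that end up in CC_FILES, as get_deps() does.
PROTOBUF_CC_PREFIX = '//:src/'

sample_output = [
    '//:src/google/protobuf/descriptor.cc',
    '//:src/google/protobuf/io/coded_stream.cc',
    '//:src/google/protobuf/descriptor.h',  # headers are dropped
    '//external:zlib',                      # non-source labels are dropped
]

cc_files = [name[len(PROTOBUF_CC_PREFIX):] for name in sample_output
            if name.endswith('.cc') and name.startswith(PROTOBUF_CC_PREFIX)]

assert cc_files == ['google/protobuf/descriptor.cc',
                    'google/protobuf/io/coded_stream.cc']
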
diff --git a/tools/dockerfile/bazel/Dockerfile b/tools/dockerfile/bazel/Dockerfile
new file mode 100644
index 0000000..2a80a4d
--- /dev/null
+++ b/tools/dockerfile/bazel/Dockerfile
@@ -0,0 +1,52 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+FROM ubuntu:wily
+RUN apt-get update
+RUN apt-get -y install software-properties-common python-software-properties
+RUN add-apt-repository ppa:webupd8team/java
+RUN apt-get update
+RUN apt-get -y install \
+	vim            \
+	wget           \
+	openjdk-8-jdk  \
+	pkg-config     \
+	zip            \
+	g++            \
+	zlib1g-dev     \
+	unzip          \
+	git
+
+RUN git clone https://github.com/bazelbuild/bazel.git /bazel
+RUN cd /bazel && ./compile.sh
+
+RUN ln -s /bazel/output/bazel /bin/
+
+# ensure the installation has been extracted
+RUN bazel
diff --git a/tools/dockerfile/grpc_artifact_linux_x64/Dockerfile b/tools/dockerfile/grpc_artifact_linux_x64/Dockerfile
index d048b72..4ae4ebd 100644
--- a/tools/dockerfile/grpc_artifact_linux_x64/Dockerfile
+++ b/tools/dockerfile/grpc_artifact_linux_x64/Dockerfile
@@ -31,12 +31,15 @@
 
 FROM debian:jessie
 
+RUN apt-get update && apt-get install debian-keyring && apt-key update
+
 # Install Git and basic packages.
-RUN apt-get update && apt-get install -y \
+RUN apt-get update && apt-key update && apt-get install -y \
   autoconf \
   autotools-dev \
   build-essential \
   bzip2 \
+  clang \
   curl \
   gcc \
   gcc-multilib \
diff --git a/tools/dockerfile/grpc_artifact_linux_x86/Dockerfile b/tools/dockerfile/grpc_artifact_linux_x86/Dockerfile
index 46bc9f8..9c2fd52 100644
--- a/tools/dockerfile/grpc_artifact_linux_x86/Dockerfile
+++ b/tools/dockerfile/grpc_artifact_linux_x86/Dockerfile
@@ -31,12 +31,15 @@
 
 FROM 32bit/debian:jessie
 
+RUN apt-get update && apt-get install debian-keyring && apt-key update
+
 # Install Git and basic packages.
-RUN apt-get update && apt-get install -y \
+RUN apt-get update && apt-key update && apt-get install -y \
   autoconf \
   autotools-dev \
   build-essential \
   bzip2 \
+  clang \
   curl \
   gcc \
   gcc-multilib \
diff --git a/tools/dockerfile/grpc_artifact_python_manylinux_x64/Dockerfile b/tools/dockerfile/grpc_artifact_python_manylinux_x64/Dockerfile
new file mode 100644
index 0000000..3e31a2b
--- /dev/null
+++ b/tools/dockerfile/grpc_artifact_python_manylinux_x64/Dockerfile
@@ -0,0 +1,43 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Docker file for building gRPC manylinux Python artifacts.
+
+FROM quay.io/pypa/manylinux1_x86_64
+
+# Update the package manager
+RUN yum update -y
+
+###################################
+# Install Python build requirements
+RUN /opt/python/cp27-cp27m/bin/pip install cython
+RUN /opt/python/cp27-cp27mu/bin/pip install cython
+RUN /opt/python/cp34-cp34m/bin/pip install cython
+RUN /opt/python/cp35-cp35m/bin/pip install cython
+
diff --git a/tools/dockerfile/grpc_artifact_python_manylinux_x86/Dockerfile b/tools/dockerfile/grpc_artifact_python_manylinux_x86/Dockerfile
new file mode 100644
index 0000000..5fe62c2
--- /dev/null
+++ b/tools/dockerfile/grpc_artifact_python_manylinux_x86/Dockerfile
@@ -0,0 +1,43 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Docker file for building gRPC manylinux Python artifacts.
+
+FROM quay.io/pypa/manylinux1_i686
+
+# Update the package manager
+RUN yum update -y
+
+###################################
+# Install Python build requirements
+RUN /opt/python/cp27-cp27m/bin/pip install cython
+RUN /opt/python/cp27-cp27mu/bin/pip install cython
+RUN /opt/python/cp34-cp34m/bin/pip install cython
+RUN /opt/python/cp35-cp35m/bin/pip install cython
+
diff --git a/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile
index 64314f8..2a8d35a 100644
--- a/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile
+++ b/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile
@@ -69,8 +69,11 @@
 # Install nvm
 RUN touch .profile
 RUN curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.25.4/install.sh | bash
+# Install all versions of node that we want to test
 RUN /bin/bash -l -c "nvm install 0.12 && npm config set cache /tmp/npm-cache"
-
+RUN /bin/bash -l -c "nvm install 4 && npm config set cache /tmp/npm-cache"
+RUN /bin/bash -l -c "nvm install 5 && npm config set cache /tmp/npm-cache"
+RUN /bin/bash -l -c "nvm alias default 4"
 # Prepare ccache
 RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
 RUN ln -s /usr/bin/ccache /usr/local/bin/g++
diff --git a/tools/dockerfile/interoptest/grpc_interop_node/build_interop.sh b/tools/dockerfile/interoptest/grpc_interop_node/build_interop.sh
index b99fd44..976f55d 100755
--- a/tools/dockerfile/interoptest/grpc_interop_node/build_interop.sh
+++ b/tools/dockerfile/interoptest/grpc_interop_node/build_interop.sh
@@ -38,8 +38,6 @@
 cp -r /var/local/jenkins/service_account $HOME || true
 
 cd /var/local/git/grpc
-nvm use 0.12
-nvm alias default 0.12  # prevent the need to run 'nvm use' in every shell
 
 # build Node interop client & server
 npm install -g node-gyp
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile
index f70add4..4fd7cc2 100644
--- a/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile
+++ b/tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile
@@ -69,8 +69,11 @@
 # Install nvm
 RUN touch .profile
 RUN curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.25.4/install.sh | bash
+# Install all versions of node that we want to test
 RUN /bin/bash -l -c "nvm install 0.12 && npm config set cache /tmp/npm-cache"
-
+RUN /bin/bash -l -c "nvm install 4 && npm config set cache /tmp/npm-cache"
+RUN /bin/bash -l -c "nvm install 5 && npm config set cache /tmp/npm-cache"
+RUN /bin/bash -l -c "nvm alias default 4"
 # Google Cloud platform API libraries
 RUN apt-get update && apt-get install -y python-pip && apt-get clean
 RUN pip install --upgrade google-api-python-client
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_node/build_interop_stress.sh b/tools/dockerfile/stress_test/grpc_interop_stress_node/build_interop_stress.sh
index b99fd44..976f55d 100755
--- a/tools/dockerfile/stress_test/grpc_interop_stress_node/build_interop_stress.sh
+++ b/tools/dockerfile/stress_test/grpc_interop_stress_node/build_interop_stress.sh
@@ -38,8 +38,6 @@
 cp -r /var/local/jenkins/service_account $HOME || true
 
 cd /var/local/git/grpc
-nvm use 0.12
-nvm alias default 0.12  # prevent the need to run 'nvm use' in every shell
 
 # build Node interop client & server
 npm install -g node-gyp
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_python/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_python/Dockerfile
new file mode 100644
index 0000000..606b765
--- /dev/null
+++ b/tools/dockerfile/stress_test/grpc_interop_stress_python/Dockerfile
@@ -0,0 +1,103 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+FROM debian:jessie
+
+# Install Git and basic packages.
+RUN apt-get update && apt-get install -y \
+  autoconf \
+  autotools-dev \
+  build-essential \
+  bzip2 \
+  ccache \
+  curl \
+  gcc \
+  gcc-multilib \
+  git \
+  golang \
+  gyp \
+  lcov \
+  libc6 \
+  libc6-dbg \
+  libc6-dev \
+  libgtest-dev \
+  libtool \
+  make \
+  perl \
+  strace \
+  python-dev \
+  python-setuptools \
+  python-yaml \
+  telnet \
+  unzip \
+  wget \
+  zip && apt-get clean
+
+#================
+# Build profiling
+RUN apt-get update && apt-get install -y time && apt-get clean
+
+# Prepare ccache
+RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
+RUN ln -s /usr/bin/ccache /usr/local/bin/g++
+RUN ln -s /usr/bin/ccache /usr/local/bin/cc
+RUN ln -s /usr/bin/ccache /usr/local/bin/c++
+RUN ln -s /usr/bin/ccache /usr/local/bin/clang
+RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
+
+#=================
+# C++ dependencies
+RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
+
+# Google Cloud platform API libraries
+RUN apt-get update && apt-get install -y python-pip && apt-get clean
+RUN pip install --upgrade google-api-python-client
+
+
+#====================
+# Python dependencies
+
+# Install dependencies
+
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 tox
+
+
+RUN pip install coverage
+RUN pip install oauth2client
+
+# Define the default command.
+CMD ["bash"]
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_python/build_interop_stress.sh b/tools/dockerfile/stress_test/grpc_interop_stress_python/build_interop_stress.sh
new file mode 100755
index 0000000..e65332f
--- /dev/null
+++ b/tools/dockerfile/stress_test/grpc_interop_stress_python/build_interop_stress.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Builds Python interop server and client in a base image.
+set -e
+
+mkdir -p /var/local/git
+git clone --recursive /var/local/jenkins/grpc /var/local/git/grpc
+
+# copy service account keys if available
+cp -r /var/local/jenkins/service_account $HOME || true
+
+cd /var/local/git/grpc
+
+tools/run_tests/run_tests.py -l python -c opt --build_only
+
+# Build the C++ metrics client used to query metrics from the Python stress client
+make metrics_client -j
+
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile b/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile
new file mode 100644
index 0000000..36b54dd
--- /dev/null
+++ b/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile
@@ -0,0 +1,99 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+FROM debian:jessie
+
+# Install Git and basic packages.
+RUN apt-get update && apt-get install -y \
+  autoconf \
+  autotools-dev \
+  build-essential \
+  bzip2 \
+  ccache \
+  curl \
+  gcc \
+  gcc-multilib \
+  git \
+  golang \
+  gyp \
+  lcov \
+  libc6 \
+  libc6-dbg \
+  libc6-dev \
+  libgtest-dev \
+  libtool \
+  make \
+  perl \
+  strace \
+  python-dev \
+  python-setuptools \
+  python-yaml \
+  telnet \
+  unzip \
+  wget \
+  zip && apt-get clean
+
+#================
+# Build profiling
+RUN apt-get update && apt-get install -y time && apt-get clean
+
+# Prepare ccache
+RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
+RUN ln -s /usr/bin/ccache /usr/local/bin/g++
+RUN ln -s /usr/bin/ccache /usr/local/bin/cc
+RUN ln -s /usr/bin/ccache /usr/local/bin/c++
+RUN ln -s /usr/bin/ccache /usr/local/bin/clang
+RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
+
+#=================
+# C++ dependencies
+RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
+
+# Google Cloud platform API libraries
+RUN apt-get update && apt-get install -y python-pip && apt-get clean
+RUN pip install --upgrade google-api-python-client
+
+
+#==================
+# Ruby dependencies
+
+# Install rvm
+RUN gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
+RUN \curl -sSL https://get.rvm.io | bash -s stable
+
+# Install Ruby 2.1
+RUN /bin/bash -l -c "rvm install ruby-2.1"
+RUN /bin/bash -l -c "rvm use --default ruby-2.1"
+RUN /bin/bash -l -c "echo 'gem: --no-ri --no-rdoc' > ~/.gemrc"
+RUN /bin/bash -l -c "echo 'export PATH=/usr/local/rvm/bin:$PATH' >> ~/.bashrc"
+RUN /bin/bash -l -c "echo 'rvm --default use ruby-2.1' >> ~/.bashrc"
+RUN /bin/bash -l -c "gem install bundler --no-ri --no-rdoc"
+
+# Define the default command.
+CMD ["bash"]
diff --git a/tools/dockerfile/stress_test/grpc_interop_stress_ruby/build_interop_stress.sh b/tools/dockerfile/stress_test/grpc_interop_stress_ruby/build_interop_stress.sh
new file mode 100755
index 0000000..1b7567d
--- /dev/null
+++ b/tools/dockerfile/stress_test/grpc_interop_stress_ruby/build_interop_stress.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Builds Ruby interop server and client in a base image.
+set -e
+
+mkdir -p /var/local/git
+git clone --recursive /var/local/jenkins/grpc /var/local/git/grpc
+
+# Copy service account keys if available
+cp -r /var/local/jenkins/service_account $HOME || true
+
+cd /var/local/git/grpc
+rvm --default use ruby-2.1
+
+# Build Ruby interop client and server
+(cd src/ruby && gem update bundler && bundle && rake compile)
+
+# Build the C++ metrics client used to query metrics from the Ruby stress client
+make metrics_client -j
+
diff --git a/tools/dockerfile/test/multilang_jessie_x64/Dockerfile b/tools/dockerfile/test/multilang_jessie_x64/Dockerfile
index 71ebf2b..5c3f7740 100644
--- a/tools/dockerfile/test/multilang_jessie_x64/Dockerfile
+++ b/tools/dockerfile/test/multilang_jessie_x64/Dockerfile
@@ -90,8 +90,11 @@
 # Install nvm
 RUN touch .profile
 RUN curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.25.4/install.sh | bash
+# Install all versions of node that we want to test
 RUN /bin/bash -l -c "nvm install 0.12 && npm config set cache /tmp/npm-cache"
-
+RUN /bin/bash -l -c "nvm install 4 && npm config set cache /tmp/npm-cache"
+RUN /bin/bash -l -c "nvm install 5 && npm config set cache /tmp/npm-cache"
+RUN /bin/bash -l -c "nvm alias default 4"
 #=================
 # PHP dependencies
 
diff --git a/tools/dockerfile/test/node_jessie_x64/Dockerfile b/tools/dockerfile/test/node_jessie_x64/Dockerfile
index 64314f8..2a8d35a 100644
--- a/tools/dockerfile/test/node_jessie_x64/Dockerfile
+++ b/tools/dockerfile/test/node_jessie_x64/Dockerfile
@@ -69,8 +69,11 @@
 # Install nvm
 RUN touch .profile
 RUN curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.25.4/install.sh | bash
+# Install all versions of node that we want to test
 RUN /bin/bash -l -c "nvm install 0.12 && npm config set cache /tmp/npm-cache"
-
+RUN /bin/bash -l -c "nvm install 4 && npm config set cache /tmp/npm-cache"
+RUN /bin/bash -l -c "nvm install 5 && npm config set cache /tmp/npm-cache"
+RUN /bin/bash -l -c "nvm alias default 4"
 # Prepare ccache
 RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
 RUN ln -s /usr/bin/ccache /usr/local/bin/g++
diff --git a/tools/dockerfile/test/sanity/Dockerfile b/tools/dockerfile/test/sanity/Dockerfile
index 3146a92..43b2a0c 100644
--- a/tools/dockerfile/test/sanity/Dockerfile
+++ b/tools/dockerfile/test/sanity/Dockerfile
@@ -75,6 +75,15 @@
       python-lxml
 RUN pip install simplejson mako
 
+#======================================
+# More sanity test dependencies (bazel)
+RUN echo "deb http://httpredir.debian.org/debian jessie-backports main" >   /etc/apt/sources.list.d/backports.list
+RUN apt-get update
+RUN apt-get -t jessie-backports install -y openjdk-8-jdk
+RUN git clone https://github.com/bazelbuild/bazel.git /bazel
+RUN cd /bazel && ./compile.sh
+RUN ln -s /bazel/output/bazel /bin/
+
 #===================
 # Docker "inception"
 # Note this is quite the ugly hack.
diff --git a/tools/doxygen/Doxyfile.c++ b/tools/doxygen/Doxyfile.c++
index 7dc0496..2a319db 100644
--- a/tools/doxygen/Doxyfile.c++
+++ b/tools/doxygen/Doxyfile.c++
@@ -40,7 +40,7 @@
 # could be handy for archiving the generated documentation or if some version
 # control system is used.
 
-PROJECT_NUMBER         = 0.14.0-dev
+PROJECT_NUMBER         = 0.15.0-dev
 
 # Using the PROJECT_BRIEF tag one can provide an optional one line description
 # for a project that appears at the top of each page and should give viewer a
@@ -833,6 +833,7 @@
 include/grpc++/impl/codegen/sync_stream.h \
 include/grpc++/impl/codegen/time.h \
 include/grpc/impl/codegen/byte_buffer.h \
+include/grpc/impl/codegen/byte_buffer_reader.h \
 include/grpc/impl/codegen/compression_types.h \
 include/grpc/impl/codegen/connectivity_state.h \
 include/grpc/impl/codegen/grpc_types.h \
diff --git a/tools/doxygen/Doxyfile.c++.internal b/tools/doxygen/Doxyfile.c++.internal
index 312fd17..5fdfafb 100644
--- a/tools/doxygen/Doxyfile.c++.internal
+++ b/tools/doxygen/Doxyfile.c++.internal
@@ -40,7 +40,7 @@
 # could be handy for archiving the generated documentation or if some version
 # control system is used.
 
-PROJECT_NUMBER         = 0.14.0-dev
+PROJECT_NUMBER         = 0.15.0-dev
 
 # Using the PROJECT_BRIEF tag one can provide an optional one line description
 # for a project that appears at the top of each page and should give viewer a
@@ -833,6 +833,7 @@
 include/grpc++/impl/codegen/sync_stream.h \
 include/grpc++/impl/codegen/time.h \
 include/grpc/impl/codegen/byte_buffer.h \
+include/grpc/impl/codegen/byte_buffer_reader.h \
 include/grpc/impl/codegen/compression_types.h \
 include/grpc/impl/codegen/connectivity_state.h \
 include/grpc/impl/codegen/grpc_types.h \
diff --git a/tools/doxygen/Doxyfile.core b/tools/doxygen/Doxyfile.core
index 034d9c6..eed8425 100644
--- a/tools/doxygen/Doxyfile.core
+++ b/tools/doxygen/Doxyfile.core
@@ -40,7 +40,7 @@
 # could be handy for archiving the generated documentation or if some version
 # control system is used.
 
-PROJECT_NUMBER         = 0.14.0-dev
+PROJECT_NUMBER         = 0.15.0-dev
 
 # Using the PROJECT_BRIEF tag one can provide an optional one line description
 # for a project that appears at the top of each page and should give viewer a
@@ -766,6 +766,7 @@
 include/grpc/grpc.h \
 include/grpc/status.h \
 include/grpc/impl/codegen/byte_buffer.h \
+include/grpc/impl/codegen/byte_buffer_reader.h \
 include/grpc/impl/codegen/compression_types.h \
 include/grpc/impl/codegen/connectivity_state.h \
 include/grpc/impl/codegen/grpc_types.h \
diff --git a/tools/doxygen/Doxyfile.core.internal b/tools/doxygen/Doxyfile.core.internal
index 1b1453f..1fcc1fa 100644
--- a/tools/doxygen/Doxyfile.core.internal
+++ b/tools/doxygen/Doxyfile.core.internal
@@ -40,7 +40,7 @@
 # could be handy for archiving the generated documentation or if some version
 # control system is used.
 
-PROJECT_NUMBER         = 0.14.0-dev
+PROJECT_NUMBER         = 0.15.0-dev
 
 # Using the PROJECT_BRIEF tag one can provide an optional one line description
 # for a project that appears at the top of each page and should give viewer a
@@ -766,6 +766,7 @@
 include/grpc/grpc.h \
 include/grpc/status.h \
 include/grpc/impl/codegen/byte_buffer.h \
+include/grpc/impl/codegen/byte_buffer_reader.h \
 include/grpc/impl/codegen/compression_types.h \
 include/grpc/impl/codegen/connectivity_state.h \
 include/grpc/impl/codegen/grpc_types.h \
diff --git a/tools/gce/linux_performance_worker_init.sh b/tools/gce/linux_performance_worker_init.sh
index 25ac3bc..df29581 100755
--- a/tools/gce/linux_performance_worker_init.sh
+++ b/tools/gce/linux_performance_worker_init.sh
@@ -95,6 +95,9 @@
 touch .profile
 curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.25.4/install.sh | bash
 nvm install 0.12 && npm config set cache /tmp/npm-cache
+nvm install 4 && npm config set cache /tmp/npm-cache
+nvm install 5 && npm config set cache /tmp/npm-cache
+nvm alias default 4
 
 # C# dependencies (http://www.mono-project.com/docs/getting-started/install/linux/#debian-ubuntu-and-derivatives)
 
diff --git a/tools/gcp/stress_test/run_ruby.sh b/tools/gcp/stress_test/run_ruby.sh
new file mode 100755
index 0000000..80d0567
--- /dev/null
+++ b/tools/gcp/stress_test/run_ruby.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+# Copyright 2015-2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This is a wrapper script used by run_server.py and run_client.py to launch
+# Ruby stress clients and stress servers with the rvm environment loaded.
+source /etc/profile.d/rvm.sh
+
+set -ex
+
+"$@"
diff --git a/tools/jenkins/README.md b/tools/jenkins/README.md
new file mode 100644
index 0000000..8e06b68
--- /dev/null
+++ b/tools/jenkins/README.md
@@ -0,0 +1 @@
+Scripts invoked by Jenkins (our CI platform) to run gRPC test suites.
diff --git a/tools/jenkins/run_fuzzer.sh b/tools/jenkins/run_fuzzer.sh
index 3f25a93..cfa7ace 100755
--- a/tools/jenkins/run_fuzzer.sh
+++ b/tools/jenkins/run_fuzzer.sh
@@ -33,14 +33,14 @@
 set -ex
 
 export RUN_COMMAND="tools/fuzzer/build_and_run_fuzzer.sh $1"
-export DOCKER_RUN_SCRIPT=tools/jenkins/docker_run.sh
+export DOCKER_RUN_SCRIPT=tools/run_tests/dockerize/docker_run.sh
 export DOCKERFILE_DIR=tools/dockerfile/test/fuzzer
 export OUTPUT_DIR=fuzzer_output
 
 runtime=${runtime:-3600}
 jobs=${jobs:-3}
 
-tools/jenkins/build_and_run_docker.sh \
+tools/run_tests/dockerize/build_and_run_docker.sh \
   -e RUN_COMMAND="$RUN_COMMAND" \
   -e OUTPUT_DIR="$OUTPUT_DIR" \
   -e config="$config" \
diff --git a/tools/run_tests/artifact_targets.py b/tools/run_tests/artifact_targets.py
index e61c46d..477bd46 100644
--- a/tools/run_tests/artifact_targets.py
+++ b/tools/run_tests/artifact_targets.py
@@ -43,10 +43,10 @@
   for k,v in environ.iteritems():
     docker_args += ['-e', '%s=%s' % (k, v)]
   docker_env = {'DOCKERFILE_DIR': dockerfile_dir,
-                'DOCKER_RUN_SCRIPT': 'tools/jenkins/docker_run.sh',
+                'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
                 'OUTPUT_DIR': 'artifacts'}
   jobspec = jobset.JobSpec(
-          cmdline=['tools/jenkins/build_and_run_docker.sh'] + docker_args,
+          cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] + docker_args,
           environ=docker_env,
           shortname='build_artifact.%s' % (name),
           timeout_seconds=30*60,
@@ -84,12 +84,16 @@
 class PythonArtifact:
   """Builds Python artifacts."""
 
-  def __init__(self, platform, arch):
-    self.name = 'python_%s_%s' % (platform, arch)
+  def __init__(self, platform, arch, manylinux_build=None):
+    if manylinux_build:
+      self.name = 'python_%s_%s_%s' % (platform, arch, manylinux_build)
+    else:
+      self.name = 'python_%s_%s' % (platform, arch)
     self.platform = platform
     self.arch = arch
     self.labels = ['artifact', 'python', platform, arch]
     self.python_version = python_version_arch_map[arch]
+    self.manylinux_build = manylinux_build
 
   def pre_build_jobspecs(self):
       return []
@@ -99,14 +103,56 @@
     if self.platform == 'linux':
       if self.arch == 'x86':
         environ['SETARCH_CMD'] = 'linux32'
+      # Inside the manylinux container, the python installations are located in
+      # special places...
+      environ['PYTHON'] = '/opt/python/{}/bin/python'.format(self.manylinux_build)
+      environ['PIP'] = '/opt/python/{}/bin/pip'.format(self.manylinux_build)
+      # Our docker image has all the prerequisites pip-installed already.
+      environ['SKIP_PIP_INSTALL'] = '1'
+      # Platform autodetection breaks inside the manylinux1 image, so we set
+      # the platform defines ourselves.
+      # TODO(atash) get better platform-detection support in core so we don't
+      # need to do this manually...
+      environ['CFLAGS'] = " ".join([
+        '-DGPR_NO_AUTODETECT_PLATFORM',
+        '-DGPR_PLATFORM_STRING=\\"manylinux\\"',
+        '-DGPR_POSIX_CRASH_HANDLER=1',
+        '-DGPR_CPU_LINUX=1',
+        '-DGPR_GCC_ATOMIC=1',
+        '-DGPR_GCC_TLS=1',
+        '-DGPR_LINUX=1',
+        '-DGPR_LINUX_LOG=1',
+        #'-DGPR_LINUX_MULTIPOLL_WITH_EPOLL=1',
+        '-DGPR_POSIX_SOCKET=1',
+        '-DGPR_POSIX_WAKEUP_FD=1',
+        '-DGPR_POSIX_SOCKETADDR=1',
+        #'-DGPR_LINUX_EVENTFD=1',
+        '-DGPR_POSIX_NO_SPECIAL_WAKEUP_FD=1',
+        #'-DGPR_LINUX_SOCKETUTILS=1',
+        '-DGPR_POSIX_SOCKETUTILS=1',
+        '-DGPR_HAVE_UNIX_SOCKET=1',
+        '-DGPR_HAVE_IP_PKTINFO=1',
+        '-DGPR_HAVE_IPV6_RECVPKTINFO=1',
+        '-DGPR_LINUX_ENV=1',
+        '-DGPR_POSIX_FILE=1',
+        '-DGPR_POSIX_TMPFILE=1',
+        '-DGPR_POSIX_STRING=1',
+        '-DGPR_POSIX_SUBPROCESS=1',
+        '-DGPR_POSIX_SYNC=1',
+        '-DGPR_POSIX_TIME=1',
+        '-DGPR_GETPID_IN_UNISTD_H=1',
+        '-DGPR_HAVE_MSG_NOSIGNAL=1',
+        '-DGPR_ARCH_{arch}=1'.format(arch=('32' if self.arch == 'x86' else '64')),
+      ])
       return create_docker_jobspec(self.name,
-          'tools/dockerfile/grpc_artifact_linux_%s' % self.arch,
+          'tools/dockerfile/grpc_artifact_python_manylinux_%s' % self.arch,
           'tools/run_tests/build_artifact_python.sh',
           environ=environ)
     elif self.platform == 'windows':
       return create_jobspec(self.name,
                             ['tools\\run_tests\\build_artifact_python.bat',
-                             self.python_version
+                             self.python_version,
+                             '32' if self.arch == 'x86' else '64'
                             ],
                             shell=True)
     else:
@@ -307,8 +353,10 @@
            for Cls in (CSharpExtArtifact, NodeExtArtifact, ProtocArtifact)
            for platform in ('linux', 'macos', 'windows')
            for arch in ('x86', 'x64')] +
-          [PythonArtifact('linux', 'x86'),
-           PythonArtifact('linux', 'x64'),
+          [PythonArtifact('linux', 'x86', 'cp27-cp27m'),
+           PythonArtifact('linux', 'x86', 'cp27-cp27mu'),
+           PythonArtifact('linux', 'x64', 'cp27-cp27m'),
+           PythonArtifact('linux', 'x64', 'cp27-cp27mu'),
            PythonArtifact('macos', 'x64'),
            PythonArtifact('windows', 'x86'),
            PythonArtifact('windows', 'x64'),
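
For clarity, a small standalone sketch of the naming scheme introduced by the optional manylinux_build argument to PythonArtifact above; the values are examples only.

# Illustrative only: reproduce how PythonArtifact names artifacts with and
# without a manylinux build tag.
def artifact_name(platform, arch, manylinux_build=None):
    if manylinux_build:
        return 'python_%s_%s_%s' % (platform, arch, manylinux_build)
    return 'python_%s_%s' % (platform, arch)

assert artifact_name('linux', 'x64', 'cp27-cp27mu') == 'python_linux_x64_cp27-cp27mu'
assert artifact_name('windows', 'x86') == 'python_windows_x86'
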
diff --git a/tools/run_tests/build_artifact_python.bat b/tools/run_tests/build_artifact_python.bat
index 023d394..636ae0d 100644
--- a/tools/run_tests/build_artifact_python.bat
+++ b/tools/run_tests/build_artifact_python.bat
@@ -52,8 +52,21 @@
 
 python setup.py bdist_wheel
 
+@rem Build gRPC Python tools
+set PATH=C:\msys64\mingw%2\bin;%PATH%
+set CC=C:\msys64\mingw%2\bin\g++.exe
+set CFLAGS=-fno-wrapv
+python tools\distrib\python\make_grpcio_tools.py
+if %2 == 32 (
+  python tools\distrib\python\grpcio_tools\setup.py build_ext -c mingw32
+) else (
+  python tools\distrib\python\grpcio_tools\setup.py build_ext -c mingw32 -DMS_WIN64
+)
+python tools\distrib\python\grpcio_tools\setup.py bdist_wheel
+
 mkdir artifacts
 xcopy /Y /I /S dist\* artifacts\ || goto :error
+xcopy /Y /I /S tools\distrib\python\grpcio_tools\dist\* artifacts\ || goto :error
 
 goto :EOF
 
diff --git a/tools/run_tests/build_artifact_python.sh b/tools/run_tests/build_artifact_python.sh
index 1f23f9f..35c3a48 100755
--- a/tools/run_tests/build_artifact_python.sh
+++ b/tools/run_tests/build_artifact_python.sh
@@ -32,36 +32,48 @@
 
 cd $(dirname $0)/../..
 
-if [ "$SKIP_PIP_INSTALL" == "" ]
-then
-  pip install --upgrade six
-  # There's a bug in newer versions of setuptools (see
-  # https://bitbucket.org/pypa/setuptools/issues/503/pkg_resources_vendorpackagingrequirementsi)
-  pip install --upgrade 'setuptools==18'
-  pip install -rrequirements.txt
-fi
-
 export GRPC_PYTHON_USE_CUSTOM_BDIST=0
 export GRPC_PYTHON_BUILD_WITH_CYTHON=1
+export PYTHON=${PYTHON:-python}
+export PIP=${PIP:-pip}
+export AUDITWHEEL=${AUDITWHEEL:-auditwheel}
+
+
+if [ "$SKIP_PIP_INSTALL" == "" ]
+then
+  ${PIP} install --upgrade six
+  # There's a bug in newer versions of setuptools (see
+  # https://bitbucket.org/pypa/setuptools/issues/503/pkg_resources_vendorpackagingrequirementsi)
+  ${PIP} install --upgrade 'setuptools==18'
+  ${PIP} install -rrequirements.txt
+fi
 
 # Build the source distribution first because MANIFEST.in cannot override
 # exclusion of built shared objects among package resources (for some
 # inexplicable reason).
-${SETARCH_CMD} python setup.py  \
+${SETARCH_CMD} ${PYTHON} setup.py  \
     sdist
 
-# The bdist_wheel_grpc_custom command is finicky about command output ordering
-# and thus ought to be run in a shell command separate of others. Further, it
-# trashes the actual bdist_wheel output, so it should be run first so that
-# bdist_wheel may be run unmolested.
-${SETARCH_CMD} python setup.py  \
-    build_tagged_ext
-
 # Wheel has a bug where directories don't get excluded.
 # https://bitbucket.org/pypa/wheel/issues/99/cannot-exclude-directory
-${SETARCH_CMD} python setup.py  \
+${SETARCH_CMD} ${PYTHON} setup.py  \
     bdist_wheel
 
+# Build gRPC tools package
+${PYTHON} tools/distrib/python/make_grpcio_tools.py
+CFLAGS="$CFLAGS -fno-wrapv" ${SETARCH_CMD} \
+  ${PYTHON} tools/distrib/python/grpcio_tools/setup.py bdist_wheel
+
 mkdir -p artifacts
+if command -v ${AUDITWHEEL}
+then
+  for wheel in dist/*.whl; do
+    ${AUDITWHEEL} repair $wheel -w artifacts/
+  done
+  for wheel in tools/distrib/python/grpcio_tools/dist/*.whl; do
+    ${AUDITWHEEL} repair $wheel -w artifacts/
+  done
+fi
 
 cp -r dist/* artifacts
+cp -r tools/distrib/python/grpcio_tools/dist/* artifacts
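
A short sketch of how a manylinux artifact job might invoke the script above; the interpreter paths are assumptions for the cp27-cp27mu build, and the call must be made from the gRPC repository root.

# Illustrative only: environment a manylinux job could pass to
# build_artifact_python.sh (paths assumed for the cp27-cp27mu build).
import os
import subprocess

env = dict(os.environ,
           PYTHON='/opt/python/cp27-cp27mu/bin/python',
           PIP='/opt/python/cp27-cp27mu/bin/pip',
           SKIP_PIP_INSTALL='1')  # prerequisites are baked into the image

subprocess.check_call(['tools/run_tests/build_artifact_python.sh'], env=env)
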
diff --git a/tools/run_tests/distribtest_targets.py b/tools/run_tests/distribtest_targets.py
index 34cc1cd..ae918be 100644
--- a/tools/run_tests/distribtest_targets.py
+++ b/tools/run_tests/distribtest_targets.py
@@ -44,9 +44,9 @@
   for k,v in environ.iteritems():
     docker_args += ['-e', '%s=%s' % (k, v)]
   docker_env = {'DOCKERFILE_DIR': dockerfile_dir,
-                'DOCKER_RUN_SCRIPT': 'tools/jenkins/docker_run.sh'}
+                'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh'}
   jobspec = jobset.JobSpec(
-          cmdline=['tools/jenkins/build_and_run_docker.sh'] + docker_args,
+          cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] + docker_args,
           environ=docker_env,
           shortname='distribtest.%s' % (name),
           timeout_seconds=30*60,
diff --git a/tools/jenkins/build_and_run_docker.sh b/tools/run_tests/dockerize/build_and_run_docker.sh
similarity index 98%
rename from tools/jenkins/build_and_run_docker.sh
rename to tools/run_tests/dockerize/build_and_run_docker.sh
index 92dbbc6..1ef34b2 100755
--- a/tools/jenkins/build_and_run_docker.sh
+++ b/tools/run_tests/dockerize/build_and_run_docker.sh
@@ -33,7 +33,7 @@
 
 set -ex
 
-cd $(dirname $0)/../..
+cd $(dirname $0)/../../..
 git_root=$(pwd)
 cd -
 
diff --git a/tools/jenkins/build_docker_and_run_tests.sh b/tools/run_tests/dockerize/build_docker_and_run_tests.sh
similarity index 98%
rename from tools/jenkins/build_docker_and_run_tests.sh
rename to tools/run_tests/dockerize/build_docker_and_run_tests.sh
index 5779e63..c2ea6f2 100755
--- a/tools/jenkins/build_docker_and_run_tests.sh
+++ b/tools/run_tests/dockerize/build_docker_and_run_tests.sh
@@ -33,7 +33,7 @@
 
 set -ex
 
-cd $(dirname $0)/../..
+cd $(dirname $0)/../../..
 git_root=$(pwd)
 cd -
 
diff --git a/tools/jenkins/build_interop_image.sh b/tools/run_tests/dockerize/build_interop_image.sh
similarity index 98%
rename from tools/jenkins/build_interop_image.sh
rename to tools/run_tests/dockerize/build_interop_image.sh
index d2ba97c..48a216a 100755
--- a/tools/jenkins/build_interop_image.sh
+++ b/tools/run_tests/dockerize/build_interop_image.sh
@@ -40,7 +40,7 @@
 #  BUILD_INTEROP_DOCKER_EXTRA_ARGS - optional args to be passed to the
 #    docker run command
 
-cd `dirname $0`/../..
+cd `dirname $0`/../../..
 GRPC_ROOT=`pwd`
 MOUNT_ARGS="-v $GRPC_ROOT:/var/local/jenkins/grpc:ro"
 
diff --git a/tools/jenkins/build_interop_stress_image.sh b/tools/run_tests/dockerize/build_interop_stress_image.sh
similarity index 98%
rename from tools/jenkins/build_interop_stress_image.sh
rename to tools/run_tests/dockerize/build_interop_stress_image.sh
index 31ffa75..4407c8d 100755
--- a/tools/jenkins/build_interop_stress_image.sh
+++ b/tools/run_tests/dockerize/build_interop_stress_image.sh
@@ -44,7 +44,7 @@
 #  BUILD_INTEROP_DOCKER_EXTRA_ARGS - optional args to be passed to the
 #    docker run command
 
-cd `dirname $0`/../..
+cd `dirname $0`/../../..
 GRPC_ROOT=`pwd`
 MOUNT_ARGS="-v $GRPC_ROOT:/var/local/jenkins/grpc:ro"
 
diff --git a/tools/jenkins/docker_run.sh b/tools/run_tests/dockerize/docker_run.sh
similarity index 100%
rename from tools/jenkins/docker_run.sh
rename to tools/run_tests/dockerize/docker_run.sh
diff --git a/tools/jenkins/docker_run_tests.sh b/tools/run_tests/dockerize/docker_run_tests.sh
similarity index 100%
rename from tools/jenkins/docker_run_tests.sh
rename to tools/run_tests/dockerize/docker_run_tests.sh
diff --git a/tools/run_tests/package_targets.py b/tools/run_tests/package_targets.py
index 87bc486..820b539 100644
--- a/tools/run_tests/package_targets.py
+++ b/tools/run_tests/package_targets.py
@@ -42,10 +42,10 @@
   for k,v in environ.iteritems():
     docker_args += ['-e', '%s=%s' % (k, v)]
   docker_env = {'DOCKERFILE_DIR': dockerfile_dir,
-                'DOCKER_RUN_SCRIPT': 'tools/jenkins/docker_run.sh',
+                'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
                 'OUTPUT_DIR': 'artifacts'}
   jobspec = jobset.JobSpec(
-          cmdline=['tools/jenkins/build_and_run_docker.sh'] + docker_args,
+          cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] + docker_args,
           environ=docker_env,
           shortname='build_package.%s' % (name),
           timeout_seconds=30*60,
diff --git a/tools/run_tests/performance/run_worker_node.sh b/tools/run_tests/performance/run_worker_node.sh
index 46b6ff0..9a53a31 100755
--- a/tools/run_tests/performance/run_worker_node.sh
+++ b/tools/run_tests/performance/run_worker_node.sh
@@ -29,7 +29,7 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 source ~/.nvm/nvm.sh
-nvm use 0.12
+nvm use 4
 
 set -ex
 
diff --git a/tools/run_tests/run_interop_tests.py b/tools/run_tests/run_interop_tests.py
index 758be93..e813473 100755
--- a/tools/run_tests/run_interop_tests.py
+++ b/tools/run_tests/run_interop_tests.py
@@ -542,7 +542,7 @@
     env['BUILD_INTEROP_DOCKER_EXTRA_ARGS'] = \
       '-v %s:/root/.composer/auth.json:ro' % host_file
   build_job = jobset.JobSpec(
-          cmdline=['tools/jenkins/build_interop_image.sh'],
+          cmdline=['tools/run_tests/dockerize/build_interop_image.sh'],
           environ=env,
           shortname='build_docker_%s' % (language),
           timeout_seconds=30*60)
diff --git a/tools/run_tests/run_stress_tests.py b/tools/run_tests/run_stress_tests.py
index 0ba8f51..e42ee24 100755
--- a/tools/run_tests/run_stress_tests.py
+++ b/tools/run_tests/run_stress_tests.py
@@ -195,7 +195,7 @@
     tag = 'grpc_interop_stress_%s:%s' % (language.safename, uuid.uuid4())
   env = {'INTEROP_IMAGE': tag,
          'BASE_NAME': 'grpc_interop_stress_%s' % language.safename}
-  build_job = jobset.JobSpec(cmdline=['tools/jenkins/build_interop_stress_image.sh'],
+  build_job = jobset.JobSpec(cmdline=['tools/run_tests/dockerize/build_interop_stress_image.sh'],
                              environ=env,
                              shortname='build_docker_%s' % (language),
                              timeout_seconds=30 * 60)
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index dea481e..37291f4 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -272,12 +272,17 @@
 
   def __init__(self):
     self.platform = platform_string()
-    self.node_version = '0.12'
 
   def configure(self, config, args):
     self.config = config
     self.args = args
-    _check_compiler(self.args.compiler, ['default'])
+    _check_compiler(self.args.compiler, ['default', 'node0.12',
+                                         'node4', 'node5'])
+    if self.args.compiler == 'default':
+      self.node_version = '4'
+    else:
+      # Strip the leading "node" to get the version number
+      self.node_version = self.args.compiler[4:]
 
   def test_specs(self):
     if self.platform == 'windows':
@@ -802,7 +807,8 @@
                            'gcc4.4', 'gcc4.9', 'gcc5.3',
                            'clang3.4', 'clang3.6',
                            'vs2010', 'vs2013', 'vs2015',
-                           'python2.7', 'python3.4'],
+                           'python2.7', 'python3.4',
+                           'node0.12', 'node4', 'node5'],
                   default='default',
                   help='Selects compiler to use. Allowed values depend on the platform and language.')
 argp.add_argument('--build_only',
@@ -906,13 +912,13 @@
   env = os.environ.copy()
   env['RUN_TESTS_COMMAND'] = run_tests_cmd
   env['DOCKERFILE_DIR'] = dockerfile_dir
-  env['DOCKER_RUN_SCRIPT'] = 'tools/jenkins/docker_run_tests.sh'
+  env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
   if args.xml_report:
     env['XML_REPORT'] = args.xml_report
   if not args.travis:
     env['TTY_FLAG'] = '-t'  # enables Ctrl-C when not on Jenkins.
 
-  subprocess.check_call(['tools/jenkins/build_docker_and_run_tests.sh'],
+  subprocess.check_call(['tools/run_tests/dockerize/build_docker_and_run_tests.sh'],
                         shell=True,
                         env=env)
   sys.exit(0)
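
(The NodeLanguage hunk above replaces the hard-coded 0.12 with a --compiler-driven choice: 'default' now means Node 4, and node0.12/node4/node5 are stripped of their "node" prefix. A standalone sketch of that mapping; the helper name is illustrative, not part of run_tests.py.)

def node_version_for_compiler(compiler):
  """Mirrors the selection logic added to NodeLanguage.configure above."""
  if compiler not in ('default', 'node0.12', 'node4', 'node5'):
    raise ValueError('unsupported Node compiler: %s' % compiler)
  if compiler == 'default':
    return '4'
  return compiler[len('node'):]  # 'node0.12' -> '0.12'

assert node_version_for_compiler('default') == '4'
assert node_version_for_compiler('node0.12') == '0.12'
assert node_version_for_compiler('node5') == '5'
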
diff --git a/tools/run_tests/sanity/sanity_tests.yaml b/tools/run_tests/sanity/sanity_tests.yaml
index efc21e6..c5945c6 100644
--- a/tools/run_tests/sanity/sanity_tests.yaml
+++ b/tools/run_tests/sanity/sanity_tests.yaml
@@ -10,3 +10,4 @@
 - script: tools/distrib/check_trailing_newlines.sh
 - script: tools/distrib/check_nanopb_output.sh
 - script: tools/distrib/check_include_guards.py
+- script: tools/distrib/python/check_grpcio_tools.py
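
(Each entry in this file is simply a script executed from the repository root; a minimal sketch of how the list could be consumed. The real driver wraps these in jobset jobs inside run_tests.py, so this is only the shape.)

import subprocess
import yaml

# Minimal sketch: run every sanity script listed in the YAML file.
with open('tools/run_tests/sanity/sanity_tests.yaml') as f:
  for entry in yaml.safe_load(f):
    subprocess.check_call(entry['script'].split())
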
diff --git a/tools/run_tests/sources_and_headers.json b/tools/run_tests/sources_and_headers.json
index f546f3b..3b3a49a 100644
--- a/tools/run_tests/sources_and_headers.json
+++ b/tools/run_tests/sources_and_headers.json
@@ -5903,6 +5903,7 @@
     ], 
     "headers": [
       "include/grpc/impl/codegen/byte_buffer.h", 
+      "include/grpc/impl/codegen/byte_buffer_reader.h", 
       "include/grpc/impl/codegen/compression_types.h", 
       "include/grpc/impl/codegen/connectivity_state.h", 
       "include/grpc/impl/codegen/grpc_types.h", 
@@ -5913,6 +5914,7 @@
     "name": "grpc_codegen", 
     "src": [
       "include/grpc/impl/codegen/byte_buffer.h", 
+      "include/grpc/impl/codegen/byte_buffer_reader.h", 
       "include/grpc/impl/codegen/compression_types.h", 
       "include/grpc/impl/codegen/connectivity_state.h", 
       "include/grpc/impl/codegen/grpc_types.h", 
diff --git a/tools/run_tests/stress_test/configs/asan.json b/tools/run_tests/stress_test/configs/asan.json
index cb9f557..7ae11cc 100644
--- a/tools/run_tests/stress_test/configs/asan.json
+++ b/tools/run_tests/stress_test/configs/asan.json
@@ -1,7 +1,7 @@
 {
   "dockerImages": {
     "grpc_stress_cxx_asan" : {
-      "buildScript": "tools/jenkins/build_interop_stress_image.sh",
+      "buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
       "dockerFileDir": "grpc_interop_stress_cxx",
       "buildType": "asan"
     }
diff --git a/tools/run_tests/stress_test/configs/csharp.json b/tools/run_tests/stress_test/configs/csharp.json
index b709069..587e155 100644
--- a/tools/run_tests/stress_test/configs/csharp.json
+++ b/tools/run_tests/stress_test/configs/csharp.json
@@ -1,7 +1,7 @@
 {
   "dockerImages": {
     "grpc_stress_csharp" : {
-      "buildScript": "tools/jenkins/build_interop_stress_image.sh",
+      "buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
       "dockerFileDir": "grpc_interop_stress_csharp"
     }
   },
@@ -80,7 +80,7 @@
     "buildDockerImages": true,
     "pollIntervalSecs": 60,
     "testDurationSecs": 7200,
-    "kubernetesProxyPort": 8001,
+    "kubernetesProxyPort": 8009,
     "datasetIdNamePrefix": "stress_test_csharp",
     "summaryTableId": "summary",
     "qpsTableId": "qps",
diff --git a/tools/run_tests/stress_test/configs/go.json b/tools/run_tests/stress_test/configs/go.json
index 36b465e..f1b2b52 100644
--- a/tools/run_tests/stress_test/configs/go.json
+++ b/tools/run_tests/stress_test/configs/go.json
@@ -1,7 +1,7 @@
 {
   "dockerImages": {
     "grpc_stress_go" : {
-      "buildScript": "tools/jenkins/build_interop_stress_image.sh",
+      "buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
       "dockerFileDir": "grpc_interop_stress_go"
     }
   },
diff --git a/tools/run_tests/stress_test/configs/java.json b/tools/run_tests/stress_test/configs/java.json
index 275384c..2ce6c00 100644
--- a/tools/run_tests/stress_test/configs/java.json
+++ b/tools/run_tests/stress_test/configs/java.json
@@ -1,7 +1,7 @@
 {
   "dockerImages": {
     "grpc_stress_java" : {
-      "buildScript": "tools/jenkins/build_interop_stress_image.sh",
+      "buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
       "dockerFileDir": "grpc_interop_stress_java"
     }
   },
diff --git a/tools/run_tests/stress_test/configs/node-cxx.json b/tools/run_tests/stress_test/configs/node-cxx.json
index c4245bf..094c123 100644
--- a/tools/run_tests/stress_test/configs/node-cxx.json
+++ b/tools/run_tests/stress_test/configs/node-cxx.json
@@ -1,12 +1,12 @@
 {
   "dockerImages": {
     "grpc_stress_cxx_opt" : {
-      "buildScript": "tools/jenkins/build_interop_stress_image.sh",
+      "buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
       "dockerFileDir": "grpc_interop_stress_cxx",
       "buildType": "opt"
     },
    "grpc_stress_node": {
-     "buildScript": "tools/jenkins/build_interop_stress_image.sh",
+     "buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
      "dockerFileDir": "grpc_interop_stress_node"
    }
   },
diff --git a/tools/run_tests/stress_test/configs/node.json b/tools/run_tests/stress_test/configs/node.json
index 7a48c56..85eb9e0 100644
--- a/tools/run_tests/stress_test/configs/node.json
+++ b/tools/run_tests/stress_test/configs/node.json
@@ -1,7 +1,7 @@
 {
   "dockerImages": {
     "grpc_stress_node" : {
-      "buildScript": "tools/jenkins/build_interop_stress_image.sh",
+      "buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
       "dockerFileDir": "grpc_interop_stress_node"
     }
   },
diff --git a/tools/run_tests/stress_test/configs/opt-tsan-asan.json b/tools/run_tests/stress_test/configs/opt-tsan-asan.json
index 936d151..fcb3678 100644
--- a/tools/run_tests/stress_test/configs/opt-tsan-asan.json
+++ b/tools/run_tests/stress_test/configs/opt-tsan-asan.json
@@ -1,17 +1,17 @@
 {
   "dockerImages": {
     "grpc_stress_cxx_opt" : {
-      "buildScript": "tools/jenkins/build_interop_stress_image.sh",
+      "buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
       "dockerFileDir": "grpc_interop_stress_cxx",
       "buildType": "opt"
     },
     "grpc_stress_cxx_tsan": {
-      "buildScript": "tools/jenkins/build_interop_stress_image.sh",
+      "buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
       "dockerFileDir": "grpc_interop_stress_cxx",
       "buildType": "tsan"
     },
     "grpc_stress_cxx_asan": {
-      "buildScript": "tools/jenkins/build_interop_stress_image.sh",
+      "buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
       "dockerFileDir": "grpc_interop_stress_cxx",
       "buildType": "asan"
     }
diff --git a/tools/run_tests/stress_test/configs/opt.json b/tools/run_tests/stress_test/configs/opt.json
index f45b824..5e0e930 100644
--- a/tools/run_tests/stress_test/configs/opt.json
+++ b/tools/run_tests/stress_test/configs/opt.json
@@ -1,7 +1,7 @@
 {
   "dockerImages": {
     "grpc_stress_cxx_opt" : {
-      "buildScript": "tools/jenkins/build_interop_stress_image.sh",
+      "buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
       "dockerFileDir": "grpc_interop_stress_cxx",
       "buildType": "opt"
     }
diff --git a/tools/run_tests/stress_test/configs/python.json b/tools/run_tests/stress_test/configs/python.json
new file mode 100644
index 0000000..4f85de1
--- /dev/null
+++ b/tools/run_tests/stress_test/configs/python.json
@@ -0,0 +1,98 @@
+{
+  "dockerImages": {
+    "grpc_stress_python" : {
+      "buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
+      "dockerFileDir": "grpc_interop_stress_python"
+    }
+  },
+
+  "clientTemplates": {
+    "baseTemplates": {
+      "default": {
+        "wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_client.py",
+        "pollIntervalSecs": 60,
+        "clientArgs": {
+          "num_channels_per_server":5,
+          "num_stubs_per_channel":10,
+          "test_cases": "empty_unary:1,large_unary:1,client_streaming:1,server_streaming:1,empty_stream:1",
+          "metrics_port": 8081
+        },
+        "metricsPort": 8081,
+        "metricsArgs": {
+          "metrics_server_address": "localhost:8081",
+          "total_only": "true"
+        },
+        "env": {
+          "PYTHONPATH": "/var/local/git/grpc/src/python/gens:/var/local/git/grpc/src/python/grpcio",
+          "LD_LIBRARY_PATH":"/var/local/git/grpc/libs/opt"
+        }
+      }
+    },
+    "templates": {
+      "python_client": {
+        "baseTemplate": "default",
+        "stressClientCmd": [
+          "python",
+          "/var/local/git/grpc/src/python/grpcio/tests/stress/client.py"
+        ],
+        "metricsClientCmd": ["/var/local/git/grpc/bins/opt/metrics_client"]
+      }
+    }
+  },
+
+  "serverTemplates": {
+    "baseTemplates":{
+      "default": {
+        "wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_server.py",
+        "serverPort": 8080,
+        "serverArgs": {
+          "port": 8080
+        },
+        "env": {
+          "PYTHONPATH": "/var/local/git/grpc/src/python/gens:/var/local/git/grpc/src/python/grpcio",
+          "LD_LIBRARY_PATH":"/var/local/git/grpc/libs/opt"
+        }
+      }
+    },
+    "templates": {
+      "python_server": {
+        "baseTemplate": "default",
+        "stressServerCmd": [
+          "python",
+          "/var/local/git/grpc/src/python/grpcio/tests/interop/server.py"
+        ]
+      }
+    }
+  },
+
+  "testMatrix": {
+    "serverPodSpecs": {
+      "python-stress-server": {
+        "serverTemplate": "python_server",
+        "dockerImage": "grpc_stress_python",
+        "numInstances": 1
+      }
+    },
+
+    "clientPodSpecs": {
+      "python-stress-client": {
+        "clientTemplate": "python_client",
+        "dockerImage": "grpc_stress_python",
+        "numInstances": 5,
+        "serverPodSpec": "python-stress-server"
+      }
+    }
+  },
+
+  "globalSettings": {
+    "buildDockerImages": true,
+    "pollIntervalSecs": 60,
+    "testDurationSecs": 7200,
+    "kubernetesProxyPort": 8011,
+    "datasetIdNamePrefix": "stress_test_python",
+    "summaryTableId": "summary",
+    "qpsTableId": "qps",
+    "podWarmupSecs": 60
+  }
+}
+
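
(Roughly, the driver combines stressClientCmd with clientArgs to form the command the wrapper script launches inside each client pod; a hedged reconstruction of the python-stress-client invocation implied above. The exact flag formatting is an assumption.)

stress_client_cmd = [
    'python',
    '/var/local/git/grpc/src/python/grpcio/tests/stress/client.py',
]
client_args = {
    'num_channels_per_server': 5,
    'num_stubs_per_channel': 10,
    'test_cases': ('empty_unary:1,large_unary:1,client_streaming:1,'
                   'server_streaming:1,empty_stream:1'),
    'metrics_port': 8081,
}
cmd = stress_client_cmd + ['--%s=%s' % (k, v)
                           for k, v in sorted(client_args.items())]
print(' '.join(cmd))
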
diff --git a/tools/run_tests/stress_test/configs/ruby.json b/tools/run_tests/stress_test/configs/ruby.json
new file mode 100644
index 0000000..7e2afcb
--- /dev/null
+++ b/tools/run_tests/stress_test/configs/ruby.json
@@ -0,0 +1,92 @@
+{
+  "dockerImages": {
+    "grpc_stress_ruby" : {
+      "buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
+      "dockerFileDir": "grpc_interop_stress_ruby"
+    }
+  },
+
+  "clientTemplates": {
+    "baseTemplates": {
+      "default": {
+        "wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_client.py",
+        "pollIntervalSecs": 60,
+        "clientArgs": {
+          "num_channels_per_server":5,
+          "num_stubs_per_channel":10,
+          "test_cases": "empty_unary:1,large_unary:1,client_streaming:1,server_streaming:1,empty_stream:1",
+          "metrics_port": 8081
+        },
+        "metricsPort": 8081,
+        "metricsArgs": {
+          "metrics_server_address": "localhost:8081",
+          "total_only": "true"
+        }
+      }
+    },
+    "templates": {
+      "ruby_client": {
+        "baseTemplate": "default",
+        "stressClientCmd": [
+          "/var/local/git/grpc/tools/gcp/stress_test/run_ruby.sh",
+          "ruby",
+          "/var/local/git/grpc/src/ruby/stress/stress_client.rb"
+        ],
+        "metricsClientCmd": ["/var/local/git/grpc/bins/opt/metrics_client"]
+      }
+    }
+  },
+
+  "serverTemplates": {
+    "baseTemplates":{
+      "default": {
+        "wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_server.py",
+        "serverPort": 8080,
+        "serverArgs": {
+          "port": 8080
+        }
+      }
+    },
+    "templates": {
+      "ruby_server": {
+        "baseTemplate": "default",
+        "stressServerCmd": [
+          "/var/local/git/grpc/tools/gcp/stress_test/run_ruby.sh",
+          "ruby",
+          "/var/local/git/grpc/src/ruby/pb/test/server.rb"
+        ]
+      }
+    }
+  },
+
+  "testMatrix": {
+    "serverPodSpecs": {
+      "stress-server-ruby": {
+        "serverTemplate": "ruby_server",
+        "dockerImage": "grpc_stress_ruby",
+        "numInstances": 1
+      }
+    },
+
+    "clientPodSpecs": {
+      "stress-client-ruby": {
+        "clientTemplate": "ruby_client",
+        "dockerImage": "grpc_stress_ruby",
+        "numInstances": 10,
+        "serverPodSpec": "stress-server-ruby"
+      }
+    }
+  },
+
+  "globalSettings": {
+    "buildDockerImages": true,
+    "pollIntervalSecs": 60,
+    "testDurationSecs": 7200,
+    "kubernetesProxyPort": 8001,
+    "datasetIdNamePrefix": "stress_test_ruby",
+    "summaryTableId": "summary",
+    "qpsTableId": "qps",
+    "podWarmupSecs": 60
+  }
+}
+
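
(The ruby config keeps kubernetesProxyPort 8001 while csharp moved to 8009 above and the new python config uses 8011, which suggests each language's stress run gets its own local kubectl proxy so concurrent runs do not collide. A hedged illustration of what that port is typically used for; the URL shape is an assumption, not code from this tree.)

import urllib2  # the tree is still Python 2 at this point

KUBERNETES_PROXY_PORT = 8001  # value from ruby.json above
# With `kubectl proxy --port=8001` running, the Kubernetes REST API is local:
api_root = 'http://localhost:%d/api/v1' % KUBERNETES_PROXY_PORT
print(urllib2.urlopen(api_root + '/namespaces/default/pods').read())
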
diff --git a/tools/run_tests/stress_test/configs/tsan.json b/tools/run_tests/stress_test/configs/tsan.json
index 6ef3bdf..abc759c 100644
--- a/tools/run_tests/stress_test/configs/tsan.json
+++ b/tools/run_tests/stress_test/configs/tsan.json
@@ -1,7 +1,7 @@
 {
   "dockerImages": {
     "grpc_stress_cxx_tsan" : {
-      "buildScript": "tools/jenkins/build_interop_stress_image.sh",
+      "buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
       "dockerFileDir": "grpc_interop_stress_cxx",
       "buildType": "tsan"
     }
diff --git a/tools/run_tests/stress_test/run_on_gke.py b/tools/run_tests/stress_test/run_on_gke.py
index d4f1c4a..583e583 100755
--- a/tools/run_tests/stress_test/run_on_gke.py
+++ b/tools/run_tests/stress_test/run_on_gke.py
@@ -69,7 +69,7 @@
 
   def __init__(self, name, stress_client_cmd, metrics_client_cmd, metrics_port,
                wrapper_script_path, poll_interval_secs, client_args_dict,
-               metrics_args_dict, will_run_forever):
+               metrics_args_dict, will_run_forever, env_dict):
     self.name = name
     self.stress_client_cmd = stress_client_cmd
     self.metrics_client_cmd = metrics_client_cmd
@@ -79,19 +79,21 @@
     self.client_args_dict = client_args_dict
     self.metrics_args_dict = metrics_args_dict
     self.will_run_forever = will_run_forever
+    self.env_dict = env_dict
 
 
 class ServerTemplate:
   """ Contains all the common settings used by a stress server """
 
   def __init__(self, name, server_cmd, wrapper_script_path, server_port,
-               server_args_dict, will_run_forever):
+               server_args_dict, will_run_forever, env_dict):
     self.name = name
     self.server_cmd = server_cmd
     self.wrapper_script_path = wrapper_script_path
     self.server_port = server_port
     self.server_args_dict = server_args_dict
     self.will_run_forever = will_run_forever
+    self.env_dict = env_dict
 
 
 class DockerImage:
@@ -240,6 +242,7 @@
     # server_pod_spec.template.wrapper_script_path) are injected into the
     # container via environment variables
     server_env = self.gke_env.copy()
+    server_env.update(server_pod_spec.template.env_dict)
     server_env.update({
         'STRESS_TEST_IMAGE_TYPE': 'SERVER',
         'STRESS_TEST_CMD': server_pod_spec.template.server_cmd,
@@ -283,6 +286,7 @@
     # client_pod_spec.template.wrapper_script_path) are injected into the
     # container via environment variables
     client_env = self.gke_env.copy()
+    client_env.update(client_pod_spec.template.env_dict)
     client_env.update({
         'STRESS_TEST_IMAGE_TYPE': 'CLIENT',
         'STRESS_TEST_CMD': client_pod_spec.template.stress_client_cmd,
@@ -425,7 +429,8 @@
           template_name, stress_client_cmd, metrics_client_cmd,
           temp_dict['metricsPort'], temp_dict['wrapperScriptPath'],
           temp_dict['pollIntervalSecs'], temp_dict['clientArgs'].copy(),
-          temp_dict['metricsArgs'].copy(), temp_dict.get('willRunForever', 1))
+          temp_dict['metricsArgs'].copy(), temp_dict.get('willRunForever', 1),
+          temp_dict.get('env', {}).copy())
 
     return client_templates_dict
 
@@ -461,7 +466,7 @@
       server_templates_dict[template_name] = ServerTemplate(
           template_name, stress_server_cmd, temp_dict['wrapperScriptPath'],
           temp_dict['serverPort'], temp_dict['serverArgs'].copy(),
-          temp_dict.get('willRunForever', 1))
+          temp_dict.get('willRunForever', 1), temp_dict.get('env', {}).copy())
 
     return server_templates_dict
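
(The env_dict additions above thread the optional "env" block from the JSON configs through the templates and into the pod environment, with the driver's own STRESS_TEST_* settings applied last. A condensed sketch of that ordering; class and helper names are trimmed to the relevant fields and the base env contents are placeholders.)

class ServerTemplate(object):
  """Only the fields relevant to the env plumbing shown above."""

  def __init__(self, server_args_dict, env_dict):
    self.server_args_dict = server_args_dict
    self.env_dict = env_dict  # populated from temp_dict.get('env', {})


def build_server_env(gke_env, template):
  server_env = gke_env.copy()
  server_env.update(template.env_dict)  # config-supplied variables
  server_env.update({'STRESS_TEST_IMAGE_TYPE': 'SERVER'})  # driver values win
  return server_env


template = ServerTemplate({'port': 8080},
                          {'LD_LIBRARY_PATH': '/var/local/git/grpc/libs/opt'})
print(build_server_env({'DATASET_ID': 'stress_test_python'}, template))
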
 
diff --git a/vsprojects/vcxproj/grpc++/grpc++.vcxproj b/vsprojects/vcxproj/grpc++/grpc++.vcxproj
index 29cab37..0ec53ac 100644
--- a/vsprojects/vcxproj/grpc++/grpc++.vcxproj
+++ b/vsprojects/vcxproj/grpc++/grpc++.vcxproj
@@ -331,6 +331,7 @@
     <ClInclude Include="$(SolutionDir)\..\include\grpc++\impl\codegen\sync_stream.h" />
     <ClInclude Include="$(SolutionDir)\..\include\grpc++\impl\codegen\time.h" />
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\byte_buffer.h" />
+    <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\byte_buffer_reader.h" />
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\compression_types.h" />
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\connectivity_state.h" />
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\grpc_types.h" />
diff --git a/vsprojects/vcxproj/grpc++/grpc++.vcxproj.filters b/vsprojects/vcxproj/grpc++/grpc++.vcxproj.filters
index 15e2807..491aeae 100644
--- a/vsprojects/vcxproj/grpc++/grpc++.vcxproj.filters
+++ b/vsprojects/vcxproj/grpc++/grpc++.vcxproj.filters
@@ -315,6 +315,9 @@
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\byte_buffer.h">
       <Filter>include\grpc\impl\codegen</Filter>
     </ClInclude>
+    <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\byte_buffer_reader.h">
+      <Filter>include\grpc\impl\codegen</Filter>
+    </ClInclude>
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\compression_types.h">
       <Filter>include\grpc\impl\codegen</Filter>
     </ClInclude>
diff --git a/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj b/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj
index fcda361..96bee41 100644
--- a/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj
+++ b/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj
@@ -331,6 +331,7 @@
     <ClInclude Include="$(SolutionDir)\..\include\grpc++\impl\codegen\sync_stream.h" />
     <ClInclude Include="$(SolutionDir)\..\include\grpc++\impl\codegen\time.h" />
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\byte_buffer.h" />
+    <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\byte_buffer_reader.h" />
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\compression_types.h" />
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\connectivity_state.h" />
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\grpc_types.h" />
diff --git a/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj.filters b/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj.filters
index 1dc95f9..fe9eed7 100644
--- a/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj.filters
+++ b/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj.filters
@@ -300,6 +300,9 @@
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\byte_buffer.h">
       <Filter>include\grpc\impl\codegen</Filter>
     </ClInclude>
+    <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\byte_buffer_reader.h">
+      <Filter>include\grpc\impl\codegen</Filter>
+    </ClInclude>
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\compression_types.h">
       <Filter>include\grpc\impl\codegen</Filter>
     </ClInclude>
diff --git a/vsprojects/vcxproj/grpc/grpc.vcxproj b/vsprojects/vcxproj/grpc/grpc.vcxproj
index 4eec05a..03f4eaa 100644
--- a/vsprojects/vcxproj/grpc/grpc.vcxproj
+++ b/vsprojects/vcxproj/grpc/grpc.vcxproj
@@ -273,6 +273,7 @@
     <ClInclude Include="$(SolutionDir)\..\include\grpc\grpc.h" />
     <ClInclude Include="$(SolutionDir)\..\include\grpc\status.h" />
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\byte_buffer.h" />
+    <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\byte_buffer_reader.h" />
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\compression_types.h" />
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\connectivity_state.h" />
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\grpc_types.h" />
diff --git a/vsprojects/vcxproj/grpc/grpc.vcxproj.filters b/vsprojects/vcxproj/grpc/grpc.vcxproj.filters
index 17c88c4..4617e3d 100644
--- a/vsprojects/vcxproj/grpc/grpc.vcxproj.filters
+++ b/vsprojects/vcxproj/grpc/grpc.vcxproj.filters
@@ -516,6 +516,9 @@
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\byte_buffer.h">
       <Filter>include\grpc\impl\codegen</Filter>
     </ClInclude>
+    <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\byte_buffer_reader.h">
+      <Filter>include\grpc\impl\codegen</Filter>
+    </ClInclude>
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\compression_types.h">
       <Filter>include\grpc\impl\codegen</Filter>
     </ClInclude>
diff --git a/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj b/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj
index 26050dc..0eb6535 100644
--- a/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj
+++ b/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj
@@ -264,6 +264,7 @@
     <ClInclude Include="$(SolutionDir)\..\include\grpc\grpc.h" />
     <ClInclude Include="$(SolutionDir)\..\include\grpc\status.h" />
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\byte_buffer.h" />
+    <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\byte_buffer_reader.h" />
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\compression_types.h" />
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\connectivity_state.h" />
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\grpc_types.h" />
diff --git a/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj.filters b/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj.filters
index a4acf51..f544fe6 100644
--- a/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj.filters
+++ b/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj.filters
@@ -456,6 +456,9 @@
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\byte_buffer.h">
       <Filter>include\grpc\impl\codegen</Filter>
     </ClInclude>
+    <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\byte_buffer_reader.h">
+      <Filter>include\grpc\impl\codegen</Filter>
+    </ClInclude>
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\compression_types.h">
       <Filter>include\grpc\impl\codegen</Filter>
     </ClInclude>
diff --git a/vsprojects/vcxproj/test/codegen_test_full/codegen_test_full.vcxproj b/vsprojects/vcxproj/test/codegen_test_full/codegen_test_full.vcxproj
index cd0b40c..34e939c 100644
--- a/vsprojects/vcxproj/test/codegen_test_full/codegen_test_full.vcxproj
+++ b/vsprojects/vcxproj/test/codegen_test_full/codegen_test_full.vcxproj
@@ -191,6 +191,7 @@
     <ClInclude Include="$(SolutionDir)\..\include\grpc++\impl\codegen\sync_stream.h" />
     <ClInclude Include="$(SolutionDir)\..\include\grpc++\impl\codegen\time.h" />
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\byte_buffer.h" />
+    <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\byte_buffer_reader.h" />
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\compression_types.h" />
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\connectivity_state.h" />
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\grpc_types.h" />
diff --git a/vsprojects/vcxproj/test/codegen_test_full/codegen_test_full.vcxproj.filters b/vsprojects/vcxproj/test/codegen_test_full/codegen_test_full.vcxproj.filters
index 029b8ef..d662365 100644
--- a/vsprojects/vcxproj/test/codegen_test_full/codegen_test_full.vcxproj.filters
+++ b/vsprojects/vcxproj/test/codegen_test_full/codegen_test_full.vcxproj.filters
@@ -120,6 +120,9 @@
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\byte_buffer.h">
       <Filter>include\grpc\impl\codegen</Filter>
     </ClInclude>
+    <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\byte_buffer_reader.h">
+      <Filter>include\grpc\impl\codegen</Filter>
+    </ClInclude>
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\compression_types.h">
       <Filter>include\grpc\impl\codegen</Filter>
     </ClInclude>
diff --git a/vsprojects/vcxproj/test/codegen_test_minimal/codegen_test_minimal.vcxproj b/vsprojects/vcxproj/test/codegen_test_minimal/codegen_test_minimal.vcxproj
index 6d138fa..890d77d 100644
--- a/vsprojects/vcxproj/test/codegen_test_minimal/codegen_test_minimal.vcxproj
+++ b/vsprojects/vcxproj/test/codegen_test_minimal/codegen_test_minimal.vcxproj
@@ -191,6 +191,7 @@
     <ClInclude Include="$(SolutionDir)\..\include\grpc++\impl\codegen\sync_stream.h" />
     <ClInclude Include="$(SolutionDir)\..\include\grpc++\impl\codegen\time.h" />
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\byte_buffer.h" />
+    <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\byte_buffer_reader.h" />
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\compression_types.h" />
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\connectivity_state.h" />
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\grpc_types.h" />
diff --git a/vsprojects/vcxproj/test/codegen_test_minimal/codegen_test_minimal.vcxproj.filters b/vsprojects/vcxproj/test/codegen_test_minimal/codegen_test_minimal.vcxproj.filters
index dc3f0b2..4e0ba65 100644
--- a/vsprojects/vcxproj/test/codegen_test_minimal/codegen_test_minimal.vcxproj.filters
+++ b/vsprojects/vcxproj/test/codegen_test_minimal/codegen_test_minimal.vcxproj.filters
@@ -120,6 +120,9 @@
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\byte_buffer.h">
       <Filter>include\grpc\impl\codegen</Filter>
     </ClInclude>
+    <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\byte_buffer_reader.h">
+      <Filter>include\grpc\impl\codegen</Filter>
+    </ClInclude>
     <ClInclude Include="$(SolutionDir)\..\include\grpc\impl\codegen\compression_types.h">
       <Filter>include\grpc\impl\codegen</Filter>
     </ClInclude>