Merge github.com:grpc/grpc into naming-crisis
diff --git a/BUILD b/BUILD
index 7eb5979..257f12f 100644
--- a/BUILD
+++ b/BUILD
@@ -675,8 +675,11 @@
     "src/cpp/client/secure_credentials.h",
     "src/cpp/common/secure_auth_context.h",
     "src/cpp/server/secure_server_credentials.h",
-    "src/cpp/client/channel.h",
+    "src/cpp/client/create_channel_internal.h",
     "src/cpp/common/create_auth_context.h",
+    "src/cpp/server/dynamic_thread_pool.h",
+    "src/cpp/server/fixed_size_thread_pool.h",
+    "src/cpp/server/thread_pool_interface.h",
     "src/cpp/client/secure_channel_arguments.cc",
     "src/cpp/client/secure_credentials.cc",
     "src/cpp/common/auth_property_iterator.cc",
@@ -687,10 +690,10 @@
     "src/cpp/client/channel_arguments.cc",
     "src/cpp/client/client_context.cc",
     "src/cpp/client/create_channel.cc",
+    "src/cpp/client/create_channel_internal.cc",
     "src/cpp/client/credentials.cc",
     "src/cpp/client/generic_stub.cc",
     "src/cpp/client/insecure_credentials.cc",
-    "src/cpp/client/internal_stub.cc",
     "src/cpp/common/call.cc",
     "src/cpp/common/completion_queue.cc",
     "src/cpp/common/rpc_method.cc",
@@ -710,25 +713,16 @@
     "src/cpp/util/time.cc",
   ],
   hdrs = [
-    "include/grpc++/async_generic_service.h",
-    "include/grpc++/async_unary_call.h",
-    "include/grpc++/auth_context.h",
-    "include/grpc++/byte_buffer.h",
-    "include/grpc++/channel_arguments.h",
-    "include/grpc++/channel_interface.h",
+    "include/grpc++/channel.h",
     "include/grpc++/client_context.h",
     "include/grpc++/completion_queue.h",
-    "include/grpc++/config.h",
-    "include/grpc++/config_protobuf.h",
     "include/grpc++/create_channel.h",
     "include/grpc++/credentials.h",
-    "include/grpc++/dynamic_thread_pool.h",
-    "include/grpc++/fixed_size_thread_pool.h",
-    "include/grpc++/generic_stub.h",
+    "include/grpc++/generic/async_generic_service.h",
+    "include/grpc++/generic/generic_stub.h",
     "include/grpc++/impl/call.h",
     "include/grpc++/impl/client_unary_call.h",
     "include/grpc++/impl/grpc_library.h",
-    "include/grpc++/impl/internal_stub.h",
     "include/grpc++/impl/proto_utils.h",
     "include/grpc++/impl/rpc_method.h",
     "include/grpc++/impl/rpc_service_method.h",
@@ -744,13 +738,19 @@
     "include/grpc++/server_builder.h",
     "include/grpc++/server_context.h",
     "include/grpc++/server_credentials.h",
-    "include/grpc++/slice.h",
-    "include/grpc++/status.h",
-    "include/grpc++/status_code_enum.h",
-    "include/grpc++/stream.h",
-    "include/grpc++/stub_options.h",
-    "include/grpc++/thread_pool_interface.h",
-    "include/grpc++/time.h",
+    "include/grpc++/support/async_stream.h",
+    "include/grpc++/support/async_unary_call.h",
+    "include/grpc++/support/auth_context.h",
+    "include/grpc++/support/byte_buffer.h",
+    "include/grpc++/support/channel_arguments.h",
+    "include/grpc++/support/config.h",
+    "include/grpc++/support/config_protobuf.h",
+    "include/grpc++/support/slice.h",
+    "include/grpc++/support/status.h",
+    "include/grpc++/support/status_code_enum.h",
+    "include/grpc++/support/stub_options.h",
+    "include/grpc++/support/sync_stream.h",
+    "include/grpc++/support/time.h",
   ],
   includes = [
     "include",
@@ -767,17 +767,20 @@
 cc_library(
   name = "grpc++_unsecure",
   srcs = [
-    "src/cpp/client/channel.h",
+    "src/cpp/client/create_channel_internal.h",
     "src/cpp/common/create_auth_context.h",
+    "src/cpp/server/dynamic_thread_pool.h",
+    "src/cpp/server/fixed_size_thread_pool.h",
+    "src/cpp/server/thread_pool_interface.h",
     "src/cpp/common/insecure_create_auth_context.cc",
     "src/cpp/client/channel.cc",
     "src/cpp/client/channel_arguments.cc",
     "src/cpp/client/client_context.cc",
     "src/cpp/client/create_channel.cc",
+    "src/cpp/client/create_channel_internal.cc",
     "src/cpp/client/credentials.cc",
     "src/cpp/client/generic_stub.cc",
     "src/cpp/client/insecure_credentials.cc",
-    "src/cpp/client/internal_stub.cc",
     "src/cpp/common/call.cc",
     "src/cpp/common/completion_queue.cc",
     "src/cpp/common/rpc_method.cc",
@@ -797,25 +800,16 @@
     "src/cpp/util/time.cc",
   ],
   hdrs = [
-    "include/grpc++/async_generic_service.h",
-    "include/grpc++/async_unary_call.h",
-    "include/grpc++/auth_context.h",
-    "include/grpc++/byte_buffer.h",
-    "include/grpc++/channel_arguments.h",
-    "include/grpc++/channel_interface.h",
+    "include/grpc++/channel.h",
     "include/grpc++/client_context.h",
     "include/grpc++/completion_queue.h",
-    "include/grpc++/config.h",
-    "include/grpc++/config_protobuf.h",
     "include/grpc++/create_channel.h",
     "include/grpc++/credentials.h",
-    "include/grpc++/dynamic_thread_pool.h",
-    "include/grpc++/fixed_size_thread_pool.h",
-    "include/grpc++/generic_stub.h",
+    "include/grpc++/generic/async_generic_service.h",
+    "include/grpc++/generic/generic_stub.h",
     "include/grpc++/impl/call.h",
     "include/grpc++/impl/client_unary_call.h",
     "include/grpc++/impl/grpc_library.h",
-    "include/grpc++/impl/internal_stub.h",
     "include/grpc++/impl/proto_utils.h",
     "include/grpc++/impl/rpc_method.h",
     "include/grpc++/impl/rpc_service_method.h",
@@ -831,13 +825,19 @@
     "include/grpc++/server_builder.h",
     "include/grpc++/server_context.h",
     "include/grpc++/server_credentials.h",
-    "include/grpc++/slice.h",
-    "include/grpc++/status.h",
-    "include/grpc++/status_code_enum.h",
-    "include/grpc++/stream.h",
-    "include/grpc++/stub_options.h",
-    "include/grpc++/thread_pool_interface.h",
-    "include/grpc++/time.h",
+    "include/grpc++/support/async_stream.h",
+    "include/grpc++/support/async_unary_call.h",
+    "include/grpc++/support/auth_context.h",
+    "include/grpc++/support/byte_buffer.h",
+    "include/grpc++/support/channel_arguments.h",
+    "include/grpc++/support/config.h",
+    "include/grpc++/support/config_protobuf.h",
+    "include/grpc++/support/slice.h",
+    "include/grpc++/support/status.h",
+    "include/grpc++/support/status_code_enum.h",
+    "include/grpc++/support/stub_options.h",
+    "include/grpc++/support/sync_stream.h",
+    "include/grpc++/support/time.h",
   ],
   includes = [
     "include",
@@ -854,8 +854,8 @@
 cc_library(
   name = "grpc_plugin_support",
   srcs = [
-    "include/grpc++/config.h",
-    "include/grpc++/config_protobuf.h",
+    "include/grpc++/support/config.h",
+    "include/grpc++/support/config_protobuf.h",
     "src/compiler/config.h",
     "src/compiler/cpp_generator.h",
     "src/compiler/cpp_generator_helpers.h",
diff --git a/Makefile b/Makefile
index 31628b4..e5a7a2c 100644
--- a/Makefile
+++ b/Makefile
@@ -811,6 +811,7 @@
 grpc_auth_context_test: $(BINDIR)/$(CONFIG)/grpc_auth_context_test
 grpc_base64_test: $(BINDIR)/$(CONFIG)/grpc_base64_test
 grpc_byte_buffer_reader_test: $(BINDIR)/$(CONFIG)/grpc_byte_buffer_reader_test
+grpc_channel_args_test: $(BINDIR)/$(CONFIG)/grpc_channel_args_test
 grpc_channel_stack_test: $(BINDIR)/$(CONFIG)/grpc_channel_stack_test
 grpc_completion_queue_test: $(BINDIR)/$(CONFIG)/grpc_completion_queue_test
 grpc_create_jwt: $(BINDIR)/$(CONFIG)/grpc_create_jwt
@@ -862,9 +863,7 @@
 cxx_byte_buffer_test: $(BINDIR)/$(CONFIG)/cxx_byte_buffer_test
 cxx_slice_test: $(BINDIR)/$(CONFIG)/cxx_slice_test
 cxx_time_test: $(BINDIR)/$(CONFIG)/cxx_time_test
-dynamic_thread_pool_test: $(BINDIR)/$(CONFIG)/dynamic_thread_pool_test
 end2end_test: $(BINDIR)/$(CONFIG)/end2end_test
-fixed_size_thread_pool_test: $(BINDIR)/$(CONFIG)/fixed_size_thread_pool_test
 generic_end2end_test: $(BINDIR)/$(CONFIG)/generic_end2end_test
 grpc_cli: $(BINDIR)/$(CONFIG)/grpc_cli
 grpc_cpp_plugin: $(BINDIR)/$(CONFIG)/grpc_cpp_plugin
@@ -1731,12 +1730,12 @@
 
 buildtests: buildtests_c buildtests_cxx buildtests_zookeeper
 
-buildtests_c: privatelibs_c $(BINDIR)/$(CONFIG)/alarm_heap_test $(BINDIR)/$(CONFIG)/alarm_list_test $(BINDIR)/$(CONFIG)/alarm_test $(BINDIR)/$(CONFIG)/alpn_test $(BINDIR)/$(CONFIG)/bin_encoder_test $(BINDIR)/$(CONFIG)/chttp2_status_conversion_test $(BINDIR)/$(CONFIG)/chttp2_stream_encoder_test $(BINDIR)/$(CONFIG)/chttp2_stream_map_test $(BINDIR)/$(CONFIG)/compression_test $(BINDIR)/$(CONFIG)/dualstack_socket_test $(BINDIR)/$(CONFIG)/fd_conservation_posix_test $(BINDIR)/$(CONFIG)/fd_posix_test $(BINDIR)/$(CONFIG)/fling_client $(BINDIR)/$(CONFIG)/fling_server $(BINDIR)/$(CONFIG)/fling_stream_test $(BINDIR)/$(CONFIG)/fling_test $(BINDIR)/$(CONFIG)/gpr_cmdline_test $(BINDIR)/$(CONFIG)/gpr_env_test $(BINDIR)/$(CONFIG)/gpr_file_test $(BINDIR)/$(CONFIG)/gpr_histogram_test $(BINDIR)/$(CONFIG)/gpr_host_port_test $(BINDIR)/$(CONFIG)/gpr_log_test $(BINDIR)/$(CONFIG)/gpr_slice_buffer_test $(BINDIR)/$(CONFIG)/gpr_slice_test $(BINDIR)/$(CONFIG)/gpr_stack_lockfree_test $(BINDIR)/$(CONFIG)/gpr_string_test $(BINDIR)/$(CONFIG)/gpr_sync_test $(BINDIR)/$(CONFIG)/gpr_thd_test $(BINDIR)/$(CONFIG)/gpr_time_test $(BINDIR)/$(CONFIG)/gpr_tls_test $(BINDIR)/$(CONFIG)/gpr_useful_test $(BINDIR)/$(CONFIG)/grpc_auth_context_test $(BINDIR)/$(CONFIG)/grpc_base64_test $(BINDIR)/$(CONFIG)/grpc_byte_buffer_reader_test $(BINDIR)/$(CONFIG)/grpc_channel_stack_test $(BINDIR)/$(CONFIG)/grpc_completion_queue_test $(BINDIR)/$(CONFIG)/grpc_credentials_test $(BINDIR)/$(CONFIG)/grpc_json_token_test $(BINDIR)/$(CONFIG)/grpc_jwt_verifier_test $(BINDIR)/$(CONFIG)/grpc_security_connector_test $(BINDIR)/$(CONFIG)/grpc_stream_op_test $(BINDIR)/$(CONFIG)/hpack_parser_test $(BINDIR)/$(CONFIG)/hpack_table_test $(BINDIR)/$(CONFIG)/httpcli_format_request_test $(BINDIR)/$(CONFIG)/httpcli_parser_test $(BINDIR)/$(CONFIG)/httpcli_test $(BINDIR)/$(CONFIG)/json_rewrite $(BINDIR)/$(CONFIG)/json_rewrite_test $(BINDIR)/$(CONFIG)/json_test $(BINDIR)/$(CONFIG)/lame_client_test $(BINDIR)/$(CONFIG)/message_compress_test $(BINDIR)/$(CONFIG)/multi_init_test $(BINDIR)/$(CONFIG)/multiple_server_queues_test $(BINDIR)/$(CONFIG)/murmur_hash_test $(BINDIR)/$(CONFIG)/no_server_test $(BINDIR)/$(CONFIG)/resolve_address_test $(BINDIR)/$(CONFIG)/secure_endpoint_test $(BINDIR)/$(CONFIG)/sockaddr_utils_test $(BINDIR)/$(CONFIG)/tcp_client_posix_test $(BINDIR)/$(CONFIG)/tcp_posix_test $(BINDIR)/$(CONFIG)/tcp_server_posix_test $(BINDIR)/$(CONFIG)/time_averaged_stats_test $(BINDIR)/$(CONFIG)/timeout_encoding_test $(BINDIR)/$(CONFIG)/timers_test $(BINDIR)/$(CONFIG)/transport_metadata_test $(BINDIR)/$(CONFIG)/transport_security_test $(BINDIR)/$(CONFIG)/udp_server_test $(BINDIR)/$(CONFIG)/uri_parser_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_channel_connectivity_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_default_host_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_early_server_shutdown_finishes_tags_test 
$(BINDIR)/$(CONFIG)/chttp2_fake_security_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_channel_connectivity_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_default_host_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_payload_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_channel_connectivity_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_default_host_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_channel_connectivity_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_channel_connectivity_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_registered_call_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_channel_connectivity_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_default_host_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_payload_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_default_host_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_channel_connectivity_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_default_host_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_tags_test 
$(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_no_op_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_channel_connectivity_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_default_host_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_no_op_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_binary_metadata_and_payload_test 
$(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_default_host_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_no_op_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_server_finishes_request_test 
$(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_channel_connectivity_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_default_host_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_no_op_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_invoke_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_no_op_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_no_op_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_no_op_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_channel_connectivity_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_default_host_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_compressed_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_channel_connectivity_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_default_host_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_graceful_server_shutdown_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_compressed_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_channel_connectivity_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_payload_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_compressed_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_channel_connectivity_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_with_compressed_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_server_finishes_request_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_channel_connectivity_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_default_host_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_compressed_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_cancel_before_invoke_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_default_host_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_metadata_and_payload_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_compressed_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_compressed_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_with_high_initial_sequence_number_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_compressed_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/connection_prefix_bad_client_test $(BINDIR)/$(CONFIG)/initial_settings_frame_bad_client_test
+buildtests_c: privatelibs_c $(BINDIR)/$(CONFIG)/alarm_heap_test $(BINDIR)/$(CONFIG)/alarm_list_test $(BINDIR)/$(CONFIG)/alarm_test $(BINDIR)/$(CONFIG)/alpn_test $(BINDIR)/$(CONFIG)/bin_encoder_test $(BINDIR)/$(CONFIG)/chttp2_status_conversion_test $(BINDIR)/$(CONFIG)/chttp2_stream_encoder_test $(BINDIR)/$(CONFIG)/chttp2_stream_map_test $(BINDIR)/$(CONFIG)/compression_test $(BINDIR)/$(CONFIG)/dualstack_socket_test $(BINDIR)/$(CONFIG)/fd_conservation_posix_test $(BINDIR)/$(CONFIG)/fd_posix_test $(BINDIR)/$(CONFIG)/fling_client $(BINDIR)/$(CONFIG)/fling_server $(BINDIR)/$(CONFIG)/fling_stream_test $(BINDIR)/$(CONFIG)/fling_test $(BINDIR)/$(CONFIG)/gpr_cmdline_test $(BINDIR)/$(CONFIG)/gpr_env_test $(BINDIR)/$(CONFIG)/gpr_file_test $(BINDIR)/$(CONFIG)/gpr_histogram_test $(BINDIR)/$(CONFIG)/gpr_host_port_test $(BINDIR)/$(CONFIG)/gpr_log_test $(BINDIR)/$(CONFIG)/gpr_slice_buffer_test $(BINDIR)/$(CONFIG)/gpr_slice_test $(BINDIR)/$(CONFIG)/gpr_stack_lockfree_test $(BINDIR)/$(CONFIG)/gpr_string_test $(BINDIR)/$(CONFIG)/gpr_sync_test $(BINDIR)/$(CONFIG)/gpr_thd_test $(BINDIR)/$(CONFIG)/gpr_time_test $(BINDIR)/$(CONFIG)/gpr_tls_test $(BINDIR)/$(CONFIG)/gpr_useful_test $(BINDIR)/$(CONFIG)/grpc_auth_context_test $(BINDIR)/$(CONFIG)/grpc_base64_test $(BINDIR)/$(CONFIG)/grpc_byte_buffer_reader_test $(BINDIR)/$(CONFIG)/grpc_channel_args_test $(BINDIR)/$(CONFIG)/grpc_channel_stack_test $(BINDIR)/$(CONFIG)/grpc_completion_queue_test $(BINDIR)/$(CONFIG)/grpc_credentials_test $(BINDIR)/$(CONFIG)/grpc_json_token_test $(BINDIR)/$(CONFIG)/grpc_jwt_verifier_test $(BINDIR)/$(CONFIG)/grpc_security_connector_test $(BINDIR)/$(CONFIG)/grpc_stream_op_test $(BINDIR)/$(CONFIG)/hpack_parser_test $(BINDIR)/$(CONFIG)/hpack_table_test $(BINDIR)/$(CONFIG)/httpcli_format_request_test $(BINDIR)/$(CONFIG)/httpcli_parser_test $(BINDIR)/$(CONFIG)/httpcli_test $(BINDIR)/$(CONFIG)/json_rewrite $(BINDIR)/$(CONFIG)/json_rewrite_test $(BINDIR)/$(CONFIG)/json_test $(BINDIR)/$(CONFIG)/lame_client_test $(BINDIR)/$(CONFIG)/message_compress_test $(BINDIR)/$(CONFIG)/multi_init_test $(BINDIR)/$(CONFIG)/multiple_server_queues_test $(BINDIR)/$(CONFIG)/murmur_hash_test $(BINDIR)/$(CONFIG)/no_server_test $(BINDIR)/$(CONFIG)/resolve_address_test $(BINDIR)/$(CONFIG)/secure_endpoint_test $(BINDIR)/$(CONFIG)/sockaddr_utils_test $(BINDIR)/$(CONFIG)/tcp_client_posix_test $(BINDIR)/$(CONFIG)/tcp_posix_test $(BINDIR)/$(CONFIG)/tcp_server_posix_test $(BINDIR)/$(CONFIG)/time_averaged_stats_test $(BINDIR)/$(CONFIG)/timeout_encoding_test $(BINDIR)/$(CONFIG)/timers_test $(BINDIR)/$(CONFIG)/transport_metadata_test $(BINDIR)/$(CONFIG)/transport_security_test $(BINDIR)/$(CONFIG)/udp_server_test $(BINDIR)/$(CONFIG)/uri_parser_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_channel_connectivity_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_default_host_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_early_server_shutdown_finishes_tags_test 
$(BINDIR)/$(CONFIG)/chttp2_fake_security_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fake_security_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_channel_connectivity_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_default_host_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_payload_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_channel_connectivity_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_default_host_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_channel_connectivity_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_channel_connectivity_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_registered_call_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_channel_connectivity_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_default_host_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_payload_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_default_host_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_no_op_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_channel_connectivity_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_default_host_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_tags_test 
$(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_no_op_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_channel_connectivity_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_default_host_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_no_op_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_binary_metadata_and_payload_test 
$(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_poll_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_default_host_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_no_op_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_server_finishes_request_test 
$(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_fullstack_with_proxy_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_channel_connectivity_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_default_host_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_disappearing_server_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_no_op_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_delayed_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_invoke_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_no_op_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_no_op_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_bad_hostname_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_and_writes_closed_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_before_invoke_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_in_a_vacuum_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_census_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_inflight_calls_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_tags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_empty_batch_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_graceful_server_shutdown_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_invoke_large_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_max_concurrent_streams_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_max_message_length_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_no_op_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_ping_pong_streaming_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_registered_call_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_binary_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_payload_and_call_creds_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_compressed_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_flags_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_payload_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_server_finishes_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_request_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_request_with_high_initial_sequence_number_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_channel_connectivity_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_default_host_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_compressed_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_channel_connectivity_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_default_host_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_graceful_server_shutdown_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_compressed_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_compression_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_channel_connectivity_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_payload_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_compressed_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_channel_connectivity_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_with_compressed_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_server_finishes_request_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_uds_posix_with_poll_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_channel_connectivity_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_default_host_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_compressed_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_poll_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_cancel_before_invoke_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_default_host_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_disappearing_server_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_simple_delayed_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_fullstack_with_proxy_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_metadata_and_payload_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_compressed_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_compressed_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_with_high_initial_sequence_number_unsecure_test 
$(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_bad_hostname_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_accept_and_writes_closed_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_after_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_before_invoke_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_cancel_in_a_vacuum_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_census_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_inflight_calls_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_tags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_empty_batch_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_graceful_server_shutdown_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_invoke_large_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_max_concurrent_streams_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_max_message_length_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_no_op_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_ping_pong_streaming_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_registered_call_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_binary_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_compressed_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_flags_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_request_with_payload_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_server_finishes_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_request_unsecure_test $(BINDIR)/$(CONFIG)/chttp2_socket_pair_with_grpc_trace_simple_request_with_high_initial_sequence_number_unsecure_test $(BINDIR)/$(CONFIG)/connection_prefix_bad_client_test $(BINDIR)/$(CONFIG)/initial_settings_frame_bad_client_test
 
-buildtests_cxx: buildtests_zookeeper privatelibs_cxx $(BINDIR)/$(CONFIG)/async_end2end_test $(BINDIR)/$(CONFIG)/async_streaming_ping_pong_test $(BINDIR)/$(CONFIG)/async_unary_ping_pong_test $(BINDIR)/$(CONFIG)/auth_property_iterator_test $(BINDIR)/$(CONFIG)/channel_arguments_test $(BINDIR)/$(CONFIG)/cli_call_test $(BINDIR)/$(CONFIG)/client_crash_test $(BINDIR)/$(CONFIG)/client_crash_test_server $(BINDIR)/$(CONFIG)/credentials_test $(BINDIR)/$(CONFIG)/cxx_byte_buffer_test $(BINDIR)/$(CONFIG)/cxx_slice_test $(BINDIR)/$(CONFIG)/cxx_time_test $(BINDIR)/$(CONFIG)/dynamic_thread_pool_test $(BINDIR)/$(CONFIG)/end2end_test $(BINDIR)/$(CONFIG)/fixed_size_thread_pool_test $(BINDIR)/$(CONFIG)/generic_end2end_test $(BINDIR)/$(CONFIG)/grpc_cli $(BINDIR)/$(CONFIG)/interop_client $(BINDIR)/$(CONFIG)/interop_server $(BINDIR)/$(CONFIG)/interop_test $(BINDIR)/$(CONFIG)/mock_test $(BINDIR)/$(CONFIG)/qps_interarrival_test $(BINDIR)/$(CONFIG)/qps_openloop_test $(BINDIR)/$(CONFIG)/qps_test $(BINDIR)/$(CONFIG)/reconnect_interop_client $(BINDIR)/$(CONFIG)/reconnect_interop_server $(BINDIR)/$(CONFIG)/secure_auth_context_test $(BINDIR)/$(CONFIG)/server_crash_test $(BINDIR)/$(CONFIG)/server_crash_test_client $(BINDIR)/$(CONFIG)/status_test $(BINDIR)/$(CONFIG)/sync_streaming_ping_pong_test $(BINDIR)/$(CONFIG)/sync_unary_ping_pong_test $(BINDIR)/$(CONFIG)/thread_stress_test
+buildtests_cxx: buildtests_zookeeper privatelibs_cxx $(BINDIR)/$(CONFIG)/async_end2end_test $(BINDIR)/$(CONFIG)/async_streaming_ping_pong_test $(BINDIR)/$(CONFIG)/async_unary_ping_pong_test $(BINDIR)/$(CONFIG)/auth_property_iterator_test $(BINDIR)/$(CONFIG)/channel_arguments_test $(BINDIR)/$(CONFIG)/cli_call_test $(BINDIR)/$(CONFIG)/client_crash_test $(BINDIR)/$(CONFIG)/client_crash_test_server $(BINDIR)/$(CONFIG)/credentials_test $(BINDIR)/$(CONFIG)/cxx_byte_buffer_test $(BINDIR)/$(CONFIG)/cxx_slice_test $(BINDIR)/$(CONFIG)/cxx_time_test $(BINDIR)/$(CONFIG)/end2end_test $(BINDIR)/$(CONFIG)/generic_end2end_test $(BINDIR)/$(CONFIG)/grpc_cli $(BINDIR)/$(CONFIG)/interop_client $(BINDIR)/$(CONFIG)/interop_server $(BINDIR)/$(CONFIG)/interop_test $(BINDIR)/$(CONFIG)/mock_test $(BINDIR)/$(CONFIG)/qps_interarrival_test $(BINDIR)/$(CONFIG)/qps_openloop_test $(BINDIR)/$(CONFIG)/qps_test $(BINDIR)/$(CONFIG)/reconnect_interop_client $(BINDIR)/$(CONFIG)/reconnect_interop_server $(BINDIR)/$(CONFIG)/secure_auth_context_test $(BINDIR)/$(CONFIG)/server_crash_test $(BINDIR)/$(CONFIG)/server_crash_test_client $(BINDIR)/$(CONFIG)/shutdown_test $(BINDIR)/$(CONFIG)/status_test $(BINDIR)/$(CONFIG)/sync_streaming_ping_pong_test $(BINDIR)/$(CONFIG)/sync_unary_ping_pong_test $(BINDIR)/$(CONFIG)/thread_stress_test
 
 ifeq ($(HAS_ZOOKEEPER),true)
-buildtests_zookeeper: privatelibs_zookeeper $(BINDIR)/$(CONFIG)/shutdown_test $(BINDIR)/$(CONFIG)/zookeeper_test
+buildtests_zookeeper: privatelibs_zookeeper $(BINDIR)/$(CONFIG)/zookeeper_test
 else
 buildtests_zookeeper:
 endif
@@ -1811,6 +1810,8 @@
 	$(Q) $(BINDIR)/$(CONFIG)/grpc_base64_test || ( echo test grpc_base64_test failed ; exit 1 )
 	$(E) "[RUN]     Testing grpc_byte_buffer_reader_test"
 	$(Q) $(BINDIR)/$(CONFIG)/grpc_byte_buffer_reader_test || ( echo test grpc_byte_buffer_reader_test failed ; exit 1 )
+	$(E) "[RUN]     Testing grpc_channel_args_test"
+	$(Q) $(BINDIR)/$(CONFIG)/grpc_channel_args_test || ( echo test grpc_channel_args_test failed ; exit 1 )
 	$(E) "[RUN]     Testing grpc_channel_stack_test"
 	$(Q) $(BINDIR)/$(CONFIG)/grpc_channel_stack_test || ( echo test grpc_channel_stack_test failed ; exit 1 )
 	$(E) "[RUN]     Testing grpc_completion_queue_test"
@@ -3329,12 +3330,8 @@
 	$(Q) $(BINDIR)/$(CONFIG)/cxx_slice_test || ( echo test cxx_slice_test failed ; exit 1 )
 	$(E) "[RUN]     Testing cxx_time_test"
 	$(Q) $(BINDIR)/$(CONFIG)/cxx_time_test || ( echo test cxx_time_test failed ; exit 1 )
-	$(E) "[RUN]     Testing dynamic_thread_pool_test"
-	$(Q) $(BINDIR)/$(CONFIG)/dynamic_thread_pool_test || ( echo test dynamic_thread_pool_test failed ; exit 1 )
 	$(E) "[RUN]     Testing end2end_test"
 	$(Q) $(BINDIR)/$(CONFIG)/end2end_test || ( echo test end2end_test failed ; exit 1 )
-	$(E) "[RUN]     Testing fixed_size_thread_pool_test"
-	$(Q) $(BINDIR)/$(CONFIG)/fixed_size_thread_pool_test || ( echo test fixed_size_thread_pool_test failed ; exit 1 )
 	$(E) "[RUN]     Testing generic_end2end_test"
 	$(Q) $(BINDIR)/$(CONFIG)/generic_end2end_test || ( echo test generic_end2end_test failed ; exit 1 )
 	$(E) "[RUN]     Testing interop_test"
@@ -3349,6 +3346,8 @@
 	$(Q) $(BINDIR)/$(CONFIG)/secure_auth_context_test || ( echo test secure_auth_context_test failed ; exit 1 )
 	$(E) "[RUN]     Testing server_crash_test"
 	$(Q) $(BINDIR)/$(CONFIG)/server_crash_test || ( echo test server_crash_test failed ; exit 1 )
+	$(E) "[RUN]     Testing shutdown_test"
+	$(Q) $(BINDIR)/$(CONFIG)/shutdown_test || ( echo test shutdown_test failed ; exit 1 )
 	$(E) "[RUN]     Testing status_test"
 	$(Q) $(BINDIR)/$(CONFIG)/status_test || ( echo test status_test failed ; exit 1 )
 	$(E) "[RUN]     Testing sync_streaming_ping_pong_test"
@@ -3364,8 +3363,6 @@
 
 ifeq ($(HAS_ZOOKEEPER),true)
 test_zookeeper: buildtests_zookeeper
-	$(E) "[RUN]     Testing shutdown_test"
-	$(Q) $(BINDIR)/$(CONFIG)/shutdown_test || ( echo test shutdown_test failed ; exit 1 )
 	$(E) "[RUN]     Testing zookeeper_test"
 	$(Q) $(BINDIR)/$(CONFIG)/zookeeper_test || ( echo test zookeeper_test failed ; exit 1 )
 
@@ -4606,10 +4603,10 @@
     src/cpp/client/channel_arguments.cc \
     src/cpp/client/client_context.cc \
     src/cpp/client/create_channel.cc \
+    src/cpp/client/create_channel_internal.cc \
     src/cpp/client/credentials.cc \
     src/cpp/client/generic_stub.cc \
     src/cpp/client/insecure_credentials.cc \
-    src/cpp/client/internal_stub.cc \
     src/cpp/common/call.cc \
     src/cpp/common/completion_queue.cc \
     src/cpp/common/rpc_method.cc \
@@ -4629,25 +4626,16 @@
     src/cpp/util/time.cc \
 
 PUBLIC_HEADERS_CXX += \
-    include/grpc++/async_generic_service.h \
-    include/grpc++/async_unary_call.h \
-    include/grpc++/auth_context.h \
-    include/grpc++/byte_buffer.h \
-    include/grpc++/channel_arguments.h \
-    include/grpc++/channel_interface.h \
+    include/grpc++/channel.h \
     include/grpc++/client_context.h \
     include/grpc++/completion_queue.h \
-    include/grpc++/config.h \
-    include/grpc++/config_protobuf.h \
     include/grpc++/create_channel.h \
     include/grpc++/credentials.h \
-    include/grpc++/dynamic_thread_pool.h \
-    include/grpc++/fixed_size_thread_pool.h \
-    include/grpc++/generic_stub.h \
+    include/grpc++/generic/async_generic_service.h \
+    include/grpc++/generic/generic_stub.h \
     include/grpc++/impl/call.h \
     include/grpc++/impl/client_unary_call.h \
     include/grpc++/impl/grpc_library.h \
-    include/grpc++/impl/internal_stub.h \
     include/grpc++/impl/proto_utils.h \
     include/grpc++/impl/rpc_method.h \
     include/grpc++/impl/rpc_service_method.h \
@@ -4663,13 +4651,19 @@
     include/grpc++/server_builder.h \
     include/grpc++/server_context.h \
     include/grpc++/server_credentials.h \
-    include/grpc++/slice.h \
-    include/grpc++/status.h \
-    include/grpc++/status_code_enum.h \
-    include/grpc++/stream.h \
-    include/grpc++/stub_options.h \
-    include/grpc++/thread_pool_interface.h \
-    include/grpc++/time.h \
+    include/grpc++/support/async_stream.h \
+    include/grpc++/support/async_unary_call.h \
+    include/grpc++/support/auth_context.h \
+    include/grpc++/support/byte_buffer.h \
+    include/grpc++/support/channel_arguments.h \
+    include/grpc++/support/config.h \
+    include/grpc++/support/config_protobuf.h \
+    include/grpc++/support/slice.h \
+    include/grpc++/support/status.h \
+    include/grpc++/support/status_code_enum.h \
+    include/grpc++/support/stub_options.h \
+    include/grpc++/support/sync_stream.h \
+    include/grpc++/support/time.h \
 
 LIBGRPC++_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(LIBGRPC++_SRC))))
 
@@ -4849,10 +4843,10 @@
     src/cpp/client/channel_arguments.cc \
     src/cpp/client/client_context.cc \
     src/cpp/client/create_channel.cc \
+    src/cpp/client/create_channel_internal.cc \
     src/cpp/client/credentials.cc \
     src/cpp/client/generic_stub.cc \
     src/cpp/client/insecure_credentials.cc \
-    src/cpp/client/internal_stub.cc \
     src/cpp/common/call.cc \
     src/cpp/common/completion_queue.cc \
     src/cpp/common/rpc_method.cc \
@@ -4872,25 +4866,16 @@
     src/cpp/util/time.cc \
 
 PUBLIC_HEADERS_CXX += \
-    include/grpc++/async_generic_service.h \
-    include/grpc++/async_unary_call.h \
-    include/grpc++/auth_context.h \
-    include/grpc++/byte_buffer.h \
-    include/grpc++/channel_arguments.h \
-    include/grpc++/channel_interface.h \
+    include/grpc++/channel.h \
     include/grpc++/client_context.h \
     include/grpc++/completion_queue.h \
-    include/grpc++/config.h \
-    include/grpc++/config_protobuf.h \
     include/grpc++/create_channel.h \
     include/grpc++/credentials.h \
-    include/grpc++/dynamic_thread_pool.h \
-    include/grpc++/fixed_size_thread_pool.h \
-    include/grpc++/generic_stub.h \
+    include/grpc++/generic/async_generic_service.h \
+    include/grpc++/generic/generic_stub.h \
     include/grpc++/impl/call.h \
     include/grpc++/impl/client_unary_call.h \
     include/grpc++/impl/grpc_library.h \
-    include/grpc++/impl/internal_stub.h \
     include/grpc++/impl/proto_utils.h \
     include/grpc++/impl/rpc_method.h \
     include/grpc++/impl/rpc_service_method.h \
@@ -4906,13 +4891,19 @@
     include/grpc++/server_builder.h \
     include/grpc++/server_context.h \
     include/grpc++/server_credentials.h \
-    include/grpc++/slice.h \
-    include/grpc++/status.h \
-    include/grpc++/status_code_enum.h \
-    include/grpc++/stream.h \
-    include/grpc++/stub_options.h \
-    include/grpc++/thread_pool_interface.h \
-    include/grpc++/time.h \
+    include/grpc++/support/async_stream.h \
+    include/grpc++/support/async_unary_call.h \
+    include/grpc++/support/auth_context.h \
+    include/grpc++/support/byte_buffer.h \
+    include/grpc++/support/channel_arguments.h \
+    include/grpc++/support/config.h \
+    include/grpc++/support/config_protobuf.h \
+    include/grpc++/support/slice.h \
+    include/grpc++/support/status.h \
+    include/grpc++/support/status_code_enum.h \
+    include/grpc++/support/stub_options.h \
+    include/grpc++/support/sync_stream.h \
+    include/grpc++/support/time.h \
 
 LIBGRPC++_UNSECURE_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(LIBGRPC++_UNSECURE_SRC))))
 
@@ -7644,6 +7635,35 @@
 endif
 
 
+GRPC_CHANNEL_ARGS_TEST_SRC = \
+    test/core/channel/channel_args_test.c \
+
+GRPC_CHANNEL_ARGS_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(GRPC_CHANNEL_ARGS_TEST_SRC))))
+ifeq ($(NO_SECURE),true)
+
+# You can't build secure targets if you don't have OpenSSL.
+
+$(BINDIR)/$(CONFIG)/grpc_channel_args_test: openssl_dep_error
+
+else
+
+$(BINDIR)/$(CONFIG)/grpc_channel_args_test: $(GRPC_CHANNEL_ARGS_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
+	$(E) "[LD]      Linking $@"
+	$(Q) mkdir -p `dirname $@`
+	$(Q) $(LD) $(LDFLAGS) $(GRPC_CHANNEL_ARGS_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/grpc_channel_args_test
+
+endif
+
+$(OBJDIR)/$(CONFIG)/test/core/channel/channel_args_test.o:  $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
+deps_grpc_channel_args_test: $(GRPC_CHANNEL_ARGS_TEST_OBJS:.o=.dep)
+
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(GRPC_CHANNEL_ARGS_TEST_OBJS:.o=.dep)
+endif
+endif
+
+
 GRPC_CHANNEL_STACK_TEST_SRC = \
     test/core/channel/channel_stack_test.c \
 
@@ -9255,46 +9275,6 @@
 endif
 
 
-DYNAMIC_THREAD_POOL_TEST_SRC = \
-    test/cpp/server/dynamic_thread_pool_test.cc \
-
-DYNAMIC_THREAD_POOL_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(DYNAMIC_THREAD_POOL_TEST_SRC))))
-ifeq ($(NO_SECURE),true)
-
-# You can't build secure targets if you don't have OpenSSL.
-
-$(BINDIR)/$(CONFIG)/dynamic_thread_pool_test: openssl_dep_error
-
-else
-
-
-ifeq ($(NO_PROTOBUF),true)
-
-# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+.
-
-$(BINDIR)/$(CONFIG)/dynamic_thread_pool_test: protobuf_dep_error
-
-else
-
-$(BINDIR)/$(CONFIG)/dynamic_thread_pool_test: $(PROTOBUF_DEP) $(DYNAMIC_THREAD_POOL_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
-	$(E) "[LD]      Linking $@"
-	$(Q) mkdir -p `dirname $@`
-	$(Q) $(LDXX) $(LDFLAGS) $(DYNAMIC_THREAD_POOL_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/dynamic_thread_pool_test
-
-endif
-
-endif
-
-$(OBJDIR)/$(CONFIG)/test/cpp/server/dynamic_thread_pool_test.o:  $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
-deps_dynamic_thread_pool_test: $(DYNAMIC_THREAD_POOL_TEST_OBJS:.o=.dep)
-
-ifneq ($(NO_SECURE),true)
-ifneq ($(NO_DEPS),true)
--include $(DYNAMIC_THREAD_POOL_TEST_OBJS:.o=.dep)
-endif
-endif
-
-
 END2END_TEST_SRC = \
     test/cpp/end2end/end2end_test.cc \
 
@@ -9335,46 +9315,6 @@
 endif
 
 
-FIXED_SIZE_THREAD_POOL_TEST_SRC = \
-    test/cpp/server/fixed_size_thread_pool_test.cc \
-
-FIXED_SIZE_THREAD_POOL_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(FIXED_SIZE_THREAD_POOL_TEST_SRC))))
-ifeq ($(NO_SECURE),true)
-
-# You can't build secure targets if you don't have OpenSSL.
-
-$(BINDIR)/$(CONFIG)/fixed_size_thread_pool_test: openssl_dep_error
-
-else
-
-
-ifeq ($(NO_PROTOBUF),true)
-
-# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+.
-
-$(BINDIR)/$(CONFIG)/fixed_size_thread_pool_test: protobuf_dep_error
-
-else
-
-$(BINDIR)/$(CONFIG)/fixed_size_thread_pool_test: $(PROTOBUF_DEP) $(FIXED_SIZE_THREAD_POOL_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
-	$(E) "[LD]      Linking $@"
-	$(Q) mkdir -p `dirname $@`
-	$(Q) $(LDXX) $(LDFLAGS) $(FIXED_SIZE_THREAD_POOL_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/fixed_size_thread_pool_test
-
-endif
-
-endif
-
-$(OBJDIR)/$(CONFIG)/test/cpp/server/fixed_size_thread_pool_test.o:  $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
-deps_fixed_size_thread_pool_test: $(FIXED_SIZE_THREAD_POOL_TEST_OBJS:.o=.dep)
-
-ifneq ($(NO_SECURE),true)
-ifneq ($(NO_DEPS),true)
--include $(FIXED_SIZE_THREAD_POOL_TEST_OBJS:.o=.dep)
-endif
-endif
-
-
 GENERIC_END2END_TEST_SRC = \
     test/cpp/end2end/generic_end2end_test.cc \
 
@@ -10286,16 +10226,16 @@
 
 else
 
-$(BINDIR)/$(CONFIG)/shutdown_test: $(PROTOBUF_DEP) $(SHUTDOWN_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc_zookeeper.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
+$(BINDIR)/$(CONFIG)/shutdown_test: $(PROTOBUF_DEP) $(SHUTDOWN_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
 	$(E) "[LD]      Linking $@"
 	$(Q) mkdir -p `dirname $@`
-	$(Q) $(LDXX) $(LDFLAGS) $(SHUTDOWN_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc_zookeeper.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a -lzookeeper_mt $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/shutdown_test
+	$(Q) $(LDXX) $(LDFLAGS) $(SHUTDOWN_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/shutdown_test
 
 endif
 
 endif
 
-$(OBJDIR)/$(CONFIG)/test/cpp/end2end/shutdown_test.o:  $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc_zookeeper.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
+$(OBJDIR)/$(CONFIG)/test/cpp/end2end/shutdown_test.o:  $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
 deps_shutdown_test: $(SHUTDOWN_TEST_OBJS:.o=.dep)
 
 ifneq ($(NO_SECURE),true)
diff --git a/build.json b/build.json
index bd707d2..414b957 100644
--- a/build.json
+++ b/build.json
@@ -30,25 +30,16 @@
     {
       "name": "grpc++_base",
       "public_headers": [
-        "include/grpc++/async_generic_service.h",
-        "include/grpc++/async_unary_call.h",
-        "include/grpc++/auth_context.h",
-        "include/grpc++/byte_buffer.h",
-        "include/grpc++/channel_arguments.h",
-        "include/grpc++/channel_interface.h",
+        "include/grpc++/channel.h",
         "include/grpc++/client_context.h",
         "include/grpc++/completion_queue.h",
-        "include/grpc++/config.h",
-        "include/grpc++/config_protobuf.h",
         "include/grpc++/create_channel.h",
         "include/grpc++/credentials.h",
-        "include/grpc++/dynamic_thread_pool.h",
-        "include/grpc++/fixed_size_thread_pool.h",
-        "include/grpc++/generic_stub.h",
+        "include/grpc++/generic/async_generic_service.h",
+        "include/grpc++/generic/generic_stub.h",
         "include/grpc++/impl/call.h",
         "include/grpc++/impl/client_unary_call.h",
         "include/grpc++/impl/grpc_library.h",
-        "include/grpc++/impl/internal_stub.h",
         "include/grpc++/impl/proto_utils.h",
         "include/grpc++/impl/rpc_method.h",
         "include/grpc++/impl/rpc_service_method.h",
@@ -64,27 +55,36 @@
         "include/grpc++/server_builder.h",
         "include/grpc++/server_context.h",
         "include/grpc++/server_credentials.h",
-        "include/grpc++/slice.h",
-        "include/grpc++/status.h",
-        "include/grpc++/status_code_enum.h",
-        "include/grpc++/stream.h",
-        "include/grpc++/stub_options.h",
-        "include/grpc++/thread_pool_interface.h",
-        "include/grpc++/time.h"
+        "include/grpc++/support/async_stream.h",
+        "include/grpc++/support/async_unary_call.h",
+        "include/grpc++/support/auth_context.h",
+        "include/grpc++/support/byte_buffer.h",
+        "include/grpc++/support/channel_arguments.h",
+        "include/grpc++/support/config.h",
+        "include/grpc++/support/config_protobuf.h",
+        "include/grpc++/support/slice.h",
+        "include/grpc++/support/status.h",
+        "include/grpc++/support/status_code_enum.h",
+        "include/grpc++/support/stub_options.h",
+        "include/grpc++/support/sync_stream.h",
+        "include/grpc++/support/time.h"
       ],
       "headers": [
-        "src/cpp/client/channel.h",
-        "src/cpp/common/create_auth_context.h"
+        "src/cpp/client/create_channel_internal.h",
+        "src/cpp/common/create_auth_context.h",
+        "src/cpp/server/dynamic_thread_pool.h",
+        "src/cpp/server/fixed_size_thread_pool.h",
+        "src/cpp/server/thread_pool_interface.h"
       ],
       "src": [
         "src/cpp/client/channel.cc",
         "src/cpp/client/channel_arguments.cc",
         "src/cpp/client/client_context.cc",
         "src/cpp/client/create_channel.cc",
+        "src/cpp/client/create_channel_internal.cc",
         "src/cpp/client/credentials.cc",
         "src/cpp/client/generic_stub.cc",
         "src/cpp/client/insecure_credentials.cc",
-        "src/cpp/client/internal_stub.cc",
         "src/cpp/common/call.cc",
         "src/cpp/common/completion_queue.cc",
         "src/cpp/common/rpc_method.cc",
@@ -697,8 +697,8 @@
       "build": "protoc",
       "language": "c++",
       "headers": [
-        "include/grpc++/config.h",
-        "include/grpc++/config_protobuf.h",
+        "include/grpc++/support/config.h",
+        "include/grpc++/support/config_protobuf.h",
         "src/compiler/config.h",
         "src/compiler/cpp_generator.h",
         "src/compiler/cpp_generator_helpers.h",
@@ -1366,6 +1366,20 @@
       ]
     },
     {
+      "name": "grpc_channel_args_test",
+      "build": "test",
+      "language": "c",
+      "src": [
+        "test/core/channel/channel_args_test.c"
+      ],
+      "deps": [
+        "grpc_test_util",
+        "grpc",
+        "gpr_test_util",
+        "gpr"
+      ]
+    },
+    {
       "name": "grpc_channel_stack_test",
       "build": "test",
       "language": "c",
@@ -2130,21 +2144,6 @@
       ]
     },
     {
-      "name": "dynamic_thread_pool_test",
-      "build": "test",
-      "language": "c++",
-      "src": [
-        "test/cpp/server/dynamic_thread_pool_test.cc"
-      ],
-      "deps": [
-        "grpc_test_util",
-        "grpc++",
-        "grpc",
-        "gpr_test_util",
-        "gpr"
-      ]
-    },
-    {
       "name": "end2end_test",
       "build": "test",
       "language": "c++",
@@ -2161,21 +2160,6 @@
       ]
     },
     {
-      "name": "fixed_size_thread_pool_test",
-      "build": "test",
-      "language": "c++",
-      "src": [
-        "test/cpp/server/fixed_size_thread_pool_test.cc"
-      ],
-      "deps": [
-        "grpc_test_util",
-        "grpc++",
-        "grpc",
-        "gpr_test_util",
-        "gpr"
-      ]
-    },
-    {
       "name": "generic_end2end_test",
       "build": "test",
       "language": "c++",
@@ -2624,13 +2608,9 @@
         "grpc++_test_util",
         "grpc_test_util",
         "grpc++",
-        "grpc_zookeeper",
         "grpc",
         "gpr_test_util",
         "gpr"
-      ],
-      "external_deps": [
-        "zookeeper"
       ]
     },
     {
diff --git a/examples/pubsub/main.cc b/examples/pubsub/main.cc
index b1898f1..32102dc 100644
--- a/examples/pubsub/main.cc
+++ b/examples/pubsub/main.cc
@@ -37,18 +37,16 @@
 #include <string>
 #include <thread>
 
+#include <gflags/gflags.h>
 #include <grpc/grpc.h>
 #include <grpc/support/log.h>
-#include <gflags/gflags.h>
-#include <grpc++/channel_arguments.h>
-#include <grpc++/channel_interface.h>
+#include <grpc++/channel.h>
 #include <grpc++/create_channel.h>
 #include <grpc++/credentials.h>
-#include <grpc++/status.h>
-#include "test/cpp/util/test_config.h"
 
 #include "examples/pubsub/publisher.h"
 #include "examples/pubsub/subscriber.h"
+#include "test/cpp/util/test_config.h"
 
 DEFINE_int32(server_port, 443, "Server port.");
 DEFINE_string(server_host, "pubsub-staging.googleapis.com",
@@ -72,7 +70,7 @@
   ss << FLAGS_server_host << ":" << FLAGS_server_port;
 
   std::shared_ptr<grpc::Credentials> creds = grpc::GoogleDefaultCredentials();
-  std::shared_ptr<grpc::ChannelInterface> channel =
+  std::shared_ptr<grpc::Channel> channel =
       grpc::CreateChannel(ss.str(), creds, grpc::ChannelArguments());
 
   grpc::examples::pubsub::Publisher publisher(channel);
diff --git a/examples/pubsub/publisher.cc b/examples/pubsub/publisher.cc
index 458050a..fd38ca9 100644
--- a/examples/pubsub/publisher.cc
+++ b/examples/pubsub/publisher.cc
@@ -50,7 +50,7 @@
 namespace examples {
 namespace pubsub {
 
-Publisher::Publisher(std::shared_ptr<ChannelInterface> channel)
+Publisher::Publisher(std::shared_ptr<Channel> channel)
     : stub_(PublisherService::NewStub(channel)) {}
 
 void Publisher::Shutdown() { stub_.reset(); }
diff --git a/examples/pubsub/publisher.h b/examples/pubsub/publisher.h
index 33bcf98..02e6194 100644
--- a/examples/pubsub/publisher.h
+++ b/examples/pubsub/publisher.h
@@ -34,8 +34,7 @@
 #ifndef GRPC_EXAMPLES_PUBSUB_PUBLISHER_H
 #define GRPC_EXAMPLES_PUBSUB_PUBLISHER_H
 
-#include <grpc++/channel_interface.h>
-#include <grpc++/status.h>
+#include <grpc++/channel.h>
 
 #include "examples/pubsub/pubsub.grpc.pb.h"
 
@@ -45,7 +44,7 @@
 
 class Publisher {
  public:
-  Publisher(std::shared_ptr<ChannelInterface> channel);
+  Publisher(std::shared_ptr<Channel> channel);
   void Shutdown();
 
   Status CreateTopic(const grpc::string& topic);
diff --git a/examples/pubsub/publisher_test.cc b/examples/pubsub/publisher_test.cc
index 6b9dcac..c2eb295 100644
--- a/examples/pubsub/publisher_test.cc
+++ b/examples/pubsub/publisher_test.cc
@@ -31,22 +31,20 @@
  *
  */
 
-#include <grpc++/channel_arguments.h>
-#include <grpc++/channel_interface.h>
+#include <grpc++/channel.h>
 #include <grpc++/client_context.h>
 #include <grpc++/create_channel.h>
 #include <grpc++/server.h>
 #include <grpc++/server_builder.h>
 #include <grpc++/server_context.h>
 #include <grpc++/server_credentials.h>
-#include <grpc++/status.h>
 #include <gtest/gtest.h>
 
 #include "examples/pubsub/publisher.h"
 #include "test/core/util/port.h"
 #include "test/core/util/test_config.h"
 
-using grpc::ChannelInterface;
+using grpc::Channel;
 
 namespace grpc {
 namespace testing {
@@ -124,7 +122,7 @@
   std::unique_ptr<Server> server_;
   PublisherServiceImpl service_;
 
-  std::shared_ptr<ChannelInterface> channel_;
+  std::shared_ptr<Channel> channel_;
 
   std::unique_ptr<grpc::examples::pubsub::Publisher> publisher_;
 };
diff --git a/examples/pubsub/subscriber.cc b/examples/pubsub/subscriber.cc
index d9e0292..0818f50 100644
--- a/examples/pubsub/subscriber.cc
+++ b/examples/pubsub/subscriber.cc
@@ -48,7 +48,7 @@
 namespace examples {
 namespace pubsub {
 
-Subscriber::Subscriber(std::shared_ptr<ChannelInterface> channel)
+Subscriber::Subscriber(std::shared_ptr<Channel> channel)
     : stub_(SubscriberService::NewStub(channel)) {}
 
 void Subscriber::Shutdown() { stub_.reset(); }
diff --git a/examples/pubsub/subscriber.h b/examples/pubsub/subscriber.h
index 40ab454..c5b1df0 100644
--- a/examples/pubsub/subscriber.h
+++ b/examples/pubsub/subscriber.h
@@ -34,8 +34,7 @@
 #ifndef GRPC_EXAMPLES_PUBSUB_SUBSCRIBER_H
 #define GRPC_EXAMPLES_PUBSUB_SUBSCRIBER_H
 
-#include <grpc++/channel_interface.h>
-#include <grpc++/status.h>
+#include <grpc++/channel.h>
 
 #include "examples/pubsub/pubsub.grpc.pb.h"
 
@@ -45,7 +44,7 @@
 
 class Subscriber {
  public:
-  Subscriber(std::shared_ptr<ChannelInterface> channel);
+  Subscriber(std::shared_ptr<Channel> channel);
   void Shutdown();
 
   Status CreateSubscription(const grpc::string& topic,
diff --git a/examples/pubsub/subscriber_test.cc b/examples/pubsub/subscriber_test.cc
index b0e7fc0..c5a077f 100644
--- a/examples/pubsub/subscriber_test.cc
+++ b/examples/pubsub/subscriber_test.cc
@@ -31,15 +31,13 @@
  *
  */
 
-#include <grpc++/channel_arguments.h>
-#include <grpc++/channel_interface.h>
+#include <grpc++/channel.h>
 #include <grpc++/client_context.h>
 #include <grpc++/create_channel.h>
 #include <grpc++/server.h>
 #include <grpc++/server_builder.h>
 #include <grpc++/server_context.h>
 #include <grpc++/server_credentials.h>
-#include <grpc++/status.h>
 #include <gtest/gtest.h>
 
 #include "examples/pubsub/subscriber.h"
@@ -122,7 +120,7 @@
   std::unique_ptr<Server> server_;
   SubscriberServiceImpl service_;
 
-  std::shared_ptr<ChannelInterface> channel_;
+  std::shared_ptr<Channel> channel_;
 
   std::unique_ptr<grpc::examples::pubsub::Subscriber> subscriber_;
 };
diff --git a/include/grpc++/channel.h b/include/grpc++/channel.h
new file mode 100644
index 0000000..a8af741
--- /dev/null
+++ b/include/grpc++/channel.h
@@ -0,0 +1,138 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPCXX_CHANNEL_H
+#define GRPCXX_CHANNEL_H
+
+#include <memory>
+
+#include <grpc/grpc.h>
+#include <grpc++/impl/call.h>
+#include <grpc++/impl/grpc_library.h>
+#include <grpc++/support/config.h>
+
+struct grpc_channel;
+
+namespace grpc {
+class CallOpSetInterface;
+class ChannelArguments;
+class CompletionQueue;
+class Credentials;
+class SecureCredentials;
+
+template <class R>
+class ClientReader;
+template <class W>
+class ClientWriter;
+template <class R, class W>
+class ClientReaderWriter;
+template <class R>
+class ClientAsyncReader;
+template <class W>
+class ClientAsyncWriter;
+template <class R, class W>
+class ClientAsyncReaderWriter;
+template <class R>
+class ClientAsyncResponseReader;
+
+class Channel GRPC_FINAL : public GrpcLibrary,
+                           public CallHook,
+                           public std::enable_shared_from_this<Channel> {
+ public:
+  ~Channel();
+
+  // Get the current channel state. If the channel is in IDLE and try_to_connect
+  // is set to true, try to connect.
+  grpc_connectivity_state GetState(bool try_to_connect);
+
+  // Return the tag on cq when the channel state changes or the deadline expires.
+  // GetState needs to be called to get the current state.
+  template <typename T>
+  void NotifyOnStateChange(grpc_connectivity_state last_observed, T deadline,
+                           CompletionQueue* cq, void* tag) {
+    TimePoint<T> deadline_tp(deadline);
+    NotifyOnStateChangeImpl(last_observed, deadline_tp.raw_time(), cq, tag);
+  }
+
+  // Blocking wait for a channel state change or deadline expiration.
+  // GetState needs to be called to get the current state.
+  template <typename T>
+  bool WaitForStateChange(grpc_connectivity_state last_observed, T deadline) {
+    TimePoint<T> deadline_tp(deadline);
+    return WaitForStateChangeImpl(last_observed, deadline_tp.raw_time());
+  }
+
+ private:
+  template <class R>
+  friend class ::grpc::ClientReader;
+  template <class W>
+  friend class ::grpc::ClientWriter;
+  template <class R, class W>
+  friend class ::grpc::ClientReaderWriter;
+  template <class R>
+  friend class ::grpc::ClientAsyncReader;
+  template <class W>
+  friend class ::grpc::ClientAsyncWriter;
+  template <class R, class W>
+  friend class ::grpc::ClientAsyncReaderWriter;
+  template <class R>
+  friend class ::grpc::ClientAsyncResponseReader;
+  template <class InputMessage, class OutputMessage>
+  friend Status BlockingUnaryCall(Channel* channel, const RpcMethod& method,
+                                  ClientContext* context,
+                                  const InputMessage& request,
+                                  OutputMessage* result);
+  friend class ::grpc::RpcMethod;
+  friend std::shared_ptr<Channel> CreateChannelInternal(
+      const grpc::string& host, grpc_channel* c_channel);
+
+  Channel(const grpc::string& host, grpc_channel* c_channel);
+
+  Call CreateCall(const RpcMethod& method, ClientContext* context,
+                  CompletionQueue* cq);
+  void PerformOpsOnCall(CallOpSetInterface* ops, Call* call);
+  void* RegisterMethod(const char* method);
+
+  void NotifyOnStateChangeImpl(grpc_connectivity_state last_observed,
+                               gpr_timespec deadline, CompletionQueue* cq,
+                               void* tag);
+  bool WaitForStateChangeImpl(grpc_connectivity_state last_observed,
+                              gpr_timespec deadline);
+
+  const grpc::string host_;
+  grpc_channel* const c_channel_;  // owned
+};
+
+}  // namespace grpc
+
+#endif  // GRPCXX_CHANNEL_H
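Editor's note: a minimal caller-side sketch of the consolidated grpc::Channel surface introduced by this hunk (CreateChannel now hands back std::shared_ptr<grpc::Channel>, and GetState/WaitForStateChange live on Channel itself). This is not part of the patch; the localhost target, the use of grpc::InsecureCredentials(), and the std::chrono deadline (assumed to be a supported TimePoint<T> specialization) are illustrative assumptions.

    // sketch.cc -- hypothetical usage of the new grpc::Channel API (not in this patch)
    #include <chrono>
    #include <memory>

    #include <grpc++/channel.h>
    #include <grpc++/create_channel.h>
    #include <grpc++/credentials.h>
    #include <grpc++/support/channel_arguments.h>

    int main() {
      // CreateChannel returns a concrete grpc::Channel now, not ChannelInterface.
      std::shared_ptr<grpc::Channel> channel = grpc::CreateChannel(
          "localhost:50051", grpc::InsecureCredentials(), grpc::ChannelArguments());

      // Query the current state; passing true asks an IDLE channel to connect.
      grpc_connectivity_state state = channel->GetState(true);

      // Block until the state moves away from `state` or the deadline expires;
      // returns false if the deadline was reached first.
      bool changed = channel->WaitForStateChange(
          state, std::chrono::system_clock::now() + std::chrono::seconds(5));
      return changed ? 0 : 1;
    }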
diff --git a/include/grpc++/channel_interface.h b/include/grpc++/channel_interface.h
deleted file mode 100644
index 4176cde..0000000
--- a/include/grpc++/channel_interface.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifndef GRPCXX_CHANNEL_INTERFACE_H
-#define GRPCXX_CHANNEL_INTERFACE_H
-
-#include <memory>
-
-#include <grpc/grpc.h>
-#include <grpc++/status.h>
-#include <grpc++/impl/call.h>
-
-struct grpc_call;
-
-namespace grpc {
-class Call;
-class CallOpBuffer;
-class ClientContext;
-class CompletionQueue;
-class RpcMethod;
-
-class ChannelInterface : public CallHook,
-                         public std::enable_shared_from_this<ChannelInterface> {
- public:
-  virtual ~ChannelInterface() {}
-
-  virtual void* RegisterMethod(const char* method_name) = 0;
-  virtual Call CreateCall(const RpcMethod& method, ClientContext* context,
-                          CompletionQueue* cq) = 0;
-
-  // Get the current channel state. If the channel is in IDLE and try_to_connect
-  // is set to true, try to connect.
-  virtual grpc_connectivity_state GetState(bool try_to_connect) = 0;
-
-  // Return the tag on cq when the channel state is changed or deadline expires.
-  // GetState needs to called to get the current state.
-  template <typename T>
-  void NotifyOnStateChange(grpc_connectivity_state last_observed, T deadline,
-                           CompletionQueue* cq, void* tag) {
-    TimePoint<T> deadline_tp(deadline);
-    NotifyOnStateChangeImpl(last_observed, deadline_tp.raw_time(), cq, tag);
-  }
-
-  // Blocking wait for channel state change or deadline expiration.
-  // GetState needs to called to get the current state.
-  template <typename T>
-  bool WaitForStateChange(grpc_connectivity_state last_observed, T deadline) {
-    TimePoint<T> deadline_tp(deadline);
-    return WaitForStateChangeImpl(last_observed, deadline_tp.raw_time());
-  }
-
- private:
-  virtual void NotifyOnStateChangeImpl(grpc_connectivity_state last_observed,
-                                       gpr_timespec deadline,
-                                       CompletionQueue* cq, void* tag) = 0;
-  virtual bool WaitForStateChangeImpl(grpc_connectivity_state last_observed,
-                                      gpr_timespec deadline) = 0;
-};
-
-}  // namespace grpc
-
-#endif  // GRPCXX_CHANNEL_INTERFACE_H
diff --git a/include/grpc++/client_context.h b/include/grpc++/client_context.h
index 8de2ba4..ee28f36 100644
--- a/include/grpc++/client_context.h
+++ b/include/grpc++/client_context.h
@@ -42,16 +42,16 @@
 #include <grpc/grpc.h>
 #include <grpc/support/log.h>
 #include <grpc/support/time.h>
-#include <grpc++/auth_context.h>
-#include <grpc++/config.h>
-#include <grpc++/status.h>
-#include <grpc++/time.h>
+#include <grpc++/support/auth_context.h>
+#include <grpc++/support/config.h>
+#include <grpc++/support/status.h>
+#include <grpc++/support/time.h>
 
 struct census_context;
 
 namespace grpc {
 
-class ChannelInterface;
+class Channel;
 class CompletionQueue;
 class Credentials;
 class RpcMethod;
@@ -215,20 +215,18 @@
   template <class R>
   friend class ::grpc::ClientAsyncResponseReader;
   template <class InputMessage, class OutputMessage>
-  friend Status BlockingUnaryCall(ChannelInterface* channel,
-                                  const RpcMethod& method,
+  friend Status BlockingUnaryCall(Channel* channel, const RpcMethod& method,
                                   ClientContext* context,
                                   const InputMessage& request,
                                   OutputMessage* result);
 
   grpc_call* call() { return call_; }
-  void set_call(grpc_call* call,
-                const std::shared_ptr<ChannelInterface>& channel);
+  void set_call(grpc_call* call, const std::shared_ptr<Channel>& channel);
 
   grpc::string authority() { return authority_; }
 
   bool initial_metadata_received_;
-  std::shared_ptr<ChannelInterface> channel_;
+  std::shared_ptr<Channel> channel_;
   grpc_call* call_;
   gpr_timespec deadline_;
   grpc::string authority_;
diff --git a/include/grpc++/completion_queue.h b/include/grpc++/completion_queue.h
index 2f30211..d81d2e7 100644
--- a/include/grpc++/completion_queue.h
+++ b/include/grpc++/completion_queue.h
@@ -36,8 +36,8 @@
 
 #include <grpc/support/time.h>
 #include <grpc++/impl/grpc_library.h>
-#include <grpc++/status.h>
-#include <grpc++/time.h>
+#include <grpc++/support/status.h>
+#include <grpc++/support/time.h>
 
 struct grpc_completion_queue;
 
@@ -65,7 +65,7 @@
 class BidiStreamingHandler;
 class UnknownMethodHandler;
 
-class ChannelInterface;
+class Channel;
 class ClientContext;
 class CompletionQueue;
 class RpcMethod;
@@ -143,8 +143,7 @@
   friend class ::grpc::Server;
   friend class ::grpc::ServerContext;
   template <class InputMessage, class OutputMessage>
-  friend Status BlockingUnaryCall(ChannelInterface* channel,
-                                  const RpcMethod& method,
+  friend Status BlockingUnaryCall(Channel* channel, const RpcMethod& method,
                                   ClientContext* context,
                                   const InputMessage& request,
                                   OutputMessage* result);
diff --git a/include/grpc++/create_channel.h b/include/grpc++/create_channel.h
index 424a93a..0e559ac 100644
--- a/include/grpc++/create_channel.h
+++ b/include/grpc++/create_channel.h
@@ -36,15 +36,14 @@
 
 #include <memory>
 
-#include <grpc++/config.h>
 #include <grpc++/credentials.h>
+#include <grpc++/support/channel_arguments.h>
+#include <grpc++/support/config.h>
 
 namespace grpc {
-class ChannelArguments;
-class ChannelInterface;
 
 // If creds does not hold an object or is invalid, a lame channel is returned.
-std::shared_ptr<ChannelInterface> CreateChannel(
+std::shared_ptr<Channel> CreateChannel(
     const grpc::string& target, const std::shared_ptr<Credentials>& creds,
     const ChannelArguments& args);
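
CreateChannel now returns a concrete std::shared_ptr<Channel>, and the header includes ChannelArguments directly instead of forward-declaring it. A minimal caller against the new signature; the credentials factory name is an assumption from the existing credentials API, not something this hunk shows:

    #include <grpc++/create_channel.h>

    std::shared_ptr<grpc::Channel> channel = grpc::CreateChannel(
        "localhost:50051",
        grpc::InsecureCredentials(),   // assumed factory from credentials.h
        grpc::ChannelArguments());
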
 
diff --git a/include/grpc++/credentials.h b/include/grpc++/credentials.h
index a4f1e73..71e1f00 100644
--- a/include/grpc++/credentials.h
+++ b/include/grpc++/credentials.h
@@ -36,12 +36,12 @@
 
 #include <memory>
 
-#include <grpc++/config.h>
 #include <grpc++/impl/grpc_library.h>
+#include <grpc++/support/config.h>
 
 namespace grpc {
 class ChannelArguments;
-class ChannelInterface;
+class Channel;
 class SecureCredentials;
 
 class Credentials : public GrpcLibrary {
@@ -57,11 +57,11 @@
   virtual SecureCredentials* AsSecureCredentials() = 0;
 
  private:
-  friend std::shared_ptr<ChannelInterface> CreateChannel(
+  friend std::shared_ptr<Channel> CreateChannel(
       const grpc::string& target, const std::shared_ptr<Credentials>& creds,
       const ChannelArguments& args);
 
-  virtual std::shared_ptr<ChannelInterface> CreateChannel(
+  virtual std::shared_ptr<Channel> CreateChannel(
       const grpc::string& target, const ChannelArguments& args) = 0;
 };
 
diff --git a/include/grpc++/async_generic_service.h b/include/grpc++/generic/async_generic_service.h
similarity index 91%
rename from include/grpc++/async_generic_service.h
rename to include/grpc++/generic/async_generic_service.h
index b435c6e..8578d85 100644
--- a/include/grpc++/async_generic_service.h
+++ b/include/grpc++/generic/async_generic_service.h
@@ -31,11 +31,11 @@
  *
  */
 
-#ifndef GRPCXX_ASYNC_GENERIC_SERVICE_H
-#define GRPCXX_ASYNC_GENERIC_SERVICE_H
+#ifndef GRPCXX_GENERIC_ASYNC_GENERIC_SERVICE_H
+#define GRPCXX_GENERIC_ASYNC_GENERIC_SERVICE_H
 
-#include <grpc++/byte_buffer.h>
-#include <grpc++/stream.h>
+#include <grpc++/support/byte_buffer.h>
+#include <grpc++/support/async_stream.h>
 
 struct grpc_server;
 
@@ -75,4 +75,4 @@
 
 }  // namespace grpc
 
-#endif  // GRPCXX_ASYNC_GENERIC_SERVICE_H
+#endif  // GRPCXX_GENERIC_ASYNC_GENERIC_SERVICE_H
diff --git a/include/grpc++/generic_stub.h b/include/grpc++/generic/generic_stub.h
similarity index 86%
rename from include/grpc++/generic_stub.h
rename to include/grpc++/generic/generic_stub.h
index 172f10e..1bb7900 100644
--- a/include/grpc++/generic_stub.h
+++ b/include/grpc++/generic/generic_stub.h
@@ -31,11 +31,11 @@
  *
  */
 
-#ifndef GRPCXX_GENERIC_STUB_H
-#define GRPCXX_GENERIC_STUB_H
+#ifndef GRPCXX_GENERIC_GENERIC_STUB_H
+#define GRPCXX_GENERIC_GENERIC_STUB_H
 
-#include <grpc++/byte_buffer.h>
-#include <grpc++/stream.h>
+#include <grpc++/support/async_stream.h>
+#include <grpc++/support/byte_buffer.h>
 
 namespace grpc {
 
@@ -47,8 +47,7 @@
 // by name.
 class GenericStub GRPC_FINAL {
  public:
-  explicit GenericStub(std::shared_ptr<ChannelInterface> channel)
-      : channel_(channel) {}
+  explicit GenericStub(std::shared_ptr<Channel> channel) : channel_(channel) {}
 
   // begin a call to a named method
   std::unique_ptr<GenericClientAsyncReaderWriter> Call(
@@ -56,9 +55,9 @@
       void* tag);
 
  private:
-  std::shared_ptr<ChannelInterface> channel_;
+  std::shared_ptr<Channel> channel_;
 };
 
 }  // namespace grpc
 
-#endif  // GRPCXX_GENERIC_STUB_H
+#endif  // GRPCXX_GENERIC_GENERIC_STUB_H
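GenericStub now binds to the concrete Channel type; construction is otherwise unchanged. A short sketch, reusing the assumed channel creation from the create_channel.h note above:

    #include <grpc++/create_channel.h>
    #include <grpc++/generic/generic_stub.h>

    std::shared_ptr<grpc::Channel> channel = grpc::CreateChannel(
        "localhost:50051", grpc::InsecureCredentials(), grpc::ChannelArguments());
    grpc::GenericStub generic_stub(channel);
    // generic_stub.Call(...) then begins a GenericClientAsyncReaderWriter for a method named at runtime.
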
diff --git a/include/grpc++/impl/call.h b/include/grpc++/impl/call.h
index bc1db4c..e5da6c9 100644
--- a/include/grpc++/impl/call.h
+++ b/include/grpc++/impl/call.h
@@ -34,18 +34,17 @@
 #ifndef GRPCXX_IMPL_CALL_H
 #define GRPCXX_IMPL_CALL_H
 
-#include <grpc/support/alloc.h>
-#include <grpc++/client_context.h>
-#include <grpc++/completion_queue.h>
-#include <grpc++/config.h>
-#include <grpc++/status.h>
-#include <grpc++/impl/serialization_traits.h>
-
 #include <functional>
 #include <memory>
 #include <map>
+#include <cstring>
 
-#include <string.h>
+#include <grpc/support/alloc.h>
+#include <grpc++/client_context.h>
+#include <grpc++/completion_queue.h>
+#include <grpc++/impl/serialization_traits.h>
+#include <grpc++/support/config.h>
+#include <grpc++/support/status.h>
 
 struct grpc_call;
 struct grpc_op;
@@ -541,8 +540,7 @@
 template <class Op1 = CallNoOp<1>, class Op2 = CallNoOp<2>,
           class Op3 = CallNoOp<3>, class Op4 = CallNoOp<4>,
           class Op5 = CallNoOp<5>, class Op6 = CallNoOp<6>>
-class SneakyCallOpSet GRPC_FINAL
-    : public CallOpSet<Op1, Op2, Op3, Op4, Op5, Op6> {
+class SneakyCallOpSet : public CallOpSet<Op1, Op2, Op3, Op4, Op5, Op6> {
  public:
   bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE {
     typedef CallOpSet<Op1, Op2, Op3, Op4, Op5, Op6> Base;
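
Beyond the include reshuffle (and <string.h> becoming <cstring>), SneakyCallOpSet drops GRPC_FINAL, so derived op sets become possible; the UnimplementedAsyncResponse forward-declared in server.h below is a plausible consumer. A hedged sketch of what the change permits:

    // Hypothetical subclass, only legal now that GRPC_FINAL is gone:
    class TracingOpSet : public grpc::SneakyCallOpSet<grpc::CallOpSendInitialMetadata,
                                                       grpc::CallOpServerSendStatus> {
      // extra bookkeeping could be layered over FinalizeResult here
    };
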
diff --git a/include/grpc++/impl/client_unary_call.h b/include/grpc++/impl/client_unary_call.h
index b77ce7d..4cdc800 100644
--- a/include/grpc++/impl/client_unary_call.h
+++ b/include/grpc++/impl/client_unary_call.h
@@ -34,21 +34,20 @@
 #ifndef GRPCXX_IMPL_CLIENT_UNARY_CALL_H
 #define GRPCXX_IMPL_CLIENT_UNARY_CALL_H
 
-#include <grpc++/config.h>
-#include <grpc++/status.h>
-
 #include <grpc++/impl/call.h>
+#include <grpc++/support/config.h>
+#include <grpc++/support/status.h>
 
 namespace grpc {
 
-class ChannelInterface;
+class Channel;
 class ClientContext;
 class CompletionQueue;
 class RpcMethod;
 
 // Wrapper that performs a blocking unary call
 template <class InputMessage, class OutputMessage>
-Status BlockingUnaryCall(ChannelInterface* channel, const RpcMethod& method,
+Status BlockingUnaryCall(Channel* channel, const RpcMethod& method,
                          ClientContext* context, const InputMessage& request,
                          OutputMessage* result) {
   CompletionQueue cq;
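
BlockingUnaryCall is retyped from ChannelInterface* to Channel*. Generated stubs are the normal callers, but the shape of a direct invocation under the new signature looks like this; EchoRequest/EchoReply and the method path are hypothetical, NORMAL_RPC is assumed to be the unary RpcType value alongside the BIDI_STREAMING shown later, and the channel-taking RpcMethod constructor appears in the rpc_method.h hunk further down:

    // Sketch only: message types and method name are illustrative.
    grpc::Status Echo(const std::shared_ptr<grpc::Channel>& channel,
                      const EchoRequest& request, EchoReply* reply) {
      grpc::RpcMethod method("/test.EchoService/Echo", grpc::RpcMethod::NORMAL_RPC, channel);
      grpc::ClientContext context;
      return grpc::BlockingUnaryCall(channel.get(), method, &context, request, reply);
    }
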
diff --git a/include/grpc++/impl/proto_utils.h b/include/grpc++/impl/proto_utils.h
index ebefa3e..283e334 100644
--- a/include/grpc++/impl/proto_utils.h
+++ b/include/grpc++/impl/proto_utils.h
@@ -38,8 +38,8 @@
 
 #include <grpc/grpc.h>
 #include <grpc++/impl/serialization_traits.h>
-#include <grpc++/config_protobuf.h>
-#include <grpc++/status.h>
+#include <grpc++/support/config_protobuf.h>
+#include <grpc++/support/status.h>
 
 namespace grpc {
 
diff --git a/include/grpc++/impl/rpc_method.h b/include/grpc++/impl/rpc_method.h
index 50a160b..9800268 100644
--- a/include/grpc++/impl/rpc_method.h
+++ b/include/grpc++/impl/rpc_method.h
@@ -34,6 +34,10 @@
 #ifndef GRPCXX_IMPL_RPC_METHOD_H
 #define GRPCXX_IMPL_RPC_METHOD_H
 
+#include <memory>
+
+#include <grpc++/channel.h>
+
 namespace grpc {
 
 class RpcMethod {
@@ -45,8 +49,14 @@
     BIDI_STREAMING
   };
 
-  RpcMethod(const char* name, RpcType type, void* channel_tag)
-      : name_(name), method_type_(type), channel_tag_(channel_tag) {}
+  RpcMethod(const char* name, RpcType type)
+      : name_(name), method_type_(type), channel_tag_(NULL) {}
+
+  RpcMethod(const char* name, RpcType type,
+            const std::shared_ptr<Channel>& channel)
+      : name_(name),
+        method_type_(type),
+        channel_tag_(channel->RegisterMethod(name)) {}
 
   const char* name() const { return name_; }
   RpcType method_type() const { return method_type_; }
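
RpcMethod splits into two constructors: a server-side form with no channel (channel_tag_ stays NULL) and a client-side form that registers the method name on its Channel at construction via RegisterMethod, rather than receiving a pre-registered void* tag. Both in one sketch; NORMAL_RPC is assumed to sit next to the BIDI_STREAMING value shown above, and channel is a std::shared_ptr<grpc::Channel>:

    // Server side: no channel, no tag.
    grpc::RpcMethod service_method("/pkg.Service/Method", grpc::RpcMethod::NORMAL_RPC);

    // Client side: the method registers itself on the channel it will be invoked over.
    grpc::RpcMethod stub_method("/pkg.Service/Method", grpc::RpcMethod::NORMAL_RPC, channel);
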
diff --git a/include/grpc++/impl/rpc_service_method.h b/include/grpc++/impl/rpc_service_method.h
index 925801e..fcb0b7c 100644
--- a/include/grpc++/impl/rpc_service_method.h
+++ b/include/grpc++/impl/rpc_service_method.h
@@ -39,10 +39,10 @@
 #include <memory>
 #include <vector>
 
-#include <grpc++/config.h>
 #include <grpc++/impl/rpc_method.h>
-#include <grpc++/status.h>
-#include <grpc++/stream.h>
+#include <grpc++/support/config.h>
+#include <grpc++/support/status.h>
+#include <grpc++/support/sync_stream.h>
 
 namespace grpc {
 class ServerContext;
@@ -211,13 +211,19 @@
 // Handle unknown method by returning UNIMPLEMENTED error.
 class UnknownMethodHandler : public MethodHandler {
  public:
-  void RunHandler(const HandlerParameter& param) GRPC_FINAL {
+  template <class T>
+  static void FillOps(ServerContext* context, T* ops) {
     Status status(StatusCode::UNIMPLEMENTED, "");
-    CallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus> ops;
-    if (!param.server_context->sent_initial_metadata_) {
-      ops.SendInitialMetadata(param.server_context->initial_metadata_);
+    if (!context->sent_initial_metadata_) {
+      ops->SendInitialMetadata(context->initial_metadata_);
+      context->sent_initial_metadata_ = true;
     }
-    ops.ServerSendStatus(param.server_context->trailing_metadata_, status);
+    ops->ServerSendStatus(context->trailing_metadata_, status);
+  }
+
+  void RunHandler(const HandlerParameter& param) GRPC_FINAL {
+    CallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus> ops;
+    FillOps(param.server_context, &ops);
     param.call->PerformOps(&ops);
     param.call->cq()->Pluck(&ops);
   }
@@ -229,7 +235,7 @@
   // Takes ownership of the handler
   RpcServiceMethod(const char* name, RpcMethod::RpcType type,
                    MethodHandler* handler)
-      : RpcMethod(name, type, nullptr), handler_(handler) {}
+      : RpcMethod(name, type), handler_(handler) {}
 
   MethodHandler* handler() { return handler_.get(); }
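
UnknownMethodHandler's op-building moves into a static, templated FillOps, so the same send-initial-metadata-plus-UNIMPLEMENTED-status sequence can be reused outside the synchronous RunHandler path; the Unimplemented* helpers forward-declared in server.h below are the likely consumers. RpcServiceMethod switches to the new tag-less RpcMethod constructor accordingly. What the reuse looks like for a caller that already holds a ServerContext* and a Call* (names illustrative):

    grpc::CallOpSet<grpc::CallOpSendInitialMetadata, grpc::CallOpServerSendStatus> ops;
    grpc::UnknownMethodHandler::FillOps(server_context, &ops);  // marks metadata sent, queues UNIMPLEMENTED
    call->PerformOps(&ops);  // completion is then surfaced by whichever queue drives this call
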
 
diff --git a/include/grpc++/impl/service_type.h b/include/grpc++/impl/service_type.h
index c33a278..3b6ac1d 100644
--- a/include/grpc++/impl/service_type.h
+++ b/include/grpc++/impl/service_type.h
@@ -34,10 +34,10 @@
 #ifndef GRPCXX_IMPL_SERVICE_TYPE_H
 #define GRPCXX_IMPL_SERVICE_TYPE_H
 
-#include <grpc++/config.h>
 #include <grpc++/impl/serialization_traits.h>
 #include <grpc++/server.h>
-#include <grpc++/status.h>
+#include <grpc++/support/config.h>
+#include <grpc++/support/status.h>
 
 namespace grpc {
 
diff --git a/include/grpc++/impl/sync.h b/include/grpc++/impl/sync.h
index 2f41d2b..999c430 100644
--- a/include/grpc++/impl/sync.h
+++ b/include/grpc++/impl/sync.h
@@ -34,7 +34,7 @@
 #ifndef GRPCXX_IMPL_SYNC_H
 #define GRPCXX_IMPL_SYNC_H
 
-#include <grpc++/config.h>
+#include <grpc++/support/config.h>
 
 #ifdef GRPC_CXX0X_NO_THREAD
 #include <grpc++/impl/sync_no_cxx11.h>
diff --git a/include/grpc++/impl/thd.h b/include/grpc++/impl/thd.h
index 4c4578a..f8d4258 100644
--- a/include/grpc++/impl/thd.h
+++ b/include/grpc++/impl/thd.h
@@ -34,7 +34,7 @@
 #ifndef GRPCXX_IMPL_THD_H
 #define GRPCXX_IMPL_THD_H
 
-#include <grpc++/config.h>
+#include <grpc++/support/config.h>
 
 #ifdef GRPC_CXX0X_NO_THREAD
 #include <grpc++/impl/thd_no_cxx11.h>
diff --git a/include/grpc++/server.h b/include/grpc++/server.h
index a2bc097..c8979e4 100644
--- a/include/grpc++/server.h
+++ b/include/grpc++/server.h
@@ -38,11 +38,11 @@
 #include <memory>
 
 #include <grpc++/completion_queue.h>
-#include <grpc++/config.h>
 #include <grpc++/impl/call.h>
 #include <grpc++/impl/grpc_library.h>
 #include <grpc++/impl/sync.h>
-#include <grpc++/status.h>
+#include <grpc++/support/config.h>
+#include <grpc++/support/status.h>
 
 struct grpc_server;
 
@@ -98,7 +98,7 @@
   // Add a listening port. Can be called multiple times.
   int AddListeningPort(const grpc::string& addr, ServerCredentials* creds);
   // Start the server.
-  bool Start();
+  bool Start(ServerCompletionQueue** cqs, size_t num_cqs);
 
   void HandleQueueClosed();
   void RunRpc();
@@ -112,7 +112,8 @@
    public:
     BaseAsyncRequest(Server* server, ServerContext* context,
                      ServerAsyncStreamingInterface* stream,
-                     CompletionQueue* call_cq, void* tag);
+                     CompletionQueue* call_cq, void* tag,
+                     bool delete_on_finalize);
     virtual ~BaseAsyncRequest();
 
     bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE;
@@ -123,6 +124,7 @@
     ServerAsyncStreamingInterface* const stream_;
     CompletionQueue* const call_cq_;
     void* const tag_;
+    const bool delete_on_finalize_;
     grpc_call* call_;
     grpc_metadata_array initial_metadata_array_;
   };
@@ -184,12 +186,13 @@
     Message* const request_;
   };
 
-  class GenericAsyncRequest GRPC_FINAL : public BaseAsyncRequest {
+  class GenericAsyncRequest : public BaseAsyncRequest {
    public:
     GenericAsyncRequest(Server* server, GenericServerContext* context,
                         ServerAsyncStreamingInterface* stream,
                         CompletionQueue* call_cq,
-                        ServerCompletionQueue* notification_cq, void* tag);
+                        ServerCompletionQueue* notification_cq, void* tag,
+                        bool delete_on_finalize);
 
     bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE;
 
@@ -197,6 +200,10 @@
     grpc_call_details call_details_;
   };
 
+  class UnimplementedAsyncRequestContext;
+  class UnimplementedAsyncRequest;
+  class UnimplementedAsyncResponse;
+
   template <class Message>
   void RequestAsyncCall(void* registered_method, ServerContext* context,
                         ServerAsyncStreamingInterface* stream,
@@ -221,7 +228,7 @@
                                ServerCompletionQueue* notification_cq,
                                void* tag) {
     new GenericAsyncRequest(this, context, stream, call_cq, notification_cq,
-                            tag);
+                            tag, true);
   }
 
   const int max_message_size_;
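
Taken together, these server.h changes (Start now receives the array of ServerCompletionQueues, BaseAsyncRequest gains delete_on_finalize_, GenericAsyncRequest loses GRPC_FINAL, and UnimplementedAsyncRequest/Response are forward-declared) read as plumbing for answering calls to unregistered methods on async servers; a request object that is not deleted on finalize can presumably be re-armed. RequestAsyncGenericCall keeps its old behaviour by passing true, as the last hunk shows. Start appears to remain builder-facing; a hedged sketch of its new call shape:

    // Hypothetical call site inside befriended builder code (names illustrative):
    std::vector<grpc::ServerCompletionQueue*> cqs;  // the queues previously handed to the application
    // ... populate cqs ...
    if (!server->Start(cqs.data(), cqs.size())) {
      // startup failed
    }
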
diff --git a/include/grpc++/server_builder.h b/include/grpc++/server_builder.h
index 906daf1..8cd2048 100644
--- a/include/grpc++/server_builder.h
+++ b/include/grpc++/server_builder.h
@@ -37,7 +37,7 @@
 #include <memory>
 #include <vector>
 
-#include <grpc++/config.h>
+#include <grpc++/support/config.h>
 
 namespace grpc {
 
@@ -96,13 +96,9 @@
                         std::shared_ptr<ServerCredentials> creds,
                         int* selected_port = nullptr);
 
-  // Set the thread pool used for running appliation rpc handlers.
-  // Does not take ownership.
-  void SetThreadPool(ThreadPoolInterface* thread_pool);
-
   // Add a completion queue for handling asynchronous services
-  // Caller is required to keep this completion queue live until calling
-  // BuildAndStart()
+  // Caller is required to keep this completion queue live until
+  // the server is destroyed.
   std::unique_ptr<ServerCompletionQueue> AddCompletionQueue();
 
   // Return a running server which is ready for processing rpcs.
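
SetThreadPool drops out of the public builder (thread pools become an internal server detail), and the lifetime contract for AddCompletionQueue is extended: the queue must now outlive the server, not just BuildAndStart. A minimal sketch of the intended ordering; the drain loop reflects common completion-queue practice rather than anything this hunk mandates:

    grpc::ServerBuilder builder;
    // builder.AddListeningPort(...), service registration, etc.
    std::unique_ptr<grpc::ServerCompletionQueue> cq = builder.AddCompletionQueue();
    std::unique_ptr<grpc::Server> server = builder.BuildAndStart();

    // ... run the service ...

    server->Shutdown();
    cq->Shutdown();                  // only after the server has shut down
    void* tag;
    bool ok;
    while (cq->Next(&tag, &ok)) {}   // drain outstanding events before the queue is destroyed
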
diff --git a/include/grpc++/server_context.h b/include/grpc++/server_context.h
index b87a1f0..ce3cb47 100644
--- a/include/grpc++/server_context.h
+++ b/include/grpc++/server_context.h
@@ -39,9 +39,9 @@
 
 #include <grpc/compression.h>
 #include <grpc/support/time.h>
-#include <grpc++/auth_context.h>
-#include <grpc++/config.h>
-#include <grpc++/time.h>
+#include <grpc++/support/auth_context.h>
+#include <grpc++/support/config.h>
+#include <grpc++/support/time.h>
 
 struct gpr_timespec;
 struct grpc_metadata;
diff --git a/include/grpc++/server_credentials.h b/include/grpc++/server_credentials.h
index 11acd67..16b78c0 100644
--- a/include/grpc++/server_credentials.h
+++ b/include/grpc++/server_credentials.h
@@ -37,7 +37,7 @@
 #include <memory>
 #include <vector>
 
-#include <grpc++/config.h>
+#include <grpc++/support/config.h>
 
 struct grpc_server;
 
diff --git a/include/grpc++/stream.h b/include/grpc++/stream.h
deleted file mode 100644
index 45dafcd..0000000
--- a/include/grpc++/stream.h
+++ /dev/null
@@ -1,776 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifndef GRPCXX_STREAM_H
-#define GRPCXX_STREAM_H
-
-#include <grpc++/channel_interface.h>
-#include <grpc++/client_context.h>
-#include <grpc++/completion_queue.h>
-#include <grpc++/server_context.h>
-#include <grpc++/impl/call.h>
-#include <grpc++/impl/service_type.h>
-#include <grpc++/status.h>
-#include <grpc/support/log.h>
-
-namespace grpc {
-
-// Common interface for all client side streaming.
-class ClientStreamingInterface {
- public:
-  virtual ~ClientStreamingInterface() {}
-
-  // Wait until the stream finishes, and return the final status. When the
-  // client side declares it has no more message to send, either implicitly or
-  // by calling WritesDone, it needs to make sure there is no more message to
-  // be received from the server, either implicitly or by getting a false from
-  // a Read().
-  // This function will return either:
-  // - when all incoming messages have been read and the server has returned
-  //   status
-  // - OR when the server has returned a non-OK status
-  virtual Status Finish() = 0;
-};
-
-// An interface that yields a sequence of R messages.
-template <class R>
-class ReaderInterface {
- public:
-  virtual ~ReaderInterface() {}
-
-  // Blocking read a message and parse to msg. Returns true on success.
-  // The method returns false when there will be no more incoming messages,
-  // either because the other side has called WritesDone or the stream has
-  // failed (or been cancelled).
-  virtual bool Read(R* msg) = 0;
-};
-
-// An interface that can be fed a sequence of W messages.
-template <class W>
-class WriterInterface {
- public:
-  virtual ~WriterInterface() {}
-
-  // Blocking write msg to the stream. Returns true on success.
-  // Returns false when the stream has been closed.
-  virtual bool Write(const W& msg, const WriteOptions& options) = 0;
-
-  inline bool Write(const W& msg) { return Write(msg, WriteOptions()); }
-};
-
-template <class R>
-class ClientReaderInterface : public ClientStreamingInterface,
-                              public ReaderInterface<R> {
- public:
-  virtual void WaitForInitialMetadata() = 0;
-};
-
-template <class R>
-class ClientReader GRPC_FINAL : public ClientReaderInterface<R> {
- public:
-  // Blocking create a stream and write the first request out.
-  template <class W>
-  ClientReader(ChannelInterface* channel, const RpcMethod& method,
-               ClientContext* context, const W& request)
-      : context_(context), call_(channel->CreateCall(method, context, &cq_)) {
-    CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
-              CallOpClientSendClose> ops;
-    ops.SendInitialMetadata(context->send_initial_metadata_);
-    // TODO(ctiller): don't assert
-    GPR_ASSERT(ops.SendMessage(request).ok());
-    ops.ClientSendClose();
-    call_.PerformOps(&ops);
-    cq_.Pluck(&ops);
-  }
-
-  // Blocking wait for initial metadata from server. The received metadata
-  // can only be accessed after this call returns. Should only be called before
-  // the first read. Calling this method is optional, and if it is not called
-  // the metadata will be available in ClientContext after the first read.
-  void WaitForInitialMetadata() {
-    GPR_ASSERT(!context_->initial_metadata_received_);
-
-    CallOpSet<CallOpRecvInitialMetadata> ops;
-    ops.RecvInitialMetadata(context_);
-    call_.PerformOps(&ops);
-    cq_.Pluck(&ops);  // status ignored
-  }
-
-  bool Read(R* msg) GRPC_OVERRIDE {
-    CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> ops;
-    if (!context_->initial_metadata_received_) {
-      ops.RecvInitialMetadata(context_);
-    }
-    ops.RecvMessage(msg);
-    call_.PerformOps(&ops);
-    return cq_.Pluck(&ops) && ops.got_message;
-  }
-
-  Status Finish() GRPC_OVERRIDE {
-    CallOpSet<CallOpClientRecvStatus> ops;
-    Status status;
-    ops.ClientRecvStatus(context_, &status);
-    call_.PerformOps(&ops);
-    GPR_ASSERT(cq_.Pluck(&ops));
-    return status;
-  }
-
- private:
-  ClientContext* context_;
-  CompletionQueue cq_;
-  Call call_;
-};
-
-template <class W>
-class ClientWriterInterface : public ClientStreamingInterface,
-                              public WriterInterface<W> {
- public:
-  virtual bool WritesDone() = 0;
-};
-
-template <class W>
-class ClientWriter : public ClientWriterInterface<W> {
- public:
-  // Blocking create a stream.
-  template <class R>
-  ClientWriter(ChannelInterface* channel, const RpcMethod& method,
-               ClientContext* context, R* response)
-      : context_(context), call_(channel->CreateCall(method, context, &cq_)) {
-    finish_ops_.RecvMessage(response);
-
-    CallOpSet<CallOpSendInitialMetadata> ops;
-    ops.SendInitialMetadata(context->send_initial_metadata_);
-    call_.PerformOps(&ops);
-    cq_.Pluck(&ops);
-  }
-
-  using WriterInterface<W>::Write;
-  bool Write(const W& msg, const WriteOptions& options) GRPC_OVERRIDE {
-    CallOpSet<CallOpSendMessage> ops;
-    if (!ops.SendMessage(msg, options).ok()) {
-      return false;
-    }
-    call_.PerformOps(&ops);
-    return cq_.Pluck(&ops);
-  }
-
-  bool WritesDone() GRPC_OVERRIDE {
-    CallOpSet<CallOpClientSendClose> ops;
-    ops.ClientSendClose();
-    call_.PerformOps(&ops);
-    return cq_.Pluck(&ops);
-  }
-
-  // Read the final response and wait for the final status.
-  Status Finish() GRPC_OVERRIDE {
-    Status status;
-    finish_ops_.ClientRecvStatus(context_, &status);
-    call_.PerformOps(&finish_ops_);
-    GPR_ASSERT(cq_.Pluck(&finish_ops_));
-    return status;
-  }
-
- private:
-  ClientContext* context_;
-  CallOpSet<CallOpGenericRecvMessage, CallOpClientRecvStatus> finish_ops_;
-  CompletionQueue cq_;
-  Call call_;
-};
-
-// Client-side interface for bi-directional streaming.
-template <class W, class R>
-class ClientReaderWriterInterface : public ClientStreamingInterface,
-                                    public WriterInterface<W>,
-                                    public ReaderInterface<R> {
- public:
-  virtual void WaitForInitialMetadata() = 0;
-  virtual bool WritesDone() = 0;
-};
-
-template <class W, class R>
-class ClientReaderWriter GRPC_FINAL : public ClientReaderWriterInterface<W, R> {
- public:
-  // Blocking create a stream.
-  ClientReaderWriter(ChannelInterface* channel, const RpcMethod& method,
-                     ClientContext* context)
-      : context_(context), call_(channel->CreateCall(method, context, &cq_)) {
-    CallOpSet<CallOpSendInitialMetadata> ops;
-    ops.SendInitialMetadata(context->send_initial_metadata_);
-    call_.PerformOps(&ops);
-    cq_.Pluck(&ops);
-  }
-
-  // Blocking wait for initial metadata from server. The received metadata
-  // can only be accessed after this call returns. Should only be called before
-  // the first read. Calling this method is optional, and if it is not called
-  // the metadata will be available in ClientContext after the first read.
-  void WaitForInitialMetadata() {
-    GPR_ASSERT(!context_->initial_metadata_received_);
-
-    CallOpSet<CallOpRecvInitialMetadata> ops;
-    ops.RecvInitialMetadata(context_);
-    call_.PerformOps(&ops);
-    cq_.Pluck(&ops);  // status ignored
-  }
-
-  bool Read(R* msg) GRPC_OVERRIDE {
-    CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> ops;
-    if (!context_->initial_metadata_received_) {
-      ops.RecvInitialMetadata(context_);
-    }
-    ops.RecvMessage(msg);
-    call_.PerformOps(&ops);
-    return cq_.Pluck(&ops) && ops.got_message;
-  }
-
-  using WriterInterface<W>::Write;
-  bool Write(const W& msg, const WriteOptions& options) GRPC_OVERRIDE {
-    CallOpSet<CallOpSendMessage> ops;
-    if (!ops.SendMessage(msg, options).ok()) return false;
-    call_.PerformOps(&ops);
-    return cq_.Pluck(&ops);
-  }
-
-  bool WritesDone() GRPC_OVERRIDE {
-    CallOpSet<CallOpClientSendClose> ops;
-    ops.ClientSendClose();
-    call_.PerformOps(&ops);
-    return cq_.Pluck(&ops);
-  }
-
-  Status Finish() GRPC_OVERRIDE {
-    CallOpSet<CallOpClientRecvStatus> ops;
-    Status status;
-    ops.ClientRecvStatus(context_, &status);
-    call_.PerformOps(&ops);
-    GPR_ASSERT(cq_.Pluck(&ops));
-    return status;
-  }
-
- private:
-  ClientContext* context_;
-  CompletionQueue cq_;
-  Call call_;
-};
-
-template <class R>
-class ServerReader GRPC_FINAL : public ReaderInterface<R> {
- public:
-  ServerReader(Call* call, ServerContext* ctx) : call_(call), ctx_(ctx) {}
-
-  void SendInitialMetadata() {
-    GPR_ASSERT(!ctx_->sent_initial_metadata_);
-
-    CallOpSet<CallOpSendInitialMetadata> ops;
-    ops.SendInitialMetadata(ctx_->initial_metadata_);
-    ctx_->sent_initial_metadata_ = true;
-    call_->PerformOps(&ops);
-    call_->cq()->Pluck(&ops);
-  }
-
-  bool Read(R* msg) GRPC_OVERRIDE {
-    CallOpSet<CallOpRecvMessage<R>> ops;
-    ops.RecvMessage(msg);
-    call_->PerformOps(&ops);
-    return call_->cq()->Pluck(&ops) && ops.got_message;
-  }
-
- private:
-  Call* const call_;
-  ServerContext* const ctx_;
-};
-
-template <class W>
-class ServerWriter GRPC_FINAL : public WriterInterface<W> {
- public:
-  ServerWriter(Call* call, ServerContext* ctx) : call_(call), ctx_(ctx) {}
-
-  void SendInitialMetadata() {
-    GPR_ASSERT(!ctx_->sent_initial_metadata_);
-
-    CallOpSet<CallOpSendInitialMetadata> ops;
-    ops.SendInitialMetadata(ctx_->initial_metadata_);
-    ctx_->sent_initial_metadata_ = true;
-    call_->PerformOps(&ops);
-    call_->cq()->Pluck(&ops);
-  }
-
-  using WriterInterface<W>::Write;
-  bool Write(const W& msg, const WriteOptions& options) GRPC_OVERRIDE {
-    CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage> ops;
-    if (!ops.SendMessage(msg, options).ok()) {
-      return false;
-    }
-    if (!ctx_->sent_initial_metadata_) {
-      ops.SendInitialMetadata(ctx_->initial_metadata_);
-      ctx_->sent_initial_metadata_ = true;
-    }
-    call_->PerformOps(&ops);
-    return call_->cq()->Pluck(&ops);
-  }
-
- private:
-  Call* const call_;
-  ServerContext* const ctx_;
-};
-
-// Server-side interface for bi-directional streaming.
-template <class W, class R>
-class ServerReaderWriter GRPC_FINAL : public WriterInterface<W>,
-                                      public ReaderInterface<R> {
- public:
-  ServerReaderWriter(Call* call, ServerContext* ctx) : call_(call), ctx_(ctx) {}
-
-  void SendInitialMetadata() {
-    GPR_ASSERT(!ctx_->sent_initial_metadata_);
-
-    CallOpSet<CallOpSendInitialMetadata> ops;
-    ops.SendInitialMetadata(ctx_->initial_metadata_);
-    ctx_->sent_initial_metadata_ = true;
-    call_->PerformOps(&ops);
-    call_->cq()->Pluck(&ops);
-  }
-
-  bool Read(R* msg) GRPC_OVERRIDE {
-    CallOpSet<CallOpRecvMessage<R>> ops;
-    ops.RecvMessage(msg);
-    call_->PerformOps(&ops);
-    return call_->cq()->Pluck(&ops) && ops.got_message;
-  }
-
-  using WriterInterface<W>::Write;
-  bool Write(const W& msg, const WriteOptions& options) GRPC_OVERRIDE {
-    CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage> ops;
-    if (!ops.SendMessage(msg, options).ok()) {
-      return false;
-    }
-    if (!ctx_->sent_initial_metadata_) {
-      ops.SendInitialMetadata(ctx_->initial_metadata_);
-      ctx_->sent_initial_metadata_ = true;
-    }
-    call_->PerformOps(&ops);
-    return call_->cq()->Pluck(&ops);
-  }
-
- private:
-  Call* const call_;
-  ServerContext* const ctx_;
-};
-
-// Async interfaces
-// Common interface for all client side streaming.
-class ClientAsyncStreamingInterface {
- public:
-  virtual ~ClientAsyncStreamingInterface() {}
-
-  virtual void ReadInitialMetadata(void* tag) = 0;
-
-  virtual void Finish(Status* status, void* tag) = 0;
-};
-
-// An interface that yields a sequence of R messages.
-template <class R>
-class AsyncReaderInterface {
- public:
-  virtual ~AsyncReaderInterface() {}
-
-  virtual void Read(R* msg, void* tag) = 0;
-};
-
-// An interface that can be fed a sequence of W messages.
-template <class W>
-class AsyncWriterInterface {
- public:
-  virtual ~AsyncWriterInterface() {}
-
-  virtual void Write(const W& msg, void* tag) = 0;
-};
-
-template <class R>
-class ClientAsyncReaderInterface : public ClientAsyncStreamingInterface,
-                                   public AsyncReaderInterface<R> {};
-
-template <class R>
-class ClientAsyncReader GRPC_FINAL : public ClientAsyncReaderInterface<R> {
- public:
-  // Create a stream and write the first request out.
-  template <class W>
-  ClientAsyncReader(ChannelInterface* channel, CompletionQueue* cq,
-                    const RpcMethod& method, ClientContext* context,
-                    const W& request, void* tag)
-      : context_(context), call_(channel->CreateCall(method, context, cq)) {
-    init_ops_.set_output_tag(tag);
-    init_ops_.SendInitialMetadata(context->send_initial_metadata_);
-    // TODO(ctiller): don't assert
-    GPR_ASSERT(init_ops_.SendMessage(request).ok());
-    init_ops_.ClientSendClose();
-    call_.PerformOps(&init_ops_);
-  }
-
-  void ReadInitialMetadata(void* tag) GRPC_OVERRIDE {
-    GPR_ASSERT(!context_->initial_metadata_received_);
-
-    meta_ops_.set_output_tag(tag);
-    meta_ops_.RecvInitialMetadata(context_);
-    call_.PerformOps(&meta_ops_);
-  }
-
-  void Read(R* msg, void* tag) GRPC_OVERRIDE {
-    read_ops_.set_output_tag(tag);
-    if (!context_->initial_metadata_received_) {
-      read_ops_.RecvInitialMetadata(context_);
-    }
-    read_ops_.RecvMessage(msg);
-    call_.PerformOps(&read_ops_);
-  }
-
-  void Finish(Status* status, void* tag) GRPC_OVERRIDE {
-    finish_ops_.set_output_tag(tag);
-    if (!context_->initial_metadata_received_) {
-      finish_ops_.RecvInitialMetadata(context_);
-    }
-    finish_ops_.ClientRecvStatus(context_, status);
-    call_.PerformOps(&finish_ops_);
-  }
-
- private:
-  ClientContext* context_;
-  Call call_;
-  CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage, CallOpClientSendClose>
-      init_ops_;
-  CallOpSet<CallOpRecvInitialMetadata> meta_ops_;
-  CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> read_ops_;
-  CallOpSet<CallOpRecvInitialMetadata, CallOpClientRecvStatus> finish_ops_;
-};
-
-template <class W>
-class ClientAsyncWriterInterface : public ClientAsyncStreamingInterface,
-                                   public AsyncWriterInterface<W> {
- public:
-  virtual void WritesDone(void* tag) = 0;
-};
-
-template <class W>
-class ClientAsyncWriter GRPC_FINAL : public ClientAsyncWriterInterface<W> {
- public:
-  template <class R>
-  ClientAsyncWriter(ChannelInterface* channel, CompletionQueue* cq,
-                    const RpcMethod& method, ClientContext* context,
-                    R* response, void* tag)
-      : context_(context), call_(channel->CreateCall(method, context, cq)) {
-    finish_ops_.RecvMessage(response);
-
-    init_ops_.set_output_tag(tag);
-    init_ops_.SendInitialMetadata(context->send_initial_metadata_);
-    call_.PerformOps(&init_ops_);
-  }
-
-  void ReadInitialMetadata(void* tag) GRPC_OVERRIDE {
-    GPR_ASSERT(!context_->initial_metadata_received_);
-
-    meta_ops_.set_output_tag(tag);
-    meta_ops_.RecvInitialMetadata(context_);
-    call_.PerformOps(&meta_ops_);
-  }
-
-  void Write(const W& msg, void* tag) GRPC_OVERRIDE {
-    write_ops_.set_output_tag(tag);
-    // TODO(ctiller): don't assert
-    GPR_ASSERT(write_ops_.SendMessage(msg).ok());
-    call_.PerformOps(&write_ops_);
-  }
-
-  void WritesDone(void* tag) GRPC_OVERRIDE {
-    writes_done_ops_.set_output_tag(tag);
-    writes_done_ops_.ClientSendClose();
-    call_.PerformOps(&writes_done_ops_);
-  }
-
-  void Finish(Status* status, void* tag) GRPC_OVERRIDE {
-    finish_ops_.set_output_tag(tag);
-    if (!context_->initial_metadata_received_) {
-      finish_ops_.RecvInitialMetadata(context_);
-    }
-    finish_ops_.ClientRecvStatus(context_, status);
-    call_.PerformOps(&finish_ops_);
-  }
-
- private:
-  ClientContext* context_;
-  Call call_;
-  CallOpSet<CallOpSendInitialMetadata> init_ops_;
-  CallOpSet<CallOpRecvInitialMetadata> meta_ops_;
-  CallOpSet<CallOpSendMessage> write_ops_;
-  CallOpSet<CallOpClientSendClose> writes_done_ops_;
-  CallOpSet<CallOpRecvInitialMetadata, CallOpGenericRecvMessage,
-            CallOpClientRecvStatus> finish_ops_;
-};
-
-// Client-side interface for bi-directional streaming.
-template <class W, class R>
-class ClientAsyncReaderWriterInterface : public ClientAsyncStreamingInterface,
-                                         public AsyncWriterInterface<W>,
-                                         public AsyncReaderInterface<R> {
- public:
-  virtual void WritesDone(void* tag) = 0;
-};
-
-template <class W, class R>
-class ClientAsyncReaderWriter GRPC_FINAL
-    : public ClientAsyncReaderWriterInterface<W, R> {
- public:
-  ClientAsyncReaderWriter(ChannelInterface* channel, CompletionQueue* cq,
-                          const RpcMethod& method, ClientContext* context,
-                          void* tag)
-      : context_(context), call_(channel->CreateCall(method, context, cq)) {
-    init_ops_.set_output_tag(tag);
-    init_ops_.SendInitialMetadata(context->send_initial_metadata_);
-    call_.PerformOps(&init_ops_);
-  }
-
-  void ReadInitialMetadata(void* tag) GRPC_OVERRIDE {
-    GPR_ASSERT(!context_->initial_metadata_received_);
-
-    meta_ops_.set_output_tag(tag);
-    meta_ops_.RecvInitialMetadata(context_);
-    call_.PerformOps(&meta_ops_);
-  }
-
-  void Read(R* msg, void* tag) GRPC_OVERRIDE {
-    read_ops_.set_output_tag(tag);
-    if (!context_->initial_metadata_received_) {
-      read_ops_.RecvInitialMetadata(context_);
-    }
-    read_ops_.RecvMessage(msg);
-    call_.PerformOps(&read_ops_);
-  }
-
-  void Write(const W& msg, void* tag) GRPC_OVERRIDE {
-    write_ops_.set_output_tag(tag);
-    // TODO(ctiller): don't assert
-    GPR_ASSERT(write_ops_.SendMessage(msg).ok());
-    call_.PerformOps(&write_ops_);
-  }
-
-  void WritesDone(void* tag) GRPC_OVERRIDE {
-    writes_done_ops_.set_output_tag(tag);
-    writes_done_ops_.ClientSendClose();
-    call_.PerformOps(&writes_done_ops_);
-  }
-
-  void Finish(Status* status, void* tag) GRPC_OVERRIDE {
-    finish_ops_.set_output_tag(tag);
-    if (!context_->initial_metadata_received_) {
-      finish_ops_.RecvInitialMetadata(context_);
-    }
-    finish_ops_.ClientRecvStatus(context_, status);
-    call_.PerformOps(&finish_ops_);
-  }
-
- private:
-  ClientContext* context_;
-  Call call_;
-  CallOpSet<CallOpSendInitialMetadata> init_ops_;
-  CallOpSet<CallOpRecvInitialMetadata> meta_ops_;
-  CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> read_ops_;
-  CallOpSet<CallOpSendMessage> write_ops_;
-  CallOpSet<CallOpClientSendClose> writes_done_ops_;
-  CallOpSet<CallOpRecvInitialMetadata, CallOpClientRecvStatus> finish_ops_;
-};
-
-template <class W, class R>
-class ServerAsyncReader GRPC_FINAL : public ServerAsyncStreamingInterface,
-                                     public AsyncReaderInterface<R> {
- public:
-  explicit ServerAsyncReader(ServerContext* ctx)
-      : call_(nullptr, nullptr, nullptr), ctx_(ctx) {}
-
-  void SendInitialMetadata(void* tag) GRPC_OVERRIDE {
-    GPR_ASSERT(!ctx_->sent_initial_metadata_);
-
-    meta_ops_.set_output_tag(tag);
-    meta_ops_.SendInitialMetadata(ctx_->initial_metadata_);
-    ctx_->sent_initial_metadata_ = true;
-    call_.PerformOps(&meta_ops_);
-  }
-
-  void Read(R* msg, void* tag) GRPC_OVERRIDE {
-    read_ops_.set_output_tag(tag);
-    read_ops_.RecvMessage(msg);
-    call_.PerformOps(&read_ops_);
-  }
-
-  void Finish(const W& msg, const Status& status, void* tag) {
-    finish_ops_.set_output_tag(tag);
-    if (!ctx_->sent_initial_metadata_) {
-      finish_ops_.SendInitialMetadata(ctx_->initial_metadata_);
-      ctx_->sent_initial_metadata_ = true;
-    }
-    // The response is dropped if the status is not OK.
-    if (status.ok()) {
-      finish_ops_.ServerSendStatus(ctx_->trailing_metadata_,
-                                   finish_ops_.SendMessage(msg));
-    } else {
-      finish_ops_.ServerSendStatus(ctx_->trailing_metadata_, status);
-    }
-    call_.PerformOps(&finish_ops_);
-  }
-
-  void FinishWithError(const Status& status, void* tag) {
-    GPR_ASSERT(!status.ok());
-    finish_ops_.set_output_tag(tag);
-    if (!ctx_->sent_initial_metadata_) {
-      finish_ops_.SendInitialMetadata(ctx_->initial_metadata_);
-      ctx_->sent_initial_metadata_ = true;
-    }
-    finish_ops_.ServerSendStatus(ctx_->trailing_metadata_, status);
-    call_.PerformOps(&finish_ops_);
-  }
-
- private:
-  void BindCall(Call* call) GRPC_OVERRIDE { call_ = *call; }
-
-  Call call_;
-  ServerContext* ctx_;
-  CallOpSet<CallOpSendInitialMetadata> meta_ops_;
-  CallOpSet<CallOpRecvMessage<R>> read_ops_;
-  CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
-            CallOpServerSendStatus> finish_ops_;
-};
-
-template <class W>
-class ServerAsyncWriter GRPC_FINAL : public ServerAsyncStreamingInterface,
-                                     public AsyncWriterInterface<W> {
- public:
-  explicit ServerAsyncWriter(ServerContext* ctx)
-      : call_(nullptr, nullptr, nullptr), ctx_(ctx) {}
-
-  void SendInitialMetadata(void* tag) GRPC_OVERRIDE {
-    GPR_ASSERT(!ctx_->sent_initial_metadata_);
-
-    meta_ops_.set_output_tag(tag);
-    meta_ops_.SendInitialMetadata(ctx_->initial_metadata_);
-    ctx_->sent_initial_metadata_ = true;
-    call_.PerformOps(&meta_ops_);
-  }
-
-  void Write(const W& msg, void* tag) GRPC_OVERRIDE {
-    write_ops_.set_output_tag(tag);
-    if (!ctx_->sent_initial_metadata_) {
-      write_ops_.SendInitialMetadata(ctx_->initial_metadata_);
-      ctx_->sent_initial_metadata_ = true;
-    }
-    // TODO(ctiller): don't assert
-    GPR_ASSERT(write_ops_.SendMessage(msg).ok());
-    call_.PerformOps(&write_ops_);
-  }
-
-  void Finish(const Status& status, void* tag) {
-    finish_ops_.set_output_tag(tag);
-    if (!ctx_->sent_initial_metadata_) {
-      finish_ops_.SendInitialMetadata(ctx_->initial_metadata_);
-      ctx_->sent_initial_metadata_ = true;
-    }
-    finish_ops_.ServerSendStatus(ctx_->trailing_metadata_, status);
-    call_.PerformOps(&finish_ops_);
-  }
-
- private:
-  void BindCall(Call* call) GRPC_OVERRIDE { call_ = *call; }
-
-  Call call_;
-  ServerContext* ctx_;
-  CallOpSet<CallOpSendInitialMetadata> meta_ops_;
-  CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage> write_ops_;
-  CallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus> finish_ops_;
-};
-
-// Server-side interface for bi-directional streaming.
-template <class W, class R>
-class ServerAsyncReaderWriter GRPC_FINAL : public ServerAsyncStreamingInterface,
-                                           public AsyncWriterInterface<W>,
-                                           public AsyncReaderInterface<R> {
- public:
-  explicit ServerAsyncReaderWriter(ServerContext* ctx)
-      : call_(nullptr, nullptr, nullptr), ctx_(ctx) {}
-
-  void SendInitialMetadata(void* tag) GRPC_OVERRIDE {
-    GPR_ASSERT(!ctx_->sent_initial_metadata_);
-
-    meta_ops_.set_output_tag(tag);
-    meta_ops_.SendInitialMetadata(ctx_->initial_metadata_);
-    ctx_->sent_initial_metadata_ = true;
-    call_.PerformOps(&meta_ops_);
-  }
-
-  void Read(R* msg, void* tag) GRPC_OVERRIDE {
-    read_ops_.set_output_tag(tag);
-    read_ops_.RecvMessage(msg);
-    call_.PerformOps(&read_ops_);
-  }
-
-  void Write(const W& msg, void* tag) GRPC_OVERRIDE {
-    write_ops_.set_output_tag(tag);
-    if (!ctx_->sent_initial_metadata_) {
-      write_ops_.SendInitialMetadata(ctx_->initial_metadata_);
-      ctx_->sent_initial_metadata_ = true;
-    }
-    // TODO(ctiller): don't assert
-    GPR_ASSERT(write_ops_.SendMessage(msg).ok());
-    call_.PerformOps(&write_ops_);
-  }
-
-  void Finish(const Status& status, void* tag) {
-    finish_ops_.set_output_tag(tag);
-    if (!ctx_->sent_initial_metadata_) {
-      finish_ops_.SendInitialMetadata(ctx_->initial_metadata_);
-      ctx_->sent_initial_metadata_ = true;
-    }
-    finish_ops_.ServerSendStatus(ctx_->trailing_metadata_, status);
-    call_.PerformOps(&finish_ops_);
-  }
-
- private:
-  void BindCall(Call* call) GRPC_OVERRIDE { call_ = *call; }
-
-  Call call_;
-  ServerContext* ctx_;
-  CallOpSet<CallOpSendInitialMetadata> meta_ops_;
-  CallOpSet<CallOpRecvMessage<R>> read_ops_;
-  CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage> write_ops_;
-  CallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus> finish_ops_;
-};
-
-}  // namespace grpc
-
-#endif  // GRPCXX_STREAM_H
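The monolithic stream.h is deleted. Its synchronous reader/writer classes presumably move to the new support/sync_stream.h (which rpc_service_method.h now includes, above), while the async classes reappear in support/async_stream.h, added below. Under that assumption, the migration for user code is:

    // Before:
    //   #include <grpc++/stream.h>
    // After this change:
    #include <grpc++/support/sync_stream.h>    // ClientReader/Writer, ServerReader/Writer, ...
    #include <grpc++/support/async_stream.h>   // ClientAsyncReader/Writer and ServerAsync* classes
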
diff --git a/include/grpc++/support/async_stream.h b/include/grpc++/support/async_stream.h
new file mode 100644
index 0000000..4c12fda
--- /dev/null
+++ b/include/grpc++/support/async_stream.h
@@ -0,0 +1,436 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPCXX_SUPPORT_ASYNC_STREAM_H
+#define GRPCXX_SUPPORT_ASYNC_STREAM_H
+
+#include <grpc/support/log.h>
+#include <grpc++/channel.h>
+#include <grpc++/client_context.h>
+#include <grpc++/completion_queue.h>
+#include <grpc++/impl/call.h>
+#include <grpc++/impl/service_type.h>
+#include <grpc++/server_context.h>
+#include <grpc++/support/status.h>
+
+namespace grpc {
+
+// Async interfaces
+// Common interface for all client side streaming.
+class ClientAsyncStreamingInterface {
+ public:
+  virtual ~ClientAsyncStreamingInterface() {}
+
+  virtual void ReadInitialMetadata(void* tag) = 0;
+
+  virtual void Finish(Status* status, void* tag) = 0;
+};
+
+// An interface that yields a sequence of R messages.
+template <class R>
+class AsyncReaderInterface {
+ public:
+  virtual ~AsyncReaderInterface() {}
+
+  virtual void Read(R* msg, void* tag) = 0;
+};
+
+// An interface that can be fed a sequence of W messages.
+template <class W>
+class AsyncWriterInterface {
+ public:
+  virtual ~AsyncWriterInterface() {}
+
+  virtual void Write(const W& msg, void* tag) = 0;
+};
+
+template <class R>
+class ClientAsyncReaderInterface : public ClientAsyncStreamingInterface,
+                                   public AsyncReaderInterface<R> {};
+
+template <class R>
+class ClientAsyncReader GRPC_FINAL : public ClientAsyncReaderInterface<R> {
+ public:
+  // Create a stream and write the first request out.
+  template <class W>
+  ClientAsyncReader(Channel* channel, CompletionQueue* cq,
+                    const RpcMethod& method, ClientContext* context,
+                    const W& request, void* tag)
+      : context_(context), call_(channel->CreateCall(method, context, cq)) {
+    init_ops_.set_output_tag(tag);
+    init_ops_.SendInitialMetadata(context->send_initial_metadata_);
+    // TODO(ctiller): don't assert
+    GPR_ASSERT(init_ops_.SendMessage(request).ok());
+    init_ops_.ClientSendClose();
+    call_.PerformOps(&init_ops_);
+  }
+
+  void ReadInitialMetadata(void* tag) GRPC_OVERRIDE {
+    GPR_ASSERT(!context_->initial_metadata_received_);
+
+    meta_ops_.set_output_tag(tag);
+    meta_ops_.RecvInitialMetadata(context_);
+    call_.PerformOps(&meta_ops_);
+  }
+
+  void Read(R* msg, void* tag) GRPC_OVERRIDE {
+    read_ops_.set_output_tag(tag);
+    if (!context_->initial_metadata_received_) {
+      read_ops_.RecvInitialMetadata(context_);
+    }
+    read_ops_.RecvMessage(msg);
+    call_.PerformOps(&read_ops_);
+  }
+
+  void Finish(Status* status, void* tag) GRPC_OVERRIDE {
+    finish_ops_.set_output_tag(tag);
+    if (!context_->initial_metadata_received_) {
+      finish_ops_.RecvInitialMetadata(context_);
+    }
+    finish_ops_.ClientRecvStatus(context_, status);
+    call_.PerformOps(&finish_ops_);
+  }
+
+ private:
+  ClientContext* context_;
+  Call call_;
+  CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage, CallOpClientSendClose>
+      init_ops_;
+  CallOpSet<CallOpRecvInitialMetadata> meta_ops_;
+  CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> read_ops_;
+  CallOpSet<CallOpRecvInitialMetadata, CallOpClientRecvStatus> finish_ops_;
+};
+
+template <class W>
+class ClientAsyncWriterInterface : public ClientAsyncStreamingInterface,
+                                   public AsyncWriterInterface<W> {
+ public:
+  virtual void WritesDone(void* tag) = 0;
+};
+
+template <class W>
+class ClientAsyncWriter GRPC_FINAL : public ClientAsyncWriterInterface<W> {
+ public:
+  template <class R>
+  ClientAsyncWriter(Channel* channel, CompletionQueue* cq,
+                    const RpcMethod& method, ClientContext* context,
+                    R* response, void* tag)
+      : context_(context), call_(channel->CreateCall(method, context, cq)) {
+    finish_ops_.RecvMessage(response);
+
+    init_ops_.set_output_tag(tag);
+    init_ops_.SendInitialMetadata(context->send_initial_metadata_);
+    call_.PerformOps(&init_ops_);
+  }
+
+  void ReadInitialMetadata(void* tag) GRPC_OVERRIDE {
+    GPR_ASSERT(!context_->initial_metadata_received_);
+
+    meta_ops_.set_output_tag(tag);
+    meta_ops_.RecvInitialMetadata(context_);
+    call_.PerformOps(&meta_ops_);
+  }
+
+  void Write(const W& msg, void* tag) GRPC_OVERRIDE {
+    write_ops_.set_output_tag(tag);
+    // TODO(ctiller): don't assert
+    GPR_ASSERT(write_ops_.SendMessage(msg).ok());
+    call_.PerformOps(&write_ops_);
+  }
+
+  void WritesDone(void* tag) GRPC_OVERRIDE {
+    writes_done_ops_.set_output_tag(tag);
+    writes_done_ops_.ClientSendClose();
+    call_.PerformOps(&writes_done_ops_);
+  }
+
+  void Finish(Status* status, void* tag) GRPC_OVERRIDE {
+    finish_ops_.set_output_tag(tag);
+    if (!context_->initial_metadata_received_) {
+      finish_ops_.RecvInitialMetadata(context_);
+    }
+    finish_ops_.ClientRecvStatus(context_, status);
+    call_.PerformOps(&finish_ops_);
+  }
+
+ private:
+  ClientContext* context_;
+  Call call_;
+  CallOpSet<CallOpSendInitialMetadata> init_ops_;
+  CallOpSet<CallOpRecvInitialMetadata> meta_ops_;
+  CallOpSet<CallOpSendMessage> write_ops_;
+  CallOpSet<CallOpClientSendClose> writes_done_ops_;
+  CallOpSet<CallOpRecvInitialMetadata, CallOpGenericRecvMessage,
+            CallOpClientRecvStatus> finish_ops_;
+};
+
+// Client-side interface for bi-directional streaming.
+template <class W, class R>
+class ClientAsyncReaderWriterInterface : public ClientAsyncStreamingInterface,
+                                         public AsyncWriterInterface<W>,
+                                         public AsyncReaderInterface<R> {
+ public:
+  virtual void WritesDone(void* tag) = 0;
+};
+
+template <class W, class R>
+class ClientAsyncReaderWriter GRPC_FINAL
+    : public ClientAsyncReaderWriterInterface<W, R> {
+ public:
+  ClientAsyncReaderWriter(Channel* channel, CompletionQueue* cq,
+                          const RpcMethod& method, ClientContext* context,
+                          void* tag)
+      : context_(context), call_(channel->CreateCall(method, context, cq)) {
+    init_ops_.set_output_tag(tag);
+    init_ops_.SendInitialMetadata(context->send_initial_metadata_);
+    call_.PerformOps(&init_ops_);
+  }
+
+  void ReadInitialMetadata(void* tag) GRPC_OVERRIDE {
+    GPR_ASSERT(!context_->initial_metadata_received_);
+
+    meta_ops_.set_output_tag(tag);
+    meta_ops_.RecvInitialMetadata(context_);
+    call_.PerformOps(&meta_ops_);
+  }
+
+  void Read(R* msg, void* tag) GRPC_OVERRIDE {
+    read_ops_.set_output_tag(tag);
+    if (!context_->initial_metadata_received_) {
+      read_ops_.RecvInitialMetadata(context_);
+    }
+    read_ops_.RecvMessage(msg);
+    call_.PerformOps(&read_ops_);
+  }
+
+  void Write(const W& msg, void* tag) GRPC_OVERRIDE {
+    write_ops_.set_output_tag(tag);
+    // TODO(ctiller): don't assert
+    GPR_ASSERT(write_ops_.SendMessage(msg).ok());
+    call_.PerformOps(&write_ops_);
+  }
+
+  void WritesDone(void* tag) GRPC_OVERRIDE {
+    writes_done_ops_.set_output_tag(tag);
+    writes_done_ops_.ClientSendClose();
+    call_.PerformOps(&writes_done_ops_);
+  }
+
+  void Finish(Status* status, void* tag) GRPC_OVERRIDE {
+    finish_ops_.set_output_tag(tag);
+    if (!context_->initial_metadata_received_) {
+      finish_ops_.RecvInitialMetadata(context_);
+    }
+    finish_ops_.ClientRecvStatus(context_, status);
+    call_.PerformOps(&finish_ops_);
+  }
+
+ private:
+  ClientContext* context_;
+  Call call_;
+  CallOpSet<CallOpSendInitialMetadata> init_ops_;
+  CallOpSet<CallOpRecvInitialMetadata> meta_ops_;
+  CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> read_ops_;
+  CallOpSet<CallOpSendMessage> write_ops_;
+  CallOpSet<CallOpClientSendClose> writes_done_ops_;
+  CallOpSet<CallOpRecvInitialMetadata, CallOpClientRecvStatus> finish_ops_;
+};
+
+template <class W, class R>
+class ServerAsyncReader GRPC_FINAL : public ServerAsyncStreamingInterface,
+                                     public AsyncReaderInterface<R> {
+ public:
+  explicit ServerAsyncReader(ServerContext* ctx)
+      : call_(nullptr, nullptr, nullptr), ctx_(ctx) {}
+
+  void SendInitialMetadata(void* tag) GRPC_OVERRIDE {
+    GPR_ASSERT(!ctx_->sent_initial_metadata_);
+
+    meta_ops_.set_output_tag(tag);
+    meta_ops_.SendInitialMetadata(ctx_->initial_metadata_);
+    ctx_->sent_initial_metadata_ = true;
+    call_.PerformOps(&meta_ops_);
+  }
+
+  void Read(R* msg, void* tag) GRPC_OVERRIDE {
+    read_ops_.set_output_tag(tag);
+    read_ops_.RecvMessage(msg);
+    call_.PerformOps(&read_ops_);
+  }
+
+  void Finish(const W& msg, const Status& status, void* tag) {
+    finish_ops_.set_output_tag(tag);
+    if (!ctx_->sent_initial_metadata_) {
+      finish_ops_.SendInitialMetadata(ctx_->initial_metadata_);
+      ctx_->sent_initial_metadata_ = true;
+    }
+    // The response is dropped if the status is not OK.
+    if (status.ok()) {
+      finish_ops_.ServerSendStatus(ctx_->trailing_metadata_,
+                                   finish_ops_.SendMessage(msg));
+    } else {
+      finish_ops_.ServerSendStatus(ctx_->trailing_metadata_, status);
+    }
+    call_.PerformOps(&finish_ops_);
+  }
+
+  void FinishWithError(const Status& status, void* tag) {
+    GPR_ASSERT(!status.ok());
+    finish_ops_.set_output_tag(tag);
+    if (!ctx_->sent_initial_metadata_) {
+      finish_ops_.SendInitialMetadata(ctx_->initial_metadata_);
+      ctx_->sent_initial_metadata_ = true;
+    }
+    finish_ops_.ServerSendStatus(ctx_->trailing_metadata_, status);
+    call_.PerformOps(&finish_ops_);
+  }
+
+ private:
+  void BindCall(Call* call) GRPC_OVERRIDE { call_ = *call; }
+
+  Call call_;
+  ServerContext* ctx_;
+  CallOpSet<CallOpSendInitialMetadata> meta_ops_;
+  CallOpSet<CallOpRecvMessage<R>> read_ops_;
+  CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
+            CallOpServerSendStatus> finish_ops_;
+};
+
+template <class W>
+class ServerAsyncWriter GRPC_FINAL : public ServerAsyncStreamingInterface,
+                                     public AsyncWriterInterface<W> {
+ public:
+  explicit ServerAsyncWriter(ServerContext* ctx)
+      : call_(nullptr, nullptr, nullptr), ctx_(ctx) {}
+
+  void SendInitialMetadata(void* tag) GRPC_OVERRIDE {
+    GPR_ASSERT(!ctx_->sent_initial_metadata_);
+
+    meta_ops_.set_output_tag(tag);
+    meta_ops_.SendInitialMetadata(ctx_->initial_metadata_);
+    ctx_->sent_initial_metadata_ = true;
+    call_.PerformOps(&meta_ops_);
+  }
+
+  void Write(const W& msg, void* tag) GRPC_OVERRIDE {
+    write_ops_.set_output_tag(tag);
+    if (!ctx_->sent_initial_metadata_) {
+      write_ops_.SendInitialMetadata(ctx_->initial_metadata_);
+      ctx_->sent_initial_metadata_ = true;
+    }
+    // TODO(ctiller): don't assert
+    GPR_ASSERT(write_ops_.SendMessage(msg).ok());
+    call_.PerformOps(&write_ops_);
+  }
+
+  void Finish(const Status& status, void* tag) {
+    finish_ops_.set_output_tag(tag);
+    if (!ctx_->sent_initial_metadata_) {
+      finish_ops_.SendInitialMetadata(ctx_->initial_metadata_);
+      ctx_->sent_initial_metadata_ = true;
+    }
+    finish_ops_.ServerSendStatus(ctx_->trailing_metadata_, status);
+    call_.PerformOps(&finish_ops_);
+  }
+
+ private:
+  void BindCall(Call* call) GRPC_OVERRIDE { call_ = *call; }
+
+  Call call_;
+  ServerContext* ctx_;
+  CallOpSet<CallOpSendInitialMetadata> meta_ops_;
+  CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage> write_ops_;
+  CallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus> finish_ops_;
+};
+
+// Server-side interface for bi-directional streaming.
+template <class W, class R>
+class ServerAsyncReaderWriter GRPC_FINAL : public ServerAsyncStreamingInterface,
+                                           public AsyncWriterInterface<W>,
+                                           public AsyncReaderInterface<R> {
+ public:
+  explicit ServerAsyncReaderWriter(ServerContext* ctx)
+      : call_(nullptr, nullptr, nullptr), ctx_(ctx) {}
+
+  void SendInitialMetadata(void* tag) GRPC_OVERRIDE {
+    GPR_ASSERT(!ctx_->sent_initial_metadata_);
+
+    meta_ops_.set_output_tag(tag);
+    meta_ops_.SendInitialMetadata(ctx_->initial_metadata_);
+    ctx_->sent_initial_metadata_ = true;
+    call_.PerformOps(&meta_ops_);
+  }
+
+  void Read(R* msg, void* tag) GRPC_OVERRIDE {
+    read_ops_.set_output_tag(tag);
+    read_ops_.RecvMessage(msg);
+    call_.PerformOps(&read_ops_);
+  }
+
+  void Write(const W& msg, void* tag) GRPC_OVERRIDE {
+    write_ops_.set_output_tag(tag);
+    if (!ctx_->sent_initial_metadata_) {
+      write_ops_.SendInitialMetadata(ctx_->initial_metadata_);
+      ctx_->sent_initial_metadata_ = true;
+    }
+    // TODO(ctiller): don't assert
+    GPR_ASSERT(write_ops_.SendMessage(msg).ok());
+    call_.PerformOps(&write_ops_);
+  }
+
+  void Finish(const Status& status, void* tag) {
+    finish_ops_.set_output_tag(tag);
+    if (!ctx_->sent_initial_metadata_) {
+      finish_ops_.SendInitialMetadata(ctx_->initial_metadata_);
+      ctx_->sent_initial_metadata_ = true;
+    }
+    finish_ops_.ServerSendStatus(ctx_->trailing_metadata_, status);
+    call_.PerformOps(&finish_ops_);
+  }
+
+ private:
+  friend class ::grpc::Server;
+
+  void BindCall(Call* call) GRPC_OVERRIDE { call_ = *call; }
+
+  Call call_;
+  ServerContext* ctx_;
+  CallOpSet<CallOpSendInitialMetadata> meta_ops_;
+  CallOpSet<CallOpRecvMessage<R>> read_ops_;
+  CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage> write_ops_;
+  CallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus> finish_ops_;
+};
+
+}  // namespace grpc
+
+#endif  // GRPCXX_SUPPORT_ASYNC_STREAM_H
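
A minimal usage sketch of the ServerAsyncWriter<W> declared above (assuming a hypothetical protobuf message Feature; not literal code from this patch). Every operation enqueues exactly one completion on the server's completion queue under the supplied tag, and the previous tag must be drained before the next operation is issued on the same stream:

#include <grpc++/support/async_stream.h>
#include <grpc++/support/status.h>

// Feature is a hypothetical protobuf message type.
void BeginWrite(grpc::ServerAsyncWriter<Feature>* writer, const Feature& f,
                void* tag) {
  // Initial metadata is sent lazily with the first Write if not sent already.
  writer->Write(f, tag);
}

void FinishStream(grpc::ServerAsyncWriter<Feature>* writer, void* tag) {
  // Call only after the previous operation's tag has been drained from the cq.
  writer->Finish(grpc::Status::OK, tag);
}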
diff --git a/include/grpc++/async_unary_call.h b/include/grpc++/support/async_unary_call.h
similarity index 95%
rename from include/grpc++/async_unary_call.h
rename to include/grpc++/support/async_unary_call.h
index 3d22df4..0f4ad26 100644
--- a/include/grpc++/async_unary_call.h
+++ b/include/grpc++/support/async_unary_call.h
@@ -31,17 +31,17 @@
  *
  */
 
-#ifndef GRPCXX_ASYNC_UNARY_CALL_H
-#define GRPCXX_ASYNC_UNARY_CALL_H
+#ifndef GRPCXX_SUPPORT_ASYNC_UNARY_CALL_H
+#define GRPCXX_SUPPORT_ASYNC_UNARY_CALL_H
 
-#include <grpc++/channel_interface.h>
+#include <grpc/support/log.h>
+#include <grpc++/channel.h>
 #include <grpc++/client_context.h>
 #include <grpc++/completion_queue.h>
 #include <grpc++/server_context.h>
 #include <grpc++/impl/call.h>
 #include <grpc++/impl/service_type.h>
-#include <grpc++/status.h>
-#include <grpc/support/log.h>
+#include <grpc++/support/status.h>
 
 namespace grpc {
 
@@ -58,7 +58,7 @@
     : public ClientAsyncResponseReaderInterface<R> {
  public:
   template <class W>
-  ClientAsyncResponseReader(ChannelInterface* channel, CompletionQueue* cq,
+  ClientAsyncResponseReader(Channel* channel, CompletionQueue* cq,
                             const RpcMethod& method, ClientContext* context,
                             const W& request)
       : context_(context), call_(channel->CreateCall(method, context, cq)) {
@@ -152,4 +152,4 @@
 
 }  // namespace grpc
 
-#endif  // GRPCXX_ASYNC_UNARY_CALL_H
+#endif  // GRPCXX_SUPPORT_ASYNC_UNARY_CALL_H
diff --git a/include/grpc++/auth_context.h b/include/grpc++/support/auth_context.h
similarity index 95%
rename from include/grpc++/auth_context.h
rename to include/grpc++/support/auth_context.h
index 7dced90..f4f2dcf 100644
--- a/include/grpc++/auth_context.h
+++ b/include/grpc++/support/auth_context.h
@@ -31,13 +31,13 @@
  *
  */
 
-#ifndef GRPCXX_AUTH_CONTEXT_H
-#define GRPCXX_AUTH_CONTEXT_H
+#ifndef GRPCXX_SUPPORT_AUTH_CONTEXT_H
+#define GRPCXX_SUPPORT_AUTH_CONTEXT_H
 
 #include <iterator>
 #include <vector>
 
-#include <grpc++/config.h>
+#include <grpc++/support/config.h>
 
 struct grpc_auth_context;
 struct grpc_auth_property;
@@ -92,4 +92,4 @@
 
 }  // namespace grpc
 
-#endif  // GRPCXX_AUTH_CONTEXT_H
+#endif  // GRPCXX_SUPPORT_AUTH_CONTEXT_H
diff --git a/include/grpc++/byte_buffer.h b/include/grpc++/support/byte_buffer.h
similarity index 92%
rename from include/grpc++/byte_buffer.h
rename to include/grpc++/support/byte_buffer.h
index 6467776..3f8cc25 100644
--- a/include/grpc++/byte_buffer.h
+++ b/include/grpc++/support/byte_buffer.h
@@ -31,16 +31,16 @@
  *
  */
 
-#ifndef GRPCXX_BYTE_BUFFER_H
-#define GRPCXX_BYTE_BUFFER_H
+#ifndef GRPCXX_SUPPORT_BYTE_BUFFER_H
+#define GRPCXX_SUPPORT_BYTE_BUFFER_H
 
 #include <grpc/grpc.h>
 #include <grpc/byte_buffer.h>
 #include <grpc/support/log.h>
-#include <grpc++/config.h>
-#include <grpc++/slice.h>
-#include <grpc++/status.h>
 #include <grpc++/impl/serialization_traits.h>
+#include <grpc++/support/config.h>
+#include <grpc++/support/slice.h>
+#include <grpc++/support/status.h>
 
 #include <vector>
 
@@ -101,4 +101,4 @@
 
 }  // namespace grpc
 
-#endif  // GRPCXX_BYTE_BUFFER_H
+#endif  // GRPCXX_SUPPORT_BYTE_BUFFER_H
diff --git a/include/grpc++/channel_arguments.h b/include/grpc++/support/channel_arguments.h
similarity index 94%
rename from include/grpc++/channel_arguments.h
rename to include/grpc++/support/channel_arguments.h
index 4d92637..cee6846 100644
--- a/include/grpc++/channel_arguments.h
+++ b/include/grpc++/support/channel_arguments.h
@@ -31,15 +31,15 @@
  *
  */
 
-#ifndef GRPCXX_CHANNEL_ARGUMENTS_H
-#define GRPCXX_CHANNEL_ARGUMENTS_H
+#ifndef GRPCXX_SUPPORT_CHANNEL_ARGUMENTS_H
+#define GRPCXX_SUPPORT_CHANNEL_ARGUMENTS_H
 
 #include <vector>
 #include <list>
 
-#include <grpc++/config.h>
 #include <grpc/compression.h>
 #include <grpc/grpc.h>
+#include <grpc++/support/config.h>
 
 namespace grpc {
 namespace testing {
@@ -90,4 +90,4 @@
 
 }  // namespace grpc
 
-#endif  // GRPCXX_CHANNEL_ARGUMENTS_H
+#endif  // GRPCXX_SUPPORT_CHANNEL_ARGUMENTS_H
diff --git a/include/grpc++/config.h b/include/grpc++/support/config.h
similarity index 96%
rename from include/grpc++/config.h
rename to include/grpc++/support/config.h
index 889dc39..836bd47 100644
--- a/include/grpc++/config.h
+++ b/include/grpc++/support/config.h
@@ -31,8 +31,8 @@
  *
  */
 
-#ifndef GRPCXX_CONFIG_H
-#define GRPCXX_CONFIG_H
+#ifndef GRPCXX_SUPPORT_CONFIG_H
+#define GRPCXX_SUPPORT_CONFIG_H
 
 #if !defined(GRPC_NO_AUTODETECT_PLATFORM)
 
@@ -113,4 +113,4 @@
 
 }  // namespace grpc
 
-#endif  // GRPCXX_CONFIG_H
+#endif  // GRPCXX_SUPPORT_CONFIG_H
diff --git a/include/grpc++/config_protobuf.h b/include/grpc++/support/config_protobuf.h
similarity index 95%
rename from include/grpc++/config_protobuf.h
rename to include/grpc++/support/config_protobuf.h
index 3afc7a5..8235590 100644
--- a/include/grpc++/config_protobuf.h
+++ b/include/grpc++/support/config_protobuf.h
@@ -31,8 +31,8 @@
  *
  */
 
-#ifndef GRPCXX_CONFIG_PROTOBUF_H
-#define GRPCXX_CONFIG_PROTOBUF_H
+#ifndef GRPCXX_SUPPORT_CONFIG_PROTOBUF_H
+#define GRPCXX_SUPPORT_CONFIG_PROTOBUF_H
 
 #ifndef GRPC_CUSTOM_PROTOBUF_INT64
 #include <google/protobuf/stubs/common.h>
@@ -69,4 +69,4 @@
 }  // namespace protobuf
 }  // namespace grpc
 
-#endif  // GRPCXX_CONFIG_PROTOBUF_H
+#endif  // GRPCXX_SUPPORT_CONFIG_PROTOBUF_H
diff --git a/include/grpc++/slice.h b/include/grpc++/support/slice.h
similarity index 94%
rename from include/grpc++/slice.h
rename to include/grpc++/support/slice.h
index 3e01bcf..b2343a7 100644
--- a/include/grpc++/slice.h
+++ b/include/grpc++/support/slice.h
@@ -31,11 +31,11 @@
  *
  */
 
-#ifndef GRPCXX_SLICE_H
-#define GRPCXX_SLICE_H
+#ifndef GRPCXX_SUPPORT_SLICE_H
+#define GRPCXX_SUPPORT_SLICE_H
 
 #include <grpc/support/slice.h>
-#include <grpc++/config.h>
+#include <grpc++/support/config.h>
 
 namespace grpc {
 
@@ -71,4 +71,4 @@
 
 }  // namespace grpc
 
-#endif  // GRPCXX_SLICE_H
+#endif  // GRPCXX_SUPPORT_SLICE_H
diff --git a/include/grpc++/status.h b/include/grpc++/support/status.h
similarity index 91%
rename from include/grpc++/status.h
rename to include/grpc++/support/status.h
index fb8526d..05750ff 100644
--- a/include/grpc++/status.h
+++ b/include/grpc++/support/status.h
@@ -31,11 +31,11 @@
  *
  */
 
-#ifndef GRPCXX_STATUS_H
-#define GRPCXX_STATUS_H
+#ifndef GRPCXX_SUPPORT_STATUS_H
+#define GRPCXX_SUPPORT_STATUS_H
 
-#include <grpc++/status_code_enum.h>
-#include <grpc++/config.h>
+#include <grpc++/support/config.h>
+#include <grpc++/support/status_code_enum.h>
 
 namespace grpc {
 
@@ -61,4 +61,4 @@
 
 }  // namespace grpc
 
-#endif  // GRPCXX_STATUS_H
+#endif  // GRPCXX_SUPPORT_STATUS_H
diff --git a/include/grpc++/status_code_enum.h b/include/grpc++/support/status_code_enum.h
similarity index 97%
rename from include/grpc++/status_code_enum.h
rename to include/grpc++/support/status_code_enum.h
index 2211c96..7cb4045 100644
--- a/include/grpc++/status_code_enum.h
+++ b/include/grpc++/support/status_code_enum.h
@@ -31,8 +31,8 @@
  *
  */
 
-#ifndef GRPCXX_STATUS_CODE_ENUM_H
-#define GRPCXX_STATUS_CODE_ENUM_H
+#ifndef GRPCXX_SUPPORT_STATUS_CODE_ENUM_H
+#define GRPCXX_SUPPORT_STATUS_CODE_ENUM_H
 
 namespace grpc {
 
@@ -156,4 +156,4 @@
 
 }  // namespace grpc
 
-#endif  // GRPCXX_STATUS_CODE_ENUM_H
+#endif  // GRPCXX_SUPPORT_STATUS_CODE_ENUM_H
diff --git a/include/grpc++/stub_options.h b/include/grpc++/support/stub_options.h
similarity index 93%
rename from include/grpc++/stub_options.h
rename to include/grpc++/support/stub_options.h
index c7c16dc..973aa9b 100644
--- a/include/grpc++/stub_options.h
+++ b/include/grpc++/support/stub_options.h
@@ -31,8 +31,8 @@
  *
  */
 
-#ifndef GRPCXX_STUB_OPTIONS_H
-#define GRPCXX_STUB_OPTIONS_H
+#ifndef GRPCXX_SUPPORT_STUB_OPTIONS_H
+#define GRPCXX_SUPPORT_STUB_OPTIONS_H
 
 namespace grpc {
 
@@ -40,4 +40,4 @@
 
 }  // namespace grpc
 
-#endif  // GRPCXX_STUB_OPTIONS_H
+#endif  // GRPCXX_SUPPORT_STUB_OPTIONS_H
diff --git a/include/grpc++/support/sync_stream.h b/include/grpc++/support/sync_stream.h
new file mode 100644
index 0000000..b4bb637
--- /dev/null
+++ b/include/grpc++/support/sync_stream.h
@@ -0,0 +1,392 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPCXX_SUPPORT_SYNC_STREAM_H
+#define GRPCXX_SUPPORT_SYNC_STREAM_H
+
+#include <grpc/support/log.h>
+#include <grpc++/channel.h>
+#include <grpc++/client_context.h>
+#include <grpc++/completion_queue.h>
+#include <grpc++/impl/call.h>
+#include <grpc++/impl/service_type.h>
+#include <grpc++/server_context.h>
+#include <grpc++/support/status.h>
+
+namespace grpc {
+
+// Common interface for all client side streaming.
+class ClientStreamingInterface {
+ public:
+  virtual ~ClientStreamingInterface() {}
+
+  // Wait until the stream finishes, and return the final status. When the
+  // client side declares it has no more messages to send, either implicitly
+  // or by calling WritesDone(), it needs to make sure there are no more
+  // messages to be received from the server, either implicitly or by getting
+  // a false return from a Read().
+  // This function will return either:
+  // - when all incoming messages have been read and the server has returned
+  //   its status, or
+  // - when the server has returned a non-OK status.
+  virtual Status Finish() = 0;
+};
+
+// An interface that yields a sequence of R messages.
+template <class R>
+class ReaderInterface {
+ public:
+  virtual ~ReaderInterface() {}
+
+  // Blocking read of a message, parsed into msg. Returns true on success.
+  // The method returns false when there will be no more incoming messages,
+  // either because the other side has called WritesDone() or because the
+  // stream has failed (or been cancelled).
+  virtual bool Read(R* msg) = 0;
+};
+
+// An interface that can be fed a sequence of W messages.
+template <class W>
+class WriterInterface {
+ public:
+  virtual ~WriterInterface() {}
+
+  // Blocking write of msg to the stream. Returns true on success.
+  // Returns false when the stream has been closed.
+  virtual bool Write(const W& msg, const WriteOptions& options) = 0;
+
+  inline bool Write(const W& msg) { return Write(msg, WriteOptions()); }
+};
+
+template <class R>
+class ClientReaderInterface : public ClientStreamingInterface,
+                              public ReaderInterface<R> {
+ public:
+  virtual void WaitForInitialMetadata() = 0;
+};
+
+template <class R>
+class ClientReader GRPC_FINAL : public ClientReaderInterface<R> {
+ public:
+  // Create a stream and write the first request out, blocking until done.
+  template <class W>
+  ClientReader(Channel* channel, const RpcMethod& method,
+               ClientContext* context, const W& request)
+      : context_(context), call_(channel->CreateCall(method, context, &cq_)) {
+    CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
+              CallOpClientSendClose> ops;
+    ops.SendInitialMetadata(context->send_initial_metadata_);
+    // TODO(ctiller): don't assert
+    GPR_ASSERT(ops.SendMessage(request).ok());
+    ops.ClientSendClose();
+    call_.PerformOps(&ops);
+    cq_.Pluck(&ops);
+  }
+
+  // Blocking wait for initial metadata from the server. The received metadata
+  // can only be accessed after this call returns. Should only be called before
+  // the first read. Calling this method is optional, and if it is not called
+  // the metadata will be available in the ClientContext after the first read.
+  void WaitForInitialMetadata() {
+    GPR_ASSERT(!context_->initial_metadata_received_);
+
+    CallOpSet<CallOpRecvInitialMetadata> ops;
+    ops.RecvInitialMetadata(context_);
+    call_.PerformOps(&ops);
+    cq_.Pluck(&ops);  // status ignored
+  }
+
+  bool Read(R* msg) GRPC_OVERRIDE {
+    CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> ops;
+    if (!context_->initial_metadata_received_) {
+      ops.RecvInitialMetadata(context_);
+    }
+    ops.RecvMessage(msg);
+    call_.PerformOps(&ops);
+    return cq_.Pluck(&ops) && ops.got_message;
+  }
+
+  Status Finish() GRPC_OVERRIDE {
+    CallOpSet<CallOpClientRecvStatus> ops;
+    Status status;
+    ops.ClientRecvStatus(context_, &status);
+    call_.PerformOps(&ops);
+    GPR_ASSERT(cq_.Pluck(&ops));
+    return status;
+  }
+
+ private:
+  ClientContext* context_;
+  CompletionQueue cq_;
+  Call call_;
+};
+
+template <class W>
+class ClientWriterInterface : public ClientStreamingInterface,
+                              public WriterInterface<W> {
+ public:
+  virtual bool WritesDone() = 0;
+};
+
+template <class W>
+class ClientWriter : public ClientWriterInterface<W> {
+ public:
+  // Create a stream, blocking until the initial metadata has been sent.
+  template <class R>
+  ClientWriter(Channel* channel, const RpcMethod& method,
+               ClientContext* context, R* response)
+      : context_(context), call_(channel->CreateCall(method, context, &cq_)) {
+    finish_ops_.RecvMessage(response);
+
+    CallOpSet<CallOpSendInitialMetadata> ops;
+    ops.SendInitialMetadata(context->send_initial_metadata_);
+    call_.PerformOps(&ops);
+    cq_.Pluck(&ops);
+  }
+
+  using WriterInterface<W>::Write;
+  bool Write(const W& msg, const WriteOptions& options) GRPC_OVERRIDE {
+    CallOpSet<CallOpSendMessage> ops;
+    if (!ops.SendMessage(msg, options).ok()) {
+      return false;
+    }
+    call_.PerformOps(&ops);
+    return cq_.Pluck(&ops);
+  }
+
+  bool WritesDone() GRPC_OVERRIDE {
+    CallOpSet<CallOpClientSendClose> ops;
+    ops.ClientSendClose();
+    call_.PerformOps(&ops);
+    return cq_.Pluck(&ops);
+  }
+
+  // Read the final response and wait for the final status.
+  Status Finish() GRPC_OVERRIDE {
+    Status status;
+    finish_ops_.ClientRecvStatus(context_, &status);
+    call_.PerformOps(&finish_ops_);
+    GPR_ASSERT(cq_.Pluck(&finish_ops_));
+    return status;
+  }
+
+ private:
+  ClientContext* context_;
+  CallOpSet<CallOpGenericRecvMessage, CallOpClientRecvStatus> finish_ops_;
+  CompletionQueue cq_;
+  Call call_;
+};
+
+// Client-side interface for bi-directional streaming.
+template <class W, class R>
+class ClientReaderWriterInterface : public ClientStreamingInterface,
+                                    public WriterInterface<W>,
+                                    public ReaderInterface<R> {
+ public:
+  virtual void WaitForInitialMetadata() = 0;
+  virtual bool WritesDone() = 0;
+};
+
+template <class W, class R>
+class ClientReaderWriter GRPC_FINAL : public ClientReaderWriterInterface<W, R> {
+ public:
+  // Create a stream, blocking until the initial metadata has been sent.
+  ClientReaderWriter(Channel* channel, const RpcMethod& method,
+                     ClientContext* context)
+      : context_(context), call_(channel->CreateCall(method, context, &cq_)) {
+    CallOpSet<CallOpSendInitialMetadata> ops;
+    ops.SendInitialMetadata(context->send_initial_metadata_);
+    call_.PerformOps(&ops);
+    cq_.Pluck(&ops);
+  }
+
+  // Blocking wait for initial metadata from the server. The received metadata
+  // can only be accessed after this call returns. Should only be called before
+  // the first read. Calling this method is optional, and if it is not called
+  // the metadata will be available in the ClientContext after the first read.
+  void WaitForInitialMetadata() {
+    GPR_ASSERT(!context_->initial_metadata_received_);
+
+    CallOpSet<CallOpRecvInitialMetadata> ops;
+    ops.RecvInitialMetadata(context_);
+    call_.PerformOps(&ops);
+    cq_.Pluck(&ops);  // status ignored
+  }
+
+  bool Read(R* msg) GRPC_OVERRIDE {
+    CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> ops;
+    if (!context_->initial_metadata_received_) {
+      ops.RecvInitialMetadata(context_);
+    }
+    ops.RecvMessage(msg);
+    call_.PerformOps(&ops);
+    return cq_.Pluck(&ops) && ops.got_message;
+  }
+
+  using WriterInterface<W>::Write;
+  bool Write(const W& msg, const WriteOptions& options) GRPC_OVERRIDE {
+    CallOpSet<CallOpSendMessage> ops;
+    if (!ops.SendMessage(msg, options).ok()) return false;
+    call_.PerformOps(&ops);
+    return cq_.Pluck(&ops);
+  }
+
+  bool WritesDone() GRPC_OVERRIDE {
+    CallOpSet<CallOpClientSendClose> ops;
+    ops.ClientSendClose();
+    call_.PerformOps(&ops);
+    return cq_.Pluck(&ops);
+  }
+
+  Status Finish() GRPC_OVERRIDE {
+    CallOpSet<CallOpClientRecvStatus> ops;
+    Status status;
+    ops.ClientRecvStatus(context_, &status);
+    call_.PerformOps(&ops);
+    GPR_ASSERT(cq_.Pluck(&ops));
+    return status;
+  }
+
+ private:
+  ClientContext* context_;
+  CompletionQueue cq_;
+  Call call_;
+};
+
+template <class R>
+class ServerReader GRPC_FINAL : public ReaderInterface<R> {
+ public:
+  ServerReader(Call* call, ServerContext* ctx) : call_(call), ctx_(ctx) {}
+
+  void SendInitialMetadata() {
+    GPR_ASSERT(!ctx_->sent_initial_metadata_);
+
+    CallOpSet<CallOpSendInitialMetadata> ops;
+    ops.SendInitialMetadata(ctx_->initial_metadata_);
+    ctx_->sent_initial_metadata_ = true;
+    call_->PerformOps(&ops);
+    call_->cq()->Pluck(&ops);
+  }
+
+  bool Read(R* msg) GRPC_OVERRIDE {
+    CallOpSet<CallOpRecvMessage<R>> ops;
+    ops.RecvMessage(msg);
+    call_->PerformOps(&ops);
+    return call_->cq()->Pluck(&ops) && ops.got_message;
+  }
+
+ private:
+  Call* const call_;
+  ServerContext* const ctx_;
+};
+
+template <class W>
+class ServerWriter GRPC_FINAL : public WriterInterface<W> {
+ public:
+  ServerWriter(Call* call, ServerContext* ctx) : call_(call), ctx_(ctx) {}
+
+  void SendInitialMetadata() {
+    GPR_ASSERT(!ctx_->sent_initial_metadata_);
+
+    CallOpSet<CallOpSendInitialMetadata> ops;
+    ops.SendInitialMetadata(ctx_->initial_metadata_);
+    ctx_->sent_initial_metadata_ = true;
+    call_->PerformOps(&ops);
+    call_->cq()->Pluck(&ops);
+  }
+
+  using WriterInterface<W>::Write;
+  bool Write(const W& msg, const WriteOptions& options) GRPC_OVERRIDE {
+    CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage> ops;
+    if (!ops.SendMessage(msg, options).ok()) {
+      return false;
+    }
+    if (!ctx_->sent_initial_metadata_) {
+      ops.SendInitialMetadata(ctx_->initial_metadata_);
+      ctx_->sent_initial_metadata_ = true;
+    }
+    call_->PerformOps(&ops);
+    return call_->cq()->Pluck(&ops);
+  }
+
+ private:
+  Call* const call_;
+  ServerContext* const ctx_;
+};
+
+// Server-side interface for bi-directional streaming.
+template <class W, class R>
+class ServerReaderWriter GRPC_FINAL : public WriterInterface<W>,
+                                      public ReaderInterface<R> {
+ public:
+  ServerReaderWriter(Call* call, ServerContext* ctx) : call_(call), ctx_(ctx) {}
+
+  void SendInitialMetadata() {
+    GPR_ASSERT(!ctx_->sent_initial_metadata_);
+
+    CallOpSet<CallOpSendInitialMetadata> ops;
+    ops.SendInitialMetadata(ctx_->initial_metadata_);
+    ctx_->sent_initial_metadata_ = true;
+    call_->PerformOps(&ops);
+    call_->cq()->Pluck(&ops);
+  }
+
+  bool Read(R* msg) GRPC_OVERRIDE {
+    CallOpSet<CallOpRecvMessage<R>> ops;
+    ops.RecvMessage(msg);
+    call_->PerformOps(&ops);
+    return call_->cq()->Pluck(&ops) && ops.got_message;
+  }
+
+  using WriterInterface<W>::Write;
+  bool Write(const W& msg, const WriteOptions& options) GRPC_OVERRIDE {
+    CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage> ops;
+    if (!ops.SendMessage(msg, options).ok()) {
+      return false;
+    }
+    if (!ctx_->sent_initial_metadata_) {
+      ops.SendInitialMetadata(ctx_->initial_metadata_);
+      ctx_->sent_initial_metadata_ = true;
+    }
+    call_->PerformOps(&ops);
+    return call_->cq()->Pluck(&ops);
+  }
+
+ private:
+  Call* const call_;
+  ServerContext* const ctx_;
+};
+
+}  // namespace grpc
+
+#endif  // GRPCXX_SUPPORT_SYNC_STREAM_H
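
A minimal sketch of how the synchronous classes above are used in a server-side bidirectional handler (assuming a hypothetical EchoRequest/EchoResponse protobuf pair; not literal code from this patch):

#include <grpc++/server_context.h>
#include <grpc++/support/status.h>
#include <grpc++/support/sync_stream.h>

// EchoRequest/EchoResponse are hypothetical protobuf messages.
grpc::Status HandleEcho(
    grpc::ServerContext* ctx,
    grpc::ServerReaderWriter<EchoResponse, EchoRequest>* stream) {
  EchoRequest req;
  while (stream->Read(&req)) {          // false once the client calls WritesDone()
    EchoResponse resp;
    resp.set_message(req.message());    // hypothetical proto accessors
    if (!stream->Write(resp)) break;    // false if the stream has broken
  }
  return grpc::Status::OK;
}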
diff --git a/include/grpc++/time.h b/include/grpc++/support/time.h
similarity index 96%
rename from include/grpc++/time.h
rename to include/grpc++/support/time.h
index 8fb2f85..2d4196b 100644
--- a/include/grpc++/time.h
+++ b/include/grpc++/support/time.h
@@ -31,10 +31,10 @@
  *
  */
 
-#ifndef GRPCXX_TIME_H
-#define GRPCXX_TIME_H
+#ifndef GRPCXX_SUPPORT_TIME_H
+#define GRPCXX_SUPPORT_TIME_H
 
-#include <grpc++/config.h>
+#include <grpc++/support/config.h>
 
 namespace grpc {
 
@@ -107,4 +107,4 @@
 
 #endif  // !GRPC_CXX0X_NO_CHRONO
 
-#endif  // GRPCXX_TIME_H
+#endif  // GRPCXX_SUPPORT_TIME_H
diff --git a/include/grpc/compression.h b/include/grpc/compression.h
index 9924bae..82e326f 100644
--- a/include/grpc/compression.h
+++ b/include/grpc/compression.h
@@ -36,12 +36,15 @@
 
 #include <stdlib.h>
 
+#include <grpc/support/port_platform.h>
+
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 /** To be used in channel arguments */
 #define GRPC_COMPRESSION_ALGORITHM_ARG "grpc.compression_algorithm"
+#define GRPC_COMPRESSION_ALGORITHM_STATE_ARG "grpc.compression_algorithm_state"
 
 /* The various compression algorithms supported by GRPC */
 typedef enum {
@@ -60,6 +63,11 @@
   GRPC_COMPRESS_LEVEL_COUNT
 } grpc_compression_level;
 
+typedef struct grpc_compression_options {
+  gpr_uint32 enabled_algorithms_bitset; /**< All algs are enabled by default */
+  grpc_compression_algorithm default_compression_algorithm; /**< for channel */
+} grpc_compression_options;
+
 /** Parses the first \a name_length bytes of \a name as a
  * grpc_compression_algorithm instance, updating \a algorithm. Returns 1 upon
  * success, 0 otherwise. */
@@ -67,9 +75,7 @@
                                      grpc_compression_algorithm *algorithm);
 
 /** Updates \a name with the encoding name corresponding to a valid \a
- * algorithm. Note that the string returned through \a name upon success is
- * statically allocated and shouldn't be freed. Returns 1 upon success, 0
- * otherwise. */
+ * algorithm.  Returns 1 upon success, 0 otherwise. */
 int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm,
                                     char **name);
 
@@ -85,6 +91,20 @@
 grpc_compression_algorithm grpc_compression_algorithm_for_level(
     grpc_compression_level level);
 
+void grpc_compression_options_init(grpc_compression_options *opts);
+
+/** Mark \a algorithm as enabled in \a opts. */
+void grpc_compression_options_enable_algorithm(
+     grpc_compression_options *opts, grpc_compression_algorithm algorithm);
+
+/** Mark \a algorithm as disabled in \a opts. */
+void grpc_compression_options_disable_algorithm(
+    grpc_compression_options *opts, grpc_compression_algorithm algorithm);
+
+/** Returns true if \a algorithm is marked as enabled in \a opts. */
+int grpc_compression_options_is_algorithm_enabled(
+    const grpc_compression_options *opts, grpc_compression_algorithm algorithm);
+
 #ifdef __cplusplus
 }
 #endif
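
A short sketch of the grpc_compression_options workflow declared above (GRPC_COMPRESS_GZIP is assumed from the existing grpc_compression_algorithm enum):

#include <grpc/compression.h>

void disable_gzip_example(void) {
  grpc_compression_options opts;
  grpc_compression_options_init(&opts);  /* all algorithms start enabled */
  grpc_compression_options_disable_algorithm(&opts, GRPC_COMPRESS_GZIP);
  if (!grpc_compression_options_is_algorithm_enabled(&opts, GRPC_COMPRESS_GZIP)) {
    /* gzip is now cleared in opts.enabled_algorithms_bitset */
  }
}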
diff --git a/include/grpc/grpc.h b/include/grpc/grpc.h
index 7869e92..101fc88 100644
--- a/include/grpc/grpc.h
+++ b/include/grpc/grpc.h
@@ -589,9 +589,14 @@
     THREAD SAFETY: grpc_call_destroy is thread-compatible */
 void grpc_call_destroy(grpc_call *call);
 
-/** Request notification of a new call. 'cq_for_notification' must
-    have been registered to the server via
-    grpc_server_register_completion_queue. */
+/** Request notification of a new call.
+    Once a call is received, a notification tagged with \a tag_new is added to 
+    \a cq_for_notification. \a call, \a details and \a request_metadata are 
+    updated with the appropriate call information. \a cq_bound_to_call is bound
+    to \a call, and batch operation notifications for that call will be posted
+    to \a cq_bound_to_call.
+    Note that \a cq_for_notification must have been registered to the server via
+    \a grpc_server_register_completion_queue. */
 grpc_call_error grpc_server_request_call(
     grpc_server *server, grpc_call **call, grpc_call_details *details,
     grpc_metadata_array *request_metadata,
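
To illustrate the documented flow, a hedged sketch of requesting a call (the full parameter list is assumed from the existing declaration, which the hunk above truncates):

#include <grpc/grpc.h>

/* Per-request storage; it must stay alive until the tag_new event is drained. */
typedef struct pending_call {
  grpc_call *call;
  grpc_call_details details;
  grpc_metadata_array request_metadata;
} pending_call;

grpc_call_error request_next_call(grpc_server *server, pending_call *pc,
                                  grpc_completion_queue *cq_bound_to_call,
                                  grpc_completion_queue *cq_for_notification) {
  pc->call = NULL;
  grpc_call_details_init(&pc->details);
  grpc_metadata_array_init(&pc->request_metadata);
  /* pc doubles as tag_new: once cq_for_notification yields an event with this
     tag, pc->call, pc->details and pc->request_metadata are populated, and
     batch notifications for the call are posted to cq_bound_to_call. */
  return grpc_server_request_call(server, &pc->call, &pc->details,
                                  &pc->request_metadata, cq_bound_to_call,
                                  cq_for_notification, pc);
}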
diff --git a/include/grpc/grpc_security.h b/include/grpc/grpc_security.h
index 640c1fd..7f8f4d4 100644
--- a/include/grpc/grpc_security.h
+++ b/include/grpc/grpc_security.h
@@ -275,12 +275,18 @@
 /* --- Auth Metadata Processing --- */
 
 /* Callback function that is called when the metadata processing is done.
-   success is 1 if processing succeeded, 0 otherwise.
-   Consumed metadata will be removed from the set of metadata available on the
-   call. */
+   - Consumed metadata will be removed from the set of metadata available on the
+     call. consumed_md may be NULL if no metadata has been consumed.
+   - Response metadata will be set on the response. response_md may be NULL.
+   - status is GRPC_STATUS_OK for success or a specific status for an error.
+     Common error statuses for auth metadata processing are
+     GRPC_STATUS_UNAUTHENTICATED in case of an authentication failure or
+     GRPC_STATUS_PERMISSION_DENIED in case of an authorization failure.
+   - error_details gives details about the error. May be NULL. */
 typedef void (*grpc_process_auth_metadata_done_cb)(
     void *user_data, const grpc_metadata *consumed_md, size_t num_consumed_md,
-    int success);
+    const grpc_metadata *response_md, size_t num_response_md,
+    grpc_status_code status, const char *error_details);
 
 /* Pluggable server-side metadata processor object. */
 typedef struct {
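
A hedged sketch of a processor completing under the new callback shape (the processor's own signature is not shown in this hunk and is assumed):

#include <grpc/grpc_security.h>

/* Inside a hypothetical auth metadata processor: reject the request. */
static void reject_unauthenticated(grpc_process_auth_metadata_done_cb cb,
                                   void *user_data) {
  cb(user_data,
     NULL, 0,                     /* no metadata consumed */
     NULL, 0,                     /* no response metadata */
     GRPC_STATUS_UNAUTHENTICATED, /* authentication failure, per the comment */
     "missing or invalid credentials");
}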
diff --git a/src/compiler/config.h b/src/compiler/config.h
index cd52aca..fea976c 100644
--- a/src/compiler/config.h
+++ b/src/compiler/config.h
@@ -34,8 +34,8 @@
 #ifndef SRC_COMPILER_CONFIG_H
 #define SRC_COMPILER_CONFIG_H
 
-#include <grpc++/config.h>
-#include <grpc++/config_protobuf.h>
+#include <grpc++/support/config.h>
+#include <grpc++/support/config_protobuf.h>
 
 #ifndef GRPC_CUSTOM_DESCRIPTOR
 #include <google/protobuf/descriptor.h>
diff --git a/src/compiler/cpp_generator.cc b/src/compiler/cpp_generator.cc
index ea487bc..1bf2b16 100644
--- a/src/compiler/cpp_generator.cc
+++ b/src/compiler/cpp_generator.cc
@@ -112,18 +112,18 @@
 grpc::string GetHeaderIncludes(const grpc::protobuf::FileDescriptor *file,
                                const Parameters &params) {
   grpc::string temp =
-      "#include <grpc++/impl/internal_stub.h>\n"
+      "#include <grpc++/support/async_stream.h>\n"
       "#include <grpc++/impl/rpc_method.h>\n"
       "#include <grpc++/impl/proto_utils.h>\n"
       "#include <grpc++/impl/service_type.h>\n"
-      "#include <grpc++/async_unary_call.h>\n"
-      "#include <grpc++/status.h>\n"
-      "#include <grpc++/stream.h>\n"
-      "#include <grpc++/stub_options.h>\n"
+      "#include <grpc++/support/async_unary_call.h>\n"
+      "#include <grpc++/support/status.h>\n"
+      "#include <grpc++/support/stub_options.h>\n"
+      "#include <grpc++/support/sync_stream.h>\n"
       "\n"
       "namespace grpc {\n"
       "class CompletionQueue;\n"
-      "class ChannelInterface;\n"
+      "class Channel;\n"
       "class RpcService;\n"
       "class ServerCompletionQueue;\n"
       "class ServerContext;\n"
@@ -554,17 +554,17 @@
   printer->Outdent();
   printer->Print("};\n");
   printer->Print(
-      "class Stub GRPC_FINAL : public StubInterface,"
-      " public ::grpc::InternalStub {\n public:\n");
+      "class Stub GRPC_FINAL : public StubInterface"
+      " {\n public:\n");
   printer->Indent();
-  printer->Print(
-      "Stub(const std::shared_ptr< ::grpc::ChannelInterface>& channel);\n");
+  printer->Print("Stub(const std::shared_ptr< ::grpc::Channel>& channel);\n");
   for (int i = 0; i < service->method_count(); ++i) {
     PrintHeaderClientMethod(printer, service->method(i), vars, true);
   }
   printer->Outdent();
   printer->Print("\n private:\n");
   printer->Indent();
+  printer->Print("std::shared_ptr< ::grpc::Channel> channel_;\n");
   for (int i = 0; i < service->method_count(); ++i) {
     PrintHeaderClientMethod(printer, service->method(i), vars, false);
   }
@@ -575,7 +575,7 @@
   printer->Print("};\n");
   printer->Print(
       "static std::unique_ptr<Stub> NewStub(const std::shared_ptr< "
-      "::grpc::ChannelInterface>& channel, "
+      "::grpc::Channel>& channel, "
       "const ::grpc::StubOptions& options = ::grpc::StubOptions());\n");
 
   printer->Print("\n");
@@ -702,12 +702,13 @@
     grpc::protobuf::io::Printer printer(&output_stream, '$');
     std::map<grpc::string, grpc::string> vars;
 
-    printer.Print(vars, "#include <grpc++/async_unary_call.h>\n");
-    printer.Print(vars, "#include <grpc++/channel_interface.h>\n");
+    printer.Print(vars, "#include <grpc++/channel.h>\n");
     printer.Print(vars, "#include <grpc++/impl/client_unary_call.h>\n");
     printer.Print(vars, "#include <grpc++/impl/rpc_service_method.h>\n");
     printer.Print(vars, "#include <grpc++/impl/service_type.h>\n");
-    printer.Print(vars, "#include <grpc++/stream.h>\n");
+    printer.Print(vars, "#include <grpc++/support/async_unary_call.h>\n");
+    printer.Print(vars, "#include <grpc++/support/async_stream.h>\n");
+    printer.Print(vars, "#include <grpc++/support/sync_stream.h>\n");
 
     if (!file->package().empty()) {
       std::vector<grpc::string> parts =
@@ -738,7 +739,7 @@
                    "::grpc::ClientContext* context, "
                    "const $Request$& request, $Response$* response) {\n");
     printer->Print(*vars,
-                   "  return ::grpc::BlockingUnaryCall(channel(), "
+                   "  return ::grpc::BlockingUnaryCall(channel_.get(), "
                    "rpcmethod_$Method$_, "
                    "context, request, response);\n"
                    "}\n\n");
@@ -751,7 +752,7 @@
     printer->Print(*vars,
                    "  return new "
                    "::grpc::ClientAsyncResponseReader< $Response$>("
-                   "channel(), cq, "
+                   "channel_.get(), cq, "
                    "rpcmethod_$Method$_, "
                    "context, request);\n"
                    "}\n\n");
@@ -762,7 +763,7 @@
                    "::grpc::ClientContext* context, $Response$* response) {\n");
     printer->Print(*vars,
                    "  return new ::grpc::ClientWriter< $Request$>("
-                   "channel(), "
+                   "channel_.get(), "
                    "rpcmethod_$Method$_, "
                    "context, response);\n"
                    "}\n\n");
@@ -773,7 +774,7 @@
                    "::grpc::CompletionQueue* cq, void* tag) {\n");
     printer->Print(*vars,
                    "  return new ::grpc::ClientAsyncWriter< $Request$>("
-                   "channel(), cq, "
+                   "channel_.get(), cq, "
                    "rpcmethod_$Method$_, "
                    "context, response, tag);\n"
                    "}\n\n");
@@ -785,7 +786,7 @@
         "::grpc::ClientContext* context, const $Request$& request) {\n");
     printer->Print(*vars,
                    "  return new ::grpc::ClientReader< $Response$>("
-                   "channel(), "
+                   "channel_.get(), "
                    "rpcmethod_$Method$_, "
                    "context, request);\n"
                    "}\n\n");
@@ -796,7 +797,7 @@
                    "::grpc::CompletionQueue* cq, void* tag) {\n");
     printer->Print(*vars,
                    "  return new ::grpc::ClientAsyncReader< $Response$>("
-                   "channel(), cq, "
+                   "channel_.get(), cq, "
                    "rpcmethod_$Method$_, "
                    "context, request, tag);\n"
                    "}\n\n");
@@ -808,7 +809,7 @@
     printer->Print(*vars,
                    "  return new ::grpc::ClientReaderWriter< "
                    "$Request$, $Response$>("
-                   "channel(), "
+                   "channel_.get(), "
                    "rpcmethod_$Method$_, "
                    "context);\n"
                    "}\n\n");
@@ -820,7 +821,7 @@
     printer->Print(*vars,
                    "  return new "
                    "::grpc::ClientAsyncReaderWriter< $Request$, $Response$>("
-                   "channel(), cq, "
+                   "channel_.get(), cq, "
                    "rpcmethod_$Method$_, "
                    "context, tag);\n"
                    "}\n\n");
@@ -964,20 +965,19 @@
   }
   printer->Print(*vars, "};\n\n");
 
-  printer->Print(
-      *vars,
-      "std::unique_ptr< $ns$$Service$::Stub> $ns$$Service$::NewStub("
-      "const std::shared_ptr< ::grpc::ChannelInterface>& channel, "
-      "const ::grpc::StubOptions& options) {\n"
-      "  std::unique_ptr< $ns$$Service$::Stub> stub(new "
-      "$ns$$Service$::Stub(channel));\n"
-      "  return stub;\n"
-      "}\n\n");
+  printer->Print(*vars,
+                 "std::unique_ptr< $ns$$Service$::Stub> $ns$$Service$::NewStub("
+                 "const std::shared_ptr< ::grpc::Channel>& channel, "
+                 "const ::grpc::StubOptions& options) {\n"
+                 "  std::unique_ptr< $ns$$Service$::Stub> stub(new "
+                 "$ns$$Service$::Stub(channel));\n"
+                 "  return stub;\n"
+                 "}\n\n");
   printer->Print(*vars,
                  "$ns$$Service$::Stub::Stub(const std::shared_ptr< "
-                 "::grpc::ChannelInterface>& channel)\n");
+                 "::grpc::Channel>& channel)\n");
   printer->Indent();
-  printer->Print(": ::grpc::InternalStub(channel)");
+  printer->Print(": channel_(channel)");
   for (int i = 0; i < service->method_count(); ++i) {
     const grpc::protobuf::MethodDescriptor *method = service->method(i);
     (*vars)["Method"] = method->name();
@@ -991,13 +991,12 @@
     } else {
       (*vars)["StreamingType"] = "BIDI_STREAMING";
     }
-    printer->Print(
-        *vars,
-        ", rpcmethod_$Method$_("
-        "$prefix$$Service$_method_names[$Idx$], "
-        "::grpc::RpcMethod::$StreamingType$, "
-        "channel->RegisterMethod($prefix$$Service$_method_names[$Idx$])"
-        ")\n");
+    printer->Print(*vars,
+                   ", rpcmethod_$Method$_("
+                   "$prefix$$Service$_method_names[$Idx$], "
+                   "::grpc::RpcMethod::$StreamingType$, "
+                   "channel"
+                   ")\n");
   }
   printer->Print("{}\n\n");
   printer->Outdent();
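
Taken together, the stub emitted after this change looks roughly as follows (a hedged reconstruction from the printer calls above for a hypothetical Echo service with one unary Say method; not literal generator output):

// Nested inside the generated Echo service class:
class Stub GRPC_FINAL : public StubInterface {
 public:
  Stub(const std::shared_ptr< ::grpc::Channel>& channel);
  ::grpc::Status Say(::grpc::ClientContext* context, const SayRequest& request,
                     SayResponse* response);

 private:
  std::shared_ptr< ::grpc::Channel> channel_;
  const ::grpc::RpcMethod rpcmethod_Say_;
};

Echo::Stub::Stub(const std::shared_ptr< ::grpc::Channel>& channel)
    : channel_(channel),
      rpcmethod_Say_(Echo_method_names[0], ::grpc::RpcMethod::NORMAL_RPC,
                     channel) {}

::grpc::Status Echo::Stub::Say(::grpc::ClientContext* context,
                               const SayRequest& request,
                               SayResponse* response) {
  // The stub now routes calls through its own channel_ member rather than the
  // removed ::grpc::InternalStub base class.
  return ::grpc::BlockingUnaryCall(channel_.get(), rpcmethod_Say_, context,
                                   request, response);
}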
diff --git a/src/compiler/python_generator.cc b/src/compiler/python_generator.cc
index 2982a89..72c457a 100644
--- a/src/compiler/python_generator.cc
+++ b/src/compiler/python_generator.cc
@@ -42,7 +42,7 @@
 #include <tuple>
 #include <vector>
 
-#include <grpc++/config.h>
+#include <grpc++/support/config.h>
 #include "src/compiler/config.h"
 #include "src/compiler/generator_helpers.h"
 #include "src/compiler/python_generator.h"
diff --git a/src/core/channel/channel_args.c b/src/core/channel/channel_args.c
index c430b56..54ee75a 100644
--- a/src/core/channel/channel_args.c
+++ b/src/core/channel/channel_args.c
@@ -37,6 +37,7 @@
 
 #include <grpc/support/alloc.h>
 #include <grpc/support/string_util.h>
+#include <grpc/support/useful.h>
 
 #include <string.h>
 
@@ -146,3 +147,65 @@
   tmp.value.integer = algorithm;
   return grpc_channel_args_copy_and_add(a, &tmp, 1);
 }
+
+/** Returns 1 if the argument for the compression algorithm's enabled states
+ * bitset was found in \a a, returning the arg's value in \a states_arg.
+ * Otherwise, returns 0. */
+static int find_compression_algorithm_states_bitset(
+    const grpc_channel_args *a, int **states_arg) {
+  if (a != NULL) {
+    size_t i;
+    for (i = 0; i < a->num_args; ++i) {
+      if (a->args[i].type == GRPC_ARG_INTEGER &&
+          !strcmp(GRPC_COMPRESSION_ALGORITHM_STATE_ARG, a->args[i].key)) {
+        *states_arg = &a->args[i].value.integer;
+        return 1; /* GPR_TRUE */
+      }
+    }
+  }
+  return 0; /* GPR_FALSE */
+}
+
+grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
+    grpc_channel_args **a,
+    grpc_compression_algorithm algorithm,
+    int state) {
+  int *states_arg;
+  grpc_channel_args *result = *a;
+  const int states_arg_found =
+      find_compression_algorithm_states_bitset(*a, &states_arg);
+
+  if (states_arg_found) {
+    if (state != 0) {
+      GPR_BITSET(states_arg, algorithm);
+    } else {
+      GPR_BITCLEAR(states_arg, algorithm);
+    }
+  } else {
+    /* create a new arg */
+    grpc_arg tmp;
+    tmp.type = GRPC_ARG_INTEGER;
+    tmp.key = GRPC_COMPRESSION_ALGORITHM_STATE_ARG;
+    /* all enabled by default */
+    tmp.value.integer = (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1;
+    if (state != 0) {
+      GPR_BITSET(&tmp.value.integer, algorithm);
+    } else {
+      GPR_BITCLEAR(&tmp.value.integer, algorithm);
+    }
+    result = grpc_channel_args_copy_and_add(*a, &tmp, 1);
+    grpc_channel_args_destroy(*a);
+    *a = result;
+  }
+  return result;
+}
+
+int grpc_channel_args_compression_algorithm_get_states(
+    const grpc_channel_args *a) {
+  int *states_arg;
+  if (find_compression_algorithm_states_bitset(a, &states_arg)) {
+    return *states_arg;
+  } else {
+    return (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1; /* All algs. enabled */
+  }
+}
diff --git a/src/core/channel/channel_args.h b/src/core/channel/channel_args.h
index 7e6ddd3..06a6012 100644
--- a/src/core/channel/channel_args.h
+++ b/src/core/channel/channel_args.h
@@ -67,4 +67,24 @@
 grpc_channel_args *grpc_channel_args_set_compression_algorithm(
     grpc_channel_args *a, grpc_compression_algorithm algorithm);
 
+/** Sets the support for the given compression algorithm. By default, all
+ * compression algorithms are enabled. It's an error to disable an algorithm set
+ * by grpc_channel_args_set_compression_algorithm.
+ *
+ * Returns an instance with the updated algorithm states. The \a a pointer is
+ * modified to point to the returned instance (which may be different from the
+ * input value of \a a). */
+grpc_channel_args *grpc_channel_args_compression_algorithm_set_state(
+    grpc_channel_args **a,
+    grpc_compression_algorithm algorithm,
+    int enabled);
+
+/** Returns the bitset representing the support state (true for enabled, false
+ * for disabled) for compression algorithms.
+ *
+ * The i-th bit of the returned bitset corresponds to the i-th entry in the
+ * grpc_compression_algorithm enum. */
+int grpc_channel_args_compression_algorithm_get_states(
+    const grpc_channel_args *a);
+
 #endif /* GRPC_INTERNAL_CORE_CHANNEL_CHANNEL_ARGS_H */
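
A brief call-pattern sketch for the declarations above (GRPC_COMPRESS_GZIP is assumed from the grpc_compression_algorithm enum; args is a caller-owned channel-args instance):

#include "src/core/channel/channel_args.h"

void disable_gzip_on_args(grpc_channel_args **args) {
  /* May replace *args with a new instance (destroying the old one) if the
     states arg was not yet present, per the contract above. */
  grpc_channel_args_compression_algorithm_set_state(args, GRPC_COMPRESS_GZIP,
                                                    0 /* disabled */);
  {
    int states = grpc_channel_args_compression_algorithm_get_states(*args);
    if (!((states >> GRPC_COMPRESS_GZIP) & 1)) {
      /* gzip is now marked disabled for this channel */
    }
  }
}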
diff --git a/src/core/iomgr/pollset.h b/src/core/iomgr/pollset.h
index c474e4d..337596c 100644
--- a/src/core/iomgr/pollset.h
+++ b/src/core/iomgr/pollset.h
@@ -74,10 +74,9 @@
    grpc_pollset_work, and it is guaranteed that GRPC_POLLSET_MU(pollset) will
    not be released by grpc_pollset_work AFTER worker has been destroyed.
 
-   Returns true if some work has been done, and false if the deadline
-   expired. */
-int grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker *worker,
-                      gpr_timespec deadline);
+   Tries not to block past deadline. */
+void grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker *worker,
+                       gpr_timespec now, gpr_timespec deadline);
 
 /* Break one polling thread out of polling work for this pollset.
    If specific_worker is GRPC_POLLSET_KICK_BROADCAST, kick ALL the workers.
diff --git a/src/core/iomgr/pollset_multipoller_with_epoll.c b/src/core/iomgr/pollset_multipoller_with_epoll.c
index 5ea9dd2..fe66ebe 100644
--- a/src/core/iomgr/pollset_multipoller_with_epoll.c
+++ b/src/core/iomgr/pollset_multipoller_with_epoll.c
@@ -181,7 +181,7 @@
   pfds[1].events = POLLIN;
   pfds[1].revents = 0;
 
-  poll_rv = poll(pfds, 2, timeout_ms);
+  poll_rv = grpc_poll_function(pfds, 2, timeout_ms);
 
   if (poll_rv < 0) {
     if (errno != EINTR) {
diff --git a/src/core/iomgr/pollset_multipoller_with_poll_posix.c b/src/core/iomgr/pollset_multipoller_with_poll_posix.c
index 001fcec..30ee6e2 100644
--- a/src/core/iomgr/pollset_multipoller_with_poll_posix.c
+++ b/src/core/iomgr/pollset_multipoller_with_poll_posix.c
@@ -144,7 +144,7 @@
                                         POLLOUT, &watchers[i]);
   }
 
-  r = poll(pfds, pfd_count, timeout);
+  r = grpc_poll_function(pfds, pfd_count, timeout);
 
   for (i = 1; i < pfd_count; i++) {
     grpc_fd_end_poll(&watchers[i], pfds[i].revents & POLLIN,
diff --git a/src/core/iomgr/pollset_posix.c b/src/core/iomgr/pollset_posix.c
index a01f9ff..6bd1b61 100644
--- a/src/core/iomgr/pollset_posix.c
+++ b/src/core/iomgr/pollset_posix.c
@@ -38,7 +38,6 @@
 #include "src/core/iomgr/pollset_posix.h"
 
 #include <errno.h>
-#include <poll.h>
 #include <stdlib.h>
 #include <string.h>
 #include <unistd.h>
@@ -57,6 +56,8 @@
 GPR_TLS_DECL(g_current_thread_poller);
 GPR_TLS_DECL(g_current_thread_worker);
 
+grpc_poll_function_type grpc_poll_function = poll;
+
 static void remove_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
   worker->prev->next = worker->next;
   worker->next->prev = worker->prev;
@@ -89,6 +90,7 @@
 }
 
 void grpc_pollset_kick(grpc_pollset *p, grpc_pollset_worker *specific_worker) {
+  /* pollset->mu already held */
   if (specific_worker != NULL) {
     if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) {
       for (specific_worker = p->root_worker.next;
@@ -168,14 +170,10 @@
   pollset->shutdown_done_cb(pollset->shutdown_done_arg);
 }
 
-int grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker *worker,
-                      gpr_timespec deadline) {
+void grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker *worker,
+                       gpr_timespec now, gpr_timespec deadline) {
   /* pollset->mu already held */
-  gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
   int added_worker = 0;
-  if (gpr_time_cmp(now, deadline) > 0) {
-    return 0;
-  }
   /* this must happen before we (potentially) drop pollset->mu */
   worker->next = worker->prev = NULL;
   /* TODO(ctiller): pool these */
@@ -217,7 +215,6 @@
       gpr_mu_lock(&pollset->mu);
     }
   }
-  return 1;
 }
 
 void grpc_pollset_shutdown(grpc_pollset *pollset,
@@ -456,7 +453,7 @@
 
   /* poll fd count (argument 2) is shortened by one if we have no events
      to poll on - such that it only includes the kicker */
-  r = poll(pfd, nfds, timeout);
+  r = grpc_poll_function(pfd, nfds, timeout);
   GRPC_TIMER_MARK(GRPC_PTAG_POLL_FINISHED, r);
 
   if (fd) {
diff --git a/src/core/iomgr/pollset_posix.h b/src/core/iomgr/pollset_posix.h
index a3ea353..69bd9cc 100644
--- a/src/core/iomgr/pollset_posix.h
+++ b/src/core/iomgr/pollset_posix.h
@@ -34,6 +34,8 @@
 #ifndef GRPC_INTERNAL_CORE_IOMGR_POLLSET_POSIX_H
 #define GRPC_INTERNAL_CORE_IOMGR_POLLSET_POSIX_H
 
+#include <poll.h>
+
 #include <grpc/support/sync.h>
 #include "src/core/iomgr/wakeup_fd_posix.h"
 
@@ -118,4 +120,8 @@
  * be locked) */
 int grpc_pollset_has_workers(grpc_pollset *pollset);
 
+/* override to allow tests to hook poll() usage */
+typedef int (*grpc_poll_function_type)(struct pollfd *, nfds_t, int);
+extern grpc_poll_function_type grpc_poll_function;
+
 #endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_POSIX_H */
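
The grpc_poll_function indirection above exists so tests can interpose on poll(); a minimal sketch with a hypothetical call counter:

#include <poll.h>
#include "src/core/iomgr/pollset_posix.h"

static int g_poll_calls = 0;  /* hypothetical test instrumentation */

static int counting_poll(struct pollfd *pfds, nfds_t nfds, int timeout) {
  ++g_poll_calls;
  return poll(pfds, nfds, timeout);  /* defer to the real poll() */
}

static void install_poll_hook(void) {
  grpc_poll_function = counting_poll;  /* install before running the code under test */
}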
diff --git a/src/core/iomgr/pollset_windows.c b/src/core/iomgr/pollset_windows.c
index 8710395..07522c8 100644
--- a/src/core/iomgr/pollset_windows.c
+++ b/src/core/iomgr/pollset_windows.c
@@ -99,14 +99,9 @@
   gpr_mu_destroy(&pollset->mu);
 }
 
-int grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker *worker,
-                      gpr_timespec deadline) {
-  gpr_timespec now;
+void grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker *worker, 
+                       gpr_timespec now, gpr_timespec deadline) {
   int added_worker = 0;
-  now = gpr_now(GPR_CLOCK_MONOTONIC);
-  if (gpr_time_cmp(now, deadline) > 0) {
-    return 0 /* GPR_FALSE */;
-  }
   worker->next = worker->prev = NULL;
   gpr_cv_init(&worker->cv);
   if (grpc_maybe_call_delayed_callbacks(&pollset->mu, 1 /* GPR_TRUE */)) {
@@ -127,7 +122,6 @@
   if (added_worker) {
     remove_worker(pollset, worker);
   }
-  return 1 /* GPR_TRUE */;
 }
 
 void grpc_pollset_kick(grpc_pollset *p, grpc_pollset_worker *specific_worker) {
diff --git a/src/core/security/google_default_credentials.c b/src/core/security/google_default_credentials.c
index d6092ec..3631de8 100644
--- a/src/core/security/google_default_credentials.c
+++ b/src/core/security/google_default_credentials.c
@@ -115,7 +115,7 @@
   gpr_mu_lock(GRPC_POLLSET_MU(&detector.pollset));
   while (!detector.is_done) {
     grpc_pollset_worker worker;
-    grpc_pollset_work(&detector.pollset, &worker,
+    grpc_pollset_work(&detector.pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
                       gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(&detector.pollset));
diff --git a/src/core/security/security_connector.c b/src/core/security/security_connector.c
index a354536..ba9ac68 100644
--- a/src/core/security/security_connector.c
+++ b/src/core/security/security_connector.c
@@ -575,6 +575,16 @@
   if (!check_request_metadata_creds(request_metadata_creds)) {
     goto error;
   }
+  if (config->pem_root_certs == NULL) {
+    pem_root_certs_size = grpc_get_default_ssl_roots(&pem_root_certs);
+    if (pem_root_certs == NULL || pem_root_certs_size == 0) {
+      gpr_log(GPR_ERROR, "Could not get default pem root certs.");
+      goto error;
+    }
+  } else {
+    pem_root_certs = config->pem_root_certs;
+    pem_root_certs_size = config->pem_root_certs_size;
+  }
 
   c = gpr_malloc(sizeof(grpc_ssl_channel_security_connector));
   memset(c, 0, sizeof(grpc_ssl_channel_security_connector));
@@ -590,16 +600,6 @@
   if (overridden_target_name != NULL) {
     c->overridden_target_name = gpr_strdup(overridden_target_name);
   }
-  if (config->pem_root_certs == NULL) {
-    pem_root_certs_size = grpc_get_default_ssl_roots(&pem_root_certs);
-    if (pem_root_certs == NULL || pem_root_certs_size == 0) {
-      gpr_log(GPR_ERROR, "Could not get default pem root certs.");
-      goto error;
-    }
-  } else {
-    pem_root_certs = config->pem_root_certs;
-    pem_root_certs_size = config->pem_root_certs_size;
-  }
   result = tsi_create_ssl_client_handshaker_factory(
       config->pem_private_key, config->pem_private_key_size,
       config->pem_cert_chain, config->pem_cert_chain_size, pem_root_certs,
diff --git a/src/core/security/server_auth_filter.c b/src/core/security/server_auth_filter.c
index 2f42f01..6e83143 100644
--- a/src/core/security/server_auth_filter.c
+++ b/src/core/security/server_auth_filter.c
@@ -104,24 +104,34 @@
   return md;
 }
 
-static void on_md_processing_done(void *user_data,
-                                  const grpc_metadata *consumed_md,
-                                  size_t num_consumed_md, int success) {
+static void on_md_processing_done(
+    void *user_data, const grpc_metadata *consumed_md, size_t num_consumed_md,
+    const grpc_metadata *response_md, size_t num_response_md,
+    grpc_status_code status, const char *error_details) {
   grpc_call_element *elem = user_data;
   call_data *calld = elem->call_data;
 
-  if (success) {
+  /* TODO(jboeuf): Implement support for response_md. */
+  if (response_md != NULL && num_response_md > 0) {
+    gpr_log(GPR_INFO,
+            "response_md in auth metadata processing not supported for now. "
+            "Ignoring...");
+  }
+
+  if (status == GRPC_STATUS_OK) {
     calld->consumed_md = consumed_md;
     calld->num_consumed_md = num_consumed_md;
     grpc_metadata_batch_filter(&calld->md_op->data.metadata, remove_consumed_md,
                                elem);
-    calld->on_done_recv->cb(calld->on_done_recv->cb_arg, success);
+    calld->on_done_recv->cb(calld->on_done_recv->cb_arg, 1);
   } else {
-    gpr_slice message = gpr_slice_from_copied_string(
-        "Authentication metadata processing failed.");
+    gpr_slice message;
+    error_details = error_details != NULL
+                    ? error_details
+                    : "Authentication metadata processing failed.";
+    message = gpr_slice_from_copied_string(error_details);
     grpc_sopb_reset(calld->recv_ops);
-    grpc_transport_stream_op_add_close(&calld->transport_op,
-                                       GRPC_STATUS_UNAUTHENTICATED, &message);
+    grpc_transport_stream_op_add_close(&calld->transport_op, status, &message);
     grpc_call_next_op(elem, &calld->transport_op);
   }
 }
diff --git a/src/core/surface/completion_queue.c b/src/core/surface/completion_queue.c
index 77443a7..b58115a 100644
--- a/src/core/surface/completion_queue.c
+++ b/src/core/surface/completion_queue.c
@@ -170,6 +170,9 @@
                                       gpr_timespec deadline, void *reserved) {
   grpc_event ret;
   grpc_pollset_worker worker;
+  int first_loop = 1;
+  gpr_timespec now;
+
   GPR_ASSERT(!reserved);
 
   deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
@@ -196,12 +199,15 @@
       ret.type = GRPC_QUEUE_SHUTDOWN;
       break;
     }
-    if (!grpc_pollset_work(&cc->pollset, &worker, deadline)) {
+    now = gpr_now(GPR_CLOCK_MONOTONIC);
+    if (!first_loop && gpr_time_cmp(now, deadline) >= 0) {
       gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
       memset(&ret, 0, sizeof(ret));
       ret.type = GRPC_QUEUE_TIMEOUT;
       break;
     }
+    first_loop = 0;
+    grpc_pollset_work(&cc->pollset, &worker, now, deadline);
   }
   GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
   GRPC_CQ_INTERNAL_UNREF(cc, "next");
@@ -239,6 +245,9 @@
   grpc_cq_completion *c;
   grpc_cq_completion *prev;
   grpc_pollset_worker worker;
+  gpr_timespec now;
+  int first_loop = 1;
+
   GPR_ASSERT(!reserved);
 
   deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
@@ -281,13 +290,16 @@
       ret.type = GRPC_QUEUE_TIMEOUT;
       break;
     }
-    if (!grpc_pollset_work(&cc->pollset, &worker, deadline)) {
+    now = gpr_now(GPR_CLOCK_MONOTONIC);
+    if (!first_loop && gpr_time_cmp(now, deadline) >= 0) {
       del_plucker(cc, tag, &worker);
       gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
       memset(&ret, 0, sizeof(ret));
       ret.type = GRPC_QUEUE_TIMEOUT;
       break;
     }
+    first_loop = 0;
+    grpc_pollset_work(&cc->pollset, &worker, now, deadline);
     del_plucker(cc, tag, &worker);
   }
 done:
diff --git a/src/core/transport/chttp2/stream_encoder.c b/src/core/transport/chttp2/stream_encoder.c
index 0f04169..1ea697f 100644
--- a/src/core/transport/chttp2/stream_encoder.c
+++ b/src/core/transport/chttp2/stream_encoder.c
@@ -66,6 +66,8 @@
   size_t header_idx;
   /* was the last frame emitted a header? (if yes, we'll need a CONTINUATION */
   gpr_uint8 last_was_header;
+  /* have we seen a regular (non-colon-prefixed) header yet? */
+  gpr_uint8 seen_regular_header;
   /* output stream id */
   gpr_uint32 stream_id;
   gpr_slice_buffer *output;
@@ -361,6 +363,15 @@
   gpr_uint32 indices_key;
   int should_add_elem;
 
+  GPR_ASSERT(GPR_SLICE_LENGTH(elem->key->slice) > 0);
+  if (GPR_SLICE_START_PTR(elem->key->slice)[0] != ':') { /* regular header */
+    st->seen_regular_header = 1;
+  } else if (st->seen_regular_header != 0) { /* reserved header */
+    gpr_log(GPR_ERROR,
+            "Reserved header (colon-prefixed) happening after regular ones.");
+    abort();
+  }
+
   inc_filter(HASH_FRAGMENT_1(elem_hash), &c->filter_elems_sum, c->filter_elems);
 
   /* is this elem currently in the decoders table? */
@@ -566,6 +577,7 @@
 
   st.cur_frame_type = NONE;
   st.last_was_header = 0;
+  st.seen_regular_header = 0;
   st.stream_id = stream_id;
   st.output = output;
 
diff --git a/src/cpp/client/channel.cc b/src/cpp/client/channel.cc
index 17f31c2..8bf2e46 100644
--- a/src/cpp/client/channel.cc
+++ b/src/cpp/client/channel.cc
@@ -31,29 +31,26 @@
  *
  */
 
-#include "src/cpp/client/channel.h"
+#include <grpc++/channel.h>
 
 #include <memory>
 
 #include <grpc/grpc.h>
 #include <grpc/support/log.h>
 #include <grpc/support/slice.h>
-
-#include "src/core/profiling/timers.h"
-#include <grpc++/channel_arguments.h>
 #include <grpc++/client_context.h>
 #include <grpc++/completion_queue.h>
-#include <grpc++/config.h>
 #include <grpc++/credentials.h>
 #include <grpc++/impl/call.h>
 #include <grpc++/impl/rpc_method.h>
-#include <grpc++/status.h>
-#include <grpc++/time.h>
+#include <grpc++/support/channel_arguments.h>
+#include <grpc++/support/config.h>
+#include <grpc++/support/status.h>
+#include <grpc++/support/time.h>
+#include "src/core/profiling/timers.h"
 
 namespace grpc {
 
-Channel::Channel(grpc_channel* channel) : c_channel_(channel) {}
-
 Channel::Channel(const grpc::string& host, grpc_channel* channel)
     : host_(host), c_channel_(channel) {}
 
diff --git a/src/cpp/client/channel.h b/src/cpp/client/channel.h
deleted file mode 100644
index 7e406ad..0000000
--- a/src/cpp/client/channel.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifndef GRPC_INTERNAL_CPP_CLIENT_CHANNEL_H
-#define GRPC_INTERNAL_CPP_CLIENT_CHANNEL_H
-
-#include <memory>
-
-#include <grpc++/channel_interface.h>
-#include <grpc++/config.h>
-#include <grpc++/impl/grpc_library.h>
-
-struct grpc_channel;
-
-namespace grpc {
-class Call;
-class CallOpSetInterface;
-class ChannelArguments;
-class CompletionQueue;
-class Credentials;
-class StreamContextInterface;
-
-class Channel GRPC_FINAL : public GrpcLibrary, public ChannelInterface {
- public:
-  explicit Channel(grpc_channel* c_channel);
-  Channel(const grpc::string& host, grpc_channel* c_channel);
-  ~Channel() GRPC_OVERRIDE;
-
-  void* RegisterMethod(const char* method) GRPC_OVERRIDE;
-  Call CreateCall(const RpcMethod& method, ClientContext* context,
-                  CompletionQueue* cq) GRPC_OVERRIDE;
-  void PerformOpsOnCall(CallOpSetInterface* ops, Call* call) GRPC_OVERRIDE;
-
-  grpc_connectivity_state GetState(bool try_to_connect) GRPC_OVERRIDE;
-
- private:
-  void NotifyOnStateChangeImpl(grpc_connectivity_state last_observed,
-                               gpr_timespec deadline, CompletionQueue* cq,
-                               void* tag) GRPC_OVERRIDE;
-
-  bool WaitForStateChangeImpl(grpc_connectivity_state last_observed,
-                              gpr_timespec deadline) GRPC_OVERRIDE;
-
-  const grpc::string host_;
-  grpc_channel* const c_channel_;  // owned
-};
-
-}  // namespace grpc
-
-#endif  // GRPC_INTERNAL_CPP_CLIENT_CHANNEL_H
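With the internal src/cpp/client/channel.h removed, grpc::Channel is declared only in the public <grpc++/channel.h>, and internal code constructs it through the CreateChannelInternal() factory introduced further down instead of new-ing a ChannelInterface. A minimal sketch of a migrated internal call site (illustrative only; `host` and `c_channel` stand for values the caller already has):

    #include <grpc++/channel.h>
    #include "src/cpp/client/create_channel_internal.h"

    // Before: std::shared_ptr<ChannelInterface> ch(new Channel(host, c_channel));
    // After:  the concrete Channel is created via the internal factory.
    std::shared_ptr<grpc::Channel> ch =
        grpc::CreateChannelInternal(host, c_channel);
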
diff --git a/src/cpp/client/channel_arguments.cc b/src/cpp/client/channel_arguments.cc
index da6602e..50422d0 100644
--- a/src/cpp/client/channel_arguments.cc
+++ b/src/cpp/client/channel_arguments.cc
@@ -31,10 +31,9 @@
  *
  */
 
-#include <grpc++/channel_arguments.h>
+#include <grpc++/support/channel_arguments.h>
 
 #include <grpc/support/log.h>
-
 #include "src/core/channel/channel_args.h"
 
 namespace grpc {
diff --git a/src/cpp/client/client_context.cc b/src/cpp/client/client_context.cc
index b8caa1e..c4d7cf2 100644
--- a/src/cpp/client/client_context.cc
+++ b/src/cpp/client/client_context.cc
@@ -38,7 +38,7 @@
 #include <grpc/support/string_util.h>
 #include <grpc++/credentials.h>
 #include <grpc++/server_context.h>
-#include <grpc++/time.h>
+#include <grpc++/support/time.h>
 
 #include "src/core/channel/compress_filter.h"
 #include "src/cpp/common/create_auth_context.h"
@@ -71,7 +71,7 @@
 }
 
 void ClientContext::set_call(grpc_call* call,
-                             const std::shared_ptr<ChannelInterface>& channel) {
+                             const std::shared_ptr<Channel>& channel) {
   GPR_ASSERT(call_ == nullptr);
   call_ = call;
   channel_ = channel;
diff --git a/src/cpp/client/create_channel.cc b/src/cpp/client/create_channel.cc
index 5ae772f..8c571cb 100644
--- a/src/cpp/client/create_channel.cc
+++ b/src/cpp/client/create_channel.cc
@@ -34,15 +34,16 @@
 #include <memory>
 #include <sstream>
 
-#include "src/cpp/client/channel.h"
-#include <grpc++/channel_interface.h>
-#include <grpc++/channel_arguments.h>
+#include <grpc++/channel.h>
 #include <grpc++/create_channel.h>
+#include <grpc++/support/channel_arguments.h>
+
+#include "src/cpp/client/create_channel_internal.h"
 
 namespace grpc {
 class ChannelArguments;
 
-std::shared_ptr<ChannelInterface> CreateChannel(
+std::shared_ptr<Channel> CreateChannel(
     const grpc::string& target, const std::shared_ptr<Credentials>& creds,
     const ChannelArguments& args) {
   ChannelArguments cp_args = args;
@@ -50,10 +51,10 @@
   user_agent_prefix << "grpc-c++/" << grpc_version_string();
   cp_args.SetString(GRPC_ARG_PRIMARY_USER_AGENT_STRING,
                     user_agent_prefix.str());
-  return creds ? creds->CreateChannel(target, cp_args)
-               : std::shared_ptr<ChannelInterface>(
-                     new Channel(grpc_lame_client_channel_create(
-                         NULL, GRPC_STATUS_INVALID_ARGUMENT,
-                         "Invalid credentials.")));
+  return creds
+             ? creds->CreateChannel(target, cp_args)
+             : CreateChannelInternal("", grpc_lame_client_channel_create(
+                                             NULL, GRPC_STATUS_INVALID_ARGUMENT,
+                                             "Invalid credentials."));
 }
 }  // namespace grpc
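For application code, the visible effect is that grpc::CreateChannel() now hands back a std::shared_ptr<Channel> rather than a std::shared_ptr<ChannelInterface>. A minimal caller under the new header layout might look like the sketch below (the target address is a placeholder, and InsecureCredentials() is assumed to be the existing insecure-credentials factory from <grpc++/credentials.h>):

    #include <grpc++/channel.h>
    #include <grpc++/create_channel.h>
    #include <grpc++/credentials.h>
    #include <grpc++/support/channel_arguments.h>

    int main() {
      grpc::ChannelArguments args;
      // CreateChannel now returns the concrete Channel type.
      std::shared_ptr<grpc::Channel> channel = grpc::CreateChannel(
          "localhost:50051", grpc::InsecureCredentials(), args);
      return channel ? 0 : 1;
    }
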
diff --git a/include/grpc++/stub_options.h b/src/cpp/client/create_channel_internal.cc
similarity index 84%
copy from include/grpc++/stub_options.h
copy to src/cpp/client/create_channel_internal.cc
index c7c16dc..9c5ab03 100644
--- a/include/grpc++/stub_options.h
+++ b/src/cpp/client/create_channel_internal.cc
@@ -31,13 +31,16 @@
  *
  */
 
-#ifndef GRPCXX_STUB_OPTIONS_H
-#define GRPCXX_STUB_OPTIONS_H
+#include <memory>
+
+#include <grpc++/channel.h>
+
+struct grpc_channel;
 
 namespace grpc {
 
-class StubOptions {};
-
+std::shared_ptr<Channel> CreateChannelInternal(const grpc::string& host,
+                                               grpc_channel* c_channel) {
+  return std::shared_ptr<Channel>(new Channel(host, c_channel));
+}
 }  // namespace grpc
-
-#endif  // GRPCXX_STUB_OPTIONS_H
diff --git a/include/grpc++/impl/internal_stub.h b/src/cpp/client/create_channel_internal.h
similarity index 78%
rename from include/grpc++/impl/internal_stub.h
rename to src/cpp/client/create_channel_internal.h
index 370a3b8..4385ec7 100644
--- a/include/grpc++/impl/internal_stub.h
+++ b/src/cpp/client/create_channel_internal.h
@@ -31,27 +31,21 @@
  *
  */
 
-#ifndef GRPCXX_IMPL_INTERNAL_STUB_H
-#define GRPCXX_IMPL_INTERNAL_STUB_H
+#ifndef GRPC_INTERNAL_CPP_CLIENT_CREATE_CHANNEL_INTERNAL_H
+#define GRPC_INTERNAL_CPP_CLIENT_CREATE_CHANNEL_INTERNAL_H
 
 #include <memory>
 
-#include <grpc++/channel_interface.h>
+#include <grpc++/support/config.h>
+
+struct grpc_channel;
 
 namespace grpc {
+class Channel;
 
-class InternalStub {
- public:
-  InternalStub(const std::shared_ptr<ChannelInterface>& channel)
-      : channel_(channel) {}
-  virtual ~InternalStub() {}
-
-  ChannelInterface* channel() { return channel_.get(); }
-
- private:
-  const std::shared_ptr<ChannelInterface> channel_;
-};
+std::shared_ptr<Channel> CreateChannelInternal(const grpc::string& host,
+                                               grpc_channel* c_channel);
 
 }  // namespace grpc
 
-#endif  // GRPCXX_IMPL_INTERNAL_STUB_H
+#endif  // GRPC_INTERNAL_CPP_CLIENT_CREATE_CHANNEL_INTERNAL_H
diff --git a/src/cpp/client/generic_stub.cc b/src/cpp/client/generic_stub.cc
index 0c90578..7a2fdf9 100644
--- a/src/cpp/client/generic_stub.cc
+++ b/src/cpp/client/generic_stub.cc
@@ -31,7 +31,7 @@
  *
  */
 
-#include <grpc++/generic_stub.h>
+#include <grpc++/generic/generic_stub.h>
 
 #include <grpc++/impl/rpc_method.h>
 
@@ -44,8 +44,7 @@
   return std::unique_ptr<GenericClientAsyncReaderWriter>(
       new GenericClientAsyncReaderWriter(
           channel_.get(), cq,
-          RpcMethod(method.c_str(), RpcMethod::BIDI_STREAMING, nullptr),
-          context, tag));
+          RpcMethod(method.c_str(), RpcMethod::BIDI_STREAMING), context, tag));
 }
 
 }  // namespace grpc
diff --git a/src/cpp/client/insecure_credentials.cc b/src/cpp/client/insecure_credentials.cc
index 2f9357b..4a4d2cb 100644
--- a/src/cpp/client/insecure_credentials.cc
+++ b/src/cpp/client/insecure_credentials.cc
@@ -31,25 +31,27 @@
  *
  */
 
+#include <grpc++/credentials.h>
+
 #include <grpc/grpc.h>
 #include <grpc/support/log.h>
-
-#include <grpc++/channel_arguments.h>
-#include <grpc++/config.h>
-#include <grpc++/credentials.h>
-#include "src/cpp/client/channel.h"
+#include <grpc++/channel.h>
+#include <grpc++/support/channel_arguments.h>
+#include <grpc++/support/config.h>
+#include "src/cpp/client/create_channel_internal.h"
 
 namespace grpc {
 
 namespace {
 class InsecureCredentialsImpl GRPC_FINAL : public Credentials {
  public:
-  std::shared_ptr<grpc::ChannelInterface> CreateChannel(
+  std::shared_ptr<grpc::Channel> CreateChannel(
       const string& target, const grpc::ChannelArguments& args) GRPC_OVERRIDE {
     grpc_channel_args channel_args;
     args.SetChannelArgs(&channel_args);
-    return std::shared_ptr<ChannelInterface>(new Channel(
-        grpc_insecure_channel_create(target.c_str(), &channel_args, nullptr)));
+    return CreateChannelInternal(
+        "",
+        grpc_insecure_channel_create(target.c_str(), &channel_args, nullptr));
   }
 
   // InsecureCredentials should not be applied to a call.
diff --git a/src/cpp/client/internal_stub.cc b/src/cpp/client/internal_stub.cc
deleted file mode 100644
index 91724a4..0000000
--- a/src/cpp/client/internal_stub.cc
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <grpc++/impl/internal_stub.h>
-
-namespace grpc {}  // namespace grpc
diff --git a/src/cpp/client/secure_channel_arguments.cc b/src/cpp/client/secure_channel_arguments.cc
index d89df99..e17d3b5 100644
--- a/src/cpp/client/secure_channel_arguments.cc
+++ b/src/cpp/client/secure_channel_arguments.cc
@@ -31,9 +31,9 @@
  *
  */
 
-#include <grpc++/channel_arguments.h>
-#include <grpc/grpc_security.h>
+#include <grpc++/support/channel_arguments.h>
 
+#include <grpc/grpc_security.h>
 #include "src/core/channel/channel_args.h"
 
 namespace grpc {
diff --git a/src/cpp/client/secure_credentials.cc b/src/cpp/client/secure_credentials.cc
index 6cd6b77..f368f25 100644
--- a/src/cpp/client/secure_credentials.cc
+++ b/src/cpp/client/secure_credentials.cc
@@ -32,21 +32,21 @@
  */
 
 #include <grpc/support/log.h>
-
-#include <grpc++/channel_arguments.h>
+#include <grpc++/channel.h>
 #include <grpc++/impl/grpc_library.h>
-#include "src/cpp/client/channel.h"
+#include <grpc++/support/channel_arguments.h>
+#include "src/cpp/client/create_channel_internal.h"
 #include "src/cpp/client/secure_credentials.h"
 
 namespace grpc {
 
-std::shared_ptr<grpc::ChannelInterface> SecureCredentials::CreateChannel(
+std::shared_ptr<grpc::Channel> SecureCredentials::CreateChannel(
     const string& target, const grpc::ChannelArguments& args) {
   grpc_channel_args channel_args;
   args.SetChannelArgs(&channel_args);
-  return std::shared_ptr<ChannelInterface>(new Channel(
+  return CreateChannelInternal(
       args.GetSslTargetNameOverride(),
-      grpc_secure_channel_create(c_creds_, target.c_str(), &channel_args)));
+      grpc_secure_channel_create(c_creds_, target.c_str(), &channel_args));
 }
 
 bool SecureCredentials::ApplyToCall(grpc_call* call) {
diff --git a/src/cpp/client/secure_credentials.h b/src/cpp/client/secure_credentials.h
index c2b8d43..62d3185 100644
--- a/src/cpp/client/secure_credentials.h
+++ b/src/cpp/client/secure_credentials.h
@@ -36,7 +36,7 @@
 
 #include <grpc/grpc_security.h>
 
-#include <grpc++/config.h>
+#include <grpc++/support/config.h>
 #include <grpc++/credentials.h>
 
 namespace grpc {
@@ -48,7 +48,7 @@
   grpc_credentials* GetRawCreds() { return c_creds_; }
   bool ApplyToCall(grpc_call* call) GRPC_OVERRIDE;
 
-  std::shared_ptr<grpc::ChannelInterface> CreateChannel(
+  std::shared_ptr<grpc::Channel> CreateChannel(
       const string& target, const grpc::ChannelArguments& args) GRPC_OVERRIDE;
   SecureCredentials* AsSecureCredentials() GRPC_OVERRIDE { return this; }
 
diff --git a/src/cpp/common/auth_property_iterator.cc b/src/cpp/common/auth_property_iterator.cc
index d3bfd5c..5ccf8cf 100644
--- a/src/cpp/common/auth_property_iterator.cc
+++ b/src/cpp/common/auth_property_iterator.cc
@@ -31,7 +31,7 @@
  *
  */
 
-#include <grpc++/auth_context.h>
+#include <grpc++/support/auth_context.h>
 
 #include <grpc/grpc_security.h>
 
diff --git a/src/cpp/common/call.cc b/src/cpp/common/call.cc
index 0a5c976..16aa2c9 100644
--- a/src/cpp/common/call.cc
+++ b/src/cpp/common/call.cc
@@ -34,10 +34,9 @@
 #include <grpc++/impl/call.h>
 
 #include <grpc/support/alloc.h>
-#include <grpc++/byte_buffer.h>
+#include <grpc++/channel.h>
 #include <grpc++/client_context.h>
-#include <grpc++/channel_interface.h>
-
+#include <grpc++/support/byte_buffer.h>
 #include "src/core/profiling/timers.h"
 
 namespace grpc {
diff --git a/src/cpp/common/completion_queue.cc b/src/cpp/common/completion_queue.cc
index fca33f8..a175beb 100644
--- a/src/cpp/common/completion_queue.cc
+++ b/src/cpp/common/completion_queue.cc
@@ -36,7 +36,7 @@
 
 #include <grpc/grpc.h>
 #include <grpc/support/log.h>
-#include <grpc++/time.h>
+#include <grpc++/support/time.h>
 
 namespace grpc {
 
diff --git a/src/cpp/common/create_auth_context.h b/src/cpp/common/create_auth_context.h
index 9082a90..b4962ba 100644
--- a/src/cpp/common/create_auth_context.h
+++ b/src/cpp/common/create_auth_context.h
@@ -33,7 +33,7 @@
 #include <memory>
 
 #include <grpc/grpc.h>
-#include <grpc++/auth_context.h>
+#include <grpc++/support/auth_context.h>
 
 namespace grpc {
 
diff --git a/src/cpp/common/insecure_create_auth_context.cc b/src/cpp/common/insecure_create_auth_context.cc
index 07fc0bd..fe80c1a 100644
--- a/src/cpp/common/insecure_create_auth_context.cc
+++ b/src/cpp/common/insecure_create_auth_context.cc
@@ -33,7 +33,7 @@
 #include <memory>
 
 #include <grpc/grpc.h>
-#include <grpc++/auth_context.h>
+#include <grpc++/support/auth_context.h>
 
 namespace grpc {
 
diff --git a/src/cpp/common/secure_auth_context.h b/src/cpp/common/secure_auth_context.h
index 264ed62..01b7126 100644
--- a/src/cpp/common/secure_auth_context.h
+++ b/src/cpp/common/secure_auth_context.h
@@ -34,7 +34,7 @@
 #ifndef GRPC_INTERNAL_CPP_COMMON_SECURE_AUTH_CONTEXT_H
 #define GRPC_INTERNAL_CPP_COMMON_SECURE_AUTH_CONTEXT_H
 
-#include <grpc++/auth_context.h>
+#include <grpc++/support/auth_context.h>
 
 struct grpc_auth_context;
 
diff --git a/src/cpp/common/secure_create_auth_context.cc b/src/cpp/common/secure_create_auth_context.cc
index d81f4bb..f13d25a 100644
--- a/src/cpp/common/secure_create_auth_context.cc
+++ b/src/cpp/common/secure_create_auth_context.cc
@@ -34,7 +34,7 @@
 
 #include <grpc/grpc.h>
 #include <grpc/grpc_security.h>
-#include <grpc++/auth_context.h>
+#include <grpc++/support/auth_context.h>
 #include "src/cpp/common/secure_auth_context.h"
 
 namespace grpc {
diff --git a/src/cpp/proto/proto_utils.cc b/src/cpp/proto/proto_utils.cc
index 05470ec..be84c22 100644
--- a/src/cpp/proto/proto_utils.cc
+++ b/src/cpp/proto/proto_utils.cc
@@ -32,7 +32,6 @@
  */
 
 #include <grpc++/impl/proto_utils.h>
-#include <grpc++/config.h>
 
 #include <grpc/grpc.h>
 #include <grpc/byte_buffer.h>
@@ -40,6 +39,7 @@
 #include <grpc/support/slice.h>
 #include <grpc/support/slice_buffer.h>
 #include <grpc/support/port_platform.h>
+#include <grpc++/support/config.h>
 
 const int kMaxBufferLength = 8192;
 
diff --git a/src/cpp/server/async_generic_service.cc b/src/cpp/server/async_generic_service.cc
index 2e99afc..6b9ea53 100644
--- a/src/cpp/server/async_generic_service.cc
+++ b/src/cpp/server/async_generic_service.cc
@@ -31,7 +31,7 @@
  *
  */
 
-#include <grpc++/async_generic_service.h>
+#include <grpc++/generic/async_generic_service.h>
 
 #include <grpc++/server.h>
 
diff --git a/src/cpp/server/create_default_thread_pool.cc b/src/cpp/server/create_default_thread_pool.cc
index 9f59d25..f3b07ec 100644
--- a/src/cpp/server/create_default_thread_pool.cc
+++ b/src/cpp/server/create_default_thread_pool.cc
@@ -32,7 +32,8 @@
  */
 
 #include <grpc/support/cpu.h>
-#include <grpc++/dynamic_thread_pool.h>
+
+#include "src/cpp/server/dynamic_thread_pool.h"
 
 #ifndef GRPC_CUSTOM_DEFAULT_THREAD_POOL
 
diff --git a/src/cpp/server/dynamic_thread_pool.cc b/src/cpp/server/dynamic_thread_pool.cc
index b475f43..4b226c2 100644
--- a/src/cpp/server/dynamic_thread_pool.cc
+++ b/src/cpp/server/dynamic_thread_pool.cc
@@ -33,7 +33,8 @@
 
 #include <grpc++/impl/sync.h>
 #include <grpc++/impl/thd.h>
-#include <grpc++/dynamic_thread_pool.h>
+
+#include "src/cpp/server/dynamic_thread_pool.h"
 
 namespace grpc {
 DynamicThreadPool::DynamicThread::DynamicThread(DynamicThreadPool* pool)
diff --git a/include/grpc++/dynamic_thread_pool.h b/src/cpp/server/dynamic_thread_pool.h
similarity index 91%
rename from include/grpc++/dynamic_thread_pool.h
rename to src/cpp/server/dynamic_thread_pool.h
index a4d4885..5ba7533 100644
--- a/include/grpc++/dynamic_thread_pool.h
+++ b/src/cpp/server/dynamic_thread_pool.h
@@ -31,19 +31,19 @@
  *
  */
 
-#ifndef GRPCXX_DYNAMIC_THREAD_POOL_H
-#define GRPCXX_DYNAMIC_THREAD_POOL_H
-
-#include <grpc++/config.h>
-
-#include <grpc++/impl/sync.h>
-#include <grpc++/impl/thd.h>
-#include <grpc++/thread_pool_interface.h>
+#ifndef GRPC_INTERNAL_CPP_DYNAMIC_THREAD_POOL_H
+#define GRPC_INTERNAL_CPP_DYNAMIC_THREAD_POOL_H
 
 #include <list>
 #include <memory>
 #include <queue>
 
+#include <grpc++/impl/sync.h>
+#include <grpc++/impl/thd.h>
+#include <grpc++/support/config.h>
+
+#include "src/cpp/server/thread_pool_interface.h"
+
 namespace grpc {
 
 class DynamicThreadPool GRPC_FINAL : public ThreadPoolInterface {
@@ -80,4 +80,4 @@
 
 }  // namespace grpc
 
-#endif  // GRPCXX_DYNAMIC_THREAD_POOL_H
+#endif  // GRPC_INTERNAL_CPP_DYNAMIC_THREAD_POOL_H
diff --git a/src/cpp/server/fixed_size_thread_pool.cc b/src/cpp/server/fixed_size_thread_pool.cc
index bafbc58..2bdc44b 100644
--- a/src/cpp/server/fixed_size_thread_pool.cc
+++ b/src/cpp/server/fixed_size_thread_pool.cc
@@ -33,7 +33,7 @@
 
 #include <grpc++/impl/sync.h>
 #include <grpc++/impl/thd.h>
-#include <grpc++/fixed_size_thread_pool.h>
+#include "src/cpp/server/fixed_size_thread_pool.h"
 
 namespace grpc {
 
diff --git a/include/grpc++/fixed_size_thread_pool.h b/src/cpp/server/fixed_size_thread_pool.h
similarity index 89%
rename from include/grpc++/fixed_size_thread_pool.h
rename to src/cpp/server/fixed_size_thread_pool.h
index 307e166..394ae58 100644
--- a/include/grpc++/fixed_size_thread_pool.h
+++ b/src/cpp/server/fixed_size_thread_pool.h
@@ -31,18 +31,18 @@
  *
  */
 
-#ifndef GRPCXX_FIXED_SIZE_THREAD_POOL_H
-#define GRPCXX_FIXED_SIZE_THREAD_POOL_H
-
-#include <grpc++/config.h>
-
-#include <grpc++/impl/sync.h>
-#include <grpc++/impl/thd.h>
-#include <grpc++/thread_pool_interface.h>
+#ifndef GRPC_INTERNAL_CPP_FIXED_SIZE_THREAD_POOL_H
+#define GRPC_INTERNAL_CPP_FIXED_SIZE_THREAD_POOL_H
 
 #include <queue>
 #include <vector>
 
+#include <grpc++/impl/sync.h>
+#include <grpc++/impl/thd.h>
+#include <grpc++/support/config.h>
+
+#include "src/cpp/server/thread_pool_interface.h"
+
 namespace grpc {
 
 class FixedSizeThreadPool GRPC_FINAL : public ThreadPoolInterface {
@@ -64,4 +64,4 @@
 
 }  // namespace grpc
 
-#endif  // GRPCXX_FIXED_SIZE_THREAD_POOL_H
+#endif  // GRPC_INTERNAL_CPP_FIXED_SIZE_THREAD_POOL_H
diff --git a/src/cpp/server/secure_server_credentials.h b/src/cpp/server/secure_server_credentials.h
index b9803f1..d3d37b1 100644
--- a/src/cpp/server/secure_server_credentials.h
+++ b/src/cpp/server/secure_server_credentials.h
@@ -34,10 +34,10 @@
 #ifndef GRPC_INTERNAL_CPP_SERVER_SECURE_SERVER_CREDENTIALS_H
 #define GRPC_INTERNAL_CPP_SERVER_SECURE_SERVER_CREDENTIALS_H
 
-#include <grpc/grpc_security.h>
-
 #include <grpc++/server_credentials.h>
 
+#include <grpc/grpc_security.h>
+
 namespace grpc {
 
 class SecureServerCredentials GRPC_FINAL : public ServerCredentials {
diff --git a/src/cpp/server/server.cc b/src/cpp/server/server.cc
index e039c07..66cd27c 100644
--- a/src/cpp/server/server.cc
+++ b/src/cpp/server/server.cc
@@ -32,24 +32,71 @@
  */
 
 #include <grpc++/server.h>
+
 #include <utility>
 
 #include <grpc/grpc.h>
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
 #include <grpc++/completion_queue.h>
-#include <grpc++/async_generic_service.h>
+#include <grpc++/generic/async_generic_service.h>
 #include <grpc++/impl/rpc_service_method.h>
 #include <grpc++/impl/service_type.h>
 #include <grpc++/server_context.h>
 #include <grpc++/server_credentials.h>
-#include <grpc++/thread_pool_interface.h>
-#include <grpc++/time.h>
+#include <grpc++/support/time.h>
 
 #include "src/core/profiling/timers.h"
+#include "src/cpp/server/thread_pool_interface.h"
 
 namespace grpc {
 
+class Server::UnimplementedAsyncRequestContext {
+ protected:
+  UnimplementedAsyncRequestContext() : generic_stream_(&server_context_) {}
+
+  GenericServerContext server_context_;
+  GenericServerAsyncReaderWriter generic_stream_;
+};
+
+class Server::UnimplementedAsyncRequest GRPC_FINAL
+    : public UnimplementedAsyncRequestContext,
+      public GenericAsyncRequest {
+ public:
+  UnimplementedAsyncRequest(Server* server, ServerCompletionQueue* cq)
+      : GenericAsyncRequest(server, &server_context_, &generic_stream_, cq, cq,
+                            NULL, false),
+        server_(server),
+        cq_(cq) {}
+
+  bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE;
+
+  ServerContext* context() { return &server_context_; }
+  GenericServerAsyncReaderWriter* stream() { return &generic_stream_; }
+
+ private:
+  Server* const server_;
+  ServerCompletionQueue* const cq_;
+};
+
+typedef SneakyCallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus>
+    UnimplementedAsyncResponseOp;
+class Server::UnimplementedAsyncResponse GRPC_FINAL
+    : public UnimplementedAsyncResponseOp {
+ public:
+  UnimplementedAsyncResponse(UnimplementedAsyncRequest* request);
+  ~UnimplementedAsyncResponse() { delete request_; }
+
+  bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE {
+    bool r = UnimplementedAsyncResponseOp::FinalizeResult(tag, status);
+    delete this;
+    return r;
+  }
+
+ private:
+  UnimplementedAsyncRequest* const request_;
+};
+
 class Server::ShutdownRequest GRPC_FINAL : public CompletionQueueTag {
  public:
   bool FinalizeResult(void** tag, bool* status) {
@@ -297,18 +344,23 @@
   return creds->AddPortToServer(addr, server_);
 }
 
-bool Server::Start() {
+bool Server::Start(ServerCompletionQueue** cqs, size_t num_cqs) {
   GPR_ASSERT(!started_);
   started_ = true;
   grpc_server_start(server_);
 
   if (!has_generic_service_) {
-    unknown_method_.reset(new RpcServiceMethod(
-        "unknown", RpcMethod::BIDI_STREAMING, new UnknownMethodHandler));
-    // Use of emplace_back with just constructor arguments is not accepted here
-    // by gcc-4.4 because it can't match the anonymous nullptr with a proper
-    // constructor implicitly. Construct the object and use push_back.
-    sync_methods_->push_back(SyncRequest(unknown_method_.get(), nullptr));
+    if (!sync_methods_->empty()) {
+      unknown_method_.reset(new RpcServiceMethod(
+          "unknown", RpcMethod::BIDI_STREAMING, new UnknownMethodHandler));
+      // Use of emplace_back with just constructor arguments is not accepted
+      // here by gcc-4.4 because it can't match the anonymous nullptr with a 
+      // proper constructor implicitly. Construct the object and use push_back.
+      sync_methods_->push_back(SyncRequest(unknown_method_.get(), nullptr));
+    }
+    for (size_t i = 0; i < num_cqs; i++) {
+      new UnimplementedAsyncRequest(this, cqs[i]);
+    }
   }
   // Start processing rpcs.
   if (!sync_methods_->empty()) {
@@ -370,12 +422,14 @@
 
 Server::BaseAsyncRequest::BaseAsyncRequest(
     Server* server, ServerContext* context,
-    ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq, void* tag)
+    ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq, void* tag,
+    bool delete_on_finalize)
     : server_(server),
       context_(context),
       stream_(stream),
       call_cq_(call_cq),
       tag_(tag),
+      delete_on_finalize_(delete_on_finalize),
       call_(nullptr) {
   memset(&initial_metadata_array_, 0, sizeof(initial_metadata_array_));
 }
@@ -402,14 +456,16 @@
   // just the pointers inside call are copied here
   stream_->BindCall(&call);
   *tag = tag_;
-  delete this;
+  if (delete_on_finalize_) {
+    delete this;
+  }
   return true;
 }
 
 Server::RegisteredAsyncRequest::RegisteredAsyncRequest(
     Server* server, ServerContext* context,
     ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq, void* tag)
-    : BaseAsyncRequest(server, context, stream, call_cq, tag) {}
+    : BaseAsyncRequest(server, context, stream, call_cq, tag, true) {}
 
 void Server::RegisteredAsyncRequest::IssueRequest(
     void* registered_method, grpc_byte_buffer** payload,
@@ -423,8 +479,9 @@
 Server::GenericAsyncRequest::GenericAsyncRequest(
     Server* server, GenericServerContext* context,
     ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
-    ServerCompletionQueue* notification_cq, void* tag)
-    : BaseAsyncRequest(server, context, stream, call_cq, tag) {
+    ServerCompletionQueue* notification_cq, void* tag, bool delete_on_finalize)
+    : BaseAsyncRequest(server, context, stream, call_cq, tag,
+                       delete_on_finalize) {
   grpc_call_details_init(&call_details_);
   GPR_ASSERT(notification_cq);
   GPR_ASSERT(call_cq);
@@ -445,6 +502,25 @@
   return BaseAsyncRequest::FinalizeResult(tag, status);
 }
 
+bool Server::UnimplementedAsyncRequest::FinalizeResult(void** tag,
+                                                       bool* status) {
+  if (GenericAsyncRequest::FinalizeResult(tag, status) && *status) {
+    new UnimplementedAsyncRequest(server_, cq_);
+    new UnimplementedAsyncResponse(this);
+  } else {
+    delete this;
+  }
+  return false;
+}
+
+Server::UnimplementedAsyncResponse::UnimplementedAsyncResponse(
+    UnimplementedAsyncRequest* request)
+    : request_(request) {
+  Status status(StatusCode::UNIMPLEMENTED, "");
+  UnknownMethodHandler::FillOps(request_->context(), this);
+  request_->stream()->call_.PerformOps(this);
+}
+
 void Server::ScheduleCallback() {
   {
     grpc::unique_lock<grpc::mutex> lock(mu_);
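The UnimplementedAsyncRequest / UnimplementedAsyncResponse pair gives async-only servers a built-in answer for unknown methods: Start() seeds one pending generic request per completion queue, and whenever one of them matches an unregistered method it re-arms itself and replies with UNIMPLEMENTED, without ever surfacing a tag to the application. A stripped-down sketch of that self-replenishing pattern (simplified names, not the actual classes; the elided steps are marked in comments):

    // Illustrative only: the "catch-all" request re-arms itself on success
    // and tears itself down when the completion queue shuts down.
    class CatchAllRequest {
     public:
      CatchAllRequest(Server* server, ServerCompletionQueue* cq)
          : server_(server), cq_(cq) {
        // ... issue a generic request for the next call to an unknown method ...
      }

      bool FinalizeResult(void** tag, bool* status) {
        if (*status) {
          new CatchAllRequest(server_, cq_);  // keep one request always pending
          // ... send StatusCode::UNIMPLEMENTED; the response op deletes this ...
        } else {
          delete this;  // shutdown path: nothing more to do
        }
        return false;   // never handed to application code
      }

     private:
      Server* const server_;
      ServerCompletionQueue* const cq_;
    };
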
diff --git a/src/cpp/server/server_builder.cc b/src/cpp/server/server_builder.cc
index 0b11d86..b739cbf 100644
--- a/src/cpp/server/server_builder.cc
+++ b/src/cpp/server/server_builder.cc
@@ -37,8 +37,8 @@
 #include <grpc/support/log.h>
 #include <grpc++/impl/service_type.h>
 #include <grpc++/server.h>
-#include <grpc++/thread_pool_interface.h>
-#include <grpc++/fixed_size_thread_pool.h>
+#include "src/cpp/server/thread_pool_interface.h"
+#include "src/cpp/server/fixed_size_thread_pool.h"
 
 namespace grpc {
 
@@ -89,10 +89,6 @@
   ports_.push_back(port);
 }
 
-void ServerBuilder::SetThreadPool(ThreadPoolInterface* thread_pool) {
-  thread_pool_ = thread_pool;
-}
-
 std::unique_ptr<Server> ServerBuilder::BuildAndStart() {
   bool thread_pool_owned = false;
   if (!async_services_.empty() && !services_.empty()) {
@@ -103,12 +99,6 @@
     thread_pool_ = CreateDefaultThreadPool();
     thread_pool_owned = true;
   }
-  // Async services only, create a thread pool to handle requests to unknown
-  // services.
-  if (!thread_pool_ && !generic_service_ && !async_services_.empty()) {
-    thread_pool_ = new FixedSizeThreadPool(1);
-    thread_pool_owned = true;
-  }
   std::unique_ptr<Server> server(
       new Server(thread_pool_, thread_pool_owned, max_message_size_));
   for (auto cq = cqs_.begin(); cq != cqs_.end(); ++cq) {
@@ -138,7 +128,7 @@
       *port->selected_port = r;
     }
   }
-  if (!server->Start()) {
+  if (!server->Start(&cqs_[0], cqs_.size())) {
     return nullptr;
   }
   return server;
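Two builder-level consequences follow: SetThreadPool() leaves the public surface (the thread-pool headers move under src/cpp/server/ below), and BuildAndStart() now passes its registered completion queues into Server::Start() so the unimplemented-method requests can be armed on each of them. An async-only server therefore no longer needs the implicit FixedSizeThreadPool(1). A hedged sketch of such a setup, assuming the era's ServerBuilder API (AddListeningPort, RegisterAsyncService, AddCompletionQueue) and with the service pointer as a placeholder:

    #include <grpc++/server.h>
    #include <grpc++/server_builder.h>
    #include <grpc++/server_credentials.h>

    void RunAsyncServer(grpc::AsynchronousService* service /* placeholder */) {
      grpc::ServerBuilder builder;
      builder.AddListeningPort("0.0.0.0:50051",
                               grpc::InsecureServerCredentials());
      builder.RegisterAsyncService(service);
      std::unique_ptr<grpc::ServerCompletionQueue> cq =
          builder.AddCompletionQueue();
      std::unique_ptr<grpc::Server> server = builder.BuildAndStart();
      // Unknown methods on cq now get UNIMPLEMENTED automatically;
      // drive cq->Next()/AsyncNext() from one or more worker threads.
    }
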
diff --git a/src/cpp/server/server_context.cc b/src/cpp/server/server_context.cc
index 03461dd..acc163d 100644
--- a/src/cpp/server/server_context.cc
+++ b/src/cpp/server/server_context.cc
@@ -38,7 +38,7 @@
 #include <grpc/support/log.h>
 #include <grpc++/impl/call.h>
 #include <grpc++/impl/sync.h>
-#include <grpc++/time.h>
+#include <grpc++/support/time.h>
 
 #include "src/core/channel/compress_filter.h"
 #include "src/cpp/common/create_auth_context.h"
diff --git a/include/grpc++/thread_pool_interface.h b/src/cpp/server/thread_pool_interface.h
similarity index 92%
rename from include/grpc++/thread_pool_interface.h
rename to src/cpp/server/thread_pool_interface.h
index d080b31..1ebe30f 100644
--- a/include/grpc++/thread_pool_interface.h
+++ b/src/cpp/server/thread_pool_interface.h
@@ -31,8 +31,8 @@
  *
  */
 
-#ifndef GRPCXX_THREAD_POOL_INTERFACE_H
-#define GRPCXX_THREAD_POOL_INTERFACE_H
+#ifndef GRPC_INTERNAL_CPP_THREAD_POOL_INTERFACE_H
+#define GRPC_INTERNAL_CPP_THREAD_POOL_INTERFACE_H
 
 #include <functional>
 
@@ -51,4 +51,4 @@
 
 }  // namespace grpc
 
-#endif  // GRPCXX_THREAD_POOL_INTERFACE_H
+#endif  // GRPC_INTERNAL_CPP_THREAD_POOL_INTERFACE_H
diff --git a/src/cpp/util/byte_buffer.cc b/src/cpp/util/byte_buffer.cc
index a66c92c..e46e656 100644
--- a/src/cpp/util/byte_buffer.cc
+++ b/src/cpp/util/byte_buffer.cc
@@ -32,7 +32,7 @@
  */
 
 #include <grpc/byte_buffer_reader.h>
-#include <grpc++/byte_buffer.h>
+#include <grpc++/support/byte_buffer.h>
 
 namespace grpc {
 
diff --git a/src/cpp/util/slice.cc b/src/cpp/util/slice.cc
index 57370da..7e88423 100644
--- a/src/cpp/util/slice.cc
+++ b/src/cpp/util/slice.cc
@@ -31,7 +31,7 @@
  *
  */
 
-#include <grpc++/slice.h>
+#include <grpc++/support/slice.h>
 
 namespace grpc {
 
diff --git a/src/cpp/util/status.cc b/src/cpp/util/status.cc
index 5bb9eda..ad9850c 100644
--- a/src/cpp/util/status.cc
+++ b/src/cpp/util/status.cc
@@ -31,7 +31,7 @@
  *
  */
 
-#include <grpc++/status.h>
+#include <grpc++/support/status.h>
 
 namespace grpc {
 
diff --git a/src/cpp/util/time.cc b/src/cpp/util/time.cc
index 799c597..b3401eb 100644
--- a/src/cpp/util/time.cc
+++ b/src/cpp/util/time.cc
@@ -31,12 +31,12 @@
  *
  */
 
-#include <grpc++/config.h>
+#include <grpc++/support/config.h>
 
 #ifndef GRPC_CXX0X_NO_CHRONO
 
 #include <grpc/support/time.h>
-#include <grpc++/time.h>
+#include <grpc++/support/time.h>
 
 using std::chrono::duration_cast;
 using std::chrono::nanoseconds;
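The remaining util changes are mechanical include moves: the flat grpc++ utility headers now live under include/grpc++/support/. For code outside the tree, the migration mirrors the renames shown in this diff, for example:

    // Mapping taken from the renames above; old spellings in comments.
    #include <grpc++/support/byte_buffer.h>       // was <grpc++/byte_buffer.h>
    #include <grpc++/support/channel_arguments.h> // was <grpc++/channel_arguments.h>
    #include <grpc++/support/config.h>            // was <grpc++/config.h>
    #include <grpc++/support/slice.h>             // was <grpc++/slice.h>
    #include <grpc++/support/status.h>            // was <grpc++/status.h>
    #include <grpc++/support/time.h>              // was <grpc++/time.h>
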
diff --git a/src/csharp/.gitignore b/src/csharp/.gitignore
index ae48956..48365e3 100644
--- a/src/csharp/.gitignore
+++ b/src/csharp/.gitignore
@@ -5,4 +5,5 @@
 packages
 Grpc.v12.suo
 TestResult.xml
+/TestResults
 *.nupkg
diff --git a/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj b/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj
index ad4e94a..b571fe9 100644
--- a/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj
+++ b/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj
@@ -65,6 +65,7 @@
     </Compile>
     <Compile Include="ClientBaseTest.cs" />
     <Compile Include="ShutdownTest.cs" />
+    <Compile Include="Internal\AsyncCallTest.cs" />
     <Compile Include="Properties\AssemblyInfo.cs" />
     <Compile Include="ClientServerTest.cs" />
     <Compile Include="ServerTest.cs" />
diff --git a/src/csharp/Grpc.Core.Tests/GrpcEnvironmentTest.cs b/src/csharp/Grpc.Core.Tests/GrpcEnvironmentTest.cs
index 4fdfab5..78295cf 100644
--- a/src/csharp/Grpc.Core.Tests/GrpcEnvironmentTest.cs
+++ b/src/csharp/Grpc.Core.Tests/GrpcEnvironmentTest.cs
@@ -53,7 +53,7 @@
         {
             var env1 = GrpcEnvironment.AddRef();
             var env2 = GrpcEnvironment.AddRef();
-            Assert.IsTrue(object.ReferenceEquals(env1, env2));
+            Assert.AreSame(env1, env2);
             GrpcEnvironment.Release();
             GrpcEnvironment.Release();
         }
@@ -61,18 +61,21 @@
         [Test]
         public void InitializeAfterShutdown()
         {
+            Assert.AreEqual(0, GrpcEnvironment.GetRefCount());
+
             var env1 = GrpcEnvironment.AddRef();
             GrpcEnvironment.Release();
 
             var env2 = GrpcEnvironment.AddRef();
             GrpcEnvironment.Release();
 
-            Assert.IsFalse(object.ReferenceEquals(env1, env2));
+            Assert.AreNotSame(env1, env2);
         }
 
         [Test]
         public void ReleaseWithoutAddRef()
         {
+            Assert.AreEqual(0, GrpcEnvironment.GetRefCount());
             Assert.Throws(typeof(InvalidOperationException), () => GrpcEnvironment.Release());
         }
 
diff --git a/src/csharp/Grpc.Core.Tests/Internal/AsyncCallTest.cs b/src/csharp/Grpc.Core.Tests/Internal/AsyncCallTest.cs
new file mode 100644
index 0000000..685c5f7
--- /dev/null
+++ b/src/csharp/Grpc.Core.Tests/Internal/AsyncCallTest.cs
@@ -0,0 +1,222 @@
+#region Copyright notice and license
+
+// Copyright 2015, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#endregion
+
+using System;
+using System.Runtime.InteropServices;
+using System.Threading.Tasks;
+
+using Grpc.Core.Internal;
+using NUnit.Framework;
+
+namespace Grpc.Core.Internal.Tests
+{
+    public class AsyncCallTest
+    {
+        Channel channel;
+        FakeNativeCall fakeCall;
+        AsyncCall<string, string> asyncCall;
+
+        [SetUp]
+        public void Init()
+        {
+            channel = new Channel("localhost", Credentials.Insecure);
+
+            fakeCall = new FakeNativeCall();
+
+            var callDetails = new CallInvocationDetails<string, string>(channel, "someMethod", null, Marshallers.StringMarshaller, Marshallers.StringMarshaller, new CallOptions());
+            asyncCall = new AsyncCall<string, string>(callDetails, fakeCall);
+        }
+
+        [TearDown]
+        public void Cleanup()
+        {
+            channel.ShutdownAsync().Wait();
+        }
+
+        [Test]
+        public void AsyncUnary_CompletionSuccess()
+        {
+            var resultTask = asyncCall.UnaryCallAsync("abc");
+            fakeCall.UnaryResponseClientHandler(true, new ClientSideStatus(Status.DefaultSuccess, new Metadata()), new byte[] { 1, 2, 3 }, new Metadata());
+            Assert.IsTrue(resultTask.IsCompleted);
+            Assert.IsTrue(fakeCall.IsDisposed);
+            Assert.AreEqual(Status.DefaultSuccess, asyncCall.GetStatus());
+        }
+
+        [Test]
+        public void AsyncUnary_CompletionFailure()
+        {
+            var resultTask = asyncCall.UnaryCallAsync("abc");
+            fakeCall.UnaryResponseClientHandler(false, new ClientSideStatus(new Status(StatusCode.Internal, ""), null), new byte[] { 1, 2, 3 }, new Metadata());
+
+            Assert.IsTrue(resultTask.IsCompleted);
+            Assert.IsTrue(fakeCall.IsDisposed);
+
+            Assert.AreEqual(StatusCode.Internal, asyncCall.GetStatus().StatusCode);
+            Assert.IsNull(asyncCall.GetTrailers());
+            var ex = Assert.Throws<RpcException>(() => resultTask.GetAwaiter().GetResult());
+            Assert.AreEqual(StatusCode.Internal, ex.Status.StatusCode);
+        }
+
+        internal class FakeNativeCall : INativeCall
+        {
+            public UnaryResponseClientHandler UnaryResponseClientHandler
+            {
+                get;
+                set;
+            }
+
+            public ReceivedStatusOnClientHandler ReceivedStatusOnClientHandler
+            {
+                get;
+                set;
+            }
+
+            public ReceivedMessageHandler ReceivedMessageHandler
+            {
+                get;
+                set;
+            }
+
+            public ReceivedResponseHeadersHandler ReceivedResponseHeadersHandler
+            {
+                get;
+                set;
+            }
+
+            public SendCompletionHandler SendCompletionHandler
+            {
+                get;
+                set;
+            }
+
+            public ReceivedCloseOnServerHandler ReceivedCloseOnServerHandler
+            {
+                get;
+                set;
+            }
+
+            public bool IsCancelled
+            {
+                get;
+                set;
+            }
+
+            public bool IsDisposed
+            {
+                get;
+                set;
+            }
+
+            public void Cancel()
+            {
+                IsCancelled = true;
+            }
+
+            public void CancelWithStatus(Status status)
+            {
+                IsCancelled = true;
+            }
+
+            public string GetPeer()
+            {
+                return "PEER";
+            }
+
+            public void StartUnary(UnaryResponseClientHandler callback, byte[] payload, MetadataArraySafeHandle metadataArray, WriteFlags writeFlags)
+            {
+                UnaryResponseClientHandler = callback;
+            }
+
+            public void StartUnary(BatchContextSafeHandle ctx, byte[] payload, MetadataArraySafeHandle metadataArray, WriteFlags writeFlags)
+            {
+                throw new NotImplementedException();
+            }
+
+            public void StartClientStreaming(UnaryResponseClientHandler callback, MetadataArraySafeHandle metadataArray)
+            {
+                UnaryResponseClientHandler = callback;
+            }
+
+            public void StartServerStreaming(ReceivedStatusOnClientHandler callback, byte[] payload, MetadataArraySafeHandle metadataArray, WriteFlags writeFlags)
+            {
+                ReceivedStatusOnClientHandler = callback;
+            }
+
+            public void StartDuplexStreaming(ReceivedStatusOnClientHandler callback, MetadataArraySafeHandle metadataArray)
+            {
+                ReceivedStatusOnClientHandler = callback;
+            }
+
+            public void StartReceiveMessage(ReceivedMessageHandler callback)
+            {
+                ReceivedMessageHandler = callback;
+            }
+
+            public void StartReceiveInitialMetadata(ReceivedResponseHeadersHandler callback)
+            {
+                ReceivedResponseHeadersHandler = callback;
+            }
+
+            public void StartSendInitialMetadata(SendCompletionHandler callback, MetadataArraySafeHandle metadataArray)
+            {
+                SendCompletionHandler = callback;
+            }
+
+            public void StartSendMessage(SendCompletionHandler callback, byte[] payload, WriteFlags writeFlags, bool sendEmptyInitialMetadata)
+            {
+                SendCompletionHandler = callback;
+            }
+
+            public void StartSendCloseFromClient(SendCompletionHandler callback)
+            {
+                SendCompletionHandler = callback;
+            }
+
+            public void StartSendStatusFromServer(SendCompletionHandler callback, Status status, MetadataArraySafeHandle metadataArray, bool sendEmptyInitialMetadata)
+            {
+                SendCompletionHandler = callback;
+            }
+
+            public void StartServerSide(ReceivedCloseOnServerHandler callback)
+            {
+                ReceivedCloseOnServerHandler = callback;
+            }
+
+            public void Dispose()
+            {
+                IsDisposed = true;
+            }
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/csharp/Grpc.Core.Tests/ResponseHeadersTest.cs b/src/csharp/Grpc.Core.Tests/ResponseHeadersTest.cs
index 7060067..a1648f3 100644
--- a/src/csharp/Grpc.Core.Tests/ResponseHeadersTest.cs
+++ b/src/csharp/Grpc.Core.Tests/ResponseHeadersTest.cs
@@ -32,13 +32,16 @@
 #endregion
 
 using System;
+using System.Collections.Generic;
 using System.Diagnostics;
 using System.Linq;
 using System.Threading;
 using System.Threading.Tasks;
+
 using Grpc.Core;
 using Grpc.Core.Internal;
 using Grpc.Core.Utils;
+
 using NUnit.Framework;
 
 namespace Grpc.Core.Tests
@@ -74,6 +77,80 @@
         }
 
         [Test]
+        public async Task ResponseHeadersAsync_UnaryCall()
+        {
+            helper.UnaryHandler = new UnaryServerMethod<string, string>(async (request, context) =>
+            {
+                await context.WriteResponseHeadersAsync(headers);
+                return "PASS";
+            });
+
+            var call = Calls.AsyncUnaryCall(helper.CreateUnaryCall(), "");
+            var responseHeaders = await call.ResponseHeadersAsync;
+
+            Assert.AreEqual(headers.Count, responseHeaders.Count);
+            Assert.AreEqual("ascii-header", responseHeaders[0].Key);
+            Assert.AreEqual("abcdefg", responseHeaders[0].Value);
+
+            Assert.AreEqual("PASS", await call.ResponseAsync);
+        }
+
+        [Test]
+        public async Task ResponseHeadersAsync_ClientStreamingCall()
+        {
+            helper.ClientStreamingHandler = new ClientStreamingServerMethod<string, string>(async (requestStream, context) =>
+            {
+                await context.WriteResponseHeadersAsync(headers);
+                return "PASS";
+            });
+
+            var call = Calls.AsyncClientStreamingCall(helper.CreateClientStreamingCall());
+            await call.RequestStream.CompleteAsync();
+            var responseHeaders = await call.ResponseHeadersAsync;
+
+            Assert.AreEqual("ascii-header", responseHeaders[0].Key);
+            Assert.AreEqual("PASS", await call.ResponseAsync);
+        }
+
+        [Test]
+        public async Task ResponseHeadersAsync_ServerStreamingCall()
+        {
+            helper.ServerStreamingHandler = new ServerStreamingServerMethod<string, string>(async (request, responseStream, context) =>
+            {
+                await context.WriteResponseHeadersAsync(headers);
+                await responseStream.WriteAsync("PASS");
+            });
+
+            var call = Calls.AsyncServerStreamingCall(helper.CreateServerStreamingCall(), "");
+            var responseHeaders = await call.ResponseHeadersAsync;
+
+            Assert.AreEqual("ascii-header", responseHeaders[0].Key);
+            CollectionAssert.AreEqual(new[] { "PASS" }, await call.ResponseStream.ToListAsync());
+        }
+
+        [Test]
+        public async Task ResponseHeadersAsync_DuplexStreamingCall()
+        {
+            helper.DuplexStreamingHandler = new DuplexStreamingServerMethod<string, string>(async (requestStream, responseStream, context) =>
+            {
+                await context.WriteResponseHeadersAsync(headers);
+                while (await requestStream.MoveNext())
+                {
+                    await responseStream.WriteAsync(requestStream.Current);
+                }
+            });
+
+            var call = Calls.AsyncDuplexStreamingCall(helper.CreateDuplexStreamingCall());
+            var responseHeaders = await call.ResponseHeadersAsync;
+
+            var messages = new[] { "PASS" };
+            await call.RequestStream.WriteAllAsync(messages);
+
+            Assert.AreEqual("ascii-header", responseHeaders[0].Key);
+            CollectionAssert.AreEqual(messages, await call.ResponseStream.ToListAsync());
+        }
+
+        [Test]
         public void WriteResponseHeaders_NullNotAllowed()
         {
             helper.UnaryHandler = new UnaryServerMethod<string, string>(async (request, context) =>
diff --git a/src/csharp/Grpc.Core/AsyncClientStreamingCall.cs b/src/csharp/Grpc.Core/AsyncClientStreamingCall.cs
index fb9b562..dbaa308 100644
--- a/src/csharp/Grpc.Core/AsyncClientStreamingCall.cs
+++ b/src/csharp/Grpc.Core/AsyncClientStreamingCall.cs
@@ -44,14 +44,16 @@
     {
         readonly IClientStreamWriter<TRequest> requestStream;
         readonly Task<TResponse> responseAsync;
+        readonly Task<Metadata> responseHeadersAsync;
         readonly Func<Status> getStatusFunc;
         readonly Func<Metadata> getTrailersFunc;
         readonly Action disposeAction;
 
-        public AsyncClientStreamingCall(IClientStreamWriter<TRequest> requestStream, Task<TResponse> responseAsync, Func<Status> getStatusFunc, Func<Metadata> getTrailersFunc, Action disposeAction)
+        public AsyncClientStreamingCall(IClientStreamWriter<TRequest> requestStream, Task<TResponse> responseAsync, Task<Metadata> responseHeadersAsync, Func<Status> getStatusFunc, Func<Metadata> getTrailersFunc, Action disposeAction)
         {
             this.requestStream = requestStream;
             this.responseAsync = responseAsync;
+            this.responseHeadersAsync = responseHeadersAsync;
             this.getStatusFunc = getStatusFunc;
             this.getTrailersFunc = getTrailersFunc;
             this.disposeAction = disposeAction;
@@ -69,6 +71,17 @@
         }
 
         /// <summary>
+        /// Asynchronous access to response headers.
+        /// </summary>
+        public Task<Metadata> ResponseHeadersAsync
+        {
+            get
+            {
+                return this.responseHeadersAsync;
+            }
+        }
+
+        /// <summary>
         /// Async stream to send streaming requests.
         /// </summary>
         public IClientStreamWriter<TRequest> RequestStream
diff --git a/src/csharp/Grpc.Core/AsyncDuplexStreamingCall.cs b/src/csharp/Grpc.Core/AsyncDuplexStreamingCall.cs
index 183c842..ee7ba29 100644
--- a/src/csharp/Grpc.Core/AsyncDuplexStreamingCall.cs
+++ b/src/csharp/Grpc.Core/AsyncDuplexStreamingCall.cs
@@ -32,6 +32,7 @@
 #endregion
 
 using System;
+using System.Threading.Tasks;
 
 namespace Grpc.Core
 {
@@ -42,14 +43,16 @@
     {
         readonly IClientStreamWriter<TRequest> requestStream;
         readonly IAsyncStreamReader<TResponse> responseStream;
+        readonly Task<Metadata> responseHeadersAsync;
         readonly Func<Status> getStatusFunc;
         readonly Func<Metadata> getTrailersFunc;
         readonly Action disposeAction;
 
-        public AsyncDuplexStreamingCall(IClientStreamWriter<TRequest> requestStream, IAsyncStreamReader<TResponse> responseStream, Func<Status> getStatusFunc, Func<Metadata> getTrailersFunc, Action disposeAction)
+        public AsyncDuplexStreamingCall(IClientStreamWriter<TRequest> requestStream, IAsyncStreamReader<TResponse> responseStream, Task<Metadata> responseHeadersAsync, Func<Status> getStatusFunc, Func<Metadata> getTrailersFunc, Action disposeAction)
         {
             this.requestStream = requestStream;
             this.responseStream = responseStream;
+            this.responseHeadersAsync = responseHeadersAsync;
             this.getStatusFunc = getStatusFunc;
             this.getTrailersFunc = getTrailersFunc;
             this.disposeAction = disposeAction;
@@ -78,6 +81,17 @@
         }
 
         /// <summary>
+        /// Asynchronous access to response headers.
+        /// </summary>
+        public Task<Metadata> ResponseHeadersAsync
+        {
+            get
+            {
+                return this.responseHeadersAsync;
+            }
+        }
+
+        /// <summary>
         /// Gets the call status if the call has already finished.
         /// Throws InvalidOperationException otherwise.
         /// </summary>
diff --git a/src/csharp/Grpc.Core/AsyncServerStreamingCall.cs b/src/csharp/Grpc.Core/AsyncServerStreamingCall.cs
index ab2049f..2853a79 100644
--- a/src/csharp/Grpc.Core/AsyncServerStreamingCall.cs
+++ b/src/csharp/Grpc.Core/AsyncServerStreamingCall.cs
@@ -32,6 +32,7 @@
 #endregion
 
 using System;
+using System.Threading.Tasks;
 
 namespace Grpc.Core
 {
@@ -41,13 +42,15 @@
     public sealed class AsyncServerStreamingCall<TResponse> : IDisposable
     {
         readonly IAsyncStreamReader<TResponse> responseStream;
+        readonly Task<Metadata> responseHeadersAsync;
         readonly Func<Status> getStatusFunc;
         readonly Func<Metadata> getTrailersFunc;
         readonly Action disposeAction;
 
-        public AsyncServerStreamingCall(IAsyncStreamReader<TResponse> responseStream, Func<Status> getStatusFunc, Func<Metadata> getTrailersFunc, Action disposeAction)
+        public AsyncServerStreamingCall(IAsyncStreamReader<TResponse> responseStream, Task<Metadata> responseHeadersAsync, Func<Status> getStatusFunc, Func<Metadata> getTrailersFunc, Action disposeAction)
         {
             this.responseStream = responseStream;
+            this.responseHeadersAsync = responseHeadersAsync;
             this.getStatusFunc = getStatusFunc;
             this.getTrailersFunc = getTrailersFunc;
             this.disposeAction = disposeAction;
@@ -65,6 +68,17 @@
         }
 
         /// <summary>
+        /// Asynchronous access to response headers.
+        /// </summary>
+        public Task<Metadata> ResponseHeadersAsync
+        {
+            get
+            {
+                return this.responseHeadersAsync;
+            }
+        }
+
+        /// <summary>
         /// Gets the call status if the call has already finished.
         /// Throws InvalidOperationException otherwise.
         /// </summary>
diff --git a/src/csharp/Grpc.Core/AsyncUnaryCall.cs b/src/csharp/Grpc.Core/AsyncUnaryCall.cs
index 224e343..154a17a 100644
--- a/src/csharp/Grpc.Core/AsyncUnaryCall.cs
+++ b/src/csharp/Grpc.Core/AsyncUnaryCall.cs
@@ -43,13 +43,15 @@
     public sealed class AsyncUnaryCall<TResponse> : IDisposable
     {
         readonly Task<TResponse> responseAsync;
+        readonly Task<Metadata> responseHeadersAsync;
         readonly Func<Status> getStatusFunc;
         readonly Func<Metadata> getTrailersFunc;
         readonly Action disposeAction;
 
-        public AsyncUnaryCall(Task<TResponse> responseAsync, Func<Status> getStatusFunc, Func<Metadata> getTrailersFunc, Action disposeAction)
+        public AsyncUnaryCall(Task<TResponse> responseAsync, Task<Metadata> responseHeadersAsync, Func<Status> getStatusFunc, Func<Metadata> getTrailersFunc, Action disposeAction)
         {
             this.responseAsync = responseAsync;
+            this.responseHeadersAsync = responseHeadersAsync;
             this.getStatusFunc = getStatusFunc;
             this.getTrailersFunc = getTrailersFunc;
             this.disposeAction = disposeAction;
@@ -67,6 +69,17 @@
         }
 
         /// <summary>
+        /// Asynchronous access to response headers.
+        /// </summary>
+        public Task<Metadata> ResponseHeadersAsync
+        {
+            get
+            {
+                return this.responseHeadersAsync;
+            }
+        }
+
+        /// <summary>
         /// Allows awaiting this object directly.
         /// </summary>
         public TaskAwaiter<TResponse> GetAwaiter()
diff --git a/src/csharp/Grpc.Core/Calls.cs b/src/csharp/Grpc.Core/Calls.cs
index 7067456..e57ac89 100644
--- a/src/csharp/Grpc.Core/Calls.cs
+++ b/src/csharp/Grpc.Core/Calls.cs
@@ -74,7 +74,7 @@
         {
             var asyncCall = new AsyncCall<TRequest, TResponse>(call);
             var asyncResult = asyncCall.UnaryCallAsync(req);
-            return new AsyncUnaryCall<TResponse>(asyncResult, asyncCall.GetStatus, asyncCall.GetTrailers, asyncCall.Cancel);
+            return new AsyncUnaryCall<TResponse>(asyncResult, asyncCall.ResponseHeadersAsync, asyncCall.GetStatus, asyncCall.GetTrailers, asyncCall.Cancel);
         }
 
         /// <summary>
@@ -93,7 +93,7 @@
             var asyncCall = new AsyncCall<TRequest, TResponse>(call);
             asyncCall.StartServerStreamingCall(req);
             var responseStream = new ClientResponseStream<TRequest, TResponse>(asyncCall);
-            return new AsyncServerStreamingCall<TResponse>(responseStream, asyncCall.GetStatus, asyncCall.GetTrailers, asyncCall.Cancel);
+            return new AsyncServerStreamingCall<TResponse>(responseStream, asyncCall.ResponseHeadersAsync, asyncCall.GetStatus, asyncCall.GetTrailers, asyncCall.Cancel);
         }
 
         /// <summary>
@@ -110,7 +110,7 @@
             var asyncCall = new AsyncCall<TRequest, TResponse>(call);
             var resultTask = asyncCall.ClientStreamingCallAsync();
             var requestStream = new ClientRequestStream<TRequest, TResponse>(asyncCall);
-            return new AsyncClientStreamingCall<TRequest, TResponse>(requestStream, resultTask, asyncCall.GetStatus, asyncCall.GetTrailers, asyncCall.Cancel);
+            return new AsyncClientStreamingCall<TRequest, TResponse>(requestStream, resultTask, asyncCall.ResponseHeadersAsync, asyncCall.GetStatus, asyncCall.GetTrailers, asyncCall.Cancel);
         }
 
         /// <summary>
@@ -130,7 +130,7 @@
             asyncCall.StartDuplexStreamingCall();
             var requestStream = new ClientRequestStream<TRequest, TResponse>(asyncCall);
             var responseStream = new ClientResponseStream<TRequest, TResponse>(asyncCall);
-            return new AsyncDuplexStreamingCall<TRequest, TResponse>(requestStream, responseStream, asyncCall.GetStatus, asyncCall.GetTrailers, asyncCall.Cancel);
+            return new AsyncDuplexStreamingCall<TRequest, TResponse>(requestStream, responseStream, asyncCall.ResponseHeadersAsync, asyncCall.GetStatus, asyncCall.GetTrailers, asyncCall.Cancel);
         }
     }
 }
diff --git a/src/csharp/Grpc.Core/Channel.cs b/src/csharp/Grpc.Core/Channel.cs
index 2f8519d..c11b320 100644
--- a/src/csharp/Grpc.Core/Channel.cs
+++ b/src/csharp/Grpc.Core/Channel.cs
@@ -58,7 +58,6 @@
         readonly List<ChannelOption> options;
 
         bool shutdownRequested;
-        bool disposed;
 
         /// <summary>
         /// Creates a channel that connects to a specific host.
diff --git a/src/csharp/Grpc.Core/Grpc.Core.csproj b/src/csharp/Grpc.Core/Grpc.Core.csproj
index 055aff1..ad2af17 100644
--- a/src/csharp/Grpc.Core/Grpc.Core.csproj
+++ b/src/csharp/Grpc.Core/Grpc.Core.csproj
@@ -49,6 +49,7 @@
     <Compile Include="AsyncDuplexStreamingCall.cs" />
     <Compile Include="AsyncServerStreamingCall.cs" />
     <Compile Include="IClientStreamWriter.cs" />
+    <Compile Include="Internal\INativeCall.cs" />
     <Compile Include="IServerStreamWriter.cs" />
     <Compile Include="IAsyncStreamWriter.cs" />
     <Compile Include="IAsyncStreamReader.cs" />
diff --git a/src/csharp/Grpc.Core/GrpcEnvironment.cs b/src/csharp/Grpc.Core/GrpcEnvironment.cs
index 0a44eea..e7c0418 100644
--- a/src/csharp/Grpc.Core/GrpcEnvironment.cs
+++ b/src/csharp/Grpc.Core/GrpcEnvironment.cs
@@ -102,6 +102,14 @@
             }
         }
 
+        internal static int GetRefCount()
+        {
+            lock (staticLock)
+            {
+                return refCount;
+            }
+        }
+
         /// <summary>
         /// Gets application-wide logger used by gRPC.
         /// </summary>
@@ -177,7 +185,6 @@
             return Marshal.PtrToStringAnsi(ptr);
         }
 
-
         internal static void GrpcNativeInit()
         {
             grpcsharp_init();
diff --git a/src/csharp/Grpc.Core/Internal/AsyncCall.cs b/src/csharp/Grpc.Core/Internal/AsyncCall.cs
index bb9ba5b..be5d611 100644
--- a/src/csharp/Grpc.Core/Internal/AsyncCall.cs
+++ b/src/csharp/Grpc.Core/Internal/AsyncCall.cs
@@ -51,22 +51,35 @@
         static readonly ILogger Logger = GrpcEnvironment.Logger.ForType<AsyncCall<TRequest, TResponse>>();
 
         readonly CallInvocationDetails<TRequest, TResponse> details;
+        readonly INativeCall injectedNativeCall;  // for testing
 
         // Completion of a pending unary response if not null.
         TaskCompletionSource<TResponse> unaryResponseTcs;
 
+        // Indicates that the streaming call has finished.
+        TaskCompletionSource<object> streamingCallFinishedTcs = new TaskCompletionSource<object>();
+
+        // Completed with the response headers once they are received.
+        TaskCompletionSource<Metadata> responseHeadersTcs = new TaskCompletionSource<Metadata>();
+
         // Set after status is received. Used for both unary and streaming response calls.
         ClientSideStatus? finishedStatus;
 
-        bool readObserverCompleted;  // True if readObserver has already been completed.
-
         public AsyncCall(CallInvocationDetails<TRequest, TResponse> callDetails)
-            : base(callDetails.RequestMarshaller.Serializer, callDetails.ResponseMarshaller.Deserializer)
+            : base(callDetails.RequestMarshaller.Serializer, callDetails.ResponseMarshaller.Deserializer, callDetails.Channel.Environment)
         {
             this.details = callDetails.WithOptions(callDetails.Options.Normalize());
             this.initialMetadataSent = true;  // we always send metadata at the very beginning of the call.
         }
 
+        /// <summary>
+        /// This constructor should only be used for testing.
+        /// </summary>
+        public AsyncCall(CallInvocationDetails<TRequest, TResponse> callDetails, INativeCall injectedNativeCall) : this(callDetails)
+        {
+            this.injectedNativeCall = injectedNativeCall;
+        }
+
         // TODO: this method is not Async, so it shouldn't be in AsyncCall class, but 
         // it is reusing fair amount of code in this class, so we are leaving it here.
         /// <summary>
@@ -100,7 +113,7 @@
                         bool success = (ev.success != 0);
                         try
                         {
-                            HandleUnaryResponse(success, ctx);
+                            HandleUnaryResponse(success, ctx.GetReceivedStatusOnClient(), ctx.GetReceivedMessage(), ctx.GetReceivedInitialMetadata());
                         }
                         catch (Exception e)
                         {
@@ -125,7 +138,7 @@
                 Preconditions.CheckState(!started);
                 started = true;
 
-                Initialize(details.Channel.Environment.CompletionQueue);
+                Initialize(environment.CompletionQueue);
 
                 halfcloseRequested = true;
                 readingDone = true;
@@ -152,7 +165,7 @@
                 Preconditions.CheckState(!started);
                 started = true;
 
-                Initialize(details.Channel.Environment.CompletionQueue);
+                Initialize(environment.CompletionQueue);
 
                 readingDone = true;
 
@@ -176,10 +189,9 @@
                 Preconditions.CheckState(!started);
                 started = true;
 
-                Initialize(details.Channel.Environment.CompletionQueue);
+                Initialize(environment.CompletionQueue);
 
                 halfcloseRequested = true;
-                halfclosed = true;  // halfclose not confirmed yet, but it will be once finishedHandler is called.
 
                 byte[] payload = UnsafeSerialize(msg);
 
@@ -187,6 +199,7 @@
                 {
                     call.StartServerStreaming(HandleFinished, payload, metadataArray, GetWriteFlagsForCall());
                 }
+                call.StartReceiveInitialMetadata(HandleReceivedResponseHeaders);
             }
         }
 
@@ -201,12 +214,13 @@
                 Preconditions.CheckState(!started);
                 started = true;
 
-                Initialize(details.Channel.Environment.CompletionQueue);
+                Initialize(environment.CompletionQueue);
 
                 using (var metadataArray = MetadataArraySafeHandle.Create(details.Options.Headers))
                 {
                     call.StartDuplexStreaming(HandleFinished, metadataArray);
                 }
+                call.StartReceiveInitialMetadata(HandleReceivedResponseHeaders);
             }
         }
 
@@ -248,6 +262,28 @@
         }
 
         /// <summary>
+        /// Gets the task that completes once the streaming call finishes with OK status and throws RpcException with the received status otherwise.
+        /// </summary>
+        public Task StreamingCallFinishedTask
+        {
+            get
+            {
+                return streamingCallFinishedTcs.Task;
+            }
+        }
+
+        /// <summary>
+        /// Gets the task that completes once response headers are received.
+        /// </summary>
+        public Task<Metadata> ResponseHeadersAsync
+        {
+            get
+            {
+                return responseHeadersTcs.Task;
+            }
+        }
+
+        /// <summary>
         /// Gets the resulting status if the call has already finished.
         /// Throws InvalidOperationException otherwise.
         /// </summary>
@@ -281,36 +317,6 @@
             }
         }
 
-        /// <summary>
-        /// On client-side, we only fire readCompletionDelegate once all messages have been read 
-        /// and status has been received.
-        /// </summary>
-        protected override void ProcessLastRead(AsyncCompletionDelegate<TResponse> completionDelegate)
-        {
-            if (completionDelegate != null && readingDone && finishedStatus.HasValue)
-            {
-                bool shouldComplete;
-                lock (myLock)
-                {
-                    shouldComplete = !readObserverCompleted;
-                    readObserverCompleted = true;
-                }
-
-                if (shouldComplete)
-                {
-                    var status = finishedStatus.Value.Status;
-                    if (status.StatusCode != StatusCode.OK)
-                    {
-                        FireCompletion(completionDelegate, default(TResponse), new RpcException(status));
-                    }
-                    else
-                    {
-                        FireCompletion(completionDelegate, default(TResponse), null);
-                    }
-                }
-            }
-        }
-
         protected override void OnAfterReleaseResources()
         {
             details.Channel.RemoveCallReference(this);
@@ -318,18 +324,26 @@
 
         private void Initialize(CompletionQueueSafeHandle cq)
         {
-            var parentCall = details.Options.PropagationToken != null ? details.Options.PropagationToken.ParentCall : CallSafeHandle.NullInstance;
-
-            var call = details.Channel.Handle.CreateCall(details.Channel.Environment.CompletionRegistry,
-                parentCall, ContextPropagationToken.DefaultMask, cq,
-                details.Method, details.Host, Timespec.FromDateTime(details.Options.Deadline.Value));
-
+            var call = CreateNativeCall(cq);
             details.Channel.AddCallReference(this);
-
             InitializeInternal(call);
             RegisterCancellationCallback();
         }
 
+        private INativeCall CreateNativeCall(CompletionQueueSafeHandle cq)
+        {
+            if (injectedNativeCall != null)
+            {
+                return injectedNativeCall;  // allows injecting a mock INativeCall in tests.
+            }
+
+            var parentCall = details.Options.PropagationToken != null ? details.Options.PropagationToken.ParentCall : CallSafeHandle.NullInstance;
+
+            return details.Channel.Handle.CreateCall(environment.CompletionRegistry,
+                parentCall, ContextPropagationToken.DefaultMask, cq,
+                details.Method, details.Host, Timespec.FromDateTime(details.Options.Deadline.Value));
+        }
+
         // Make sure that once cancellationToken for this call is cancelled, Cancel() will be called.
         private void RegisterCancellationCallback()
         {
@@ -350,31 +364,31 @@
         }
 
         /// <summary>
+        /// Handles receiving the response headers (initial metadata) from the server.
+        /// </summary>
+        private void HandleReceivedResponseHeaders(bool success, Metadata responseHeaders)
+        {
+            responseHeadersTcs.SetResult(responseHeaders);
+        }
+
+        /// <summary>
         /// Handler for unary response completion.
         /// </summary>
-        private void HandleUnaryResponse(bool success, BatchContextSafeHandle ctx)
+        private void HandleUnaryResponse(bool success, ClientSideStatus receivedStatus, byte[] receivedMessage, Metadata responseHeaders)
         {
-            var fullStatus = ctx.GetReceivedStatusOnClient();
-
             lock (myLock)
             {
                 finished = true;
-                finishedStatus = fullStatus;
-
-                halfclosed = true;
+                finishedStatus = receivedStatus;
 
                 ReleaseResourcesIfPossible();
             }
 
-            if (!success)
-            {
-                unaryResponseTcs.SetException(new RpcException(new Status(StatusCode.Internal, "Internal error occured.")));
-                return;
-            }
+            responseHeadersTcs.SetResult(responseHeaders);
 
-            var status = fullStatus.Status;
+            var status = receivedStatus.Status;
 
-            if (status.StatusCode != StatusCode.OK)
+            if (!success || status.StatusCode != StatusCode.OK)
             {
                 unaryResponseTcs.SetException(new RpcException(status));
                 return;
@@ -382,7 +396,7 @@
 
             // TODO: handle deserialization error
             TResponse msg;
-            TryDeserialize(ctx.GetReceivedMessage(), out msg);
+            TryDeserialize(receivedMessage, out msg);
 
             unaryResponseTcs.SetResult(msg);
         }
@@ -390,22 +404,25 @@
         /// <summary>
         /// Handles receive status completion for calls with streaming response.
         /// </summary>
-        private void HandleFinished(bool success, BatchContextSafeHandle ctx)
+        private void HandleFinished(bool success, ClientSideStatus receivedStatus)
         {
-            var fullStatus = ctx.GetReceivedStatusOnClient();
-
-            AsyncCompletionDelegate<TResponse> origReadCompletionDelegate = null;
             lock (myLock)
             {
                 finished = true;
-                finishedStatus = fullStatus;
-
-                origReadCompletionDelegate = readCompletionDelegate;
+                finishedStatus = receivedStatus;
 
                 ReleaseResourcesIfPossible();
             }
 
-            ProcessLastRead(origReadCompletionDelegate);
+            var status = receivedStatus.Status;
+
+            if (!success || status.StatusCode != StatusCode.OK)
+            {
+                streamingCallFinishedTcs.SetException(new RpcException(status));
+                return;
+            }
+
+            streamingCallFinishedTcs.SetResult(null);
         }
     }
 }
\ No newline at end of file
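
The injected-call constructor exists purely so that AsyncCall can be unit-tested without touching the native layer (again relying on InternalsVisibleTo). A hedged sketch of such a test, assuming a hand-written test double FakeNativeCall : INativeCall (one possible shape is sketched after the new INativeCall interface further below) and a CallInvocationDetails<string, string> built for a string-in/string-out method on a test channel:

    using System.Threading.Tasks;
    using Grpc.Core;
    using Grpc.Core.Internal;

    public static class AsyncCallTestSketch
    {
        public static async Task CompleteUnaryCallManually(CallInvocationDetails<string, string> details)
        {
            var fakeCall = new FakeNativeCall();  // hypothetical test double, see sketch below
            var asyncCall = new AsyncCall<string, string>(details, fakeCall);

            Task<string> responseTask = asyncCall.UnaryCallAsync("request");

            // No native call was created, so the test finishes the RPC by firing the
            // UnaryResponseClientHandler that StartUnary captured on the fake.
            fakeCall.UnaryResponseClientHandler(true,
                new ClientSideStatus(Status.DefaultSuccess, new Metadata()),
                Marshallers.StringMarshaller.Serializer("response"),
                new Metadata());

            string response = await responseTask;  // completes with "response"
        }
    }
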
diff --git a/src/csharp/Grpc.Core/Internal/AsyncCallBase.cs b/src/csharp/Grpc.Core/Internal/AsyncCallBase.cs
index 1808294..4d20394 100644
--- a/src/csharp/Grpc.Core/Internal/AsyncCallBase.cs
+++ b/src/csharp/Grpc.Core/Internal/AsyncCallBase.cs
@@ -54,30 +54,30 @@
         readonly Func<TWrite, byte[]> serializer;
         readonly Func<byte[], TRead> deserializer;
 
+        protected readonly GrpcEnvironment environment;
         protected readonly object myLock = new object();
 
-        protected CallSafeHandle call;
+        protected INativeCall call;
         protected bool disposed;
 
         protected bool started;
-        protected bool errorOccured;
         protected bool cancelRequested;
 
         protected AsyncCompletionDelegate<object> sendCompletionDelegate;  // Completion of a pending send or sendclose if not null.
         protected AsyncCompletionDelegate<TRead> readCompletionDelegate;  // Completion of a pending send or sendclose if not null.
 
-        protected bool readingDone;
-        protected bool halfcloseRequested;
-        protected bool halfclosed;
+        protected bool readingDone;  // True if last read (i.e. read with null payload) was already received.
+        protected bool halfcloseRequested;  // True if send close has been initiated.
         protected bool finished;  // True if close has been received from the peer.
 
         protected bool initialMetadataSent;
-        protected long streamingWritesCounter;
+        protected long streamingWritesCounter;  // Number of streaming send operations started so far.
 
-        public AsyncCallBase(Func<TWrite, byte[]> serializer, Func<byte[], TRead> deserializer)
+        public AsyncCallBase(Func<TWrite, byte[]> serializer, Func<byte[], TRead> deserializer, GrpcEnvironment environment)
         {
             this.serializer = Preconditions.CheckNotNull(serializer);
             this.deserializer = Preconditions.CheckNotNull(deserializer);
+            this.environment = Preconditions.CheckNotNull(environment);
         }
 
         /// <summary>
@@ -114,7 +114,7 @@
             }
         }
 
-        protected void InitializeInternal(CallSafeHandle call)
+        protected void InitializeInternal(INativeCall call)
         {
             lock (myLock)
             {
@@ -159,16 +159,6 @@
             }
         }
 
-        // TODO(jtattermusch): find more fitting name for this method.
-        /// <summary>
-        /// Default behavior just completes the read observer, but more sofisticated behavior might be required
-        /// by subclasses.
-        /// </summary>
-        protected virtual void ProcessLastRead(AsyncCompletionDelegate<TRead> completionDelegate)
-        {
-            FireCompletion(completionDelegate, default(TRead), null);
-        }
-
         /// <summary>
         /// If there are no more pending actions and no new actions can be started, releases
         /// the underlying native resources.
@@ -177,7 +167,7 @@
         {
             if (!disposed && call != null)
             {
-                bool noMoreSendCompletions = halfclosed || (cancelRequested && sendCompletionDelegate == null);
+                bool noMoreSendCompletions = sendCompletionDelegate == null && (halfcloseRequested || cancelRequested || finished);
                 if (noMoreSendCompletions && readingDone && finished)
                 {
                     ReleaseResources();
@@ -204,11 +194,11 @@
         protected void CheckSendingAllowed()
         {
             Preconditions.CheckState(started);
-            Preconditions.CheckState(!errorOccured);
             CheckNotCancelled();
             Preconditions.CheckState(!disposed);
 
             Preconditions.CheckState(!halfcloseRequested, "Already halfclosed.");
+            Preconditions.CheckState(!finished, "Already finished.");
             Preconditions.CheckState(sendCompletionDelegate == null, "Only one write can be pending at a time");
         }
 
@@ -216,7 +206,6 @@
         {
             Preconditions.CheckState(started);
             Preconditions.CheckState(!disposed);
-            Preconditions.CheckState(!errorOccured);
 
             Preconditions.CheckState(!readingDone, "Stream has already been closed.");
             Preconditions.CheckState(readCompletionDelegate == null, "Only one read can be pending at a time");
@@ -280,7 +269,7 @@
         /// <summary>
         /// Handles send completion.
         /// </summary>
-        protected void HandleSendFinished(bool success, BatchContextSafeHandle ctx)
+        protected void HandleSendFinished(bool success)
         {
             AsyncCompletionDelegate<object> origCompletionDelegate = null;
             lock (myLock)
@@ -304,12 +293,11 @@
         /// <summary>
         /// Handles halfclose completion.
         /// </summary>
-        protected void HandleHalfclosed(bool success, BatchContextSafeHandle ctx)
+        protected void HandleHalfclosed(bool success)
         {
             AsyncCompletionDelegate<object> origCompletionDelegate = null;
             lock (myLock)
             {
-                halfclosed = true;
                 origCompletionDelegate = sendCompletionDelegate;
                 sendCompletionDelegate = null;
 
@@ -329,23 +317,17 @@
         /// <summary>
         /// Handles streaming read completion.
         /// </summary>
-        protected void HandleReadFinished(bool success, BatchContextSafeHandle ctx)
+        protected void HandleReadFinished(bool success, byte[] receivedMessage)
         {
-            var payload = ctx.GetReceivedMessage();
-
             AsyncCompletionDelegate<TRead> origCompletionDelegate = null;
             lock (myLock)
             {
                 origCompletionDelegate = readCompletionDelegate;
-                if (payload != null)
+                readCompletionDelegate = null;
+
+                if (receivedMessage == null)
                 {
-                    readCompletionDelegate = null;
-                }
-                else
-                {
-                    // This was the last read. Keeping the readCompletionDelegate
-                    // to be either fired by this handler or by client-side finished
-                    // handler.
+                    // This was the last read.
                     readingDone = true;
                 }
 
@@ -354,17 +336,17 @@
 
             // TODO: handle the case when error occured...
 
-            if (payload != null)
+            if (receivedMessage != null)
             {
                 // TODO: handle deserialization error
                 TRead msg;
-                TryDeserialize(payload, out msg);
+                TryDeserialize(receivedMessage, out msg);
 
                 FireCompletion(origCompletionDelegate, msg, null);
             }
             else
             {
-                ProcessLastRead(origCompletionDelegate);
+                FireCompletion(origCompletionDelegate, default(TRead), null);
             }
         }
     }
diff --git a/src/csharp/Grpc.Core/Internal/AsyncCallServer.cs b/src/csharp/Grpc.Core/Internal/AsyncCallServer.cs
index 6278c01..5c47251 100644
--- a/src/csharp/Grpc.Core/Internal/AsyncCallServer.cs
+++ b/src/csharp/Grpc.Core/Internal/AsyncCallServer.cs
@@ -49,12 +49,10 @@
     {
         readonly TaskCompletionSource<object> finishedServersideTcs = new TaskCompletionSource<object>();
         readonly CancellationTokenSource cancellationTokenSource = new CancellationTokenSource();
-        readonly GrpcEnvironment environment;
         readonly Server server;
 
-        public AsyncCallServer(Func<TResponse, byte[]> serializer, Func<byte[], TRequest> deserializer, GrpcEnvironment environment, Server server) : base(serializer, deserializer)
+        public AsyncCallServer(Func<TResponse, byte[]> serializer, Func<byte[], TRequest> deserializer, GrpcEnvironment environment, Server server) : base(serializer, deserializer, environment)
         {
-            this.environment = Preconditions.CheckNotNull(environment);
             this.server = Preconditions.CheckNotNull(server);
         }
 
@@ -185,10 +183,8 @@
         /// <summary>
         /// Handles the server side close completion.
         /// </summary>
-        private void HandleFinishedServerside(bool success, BatchContextSafeHandle ctx)
+        private void HandleFinishedServerside(bool success, bool cancelled)
         {
-            bool cancelled = ctx.GetReceivedCloseOnServerCancelled();
-
             lock (myLock)
             {
                 finished = true;
diff --git a/src/csharp/Grpc.Core/Internal/CallSafeHandle.cs b/src/csharp/Grpc.Core/Internal/CallSafeHandle.cs
index 3cb01e2..0f18752 100644
--- a/src/csharp/Grpc.Core/Internal/CallSafeHandle.cs
+++ b/src/csharp/Grpc.Core/Internal/CallSafeHandle.cs
@@ -40,7 +40,7 @@
     /// <summary>
     /// grpc_call from <grpc/grpc.h>
     /// </summary>
-    internal class CallSafeHandle : SafeHandleZeroIsInvalid
+    internal class CallSafeHandle : SafeHandleZeroIsInvalid, INativeCall
     {
         public static readonly CallSafeHandle NullInstance = new CallSafeHandle();
 
@@ -87,6 +87,10 @@
             BatchContextSafeHandle ctx);
 
         [DllImport("grpc_csharp_ext.dll")]
+        static extern GRPCCallError grpcsharp_call_recv_initial_metadata(CallSafeHandle call,
+            BatchContextSafeHandle ctx);
+
+        [DllImport("grpc_csharp_ext.dll")]
         static extern GRPCCallError grpcsharp_call_start_serverside(CallSafeHandle call,
             BatchContextSafeHandle ctx);
 
@@ -109,10 +113,10 @@
             this.completionRegistry = completionRegistry;
         }
 
-        public void StartUnary(BatchCompletionDelegate callback, byte[] payload, MetadataArraySafeHandle metadataArray, WriteFlags writeFlags)
+        public void StartUnary(UnaryResponseClientHandler callback, byte[] payload, MetadataArraySafeHandle metadataArray, WriteFlags writeFlags)
         {
             var ctx = BatchContextSafeHandle.Create();
-            completionRegistry.RegisterBatchCompletion(ctx, callback);
+            completionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success, context.GetReceivedStatusOnClient(), context.GetReceivedMessage(), context.GetReceivedInitialMetadata()));
             grpcsharp_call_start_unary(this, ctx, payload, new UIntPtr((ulong)payload.Length), metadataArray, writeFlags)
                 .CheckOk();
         }
@@ -123,66 +127,73 @@
                 .CheckOk();
         }
 
-        public void StartClientStreaming(BatchCompletionDelegate callback, MetadataArraySafeHandle metadataArray)
+        public void StartClientStreaming(UnaryResponseClientHandler callback, MetadataArraySafeHandle metadataArray)
         {
             var ctx = BatchContextSafeHandle.Create();
-            completionRegistry.RegisterBatchCompletion(ctx, callback);
+            completionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success, context.GetReceivedStatusOnClient(), context.GetReceivedMessage(), context.GetReceivedInitialMetadata()));
             grpcsharp_call_start_client_streaming(this, ctx, metadataArray).CheckOk();
         }
 
-        public void StartServerStreaming(BatchCompletionDelegate callback, byte[] payload, MetadataArraySafeHandle metadataArray, WriteFlags writeFlags)
+        public void StartServerStreaming(ReceivedStatusOnClientHandler callback, byte[] payload, MetadataArraySafeHandle metadataArray, WriteFlags writeFlags)
         {
             var ctx = BatchContextSafeHandle.Create();
-            completionRegistry.RegisterBatchCompletion(ctx, callback);
+            completionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success, context.GetReceivedStatusOnClient()));
             grpcsharp_call_start_server_streaming(this, ctx, payload, new UIntPtr((ulong)payload.Length), metadataArray, writeFlags).CheckOk();
         }
 
-        public void StartDuplexStreaming(BatchCompletionDelegate callback, MetadataArraySafeHandle metadataArray)
+        public void StartDuplexStreaming(ReceivedStatusOnClientHandler callback, MetadataArraySafeHandle metadataArray)
         {
             var ctx = BatchContextSafeHandle.Create();
-            completionRegistry.RegisterBatchCompletion(ctx, callback);
+            completionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success, context.GetReceivedStatusOnClient()));
             grpcsharp_call_start_duplex_streaming(this, ctx, metadataArray).CheckOk();
         }
 
-        public void StartSendMessage(BatchCompletionDelegate callback, byte[] payload, WriteFlags writeFlags, bool sendEmptyInitialMetadata)
+        public void StartSendMessage(SendCompletionHandler callback, byte[] payload, WriteFlags writeFlags, bool sendEmptyInitialMetadata)
         {
             var ctx = BatchContextSafeHandle.Create();
-            completionRegistry.RegisterBatchCompletion(ctx, callback);
+            completionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success));
             grpcsharp_call_send_message(this, ctx, payload, new UIntPtr((ulong)payload.Length), writeFlags, sendEmptyInitialMetadata).CheckOk();
         }
 
-        public void StartSendCloseFromClient(BatchCompletionDelegate callback)
+        public void StartSendCloseFromClient(SendCompletionHandler callback)
         {
             var ctx = BatchContextSafeHandle.Create();
-            completionRegistry.RegisterBatchCompletion(ctx, callback);
+            completionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success));
             grpcsharp_call_send_close_from_client(this, ctx).CheckOk();
         }
 
-        public void StartSendStatusFromServer(BatchCompletionDelegate callback, Status status, MetadataArraySafeHandle metadataArray, bool sendEmptyInitialMetadata)
+        public void StartSendStatusFromServer(SendCompletionHandler callback, Status status, MetadataArraySafeHandle metadataArray, bool sendEmptyInitialMetadata)
         {
             var ctx = BatchContextSafeHandle.Create();
-            completionRegistry.RegisterBatchCompletion(ctx, callback);
+            completionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success));
             grpcsharp_call_send_status_from_server(this, ctx, status.StatusCode, status.Detail, metadataArray, sendEmptyInitialMetadata).CheckOk();
         }
 
-        public void StartReceiveMessage(BatchCompletionDelegate callback)
+        public void StartReceiveMessage(ReceivedMessageHandler callback)
         {
             var ctx = BatchContextSafeHandle.Create();
-            completionRegistry.RegisterBatchCompletion(ctx, callback);
+            completionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success, context.GetReceivedMessage()));
             grpcsharp_call_recv_message(this, ctx).CheckOk();
         }
 
-        public void StartServerSide(BatchCompletionDelegate callback)
+        public void StartReceiveInitialMetadata(ReceivedResponseHeadersHandler callback)
         {
             var ctx = BatchContextSafeHandle.Create();
-            completionRegistry.RegisterBatchCompletion(ctx, callback);
+            completionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success, context.GetReceivedInitialMetadata()));
+            grpcsharp_call_recv_initial_metadata(this, ctx).CheckOk();
+        }
+
+        public void StartServerSide(ReceivedCloseOnServerHandler callback)
+        {
+            var ctx = BatchContextSafeHandle.Create();
+            completionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success, context.GetReceivedCloseOnServerCancelled()));
             grpcsharp_call_start_serverside(this, ctx).CheckOk();
         }
 
-        public void StartSendInitialMetadata(BatchCompletionDelegate callback, MetadataArraySafeHandle metadataArray)
+        public void StartSendInitialMetadata(SendCompletionHandler callback, MetadataArraySafeHandle metadataArray)
         {
             var ctx = BatchContextSafeHandle.Create();
-            completionRegistry.RegisterBatchCompletion(ctx, callback);
+            completionRegistry.RegisterBatchCompletion(ctx, (success, context) => callback(success));
             grpcsharp_call_send_initial_metadata(this, ctx, metadataArray).CheckOk();
         }
 
diff --git a/src/csharp/Grpc.Core/Internal/ClientResponseStream.cs b/src/csharp/Grpc.Core/Internal/ClientResponseStream.cs
index 6c44521..b4a7335 100644
--- a/src/csharp/Grpc.Core/Internal/ClientResponseStream.cs
+++ b/src/csharp/Grpc.Core/Internal/ClientResponseStream.cs
@@ -72,7 +72,13 @@
             call.StartReadMessage(taskSource.CompletionDelegate);
             var result = await taskSource.Task;
             this.current = result;
-            return result != null;
+
+            if (result == null)
+            {
+                await call.StreamingCallFinishedTask;
+                return false;
+            }
+            return true;
         }
 
         public void Dispose()
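
The extra await on StreamingCallFinishedTask changes what callers of MoveNext observe: a stream that ends with a non-OK status now faults the final MoveNext with an RpcException instead of simply returning false. A hedged consumer-side sketch (the AsyncServerStreamingCall would come from any generated client):

    using System;
    using System.Threading;
    using System.Threading.Tasks;
    using Grpc.Core;

    public static class StreamingConsumerSketch
    {
        public static async Task DrainResponses<T>(AsyncServerStreamingCall<T> call)
        {
            try
            {
                while (await call.ResponseStream.MoveNext(CancellationToken.None))
                {
                    Console.WriteLine(call.ResponseStream.Current);
                }
                // Reaching this point now implies the call finished with StatusCode.OK.
            }
            catch (RpcException ex)
            {
                // A non-OK status is surfaced by the last MoveNext rather than being dropped.
                Console.WriteLine("Call finished with status: " + ex.Status);
            }
        }
    }
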
diff --git a/src/csharp/Grpc.Core/Internal/INativeCall.cs b/src/csharp/Grpc.Core/Internal/INativeCall.cs
new file mode 100644
index 0000000..cbef599
--- /dev/null
+++ b/src/csharp/Grpc.Core/Internal/INativeCall.cs
@@ -0,0 +1,85 @@
+#region Copyright notice and license
+// Copyright 2015, Google Inc.
+// All rights reserved.
+// 
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+// 
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+// 
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#endregion
+
+using System;
+
+namespace Grpc.Core.Internal
+{
+    internal delegate void UnaryResponseClientHandler(bool success, ClientSideStatus receivedStatus, byte[] receivedMessage, Metadata responseHeaders);
+
+    // Received status for streaming response calls.
+    internal delegate void ReceivedStatusOnClientHandler(bool success, ClientSideStatus receivedStatus);
+
+    internal delegate void ReceivedMessageHandler(bool success, byte[] receivedMessage);
+
+    internal delegate void ReceivedResponseHeadersHandler(bool success, Metadata responseHeaders);
+
+    internal delegate void SendCompletionHandler(bool success);
+
+    internal delegate void ReceivedCloseOnServerHandler(bool success, bool cancelled);
+
+    /// <summary>
+    /// Abstraction of a native call object.
+    /// </summary>
+    internal interface INativeCall : IDisposable
+    {
+        void Cancel();
+
+        void CancelWithStatus(Grpc.Core.Status status);
+
+        string GetPeer();
+
+        void StartUnary(UnaryResponseClientHandler callback, byte[] payload, MetadataArraySafeHandle metadataArray, Grpc.Core.WriteFlags writeFlags);
+
+        void StartUnary(BatchContextSafeHandle ctx, byte[] payload, MetadataArraySafeHandle metadataArray, Grpc.Core.WriteFlags writeFlags);
+
+        void StartClientStreaming(UnaryResponseClientHandler callback, MetadataArraySafeHandle metadataArray);
+
+        void StartServerStreaming(ReceivedStatusOnClientHandler callback, byte[] payload, MetadataArraySafeHandle metadataArray, Grpc.Core.WriteFlags writeFlags);
+
+        void StartDuplexStreaming(ReceivedStatusOnClientHandler callback, MetadataArraySafeHandle metadataArray);
+
+        void StartReceiveMessage(ReceivedMessageHandler callback);
+
+        void StartReceiveInitialMetadata(ReceivedResponseHeadersHandler callback);
+
+        void StartSendInitialMetadata(SendCompletionHandler callback, MetadataArraySafeHandle metadataArray);
+
+        void StartSendMessage(SendCompletionHandler callback, byte[] payload, Grpc.Core.WriteFlags writeFlags, bool sendEmptyInitialMetadata);
+
+        void StartSendCloseFromClient(SendCompletionHandler callback);
+
+        void StartSendStatusFromServer(SendCompletionHandler callback, Grpc.Core.Status status, MetadataArraySafeHandle metadataArray, bool sendEmptyInitialMetadata);
+
+        void StartServerSide(ReceivedCloseOnServerHandler callback);
+    }
+}
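
For completeness, a minimal test-double sketch of the interface above (the name FakeNativeCall and the recorded-callback approach are illustrative; the repository's own tests may shape theirs differently). It records the callbacks AsyncCall registers so a test can complete them by hand:

    using System;

    namespace Grpc.Core.Internal.Tests
    {
        internal class FakeNativeCall : INativeCall
        {
            public UnaryResponseClientHandler UnaryResponseClientHandler { get; private set; }
            public ReceivedStatusOnClientHandler ReceivedStatusOnClientHandler { get; private set; }
            public ReceivedMessageHandler ReceivedMessageHandler { get; private set; }
            public ReceivedResponseHeadersHandler ReceivedResponseHeadersHandler { get; private set; }
            public SendCompletionHandler SendCompletionHandler { get; private set; }
            public ReceivedCloseOnServerHandler ReceivedCloseOnServerHandler { get; private set; }
            public bool IsCancelled { get; private set; }
            public bool IsDisposed { get; private set; }

            public void Cancel() { IsCancelled = true; }

            public void CancelWithStatus(Status status) { IsCancelled = true; }

            public string GetPeer() { return "PEER"; }

            public void StartUnary(UnaryResponseClientHandler callback, byte[] payload, MetadataArraySafeHandle metadataArray, WriteFlags writeFlags) { UnaryResponseClientHandler = callback; }

            // The blocking-unary overload is not needed for these tests.
            public void StartUnary(BatchContextSafeHandle ctx, byte[] payload, MetadataArraySafeHandle metadataArray, WriteFlags writeFlags) { throw new NotImplementedException(); }

            public void StartClientStreaming(UnaryResponseClientHandler callback, MetadataArraySafeHandle metadataArray) { UnaryResponseClientHandler = callback; }

            public void StartServerStreaming(ReceivedStatusOnClientHandler callback, byte[] payload, MetadataArraySafeHandle metadataArray, WriteFlags writeFlags) { ReceivedStatusOnClientHandler = callback; }

            public void StartDuplexStreaming(ReceivedStatusOnClientHandler callback, MetadataArraySafeHandle metadataArray) { ReceivedStatusOnClientHandler = callback; }

            public void StartReceiveMessage(ReceivedMessageHandler callback) { ReceivedMessageHandler = callback; }

            public void StartReceiveInitialMetadata(ReceivedResponseHeadersHandler callback) { ReceivedResponseHeadersHandler = callback; }

            public void StartSendInitialMetadata(SendCompletionHandler callback, MetadataArraySafeHandle metadataArray) { SendCompletionHandler = callback; }

            public void StartSendMessage(SendCompletionHandler callback, byte[] payload, WriteFlags writeFlags, bool sendEmptyInitialMetadata) { SendCompletionHandler = callback; }

            public void StartSendCloseFromClient(SendCompletionHandler callback) { SendCompletionHandler = callback; }

            public void StartSendStatusFromServer(SendCompletionHandler callback, Status status, MetadataArraySafeHandle metadataArray, bool sendEmptyInitialMetadata) { SendCompletionHandler = callback; }

            public void StartServerSide(ReceivedCloseOnServerHandler callback) { ReceivedCloseOnServerHandler = callback; }

            public void Dispose() { IsDisposed = true; }
        }
    }
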
diff --git a/src/csharp/ext/grpc_csharp_ext.c b/src/csharp/ext/grpc_csharp_ext.c
index fc9470f..489e219 100644
--- a/src/csharp/ext/grpc_csharp_ext.c
+++ b/src/csharp/ext/grpc_csharp_ext.c
@@ -595,7 +595,7 @@
     grpc_call *call, grpcsharp_batch_context *ctx, const char *send_buffer,
     size_t send_buffer_len, grpc_metadata_array *initial_metadata, gpr_uint32 write_flags) {
   /* TODO: don't use magic number */
-  grpc_op ops[5];
+  grpc_op ops[4];
   ops[0].op = GRPC_OP_SEND_INITIAL_METADATA;
   grpcsharp_metadata_array_move(&(ctx->send_initial_metadata),
                                 initial_metadata);
@@ -615,23 +615,18 @@
   ops[2].flags = 0;
   ops[2].reserved = NULL;
 
-  ops[3].op = GRPC_OP_RECV_INITIAL_METADATA;
-  ops[3].data.recv_initial_metadata = &(ctx->recv_initial_metadata);
-  ops[3].flags = 0;
-  ops[3].reserved = NULL;
-
-  ops[4].op = GRPC_OP_RECV_STATUS_ON_CLIENT;
-  ops[4].data.recv_status_on_client.trailing_metadata =
+  ops[3].op = GRPC_OP_RECV_STATUS_ON_CLIENT;
+  ops[3].data.recv_status_on_client.trailing_metadata =
       &(ctx->recv_status_on_client.trailing_metadata);
-  ops[4].data.recv_status_on_client.status =
+  ops[3].data.recv_status_on_client.status =
       &(ctx->recv_status_on_client.status);
   /* not using preallocation for status_details */
-  ops[4].data.recv_status_on_client.status_details =
+  ops[3].data.recv_status_on_client.status_details =
       &(ctx->recv_status_on_client.status_details);
-  ops[4].data.recv_status_on_client.status_details_capacity =
+  ops[3].data.recv_status_on_client.status_details_capacity =
       &(ctx->recv_status_on_client.status_details_capacity);
-  ops[4].flags = 0;
-  ops[4].reserved = NULL;
+  ops[3].flags = 0;
+  ops[3].reserved = NULL;
 
   return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx,
                                NULL);
@@ -642,7 +637,7 @@
                                       grpcsharp_batch_context *ctx,
                                       grpc_metadata_array *initial_metadata) {
   /* TODO: don't use magic number */
-  grpc_op ops[3];
+  grpc_op ops[2];
   ops[0].op = GRPC_OP_SEND_INITIAL_METADATA;
   grpcsharp_metadata_array_move(&(ctx->send_initial_metadata),
                                 initial_metadata);
@@ -652,28 +647,36 @@
   ops[0].flags = 0;
   ops[0].reserved = NULL;
 
-  ops[1].op = GRPC_OP_RECV_INITIAL_METADATA;
-  ops[1].data.recv_initial_metadata = &(ctx->recv_initial_metadata);
+  ops[1].op = GRPC_OP_RECV_STATUS_ON_CLIENT;
+  ops[1].data.recv_status_on_client.trailing_metadata =
+      &(ctx->recv_status_on_client.trailing_metadata);
+  ops[1].data.recv_status_on_client.status =
+      &(ctx->recv_status_on_client.status);
+  /* not using preallocation for status_details */
+  ops[1].data.recv_status_on_client.status_details =
+      &(ctx->recv_status_on_client.status_details);
+  ops[1].data.recv_status_on_client.status_details_capacity =
+      &(ctx->recv_status_on_client.status_details_capacity);
   ops[1].flags = 0;
   ops[1].reserved = NULL;
 
-  ops[2].op = GRPC_OP_RECV_STATUS_ON_CLIENT;
-  ops[2].data.recv_status_on_client.trailing_metadata =
-      &(ctx->recv_status_on_client.trailing_metadata);
-  ops[2].data.recv_status_on_client.status =
-      &(ctx->recv_status_on_client.status);
-  /* not using preallocation for status_details */
-  ops[2].data.recv_status_on_client.status_details =
-      &(ctx->recv_status_on_client.status_details);
-  ops[2].data.recv_status_on_client.status_details_capacity =
-      &(ctx->recv_status_on_client.status_details_capacity);
-  ops[2].flags = 0;
-  ops[2].reserved = NULL;
-
   return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx,
                                NULL);
 }
 
+GPR_EXPORT grpc_call_error GPR_CALLTYPE grpcsharp_call_recv_initial_metadata(
+    grpc_call *call, grpcsharp_batch_context *ctx) {
+  /* TODO: don't use magic number */
+  grpc_op ops[1];
+  ops[0].op = GRPC_OP_RECV_INITIAL_METADATA;
+  ops[0].data.recv_initial_metadata = &(ctx->recv_initial_metadata);
+  ops[0].flags = 0;
+  ops[0].reserved = NULL;
+
+  return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), ctx,
+                               NULL);
+}
+
 GPR_EXPORT grpc_call_error GPR_CALLTYPE
 grpcsharp_call_send_message(grpc_call *call, grpcsharp_batch_context *ctx,
                             const char *send_buffer, size_t send_buffer_len,
diff --git a/src/python/grpcio/grpc/_adapter/_c/types.h b/src/python/grpcio/grpc/_adapter/_c/types.h
index f646465..f6ff957 100644
--- a/src/python/grpcio/grpc/_adapter/_c/types.h
+++ b/src/python/grpcio/grpc/_adapter/_c/types.h
@@ -146,6 +146,7 @@
   PyObject_HEAD
   grpc_server *c_serv;
   CompletionQueue *cq;
+  int shutdown_called;
 } Server;
 Server *pygrpc_Server_new(PyTypeObject *type, PyObject *args, PyObject *kwargs);
 void pygrpc_Server_dealloc(Server *self);
@@ -156,6 +157,7 @@
 PyObject *pygrpc_Server_start(Server *self, PyObject *ignored);
 PyObject *pygrpc_Server_shutdown(
     Server *self, PyObject *args, PyObject *kwargs);
+PyObject *pygrpc_Server_cancel_all_calls(Server *self, PyObject *unused);
 extern PyTypeObject pygrpc_Server_type;
 
 /*=========*/
diff --git a/src/python/grpcio/grpc/_adapter/_c/types/server.c b/src/python/grpcio/grpc/_adapter/_c/types/server.c
index 15c98f2..8feab8a 100644
--- a/src/python/grpcio/grpc/_adapter/_c/types/server.c
+++ b/src/python/grpcio/grpc/_adapter/_c/types/server.c
@@ -45,6 +45,8 @@
      METH_KEYWORDS, ""},
     {"start", (PyCFunction)pygrpc_Server_start, METH_NOARGS, ""},
     {"shutdown", (PyCFunction)pygrpc_Server_shutdown, METH_KEYWORDS, ""},
+    {"cancel_all_calls", (PyCFunction)pygrpc_Server_cancel_all_calls,
+     METH_NOARGS, ""},
     {NULL}
 };
 const char pygrpc_Server_doc[] = "See grpc._adapter._types.Server.";
@@ -109,6 +111,7 @@
   pygrpc_discard_channel_args(c_args);
   self->cq = cq;
   Py_INCREF(self->cq);
+  self->shutdown_called = 0;
   return self;
 }
 
@@ -163,6 +166,7 @@
 
 PyObject *pygrpc_Server_start(Server *self, PyObject *ignored) {
   grpc_server_start(self->c_serv);
+  self->shutdown_called = 0;
   Py_RETURN_NONE;
 }
 
@@ -176,5 +180,17 @@
   }
   tag = pygrpc_produce_server_shutdown_tag(user_tag);
   grpc_server_shutdown_and_notify(self->c_serv, self->cq->c_cq, tag);
+  self->shutdown_called = 1;
+  Py_RETURN_NONE;
+}
+
+PyObject *pygrpc_Server_cancel_all_calls(Server *self, PyObject *unused) {
+  if (!self->shutdown_called) {
+    PyErr_SetString(
+        PyExc_RuntimeError,
+        "shutdown must have been called prior to calling cancel_all_calls!");
+    return NULL;
+  }
+  grpc_server_cancel_all_calls(self->c_serv);
   Py_RETURN_NONE;
 }
diff --git a/src/python/grpcio/grpc/_adapter/_low.py b/src/python/grpcio/grpc/_adapter/_low.py
index 147086e..3859ebb 100644
--- a/src/python/grpcio/grpc/_adapter/_low.py
+++ b/src/python/grpcio/grpc/_adapter/_low.py
@@ -124,3 +124,6 @@
 
   def request_call(self, completion_queue, tag):
     return self.server.request_call(completion_queue.completion_queue, tag)
+
+  def cancel_all_calls(self):
+    return self.server.cancel_all_calls()
diff --git a/src/python/grpcio/grpc/_links/invocation.py b/src/python/grpcio/grpc/_links/invocation.py
index 0058ae9..a74c77e 100644
--- a/src/python/grpcio/grpc/_links/invocation.py
+++ b/src/python/grpcio/grpc/_links/invocation.py
@@ -101,7 +101,7 @@
     else:
       ticket = links.Ticket(
           operation_id, rpc_state.sequence_number, None, None, None, None, 1,
-          None, None, None, None, None, None)
+          None, None, None, None, None, None, None)
       rpc_state.sequence_number += 1
       self._relay.add_value(ticket)
       rpc_state.low_write = _LowWrite.OPEN
@@ -118,7 +118,7 @@
       ticket = links.Ticket(
           operation_id, rpc_state.sequence_number, None, None, None, None, None,
           None, rpc_state.response_deserializer(event.bytes), None, None, None,
-          None)
+          None, None)
       rpc_state.sequence_number += 1
       self._relay.add_value(ticket)
 
@@ -129,7 +129,7 @@
     ticket = links.Ticket(
         operation_id, rpc_state.sequence_number, None, None,
         links.Ticket.Subscription.FULL, None, None, event.metadata, None, None,
-        None, None, None)
+        None, None, None, None)
     rpc_state.sequence_number += 1
     self._relay.add_value(ticket)
 
@@ -146,7 +146,7 @@
     ticket = links.Ticket(
         operation_id, rpc_state.sequence_number, None, None, None, None, None,
         None, None, event.metadata, event.status.code, event.status.details,
-        termination)
+        termination, None)
     rpc_state.sequence_number += 1
     self._relay.add_value(ticket)
 
diff --git a/src/python/grpcio/grpc/_links/service.py b/src/python/grpcio/grpc/_links/service.py
index 5c636d6..1436176 100644
--- a/src/python/grpcio/grpc/_links/service.py
+++ b/src/python/grpcio/grpc/_links/service.py
@@ -131,7 +131,7 @@
     ticket = links.Ticket(
         call, 0, group, method, links.Ticket.Subscription.FULL,
         service_acceptance.deadline - time.time(), None, event.metadata, None,
-        None, None, None, None)
+        None, None, None, None, 'TODO: Service Context Object!')
     self._relay.add_value(ticket)
 
   def _on_read_event(self, event):
@@ -157,7 +157,7 @@
         # rpc_state.read = _Read.AWAITING_ALLOWANCE
     ticket = links.Ticket(
         call, rpc_state.sequence_number, None, None, None, None, None, None,
-        payload, None, None, None, termination)
+        payload, None, None, None, termination, None)
     rpc_state.sequence_number += 1
     self._relay.add_value(ticket)
 
@@ -176,7 +176,7 @@
     else:
       ticket = links.Ticket(
           call, rpc_state.sequence_number, None, None, None, None, 1, None,
-          None, None, None, None, None)
+          None, None, None, None, None, None)
       rpc_state.sequence_number += 1
       self._relay.add_value(ticket)
       rpc_state.low_write = _LowWrite.OPEN
@@ -198,7 +198,7 @@
       termination = links.Ticket.Termination.TRANSMISSION_FAILURE
     ticket = links.Ticket(
         call, rpc_state.sequence_number, None, None, None, None, None, None,
-        None, None, None, None, termination)
+        None, None, None, None, termination, None)
     rpc_state.sequence_number += 1
     self._relay.add_value(ticket)
 
@@ -239,7 +239,7 @@
       elif not rpc_state.premetadataed:
         if (ticket.terminal_metadata is not None or
             ticket.payload is not None or
-            ticket.termination is links.Ticket.Termination.COMPLETION or
+            ticket.termination is not None or
             ticket.code is not None or
             ticket.message is not None):
           call.premetadata()
@@ -257,11 +257,11 @@
             termination = None
           else:
             termination = links.Ticket.Termination.COMPLETION
-          ticket = links.Ticket(
+          early_read_ticket = links.Ticket(
               call, rpc_state.sequence_number, None, None, None, None, None,
-              None, payload, None, None, None, termination)
+              None, payload, None, None, None, termination, None)
           rpc_state.sequence_number += 1
-          self._relay.add_value(ticket)
+          self._relay.add_value(early_read_ticket)
 
       if ticket.payload is not None:
         call.write(rpc_state.response_serializer(ticket.payload), call)
diff --git a/src/python/grpcio/grpc/framework/core/_reception.py b/src/python/grpcio/grpc/framework/core/_reception.py
index b64faf8..0858f64 100644
--- a/src/python/grpcio/grpc/framework/core/_reception.py
+++ b/src/python/grpcio/grpc/framework/core/_reception.py
@@ -42,6 +42,7 @@
     links.Ticket.Termination.TRANSMISSION_FAILURE:
         base.Outcome.TRANSMISSION_FAILURE,
     links.Ticket.Termination.LOCAL_FAILURE: base.Outcome.REMOTE_FAILURE,
+    links.Ticket.Termination.REMOTE_FAILURE: base.Outcome.LOCAL_FAILURE,
 }
 
 
@@ -70,9 +71,10 @@
 
   def _abort(self, outcome):
     self._aborted = True
-    self._termination_manager.abort(outcome)
-    self._transmission_manager.abort(outcome)
-    self._expiration_manager.terminate()
+    if self._termination_manager.outcome is None:
+      self._termination_manager.abort(outcome)
+      self._transmission_manager.abort(None)
+      self._expiration_manager.terminate()
 
   def _sequence_failure(self, ticket):
     """Determines a just-arrived ticket's sequential legitimacy.
diff --git a/src/python/grpcio/grpc/framework/core/_transmission.py b/src/python/grpcio/grpc/framework/core/_transmission.py
index 01894d3..03644f4 100644
--- a/src/python/grpcio/grpc/framework/core/_transmission.py
+++ b/src/python/grpcio/grpc/framework/core/_transmission.py
@@ -107,7 +107,7 @@
           return links.Ticket(
               self._operation_id, self._lowest_unused_sequence_number, None,
               None, None, None, None, None, None, None, None, None,
-              termination)
+              termination, None)
 
     action = False
     # TODO(nathaniel): Support other subscriptions.
@@ -144,7 +144,7 @@
       ticket = links.Ticket(
           self._operation_id, self._lowest_unused_sequence_number, None, None,
           local_subscription, timeout, allowance, initial_metadata, payload,
-          terminal_metadata, code, message, termination)
+          terminal_metadata, code, message, termination, None)
       self._lowest_unused_sequence_number += 1
       return ticket
     else:
@@ -191,7 +191,7 @@
     ticket = links.Ticket(
         self._operation_id, 0, group, method, subscription, timeout, allowance,
         initial_metadata, payload, terminal_metadata, code, message,
-        termination)
+        termination, None)
     self._lowest_unused_sequence_number = 1
     self._transmit(ticket)
 
@@ -236,7 +236,7 @@
         ticket = links.Ticket(
             self._operation_id, self._lowest_unused_sequence_number, None, None,
             None, None, allowance, effective_initial_metadata, ticket_payload,
-            terminal_metadata, code, message, termination)
+            terminal_metadata, code, message, termination, None)
         self._lowest_unused_sequence_number += 1
         self._transmit(ticket)
 
@@ -247,7 +247,7 @@
     else:
       ticket = links.Ticket(
           self._operation_id, self._lowest_unused_sequence_number, None, None,
-          None, timeout, None, None, None, None, None, None, None)
+          None, timeout, None, None, None, None, None, None, None, None)
       self._lowest_unused_sequence_number += 1
       self._transmit(ticket)
 
@@ -268,7 +268,7 @@
       ticket = links.Ticket(
           self._operation_id, self._lowest_unused_sequence_number, None, None,
           None, None, None, None, payload, terminal_metadata, code, message,
-          termination)
+          termination, None)
       self._lowest_unused_sequence_number += 1
       self._transmit(ticket)
 
@@ -290,5 +290,5 @@
           ticket = links.Ticket(
               self._operation_id, self._lowest_unused_sequence_number, None,
               None, None, None, None, None, None, None, None, None,
-              termination)
+              termination, None)
           self._transmit(ticket)
diff --git a/src/python/grpcio/grpc/framework/interfaces/face/__init__.py b/src/python/grpcio/grpc/framework/interfaces/face/__init__.py
new file mode 100644
index 0000000..7086519
--- /dev/null
+++ b/src/python/grpcio/grpc/framework/interfaces/face/__init__.py
@@ -0,0 +1,30 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
diff --git a/src/python/grpcio/grpc/framework/interfaces/face/face.py b/src/python/grpcio/grpc/framework/interfaces/face/face.py
new file mode 100644
index 0000000..948e750
--- /dev/null
+++ b/src/python/grpcio/grpc/framework/interfaces/face/face.py
@@ -0,0 +1,933 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Interfaces defining the Face layer of RPC Framework."""
+
+import abc
+import collections
+import enum
+
+# cardinality, style, abandonment, future, and stream are
+# referenced from specification in this module.
+from grpc.framework.common import cardinality  # pylint: disable=unused-import
+from grpc.framework.common import style  # pylint: disable=unused-import
+from grpc.framework.foundation import abandonment  # pylint: disable=unused-import
+from grpc.framework.foundation import future  # pylint: disable=unused-import
+from grpc.framework.foundation import stream  # pylint: disable=unused-import
+
+
+class NoSuchMethodError(Exception):
+  """Raised by customer code to indicate an unrecognized method.
+
+  Attributes:
+    group: The group of the unrecognized method.
+    method: The method identifier of the unrecognized method.
+  """
+
+  def __init__(self, group, method):
+    """Constructor.
+
+    Args:
+      group: The group identifier of the unrecognized RPC name.
+      method: The method identifier of the unrecognized RPC name.
+    """
+    super(NoSuchMethodError, self).__init__()
+    self.group = group
+    self.method = method
+
+  def __repr__(self):
+    return 'face.NoSuchMethodError(%s, %s)' % (self.group, self.method,)
+
+
+class Abortion(
+    collections.namedtuple(
+        'Abortion',
+        ('kind', 'initial_metadata', 'terminal_metadata', 'code', 'details',))):
+  """A value describing RPC abortion.
+
+  Attributes:
+    kind: A Kind value identifying how the RPC failed.
+    initial_metadata: The initial metadata from the other side of the RPC or
+      None if no initial metadata value was received.
+    terminal_metadata: The terminal metadata from the other side of the RPC or
+      None if no terminal metadata value was received.
+    code: The code value from the other side of the RPC or None if no code value
+      was received.
+    details: The details value from the other side of the RPC or None if no
+      details value was received.
+  """
+
+  @enum.unique
+  class Kind(enum.Enum):
+    """Types of RPC abortion."""
+
+    CANCELLED = 'cancelled'
+    EXPIRED = 'expired'
+    LOCAL_SHUTDOWN = 'local shutdown'
+    REMOTE_SHUTDOWN = 'remote shutdown'
+    NETWORK_FAILURE = 'network failure'
+    LOCAL_FAILURE = 'local failure'
+    REMOTE_FAILURE = 'remote failure'
+
+
+class AbortionError(Exception):
+  """Common super type for exceptions indicating RPC abortion.
+
+    initial_metadata: The initial metadata from the other side of the RPC or
+      None if no initial metadata value was received.
+    terminal_metadata: The terminal metadata from the other side of the RPC or
+      None if no terminal metadata value was received.
+    code: The code value from the other side of the RPC or None if no code value
+      was received.
+    details: The details value from the other side of the RPC or None if no
+      details value was received.
+  """
+  __metaclass__ = abc.ABCMeta
+
+  def __init__(self, initial_metadata, terminal_metadata, code, details):
+    super(AbortionError, self).__init__()
+    self.initial_metadata = initial_metadata
+    self.terminal_metadata = terminal_metadata
+    self.code = code
+    self.details = details
+
+
+class CancellationError(AbortionError):
+  """Indicates that an RPC has been cancelled."""
+
+
+class ExpirationError(AbortionError):
+  """Indicates that an RPC has expired ("timed out")."""
+
+
+class LocalShutdownError(AbortionError):
+  """Indicates that an RPC has terminated due to local shutdown of RPCs."""
+
+
+class RemoteShutdownError(AbortionError):
+  """Indicates that an RPC has terminated due to remote shutdown of RPCs."""
+
+
+class NetworkError(AbortionError):
+  """Indicates that some error occurred on the network."""
+
+
+class LocalError(AbortionError):
+  """Indicates that an RPC has terminated due to a local defect."""
+
+
+class RemoteError(AbortionError):
+  """Indicates that an RPC has terminated due to a remote defect."""
+
+
+class RpcContext(object):
+  """Provides RPC-related information and action."""
+  __metaclass__ = abc.ABCMeta
+
+  @abc.abstractmethod
+  def is_active(self):
+    """Describes whether the RPC is active or has terminated."""
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def time_remaining(self):
+    """Describes the length of allowed time remaining for the RPC.
+
+    Returns:
+      A nonnegative float indicating the length of allowed time in seconds
+      remaining for the RPC to complete before it is considered to have timed
+      out.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def add_abortion_callback(self, abortion_callback):
+    """Registers a callback to be called if the RPC is aborted.
+
+    Args:
+      abortion_callback: A callable to be called and passed an Abortion value
+        in the event of RPC abortion.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def cancel(self):
+    """Cancels the RPC.
+
+    Idempotent and has no effect if the RPC has already terminated.
+    """
+    raise NotImplementedError()
+
+
+class Call(RpcContext):
+  """Invocation-side utility object for an RPC."""
+  __metaclass__ = abc.ABCMeta
+
+  @abc.abstractmethod
+  def initial_metadata(self):
+    """Accesses the initial metadata from the service-side of the RPC.
+
+    This method blocks until the value is available or is known not to have been
+    emitted from the service-side of the RPC.
+
+    Returns:
+      The initial metadata object emitted by the service-side of the RPC, or
+        None if there was no such value.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def terminal_metadata(self):
+    """Accesses the terminal metadata from the service-side of the RPC.
+
+    This method blocks until the value is available or is known not to have been
+    emitted from the service-side of the RPC.
+
+    Returns:
+      The terminal metadata object emitted by the service-side of the RPC, or
+        None if there was no such value.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def code(self):
+    """Accesses the code emitted by the service-side of the RPC.
+
+    This method blocks until the value is available or is known not to have been
+    emitted from the service-side of the RPC.
+
+    Returns:
+      The code object emitted by the service-side of the RPC, or None if there
+        was no such value.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def details(self):
+    """Accesses the details value emitted by the service-side of the RPC.
+
+    This method blocks until the value is available or is known not to have been
+    emitted from the service-side of the RPC.
+
+    Returns:
+      The details value emitted by the service-side of the RPC, or None if there
+        was no such value.
+    """
+    raise NotImplementedError()
+
+
+class ServicerContext(RpcContext):
+  """A context object passed to method implementations."""
+  __metaclass__ = abc.ABCMeta
+
+  @abc.abstractmethod
+  def invocation_metadata(self):
+    """Accesses the metadata from the invocation-side of the RPC.
+
+    This method blocks until the value is available or is known not to have been
+    emitted from the invocation-side of the RPC.
+
+    Returns:
+      The metadata object emitted by the invocation-side of the RPC, or None if
+        there was no such value.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def initial_metadata(self, initial_metadata):
+    """Accepts the service-side initial metadata value of the RPC.
+
+    This method need not be called by method implementations if they have no
+    service-side initial metadata to transmit.
+
+    Args:
+      initial_metadata: The service-side initial metadata value of the RPC to
+        be transmitted to the invocation side of the RPC.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def terminal_metadata(self, terminal_metadata):
+    """Accepts the service-side terminal metadata value of the RPC.
+
+    This method need not be called by method implementations if they have no
+    service-side terminal metadata to transmit.
+
+    Args:
+      terminal_metadata: The service-side terminal metadata value of the RPC to
+        be transmitted to the invocation side of the RPC.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def code(self, code):
+    """Accepts the service-side code of the RPC.
+
+    This method need not be called by method implementations if they have no
+    code to transmit.
+
+    Args:
+      code: The code of the RPC to be transmitted to the invocation side of the
+        RPC.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def details(self, details):
+    """Accepts the service-side details of the RPC.
+
+    This method need not be called by method implementations if they have no
+    service-side details to transmit.
+
+    Args:
+      details: The service-side details value of the RPC to be transmitted to
+        the invocation side of the RPC.
+    """
+    raise NotImplementedError()
+
+
+class ResponseReceiver(object):
+  """Invocation-side object used to accept the output of an RPC."""
+  __metaclass__ = abc.ABCMeta
+
+  @abc.abstractmethod
+  def initial_metadata(self, initial_metadata):
+    """Receives the initial metadata from the service-side of the RPC.
+
+    Args:
+      initial_metadata: The initial metadata object emitted from the
+        service-side of the RPC.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def response(self, response):
+    """Receives a response from the service-side of the RPC.
+
+    Args:
+      response: A response object emitted from the service-side of the RPC.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def complete(self, terminal_metadata, code, details):
+    """Receives the completion values emitted from the service-side of the RPC.
+
+    Args:
+      terminal_metadata: The terminal metadata object emitted from the
+        service-side of the RPC.
+      code: The code object emitted from the service-side of the RPC.
+      details: The details object emitted from the service-side of the RPC.
+    """
+    raise NotImplementedError()
+
+
+class UnaryUnaryMultiCallable(object):
+  """Affords invoking a unary-unary RPC in any call style."""
+  __metaclass__ = abc.ABCMeta
+
+  @abc.abstractmethod
+  def __call__(
+      self, request, timeout, metadata=None, with_call=False):
+    """Synchronously invokes the underlying RPC.
+
+    Args:
+      request: The request value for the RPC.
+      timeout: A duration of time in seconds to allow for the RPC.
+      metadata: A metadata value to be passed to the service-side of
+        the RPC.
+      with_call: Whether or not to return a Call for the RPC in addition to
+        the response.
+
+    Returns:
+      The response value for the RPC, and a Call for the RPC if with_call was
+        set to True at invocation.
+
+    Raises:
+      AbortionError: Indicating that the RPC was aborted.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def future(self, request, timeout, metadata=None):
+    """Asynchronously invokes the underlying RPC.
+
+    Args:
+      request: The request value for the RPC.
+      timeout: A duration of time in seconds to allow for the RPC.
+      metadata: A metadata value to be passed to the service-side of
+        the RPC.
+
+    Returns:
+      An object that is both a Call for the RPC and a future.Future. In the
+        event of RPC completion, the returned Future's result value will be the
+        response value of the RPC. In the event of RPC abortion, the returned
+        Future's exception value will be an AbortionError.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def event(
+      self, request, receiver, abortion_callback, timeout,
+      metadata=None):
+    """Asynchronously invokes the underlying RPC.
+
+    Args:
+      request: The request value for the RPC.
+      receiver: A ResponseReceiver to be passed the response data of the RPC.
+      abortion_callback: A callback to be called and passed an Abortion value
+        in the event of RPC abortion.
+      timeout: A duration of time in seconds to allow for the RPC.
+      metadata: A metadata value to be passed to the service-side of
+        the RPC.
+
+    Returns:
+      A Call for the RPC.
+    """
+    raise NotImplementedError()
+
+
+class UnaryStreamMultiCallable(object):
+  """Affords invoking a unary-stream RPC in any call style."""
+  __metaclass__ = abc.ABCMeta
+
+  @abc.abstractmethod
+  def __call__(self, request, timeout, metadata=None):
+    """Invokes the underlying RPC.
+
+    Args:
+      request: The request value for the RPC.
+      timeout: A duration of time in seconds to allow for the RPC.
+      metadata: A metadata value to be passed to the service-side of
+        the RPC.
+
+    Returns:
+      An object that is both a Call for the RPC and an iterator of response
+        values. Drawing response values from the returned iterator may raise
+        AbortionError indicating abortion of the RPC.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def event(
+      self, request, receiver, abortion_callback, timeout,
+      metadata=None):
+    """Asynchronously invokes the underlying RPC.
+
+    Args:
+      request: The request value for the RPC.
+      receiver: A ResponseReceiver to be passed the response data of the RPC.
+      abortion_callback: A callback to be called and passed an Abortion value
+        in the event of RPC abortion.
+      timeout: A duration of time in seconds to allow for the RPC.
+      metadata: A metadata value to be passed to the service-side of
+        the RPC.
+
+    Returns:
+      A Call object for the RPC.
+    """
+    raise NotImplementedError()
+
+
+class StreamUnaryMultiCallable(object):
+  """Affords invoking a stream-unary RPC in any call style."""
+  __metaclass__ = abc.ABCMeta
+
+  @abc.abstractmethod
+  def __call__(
+      self, request_iterator, timeout, metadata=None,
+      with_call=False):
+    """Synchronously invokes the underlying RPC.
+
+    Args:
+      request_iterator: An iterator that yields request values for the RPC.
+      timeout: A duration of time in seconds to allow for the RPC.
+      metadata: A metadata value to be passed to the service-side of
+        the RPC.
+      with_call: Whether or not to return a Call for the RPC in addition to
+        the response.
+
+    Returns:
+      The response value for the RPC, and a Call for the RPC if with_call was
+        set to True at invocation.
+
+    Raises:
+      AbortionError: Indicating that the RPC was aborted.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def future(self, request_iterator, timeout, metadata=None):
+    """Asynchronously invokes the underlying RPC.
+
+    Args:
+      request_iterator: An iterator that yields request values for the RPC.
+      timeout: A duration of time in seconds to allow for the RPC.
+      metadata: A metadata value to be passed to the service-side of
+        the RPC.
+
+    Returns:
+      An object that is both a Call for the RPC and a future.Future. In the
+        event of RPC completion, the returned Future's result value will be the
+        response value of the RPC. In the event of RPC abortion, the returned
+        Future's exception value will be an AbortionError.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def event(
+      self, receiver, abortion_callback, timeout, metadata=None):
+    """Asynchronously invokes the underlying RPC.
+
+    Args:
+      receiver: A ResponseReceiver to be passed the response data of the RPC.
+      abortion_callback: A callback to be called and passed an Abortion value
+        in the event of RPC abortion.
+      timeout: A duration of time in seconds to allow for the RPC.
+      metadata: A metadata value to be passed to the service-side of
+        the RPC.
+
+    Returns:
+      A single object that is both a Call object for the RPC and a
+        stream.Consumer to which the request values of the RPC should be passed.
+    """
+    raise NotImplementedError()
+
+
+class StreamStreamMultiCallable(object):
+  """Affords invoking a stream-stream RPC in any call style."""
+  __metaclass__ = abc.ABCMeta
+
+  @abc.abstractmethod
+  def __call__(self, request_iterator, timeout, metadata=None):
+    """Invokes the underlying RPC.
+
+    Args:
+      request_iterator: An iterator that yields request values for the RPC.
+      timeout: A duration of time in seconds to allow for the RPC.
+      metadata: A metadata value to be passed to the service-side of
+        the RPC.
+
+    Returns:
+      An object that is both a Call for the RPC and an iterator of response
+        values. Drawing response values from the returned iterator may raise
+        AbortionError indicating abortion of the RPC.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def event(
+      self, receiver, abortion_callback, timeout, metadata=None):
+    """Asynchronously invokes the underlying RPC.
+
+    Args:
+      receiver: A ResponseReceiver to be passed the response data of the RPC.
+      abortion_callback: A callback to be called and passed an Abortion value
+        in the event of RPC abortion.
+      timeout: A duration of time in seconds to allow for the RPC.
+      metadata: A metadata value to be passed to the service-side of
+        the RPC.
+
+    Returns:
+      A single object that is both a Call object for the RPC and a
+        stream.Consumer to which the request values of the RPC should be passed.
+    """
+    raise NotImplementedError()
+
+
+class MethodImplementation(object):
+  """A sum type that describes a method implementation.
+
+  Attributes:
+    cardinality: A cardinality.Cardinality value.
+    style: A style.Service value.
+    unary_unary_inline: The implementation of the method as a callable value
+      that takes a request value and a ServicerContext object and returns a
+      response value. Only non-None if cardinality is
+      cardinality.Cardinality.UNARY_UNARY and style is style.Service.INLINE.
+    unary_stream_inline: The implementation of the method as a callable value
+      that takes a request value and a ServicerContext object and returns an
+      iterator of response values. Only non-None if cardinality is
+      cardinality.Cardinality.UNARY_STREAM and style is style.Service.INLINE.
+    stream_unary_inline: The implementation of the method as a callable value
+      that takes an iterator of request values and a ServicerContext object and
+      returns a response value. Only non-None if cardinality is
+      cardinality.Cardinality.STREAM_UNARY and style is style.Service.INLINE.
+    stream_stream_inline: The implementation of the method as a callable value
+      that takes an iterator of request values and a ServicerContext object and
+      returns an iterator of response values. Only non-None if cardinality is
+      cardinality.Cardinality.STREAM_STREAM and style is style.Service.INLINE.
+    unary_unary_event: The implementation of the method as a callable value that
+      takes a request value, a response callback to which to pass the response
+      value of the RPC, and a ServicerContext. Only non-None if cardinality is
+      cardinality.Cardinality.UNARY_UNARY and style is style.Service.EVENT.
+    unary_stream_event: The implementation of the method as a callable value
+      that takes a request value, a stream.Consumer to which to pass the
+      response values of the RPC, and a ServicerContext. Only non-None if
+      cardinality is cardinality.Cardinality.UNARY_STREAM and style is
+      style.Service.EVENT.
+    stream_unary_event: The implementation of the method as a callable value
+      that takes a response callback to which to pass the response value of the
+      RPC and a ServicerContext and returns a stream.Consumer to which the
+      request values of the RPC should be passed. Only non-None if cardinality
+      is cardinality.Cardinality.STREAM_UNARY and style is style.Service.EVENT.
+    stream_stream_event: The implementation of the method as a callable value
+      that takes a stream.Consumer to which to pass the response values of the
+      RPC and a ServicerContext and returns a stream.Consumer to which the
+      request values of the RPC should be passed. Only non-None if cardinality
+      is cardinality.Cardinality.STREAM_STREAM and style is
+      style.Service.EVENT.
+  """
+  __metaclass__ = abc.ABCMeta
+
+
+class MultiMethodImplementation(object):
+  """A general type able to service many methods."""
+  __metaclass__ = abc.ABCMeta
+
+  @abc.abstractmethod
+  def service(self, group, method, response_consumer, context):
+    """Services an RPC.
+
+    Args:
+      group: The group identifier of the RPC.
+      method: The method identifier of the RPC.
+      response_consumer: A stream.Consumer to be called to accept the response
+        values of the RPC.
+      context: a ServicerContext object.
+
+    Returns:
+      A stream.Consumer with which to accept the request values of the RPC. The
+        consumer returned from this method may or may not be invoked to
+        completion: in the case of RPC abortion, RPC Framework will simply stop
+        passing values to this object. Implementations must not assume that this
+        object will be called to completion of the request stream or even called
+        at all.
+
+    Raises:
+      abandonment.Abandoned: May or may not be raised when the RPC has been
+        aborted.
+      NoSuchMethodError: If this MultiMethodImplementation does not recognize
+        the given group and method and is not able to service the RPC.
+    """
+    raise NotImplementedError()
+
+
+class GenericStub(object):
+  """Affords RPC invocation via generic methods."""
+  __metaclass__ = abc.ABCMeta
+
+  @abc.abstractmethod
+  def blocking_unary_unary(
+      self, group, method, request, timeout, metadata=None,
+      with_call=False):
+    """Invokes a unary-request-unary-response method.
+
+    This method blocks until either returning the response value of the RPC
+    (in the event of RPC completion) or raising an exception (in the event of
+    RPC abortion).
+
+    Args:
+      group: The group identifier of the RPC.
+      method: The method identifier of the RPC.
+      request: The request value for the RPC.
+      timeout: A duration of time in seconds to allow for the RPC.
+      metadata: A metadata value to be passed to the service-side of the RPC.
+      with_call: Whether or not to return a Call for the RPC in addition to
+        the response.
+
+    Returns:
+      The response value for the RPC, and a Call for the RPC if with_call was
+        set to True at invocation.
+
+    Raises:
+      AbortionError: Indicating that the RPC was aborted.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def future_unary_unary(
+      self, group, method, request, timeout, metadata=None):
+    """Invokes a unary-request-unary-response method.
+
+    Args:
+      group: The group identifier of the RPC.
+      method: The method identifier of the RPC.
+      request: The request value for the RPC.
+      timeout: A duration of time in seconds to allow for the RPC.
+      metadata: A metadata value to be passed to the service-side of the RPC.
+
+    Returns:
+      An object that is both a Call for the RPC and a future.Future. In the
+        event of RPC completion, the returned Future's result value will be the
+        response value of the RPC. In the event of RPC abortion, the returned
+        Future's exception value will be an AbortionError.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def inline_unary_stream(
+      self, group, method, request, timeout, metadata=None):
+    """Invokes a unary-request-stream-response method.
+
+    Args:
+      group: The group identifier of the RPC.
+      method: The method identifier of the RPC.
+      request: The request value for the RPC.
+      timeout: A duration of time in seconds to allow for the RPC.
+      metadata: A metadata value to be passed to the service-side of the RPC.
+
+    Returns:
+      An object that is both a Call for the RPC and an iterator of response
+        values. Drawing response values from the returned iterator may raise
+        AbortionError indicating abortion of the RPC.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def blocking_stream_unary(
+      self, group, method, request_iterator, timeout, metadata=None,
+      with_call=False):
+    """Invokes a stream-request-unary-response method.
+
+    This method blocks until either returning the response value of the RPC
+    (in the event of RPC completion) or raising an exception (in the event of
+    RPC abortion).
+
+    Args:
+      group: The group identifier of the RPC.
+      method: The method identifier of the RPC.
+      request_iterator: An iterator that yields request values for the RPC.
+      timeout: A duration of time in seconds to allow for the RPC.
+      metadata: A metadata value to be passed to the service-side of the RPC.
+      with_call: Whether or not to return a Call for the RPC in addition to
+        the response.
+
+    Returns:
+      The response value for the RPC, and a Call for the RPC if with_call was
+        set to True at invocation.
+
+    Raises:
+      AbortionError: Indicating that the RPC was aborted.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def future_stream_unary(
+      self, group, method, request_iterator, timeout, metadata=None):
+    """Invokes a stream-request-unary-response method.
+
+    Args:
+      group: The group identifier of the RPC.
+      method: The method identifier of the RPC.
+      request_iterator: An iterator that yields request values for the RPC.
+      timeout: A duration of time in seconds to allow for the RPC.
+      metadata: A metadata value to be passed to the service-side of the RPC.
+
+    Returns:
+      An object that is both a Call for the RPC and a future.Future. In the
+        event of RPC completion, the returned Future's result value will be the
+        response value of the RPC. In the event of RPC abortion, the returned
+        Future's exception value will be an AbortionError.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def inline_stream_stream(
+      self, group, method, request_iterator, timeout, metadata=None):
+    """Invokes a stream-request-stream-response method.
+
+    Args:
+      group: The group identifier of the RPC.
+      method: The method identifier of the RPC.
+      request_iterator: An iterator that yields request values for the RPC.
+      timeout: A duration of time in seconds to allow for the RPC.
+      metadata: A metadata value to be passed to the service-side of the RPC.
+
+    Returns:
+      An object that is both a Call for the RPC and an iterator of response
+        values. Drawing response values from the returned iterator may raise
+        AbortionError indicating abortion of the RPC.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def event_unary_unary(
+      self, group, method, request, receiver, abortion_callback, timeout,
+      metadata=None):
+    """Event-driven invocation of a unary-request-unary-response method.
+
+    Args:
+      group: The group identifier of the RPC.
+      method: The method identifier of the RPC.
+      request: The request value for the RPC.
+      receiver: A ResponseReceiver to be passed the response data of the RPC.
+      abortion_callback: A callback to be called and passed an Abortion value
+        in the event of RPC abortion.
+      timeout: A duration of time in seconds to allow for the RPC.
+      metadata: A metadata value to be passed to the service-side of the RPC.
+
+    Returns:
+      A Call for the RPC.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def event_unary_stream(
+      self, group, method, request, receiver, abortion_callback, timeout,
+      metadata=None):
+    """Event-driven invocation of a unary-request-stream-response method.
+
+    Args:
+      group: The group identifier of the RPC.
+      method: The method identifier of the RPC.
+      request: The request value for the RPC.
+      receiver: A ResponseReceiver to be passed the response data of the RPC.
+      abortion_callback: A callback to be called and passed an Abortion value
+        in the event of RPC abortion.
+      timeout: A duration of time in seconds to allow for the RPC.
+      metadata: A metadata value to be passed to the service-side of the RPC.
+
+    Returns:
+      A Call for the RPC.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def event_stream_unary(
+      self, group, method, receiver, abortion_callback, timeout,
+      metadata=None):
+    """Event-driven invocation of a unary-request-unary-response method.
+
+    Args:
+      group: The group identifier of the RPC.
+      method: The method identifier of the RPC.
+      receiver: A ResponseReceiver to be passed the response data of the RPC.
+      abortion_callback: A callback to be called and passed an Abortion value
+        in the event of RPC abortion.
+      timeout: A duration of time in seconds to allow for the RPC.
+      metadata: A metadata value to be passed to the service-side of the RPC.
+
+    Returns:
+      A pair of a Call object for the RPC and a stream.Consumer to which the
+        request values of the RPC should be passed.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def event_stream_stream(
+      self, group, method, receiver, abortion_callback, timeout,
+      metadata=None):
+    """Event-driven invocation of a unary-request-stream-response method.
+
+    Args:
+      group: The group identifier of the RPC.
+      method: The method identifier of the RPC.
+      receiver: A ResponseReceiver to be passed the response data of the RPC.
+      abortion_callback: A callback to be called and passed an Abortion value
+        in the event of RPC abortion.
+      timeout: A duration of time in seconds to allow for the RPC.
+      metadata: A metadata value to be passed to the service-side of the RPC.
+
+    Returns:
+      A pair of a Call object for the RPC and a stream.Consumer to which the
+        request values of the RPC should be passed.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def unary_unary(self, group, method):
+    """Creates a UnaryUnaryMultiCallable for a unary-unary method.
+
+    Args:
+      group: The group identifier of the RPC.
+      method: The method identifier of the RPC.
+
+    Returns:
+      A UnaryUnaryMultiCallable value for the named unary-unary method.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def unary_stream(self, group, method):
+    """Creates a UnaryStreamMultiCallable for a unary-stream method.
+
+    Args:
+      group: The group identifier of the RPC.
+      method: The method identifier of the RPC.
+
+    Returns:
+      A UnaryStreamMultiCallable value for the named unary-stream method.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def stream_unary(self, group, method):
+    """Creates a StreamUnaryMultiCallable for a stream-unary method.
+
+    Args:
+      group: The group identifier of the RPC.
+      method: The method identifier of the RPC.
+
+    Returns:
+      A StreamUnaryMultiCallable value for the named stream-unary method.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def stream_stream(self, group, method):
+    """Creates a StreamStreamMultiCallable for a stream-stream method.
+
+    Args:
+      group: The group identifier of the RPC.
+      method: The method identifier of the RPC.
+
+    Returns:
+      A StreamStreamMultiCallable value for the named stream-stream method.
+    """
+    raise NotImplementedError()
+
+
+class DynamicStub(object):
+  """Affords RPC invocation via attributes corresponding to afforded methods.
+
+  Instances of this type may be scoped to a single group so that attribute
+  access is unambiguous.
+
+  Instances of this type respond to attribute access as follows: if the
+  requested attribute is the name of a unary-unary method, the value of the
+  attribute will be a UnaryUnaryMultiCallable with which to invoke an RPC; if
+  the requested attribute is the name of a unary-stream method, the value of the
+  attribute will be a UnaryStreamMultiCallable with which to invoke an RPC; if
+  the requested attribute is the name of a stream-unary method, the value of the
+  attribute will be a StreamUnaryMultiCallable with which to invoke an RPC; and
+  if the requested attribute is the name of a stream-stream method, the value of
+  the attribute will be a StreamStreamMultiCallable with which to invoke an RPC.
+  """
+  __metaclass__ = abc.ABCMeta
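
A minimal sketch of how the three call styles defined above might be exercised
against a GenericStub. The stub, group, method, and request values are assumed
to come from whichever Face implementation is in use; the receiver and the
abortion callback below are placeholder stand-ins, not part of this interface
module.

    def _noop_abortion_callback(abortion):
      # Called with an Abortion value if the RPC is aborted.
      pass

    class _RecordingReceiver(object):
      """A trivial ResponseReceiver that just records what it is given."""

      def initial_metadata(self, initial_metadata):
        self.initial = initial_metadata

      def response(self, response):
        self.value = response

      def complete(self, terminal_metadata, code, details):
        self.terminal = (terminal_metadata, code, details)

    def invoke_all_styles(stub, group, method, request):
      # Blocking style: returns the response (and a Call if with_call=True).
      response = stub.blocking_unary_unary(group, method, request, 10)

      # Future style: the returned object is both a Call and a future.Future.
      response_future = stub.future_unary_unary(group, method, request, 10)
      response = response_future.result()

      # Event style: data is delivered to a ResponseReceiver; the returned
      # Call can be used to cancel the RPC or inspect its metadata.
      receiver = _RecordingReceiver()
      call = stub.event_unary_unary(
          group, method, request, receiver, _noop_abortion_callback, 10)
      return response, call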
diff --git a/src/python/grpcio/grpc/framework/interfaces/face/utilities.py b/src/python/grpcio/grpc/framework/interfaces/face/utilities.py
new file mode 100644
index 0000000..db2ec6e
--- /dev/null
+++ b/src/python/grpcio/grpc/framework/interfaces/face/utilities.py
@@ -0,0 +1,178 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Utilities for RPC Framework's Face interface."""
+
+import collections
+
+# stream is referenced from specification in this module.
+from grpc.framework.common import cardinality
+from grpc.framework.common import style
+from grpc.framework.foundation import stream  # pylint: disable=unused-import
+from grpc.framework.interfaces.face import face
+
+
+class _MethodImplementation(
+    face.MethodImplementation,
+    collections.namedtuple(
+        '_MethodImplementation',
+        ['cardinality', 'style', 'unary_unary_inline', 'unary_stream_inline',
+         'stream_unary_inline', 'stream_stream_inline', 'unary_unary_event',
+         'unary_stream_event', 'stream_unary_event', 'stream_stream_event',])):
+  pass
+
+
+def unary_unary_inline(behavior):
+  """Creates an face.MethodImplementation for the given behavior.
+
+  Args:
+    behavior: The implementation of a unary-unary RPC method as a callable value
+      that takes a request value and a face.ServicerContext object and
+      returns a response value.
+
+  Returns:
+    A face.MethodImplementation derived from the given behavior.
+  """
+  return _MethodImplementation(
+      cardinality.Cardinality.UNARY_UNARY, style.Service.INLINE, behavior,
+      None, None, None, None, None, None, None)
+
+
+def unary_stream_inline(behavior):
+  """Creates an face.MethodImplementation for the given behavior.
+
+  Args:
+    behavior: The implementation of a unary-stream RPC method as a callable
+      value that takes a request value and a face.ServicerContext object and
+      returns an iterator of response values.
+
+  Returns:
+    A face.MethodImplementation derived from the given behavior.
+  """
+  return _MethodImplementation(
+      cardinality.Cardinality.UNARY_STREAM, style.Service.INLINE, None,
+      behavior, None, None, None, None, None, None)
+
+
+def stream_unary_inline(behavior):
+  """Creates an face.MethodImplementation for the given behavior.
+
+  Args:
+    behavior: The implementation of a stream-unary RPC method as a callable
+      value that takes an iterator of request values and a
+      face.ServicerContext object and returns a response value.
+
+  Returns:
+    A face.MethodImplementation derived from the given behavior.
+  """
+  return _MethodImplementation(
+      cardinality.Cardinality.STREAM_UNARY, style.Service.INLINE, None, None,
+      behavior, None, None, None, None, None)
+
+
+def stream_stream_inline(behavior):
+  """Creates an face.MethodImplementation for the given behavior.
+
+  Args:
+    behavior: The implementation of a stream-stream RPC method as a callable
+      value that takes an iterator of request values and a
+      face.ServicerContext object and returns an iterator of response values.
+
+  Returns:
+    A face.MethodImplementation derived from the given behavior.
+  """
+  return _MethodImplementation(
+      cardinality.Cardinality.STREAM_STREAM, style.Service.INLINE, None, None,
+      None, behavior, None, None, None, None)
+
+
+def unary_unary_event(behavior):
+  """Creates an face.MethodImplementation for the given behavior.
+
+  Args:
+    behavior: The implementation of a unary-unary RPC method as a callable
+      value that takes a request value, a response callback to which to pass
+      the response value of the RPC, and a face.ServicerContext.
+
+  Returns:
+    A face.MethodImplementation derived from the given behavior.
+  """
+  return _MethodImplementation(
+      cardinality.Cardinality.UNARY_UNARY, style.Service.EVENT, None, None,
+      None, None, behavior, None, None, None)
+
+
+def unary_stream_event(behavior):
+  """Creates an face.MethodImplementation for the given behavior.
+
+  Args:
+    behavior: The implementation of a unary-stream RPC method as a callable
+      value that takes a request value, a stream.Consumer to which to pass the
+      response values of the RPC, and a face.ServicerContext.
+
+  Returns:
+    A face.MethodImplementation derived from the given behavior.
+  """
+  return _MethodImplementation(
+      cardinality.Cardinality.UNARY_STREAM, style.Service.EVENT, None, None,
+      None, None, None, behavior, None, None)
+
+
+def stream_unary_event(behavior):
+  """Creates an face.MethodImplementation for the given behavior.
+
+  Args:
+    behavior: The implementation of a stream-unary RPC method as a callable
+      value that takes a response callback to which to pass the response value
+      of the RPC and a face.ServicerContext and returns a stream.Consumer to
+      which the request values of the RPC should be passed.
+
+  Returns:
+    A face.MethodImplementation derived from the given behavior.
+  """
+  return _MethodImplementation(
+      cardinality.Cardinality.STREAM_UNARY, style.Service.EVENT, None, None,
+      None, None, None, None, behavior, None)
+
+
+def stream_stream_event(behavior):
+  """Creates an face.MethodImplementation for the given behavior.
+
+  Args:
+    behavior: The implementation of a stream-stream RPC method as a callable
+      value that takes a stream.Consumer to which to pass the response values
+      of the RPC and a face.ServicerContext and returns a stream.Consumer to
+      which the request values of the RPC should be passed.
+
+  Returns:
+    A face.MethodImplementation derived from the given behavior.
+  """
+  return _MethodImplementation(
+      cardinality.Cardinality.STREAM_STREAM, style.Service.EVENT, None, None,
+      None, None, None, None, None, behavior)
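
As a usage sketch, a servicer's behaviors can be wrapped into
face.MethodImplementation values with these helpers and keyed by
(group, method) for whatever construction API consumes them; the echo and
count behaviors and the identifiers below are made up for illustration.

    from grpc.framework.interfaces.face import utilities

    def _echo_unary_unary(request, servicer_context):
      # Inline unary-unary behavior: takes the request and a ServicerContext.
      return request

    def _count_stream_unary(request_iterator, servicer_context):
      # Inline stream-unary behavior: consumes the request iterator.
      return sum(1 for _ in request_iterator)

    method_implementations = {
        ('example.Group', 'Echo'): utilities.unary_unary_inline(
            _echo_unary_unary),
        ('example.Group', 'Count'): utilities.stream_unary_inline(
            _count_stream_unary),
    }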
diff --git a/src/python/grpcio/grpc/framework/interfaces/links/links.py b/src/python/grpcio/grpc/framework/interfaces/links/links.py
index 069ff02..b98a30a 100644
--- a/src/python/grpcio/grpc/framework/interfaces/links/links.py
+++ b/src/python/grpcio/grpc/framework/interfaces/links/links.py
@@ -34,12 +34,30 @@
 import enum
 
 
+class Transport(collections.namedtuple('Transport', ('kind', 'value',))):
+  """A sum type for handles to an underlying transport system.
+
+  Attributes:
+    kind: A Kind value identifying the kind of value being passed to or from
+      the underlying transport.
+    value: The value being passed through RPC Framework between the high-level
+      application and the underlying transport.
+  """
+
+  @enum.unique
+  class Kind(enum.Enum):
+    CALL_OPTION = 'call option'
+    SERVICER_CONTEXT = 'servicer context'
+    INVOCATION_CONTEXT = 'invocation context'
+
+
 class Ticket(
     collections.namedtuple(
         'Ticket',
-        ['operation_id', 'sequence_number', 'group', 'method', 'subscription',
+        ('operation_id', 'sequence_number', 'group', 'method', 'subscription',
          'timeout', 'allowance', 'initial_metadata', 'payload',
-         'terminal_metadata', 'code', 'message', 'termination'])):
+         'terminal_metadata', 'code', 'message', 'termination',
+         'transport',))):
   """A sum type for all values sent from a front to a back.
 
   Attributes:
@@ -81,6 +99,8 @@
     termination: A Termination value describing the end of the operation, or
       None if the operation has not yet terminated. If set, no further tickets
       may be sent in the same direction.
+    transport: A Transport value or None, with further semantics being a matter
+      between high-level application and underlying transport.
   """
 
   @enum.unique
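
Because Ticket is a namedtuple, the new trailing transport field is simply one
more constructor argument; call sites that build tickets positionally now pass
an extra None, as the test updates below show. A small sketch with placeholder
values:

    from grpc.framework.interfaces.links import links

    # A transport-level handle wrapped for transmission through RPC Framework;
    # the wrapped value itself is opaque to the framework.
    transport = links.Transport(
        kind=links.Transport.Kind.INVOCATION_CONTEXT, value=object())

    # An invocation-side ticket carrying the transport handle; identifiers and
    # metadata here are placeholders.
    ticket = links.Ticket(
        operation_id=object(), sequence_number=0, group='example.Group',
        method='Echo', subscription=links.Ticket.Subscription.FULL,
        timeout=10, allowance=1, initial_metadata=None, payload=None,
        terminal_metadata=None, code=None, message=None, termination=None,
        transport=transport)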
diff --git a/src/python/grpcio_test/grpc_test/_adapter/_low_test.py b/src/python/grpcio_test/grpc_test/_adapter/_low_test.py
index 44fe760..7014912 100644
--- a/src/python/grpcio_test/grpc_test/_adapter/_low_test.py
+++ b/src/python/grpcio_test/grpc_test/_adapter/_low_test.py
@@ -52,7 +52,6 @@
   def set_ith_result(i, completion_queue):
     result = completion_queue.next(deadline)
     with lock:
-      print i, completion_queue, result, time.time() - deadline
       results[i] = result
   for i, completion_queue in enumerate(completion_queues):
     thread = threading.Thread(target=set_ith_result,
@@ -80,10 +79,12 @@
     del self.client_channel
 
     self.client_completion_queue.shutdown()
-    while self.client_completion_queue.next().type != _types.EventType.QUEUE_SHUTDOWN:
+    while (self.client_completion_queue.next().type !=
+           _types.EventType.QUEUE_SHUTDOWN):
       pass
     self.server_completion_queue.shutdown()
-    while self.server_completion_queue.next().type != _types.EventType.QUEUE_SHUTDOWN:
+    while (self.server_completion_queue.next().type !=
+           _types.EventType.QUEUE_SHUTDOWN):
       pass
 
     del self.client_completion_queue
@@ -91,58 +92,68 @@
     del self.server
 
   def testEcho(self):
-    DEADLINE = time.time()+5
-    DEADLINE_TOLERANCE = 0.25
-    CLIENT_METADATA_ASCII_KEY = 'key'
-    CLIENT_METADATA_ASCII_VALUE = 'val'
-    CLIENT_METADATA_BIN_KEY = 'key-bin'
-    CLIENT_METADATA_BIN_VALUE = b'\0'*1000
-    SERVER_INITIAL_METADATA_KEY = 'init_me_me_me'
-    SERVER_INITIAL_METADATA_VALUE = 'whodawha?'
-    SERVER_TRAILING_METADATA_KEY = 'california_is_in_a_drought'
-    SERVER_TRAILING_METADATA_VALUE = 'zomg it is'
-    SERVER_STATUS_CODE = _types.StatusCode.OK
-    SERVER_STATUS_DETAILS = 'our work is never over'
-    REQUEST = 'in death a member of project mayhem has a name'
-    RESPONSE = 'his name is robert paulson'
-    METHOD = 'twinkies'
-    HOST = 'hostess'
+    deadline = time.time() + 5
+    event_time_tolerance = 2
+    deadline_tolerance = 0.25
+    client_metadata_ascii_key = 'key'
+    client_metadata_ascii_value = 'val'
+    client_metadata_bin_key = 'key-bin'
+    client_metadata_bin_value = b'\0'*1000
+    server_initial_metadata_key = 'init_me_me_me'
+    server_initial_metadata_value = 'whodawha?'
+    server_trailing_metadata_key = 'california_is_in_a_drought'
+    server_trailing_metadata_value = 'zomg it is'
+    server_status_code = _types.StatusCode.OK
+    server_status_details = 'our work is never over'
+    request = 'blarghaflargh'
+    response = 'his name is robert paulson'
+    method = 'twinkies'
+    host = 'hostess'
     server_request_tag = object()
-    request_call_result = self.server.request_call(self.server_completion_queue, server_request_tag)
+    request_call_result = self.server.request_call(self.server_completion_queue,
+                                                   server_request_tag)
 
-    self.assertEquals(_types.CallError.OK, request_call_result)
+    self.assertEqual(_types.CallError.OK, request_call_result)
 
     client_call_tag = object()
-    client_call = self.client_channel.create_call(self.client_completion_queue, METHOD, HOST, DEADLINE)
-    client_initial_metadata = [(CLIENT_METADATA_ASCII_KEY, CLIENT_METADATA_ASCII_VALUE), (CLIENT_METADATA_BIN_KEY, CLIENT_METADATA_BIN_VALUE)]
+    client_call = self.client_channel.create_call(
+        self.client_completion_queue, method, host, deadline)
+    client_initial_metadata = [
+        (client_metadata_ascii_key, client_metadata_ascii_value),
+        (client_metadata_bin_key, client_metadata_bin_value)
+    ]
     client_start_batch_result = client_call.start_batch([
         _types.OpArgs.send_initial_metadata(client_initial_metadata),
-        _types.OpArgs.send_message(REQUEST, 0),
+        _types.OpArgs.send_message(request, 0),
         _types.OpArgs.send_close_from_client(),
         _types.OpArgs.recv_initial_metadata(),
         _types.OpArgs.recv_message(),
         _types.OpArgs.recv_status_on_client()
     ], client_call_tag)
-    self.assertEquals(_types.CallError.OK, client_start_batch_result)
+    self.assertEqual(_types.CallError.OK, client_start_batch_result)
 
-    client_no_event, request_event, = wait_for_events([self.client_completion_queue, self.server_completion_queue], time.time() + 2)
-    self.assertEquals(client_no_event, None)
-    self.assertEquals(_types.EventType.OP_COMPLETE, request_event.type)
+    client_no_event, request_event, = wait_for_events(
+        [self.client_completion_queue, self.server_completion_queue],
+        time.time() + event_time_tolerance)
+    self.assertEqual(client_no_event, None)
+    self.assertEqual(_types.EventType.OP_COMPLETE, request_event.type)
     self.assertIsInstance(request_event.call, _low.Call)
     self.assertIs(server_request_tag, request_event.tag)
-    self.assertEquals(1, len(request_event.results))
+    self.assertEqual(1, len(request_event.results))
     received_initial_metadata = dict(request_event.results[0].initial_metadata)
     # Check that our metadata were transmitted
-    self.assertEquals(
+    self.assertEqual(
         dict(client_initial_metadata),
-        dict((x, received_initial_metadata[x]) for x in zip(*client_initial_metadata)[0]))
+        dict((x, received_initial_metadata[x])
+             for x in zip(*client_initial_metadata)[0]))
     # Check that Python's user agent string is a part of the full user agent
     # string
     self.assertIn('Python-gRPC-{}'.format(_grpcio_metadata.__version__),
                   received_initial_metadata['user-agent'])
-    self.assertEquals(METHOD, request_event.call_details.method)
-    self.assertEquals(HOST, request_event.call_details.host)
-    self.assertLess(abs(DEADLINE - request_event.call_details.deadline), DEADLINE_TOLERANCE)
+    self.assertEqual(method, request_event.call_details.method)
+    self.assertEqual(host, request_event.call_details.host)
+    self.assertLess(abs(deadline - request_event.call_details.deadline),
+                    deadline_tolerance)
 
     # Check that the channel is connected, and that both it and the call have
     # the proper target and peer; do this after the first flurry of messages to
@@ -155,33 +166,43 @@
 
     server_call_tag = object()
     server_call = request_event.call
-    server_initial_metadata = [(SERVER_INITIAL_METADATA_KEY, SERVER_INITIAL_METADATA_VALUE)]
-    server_trailing_metadata = [(SERVER_TRAILING_METADATA_KEY, SERVER_TRAILING_METADATA_VALUE)]
+    server_initial_metadata = [
+        (server_initial_metadata_key, server_initial_metadata_value)
+    ]
+    server_trailing_metadata = [
+        (server_trailing_metadata_key, server_trailing_metadata_value)
+    ]
     server_start_batch_result = server_call.start_batch([
         _types.OpArgs.send_initial_metadata(server_initial_metadata),
         _types.OpArgs.recv_message(),
-        _types.OpArgs.send_message(RESPONSE, 0),
+        _types.OpArgs.send_message(response, 0),
         _types.OpArgs.recv_close_on_server(),
-        _types.OpArgs.send_status_from_server(server_trailing_metadata, SERVER_STATUS_CODE, SERVER_STATUS_DETAILS)
+        _types.OpArgs.send_status_from_server(
+            server_trailing_metadata, server_status_code, server_status_details)
     ], server_call_tag)
-    self.assertEquals(_types.CallError.OK, server_start_batch_result)
+    self.assertEqual(_types.CallError.OK, server_start_batch_result)
 
-    client_event, server_event, = wait_for_events([self.client_completion_queue, self.server_completion_queue], time.time() + 1)
+    client_event, server_event, = wait_for_events(
+        [self.client_completion_queue, self.server_completion_queue],
+        time.time() + event_time_tolerance)
 
-    self.assertEquals(6, len(client_event.results))
+    self.assertEqual(6, len(client_event.results))
     found_client_op_types = set()
     for client_result in client_event.results:
-      self.assertNotIn(client_result.type, found_client_op_types)  # we expect each op type to be unique
+      # we expect each op type to be unique
+      self.assertNotIn(client_result.type, found_client_op_types)
       found_client_op_types.add(client_result.type)
       if client_result.type == _types.OpType.RECV_INITIAL_METADATA:
-        self.assertEquals(dict(server_initial_metadata), dict(client_result.initial_metadata))
+        self.assertEqual(dict(server_initial_metadata),
+                         dict(client_result.initial_metadata))
       elif client_result.type == _types.OpType.RECV_MESSAGE:
-        self.assertEquals(RESPONSE, client_result.message)
+        self.assertEqual(response, client_result.message)
       elif client_result.type == _types.OpType.RECV_STATUS_ON_CLIENT:
-        self.assertEquals(dict(server_trailing_metadata), dict(client_result.trailing_metadata))
-        self.assertEquals(SERVER_STATUS_DETAILS, client_result.status.details)
-        self.assertEquals(SERVER_STATUS_CODE, client_result.status.code)
-    self.assertEquals(set([
+        self.assertEqual(dict(server_trailing_metadata),
+                         dict(client_result.trailing_metadata))
+        self.assertEqual(server_status_details, client_result.status.details)
+        self.assertEqual(server_status_code, client_result.status.code)
+    self.assertEqual(set([
           _types.OpType.SEND_INITIAL_METADATA,
           _types.OpType.SEND_MESSAGE,
           _types.OpType.SEND_CLOSE_FROM_CLIENT,
@@ -190,16 +211,16 @@
           _types.OpType.RECV_STATUS_ON_CLIENT
       ]), found_client_op_types)
 
-    self.assertEquals(5, len(server_event.results))
+    self.assertEqual(5, len(server_event.results))
     found_server_op_types = set()
     for server_result in server_event.results:
       self.assertNotIn(server_result.type, found_server_op_types)
       found_server_op_types.add(server_result.type)
       if server_result.type == _types.OpType.RECV_MESSAGE:
-        self.assertEquals(REQUEST, server_result.message)
+        self.assertEqual(request, server_result.message)
       elif server_result.type == _types.OpType.RECV_CLOSE_ON_SERVER:
         self.assertFalse(server_result.cancelled)
-    self.assertEquals(set([
+    self.assertEqual(set([
           _types.OpType.SEND_INITIAL_METADATA,
           _types.OpType.RECV_MESSAGE,
           _types.OpType.SEND_MESSAGE,
@@ -211,5 +232,81 @@
     del server_call
 
 
+class HangingServerShutdown(unittest.TestCase):
+
+  def setUp(self):
+    self.server_completion_queue = _low.CompletionQueue()
+    self.server = _low.Server(self.server_completion_queue, [])
+    self.port = self.server.add_http2_port('[::]:0')
+    self.client_completion_queue = _low.CompletionQueue()
+    self.client_channel = _low.Channel('localhost:%d'%self.port, [])
+
+    self.server.start()
+
+  def tearDown(self):
+    self.server.shutdown()
+    del self.client_channel
+
+    self.client_completion_queue.shutdown()
+    self.server_completion_queue.shutdown()
+    while True:
+      client_event, server_event = wait_for_events(
+          [self.client_completion_queue, self.server_completion_queue],
+          float("+inf"))
+      if (client_event.type == _types.EventType.QUEUE_SHUTDOWN and
+          server_event.type == _types.EventType.QUEUE_SHUTDOWN):
+        break
+
+    del self.client_completion_queue
+    del self.server_completion_queue
+    del self.server
+
+  def testHangingServerCall(self):
+    deadline = time.time() + 5
+    deadline_tolerance = 0.25
+    event_time_tolerance = 2
+    cancel_all_calls_time_tolerance = 0.5
+    request = 'blarghaflargh'
+    method = 'twinkies'
+    host = 'hostess'
+    server_request_tag = object()
+    request_call_result = self.server.request_call(self.server_completion_queue,
+                                                   server_request_tag)
+
+    client_call_tag = object()
+    client_call = self.client_channel.create_call(self.client_completion_queue,
+                                                  method, host, deadline)
+    client_start_batch_result = client_call.start_batch([
+        _types.OpArgs.send_initial_metadata([]),
+        _types.OpArgs.send_message(request, 0),
+        _types.OpArgs.send_close_from_client(),
+        _types.OpArgs.recv_initial_metadata(),
+        _types.OpArgs.recv_message(),
+        _types.OpArgs.recv_status_on_client()
+    ], client_call_tag)
+
+    client_no_event, request_event, = wait_for_events(
+        [self.client_completion_queue, self.server_completion_queue],
+        time.time() + event_time_tolerance)
+
+    # Now try to shutdown the server and expect that we see server shutdown
+    # almost immediately after calling cancel_all_calls.
+    with self.assertRaises(RuntimeError):
+      self.server.cancel_all_calls()
+    shutdown_tag = object()
+    self.server.shutdown(shutdown_tag)
+    pre_cancel_timestamp = time.time()
+    self.server.cancel_all_calls()
+    finish_shutdown_timestamp = None
+    client_call_event, server_shutdown_event = wait_for_events(
+        [self.client_completion_queue, self.server_completion_queue],
+        time.time() + event_time_tolerance)
+    self.assertIs(shutdown_tag, server_shutdown_event.tag)
+    self.assertGreater(pre_cancel_timestamp + cancel_all_calls_time_tolerance,
+                       time.time())
+
+    del client_call
+
+
 if __name__ == '__main__':
   unittest.main(verbosity=2)
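
The HangingServerShutdown test above pins down the expected low-level shutdown
sequence: shutdown(tag) must be requested before cancel_all_calls() (calling
cancellation first raises RuntimeError), and cancellation should unblock any
hanging calls promptly. A condensed sketch of that sequence, assuming server
and server_completion_queue were created as in the test's setUp:

    import time

    shutdown_tag = object()
    server.shutdown(shutdown_tag)   # request shutdown first...
    server.cancel_all_calls()       # ...then cancel any outstanding calls
    # The shutdown event should arrive on the server's completion queue soon
    # after cancellation.
    event = server_completion_queue.next(time.time() + 2)
    assert event.tag is shutdown_tag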
diff --git a/src/python/grpcio_test/grpc_test/_links/_lonely_invocation_link_test.py b/src/python/grpcio_test/grpc_test/_links/_lonely_invocation_link_test.py
index abe240e..373a2b2 100644
--- a/src/python/grpcio_test/grpc_test/_links/_lonely_invocation_link_test.py
+++ b/src/python/grpcio_test/grpc_test/_links/_lonely_invocation_link_test.py
@@ -66,7 +66,7 @@
     ticket = links.Ticket(
         test_operation_id, 0, test_group, test_method,
         links.Ticket.Subscription.FULL, test_constants.SHORT_TIMEOUT, 1, None,
-        None, None, None, None, termination)
+        None, None, None, None, termination, None)
     invocation_link.accept_ticket(ticket)
     invocation_link_mate.block_until_tickets_satisfy(test_cases.terminated)
 
diff --git a/src/python/grpcio_test/grpc_test/_links/_transmission_test.py b/src/python/grpcio_test/grpc_test/_links/_transmission_test.py
index 9cdc962..02ddd51 100644
--- a/src/python/grpcio_test/grpc_test/_links/_transmission_test.py
+++ b/src/python/grpcio_test/grpc_test/_links/_transmission_test.py
@@ -128,14 +128,14 @@
     invocation_ticket = links.Ticket(
         test_operation_id, 0, test_group, test_method,
         links.Ticket.Subscription.FULL, test_constants.LONG_TIMEOUT, None, None,
-        None, None, None, None, links.Ticket.Termination.COMPLETION)
+        None, None, None, None, links.Ticket.Termination.COMPLETION, None)
     invocation_link.accept_ticket(invocation_ticket)
     service_mate.block_until_tickets_satisfy(test_cases.terminated)
 
     service_ticket = links.Ticket(
         service_mate.tickets()[-1].operation_id, 0, None, None, None, None,
         None, None, None, None, test_code, test_message,
-        links.Ticket.Termination.COMPLETION)
+        links.Ticket.Termination.COMPLETION, None)
     service_link.accept_ticket(service_ticket)
     invocation_mate.block_until_tickets_satisfy(test_cases.terminated)
 
@@ -174,33 +174,34 @@
     invocation_ticket = links.Ticket(
         test_operation_id, 0, test_group, test_method,
         links.Ticket.Subscription.FULL, test_constants.LONG_TIMEOUT, None, None,
-        None, None, None, None, None)
+        None, None, None, None, None, None)
     invocation_link.accept_ticket(invocation_ticket)
     requests = scenario.requests()
     for request_index, request in enumerate(requests):
       request_ticket = links.Ticket(
           test_operation_id, 1 + request_index, None, None, None, None, 1, None,
-          request, None, None, None, None)
+          request, None, None, None, None, None)
       invocation_link.accept_ticket(request_ticket)
       service_mate.block_until_tickets_satisfy(
           test_cases.at_least_n_payloads_received_predicate(1 + request_index))
       response_ticket = links.Ticket(
           service_mate.tickets()[0].operation_id, request_index, None, None,
           None, None, 1, None, scenario.response_for_request(request), None,
-          None, None, None)
+          None, None, None, None)
       service_link.accept_ticket(response_ticket)
       invocation_mate.block_until_tickets_satisfy(
           test_cases.at_least_n_payloads_received_predicate(1 + request_index))
     request_count = len(requests)
     invocation_completion_ticket = links.Ticket(
         test_operation_id, request_count + 1, None, None, None, None, None,
-        None, None, None, None, None, links.Ticket.Termination.COMPLETION)
+        None, None, None, None, None, links.Ticket.Termination.COMPLETION,
+        None)
     invocation_link.accept_ticket(invocation_completion_ticket)
     service_mate.block_until_tickets_satisfy(test_cases.terminated)
     service_completion_ticket = links.Ticket(
         service_mate.tickets()[0].operation_id, request_count, None, None, None,
         None, None, None, None, None, test_code, test_message,
-        links.Ticket.Termination.COMPLETION)
+        links.Ticket.Termination.COMPLETION, None)
     service_link.accept_ticket(service_completion_ticket)
     invocation_mate.block_until_tickets_satisfy(test_cases.terminated)
 
diff --git a/src/python/grpcio_test/grpc_test/framework/interfaces/face/__init__.py b/src/python/grpcio_test/grpc_test/framework/interfaces/face/__init__.py
new file mode 100644
index 0000000..7086519
--- /dev/null
+++ b/src/python/grpcio_test/grpc_test/framework/interfaces/face/__init__.py
@@ -0,0 +1,30 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
diff --git a/src/python/grpcio_test/grpc_test/framework/interfaces/face/_blocking_invocation_inline_service.py b/src/python/grpcio_test/grpc_test/framework/interfaces/face/_blocking_invocation_inline_service.py
new file mode 100644
index 0000000..857ad5c
--- /dev/null
+++ b/src/python/grpcio_test/grpc_test/framework/interfaces/face/_blocking_invocation_inline_service.py
@@ -0,0 +1,250 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Test code for the Face layer of RPC Framework."""
+
+import abc
+import unittest
+
+# test_interfaces is referenced from specification in this module.
+from grpc.framework.interfaces.face import face
+from grpc_test.framework.common import test_constants
+from grpc_test.framework.common import test_control
+from grpc_test.framework.common import test_coverage
+from grpc_test.framework.interfaces.face import _digest
+from grpc_test.framework.interfaces.face import _stock_service
+from grpc_test.framework.interfaces.face import test_interfaces  # pylint: disable=unused-import
+
+
+class TestCase(test_coverage.Coverage, unittest.TestCase):
+  """A test of the Face layer of RPC Framework.
+
+  Concrete subclasses must have an "implementation" attribute of type
+  test_interfaces.Implementation and an "invoker_constructor" attribute of type
+  _invocation.InvokerConstructor.
+  """
+  __metaclass__ = abc.ABCMeta
+
+  NAME = 'BlockingInvocationInlineServiceTest'
+
+  def setUp(self):
+    """See unittest.TestCase.setUp for full specification.
+
+    Overriding implementations must call this implementation.
+    """
+    self._control = test_control.PauseFailControl()
+    self._digest = _digest.digest(
+        _stock_service.STOCK_TEST_SERVICE, self._control, None)
+
+    generic_stub, dynamic_stubs, self._memo = self.implementation.instantiate(
+        self._digest.methods, self._digest.inline_method_implementations, None)
+    self._invoker = self.invoker_constructor.construct_invoker(
+        generic_stub, dynamic_stubs, self._digest.methods)
+
+  def tearDown(self):
+    """See unittest.TestCase.tearDown for full specification.
+
+    Overriding implementations must call this implementation.
+    """
+    self.implementation.destantiate(self._memo)
+
+  def testSuccessfulUnaryRequestUnaryResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.unary_unary_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        request = test_messages.request()
+
+        response = self._invoker.blocking(group, method)(
+            request, test_constants.LONG_TIMEOUT)
+
+        test_messages.verify(request, response, self)
+
+  def testSuccessfulUnaryRequestStreamResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.unary_stream_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        request = test_messages.request()
+
+        response_iterator = self._invoker.blocking(group, method)(
+            request, test_constants.LONG_TIMEOUT)
+        responses = list(response_iterator)
+
+        test_messages.verify(request, responses, self)
+
+  def testSuccessfulStreamRequestUnaryResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.stream_unary_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        requests = test_messages.requests()
+
+        response = self._invoker.blocking(group, method)(
+            iter(requests), test_constants.LONG_TIMEOUT)
+
+        test_messages.verify(requests, response, self)
+
+  def testSuccessfulStreamRequestStreamResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.stream_stream_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        requests = test_messages.requests()
+
+        response_iterator = self._invoker.blocking(group, method)(
+            iter(requests), test_constants.LONG_TIMEOUT)
+        responses = list(response_iterator)
+
+        test_messages.verify(requests, responses, self)
+
+  def testSequentialInvocations(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.unary_unary_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        first_request = test_messages.request()
+        second_request = test_messages.request()
+
+        first_response = self._invoker.blocking(group, method)(
+            first_request, test_constants.LONG_TIMEOUT)
+
+        test_messages.verify(first_request, first_response, self)
+
+        second_response = self._invoker.blocking(group, method)(
+            second_request, test_constants.LONG_TIMEOUT)
+
+        test_messages.verify(second_request, second_response, self)
+
+  @unittest.skip('Parallel invocations impossible with blocking control flow!')
+  def testParallelInvocations(self):
+    raise NotImplementedError()
+
+  @unittest.skip('Parallel invocations impossible with blocking control flow!')
+  def testWaitingForSomeButNotAllParallelInvocations(self):
+    raise NotImplementedError()
+
+  @unittest.skip('Cancellation impossible with blocking control flow!')
+  def testCancelledUnaryRequestUnaryResponse(self):
+    raise NotImplementedError()
+
+  @unittest.skip('Cancellation impossible with blocking control flow!')
+  def testCancelledUnaryRequestStreamResponse(self):
+    raise NotImplementedError()
+
+  @unittest.skip('Cancellation impossible with blocking control flow!')
+  def testCancelledStreamRequestUnaryResponse(self):
+    raise NotImplementedError()
+
+  @unittest.skip('Cancellation impossible with blocking control flow!')
+  def testCancelledStreamRequestStreamResponse(self):
+    raise NotImplementedError()
+
+  def testExpiredUnaryRequestUnaryResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.unary_unary_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        request = test_messages.request()
+
+        with self._control.pause(), self.assertRaises(
+            face.ExpirationError):
+          self._invoker.blocking(group, method)(
+              request, test_constants.SHORT_TIMEOUT)
+
+  def testExpiredUnaryRequestStreamResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.unary_stream_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        request = test_messages.request()
+
+        with self._control.pause(), self.assertRaises(
+            face.ExpirationError):
+          response_iterator = self._invoker.blocking(group, method)(
+              request, test_constants.SHORT_TIMEOUT)
+          list(response_iterator)
+
+  def testExpiredStreamRequestUnaryResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.stream_unary_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        requests = test_messages.requests()
+
+        with self._control.pause(), self.assertRaises(
+            face.ExpirationError):
+          self._invoker.blocking(group, method)(
+              iter(requests), test_constants.SHORT_TIMEOUT)
+
+  def testExpiredStreamRequestStreamResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.stream_stream_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        requests = test_messages.requests()
+
+        with self._control.pause(), self.assertRaises(
+            face.ExpirationError):
+          response_iterator = self._invoker.blocking(group, method)(
+              iter(requests), test_constants.SHORT_TIMEOUT)
+          list(response_iterator)
+
+  def testFailedUnaryRequestUnaryResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.unary_unary_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        request = test_messages.request()
+
+        with self._control.fail(), self.assertRaises(face.RemoteError):
+          self._invoker.blocking(group, method)(
+              request, test_constants.LONG_TIMEOUT)
+
+  def testFailedUnaryRequestStreamResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.unary_stream_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        request = test_messages.request()
+
+        with self._control.fail(), self.assertRaises(face.RemoteError):
+          response_iterator = self._invoker.blocking(group, method)(
+              request, test_constants.LONG_TIMEOUT)
+          list(response_iterator)
+
+  def testFailedStreamRequestUnaryResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.stream_unary_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        requests = test_messages.requests()
+
+        with self._control.fail(), self.assertRaises(face.RemoteError):
+          self._invoker.blocking(group, method)(
+              iter(requests), test_constants.LONG_TIMEOUT)
+
+  def testFailedStreamRequestStreamResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.stream_stream_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        requests = test_messages.requests()
+
+        with self._control.fail(), self.assertRaises(face.RemoteError):
+          response_iterator = self._invoker.blocking(group, method)(
+              iter(requests), test_constants.LONG_TIMEOUT)
+          list(response_iterator)
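
This TestCase is abstract: as its docstring says, a concrete test module must
supply an "implementation" (a test_interfaces.Implementation) and an
"invoker_constructor" before unittest can run it. A minimal sketch of such a
binding, where my_backend is a hypothetical module standing in for whatever
Face-layer implementation is under test:

    import unittest

    from grpc_test.framework.interfaces.face import (
        _blocking_invocation_inline_service)

    import my_backend  # hypothetical: supplies the Implementation under test


    class BlockingInvocationInlineServiceTest(
        _blocking_invocation_inline_service.TestCase):

      implementation = my_backend.implementation()
      invoker_constructor = my_backend.invoker_constructor()


    if __name__ == '__main__':
      unittest.main(verbosity=2)
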
diff --git a/src/python/grpcio_test/grpc_test/framework/interfaces/face/_digest.py b/src/python/grpcio_test/grpc_test/framework/interfaces/face/_digest.py
new file mode 100644
index 0000000..da56ed7
--- /dev/null
+++ b/src/python/grpcio_test/grpc_test/framework/interfaces/face/_digest.py
@@ -0,0 +1,444 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Code for making a service.TestService more amenable to use in tests."""
+
+import collections
+import threading
+
+# test_control, _service, and test_interfaces are referenced from specification
+# in this module.
+from grpc.framework.common import cardinality
+from grpc.framework.common import style
+from grpc.framework.foundation import stream
+from grpc.framework.foundation import stream_util
+from grpc.framework.interfaces.face import face
+from grpc_test.framework.common import test_control  # pylint: disable=unused-import
+from grpc_test.framework.interfaces.face import _service  # pylint: disable=unused-import
+from grpc_test.framework.interfaces.face import test_interfaces  # pylint: disable=unused-import
+
+_IDENTITY = lambda x: x
+
+
+class TestServiceDigest(
+    collections.namedtuple(
+        'TestServiceDigest',
+        ('methods',
+         'inline_method_implementations',
+         'event_method_implementations',
+         'multi_method_implementation',
+         'unary_unary_messages_sequences',
+         'unary_stream_messages_sequences',
+         'stream_unary_messages_sequences',
+         'stream_stream_messages_sequences',))):
+  """A transformation of a service.TestService.
+
+  Attributes:
+    methods: A dict from method group-name pair to test_interfaces.Method object
+      describing the RPC methods that may be called during the test.
+    inline_method_implementations: A dict from method group-name pair to
+      face.MethodImplementation object to be used in tests of in-line calls to
+      behaviors under test.
+    event_method_implementations: A dict from method group-name pair to
+      face.MethodImplementation object to be used in tests of event-driven calls
+      to behaviors under test.
+    multi_method_implementation: A face.MultiMethodImplementation to be used in
+      tests of generic calls to behaviors under test.
+    unary_unary_messages_sequences: A dict from method group-name pair to
+      sequence of service.UnaryUnaryTestMessages objects to be used to test the
+      identified method.
+    unary_stream_messages_sequences: A dict from method group-name pair to
+      sequence of service.UnaryStreamTestMessages objects to be used to test the
+      identified method.
+    stream_unary_messages_sequences: A dict from method group-name pair to
+      sequence of service.StreamUnaryTestMessages objects to be used to test the
+      identified method.
+    stream_stream_messages_sequences: A dict from method group-name pair to
+      sequence of service.StreamStreamTestMessages objects to be used to test
+      the identified method.
+  """
+
+
+class _BufferingConsumer(stream.Consumer):
+  """A trivial Consumer that dumps what it consumes in a user-mutable buffer."""
+
+  def __init__(self):
+    self.consumed = []
+    self.terminated = False
+
+  def consume(self, value):
+    self.consumed.append(value)
+
+  def terminate(self):
+    self.terminated = True
+
+  def consume_and_terminate(self, value):
+    self.consumed.append(value)
+    self.terminated = True
+
+
+class _InlineUnaryUnaryMethod(face.MethodImplementation):
+
+  def __init__(self, unary_unary_test_method, control):
+    self._test_method = unary_unary_test_method
+    self._control = control
+
+    self.cardinality = cardinality.Cardinality.UNARY_UNARY
+    self.style = style.Service.INLINE
+
+  def unary_unary_inline(self, request, context):
+    response_list = []
+    self._test_method.service(
+        request, response_list.append, context, self._control)
+    return response_list.pop(0)
+
+
+class _EventUnaryUnaryMethod(face.MethodImplementation):
+
+  def __init__(self, unary_unary_test_method, control, pool):
+    self._test_method = unary_unary_test_method
+    self._control = control
+    self._pool = pool
+
+    self.cardinality = cardinality.Cardinality.UNARY_UNARY
+    self.style = style.Service.EVENT
+
+  def unary_unary_event(self, request, response_callback, context):
+    if self._pool is None:
+      self._test_method.service(
+          request, response_callback, context, self._control)
+    else:
+      self._pool.submit(
+          self._test_method.service, request, response_callback, context,
+          self._control)
+
+
+class _InlineUnaryStreamMethod(face.MethodImplementation):
+
+  def __init__(self, unary_stream_test_method, control):
+    self._test_method = unary_stream_test_method
+    self._control = control
+
+    self.cardinality = cardinality.Cardinality.UNARY_STREAM
+    self.style = style.Service.INLINE
+
+  def unary_stream_inline(self, request, context):
+    response_consumer = _BufferingConsumer()
+    self._test_method.service(
+        request, response_consumer, context, self._control)
+    for response in response_consumer.consumed:
+      yield response
+
+
+class _EventUnaryStreamMethod(face.MethodImplementation):
+
+  def __init__(self, unary_stream_test_method, control, pool):
+    self._test_method = unary_stream_test_method
+    self._control = control
+    self._pool = pool
+
+    self.cardinality = cardinality.Cardinality.UNARY_STREAM
+    self.style = style.Service.EVENT
+
+  def unary_stream_event(self, request, response_consumer, context):
+    if self._pool is None:
+      self._test_method.service(
+          request, response_consumer, context, self._control)
+    else:
+      self._pool.submit(
+          self._test_method.service, request, response_consumer, context,
+          self._control)
+
+
+class _InlineStreamUnaryMethod(face.MethodImplementation):
+
+  def __init__(self, stream_unary_test_method, control):
+    self._test_method = stream_unary_test_method
+    self._control = control
+
+    self.cardinality = cardinality.Cardinality.STREAM_UNARY
+    self.style = style.Service.INLINE
+
+  def stream_unary_inline(self, request_iterator, context):
+    response_list = []
+    request_consumer = self._test_method.service(
+        response_list.append, context, self._control)
+    for request in request_iterator:
+      request_consumer.consume(request)
+    request_consumer.terminate()
+    return response_list.pop(0)
+
+
+class _EventStreamUnaryMethod(face.MethodImplementation):
+
+  def __init__(self, stream_unary_test_method, control, pool):
+    self._test_method = stream_unary_test_method
+    self._control = control
+    self._pool = pool
+
+    self.cardinality = cardinality.Cardinality.STREAM_UNARY
+    self.style = style.Service.EVENT
+
+  def stream_unary_event(self, response_callback, context):
+    request_consumer = self._test_method.service(
+        response_callback, context, self._control)
+    if self._pool is None:
+      return request_consumer
+    else:
+      return stream_util.ThreadSwitchingConsumer(request_consumer, self._pool)
+
+
+class _InlineStreamStreamMethod(face.MethodImplementation):
+
+  def __init__(self, stream_stream_test_method, control):
+    self._test_method = stream_stream_test_method
+    self._control = control
+
+    self.cardinality = cardinality.Cardinality.STREAM_STREAM
+    self.style = style.Service.INLINE
+
+  def stream_stream_inline(self, request_iterator, context):
+    response_consumer = _BufferingConsumer()
+    request_consumer = self._test_method.service(
+        response_consumer, context, self._control)
+
+    for request in request_iterator:
+      request_consumer.consume(request)
+      while response_consumer.consumed:
+        yield response_consumer.consumed.pop(0)
+    response_consumer.terminate()
+
+
+class _EventStreamStreamMethod(face.MethodImplementation):
+
+  def __init__(self, stream_stream_test_method, control, pool):
+    self._test_method = stream_stream_test_method
+    self._control = control
+    self._pool = pool
+
+    self.cardinality = cardinality.Cardinality.STREAM_STREAM
+    self.style = style.Service.EVENT
+
+  def stream_stream_event(self, response_consumer, context):
+    request_consumer = self._test_method.service(
+        response_consumer, context, self._control)
+    if self._pool is None:
+      return request_consumer
+    else:
+      return stream_util.ThreadSwitchingConsumer(request_consumer, self._pool)
+
+
+class _UnaryConsumer(stream.Consumer):
+  """A Consumer that only allows consumption of exactly one value."""
+
+  def __init__(self, action):
+    self._lock = threading.Lock()
+    self._action = action
+    self._consumed = False
+    self._terminated = False
+
+  def consume(self, value):
+    with self._lock:
+      if self._consumed:
+        raise ValueError('Unary consumer already consumed!')
+      elif self._terminated:
+        raise ValueError('Unary consumer already terminated!')
+      else:
+        self._consumed = True
+
+    self._action(value)
+
+  def terminate(self):
+    with self._lock:
+      if not self._consumed:
+        raise ValueError('Unary consumer hasn\'t yet consumed!')
+      elif self._terminated:
+        raise ValueError('Unary consumer already terminated!')
+      else:
+        self._terminated = True
+
+  def consume_and_terminate(self, value):
+    with self._lock:
+      if self._consumed:
+        raise ValueError('Unary consumer already consumed!')
+      elif self._terminated:
+        raise ValueError('Unary consumer already terminated!')
+      else:
+        self._consumed = True
+        self._terminated = True
+
+    self._action(value)
+
+
+class _UnaryUnaryAdaptation(object):
+
+  def __init__(self, unary_unary_test_method):
+    self._method = unary_unary_test_method
+
+  def service(self, response_consumer, context, control):
+    def action(request):
+      self._method.service(
+          request, response_consumer.consume_and_terminate, context, control)
+    return _UnaryConsumer(action)
+
+
+class _UnaryStreamAdaptation(object):
+
+  def __init__(self, unary_stream_test_method):
+    self._method = unary_stream_test_method
+
+  def service(self, response_consumer, context, control):
+    def action(request):
+      self._method.service(request, response_consumer, context, control)
+    return _UnaryConsumer(action)
+
+
+class _StreamUnaryAdaptation(object):
+
+  def __init__(self, stream_unary_test_method):
+    self._method = stream_unary_test_method
+
+  def service(self, response_consumer, context, control):
+    return self._method.service(
+        response_consumer.consume_and_terminate, context, control)
+
+
+class _MultiMethodImplementation(face.MultiMethodImplementation):
+
+  def __init__(self, methods, control, pool):
+    self._methods = methods
+    self._control = control
+    self._pool = pool
+
+  def service(self, group, name, response_consumer, context):
+    method = self._methods.get(group, name, None)
+    if method is None:
+      raise face.NoSuchMethodError(group, name)
+    elif self._pool is None:
+      return method(response_consumer, context, self._control)
+    else:
+      request_consumer = method(response_consumer, context, self._control)
+      return stream_util.ThreadSwitchingConsumer(request_consumer, self._pool)
+
+
+class _Assembly(
+    collections.namedtuple(
+        '_Assembly',
+        ['methods', 'inlines', 'events', 'adaptations', 'messages'])):
+  """An intermediate structure created when creating a TestServiceDigest."""
+
+
+def _assemble(
+    scenarios, identifiers, inline_method_constructor, event_method_constructor,
+    adapter, control, pool):
+  """Creates an _Assembly from the given scenarios."""
+  methods = {}
+  inlines = {}
+  events = {}
+  adaptations = {}
+  messages = {}
+  for identifier, scenario in scenarios.iteritems():
+    if identifier in identifiers:
+      raise ValueError('Repeated identifier "(%s, %s)"!' % identifier)
+
+    test_method = scenario[0]
+    inline_method = inline_method_constructor(test_method, control)
+    event_method = event_method_constructor(test_method, control, pool)
+    adaptation = adapter(test_method)
+
+    methods[identifier] = test_method
+    inlines[identifier] = inline_method
+    events[identifier] = event_method
+    adaptations[identifier] = adaptation
+    messages[identifier] = scenario[1]
+
+  return _Assembly(methods, inlines, events, adaptations, messages)
+
+
+def digest(service, control, pool):
+  """Creates a TestServiceDigest from a TestService.
+
+  Args:
+    service: A _service.TestService.
+    control: A test_control.Control.
+    pool: If RPC methods should be serviced in a separate thread, a thread pool.
+      None if RPC methods should be serviced in the thread belonging to the
+      run-time that calls for their service.
+
+  Returns:
+    A TestServiceDigest synthesized from the given service.TestService.
+  """
+  identifiers = set()
+
+  unary_unary = _assemble(
+      service.unary_unary_scenarios(), identifiers, _InlineUnaryUnaryMethod,
+      _EventUnaryUnaryMethod, _UnaryUnaryAdaptation, control, pool)
+  identifiers.update(unary_unary.inlines)
+
+  unary_stream = _assemble(
+      service.unary_stream_scenarios(), identifiers, _InlineUnaryStreamMethod,
+      _EventUnaryStreamMethod, _UnaryStreamAdaptation, control, pool)
+  identifiers.update(unary_stream.inlines)
+
+  stream_unary = _assemble(
+      service.stream_unary_scenarios(), identifiers, _InlineStreamUnaryMethod,
+      _EventStreamUnaryMethod, _StreamUnaryAdaptation, control, pool)
+  identifiers.update(stream_unary.inlines)
+
+  stream_stream = _assemble(
+      service.stream_stream_scenarios(), identifiers, _InlineStreamStreamMethod,
+      _EventStreamStreamMethod, _IDENTITY, control, pool)
+  identifiers.update(stream_stream.inlines)
+
+  methods = dict(unary_unary.methods)
+  methods.update(unary_stream.methods)
+  methods.update(stream_unary.methods)
+  methods.update(stream_stream.methods)
+  adaptations = dict(unary_unary.adaptations)
+  adaptations.update(unary_stream.adaptations)
+  adaptations.update(stream_unary.adaptations)
+  adaptations.update(stream_stream.adaptations)
+  inlines = dict(unary_unary.inlines)
+  inlines.update(unary_stream.inlines)
+  inlines.update(stream_unary.inlines)
+  inlines.update(stream_stream.inlines)
+  events = dict(unary_unary.events)
+  events.update(unary_stream.events)
+  events.update(stream_unary.events)
+  events.update(stream_stream.events)
+
+  return TestServiceDigest(
+      methods,
+      inlines,
+      events,
+      _MultiMethodImplementation(adaptations, control, pool),
+      unary_unary.messages,
+      unary_stream.messages,
+      stream_unary.messages,
+      stream_stream.messages)
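
Both _BufferingConsumer and _UnaryConsumer above implement the same
three-method stream.Consumer contract that the digest relies on:
consume(value), terminate(), and consume_and_terminate(value). A small
illustrative consumer, shown only to restate that contract (it is not part of
the module above):

    from grpc.framework.foundation import stream


    class CountingConsumer(stream.Consumer):
      """Counts consumed values; illustrative only."""

      def __init__(self):
        self.count = 0
        self.terminated = False

      def consume(self, value):
        self.count += 1

      def terminate(self):
        self.terminated = True

      def consume_and_terminate(self, value):
        self.count += 1
        self.terminated = True
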
diff --git a/src/python/grpcio_test/grpc_test/framework/interfaces/face/_event_invocation_synchronous_event_service.py b/src/python/grpcio_test/grpc_test/framework/interfaces/face/_event_invocation_synchronous_event_service.py
new file mode 100644
index 0000000..ea5cdea
--- /dev/null
+++ b/src/python/grpcio_test/grpc_test/framework/interfaces/face/_event_invocation_synchronous_event_service.py
@@ -0,0 +1,377 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Test code for the Face layer of RPC Framework."""
+
+import abc
+import unittest
+
+# test_interfaces is referenced from specification in this module.
+from grpc.framework.interfaces.face import face
+from grpc_test.framework.common import test_constants
+from grpc_test.framework.common import test_control
+from grpc_test.framework.common import test_coverage
+from grpc_test.framework.interfaces.face import _digest
+from grpc_test.framework.interfaces.face import _receiver
+from grpc_test.framework.interfaces.face import _stock_service
+from grpc_test.framework.interfaces.face import test_interfaces  # pylint: disable=unused-import
+
+
+class TestCase(test_coverage.Coverage, unittest.TestCase):
+  """A test of the Face layer of RPC Framework.
+
+  Concrete subclasses must have an "implementation" attribute of type
+  test_interfaces.Implementation and an "invoker_constructor" attribute of type
+  _invocation.InvokerConstructor.
+  """
+  __metaclass__ = abc.ABCMeta
+
+  NAME = 'EventInvocationSynchronousEventServiceTest'
+
+  def setUp(self):
+    """See unittest.TestCase.setUp for full specification.
+
+    Overriding implementations must call this implementation.
+    """
+    self._control = test_control.PauseFailControl()
+    self._digest = _digest.digest(
+        _stock_service.STOCK_TEST_SERVICE, self._control, None)
+
+    generic_stub, dynamic_stubs, self._memo = self.implementation.instantiate(
+        self._digest.methods, self._digest.event_method_implementations, None)
+    self._invoker = self.invoker_constructor.construct_invoker(
+        generic_stub, dynamic_stubs, self._digest.methods)
+
+  def tearDown(self):
+    """See unittest.TestCase.tearDown for full specification.
+
+    Overriding implementations must call this implementation.
+    """
+    self.implementation.destantiate(self._memo)
+
+  def testSuccessfulUnaryRequestUnaryResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.unary_unary_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        request = test_messages.request()
+        receiver = _receiver.Receiver()
+
+        self._invoker.event(group, method)(
+            request, receiver, receiver.abort, test_constants.LONG_TIMEOUT)
+        receiver.block_until_terminated()
+        response = receiver.unary_response()
+
+        test_messages.verify(request, response, self)
+
+  def testSuccessfulUnaryRequestStreamResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.unary_stream_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        request = test_messages.request()
+        receiver = _receiver.Receiver()
+
+        self._invoker.event(group, method)(
+            request, receiver, receiver.abort, test_constants.LONG_TIMEOUT)
+        receiver.block_until_terminated()
+        responses = receiver.stream_responses()
+
+        test_messages.verify(request, responses, self)
+
+  def testSuccessfulStreamRequestUnaryResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.stream_unary_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        requests = test_messages.requests()
+        receiver = _receiver.Receiver()
+
+        call_consumer = self._invoker.event(group, method)(
+            receiver, receiver.abort, test_constants.LONG_TIMEOUT)
+        for request in requests:
+          call_consumer.consume(request)
+        call_consumer.terminate()
+        receiver.block_until_terminated()
+        response = receiver.unary_response()
+
+        test_messages.verify(requests, response, self)
+
+  def testSuccessfulStreamRequestStreamResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.stream_stream_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        requests = test_messages.requests()
+        receiver = _receiver.Receiver()
+
+        call_consumer = self._invoker.event(group, method)(
+            receiver, receiver.abort, test_constants.LONG_TIMEOUT)
+        for request in requests:
+          call_consumer.consume(request)
+        call_consumer.terminate()
+        receiver.block_until_terminated()
+        responses = receiver.stream_responses()
+
+        test_messages.verify(requests, responses, self)
+
+  def testSequentialInvocations(self):
+    # pylint: disable=cell-var-from-loop
+    for (group, method), test_messages_sequence in (
+        self._digest.unary_unary_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        first_request = test_messages.request()
+        second_request = test_messages.request()
+        second_receiver = _receiver.Receiver()
+
+        def make_second_invocation():
+          self._invoker.event(group, method)(
+              second_request, second_receiver, second_receiver.abort,
+              test_constants.LONG_TIMEOUT)
+
+        class FirstReceiver(_receiver.Receiver):
+
+          def complete(self, terminal_metadata, code, details):
+            super(FirstReceiver, self).complete(
+                terminal_metadata, code, details)
+            make_second_invocation()
+
+        first_receiver = FirstReceiver()
+
+        self._invoker.event(group, method)(
+            first_request, first_receiver, first_receiver.abort,
+            test_constants.LONG_TIMEOUT)
+        second_receiver.block_until_terminated()
+
+        first_response = first_receiver.unary_response()
+        second_response = second_receiver.unary_response()
+        test_messages.verify(first_request, first_response, self)
+        test_messages.verify(second_request, second_response, self)
+
+  def testParallelInvocations(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.unary_unary_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        first_request = test_messages.request()
+        first_receiver = _receiver.Receiver()
+        second_request = test_messages.request()
+        second_receiver = _receiver.Receiver()
+
+        self._invoker.event(group, method)(
+            first_request, first_receiver, first_receiver.abort,
+            test_constants.LONG_TIMEOUT)
+        self._invoker.event(group, method)(
+            second_request, second_receiver, second_receiver.abort,
+            test_constants.LONG_TIMEOUT)
+        first_receiver.block_until_terminated()
+        second_receiver.block_until_terminated()
+
+        first_response = first_receiver.unary_response()
+        second_response = second_receiver.unary_response()
+        test_messages.verify(first_request, first_response, self)
+        test_messages.verify(second_request, second_response, self)
+
+  @unittest.skip('TODO(nathaniel): implement.')
+  def testWaitingForSomeButNotAllParallelInvocations(self):
+    raise NotImplementedError()
+
+  def testCancelledUnaryRequestUnaryResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.unary_unary_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        request = test_messages.request()
+        receiver = _receiver.Receiver()
+
+        with self._control.pause():
+          call = self._invoker.event(group, method)(
+              request, receiver, receiver.abort, test_constants.LONG_TIMEOUT)
+          call.cancel()
+          receiver.block_until_terminated()
+
+        self.assertIs(face.Abortion.Kind.CANCELLED, receiver.abortion().kind)
+
+  def testCancelledUnaryRequestStreamResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.unary_stream_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        request = test_messages.request()
+        receiver = _receiver.Receiver()
+
+        call = self._invoker.event(group, method)(
+            request, receiver, receiver.abort, test_constants.LONG_TIMEOUT)
+        call.cancel()
+        receiver.block_until_terminated()
+
+        self.assertIs(face.Abortion.Kind.CANCELLED, receiver.abortion().kind)
+
+  def testCancelledStreamRequestUnaryResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.stream_unary_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        requests = test_messages.requests()
+        receiver = _receiver.Receiver()
+
+        call_consumer = self._invoker.event(group, method)(
+            receiver, receiver.abort, test_constants.LONG_TIMEOUT)
+        for request in requests:
+          call_consumer.consume(request)
+        call_consumer.cancel()
+        receiver.block_until_terminated()
+
+        self.assertIs(face.Abortion.Kind.CANCELLED, receiver.abortion().kind)
+
+  def testCancelledStreamRequestStreamResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.stream_stream_messages_sequences.iteritems()):
+      for unused_test_messages in test_messages_sequence:
+        receiver = _receiver.Receiver()
+
+        call_consumer = self._invoker.event(group, method)(
+            receiver, receiver.abort, test_constants.LONG_TIMEOUT)
+        call_consumer.cancel()
+        receiver.block_until_terminated()
+
+        self.assertIs(face.Abortion.Kind.CANCELLED, receiver.abortion().kind)
+
+  def testExpiredUnaryRequestUnaryResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.unary_unary_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        request = test_messages.request()
+        receiver = _receiver.Receiver()
+
+        with self._control.pause():
+          self._invoker.event(group, method)(
+              request, receiver, receiver.abort, test_constants.SHORT_TIMEOUT)
+          receiver.block_until_terminated()
+
+        self.assertIs(face.Abortion.Kind.EXPIRED, receiver.abortion().kind)
+
+  def testExpiredUnaryRequestStreamResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.unary_stream_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        request = test_messages.request()
+        receiver = _receiver.Receiver()
+
+        with self._control.pause():
+          self._invoker.event(group, method)(
+              request, receiver, receiver.abort, test_constants.SHORT_TIMEOUT)
+          receiver.block_until_terminated()
+
+        self.assertIs(face.Abortion.Kind.EXPIRED, receiver.abortion().kind)
+
+  def testExpiredStreamRequestUnaryResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.stream_unary_messages_sequences.iteritems()):
+      for unused_test_messages in test_messages_sequence:
+        receiver = _receiver.Receiver()
+
+        self._invoker.event(group, method)(
+            receiver, receiver.abort, test_constants.SHORT_TIMEOUT)
+        receiver.block_until_terminated()
+
+        self.assertIs(face.Abortion.Kind.EXPIRED, receiver.abortion().kind)
+
+  def testExpiredStreamRequestStreamResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.stream_stream_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        requests = test_messages.requests()
+        receiver = _receiver.Receiver()
+
+        call_consumer = self._invoker.event(group, method)(
+            receiver, receiver.abort, test_constants.SHORT_TIMEOUT)
+        for request in requests:
+          call_consumer.consume(request)
+        receiver.block_until_terminated()
+
+        self.assertIs(face.Abortion.Kind.EXPIRED, receiver.abortion().kind)
+
+  def testFailedUnaryRequestUnaryResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.unary_unary_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        request = test_messages.request()
+        receiver = _receiver.Receiver()
+
+        with self._control.fail():
+          self._invoker.event(group, method)(
+              request, receiver, receiver.abort, test_constants.LONG_TIMEOUT)
+          receiver.block_until_terminated()
+
+        self.assertIs(
+            face.Abortion.Kind.REMOTE_FAILURE, receiver.abortion().kind)
+
+  def testFailedUnaryRequestStreamResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.unary_stream_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        request = test_messages.request()
+        receiver = _receiver.Receiver()
+
+        with self._control.fail():
+          self._invoker.event(group, method)(
+              request, receiver, receiver.abort, test_constants.LONG_TIMEOUT)
+          receiver.block_until_terminated()
+
+        self.assertIs(
+            face.Abortion.Kind.REMOTE_FAILURE, receiver.abortion().kind)
+
+  def testFailedStreamRequestUnaryResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.stream_unary_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        requests = test_messages.requests()
+        receiver = _receiver.Receiver()
+
+        with self._control.fail():
+          call_consumer = self._invoker.event(group, method)(
+              receiver, receiver.abort, test_constants.LONG_TIMEOUT)
+          for request in requests:
+            call_consumer.consume(request)
+          call_consumer.terminate()
+          receiver.block_until_terminated()
+
+        self.assertIs(
+            face.Abortion.Kind.REMOTE_FAILURE, receiver.abortion().kind)
+
+  def testFailedStreamRequestStreamResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.stream_stream_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        requests = test_messages.requests()
+        receiver = _receiver.Receiver()
+
+        with self._control.fail():
+          call_consumer = self._invoker.event(group, method)(
+              receiver, receiver.abort, test_constants.LONG_TIMEOUT)
+          for request in requests:
+            call_consumer.consume(request)
+          call_consumer.terminate()
+          receiver.block_until_terminated()
+
+        self.assertIs(
+            face.Abortion.Kind.REMOTE_FAILURE, receiver.abortion().kind)
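
The invocation style exercised by this suite is event-driven: the caller
passes a receiver and an abortion callback when starting the RPC, feeds
requests through the returned consumer when the method is request-streaming,
and then blocks on the receiver. A condensed restatement of the unary-unary
case from the tests above (the invoker, group, method, and request parameters
are assumed to come from the surrounding test fixture):

    from grpc_test.framework.common import test_constants
    from grpc_test.framework.interfaces.face import _receiver


    def invoke_unary_unary_event(invoker, group, method, request):
      # Start the call, wait for it to terminate, then read the sole response.
      receiver = _receiver.Receiver()
      invoker.event(group, method)(
          request, receiver, receiver.abort, test_constants.LONG_TIMEOUT)
      receiver.block_until_terminated()
      return receiver.unary_response()
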
diff --git a/src/python/grpcio_test/grpc_test/framework/interfaces/face/_future_invocation_asynchronous_event_service.py b/src/python/grpcio_test/grpc_test/framework/interfaces/face/_future_invocation_asynchronous_event_service.py
new file mode 100644
index 0000000..a649362
--- /dev/null
+++ b/src/python/grpcio_test/grpc_test/framework/interfaces/face/_future_invocation_asynchronous_event_service.py
@@ -0,0 +1,378 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Test code for the Face layer of RPC Framework."""
+
+import abc
+import contextlib
+import threading
+import unittest
+
+# test_interfaces is referenced from specification in this module.
+from grpc.framework.foundation import logging_pool
+from grpc.framework.interfaces.face import face
+from grpc_test.framework.common import test_constants
+from grpc_test.framework.common import test_control
+from grpc_test.framework.common import test_coverage
+from grpc_test.framework.interfaces.face import _digest
+from grpc_test.framework.interfaces.face import _stock_service
+from grpc_test.framework.interfaces.face import test_interfaces  # pylint: disable=unused-import
+
+
+class _PauseableIterator(object):
+
+  def __init__(self, upstream):
+    self._upstream = upstream
+    self._condition = threading.Condition()
+    self._paused = False
+
+  @contextlib.contextmanager
+  def pause(self):
+    with self._condition:
+      self._paused = True
+    yield
+    with self._condition:
+      self._paused = False
+      self._condition.notify_all()
+
+  def __iter__(self):
+    return self
+
+  def next(self):
+    with self._condition:
+      while self._paused:
+        self._condition.wait()
+    return next(self._upstream)
+
+
+class TestCase(test_coverage.Coverage, unittest.TestCase):
+  """A test of the Face layer of RPC Framework.
+
+  Concrete subclasses must have an "implementation" attribute of type
+  test_interfaces.Implementation and an "invoker_constructor" attribute of type
+  _invocation.InvokerConstructor.
+  """
+  __metaclass__ = abc.ABCMeta
+
+  NAME = 'FutureInvocationAsynchronousEventServiceTest'
+
+  def setUp(self):
+    """See unittest.TestCase.setUp for full specification.
+
+    Overriding implementations must call this implementation.
+    """
+    self._control = test_control.PauseFailControl()
+    self._digest_pool = logging_pool.pool(test_constants.POOL_SIZE)
+    self._digest = _digest.digest(
+        _stock_service.STOCK_TEST_SERVICE, self._control, self._digest_pool)
+
+    generic_stub, dynamic_stubs, self._memo = self.implementation.instantiate(
+        self._digest.methods, self._digest.event_method_implementations, None)
+    self._invoker = self.invoker_constructor.construct_invoker(
+        generic_stub, dynamic_stubs, self._digest.methods)
+
+  def tearDown(self):
+    """See unittest.TestCase.tearDown for full specification.
+
+    Overriding implementations must call this implementation.
+    """
+    self.implementation.destantiate(self._memo)
+    self._digest_pool.shutdown(wait=True)
+
+  def testSuccessfulUnaryRequestUnaryResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.unary_unary_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        request = test_messages.request()
+
+        response_future = self._invoker.future(group, method)(
+            request, test_constants.LONG_TIMEOUT)
+        response = response_future.result()
+
+        test_messages.verify(request, response, self)
+
+  def testSuccessfulUnaryRequestStreamResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.unary_stream_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        request = test_messages.request()
+
+        response_iterator = self._invoker.future(group, method)(
+            request, test_constants.LONG_TIMEOUT)
+        responses = list(response_iterator)
+
+        test_messages.verify(request, responses, self)
+
+  def testSuccessfulStreamRequestUnaryResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.stream_unary_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        requests = test_messages.requests()
+        request_iterator = _PauseableIterator(iter(requests))
+
+        # Use of a paused iterator of requests allows us to test that control is
+        # returned to calling code before the iterator yields any requests.
+        with request_iterator.pause():
+          response_future = self._invoker.future(group, method)(
+              request_iterator, test_constants.LONG_TIMEOUT)
+        response = response_future.result()
+
+        test_messages.verify(requests, response, self)
+
+  def testSuccessfulStreamRequestStreamResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.stream_stream_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        requests = test_messages.requests()
+        request_iterator = _PauseableIterator(iter(requests))
+
+        # Use of a paused iterator of requests allows us to test that control is
+        # returned to calling code before the iterator yields any requests.
+        with request_iterator.pause():
+          response_iterator = self._invoker.future(group, method)(
+              request_iterator, test_constants.LONG_TIMEOUT)
+        responses = list(response_iterator)
+
+        test_messages.verify(requests, responses, self)
+
+  def testSequentialInvocations(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.unary_unary_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        first_request = test_messages.request()
+        second_request = test_messages.request()
+
+        first_response_future = self._invoker.future(group, method)(
+            first_request, test_constants.LONG_TIMEOUT)
+        first_response = first_response_future.result()
+
+        test_messages.verify(first_request, first_response, self)
+
+        second_response_future = self._invoker.future(group, method)(
+            second_request, test_constants.LONG_TIMEOUT)
+        second_response = second_response_future.result()
+
+        test_messages.verify(second_request, second_response, self)
+
+  def testParallelInvocations(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.unary_unary_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        first_request = test_messages.request()
+        second_request = test_messages.request()
+
+        first_response_future = self._invoker.future(group, method)(
+            first_request, test_constants.LONG_TIMEOUT)
+        second_response_future = self._invoker.future(group, method)(
+            second_request, test_constants.LONG_TIMEOUT)
+        first_response = first_response_future.result()
+        second_response = second_response_future.result()
+
+        test_messages.verify(first_request, first_response, self)
+        test_messages.verify(second_request, second_response, self)
+
+  @unittest.skip('TODO(nathaniel): implement.')
+  def testWaitingForSomeButNotAllParallelInvocations(self):
+    raise NotImplementedError()
+
+  def testCancelledUnaryRequestUnaryResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.unary_unary_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        request = test_messages.request()
+
+        with self._control.pause():
+          response_future = self._invoker.future(group, method)(
+              request, test_constants.LONG_TIMEOUT)
+          cancel_method_return_value = response_future.cancel()
+
+        self.assertFalse(cancel_method_return_value)
+        self.assertTrue(response_future.cancelled())
+
+  def testCancelledUnaryRequestStreamResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.unary_stream_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        request = test_messages.request()
+
+        with self._control.pause():
+          response_iterator = self._invoker.future(group, method)(
+              request, test_constants.LONG_TIMEOUT)
+          response_iterator.cancel()
+
+        with self.assertRaises(face.CancellationError):
+          next(response_iterator)
+
+  def testCancelledStreamRequestUnaryResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.stream_unary_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        requests = test_messages.requests()
+
+        with self._control.pause():
+          response_future = self._invoker.future(group, method)(
+              iter(requests), test_constants.LONG_TIMEOUT)
+          cancel_method_return_value = response_future.cancel()
+
+        self.assertFalse(cancel_method_return_value)
+        self.assertTrue(response_future.cancelled())
+
+  def testCancelledStreamRequestStreamResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.stream_stream_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        requests = test_messages.requests()
+
+        with self._control.pause():
+          response_iterator = self._invoker.future(group, method)(
+              iter(requests), test_constants.LONG_TIMEOUT)
+          response_iterator.cancel()
+
+        with self.assertRaises(face.CancellationError):
+          next(response_iterator)
+
+  def testExpiredUnaryRequestUnaryResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.unary_unary_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        request = test_messages.request()
+
+        with self._control.pause():
+          response_future = self._invoker.future(
+              group, method)(request, test_constants.SHORT_TIMEOUT)
+          self.assertIsInstance(
+              response_future.exception(), face.ExpirationError)
+          with self.assertRaises(face.ExpirationError):
+            response_future.result()
+
+  def testExpiredUnaryRequestStreamResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.unary_stream_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        request = test_messages.request()
+
+        with self._control.pause():
+          response_iterator = self._invoker.future(group, method)(
+              request, test_constants.SHORT_TIMEOUT)
+          with self.assertRaises(face.ExpirationError):
+            list(response_iterator)
+
+  def testExpiredStreamRequestUnaryResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.stream_unary_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        requests = test_messages.requests()
+
+        with self._control.pause():
+          response_future = self._invoker.future(group, method)(
+              iter(requests), test_constants.SHORT_TIMEOUT)
+          self.assertIsInstance(
+              response_future.exception(), face.ExpirationError)
+          with self.assertRaises(face.ExpirationError):
+            response_future.result()
+
+  def testExpiredStreamRequestStreamResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.stream_stream_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        requests = test_messages.requests()
+
+        with self._control.pause():
+          response_iterator = self._invoker.future(group, method)(
+              iter(requests), test_constants.SHORT_TIMEOUT)
+          with self.assertRaises(face.ExpirationError):
+            list(response_iterator)
+
+  def testFailedUnaryRequestUnaryResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.unary_unary_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        request = test_messages.request()
+
+        with self._control.fail():
+          response_future = self._invoker.future(group, method)(
+              request, test_constants.SHORT_TIMEOUT)
+
+          # Because the servicer fails outside of the thread from which the
+          # servicer-side runtime called into it, its failure is
+          # indistinguishable from simply not having called its
+          # response_callback before the expiration of the RPC.
+          self.assertIsInstance(
+              response_future.exception(), face.ExpirationError)
+          with self.assertRaises(face.ExpirationError):
+            response_future.result()
+
+  def testFailedUnaryRequestStreamResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.unary_stream_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        request = test_messages.request()
+
+        # Because the servicer fails outside of the thread from which the
+        # servicer-side runtime called into it, its failure is indistinguishable
+        # from simply not having called its response_consumer before the
+        # expiration of the RPC.
+        with self._control.fail(), self.assertRaises(face.ExpirationError):
+          response_iterator = self._invoker.future(group, method)(
+              request, test_constants.SHORT_TIMEOUT)
+          list(response_iterator)
+
+  def testFailedStreamRequestUnaryResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.stream_unary_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        requests = test_messages.requests()
+
+        with self._control.fail():
+          response_future = self._invoker.future(group, method)(
+              iter(requests), test_constants.SHORT_TIMEOUT)
+
+          # Because the servicer fails outside of the thread from which the
+          # servicer-side runtime called into it, its failure is
+          # indistinguishable from simply not having called its
+          # response_callback before the expiration of the RPC.
+          self.assertIsInstance(
+              response_future.exception(), face.ExpirationError)
+          with self.assertRaises(face.ExpirationError):
+            response_future.result()
+
+  def testFailedStreamRequestStreamResponse(self):
+    for (group, method), test_messages_sequence in (
+        self._digest.stream_stream_messages_sequences.iteritems()):
+      for test_messages in test_messages_sequence:
+        requests = test_messages.requests()
+
+        # Because the servicer fails outside of the thread from which the
+        # servicer-side runtime called into it, its failure is indistinguishable
+        # from simply not having called its response_consumer before the
+        # expiration of the RPC.
+        with self._control.fail(), self.assertRaises(face.ExpirationError):
+          response_iterator = self._invoker.future(group, method)(
+              iter(requests), test_constants.SHORT_TIMEOUT)
+          list(response_iterator)
diff --git a/src/python/grpcio_test/grpc_test/framework/interfaces/face/_invocation.py b/src/python/grpcio_test/grpc_test/framework/interfaces/face/_invocation.py
new file mode 100644
index 0000000..448e845
--- /dev/null
+++ b/src/python/grpcio_test/grpc_test/framework/interfaces/face/_invocation.py
@@ -0,0 +1,213 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Coverage across the Face layer's generic-to-dynamic range for invocation."""
+
+import abc
+
+from grpc.framework.common import cardinality
+
+_CARDINALITY_TO_GENERIC_BLOCKING_BEHAVIOR = {
+    cardinality.Cardinality.UNARY_UNARY: 'blocking_unary_unary',
+    cardinality.Cardinality.UNARY_STREAM: 'inline_unary_stream',
+    cardinality.Cardinality.STREAM_UNARY: 'blocking_stream_unary',
+    cardinality.Cardinality.STREAM_STREAM: 'inline_stream_stream',
+}
+
+_CARDINALITY_TO_GENERIC_FUTURE_BEHAVIOR = {
+    cardinality.Cardinality.UNARY_UNARY: 'future_unary_unary',
+    cardinality.Cardinality.UNARY_STREAM: 'inline_unary_stream',
+    cardinality.Cardinality.STREAM_UNARY: 'future_stream_unary',
+    cardinality.Cardinality.STREAM_STREAM: 'inline_stream_stream',
+}
+
+_CARDINALITY_TO_GENERIC_EVENT_BEHAVIOR = {
+    cardinality.Cardinality.UNARY_UNARY: 'event_unary_unary',
+    cardinality.Cardinality.UNARY_STREAM: 'event_unary_stream',
+    cardinality.Cardinality.STREAM_UNARY: 'event_stream_unary',
+    cardinality.Cardinality.STREAM_STREAM: 'event_stream_stream',
+}
+
+_CARDINALITY_TO_MULTI_CALLABLE_ATTRIBUTE = {
+    cardinality.Cardinality.UNARY_UNARY: 'unary_unary',
+    cardinality.Cardinality.UNARY_STREAM: 'unary_stream',
+    cardinality.Cardinality.STREAM_UNARY: 'stream_unary',
+    cardinality.Cardinality.STREAM_STREAM: 'stream_stream',
+}
+
+
+class Invoker(object):
+  """A type used to invoke test RPCs."""
+  __metaclass__ = abc.ABCMeta
+
+  @abc.abstractmethod
+  def blocking(self, group, name):
+    """Invokes an RPC with blocking control flow."""
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def future(self, group, name):
+    """Invokes an RPC with future control flow."""
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def event(self, group, name):
+    """Invokes an RPC with event control flow."""
+    raise NotImplementedError()
+
+
+class InvokerConstructor(object):
+  """A type used to create Invokers."""
+  __metaclass__ = abc.ABCMeta
+
+  @abc.abstractmethod
+  def name(self):
+    """Specifies the name of the Invoker constructed by this object."""
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def construct_invoker(self, generic_stub, dynamic_stubs, methods):
+    """Constructs an Invoker for the given stubs and methods."""
+    raise NotImplementedError()
+
+
+class _GenericInvoker(Invoker):
+
+  def __init__(self, generic_stub, methods):
+    self._stub = generic_stub
+    self._methods = methods
+
+  def _behavior(self, group, name, cardinality_to_generic_method):
+    method_cardinality = self._methods[group, name].cardinality()
+    behavior = getattr(
+        self._stub, cardinality_to_generic_method[method_cardinality])
+    return lambda *args, **kwargs: behavior(group, name, *args, **kwargs)
+
+  def blocking(self, group, name):
+    return self._behavior(
+        group, name, _CARDINALITY_TO_GENERIC_BLOCKING_BEHAVIOR)
+
+  def future(self, group, name):
+    return self._behavior(group, name, _CARDINALITY_TO_GENERIC_FUTURE_BEHAVIOR)
+
+  def event(self, group, name):
+    return self._behavior(group, name, _CARDINALITY_TO_GENERIC_EVENT_BEHAVIOR)
+
+
+class _GenericInvokerConstructor(InvokerConstructor):
+
+  def name(self):
+    return 'GenericInvoker'
+
+  def construct_invoker(self, generic_stub, dynamic_stub, methods):
+    return _GenericInvoker(generic_stub, methods)
+
+
+class _MultiCallableInvoker(Invoker):
+
+  def __init__(self, generic_stub, methods):
+    self._stub = generic_stub
+    self._methods = methods
+
+  def _multi_callable(self, group, name):
+    method_cardinality = self._methods[group, name].cardinality()
+    behavior = getattr(
+        self._stub,
+        _CARDINALITY_TO_MULTI_CALLABLE_ATTRIBUTE[method_cardinality])
+    return behavior(group, name)
+
+  def blocking(self, group, name):
+    return self._multi_callable(group, name)
+
+  def future(self, group, name):
+    method_cardinality = self._methods[group, name].cardinality()
+    behavior = getattr(
+        self._stub,
+        _CARDINALITY_TO_MULTI_CALLABLE_ATTRIBUTE[method_cardinality])
+    if method_cardinality in (
+        cardinality.Cardinality.UNARY_UNARY,
+        cardinality.Cardinality.STREAM_UNARY):
+      return behavior(group, name).future
+    else:
+      return behavior(group, name)
+
+  def event(self, group, name):
+    return self._multi_callable(group, name).event
+
+
+class _MultiCallableInvokerConstructor(InvokerConstructor):
+
+  def name(self):
+    return 'MultiCallableInvoker'
+
+  def construct_invoker(self, generic_stub, dynamic_stub, methods):
+    return _MultiCallableInvoker(generic_stub, methods)
+
+
+class _DynamicInvoker(Invoker):
+
+  def __init__(self, dynamic_stubs, methods):
+    self._stubs = dynamic_stubs
+    self._methods = methods
+
+  def blocking(self, group, name):
+    return getattr(self._stubs[group], name)
+
+  def future(self, group, name):
+    if self._methods[group, name].cardinality() in (
+        cardinality.Cardinality.UNARY_UNARY,
+        cardinality.Cardinality.STREAM_UNARY):
+      return getattr(self._stubs[group], name).future
+    else:
+      return getattr(self._stubs[group], name)
+
+  def event(self, group, name):
+    return getattr(self._stubs[group], name).event
+
+
+class _DynamicInvokerConstructor(InvokerConstructor):
+
+  def name(self):
+    return 'DynamicInvoker'
+
+  def construct_invoker(self, generic_stub, dynamic_stubs, methods):
+    return _DynamicInvoker(dynamic_stubs, methods)
+
+
+def invoker_constructors():
+  """Creates a sequence of InvokerConstructors to use in tests of RPCs.
+
+  Returns:
+    A sequence of InvokerConstructors.
+  """
+  return (
+      _GenericInvokerConstructor(),
+      _MultiCallableInvokerConstructor(),
+      _DynamicInvokerConstructor(),
+  )
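
To make the intended use concrete, here is a minimal sketch of how a test harness might exercise every Invoker flavor; generic_stub, dynamic_stubs, methods, request, and timeout are assumed to be supplied by the Face-layer implementation under test and are illustrative names only.

# Sketch only: generic_stub, dynamic_stubs, methods, request and timeout are
# assumed to come from the Face-layer implementation under test.
for constructor in invoker_constructors():
  invoker = constructor.construct_invoker(generic_stub, dynamic_stubs, methods)
  blocking_behavior = invoker.blocking('Stock', 'GetLastTradePrice')
  response = blocking_behavior(request, timeout)
  future_behavior = invoker.future('Stock', 'GetLastTradePrice')
  response_future = future_behavior(request, timeout)

Each returned behavior is called just like the underlying stub method, so the same test body works regardless of which constructor produced the Invoker.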
diff --git a/src/python/grpcio_test/grpc_test/framework/interfaces/face/_receiver.py b/src/python/grpcio_test/grpc_test/framework/interfaces/face/_receiver.py
new file mode 100644
index 0000000..2e444ff
--- /dev/null
+++ b/src/python/grpcio_test/grpc_test/framework/interfaces/face/_receiver.py
@@ -0,0 +1,95 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""A utility useful in tests of asynchronous, event-driven interfaces."""
+
+import threading
+
+from grpc.framework.interfaces.face import face
+
+
+class Receiver(face.ResponseReceiver):
+  """A utility object useful in tests of asynchronous code."""
+
+  def __init__(self):
+    self._condition = threading.Condition()
+    self._initial_metadata = None
+    self._responses = []
+    self._terminal_metadata = None
+    self._code = None
+    self._details = None
+    self._completed = False
+    self._abortion = None
+
+  def abort(self, abortion):
+    with self._condition:
+      self._abortion = abortion
+      self._condition.notify_all()
+
+  def initial_metadata(self, initial_metadata):
+    with self._condition:
+      self._initial_metadata = initial_metadata
+
+  def response(self, response):
+    with self._condition:
+      self._responses.append(response)
+
+  def complete(self, terminal_metadata, code, details):
+    with self._condition:
+      self._terminal_metadata = terminal_metadata
+      self._code = code
+      self._details = details
+      self._completed = True
+      self._condition.notify_all()
+
+  def block_until_terminated(self):
+    with self._condition:
+      while self._abortion is None and not self._completed:
+        self._condition.wait()
+
+  def unary_response(self):
+    with self._condition:
+      if self._abortion is not None:
+        raise AssertionError('Aborted with abortion "%s"!' % self._abortion)
+      elif len(self._responses) != 1:
+        raise AssertionError(
+            '%d responses received, not exactly one!' % len(self._responses))
+      else:
+        return self._responses[0]
+
+  def stream_responses(self):
+    with self._condition:
+      if self._abortion is None:
+        return list(self._responses)
+      else:
+        raise AssertionError('Aborted with abortion "%s"!' % self._abortion)
+
+  def abortion(self):
+    with self._condition:
+      return self._abortion
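
In an event-flow test the same object serves as both the response receiver and the abortion callback, roughly as sketched below; event_behavior is assumed to have been obtained from an Invoker's event() method, and request and timeout from the surrounding test.

# Sketch only: event_behavior, request and timeout are assumed to come from
# the surrounding test; the Receiver doubles as the abortion callback.
receiver = Receiver()
event_behavior(request, receiver, receiver.abort, timeout)
receiver.block_until_terminated()
if receiver.abortion() is None:
  response = receiver.unary_response()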
diff --git a/src/python/grpcio_test/grpc_test/framework/interfaces/face/_service.py b/src/python/grpcio_test/grpc_test/framework/interfaces/face/_service.py
new file mode 100644
index 0000000..e25b8a0
--- /dev/null
+++ b/src/python/grpcio_test/grpc_test/framework/interfaces/face/_service.py
@@ -0,0 +1,332 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Private interfaces implemented by data sets used in Face-layer tests."""
+
+import abc
+
+# face is referenced from specification in this module.
+from grpc.framework.interfaces.face import face  # pylint: disable=unused-import
+from grpc_test.framework.interfaces.face import test_interfaces
+
+
+class UnaryUnaryTestMethodImplementation(test_interfaces.Method):
+  """A controllable implementation of a unary-unary method."""
+
+  __metaclass__ = abc.ABCMeta
+
+  @abc.abstractmethod
+  def service(self, request, response_callback, context, control):
+    """Services an RPC that accepts one message and produces one message.
+
+    Args:
+      request: The single request message for the RPC.
+      response_callback: A callback to be called to accept the response message
+        of the RPC.
+      context: A face.ServicerContext object.
+      control: A test_control.Control to control execution of this method.
+
+    Raises:
+      abandonment.Abandoned: May or may not be raised when the RPC has been
+        aborted.
+    """
+    raise NotImplementedError()
+
+
+class UnaryUnaryTestMessages(object):
+  """A type for unary-request-unary-response message pairings."""
+
+  __metaclass__ = abc.ABCMeta
+
+  @abc.abstractmethod
+  def request(self):
+    """Affords a request message.
+
+    Implementations of this method should return a different message with each
+    call so that multiple test executions of the test method may be made with
+    different inputs.
+
+    Returns:
+      A request message.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def verify(self, request, response, test_case):
+    """Verifies that the computed response matches the given request.
+
+    Args:
+      request: A request message.
+      response: A response message.
+      test_case: A unittest.TestCase object affording useful assertion methods.
+
+    Raises:
+      AssertionError: If the request and response do not match, indicating that
+        there was some problem executing the RPC under test.
+    """
+    raise NotImplementedError()
+
+
+class UnaryStreamTestMethodImplementation(test_interfaces.Method):
+  """A controllable implementation of a unary-stream method."""
+
+  __metaclass__ = abc.ABCMeta
+
+  @abc.abstractmethod
+  def service(self, request, response_consumer, context, control):
+    """Services an RPC that takes one message and produces a stream of messages.
+
+    Args:
+      request: The single request message for the RPC.
+      response_consumer: A stream.Consumer to be called to accept the response
+        messages of the RPC.
+      context: A face.ServicerContext object.
+      control: A test_control.Control to control execution of this method.
+
+    Raises:
+      abandonment.Abandoned: May or may not be raised when the RPC has been
+        aborted.
+    """
+    raise NotImplementedError()
+
+
+class UnaryStreamTestMessages(object):
+  """A type for unary-request-stream-response message pairings."""
+
+  __metaclass__ = abc.ABCMeta
+
+  @abc.abstractmethod
+  def request(self):
+    """Affords a request message.
+
+    Implementations of this method should return a different message with each
+    call so that multiple test executions of the test method may be made with
+    different inputs.
+
+    Returns:
+      A request message.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def verify(self, request, responses, test_case):
+    """Verifies that the computed responses match the given request.
+
+    Args:
+      request: A request message.
+      responses: A sequence of response messages.
+      test_case: A unittest.TestCase object affording useful assertion methods.
+
+    Raises:
+      AssertionError: If the request and responses do not match, indicating that
+        there was some problem executing the RPC under test.
+    """
+    raise NotImplementedError()
+
+
+class StreamUnaryTestMethodImplementation(test_interfaces.Method):
+  """A controllable implementation of a stream-unary method."""
+
+  __metaclass__ = abc.ABCMeta
+
+  @abc.abstractmethod
+  def service(self, response_callback, context, control):
+    """Services an RPC that takes a stream of messages and produces one message.
+
+    Args:
+      response_callback: A callback to be called to accept the response message
+        of the RPC.
+      context: A face.ServicerContext object.
+      control: A test_control.Control to control execution of this method.
+
+    Returns:
+      A stream.Consumer with which to accept the request messages of the RPC.
+        The consumer returned from this method may or may not be invoked to
+        completion: in the case of RPC abortion, RPC Framework will simply stop
+        passing messages to this object. Implementations must not assume that
+        this object will be called to completion of the request stream or even
+        called at all.
+
+    Raises:
+      abandonment.Abandoned: May or may not be raised when the RPC has been
+        aborted.
+    """
+    raise NotImplementedError()
+
+
+class StreamUnaryTestMessages(object):
+  """A type for stream-request-unary-response message pairings."""
+
+  __metaclass__ = abc.ABCMeta
+
+  @abc.abstractmethod
+  def requests(self):
+    """Affords a sequence of request messages.
+
+    Implementations of this method should return a different sequence with each
+    call so that multiple test executions of the test method may be made with
+    different inputs.
+
+    Returns:
+      A sequence of request messages.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def verify(self, requests, response, test_case):
+    """Verifies that the computed response matches the given requests.
+
+    Args:
+      requests: A sequence of request messages.
+      response: A response message.
+      test_case: A unittest.TestCase object affording useful assertion methods.
+
+    Raises:
+      AssertionError: If the requests and response do not match, indicating that
+        there was some problem executing the RPC under test.
+    """
+    raise NotImplementedError()
+
+
+class StreamStreamTestMethodImplementation(test_interfaces.Method):
+  """A controllable implementation of a stream-stream method."""
+
+  __metaclass__ = abc.ABCMeta
+
+  @abc.abstractmethod
+  def service(self, response_consumer, context, control):
+    """Services an RPC that accepts and produces streams of messages.
+
+    Args:
+      response_consumer: A stream.Consumer to be called to accept the response
+        messages of the RPC.
+      context: A face.ServicerContext object.
+      control: A test_control.Control to control execution of this method.
+
+    Returns:
+      A stream.Consumer with which to accept the request messages of the RPC.
+        The consumer returned from this method may or may not be invoked to
+        completion: in the case of RPC abortion, RPC Framework will simply stop
+        passing messages to this object. Implementations must not assume that
+        this object will be called to completion of the request stream or even
+        called at all.
+
+    Raises:
+      abandonment.Abandoned: May or may not be raised when the RPC has been
+        aborted.
+    """
+    raise NotImplementedError()
+
+
+class StreamStreamTestMessages(object):
+  """A type for stream-request-stream-response message pairings."""
+
+  __metaclass__ = abc.ABCMeta
+
+  @abc.abstractmethod
+  def requests(self):
+    """Affords a sequence of request messages.
+
+    Implementations of this method should return a different sequence with each
+    call so that multiple test executions of the test method may be made with
+    different inputs.
+
+    Returns:
+      A sequence of request messages.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def verify(self, requests, responses, test_case):
+    """Verifies that the computed response matches the given requests.
+
+    Args:
+      requests: A sequence of request messages.
+      responses: A sequence of response messages.
+      test_case: A unittest.TestCase object affording useful assertion methods.
+
+    Raises:
+      AssertionError: If the requests and responses do not match, indicating
+        that there was some problem executing the RPC under test.
+    """
+    raise NotImplementedError()
+
+
+class TestService(object):
+  """A specification of implemented methods to use in tests."""
+
+  __metaclass__ = abc.ABCMeta
+
+  @abc.abstractmethod
+  def unary_unary_scenarios(self):
+    """Affords unary-request-unary-response test methods and their messages.
+
+    Returns:
+      A dict from method group-name pair to implementation/messages pair. The
+        first element of the pair is a UnaryUnaryTestMethodImplementation object
+        and the second element is a sequence of UnaryUnaryTestMessages
+        objects.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def unary_stream_scenarios(self):
+    """Affords unary-request-stream-response test methods and their messages.
+
+    Returns:
+      A dict from method group-name pair to implementation/messages pair. The
+        first element of the pair is a UnaryStreamTestMethodImplementation
+        object and the second element is a sequence of
+        UnaryStreamTestMessages objects.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def stream_unary_scenarios(self):
+    """Affords stream-request-unary-response test methods and their messages.
+
+    Returns:
+      A dict from method group-name pair to implementation/messages pair. The
+        first element of the pair is a StreamUnaryTestMethodImplementation
+        object and the second element is a sequence of
+        StreamUnaryTestMessages objects.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def stream_stream_scenarios(self):
+    """Affords stream-request-stream-response test methods and their messages.
+
+    Returns:
+      A dict from method group-name pair to implementation/messages pair. The
+        first element of the pair is a StreamStreamTestMethodImplementation
+        object and the second element is a sequence of
+        StreamStreamTestMessages objects.
+    """
+    raise NotImplementedError()
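
As a shape reference only, a hypothetical messages pairing for a unary-unary echo method could satisfy the UnaryUnaryTestMessages contract as follows; EchoMessages is illustrative and not part of this module. The stock service module that follows provides complete, concrete implementations against stock.proto.

# Hypothetical example of the UnaryUnaryTestMessages contract; not part of
# this module. Each call to request() yields a distinct message.
class EchoMessages(UnaryUnaryTestMessages):

  def __init__(self):
    self._index = 0

  def request(self):
    self._index += 1
    return 'echo request %d' % self._index

  def verify(self, request, response, test_case):
    test_case.assertEqual(request, response)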
diff --git a/src/python/grpcio_test/grpc_test/framework/interfaces/face/_stock_service.py b/src/python/grpcio_test/grpc_test/framework/interfaces/face/_stock_service.py
new file mode 100644
index 0000000..1dd2ec3
--- /dev/null
+++ b/src/python/grpcio_test/grpc_test/framework/interfaces/face/_stock_service.py
@@ -0,0 +1,396 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Examples of Python implementations of the stock.proto Stock service."""
+
+from grpc.framework.common import cardinality
+from grpc.framework.foundation import abandonment
+from grpc.framework.foundation import stream
+from grpc_test.framework.common import test_constants
+from grpc_test.framework.interfaces.face import _service
+from grpc_test._junkdrawer import stock_pb2
+
+_STOCK_GROUP_NAME = 'Stock'
+_SYMBOL_FORMAT = 'test symbol:%03d'
+
+# A test-appropriate security-pricing function. :-P
+_price = lambda symbol_name: float(hash(symbol_name) % 4096)
+
+
+def _get_last_trade_price(stock_request, stock_reply_callback, control, active):
+  """A unary-request, unary-response test method."""
+  control.control()
+  if active():
+    stock_reply_callback(
+        stock_pb2.StockReply(
+            symbol=stock_request.symbol, price=_price(stock_request.symbol)))
+  else:
+    raise abandonment.Abandoned()
+
+
+def _get_last_trade_price_multiple(stock_reply_consumer, control, active):
+  """A stream-request, stream-response test method."""
+  def stock_reply_for_stock_request(stock_request):
+    control.control()
+    if active():
+      return stock_pb2.StockReply(
+          symbol=stock_request.symbol, price=_price(stock_request.symbol))
+    else:
+      raise abandonment.Abandoned()
+
+  class StockRequestConsumer(stream.Consumer):
+
+    def consume(self, stock_request):
+      stock_reply_consumer.consume(stock_reply_for_stock_request(stock_request))
+
+    def terminate(self):
+      control.control()
+      stock_reply_consumer.terminate()
+
+    def consume_and_terminate(self, stock_request):
+      stock_reply_consumer.consume_and_terminate(
+          stock_reply_for_stock_request(stock_request))
+
+  return StockRequestConsumer()
+
+
+def _watch_future_trades(stock_request, stock_reply_consumer, control, active):
+  """A unary-request, stream-response test method."""
+  base_price = _price(stock_request.symbol)
+  for index in range(stock_request.num_trades_to_watch):
+    control.control()
+    if active():
+      stock_reply_consumer.consume(
+          stock_pb2.StockReply(
+              symbol=stock_request.symbol, price=base_price + index))
+    else:
+      raise abandonment.Abandoned()
+  stock_reply_consumer.terminate()
+
+
+def _get_highest_trade_price(stock_reply_callback, control, active):
+  """A stream-request, unary-response test method."""
+
+  class StockRequestConsumer(stream.Consumer):
+    """Keeps an ongoing record of the most valuable symbol yet consumed."""
+
+    def __init__(self):
+      self._symbol = None
+      self._price = None
+
+    def consume(self, stock_request):
+      control.control()
+      if active():
+        if self._price is None:
+          self._symbol = stock_request.symbol
+          self._price = _price(stock_request.symbol)
+        else:
+          candidate_price = _price(stock_request.symbol)
+          if self._price < candidate_price:
+            self._symbol = stock_request.symbol
+            self._price = candidate_price
+
+    def terminate(self):
+      control.control()
+      if active():
+        if self._symbol is None:
+          raise ValueError()
+        else:
+          stock_reply_callback(
+              stock_pb2.StockReply(symbol=self._symbol, price=self._price))
+          self._symbol = None
+          self._price = None
+
+    def consume_and_terminate(self, stock_request):
+      control.control()
+      if active():
+        if self._price is None:
+          stock_reply_callback(
+              stock_pb2.StockReply(
+                  symbol=stock_request.symbol,
+                  price=_price(stock_request.symbol)))
+        else:
+          candidate_price = _price(stock_request.symbol)
+          if self._price < candidate_price:
+            stock_reply_callback(
+                stock_pb2.StockReply(
+                    symbol=stock_request.symbol, price=candidate_price))
+          else:
+            stock_reply_callback(
+                stock_pb2.StockReply(
+                    symbol=self._symbol, price=self._price))
+
+        self._symbol = None
+        self._price = None
+
+  return StockRequestConsumer()
+
+
+class GetLastTradePrice(_service.UnaryUnaryTestMethodImplementation):
+  """GetLastTradePrice for use in tests."""
+
+  def group(self):
+    return _STOCK_GROUP_NAME
+
+  def name(self):
+    return 'GetLastTradePrice'
+
+  def cardinality(self):
+    return cardinality.Cardinality.UNARY_UNARY
+
+  def request_class(self):
+    return stock_pb2.StockRequest
+
+  def response_class(self):
+    return stock_pb2.StockReply
+
+  def serialize_request(self, request):
+    return request.SerializeToString()
+
+  def deserialize_request(self, serialized_request):
+    return stock_pb2.StockRequest.FromString(serialized_request)
+
+  def serialize_response(self, response):
+    return response.SerializeToString()
+
+  def deserialize_response(self, serialized_response):
+    return stock_pb2.StockReply.FromString(serialized_response)
+
+  def service(self, request, response_callback, context, control):
+    _get_last_trade_price(
+        request, response_callback, control, context.is_active)
+
+
+class GetLastTradePriceMessages(_service.UnaryUnaryTestMessages):
+
+  def __init__(self):
+    self._index = 0
+
+  def request(self):
+    symbol = _SYMBOL_FORMAT % self._index
+    self._index += 1
+    return stock_pb2.StockRequest(symbol=symbol)
+
+  def verify(self, request, response, test_case):
+    test_case.assertEqual(request.symbol, response.symbol)
+    test_case.assertEqual(_price(request.symbol), response.price)
+
+
+class GetLastTradePriceMultiple(_service.StreamStreamTestMethodImplementation):
+  """GetLastTradePriceMultiple for use in tests."""
+
+  def group(self):
+    return _STOCK_GROUP_NAME
+
+  def name(self):
+    return 'GetLastTradePriceMultiple'
+
+  def cardinality(self):
+    return cardinality.Cardinality.STREAM_STREAM
+
+  def request_class(self):
+    return stock_pb2.StockRequest
+
+  def response_class(self):
+    return stock_pb2.StockReply
+
+  def serialize_request(self, request):
+    return request.SerializeToString()
+
+  def deserialize_request(self, serialized_request):
+    return stock_pb2.StockRequest.FromString(serialized_request)
+
+  def serialize_response(self, response):
+    return response.SerializeToString()
+
+  def deserialize_response(self, serialized_response):
+    return stock_pb2.StockReply.FromString(serialized_response)
+
+  def service(self, response_consumer, context, control):
+    return _get_last_trade_price_multiple(
+        response_consumer, control, context.is_active)
+
+
+class GetLastTradePriceMultipleMessages(_service.StreamStreamTestMessages):
+  """Pairs of message streams for use with GetLastTradePriceMultiple."""
+
+  def __init__(self):
+    self._index = 0
+
+  def requests(self):
+    base_index = self._index
+    self._index += 1
+    return [
+        stock_pb2.StockRequest(symbol=_SYMBOL_FORMAT % (base_index + index))
+        for index in range(test_constants.STREAM_LENGTH)]
+
+  def verify(self, requests, responses, test_case):
+    test_case.assertEqual(len(requests), len(responses))
+    for stock_request, stock_reply in zip(requests, responses):
+      test_case.assertEqual(stock_request.symbol, stock_reply.symbol)
+      test_case.assertEqual(_price(stock_request.symbol), stock_reply.price)
+
+
+class WatchFutureTrades(_service.UnaryStreamTestMethodImplementation):
+  """WatchFutureTrades for use in tests."""
+
+  def group(self):
+    return _STOCK_GROUP_NAME
+
+  def name(self):
+    return 'WatchFutureTrades'
+
+  def cardinality(self):
+    return cardinality.Cardinality.UNARY_STREAM
+
+  def request_class(self):
+    return stock_pb2.StockRequest
+
+  def response_class(self):
+    return stock_pb2.StockReply
+
+  def serialize_request(self, request):
+    return request.SerializeToString()
+
+  def deserialize_request(self, serialized_request):
+    return stock_pb2.StockRequest.FromString(serialized_request)
+
+  def serialize_response(self, response):
+    return response.SerializeToString()
+
+  def deserialize_response(self, serialized_response):
+    return stock_pb2.StockReply.FromString(serialized_response)
+
+  def service(self, request, response_consumer, context, control):
+    _watch_future_trades(request, response_consumer, control, context.is_active)
+
+
+class WatchFutureTradesMessages(_service.UnaryStreamTestMessages):
+  """Pairs of a single request message and a sequence of response messages."""
+
+  def __init__(self):
+    self._index = 0
+
+  def request(self):
+    symbol = _SYMBOL_FORMAT % self._index
+    self._index += 1
+    return stock_pb2.StockRequest(
+        symbol=symbol, num_trades_to_watch=test_constants.STREAM_LENGTH)
+
+  def verify(self, request, responses, test_case):
+    test_case.assertEqual(test_constants.STREAM_LENGTH, len(responses))
+    base_price = _price(request.symbol)
+    for index, response in enumerate(responses):
+      test_case.assertEqual(base_price + index, response.price)
+
+
+class GetHighestTradePrice(_service.StreamUnaryTestMethodImplementation):
+  """GetHighestTradePrice for use in tests."""
+
+  def group(self):
+    return _STOCK_GROUP_NAME
+
+  def name(self):
+    return 'GetHighestTradePrice'
+
+  def cardinality(self):
+    return cardinality.Cardinality.STREAM_UNARY
+
+  def request_class(self):
+    return stock_pb2.StockRequest
+
+  def response_class(self):
+    return stock_pb2.StockReply
+
+  def serialize_request(self, request):
+    return request.SerializeToString()
+
+  def deserialize_request(self, serialized_request):
+    return stock_pb2.StockRequest.FromString(serialized_request)
+
+  def serialize_response(self, response):
+    return response.SerializeToString()
+
+  def deserialize_response(self, serialized_response):
+    return stock_pb2.StockReply.FromString(serialized_response)
+
+  def service(self, response_callback, context, control):
+    return _get_highest_trade_price(
+        response_callback, control, context.is_active)
+
+
+class GetHighestTradePriceMessages(_service.StreamUnaryTestMessages):
+
+  def requests(self):
+    return [
+        stock_pb2.StockRequest(symbol=_SYMBOL_FORMAT % index)
+        for index in range(test_constants.STREAM_LENGTH)]
+
+  def verify(self, requests, response, test_case):
+    price = None
+    symbol = None
+    for stock_request in requests:
+      current_symbol = stock_request.symbol
+      current_price = _price(current_symbol)
+      if price is None or price < current_price:
+        price = current_price
+        symbol = current_symbol
+    test_case.assertEqual(price, response.price)
+    test_case.assertEqual(symbol, response.symbol)
+
+
+class StockTestService(_service.TestService):
+  """A corpus of test data with one method of each RPC cardinality."""
+
+  def unary_unary_scenarios(self):
+    return {
+        (_STOCK_GROUP_NAME, 'GetLastTradePrice'): (
+            GetLastTradePrice(), [GetLastTradePriceMessages()]),
+    }
+
+  def unary_stream_scenarios(self):
+    return {
+        (_STOCK_GROUP_NAME, 'WatchFutureTrades'): (
+            WatchFutureTrades(), [WatchFutureTradesMessages()]),
+    }
+
+  def stream_unary_scenarios(self):
+    return {
+        (_STOCK_GROUP_NAME, 'GetHighestTradePrice'): (
+            GetHighestTradePrice(), [GetHighestTradePriceMessages()])
+    }
+
+  def stream_stream_scenarios(self):
+    return {
+        (_STOCK_GROUP_NAME, 'GetLastTradePriceMultiple'): (
+            GetLastTradePriceMultiple(), [GetLastTradePriceMultipleMessages()]),
+    }
+
+
+STOCK_TEST_SERVICE = StockTestService()
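
A hedged sketch of how a test might walk these scenarios; invoke_under_test and test_case stand in for whatever invocation path and unittest.TestCase instance the enclosing test supplies.

# Sketch only: invoke_under_test and test_case are placeholders supplied by
# the enclosing test.
for (group, name), (implementation, messages_sequence) in (
    STOCK_TEST_SERVICE.unary_unary_scenarios().iteritems()):
  for messages in messages_sequence:
    request = messages.request()
    response = invoke_under_test(group, name, request)
    messages.verify(request, response, test_case)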
diff --git a/src/python/grpcio_test/grpc_test/framework/interfaces/face/test_cases.py b/src/python/grpcio_test/grpc_test/framework/interfaces/face/test_cases.py
new file mode 100644
index 0000000..ca62366
--- /dev/null
+++ b/src/python/grpcio_test/grpc_test/framework/interfaces/face/test_cases.py
@@ -0,0 +1,67 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Tools for creating tests of implementations of the Face layer."""
+
+# unittest is referenced from specification in this module.
+import unittest  # pylint: disable=unused-import
+
+# test_interfaces is referenced from specification in this module.
+from grpc_test.framework.interfaces.face import _blocking_invocation_inline_service
+from grpc_test.framework.interfaces.face import _event_invocation_synchronous_event_service
+from grpc_test.framework.interfaces.face import _future_invocation_asynchronous_event_service
+from grpc_test.framework.interfaces.face import _invocation
+from grpc_test.framework.interfaces.face import test_interfaces  # pylint: disable=unused-import
+
+_TEST_CASE_SUPERCLASSES = (
+    _blocking_invocation_inline_service.TestCase,
+    _event_invocation_synchronous_event_service.TestCase,
+    _future_invocation_asynchronous_event_service.TestCase,
+)
+
+
+def test_cases(implementation):
+  """Creates unittest.TestCase classes for a given Face layer implementation.
+
+  Args:
+    implementation: A test_interfaces.Implementation specifying creation and
+      destruction of a given Face layer implementation.
+
+  Returns:
+    A sequence of subclasses of unittest.TestCase defining tests of the
+      specified Face layer implementation.
+  """
+  test_case_classes = []
+  for invoker_constructor in _invocation.invoker_constructors():
+    for super_class in _TEST_CASE_SUPERCLASSES:
+      test_case_classes.append(
+          type(invoker_constructor.name() + super_class.NAME, (super_class,),
+               {'implementation': implementation,
+                'invoker_constructor': invoker_constructor}))
+  return test_case_classes
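
A concrete implementation's test module can expose the generated classes to the test runner through unittest's standard load_tests protocol, roughly as sketched below; _IMPLEMENTATION names a hypothetical test_interfaces.Implementation instance.

# Sketch only: _IMPLEMENTATION is an illustrative
# test_interfaces.Implementation instance defined by the concrete test module.
import unittest

def load_tests(loader, tests, pattern):
  return unittest.TestSuite(
      loader.loadTestsFromTestCase(test_case_class)
      for test_case_class in test_cases(_IMPLEMENTATION))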
diff --git a/src/python/grpcio_test/grpc_test/framework/interfaces/face/test_interfaces.py b/src/python/grpcio_test/grpc_test/framework/interfaces/face/test_interfaces.py
new file mode 100644
index 0000000..b2b5c10
--- /dev/null
+++ b/src/python/grpcio_test/grpc_test/framework/interfaces/face/test_interfaces.py
@@ -0,0 +1,229 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Interfaces used in tests of implementations of the Face layer."""
+
+import abc
+
+from grpc.framework.common import cardinality  # pylint: disable=unused-import
+from grpc.framework.interfaces.face import face  # pylint: disable=unused-import
+
+
+class Method(object):
+  """Specifies a method to be used in tests."""
+  __metaclass__ = abc.ABCMeta
+
+  @abc.abstractmethod
+  def group(self):
+    """Identify the group of the method.
+
+    Returns:
+      The group of the method.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def name(self):
+    """Identify the name of the method.
+
+    Returns:
+      The name of the method.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def cardinality(self):
+    """Identify the cardinality of the method.
+
+    Returns:
+      A cardinality.Cardinality value describing the streaming semantics of the
+        method.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def request_class(self):
+    """Identify the class used for the method's request objects.
+
+    Returns:
+      The class object of the class to which the method's request objects
+        belong.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def response_class(self):
+    """Identify the class used for the method's response objects.
+
+    Returns:
+      The class object of the class to which the method's response objects
+        belong.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def serialize_request(self, request):
+    """Serialize the given request object.
+
+    Args:
+      request: A request object appropriate for this method.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def deserialize_request(self, serialized_request):
+    """Synthesize a request object from a given bytestring.
+
+    Args:
+      serialized_request: A bytestring deserializable into a request object
+        appropriate for this method.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def serialize_response(self, response):
+    """Serialize the given response object.
+
+    Args:
+      response: A response object appropriate for this method.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def deserialize_response(self, serialized_response):
+    """Synthesize a response object from a given bytestring.
+
+    Args:
+      serialized_response: A bytestring deserializable into a response object
+        appropriate for this method.
+    """
+    raise NotImplementedError()
+
+
+class Implementation(object):
+  """Specifies an implementation of the Face layer."""
+  __metaclass__ = abc.ABCMeta
+
+  @abc.abstractmethod
+  def instantiate(
+      self, methods, method_implementations,
+      multi_method_implementation):
+    """Instantiates the Face layer implementation to be used in a test.
+
+    Args:
+      methods: A sequence of Method objects describing the methods available to
+        be called during the test.
+      method_implementations: A dictionary from group-name pair to
+        face.MethodImplementation object specifying implementation of a method.
+      multi_method_implementation: A face.MultiMethodImplementation or None.
+
+    Returns:
+      A sequence of length three, the first element of which is a
+        face.GenericStub, the second element of which is a dictionary from
+        groups to face.DynamicStubs affording invocation of the group's
+        methods, and the third element of which is an arbitrary memo object to
+        be kept and passed to destantiate at the conclusion of the test. The
+        returned stubs must be backed by the provided implementations.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def destantiate(self, memo):
+    """Destroys the Face layer implementation under test.
+
+    Args:
+      memo: The object from the third position of the return value of a call to
+        instantiate.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def invocation_metadata(self):
+    """Provides the metadata to be used when invoking a test RPC.
+
+    Returns:
+      An object to use as the supplied-at-invocation-time metadata in a test
+        RPC.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def initial_metadata(self):
+    """Provides the metadata for use as a test RPC's first servicer metadata.
+
+    Returns:
+      An object to use as the from-the-servicer-before-responses metadata in a
+        test RPC.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def terminal_metadata(self):
+    """Provides the metadata for use as a test RPC's second servicer metadata.
+
+    Returns:
+      An object to use as the from-the-servicer-after-all-responses metadata in
+        a test RPC.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def code(self):
+    """Provides the value for use as a test RPC's code.
+
+    Returns:
+      An object to use as the from-the-servicer code in a test RPC.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def details(self):
+    """Provides the value for use as a test RPC's details.
+
+    Returns:
+      An object to use as the from-the-servicer details in a test RPC.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def metadata_transmitted(self, original_metadata, transmitted_metadata):
+    """Identifies whether or not metadata was properly transmitted.
+
+    Args:
+      original_metadata: A metadata value passed to the Face interface
+        implementation under test.
+      transmitted_metadata: The same metadata value after having been
+        transmitted via an RPC performed by the Face interface implementation
+        under test.
+
+    Returns:
+      Whether or not the metadata was properly transmitted by the Face interface
+        implementation under test.
+    """
+    raise NotImplementedError()
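
For implementations whose metadata values are sequences of key-value pairs, metadata_transmitted can often reduce to a containment check, roughly as below; this is an assumption about one possible metadata representation, not a requirement of the interface.

# Sketch only, assuming metadata is a sequence of key-value pairs; a concrete
# Implementation may need a richer comparison.
def metadata_transmitted(self, original_metadata, transmitted_metadata):
  return original_metadata is None or all(
      pair in transmitted_metadata for pair in original_metadata)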
diff --git a/src/python/grpcio_test/grpc_test/framework/interfaces/links/test_cases.py b/src/python/grpcio_test/grpc_test/framework/interfaces/links/test_cases.py
index 1e575d1..ecf49d9 100644
--- a/src/python/grpcio_test/grpc_test/framework/interfaces/links/test_cases.py
+++ b/src/python/grpcio_test/grpc_test/framework/interfaces/links/test_cases.py
@@ -300,7 +300,7 @@
         invocation_operation_id, 0, _TRANSMISSION_GROUP, _TRANSMISSION_METHOD,
         links.Ticket.Subscription.FULL, timeout, 0, invocation_initial_metadata,
         invocation_payload, invocation_terminal_metadata, invocation_code,
-        invocation_message, links.Ticket.Termination.COMPLETION)
+        invocation_message, links.Ticket.Termination.COMPLETION, None)
     self._invocation_link.accept_ticket(original_invocation_ticket)
 
     self._service_mate.block_until_tickets_satisfy(
@@ -317,7 +317,7 @@
         service_operation_id, 0, None, None, links.Ticket.Subscription.FULL,
         timeout, 0, service_initial_metadata, service_payload,
         service_terminal_metadata, service_code, service_message,
-        links.Ticket.Termination.COMPLETION)
+        links.Ticket.Termination.COMPLETION, None)
     self._service_link.accept_ticket(original_service_ticket)
     self._invocation_mate.block_until_tickets_satisfy(terminated)
     self._assert_is_valid_service_sequence(
diff --git a/src/python/grpcio_test/grpc_test/framework/interfaces/links/test_utilities.py b/src/python/grpcio_test/grpc_test/framework/interfaces/links/test_utilities.py
index a2bd710..39c7f2f 100644
--- a/src/python/grpcio_test/grpc_test/framework/interfaces/links/test_utilities.py
+++ b/src/python/grpcio_test/grpc_test/framework/interfaces/links/test_utilities.py
@@ -64,7 +64,7 @@
         ticket.allowance, ticket.initial_metadata,
         '<payload of length {}>'.format(payload_length),
         ticket.terminal_metadata, ticket.code, ticket.message,
-        ticket.termination)
+        ticket.termination, None)
 
 
 class RecordingLink(links.Link):
diff --git a/test/core/channel/channel_args_test.c b/test/core/channel/channel_args_test.c
new file mode 100644
index 0000000..87f006a
--- /dev/null
+++ b/test/core/channel/channel_args_test.c
@@ -0,0 +1,141 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <string.h>
+
+#include <grpc/support/log.h>
+#include <grpc/support/useful.h>
+
+#include "src/core/channel/channel_args.h"
+
+#include "test/core/util/test_config.h"
+
+static void test_create(void) {
+  grpc_arg arg_int;
+  grpc_arg arg_string;
+  grpc_arg to_add[2];
+  grpc_channel_args *ch_args;
+
+  arg_int.key = "int_arg";
+  arg_int.type = GRPC_ARG_INTEGER;
+  arg_int.value.integer = 123;
+
+  arg_string.key = "str key";
+  arg_string.type = GRPC_ARG_STRING;
+  arg_string.value.string = "str value";
+
+  to_add[0] = arg_int;
+  to_add[1] = arg_string;
+  ch_args = grpc_channel_args_copy_and_add(NULL, to_add, 2);
+
+  GPR_ASSERT(ch_args->num_args == 2);
+  GPR_ASSERT(strcmp(ch_args->args[0].key, arg_int.key) == 0);
+  GPR_ASSERT(ch_args->args[0].type == arg_int.type);
+  GPR_ASSERT(ch_args->args[0].value.integer == arg_int.value.integer);
+
+  GPR_ASSERT(strcmp(ch_args->args[1].key, arg_string.key) == 0);
+  GPR_ASSERT(ch_args->args[1].type == arg_string.type);
+  GPR_ASSERT(strcmp(ch_args->args[1].value.string, arg_string.value.string) ==
+             0);
+
+  grpc_channel_args_destroy(ch_args);
+}
+
+static void test_set_compression_algorithm(void) {
+  grpc_channel_args *ch_args;
+
+  ch_args =
+      grpc_channel_args_set_compression_algorithm(NULL, GRPC_COMPRESS_GZIP);
+  GPR_ASSERT(ch_args->num_args == 1);
+  GPR_ASSERT(strcmp(ch_args->args[0].key, GRPC_COMPRESSION_ALGORITHM_ARG) == 0);
+  GPR_ASSERT(ch_args->args[0].type == GRPC_ARG_INTEGER);
+
+  grpc_channel_args_destroy(ch_args);
+}
+
+static void test_compression_algorithm_states(void) {
+  grpc_channel_args *ch_args, *ch_args_wo_gzip, *ch_args_wo_gzip_deflate;
+  int states_bitset;
+  size_t i;
+
+  ch_args = grpc_channel_args_copy_and_add(NULL, NULL, 0);
+  /* by default, all enabled */
+  states_bitset = grpc_channel_args_compression_algorithm_get_states(ch_args);
+
+  for (i = 0; i < GRPC_COMPRESS_ALGORITHMS_COUNT; i++) {
+    GPR_ASSERT(GPR_BITGET(states_bitset, i));
+  }
+
+  /* disable gzip and deflate */
+  ch_args_wo_gzip = grpc_channel_args_compression_algorithm_set_state(
+      &ch_args, GRPC_COMPRESS_GZIP, 0);
+  GPR_ASSERT(ch_args == ch_args_wo_gzip);
+  ch_args_wo_gzip_deflate = grpc_channel_args_compression_algorithm_set_state(
+      &ch_args_wo_gzip, GRPC_COMPRESS_DEFLATE, 0);
+  GPR_ASSERT(ch_args_wo_gzip == ch_args_wo_gzip_deflate);
+
+  states_bitset = grpc_channel_args_compression_algorithm_get_states(
+      ch_args_wo_gzip_deflate);
+  for (i = 0; i < GRPC_COMPRESS_ALGORITHMS_COUNT; i++) {
+    if (i == GRPC_COMPRESS_GZIP || i == GRPC_COMPRESS_DEFLATE) {
+      GPR_ASSERT(GPR_BITGET(states_bitset, i) == 0);
+    } else {
+      GPR_ASSERT(GPR_BITGET(states_bitset, i) != 0);
+    }
+  }
+
+  /* re-enable gzip only */
+  ch_args_wo_gzip = grpc_channel_args_compression_algorithm_set_state(
+      &ch_args_wo_gzip_deflate, GRPC_COMPRESS_GZIP, 1);
+  GPR_ASSERT(ch_args_wo_gzip == ch_args_wo_gzip_deflate);
+
+  states_bitset =
+      grpc_channel_args_compression_algorithm_get_states(ch_args_wo_gzip);
+  for (i = 0; i < GRPC_COMPRESS_ALGORITHMS_COUNT; i++) {
+    if (i == GRPC_COMPRESS_DEFLATE) {
+      GPR_ASSERT(GPR_BITGET(states_bitset, i) == 0);
+    } else {
+      GPR_ASSERT(GPR_BITGET(states_bitset, i) != 0);
+    }
+  }
+
+  grpc_channel_args_destroy(ch_args);
+}
+
+int main(int argc, char **argv) {
+  grpc_test_init(argc, argv);
+  test_create();
+  test_set_compression_algorithm();
+  test_compression_algorithm_states();
+  return 0;
+}
diff --git a/test/core/end2end/fixtures/chttp2_fake_security.c b/test/core/end2end/fixtures/chttp2_fake_security.c
index 27531ec..a0a6793 100644
--- a/test/core/end2end/fixtures/chttp2_fake_security.c
+++ b/test/core/end2end/fixtures/chttp2_fake_security.c
@@ -70,7 +70,7 @@
                                  grpc_process_auth_metadata_done_cb cb,
                                  void *user_data) {
   GPR_ASSERT(state == NULL);
-  cb(user_data, NULL, 0, 0);
+  cb(user_data, NULL, 0, NULL, 0, GRPC_STATUS_UNAUTHENTICATED, NULL);
 }
 
 static void chttp2_init_client_secure_fullstack(grpc_end2end_test_fixture *f,
diff --git a/test/core/end2end/fixtures/chttp2_simple_ssl_fullstack.c b/test/core/end2end/fixtures/chttp2_simple_ssl_fullstack.c
index 491a293..beae241 100644
--- a/test/core/end2end/fixtures/chttp2_simple_ssl_fullstack.c
+++ b/test/core/end2end/fixtures/chttp2_simple_ssl_fullstack.c
@@ -73,7 +73,7 @@
                                  grpc_process_auth_metadata_done_cb cb,
                                  void *user_data) {
   GPR_ASSERT(state == NULL);
-  cb(user_data, NULL, 0, 0);
+  cb(user_data, NULL, 0, NULL, 0, GRPC_STATUS_UNAUTHENTICATED, NULL);
 }
 
 static void chttp2_init_client_secure_fullstack(grpc_end2end_test_fixture *f,
diff --git a/test/core/end2end/fixtures/chttp2_simple_ssl_fullstack_with_poll.c b/test/core/end2end/fixtures/chttp2_simple_ssl_fullstack_with_poll.c
index f2736cc..c8971be 100644
--- a/test/core/end2end/fixtures/chttp2_simple_ssl_fullstack_with_poll.c
+++ b/test/core/end2end/fixtures/chttp2_simple_ssl_fullstack_with_poll.c
@@ -73,7 +73,7 @@
                                  grpc_process_auth_metadata_done_cb cb,
                                  void *user_data) {
   GPR_ASSERT(state == NULL);
-  cb(user_data, NULL, 0, 0);
+  cb(user_data, NULL, 0, NULL, 0, GRPC_STATUS_UNAUTHENTICATED, NULL);
 }
 
 static void chttp2_init_client_secure_fullstack(grpc_end2end_test_fixture *f,
diff --git a/test/core/end2end/fixtures/chttp2_simple_ssl_fullstack_with_proxy.c b/test/core/end2end/fixtures/chttp2_simple_ssl_fullstack_with_proxy.c
index cc0b9db..a518a7d 100644
--- a/test/core/end2end/fixtures/chttp2_simple_ssl_fullstack_with_proxy.c
+++ b/test/core/end2end/fixtures/chttp2_simple_ssl_fullstack_with_proxy.c
@@ -101,7 +101,7 @@
                                  grpc_process_auth_metadata_done_cb cb,
                                  void *user_data) {
   GPR_ASSERT(state == NULL);
-  cb(user_data, NULL, 0, 0);
+  cb(user_data, NULL, 0, NULL, 0, GRPC_STATUS_UNAUTHENTICATED, NULL);
 }
 
 static void chttp2_init_client_secure_fullstack(grpc_end2end_test_fixture *f,
diff --git a/test/core/end2end/fixtures/chttp2_simple_ssl_with_oauth2_fullstack.c b/test/core/end2end/fixtures/chttp2_simple_ssl_with_oauth2_fullstack.c
index d82e623..7f11028 100644
--- a/test/core/end2end/fixtures/chttp2_simple_ssl_with_oauth2_fullstack.c
+++ b/test/core/end2end/fixtures/chttp2_simple_ssl_with_oauth2_fullstack.c
@@ -79,7 +79,7 @@
                                          client_identity);
   GPR_ASSERT(grpc_auth_context_set_peer_identity_property_name(
                  ctx, client_identity_property_name) == 1);
-  cb(user_data, oauth2, 1, 1);
+  cb(user_data, oauth2, 1, NULL, 0, GRPC_STATUS_OK, NULL);
 }
 
 static void process_oauth2_failure(void *state, grpc_auth_context *ctx,
@@ -90,7 +90,7 @@
       find_metadata(md, md_count, "Authorization", oauth2_md);
   GPR_ASSERT(state == NULL);
   GPR_ASSERT(oauth2 != NULL);
-  cb(user_data, oauth2, 1, 0);
+  cb(user_data, oauth2, 1, NULL, 0, GRPC_STATUS_UNAUTHENTICATED, NULL);
 }
 
 static grpc_end2end_test_fixture chttp2_create_fixture_secure_fullstack(
diff --git a/test/core/httpcli/httpcli_test.c b/test/core/httpcli/httpcli_test.c
index 8dddfbe..42b2661 100644
--- a/test/core/httpcli/httpcli_test.c
+++ b/test/core/httpcli/httpcli_test.c
@@ -88,7 +88,8 @@
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   while (!g_done) {
     grpc_pollset_worker worker;
-    grpc_pollset_work(&g_pollset, &worker, n_seconds_time(20));
+    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      n_seconds_time(20));
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
   gpr_free(host);
@@ -114,7 +115,8 @@
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   while (!g_done) {
     grpc_pollset_worker worker;
-    grpc_pollset_work(&g_pollset, &worker, n_seconds_time(20));
+    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      n_seconds_time(20));
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
   gpr_free(host);
diff --git a/test/core/iomgr/endpoint_tests.c b/test/core/iomgr/endpoint_tests.c
index 8186c96..6ef8e9c 100644
--- a/test/core/iomgr/endpoint_tests.c
+++ b/test/core/iomgr/endpoint_tests.c
@@ -256,7 +256,8 @@
   while (!state.read_done || !state.write_done) {
     grpc_pollset_worker worker;
     GPR_ASSERT(gpr_time_cmp(gpr_now(GPR_CLOCK_MONOTONIC), deadline) < 0);
-    grpc_pollset_work(g_pollset, &worker, deadline);
+    grpc_pollset_work(g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      deadline);
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
 
@@ -353,7 +354,8 @@
         while (!write_st.done) {
           grpc_pollset_worker worker;
           GPR_ASSERT(gpr_time_cmp(gpr_now(deadline.clock_type), deadline) < 0);
-          grpc_pollset_work(g_pollset, &worker, deadline);
+          grpc_pollset_work(g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                            deadline);
         }
         gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
         grpc_endpoint_destroy(write_st.ep);
@@ -361,7 +363,8 @@
         while (!read_st.done) {
           grpc_pollset_worker worker;
           GPR_ASSERT(gpr_time_cmp(gpr_now(deadline.clock_type), deadline) < 0);
-          grpc_pollset_work(g_pollset, &worker, deadline);
+          grpc_pollset_work(g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                            deadline);
         }
         gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
         gpr_free(slices);
diff --git a/test/core/iomgr/fd_posix_test.c b/test/core/iomgr/fd_posix_test.c
index adcbcaf..8bba87d 100644
--- a/test/core/iomgr/fd_posix_test.c
+++ b/test/core/iomgr/fd_posix_test.c
@@ -250,7 +250,8 @@
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   while (!sv->done) {
     grpc_pollset_worker worker;
-    grpc_pollset_work(&g_pollset, &worker, gpr_inf_future(GPR_CLOCK_MONOTONIC));
+    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
 }
@@ -358,7 +359,8 @@
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   while (!cl->done) {
     grpc_pollset_worker worker;
-    grpc_pollset_work(&g_pollset, &worker, gpr_inf_future(GPR_CLOCK_MONOTONIC));
+    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
 }
@@ -448,7 +450,8 @@
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   while (a.cb_that_ran == NULL) {
     grpc_pollset_worker worker;
-    grpc_pollset_work(&g_pollset, &worker, gpr_inf_future(GPR_CLOCK_MONOTONIC));
+    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   GPR_ASSERT(a.cb_that_ran == first_read_callback);
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
@@ -467,7 +470,8 @@
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   while (b.cb_that_ran == NULL) {
     grpc_pollset_worker worker;
-    grpc_pollset_work(&g_pollset, &worker, gpr_inf_future(GPR_CLOCK_MONOTONIC));
+    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   /* Except now we verify that second_read_callback ran instead */
   GPR_ASSERT(b.cb_that_ran == second_read_callback);
diff --git a/test/core/iomgr/tcp_client_posix_test.c b/test/core/iomgr/tcp_client_posix_test.c
index 07bbe1f..dea0b33 100644
--- a/test/core/iomgr/tcp_client_posix_test.c
+++ b/test/core/iomgr/tcp_client_posix_test.c
@@ -112,7 +112,8 @@
 
   while (g_connections_complete == connections_complete_before) {
     grpc_pollset_worker worker;
-    grpc_pollset_work(&g_pollset, &worker, GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5));
+    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5));
   }
 
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
@@ -142,7 +143,8 @@
   /* wait for the connection callback to finish */
   while (g_connections_complete == connections_complete_before) {
     grpc_pollset_worker worker;
-    grpc_pollset_work(&g_pollset, &worker, test_deadline());
+    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      test_deadline());
   }
 
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
@@ -211,7 +213,8 @@
       GPR_ASSERT(g_connections_complete ==
                  connections_complete_before + is_after_deadline);
     }
-    grpc_pollset_work(&g_pollset, &worker, GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10));
+    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10));
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
 
diff --git a/test/core/iomgr/tcp_posix_test.c b/test/core/iomgr/tcp_posix_test.c
index 17a85ce..6ad8322 100644
--- a/test/core/iomgr/tcp_posix_test.c
+++ b/test/core/iomgr/tcp_posix_test.c
@@ -187,7 +187,8 @@
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   while (state.read_bytes < state.target_read_bytes) {
     grpc_pollset_worker worker;
-    grpc_pollset_work(&g_pollset, &worker, deadline);
+    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      deadline);
   }
   GPR_ASSERT(state.read_bytes == state.target_read_bytes);
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
@@ -224,7 +225,8 @@
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   while (state.read_bytes < state.target_read_bytes) {
     grpc_pollset_worker worker;
-    grpc_pollset_work(&g_pollset, &worker, deadline);
+    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      deadline);
   }
   GPR_ASSERT(state.read_bytes == state.target_read_bytes);
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
@@ -285,7 +287,8 @@
   for (;;) {
     grpc_pollset_worker worker;
     gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
-    grpc_pollset_work(&g_pollset, &worker, GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10));
+    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10));
     gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
     do {
       bytes_read =
@@ -365,7 +368,8 @@
       if (state.write_done) {
         break;
       }
-      grpc_pollset_work(&g_pollset, &worker, deadline);
+      grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                        deadline);
     }
     gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
   }
@@ -422,7 +426,8 @@
         if (state.write_done) {
           break;
         }
-        grpc_pollset_work(&g_pollset, &worker, deadline);
+        grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                          deadline);
       }
       gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
       break;
diff --git a/test/core/iomgr/tcp_server_posix_test.c b/test/core/iomgr/tcp_server_posix_test.c
index b82d7c0..29a20cb 100644
--- a/test/core/iomgr/tcp_server_posix_test.c
+++ b/test/core/iomgr/tcp_server_posix_test.c
@@ -137,7 +137,8 @@
     while (g_nconnects == nconnects_before &&
            gpr_time_cmp(deadline, gpr_now(deadline.clock_type)) > 0) {
       grpc_pollset_worker worker;
-      grpc_pollset_work(&g_pollset, &worker, deadline);
+      grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                        deadline);
     }
     gpr_log(GPR_DEBUG, "wait done");
 
diff --git a/test/core/iomgr/udp_server_test.c b/test/core/iomgr/udp_server_test.c
index 5a5f99f..471d5b5 100644
--- a/test/core/iomgr/udp_server_test.c
+++ b/test/core/iomgr/udp_server_test.c
@@ -146,7 +146,8 @@
     while (g_number_of_reads == number_of_reads_before &&
            gpr_time_cmp(deadline, gpr_now(deadline.clock_type)) > 0) {
       grpc_pollset_worker worker;
-      grpc_pollset_work(&g_pollset, &worker, deadline);
+      grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                        deadline);
     }
     GPR_ASSERT(g_number_of_reads == number_of_reads_before + 1);
     close(clifd);
diff --git a/test/core/security/oauth2_utils.c b/test/core/security/oauth2_utils.c
index 990855a..7df6fad 100644
--- a/test/core/security/oauth2_utils.c
+++ b/test/core/security/oauth2_utils.c
@@ -85,7 +85,7 @@
   gpr_mu_lock(GRPC_POLLSET_MU(&request.pollset));
   while (!request.is_done) {
     grpc_pollset_worker worker;
-    grpc_pollset_work(&request.pollset, &worker,
+    grpc_pollset_work(&request.pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
                       gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(&request.pollset));
diff --git a/test/core/security/print_google_default_creds_token.c b/test/core/security/print_google_default_creds_token.c
index b4323ab..753221c 100644
--- a/test/core/security/print_google_default_creds_token.c
+++ b/test/core/security/print_google_default_creds_token.c
@@ -96,8 +96,8 @@
   gpr_mu_lock(GRPC_POLLSET_MU(&sync.pollset));
   while (!sync.is_done) {
     grpc_pollset_worker worker;
-    grpc_pollset_work(&sync.pollset, &worker,
-                      gpr_inf_future(GPR_CLOCK_REALTIME));
+    grpc_pollset_work(&sync.pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(&sync.pollset));
 
diff --git a/test/core/security/verify_jwt.c b/test/core/security/verify_jwt.c
index 5ebde5f..f443266 100644
--- a/test/core/security/verify_jwt.c
+++ b/test/core/security/verify_jwt.c
@@ -111,7 +111,7 @@
   gpr_mu_lock(GRPC_POLLSET_MU(&sync.pollset));
   while (!sync.is_done) {
     grpc_pollset_worker worker;
-    grpc_pollset_work(&sync.pollset, &worker,
+    grpc_pollset_work(&sync.pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
                       gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(&sync.pollset));
diff --git a/test/core/util/port_posix.c b/test/core/util/port_posix.c
index cec0eeb..836e62a 100644
--- a/test/core/util/port_posix.c
+++ b/test/core/util/port_posix.c
@@ -178,7 +178,7 @@
   gpr_mu_lock(GRPC_POLLSET_MU(&pr.pollset));
   while (pr.port == -1) {
     grpc_pollset_worker worker;
-    grpc_pollset_work(&pr.pollset, &worker,
+    grpc_pollset_work(&pr.pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
                       GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1));
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(&pr.pollset));
diff --git a/test/core/util/reconnect_server.c b/test/core/util/reconnect_server.c
index 2a21133..a06cb50 100644
--- a/test/core/util/reconnect_server.c
+++ b/test/core/util/reconnect_server.c
@@ -134,7 +134,8 @@
       gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                    gpr_time_from_seconds(seconds, GPR_TIMESPAN));
   gpr_mu_lock(GRPC_POLLSET_MU(&server->pollset));
-  grpc_pollset_work(&server->pollset, &worker, deadline);
+  grpc_pollset_work(&server->pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                    deadline);
   gpr_mu_unlock(GRPC_POLLSET_MU(&server->pollset));
 }
 
diff --git a/test/cpp/client/channel_arguments_test.cc b/test/cpp/client/channel_arguments_test.cc
index 01c56cb..3d75e7b 100644
--- a/test/cpp/client/channel_arguments_test.cc
+++ b/test/cpp/client/channel_arguments_test.cc
@@ -31,7 +31,7 @@
  *
  */
 
-#include <grpc++/channel_arguments.h>
+#include <grpc++/support/channel_arguments.h>
 
 #include <grpc/grpc.h>
 #include <gtest/gtest.h>
diff --git a/test/cpp/common/auth_property_iterator_test.cc b/test/cpp/common/auth_property_iterator_test.cc
index bf17842..630c38c 100644
--- a/test/cpp/common/auth_property_iterator_test.cc
+++ b/test/cpp/common/auth_property_iterator_test.cc
@@ -32,7 +32,7 @@
  */
 
 #include <grpc/grpc_security.h>
-#include <grpc++/auth_context.h>
+#include <grpc++/support/auth_context.h>
 #include <gtest/gtest.h>
 #include "src/cpp/common/secure_auth_context.h"
 
diff --git a/test/cpp/common/secure_auth_context_test.cc b/test/cpp/common/secure_auth_context_test.cc
index e0376c9..c71ef58 100644
--- a/test/cpp/common/secure_auth_context_test.cc
+++ b/test/cpp/common/secure_auth_context_test.cc
@@ -32,7 +32,7 @@
  */
 
 #include <grpc/grpc_security.h>
-#include <grpc++/auth_context.h>
+#include <grpc++/support/auth_context.h>
 #include <gtest/gtest.h>
 #include "src/cpp/common/secure_auth_context.h"
 
diff --git a/test/cpp/end2end/async_end2end_test.cc b/test/cpp/end2end/async_end2end_test.cc
index a30c841..6343810 100644
--- a/test/cpp/end2end/async_end2end_test.cc
+++ b/test/cpp/end2end/async_end2end_test.cc
@@ -33,13 +33,10 @@
 
 #include <memory>
 
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-#include "test/cpp/util/echo_duplicate.grpc.pb.h"
-#include "test/cpp/util/echo.grpc.pb.h"
-#include <grpc++/async_unary_call.h>
-#include <grpc++/channel_arguments.h>
-#include <grpc++/channel_interface.h>
+#include <grpc/grpc.h>
+#include <grpc/support/thd.h>
+#include <grpc/support/time.h>
+#include <grpc++/channel.h>
 #include <grpc++/client_context.h>
 #include <grpc++/create_channel.h>
 #include <grpc++/credentials.h>
@@ -47,14 +44,16 @@
 #include <grpc++/server_builder.h>
 #include <grpc++/server_context.h>
 #include <grpc++/server_credentials.h>
-#include <grpc++/status.h>
-#include <grpc++/stream.h>
-#include <grpc++/time.h>
 #include <gtest/gtest.h>
 
-#include <grpc/grpc.h>
-#include <grpc/support/thd.h>
-#include <grpc/support/time.h>
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/util/echo_duplicate.grpc.pb.h"
+#include "test/cpp/util/echo.grpc.pb.h"
+
+#ifdef GPR_POSIX_SOCKET
+#include "src/core/iomgr/pollset_posix.h"
+#endif
 
 using grpc::cpp::test::util::EchoRequest;
 using grpc::cpp::test::util::EchoResponse;
@@ -67,8 +66,41 @@
 
 void* tag(int i) { return (void*)(gpr_intptr)i; }
 
-class Verifier {
+#ifdef GPR_POSIX_SOCKET
+static int assert_non_blocking_poll(struct pollfd* pfds, nfds_t nfds,
+                                    int timeout) {
+  GPR_ASSERT(timeout == 0);
+  return poll(pfds, nfds, timeout);
+}
+
+class PollOverride {
  public:
+  PollOverride(grpc_poll_function_type f) {
+    prev_ = grpc_poll_function;
+    grpc_poll_function = f;
+  }
+
+  ~PollOverride() { grpc_poll_function = prev_; }
+
+ private:
+  grpc_poll_function_type prev_;
+};
+
+class PollingCheckRegion : public PollOverride {
+ public:
+  explicit PollingCheckRegion(bool allow_blocking)
+      : PollOverride(allow_blocking ? poll : assert_non_blocking_poll) {}
+};
+#else
+class PollingCheckRegion {
+ public:
+  explicit PollingCheckRegion(bool allow_blocking) {}
+};
+#endif
+
+class Verifier : public PollingCheckRegion {
+ public:
+  explicit Verifier(bool spin) : PollingCheckRegion(!spin), spin_(spin) {}
   Verifier& Expect(int i, bool expect_ok) {
     expectations_[tag(i)] = expect_ok;
     return *this;
@@ -78,7 +110,17 @@
     while (!expectations_.empty()) {
       bool ok;
       void* got_tag;
-      EXPECT_TRUE(cq->Next(&got_tag, &ok));
+      if (spin_) {
+        for (;;) {
+          auto r = cq->AsyncNext(&got_tag, &ok, gpr_time_0(GPR_CLOCK_REALTIME));
+          if (r == CompletionQueue::TIMEOUT) continue;
+          if (r == CompletionQueue::GOT_EVENT) break;
+          gpr_log(GPR_ERROR, "unexpected result from AsyncNext");
+          abort();
+        }
+      } else {
+        EXPECT_TRUE(cq->Next(&got_tag, &ok));
+      }
       auto it = expectations_.find(got_tag);
       EXPECT_TRUE(it != expectations_.end());
       EXPECT_EQ(it->second, ok);
@@ -90,14 +132,34 @@
     if (expectations_.empty()) {
       bool ok;
       void* got_tag;
-      EXPECT_EQ(cq->AsyncNext(&got_tag, &ok, deadline),
-                CompletionQueue::TIMEOUT);
+      if (spin_) {
+        while (std::chrono::system_clock::now() < deadline) {
+          EXPECT_EQ(
+              cq->AsyncNext(&got_tag, &ok, gpr_time_0(GPR_CLOCK_REALTIME)),
+              CompletionQueue::TIMEOUT);
+        }
+      } else {
+        EXPECT_EQ(cq->AsyncNext(&got_tag, &ok, deadline),
+                  CompletionQueue::TIMEOUT);
+      }
     } else {
       while (!expectations_.empty()) {
         bool ok;
         void* got_tag;
-        EXPECT_EQ(cq->AsyncNext(&got_tag, &ok, deadline),
-                  CompletionQueue::GOT_EVENT);
+        if (spin_) {
+          for (;;) {
+            GPR_ASSERT(std::chrono::system_clock::now() < deadline);
+            auto r =
+                cq->AsyncNext(&got_tag, &ok, gpr_time_0(GPR_CLOCK_REALTIME));
+            if (r == CompletionQueue::TIMEOUT) continue;
+            if (r == CompletionQueue::GOT_EVENT) break;
+            gpr_log(GPR_ERROR, "unexpected result from AsyncNext");
+            abort();
+          }
+        } else {
+          EXPECT_EQ(cq->AsyncNext(&got_tag, &ok, deadline),
+                    CompletionQueue::GOT_EVENT);
+        }
         auto it = expectations_.find(got_tag);
         EXPECT_TRUE(it != expectations_.end());
         EXPECT_EQ(it->second, ok);
@@ -108,9 +170,10 @@
 
  private:
   std::map<void*, bool> expectations_;
+  bool spin_;
 };
 
-class AsyncEnd2endTest : public ::testing::Test {
+class AsyncEnd2endTest : public ::testing::TestWithParam<bool> {
  protected:
   AsyncEnd2endTest() {}
 
@@ -136,7 +199,7 @@
   }
 
   void ResetStub() {
-    std::shared_ptr<ChannelInterface> channel = CreateChannel(
+    std::shared_ptr<Channel> channel = CreateChannel(
         server_address_.str(), InsecureCredentials(), ChannelArguments());
     stub_ = std::move(grpc::cpp::test::util::TestService::NewStub(channel));
   }
@@ -160,15 +223,15 @@
       service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
                            cq_.get(), tag(2));
 
-      Verifier().Expect(2, true).Verify(cq_.get());
+      Verifier(GetParam()).Expect(2, true).Verify(cq_.get());
       EXPECT_EQ(send_request.message(), recv_request.message());
 
       send_response.set_message(recv_request.message());
       response_writer.Finish(send_response, Status::OK, tag(3));
-      Verifier().Expect(3, true).Verify(cq_.get());
+      Verifier(GetParam()).Expect(3, true).Verify(cq_.get());
 
       response_reader->Finish(&recv_response, &recv_status, tag(4));
-      Verifier().Expect(4, true).Verify(cq_.get());
+      Verifier(GetParam()).Expect(4, true).Verify(cq_.get());
 
       EXPECT_EQ(send_response.message(), recv_response.message());
       EXPECT_TRUE(recv_status.ok());
@@ -182,18 +245,18 @@
   std::ostringstream server_address_;
 };
 
-TEST_F(AsyncEnd2endTest, SimpleRpc) {
+TEST_P(AsyncEnd2endTest, SimpleRpc) {
   ResetStub();
   SendRpc(1);
 }
 
-TEST_F(AsyncEnd2endTest, SequentialRpcs) {
+TEST_P(AsyncEnd2endTest, SequentialRpcs) {
   ResetStub();
   SendRpc(10);
 }
 
 // Test a simple RPC using the async version of Next
-TEST_F(AsyncEnd2endTest, AsyncNextRpc) {
+TEST_P(AsyncEnd2endTest, AsyncNextRpc) {
   ResetStub();
 
   EchoRequest send_request;
@@ -214,30 +277,32 @@
       std::chrono::system_clock::now());
   std::chrono::system_clock::time_point time_limit(
       std::chrono::system_clock::now() + std::chrono::seconds(10));
-  Verifier().Verify(cq_.get(), time_now);
-  Verifier().Verify(cq_.get(), time_now);
+  Verifier(GetParam()).Verify(cq_.get(), time_now);
+  Verifier(GetParam()).Verify(cq_.get(), time_now);
 
   service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
                        cq_.get(), tag(2));
 
-  Verifier().Expect(2, true).Verify(cq_.get(), time_limit);
+  Verifier(GetParam()).Expect(2, true).Verify(cq_.get(), time_limit);
   EXPECT_EQ(send_request.message(), recv_request.message());
 
   send_response.set_message(recv_request.message());
   response_writer.Finish(send_response, Status::OK, tag(3));
-  Verifier().Expect(3, true).Verify(
-      cq_.get(), std::chrono::system_clock::time_point::max());
+  Verifier(GetParam())
+      .Expect(3, true)
+      .Verify(cq_.get(), std::chrono::system_clock::time_point::max());
 
   response_reader->Finish(&recv_response, &recv_status, tag(4));
-  Verifier().Expect(4, true).Verify(
-      cq_.get(), std::chrono::system_clock::time_point::max());
+  Verifier(GetParam())
+      .Expect(4, true)
+      .Verify(cq_.get(), std::chrono::system_clock::time_point::max());
 
   EXPECT_EQ(send_response.message(), recv_response.message());
   EXPECT_TRUE(recv_status.ok());
 }
 
 // Two pings and a final pong.
-TEST_F(AsyncEnd2endTest, SimpleClientStreaming) {
+TEST_P(AsyncEnd2endTest, SimpleClientStreaming) {
   ResetStub();
 
   EchoRequest send_request;
@@ -256,41 +321,41 @@
   service_.RequestRequestStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
                                 tag(2));
 
-  Verifier().Expect(2, true).Expect(1, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(2, true).Expect(1, true).Verify(cq_.get());
 
   cli_stream->Write(send_request, tag(3));
-  Verifier().Expect(3, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(3, true).Verify(cq_.get());
 
   srv_stream.Read(&recv_request, tag(4));
-  Verifier().Expect(4, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(4, true).Verify(cq_.get());
   EXPECT_EQ(send_request.message(), recv_request.message());
 
   cli_stream->Write(send_request, tag(5));
-  Verifier().Expect(5, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(5, true).Verify(cq_.get());
 
   srv_stream.Read(&recv_request, tag(6));
-  Verifier().Expect(6, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(6, true).Verify(cq_.get());
 
   EXPECT_EQ(send_request.message(), recv_request.message());
   cli_stream->WritesDone(tag(7));
-  Verifier().Expect(7, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(7, true).Verify(cq_.get());
 
   srv_stream.Read(&recv_request, tag(8));
-  Verifier().Expect(8, false).Verify(cq_.get());
+  Verifier(GetParam()).Expect(8, false).Verify(cq_.get());
 
   send_response.set_message(recv_request.message());
   srv_stream.Finish(send_response, Status::OK, tag(9));
-  Verifier().Expect(9, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(9, true).Verify(cq_.get());
 
   cli_stream->Finish(&recv_status, tag(10));
-  Verifier().Expect(10, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(10, true).Verify(cq_.get());
 
   EXPECT_EQ(send_response.message(), recv_response.message());
   EXPECT_TRUE(recv_status.ok());
 }
 
 // One ping, two pongs.
-TEST_F(AsyncEnd2endTest, SimpleServerStreaming) {
+TEST_P(AsyncEnd2endTest, SimpleServerStreaming) {
   ResetStub();
 
   EchoRequest send_request;
@@ -309,38 +374,38 @@
   service_.RequestResponseStream(&srv_ctx, &recv_request, &srv_stream,
                                  cq_.get(), cq_.get(), tag(2));
 
-  Verifier().Expect(1, true).Expect(2, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(1, true).Expect(2, true).Verify(cq_.get());
   EXPECT_EQ(send_request.message(), recv_request.message());
 
   send_response.set_message(recv_request.message());
   srv_stream.Write(send_response, tag(3));
-  Verifier().Expect(3, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(3, true).Verify(cq_.get());
 
   cli_stream->Read(&recv_response, tag(4));
-  Verifier().Expect(4, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(4, true).Verify(cq_.get());
   EXPECT_EQ(send_response.message(), recv_response.message());
 
   srv_stream.Write(send_response, tag(5));
-  Verifier().Expect(5, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(5, true).Verify(cq_.get());
 
   cli_stream->Read(&recv_response, tag(6));
-  Verifier().Expect(6, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(6, true).Verify(cq_.get());
   EXPECT_EQ(send_response.message(), recv_response.message());
 
   srv_stream.Finish(Status::OK, tag(7));
-  Verifier().Expect(7, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(7, true).Verify(cq_.get());
 
   cli_stream->Read(&recv_response, tag(8));
-  Verifier().Expect(8, false).Verify(cq_.get());
+  Verifier(GetParam()).Expect(8, false).Verify(cq_.get());
 
   cli_stream->Finish(&recv_status, tag(9));
-  Verifier().Expect(9, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(9, true).Verify(cq_.get());
 
   EXPECT_TRUE(recv_status.ok());
 }
 
 // One ping, one pong.
-TEST_F(AsyncEnd2endTest, SimpleBidiStreaming) {
+TEST_P(AsyncEnd2endTest, SimpleBidiStreaming) {
   ResetStub();
 
   EchoRequest send_request;
@@ -359,40 +424,40 @@
   service_.RequestBidiStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
                              tag(2));
 
-  Verifier().Expect(1, true).Expect(2, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(1, true).Expect(2, true).Verify(cq_.get());
 
   cli_stream->Write(send_request, tag(3));
-  Verifier().Expect(3, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(3, true).Verify(cq_.get());
 
   srv_stream.Read(&recv_request, tag(4));
-  Verifier().Expect(4, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(4, true).Verify(cq_.get());
   EXPECT_EQ(send_request.message(), recv_request.message());
 
   send_response.set_message(recv_request.message());
   srv_stream.Write(send_response, tag(5));
-  Verifier().Expect(5, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(5, true).Verify(cq_.get());
 
   cli_stream->Read(&recv_response, tag(6));
-  Verifier().Expect(6, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(6, true).Verify(cq_.get());
   EXPECT_EQ(send_response.message(), recv_response.message());
 
   cli_stream->WritesDone(tag(7));
-  Verifier().Expect(7, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(7, true).Verify(cq_.get());
 
   srv_stream.Read(&recv_request, tag(8));
-  Verifier().Expect(8, false).Verify(cq_.get());
+  Verifier(GetParam()).Expect(8, false).Verify(cq_.get());
 
   srv_stream.Finish(Status::OK, tag(9));
-  Verifier().Expect(9, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(9, true).Verify(cq_.get());
 
   cli_stream->Finish(&recv_status, tag(10));
-  Verifier().Expect(10, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(10, true).Verify(cq_.get());
 
   EXPECT_TRUE(recv_status.ok());
 }
 
 // Metadata tests
-TEST_F(AsyncEnd2endTest, ClientInitialMetadataRpc) {
+TEST_P(AsyncEnd2endTest, ClientInitialMetadataRpc) {
   ResetStub();
 
   EchoRequest send_request;
@@ -416,7 +481,7 @@
 
   service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
                        cq_.get(), tag(2));
-  Verifier().Expect(2, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(2, true).Verify(cq_.get());
   EXPECT_EQ(send_request.message(), recv_request.message());
   auto client_initial_metadata = srv_ctx.client_metadata();
   EXPECT_EQ(meta1.second, client_initial_metadata.find(meta1.first)->second);
@@ -426,16 +491,16 @@
   send_response.set_message(recv_request.message());
   response_writer.Finish(send_response, Status::OK, tag(3));
 
-  Verifier().Expect(3, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(3, true).Verify(cq_.get());
 
   response_reader->Finish(&recv_response, &recv_status, tag(4));
-  Verifier().Expect(4, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(4, true).Verify(cq_.get());
 
   EXPECT_EQ(send_response.message(), recv_response.message());
   EXPECT_TRUE(recv_status.ok());
 }
 
-TEST_F(AsyncEnd2endTest, ServerInitialMetadataRpc) {
+TEST_P(AsyncEnd2endTest, ServerInitialMetadataRpc) {
   ResetStub();
 
   EchoRequest send_request;
@@ -457,15 +522,15 @@
 
   service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
                        cq_.get(), tag(2));
-  Verifier().Expect(2, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(2, true).Verify(cq_.get());
   EXPECT_EQ(send_request.message(), recv_request.message());
   srv_ctx.AddInitialMetadata(meta1.first, meta1.second);
   srv_ctx.AddInitialMetadata(meta2.first, meta2.second);
   response_writer.SendInitialMetadata(tag(3));
-  Verifier().Expect(3, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(3, true).Verify(cq_.get());
 
   response_reader->ReadInitialMetadata(tag(4));
-  Verifier().Expect(4, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(4, true).Verify(cq_.get());
   auto server_initial_metadata = cli_ctx.GetServerInitialMetadata();
   EXPECT_EQ(meta1.second, server_initial_metadata.find(meta1.first)->second);
   EXPECT_EQ(meta2.second, server_initial_metadata.find(meta2.first)->second);
@@ -473,16 +538,16 @@
 
   send_response.set_message(recv_request.message());
   response_writer.Finish(send_response, Status::OK, tag(5));
-  Verifier().Expect(5, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(5, true).Verify(cq_.get());
 
   response_reader->Finish(&recv_response, &recv_status, tag(6));
-  Verifier().Expect(6, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(6, true).Verify(cq_.get());
 
   EXPECT_EQ(send_response.message(), recv_response.message());
   EXPECT_TRUE(recv_status.ok());
 }
 
-TEST_F(AsyncEnd2endTest, ServerTrailingMetadataRpc) {
+TEST_P(AsyncEnd2endTest, ServerTrailingMetadataRpc) {
   ResetStub();
 
   EchoRequest send_request;
@@ -504,20 +569,20 @@
 
   service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
                        cq_.get(), tag(2));
-  Verifier().Expect(2, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(2, true).Verify(cq_.get());
   EXPECT_EQ(send_request.message(), recv_request.message());
   response_writer.SendInitialMetadata(tag(3));
-  Verifier().Expect(3, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(3, true).Verify(cq_.get());
 
   send_response.set_message(recv_request.message());
   srv_ctx.AddTrailingMetadata(meta1.first, meta1.second);
   srv_ctx.AddTrailingMetadata(meta2.first, meta2.second);
   response_writer.Finish(send_response, Status::OK, tag(4));
 
-  Verifier().Expect(4, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(4, true).Verify(cq_.get());
 
   response_reader->Finish(&recv_response, &recv_status, tag(5));
-  Verifier().Expect(5, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(5, true).Verify(cq_.get());
   EXPECT_EQ(send_response.message(), recv_response.message());
   EXPECT_TRUE(recv_status.ok());
   auto server_trailing_metadata = cli_ctx.GetServerTrailingMetadata();
@@ -526,7 +591,7 @@
   EXPECT_EQ(static_cast<size_t>(2), server_trailing_metadata.size());
 }
 
-TEST_F(AsyncEnd2endTest, MetadataRpc) {
+TEST_P(AsyncEnd2endTest, MetadataRpc) {
   ResetStub();
 
   EchoRequest send_request;
@@ -563,7 +628,7 @@
 
   service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
                        cq_.get(), tag(2));
-  Verifier().Expect(2, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(2, true).Verify(cq_.get());
   EXPECT_EQ(send_request.message(), recv_request.message());
   auto client_initial_metadata = srv_ctx.client_metadata();
   EXPECT_EQ(meta1.second, client_initial_metadata.find(meta1.first)->second);
@@ -573,9 +638,9 @@
   srv_ctx.AddInitialMetadata(meta3.first, meta3.second);
   srv_ctx.AddInitialMetadata(meta4.first, meta4.second);
   response_writer.SendInitialMetadata(tag(3));
-  Verifier().Expect(3, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(3, true).Verify(cq_.get());
   response_reader->ReadInitialMetadata(tag(4));
-  Verifier().Expect(4, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(4, true).Verify(cq_.get());
   auto server_initial_metadata = cli_ctx.GetServerInitialMetadata();
   EXPECT_EQ(meta3.second, server_initial_metadata.find(meta3.first)->second);
   EXPECT_EQ(meta4.second, server_initial_metadata.find(meta4.first)->second);
@@ -586,10 +651,10 @@
   srv_ctx.AddTrailingMetadata(meta6.first, meta6.second);
   response_writer.Finish(send_response, Status::OK, tag(5));
 
-  Verifier().Expect(5, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(5, true).Verify(cq_.get());
 
   response_reader->Finish(&recv_response, &recv_status, tag(6));
-  Verifier().Expect(6, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(6, true).Verify(cq_.get());
   EXPECT_EQ(send_response.message(), recv_response.message());
   EXPECT_TRUE(recv_status.ok());
   auto server_trailing_metadata = cli_ctx.GetServerTrailingMetadata();
@@ -599,7 +664,7 @@
 }
 
 // Server uses AsyncNotifyWhenDone API to check for cancellation
-TEST_F(AsyncEnd2endTest, ServerCheckCancellation) {
+TEST_P(AsyncEnd2endTest, ServerCheckCancellation) {
   ResetStub();
 
   EchoRequest send_request;
@@ -620,21 +685,21 @@
   service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
                        cq_.get(), tag(2));
 
-  Verifier().Expect(2, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(2, true).Verify(cq_.get());
   EXPECT_EQ(send_request.message(), recv_request.message());
 
   cli_ctx.TryCancel();
-  Verifier().Expect(5, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(5, true).Verify(cq_.get());
   EXPECT_TRUE(srv_ctx.IsCancelled());
 
   response_reader->Finish(&recv_response, &recv_status, tag(4));
-  Verifier().Expect(4, false).Verify(cq_.get());
+  Verifier(GetParam()).Expect(4, false).Verify(cq_.get());
 
   EXPECT_EQ(StatusCode::CANCELLED, recv_status.error_code());
 }
 
 // Server uses AsyncNotifyWhenDone API to check for normal finish
-TEST_F(AsyncEnd2endTest, ServerCheckDone) {
+TEST_P(AsyncEnd2endTest, ServerCheckDone) {
   ResetStub();
 
   EchoRequest send_request;
@@ -655,24 +720,24 @@
   service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
                        cq_.get(), tag(2));
 
-  Verifier().Expect(2, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(2, true).Verify(cq_.get());
   EXPECT_EQ(send_request.message(), recv_request.message());
 
   send_response.set_message(recv_request.message());
   response_writer.Finish(send_response, Status::OK, tag(3));
-  Verifier().Expect(3, true).Verify(cq_.get());
-  Verifier().Expect(5, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(3, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(5, true).Verify(cq_.get());
   EXPECT_FALSE(srv_ctx.IsCancelled());
 
   response_reader->Finish(&recv_response, &recv_status, tag(4));
-  Verifier().Expect(4, true).Verify(cq_.get());
+  Verifier(GetParam()).Expect(4, true).Verify(cq_.get());
 
   EXPECT_EQ(send_response.message(), recv_response.message());
   EXPECT_TRUE(recv_status.ok());
 }
 
-TEST_F(AsyncEnd2endTest, UnimplementedRpc) {
-  std::shared_ptr<ChannelInterface> channel = CreateChannel(
+TEST_P(AsyncEnd2endTest, UnimplementedRpc) {
+  std::shared_ptr<Channel> channel = CreateChannel(
       server_address_.str(), InsecureCredentials(), ChannelArguments());
   std::unique_ptr<grpc::cpp::test::util::UnimplementedService::Stub> stub;
   stub =
@@ -687,12 +752,15 @@
       stub->AsyncUnimplemented(&cli_ctx, send_request, cq_.get()));
 
   response_reader->Finish(&recv_response, &recv_status, tag(4));
-  Verifier().Expect(4, false).Verify(cq_.get());
+  Verifier(GetParam()).Expect(4, false).Verify(cq_.get());
 
   EXPECT_EQ(StatusCode::UNIMPLEMENTED, recv_status.error_code());
   EXPECT_EQ("", recv_status.error_message());
 }
 
+INSTANTIATE_TEST_CASE_P(AsyncEnd2end, AsyncEnd2endTest,
+                        ::testing::Values(false, true));
+
 }  // namespace
 }  // namespace testing
 }  // namespace grpc
diff --git a/test/cpp/end2end/client_crash_test.cc b/test/cpp/end2end/client_crash_test.cc
index 1c2a5c3..3359080 100644
--- a/test/cpp/end2end/client_crash_test.cc
+++ b/test/cpp/end2end/client_crash_test.cc
@@ -31,12 +31,10 @@
  *
  */
 
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-#include "test/cpp/util/echo_duplicate.grpc.pb.h"
-#include "test/cpp/util/echo.grpc.pb.h"
-#include <grpc++/channel_arguments.h>
-#include <grpc++/channel_interface.h>
+#include <grpc/grpc.h>
+#include <grpc/support/thd.h>
+#include <grpc/support/time.h>
+#include <grpc++/channel.h>
 #include <grpc++/client_context.h>
 #include <grpc++/create_channel.h>
 #include <grpc++/credentials.h>
@@ -44,15 +42,12 @@
 #include <grpc++/server_builder.h>
 #include <grpc++/server_context.h>
 #include <grpc++/server_credentials.h>
-#include <grpc++/status.h>
-#include <grpc++/stream.h>
-#include <grpc++/time.h>
 #include <gtest/gtest.h>
 
-#include <grpc/grpc.h>
-#include <grpc/support/thd.h>
-#include <grpc/support/time.h>
-
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/util/echo_duplicate.grpc.pb.h"
+#include "test/cpp/util/echo.grpc.pb.h"
 #include "test/cpp/util/subprocess.h"
 
 using grpc::cpp::test::util::EchoRequest;
diff --git a/test/cpp/end2end/client_crash_test_server.cc b/test/cpp/end2end/client_crash_test_server.cc
index 3fd8c2c..79a7832 100644
--- a/test/cpp/end2end/client_crash_test_server.cc
+++ b/test/cpp/end2end/client_crash_test_server.cc
@@ -40,7 +40,6 @@
 #include <grpc++/server_builder.h>
 #include <grpc++/server_context.h>
 #include <grpc++/server_credentials.h>
-#include <grpc++/status.h>
 #include "test/cpp/util/echo.grpc.pb.h"
 
 DEFINE_string(address, "", "Address to bind to");
diff --git a/test/cpp/end2end/end2end_test.cc b/test/cpp/end2end/end2end_test.cc
index 350b107..2728dce 100644
--- a/test/cpp/end2end/end2end_test.cc
+++ b/test/cpp/end2end/end2end_test.cc
@@ -34,30 +34,25 @@
 #include <mutex>
 #include <thread>
 
+#include <grpc/grpc.h>
+#include <grpc/support/thd.h>
+#include <grpc/support/time.h>
+#include <grpc++/channel.h>
+#include <grpc++/client_context.h>
+#include <grpc++/create_channel.h>
+#include <grpc++/credentials.h>
+#include <grpc++/server.h>
+#include <grpc++/server_builder.h>
+#include <grpc++/server_context.h>
+#include <grpc++/server_credentials.h>
+#include <gtest/gtest.h>
+
 #include "src/core/security/credentials.h"
 #include "test/core/end2end/data/ssl_test_data.h"
 #include "test/core/util/port.h"
 #include "test/core/util/test_config.h"
 #include "test/cpp/util/echo_duplicate.grpc.pb.h"
 #include "test/cpp/util/echo.grpc.pb.h"
-#include <grpc++/channel_arguments.h>
-#include <grpc++/channel_interface.h>
-#include <grpc++/client_context.h>
-#include <grpc++/create_channel.h>
-#include <grpc++/credentials.h>
-#include <grpc++/dynamic_thread_pool.h>
-#include <grpc++/server.h>
-#include <grpc++/server_builder.h>
-#include <grpc++/server_context.h>
-#include <grpc++/server_credentials.h>
-#include <grpc++/status.h>
-#include <grpc++/stream.h>
-#include <grpc++/time.h>
-#include <gtest/gtest.h>
-
-#include <grpc/grpc.h>
-#include <grpc/support/thd.h>
-#include <grpc/support/time.h>
 
 using grpc::cpp::test::util::EchoRequest;
 using grpc::cpp::test::util::EchoResponse;
@@ -106,7 +101,7 @@
 
 class Proxy : public ::grpc::cpp::test::util::TestService::Service {
  public:
-  Proxy(std::shared_ptr<ChannelInterface> channel)
+  Proxy(std::shared_ptr<Channel> channel)
       : stub_(grpc::cpp::test::util::TestService::NewStub(channel)) {}
 
   Status Echo(ServerContext* server_context, const EchoRequest* request,
@@ -262,7 +257,7 @@
 class End2endTest : public ::testing::TestWithParam<bool> {
  protected:
   End2endTest()
-      : kMaxMessageSize_(8192), special_service_("special"), thread_pool_(2) {}
+      : kMaxMessageSize_(8192), special_service_("special") {}
 
   void SetUp() GRPC_OVERRIDE {
     int port = grpc_pick_unused_port_or_die();
@@ -281,7 +276,6 @@
     builder.SetMaxMessageSize(
         kMaxMessageSize_);  // For testing max message size.
     builder.RegisterService(&dup_pkg_service_);
-    builder.SetThreadPool(&thread_pool_);
     server_ = builder.BuildAndStart();
   }
 
@@ -309,7 +303,6 @@
       ServerBuilder builder;
       builder.AddListeningPort(proxyaddr.str(), InsecureServerCredentials());
       builder.RegisterService(proxy_service_.get());
-      builder.SetThreadPool(&thread_pool_);
       proxy_server_ = builder.BuildAndStart();
 
       channel_ = CreateChannel(proxyaddr.str(), InsecureCredentials(),
@@ -319,7 +312,7 @@
     stub_ = std::move(grpc::cpp::test::util::TestService::NewStub(channel_));
   }
 
-  std::shared_ptr<ChannelInterface> channel_;
+  std::shared_ptr<Channel> channel_;
   std::unique_ptr<grpc::cpp::test::util::TestService::Stub> stub_;
   std::unique_ptr<Server> server_;
   std::unique_ptr<Server> proxy_server_;
@@ -329,7 +322,6 @@
   TestServiceImpl service_;
   TestServiceImpl special_service_;
   TestServiceImplDupPkg dup_pkg_service_;
-  DynamicThreadPool thread_pool_;
 };
 
 static void SendRpc(grpc::cpp::test::util::TestService::Stub* stub,
@@ -571,7 +563,7 @@
 TEST_F(End2endTest, BadCredentials) {
   std::shared_ptr<Credentials> bad_creds = ServiceAccountCredentials("", "", 1);
   EXPECT_EQ(static_cast<Credentials*>(nullptr), bad_creds.get());
-  std::shared_ptr<ChannelInterface> channel =
+  std::shared_ptr<Channel> channel =
       CreateChannel(server_address_.str(), bad_creds, ChannelArguments());
   std::unique_ptr<grpc::cpp::test::util::TestService::Stub> stub(
       grpc::cpp::test::util::TestService::NewStub(channel));
diff --git a/test/cpp/end2end/generic_end2end_test.cc b/test/cpp/end2end/generic_end2end_test.cc
index 3120cec..de7eab8 100644
--- a/test/cpp/end2end/generic_end2end_test.cc
+++ b/test/cpp/end2end/generic_end2end_test.cc
@@ -33,32 +33,26 @@
 
 #include <memory>
 
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-#include "test/cpp/util/echo.grpc.pb.h"
+#include <grpc/grpc.h>
+#include <grpc/support/thd.h>
+#include <grpc/support/time.h>
 #include <grpc++/impl/proto_utils.h>
-#include <grpc++/async_generic_service.h>
-#include <grpc++/async_unary_call.h>
-#include <grpc++/byte_buffer.h>
-#include <grpc++/channel_arguments.h>
-#include <grpc++/channel_interface.h>
+#include <grpc++/channel.h>
 #include <grpc++/client_context.h>
 #include <grpc++/create_channel.h>
 #include <grpc++/credentials.h>
-#include <grpc++/generic_stub.h>
+#include <grpc++/generic/async_generic_service.h>
+#include <grpc++/generic/generic_stub.h>
 #include <grpc++/server.h>
 #include <grpc++/server_builder.h>
 #include <grpc++/server_context.h>
 #include <grpc++/server_credentials.h>
-#include <grpc++/slice.h>
-#include <grpc++/status.h>
-#include <grpc++/stream.h>
-#include <grpc++/time.h>
+#include <grpc++/support/slice.h>
 #include <gtest/gtest.h>
 
-#include <grpc/grpc.h>
-#include <grpc/support/thd.h>
-#include <grpc/support/time.h>
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/util/echo.grpc.pb.h"
 
 using grpc::cpp::test::util::EchoRequest;
 using grpc::cpp::test::util::EchoResponse;
@@ -127,7 +121,7 @@
   }
 
   void ResetStub() {
-    std::shared_ptr<ChannelInterface> channel = CreateChannel(
+    std::shared_ptr<Channel> channel = CreateChannel(
         server_address_.str(), InsecureCredentials(), ChannelArguments());
     generic_stub_.reset(new GenericStub(channel));
   }
diff --git a/test/cpp/end2end/mock_test.cc b/test/cpp/end2end/mock_test.cc
index 32130e2..b2c6dc3 100644
--- a/test/cpp/end2end/mock_test.cc
+++ b/test/cpp/end2end/mock_test.cc
@@ -33,28 +33,23 @@
 
 #include <thread>
 
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-#include "test/cpp/util/echo_duplicate.grpc.pb.h"
-#include "test/cpp/util/echo.grpc.pb.h"
-#include <grpc++/channel_arguments.h>
-#include <grpc++/channel_interface.h>
+#include <grpc/grpc.h>
+#include <grpc/support/thd.h>
+#include <grpc/support/time.h>
+#include <grpc++/channel.h>
 #include <grpc++/client_context.h>
 #include <grpc++/create_channel.h>
 #include <grpc++/credentials.h>
-#include <grpc++/dynamic_thread_pool.h>
 #include <grpc++/server.h>
 #include <grpc++/server_builder.h>
 #include <grpc++/server_context.h>
 #include <grpc++/server_credentials.h>
-#include <grpc++/status.h>
-#include <grpc++/stream.h>
-#include <grpc++/time.h>
 #include <gtest/gtest.h>
 
-#include <grpc/grpc.h>
-#include <grpc/support/thd.h>
-#include <grpc/support/time.h>
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/util/echo_duplicate.grpc.pb.h"
+#include "test/cpp/util/echo.grpc.pb.h"
 
 using grpc::cpp::test::util::EchoRequest;
 using grpc::cpp::test::util::EchoResponse;
@@ -234,7 +229,7 @@
 
 class MockTest : public ::testing::Test {
  protected:
-  MockTest() : thread_pool_(2) {}
+  MockTest() {}
 
   void SetUp() GRPC_OVERRIDE {
     int port = grpc_pick_unused_port_or_die();
@@ -244,14 +239,13 @@
     builder.AddListeningPort(server_address_.str(),
                              InsecureServerCredentials());
     builder.RegisterService(&service_);
-    builder.SetThreadPool(&thread_pool_);
     server_ = builder.BuildAndStart();
   }
 
   void TearDown() GRPC_OVERRIDE { server_->Shutdown(); }
 
   void ResetStub() {
-    std::shared_ptr<ChannelInterface> channel = CreateChannel(
+    std::shared_ptr<Channel> channel = CreateChannel(
         server_address_.str(), InsecureCredentials(), ChannelArguments());
     stub_ = std::move(grpc::cpp::test::util::TestService::NewStub(channel));
   }
@@ -260,7 +254,6 @@
   std::unique_ptr<Server> server_;
   std::ostringstream server_address_;
   TestServiceImpl service_;
-  DynamicThreadPool thread_pool_;
 };
 
 // Do one real rpc and one mocked one
diff --git a/test/cpp/end2end/server_crash_test.cc b/test/cpp/end2end/server_crash_test.cc
index 5c7bb4e..1a0f04e 100644
--- a/test/cpp/end2end/server_crash_test.cc
+++ b/test/cpp/end2end/server_crash_test.cc
@@ -31,12 +31,10 @@
  *
  */
 
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-#include "test/cpp/util/echo_duplicate.grpc.pb.h"
-#include "test/cpp/util/echo.grpc.pb.h"
-#include <grpc++/channel_arguments.h>
-#include <grpc++/channel_interface.h>
+#include <grpc/grpc.h>
+#include <grpc/support/thd.h>
+#include <grpc/support/time.h>
+#include <grpc++/channel.h>
 #include <grpc++/client_context.h>
 #include <grpc++/create_channel.h>
 #include <grpc++/credentials.h>
@@ -44,15 +42,12 @@
 #include <grpc++/server_builder.h>
 #include <grpc++/server_context.h>
 #include <grpc++/server_credentials.h>
-#include <grpc++/status.h>
-#include <grpc++/stream.h>
-#include <grpc++/time.h>
 #include <gtest/gtest.h>
 
-#include <grpc/grpc.h>
-#include <grpc/support/thd.h>
-#include <grpc/support/time.h>
-
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/util/echo.grpc.pb.h"
+#include "test/cpp/util/echo_duplicate.grpc.pb.h"
 #include "test/cpp/util/subprocess.h"
 
 using grpc::cpp::test::util::EchoRequest;
diff --git a/test/cpp/end2end/server_crash_test_client.cc b/test/cpp/end2end/server_crash_test_client.cc
index 1da4f05..7ca43a0 100644
--- a/test/cpp/end2end/server_crash_test_client.cc
+++ b/test/cpp/end2end/server_crash_test_client.cc
@@ -37,12 +37,10 @@
 #include <string>
 #include <gflags/gflags.h>
 
-#include <grpc++/channel_arguments.h>
-#include <grpc++/channel_interface.h>
+#include <grpc++/channel.h>
 #include <grpc++/client_context.h>
 #include <grpc++/create_channel.h>
 #include <grpc++/credentials.h>
-#include <grpc++/status.h>
 #include "test/cpp/util/echo.grpc.pb.h"
 
 DEFINE_string(address, "", "Address to connect to");
diff --git a/test/cpp/end2end/shutdown_test.cc b/test/cpp/end2end/shutdown_test.cc
index fccbb13..e83f86f 100644
--- a/test/cpp/end2end/shutdown_test.cc
+++ b/test/cpp/end2end/shutdown_test.cc
@@ -31,15 +31,11 @@
  *
  */
 
-#include "test/core/util/test_config.h"
-
 #include <thread>
 
-#include "test/core/util/port.h"
-#include "test/cpp/util/echo.grpc.pb.h"
-#include "src/core/support/env.h"
-#include <grpc++/channel_arguments.h>
-#include <grpc++/channel_interface.h>
+#include <grpc/grpc.h>
+#include <grpc/support/sync.h>
+#include <grpc++/channel.h>
 #include <grpc++/client_context.h>
 #include <grpc++/create_channel.h>
 #include <grpc++/credentials.h>
@@ -47,10 +43,12 @@
 #include <grpc++/server_builder.h>
 #include <grpc++/server_context.h>
 #include <grpc++/server_credentials.h>
-#include <grpc++/status.h>
 #include <gtest/gtest.h>
-#include <grpc/grpc.h>
-#include <grpc/support/sync.h>
+
+#include "src/core/support/env.h"
+#include "test/core/util/test_config.h"
+#include "test/core/util/port.h"
+#include "test/cpp/util/echo.grpc.pb.h"
 
 using grpc::cpp::test::util::EchoRequest;
 using grpc::cpp::test::util::EchoResponse;
@@ -118,7 +116,7 @@
   }
 
  protected:
-  std::shared_ptr<ChannelInterface> channel_;
+  std::shared_ptr<Channel> channel_;
   std::unique_ptr<grpc::cpp::test::util::TestService::Stub> stub_;
   std::unique_ptr<Server> server_;
   bool shutdown_;
diff --git a/test/cpp/end2end/thread_stress_test.cc b/test/cpp/end2end/thread_stress_test.cc
index ff9c945..8304f04 100644
--- a/test/cpp/end2end/thread_stress_test.cc
+++ b/test/cpp/end2end/thread_stress_test.cc
@@ -34,28 +34,23 @@
 #include <mutex>
 #include <thread>
 
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-#include "test/cpp/util/echo_duplicate.grpc.pb.h"
-#include "test/cpp/util/echo.grpc.pb.h"
-#include <grpc++/channel_arguments.h>
-#include <grpc++/channel_interface.h>
+#include <grpc/grpc.h>
+#include <grpc/support/thd.h>
+#include <grpc/support/time.h>
+#include <grpc++/channel.h>
 #include <grpc++/client_context.h>
 #include <grpc++/create_channel.h>
 #include <grpc++/credentials.h>
-#include <grpc++/dynamic_thread_pool.h>
 #include <grpc++/server.h>
 #include <grpc++/server_builder.h>
 #include <grpc++/server_context.h>
 #include <grpc++/server_credentials.h>
-#include <grpc++/status.h>
-#include <grpc++/stream.h>
-#include <grpc++/time.h>
 #include <gtest/gtest.h>
 
-#include <grpc/grpc.h>
-#include <grpc/support/thd.h>
-#include <grpc/support/time.h>
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/util/echo_duplicate.grpc.pb.h"
+#include "test/cpp/util/echo.grpc.pb.h"
 
 using grpc::cpp::test::util::EchoRequest;
 using grpc::cpp::test::util::EchoResponse;
@@ -177,7 +172,7 @@
 
 class End2endTest : public ::testing::Test {
  protected:
-  End2endTest() : kMaxMessageSize_(8192), thread_pool_(2) {}
+  End2endTest() : kMaxMessageSize_(8192) {}
 
   void SetUp() GRPC_OVERRIDE {
     int port = grpc_pick_unused_port_or_die();
@@ -190,14 +185,13 @@
     builder.SetMaxMessageSize(
         kMaxMessageSize_);  // For testing max message size.
     builder.RegisterService(&dup_pkg_service_);
-    builder.SetThreadPool(&thread_pool_);
     server_ = builder.BuildAndStart();
   }
 
   void TearDown() GRPC_OVERRIDE { server_->Shutdown(); }
 
   void ResetStub() {
-    std::shared_ptr<ChannelInterface> channel = CreateChannel(
+    std::shared_ptr<Channel> channel = CreateChannel(
         server_address_.str(), InsecureCredentials(), ChannelArguments());
     stub_ = std::move(grpc::cpp::test::util::TestService::NewStub(channel));
   }
@@ -208,7 +202,6 @@
   const int kMaxMessageSize_;
   TestServiceImpl service_;
   TestServiceImplDupPkg dup_pkg_service_;
-  DynamicThreadPool thread_pool_;
 };
 
 static void SendRpc(grpc::cpp::test::util::TestService::Stub* stub,
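
This file also drops its DynamicThreadPool member and the builder.SetThreadPool(...) call: the thread-pool headers move under src/cpp/server and stop being public API, so servers rely on the builder's internal default pool. A sketch of the trimmed setup, written as a template to avoid pinning the RegisterService parameter type (StartServer, the service type, and the address are placeholders):

// Illustrative only: server bring-up with no explicit thread pool.
#include <memory>

#include <grpc++/server.h>
#include <grpc++/server_builder.h>
#include <grpc++/server_credentials.h>
#include <grpc++/support/config.h>

template <class ServiceImpl>
std::unique_ptr<grpc::Server> StartServer(ServiceImpl* service,
                                          const grpc::string& address) {
  grpc::ServerBuilder builder;
  builder.AddListeningPort(address, grpc::InsecureServerCredentials());
  builder.RegisterService(service);  // SetThreadPool() is gone; threading is internal
  return builder.BuildAndStart();
}
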
diff --git a/test/cpp/end2end/zookeeper_test.cc b/test/cpp/end2end/zookeeper_test.cc
index f5eba66..e7d95b1 100644
--- a/test/cpp/end2end/zookeeper_test.cc
+++ b/test/cpp/end2end/zookeeper_test.cc
@@ -31,12 +31,7 @@
  *
  */
 
-#include "test/core/util/test_config.h"
-#include "test/core/util/port.h"
-#include "test/cpp/util/echo.grpc.pb.h"
-#include "src/core/support/env.h"
-#include <grpc++/channel_arguments.h>
-#include <grpc++/channel_interface.h>
+#include <grpc++/channel.h>
 #include <grpc++/client_context.h>
 #include <grpc++/create_channel.h>
 #include <grpc++/credentials.h>
@@ -44,12 +39,16 @@
 #include <grpc++/server_builder.h>
 #include <grpc++/server_context.h>
 #include <grpc++/server_credentials.h>
-#include <grpc++/status.h>
 #include <gtest/gtest.h>
 #include <grpc/grpc.h>
 #include <grpc/grpc_zookeeper.h>
 #include <zookeeper/zookeeper.h>
 
+#include "test/core/util/test_config.h"
+#include "test/core/util/port.h"
+#include "test/cpp/util/echo.grpc.pb.h"
+#include "src/core/support/env.h"
+
 using grpc::cpp::test::util::EchoRequest;
 using grpc::cpp::test::util::EchoResponse;
 
@@ -170,7 +169,7 @@
     return strs.str();
   }
 
-  std::shared_ptr<ChannelInterface> channel_;
+  std::shared_ptr<Channel> channel_;
   std::unique_ptr<grpc::cpp::test::util::TestService::Stub> stub_;
   std::unique_ptr<Server> server1_;
   std::unique_ptr<Server> server2_;
diff --git a/test/cpp/interop/client.cc b/test/cpp/interop/client.cc
index 48143b2..cb52321 100644
--- a/test/cpp/interop/client.cc
+++ b/test/cpp/interop/client.cc
@@ -38,10 +38,9 @@
 #include <grpc/grpc.h>
 #include <grpc/support/log.h>
 #include <gflags/gflags.h>
-#include <grpc++/channel_interface.h>
+#include <grpc++/channel.h>
 #include <grpc++/client_context.h>
-#include <grpc++/status.h>
-#include <grpc++/stream.h>
+
 #include "test/cpp/interop/client_helper.h"
 #include "test/cpp/interop/interop_client.h"
 #include "test/cpp/util/test_config.h"
diff --git a/test/cpp/interop/client_helper.cc b/test/cpp/interop/client_helper.cc
index da5627d..abc14ae 100644
--- a/test/cpp/interop/client_helper.cc
+++ b/test/cpp/interop/client_helper.cc
@@ -33,26 +33,23 @@
 
 #include "test/cpp/interop/client_helper.h"
 
+#include <unistd.h>
+
 #include <fstream>
 #include <memory>
 #include <sstream>
 
-#include <unistd.h>
-
 #include <grpc/grpc.h>
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
 #include <gflags/gflags.h>
-#include <grpc++/channel_arguments.h>
-#include <grpc++/channel_interface.h>
+#include <grpc++/channel.h>
 #include <grpc++/create_channel.h>
 #include <grpc++/credentials.h>
-#include <grpc++/stream.h>
-
-#include "test/core/security/oauth2_utils.h"
-#include "test/cpp/util/create_test_channel.h"
 
 #include "src/cpp/client/secure_credentials.h"
+#include "test/core/security/oauth2_utils.h"
+#include "test/cpp/util/create_test_channel.h"
 
 DECLARE_bool(enable_ssl);
 DECLARE_bool(use_prod_roots);
@@ -102,7 +99,7 @@
   return access_token;
 }
 
-std::shared_ptr<ChannelInterface> CreateChannelForTestCase(
+std::shared_ptr<Channel> CreateChannelForTestCase(
     const grpc::string& test_case) {
   GPR_ASSERT(FLAGS_server_port);
   const int host_port_buf_size = 1024;
diff --git a/test/cpp/interop/client_helper.h b/test/cpp/interop/client_helper.h
index edc69e9..92d5078 100644
--- a/test/cpp/interop/client_helper.h
+++ b/test/cpp/interop/client_helper.h
@@ -36,8 +36,7 @@
 
 #include <memory>
 
-#include <grpc++/config.h>
-#include <grpc++/channel_interface.h>
+#include <grpc++/channel.h>
 
 #include "src/core/surface/call.h"
 
@@ -48,7 +47,7 @@
 
 grpc::string GetOauth2AccessToken();
 
-std::shared_ptr<ChannelInterface> CreateChannelForTestCase(
+std::shared_ptr<Channel> CreateChannelForTestCase(
     const grpc::string& test_case);
 
 class InteropClientContextInspector {
diff --git a/test/cpp/interop/interop_client.cc b/test/cpp/interop/interop_client.cc
index 5ed14d5..fa35858 100644
--- a/test/cpp/interop/interop_client.cc
+++ b/test/cpp/interop/interop_client.cc
@@ -33,26 +33,24 @@
 
 #include "test/cpp/interop/interop_client.h"
 
+#include <unistd.h>
+
 #include <fstream>
 #include <memory>
 
-#include <unistd.h>
-
 #include <grpc/grpc.h>
 #include <grpc/support/log.h>
 #include <grpc/support/string_util.h>
 #include <grpc/support/useful.h>
-#include <grpc++/channel_interface.h>
+#include <grpc++/channel.h>
 #include <grpc++/client_context.h>
 #include <grpc++/credentials.h>
-#include <grpc++/status.h>
-#include <grpc++/stream.h>
 
+#include "src/core/transport/stream_op.h"
 #include "test/cpp/interop/client_helper.h"
 #include "test/proto/test.grpc.pb.h"
 #include "test/proto/empty.grpc.pb.h"
 #include "test/proto/messages.grpc.pb.h"
-#include "src/core/transport/stream_op.h"
 
 namespace grpc {
 namespace testing {
@@ -84,7 +82,7 @@
 }
 }  // namespace
 
-InteropClient::InteropClient(std::shared_ptr<ChannelInterface> channel)
+InteropClient::InteropClient(std::shared_ptr<Channel> channel)
     : channel_(channel) {}
 
 void InteropClient::AssertOkOrPrintErrorStatus(const Status& s) {
diff --git a/test/cpp/interop/interop_client.h b/test/cpp/interop/interop_client.h
index d6fb9bf..5e26cc8 100644
--- a/test/cpp/interop/interop_client.h
+++ b/test/cpp/interop/interop_client.h
@@ -33,11 +33,11 @@
 
 #ifndef GRPC_TEST_CPP_INTEROP_INTEROP_CLIENT_H
 #define GRPC_TEST_CPP_INTEROP_INTEROP_CLIENT_H
+
 #include <memory>
 
 #include <grpc/grpc.h>
-#include <grpc++/channel_interface.h>
-#include <grpc++/status.h>
+#include <grpc++/channel.h>
 #include "test/proto/messages.grpc.pb.h"
 
 namespace grpc {
@@ -45,10 +45,10 @@
 
 class InteropClient {
  public:
-  explicit InteropClient(std::shared_ptr<ChannelInterface> channel);
+  explicit InteropClient(std::shared_ptr<Channel> channel);
   ~InteropClient() {}
 
-  void Reset(std::shared_ptr<ChannelInterface> channel) { channel_ = channel; }
+  void Reset(std::shared_ptr<Channel> channel) { channel_ = channel; }
 
   void DoEmpty();
   void DoLargeUnary();
@@ -82,7 +82,7 @@
   void PerformLargeUnary(SimpleRequest* request, SimpleResponse* response);
   void AssertOkOrPrintErrorStatus(const Status& s);
 
-  std::shared_ptr<ChannelInterface> channel_;
+  std::shared_ptr<Channel> channel_;
 };
 
 }  // namespace testing
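
With this header change, InteropClient is constructed from a std::shared_ptr<Channel> directly. A hypothetical wiring sketch using the CreateTestChannel helper already used by these tests (RunEmptyCall, the address, and the ssl flag value are placeholders):

// Illustrative only: hand a Channel to the interop client and run one test case.
#include <memory>

#include <grpc++/channel.h>
#include "test/cpp/interop/interop_client.h"
#include "test/cpp/util/create_test_channel.h"

void RunEmptyCall() {
  std::shared_ptr<grpc::Channel> channel =
      grpc::CreateTestChannel("localhost:8080", false);
  grpc::testing::InteropClient client(channel);
  client.DoEmpty();  // one of the test cases declared in interop_client.h
}
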
diff --git a/test/cpp/interop/interop_test.cc b/test/cpp/interop/interop_test.cc
index aac6e56..f01b032 100644
--- a/test/cpp/interop/interop_test.cc
+++ b/test/cpp/interop/interop_test.cc
@@ -44,17 +44,18 @@
 #include <sys/types.h>
 #include <sys/wait.h>
 
-extern "C" {
-#include "src/core/iomgr/socket_utils_posix.h"
-#include "src/core/support/string.h"
-}
-
 #include <grpc/support/alloc.h>
 #include <grpc/support/host_port.h>
 #include <grpc/support/log.h>
 #include <grpc/support/string_util.h>
 #include "test/core/util/port.h"
 
+extern "C" {
+#include "src/core/iomgr/socket_utils_posix.h"
+#include "src/core/support/string.h"
+}
+

 int test_client(const char* root, const char* host, int port) {
   int status;
   pid_t cli;
diff --git a/test/cpp/interop/reconnect_interop_client.cc b/test/cpp/interop/reconnect_interop_client.cc
index 65f0980..d332dca 100644
--- a/test/cpp/interop/reconnect_interop_client.cc
+++ b/test/cpp/interop/reconnect_interop_client.cc
@@ -37,9 +37,8 @@
 #include <grpc/grpc.h>
 #include <grpc/support/log.h>
 #include <gflags/gflags.h>
-#include <grpc++/channel_interface.h>
+#include <grpc++/channel.h>
 #include <grpc++/client_context.h>
-#include <grpc++/status.h>
 #include "test/cpp/util/create_test_channel.h"
 #include "test/cpp/util/test_config.h"
 #include "test/proto/test.grpc.pb.h"
@@ -50,7 +49,7 @@
 DEFINE_int32(server_retry_port, 0, "Server port for testing reconnection.");
 DEFINE_string(server_host, "127.0.0.1", "Server host to connect to");
 
-using grpc::ChannelInterface;
+using grpc::Channel;
 using grpc::ClientContext;
 using grpc::CreateTestChannel;
 using grpc::Status;
@@ -78,7 +77,7 @@
   gpr_log(GPR_INFO, "Starting connections with retries.");
   server_address.str("");
   server_address << FLAGS_server_host << ':' << FLAGS_server_retry_port;
-  std::shared_ptr<ChannelInterface> retry_channel =
+  std::shared_ptr<Channel> retry_channel =
       CreateTestChannel(server_address.str(), true);
   // About 13 retries.
   const int kDeadlineSeconds = 540;
diff --git a/test/cpp/interop/reconnect_interop_server.cc b/test/cpp/interop/reconnect_interop_server.cc
index 8bc51aa..d4f171b 100644
--- a/test/cpp/interop/reconnect_interop_server.cc
+++ b/test/cpp/interop/reconnect_interop_server.cc
@@ -31,23 +31,22 @@
  *
  */
 
+#include <signal.h>
+#include <unistd.h>
+
 #include <condition_variable>
 #include <memory>
 #include <mutex>
 #include <sstream>
 
-#include <signal.h>
-#include <unistd.h>
-
 #include <gflags/gflags.h>
 #include <grpc/grpc.h>
 #include <grpc/support/log.h>
-#include <grpc++/config.h>
 #include <grpc++/server.h>
 #include <grpc++/server_builder.h>
 #include <grpc++/server_context.h>
 #include <grpc++/server_credentials.h>
-#include <grpc++/status.h>
+
 #include "test/core/util/reconnect_server.h"
 #include "test/cpp/util/test_config.h"
 #include "test/proto/test.grpc.pb.h"
diff --git a/test/cpp/interop/server.cc b/test/cpp/interop/server.cc
index 760bb18..35ec890 100644
--- a/test/cpp/interop/server.cc
+++ b/test/cpp/interop/server.cc
@@ -31,32 +31,28 @@
  *
  */
 
+#include <signal.h>
+#include <unistd.h>
+
 #include <fstream>
 #include <memory>
 #include <sstream>
 #include <thread>
 
-#include <signal.h>
-#include <unistd.h>
-
 #include <gflags/gflags.h>
 #include <grpc/grpc.h>
 #include <grpc/support/log.h>
 #include <grpc/support/useful.h>
-
-#include <grpc++/config.h>
 #include <grpc++/server.h>
 #include <grpc++/server_builder.h>
 #include <grpc++/server_context.h>
 #include <grpc++/server_credentials.h>
-#include <grpc++/status.h>
-#include <grpc++/stream.h>
 
+#include "test/cpp/interop/server_helper.h"
+#include "test/cpp/util/test_config.h"
 #include "test/proto/test.grpc.pb.h"
 #include "test/proto/empty.grpc.pb.h"
 #include "test/proto/messages.grpc.pb.h"
-#include "test/cpp/interop/server_helper.h"
-#include "test/cpp/util/test_config.h"
 
 DEFINE_bool(enable_ssl, false, "Whether to use ssl/tls.");
 DEFINE_int32(port, 0, "Server port.");
diff --git a/test/cpp/interop/server_helper.cc b/test/cpp/interop/server_helper.cc
index 3721d79..e897f4e 100644
--- a/test/cpp/interop/server_helper.cc
+++ b/test/cpp/interop/server_helper.cc
@@ -36,7 +36,6 @@
 #include <memory>
 
 #include <gflags/gflags.h>
-#include <grpc++/config.h>
 #include <grpc++/server_credentials.h>
 
 #include "src/core/surface/call.h"
diff --git a/test/cpp/qps/client.h b/test/cpp/qps/client.h
index 1c4f463..0f95cfe 100644
--- a/test/cpp/qps/client.h
+++ b/test/cpp/qps/client.h
@@ -34,14 +34,14 @@
 #ifndef TEST_QPS_CLIENT_H
 #define TEST_QPS_CLIENT_H
 
+#include <condition_variable>
+#include <mutex>
+
 #include "test/cpp/qps/histogram.h"
 #include "test/cpp/qps/interarrival.h"
 #include "test/cpp/qps/timer.h"
 #include "test/cpp/qps/qpstest.grpc.pb.h"
-
-#include <condition_variable>
-#include <mutex>
-#include <grpc++/config.h>
+#include "test/cpp/util/create_test_channel.h"
 
 namespace grpc {
 
@@ -125,11 +125,11 @@
       channel_ = CreateTestChannel(target, config.enable_ssl());
       stub_ = TestService::NewStub(channel_);
     }
-    ChannelInterface* get_channel() { return channel_.get(); }
+    Channel* get_channel() { return channel_.get(); }
     TestService::Stub* get_stub() { return stub_.get(); }
 
    private:
-    std::shared_ptr<ChannelInterface> channel_;
+    std::shared_ptr<Channel> channel_;
     std::unique_ptr<TestService::Stub> stub_;
   };
   std::vector<ClientChannelInfo> channels_;
diff --git a/test/cpp/qps/client_async.cc b/test/cpp/qps/client_async.cc
index a337610..f779e4a 100644
--- a/test/cpp/qps/client_async.cc
+++ b/test/cpp/qps/client_async.cc
@@ -46,14 +46,12 @@
 #include <grpc/support/histogram.h>
 #include <grpc/support/log.h>
 #include <gflags/gflags.h>
-#include <grpc++/async_unary_call.h>
 #include <grpc++/client_context.h>
-#include <grpc++/status.h>
-#include <grpc++/stream.h>
-#include "test/cpp/util/create_test_channel.h"
+
 #include "test/cpp/qps/qpstest.grpc.pb.h"
 #include "test/cpp/qps/timer.h"
 #include "test/cpp/qps/client.h"
+#include "test/cpp/util/create_test_channel.h"
 
 namespace grpc {
 namespace testing {
diff --git a/test/cpp/qps/client_sync.cc b/test/cpp/qps/client_sync.cc
index db5416a..123dca6 100644
--- a/test/cpp/qps/client_sync.cc
+++ b/test/cpp/qps/client_sync.cc
@@ -31,6 +31,8 @@
  *
  */
 
+#include <sys/signal.h>
+
 #include <cassert>
 #include <chrono>
 #include <memory>
@@ -40,21 +42,18 @@
 #include <vector>
 #include <sstream>
 
-#include <sys/signal.h>
-
+#include <gflags/gflags.h>
 #include <grpc/grpc.h>
 #include <grpc/support/alloc.h>
 #include <grpc/support/histogram.h>
 #include <grpc/support/host_port.h>
 #include <grpc/support/log.h>
 #include <grpc/support/time.h>
-#include <gflags/gflags.h>
 #include <grpc++/client_context.h>
 #include <grpc++/server.h>
 #include <grpc++/server_builder.h>
-#include <grpc++/status.h>
-#include <grpc++/stream.h>
 #include <gtest/gtest.h>
+
 #include "test/cpp/util/create_test_channel.h"
 #include "test/cpp/qps/client.h"
 #include "test/cpp/qps/qpstest.grpc.pb.h"
diff --git a/test/cpp/qps/driver.cc b/test/cpp/qps/driver.cc
index 78e3720..3bd61ea 100644
--- a/test/cpp/qps/driver.cc
+++ b/test/cpp/qps/driver.cc
@@ -31,24 +31,24 @@
  *
  */
 
-#include "test/cpp/qps/driver.h"
-#include "src/core/support/env.h"
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/host_port.h>
-#include <grpc++/channel_arguments.h>
-#include <grpc++/client_context.h>
-#include <grpc++/create_channel.h>
-#include <grpc++/stream.h>
+#include <unistd.h>
 #include <list>
 #include <thread>
 #include <deque>
 #include <vector>
-#include <unistd.h>
-#include "test/cpp/qps/histogram.h"
-#include "test/cpp/qps/qps_worker.h"
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/host_port.h>
+#include <grpc++/client_context.h>
+#include <grpc++/create_channel.h>
+
+#include "src/core/support/env.h"
 #include "test/core/util/port.h"
 #include "test/core/util/test_config.h"
+#include "test/cpp/qps/driver.h"
+#include "test/cpp/qps/histogram.h"
+#include "test/cpp/qps/qps_worker.h"
 
 using std::list;
 using std::thread;
diff --git a/test/cpp/qps/interarrival.h b/test/cpp/qps/interarrival.h
index 04d14f6..841619e 100644
--- a/test/cpp/qps/interarrival.h
+++ b/test/cpp/qps/interarrival.h
@@ -39,7 +39,7 @@
 #include <cstdlib>
 #include <vector>
 
-#include <grpc++/config.h>
+#include <grpc++/support/config.h>
 
 namespace grpc {
 namespace testing {
diff --git a/test/cpp/qps/perf_db_client.h b/test/cpp/qps/perf_db_client.h
index 7a9d86d..ae5d170 100644
--- a/test/cpp/qps/perf_db_client.h
+++ b/test/cpp/qps/perf_db_client.h
@@ -37,12 +37,11 @@
 #include <cfloat>
 
 #include <grpc/grpc.h>
-#include <grpc++/channel_arguments.h>
-#include <grpc++/channel_interface.h>
+#include <grpc++/support/channel_arguments.h>
+#include <grpc++/channel.h>
 #include <grpc++/client_context.h>
 #include <grpc++/create_channel.h>
 #include <grpc++/credentials.h>
-#include <grpc++/status.h>
 #include "test/cpp/qps/perf_db.grpc.pb.h"
 
 namespace grpc {
@@ -65,7 +64,7 @@
     client_user_time_ = DBL_MIN;
   }
 
-  void init(std::shared_ptr<ChannelInterface> channel) {
+  void init(std::shared_ptr<Channel> channel) {
     stub_ = PerfDbTransfer::NewStub(channel);
   }
 
diff --git a/test/cpp/qps/qps_interarrival_test.cc b/test/cpp/qps/qps_interarrival_test.cc
index 1eed956..a7979e6 100644
--- a/test/cpp/qps/qps_interarrival_test.cc
+++ b/test/cpp/qps/qps_interarrival_test.cc
@@ -31,13 +31,13 @@
  *
  */
 
-#include "test/cpp/qps/interarrival.h"
 #include <chrono>
 #include <iostream>
 
 // Use the C histogram rather than C++ to avoid depending on proto
 #include <grpc/support/histogram.h>
-#include <grpc++/config.h>
+
+#include "test/cpp/qps/interarrival.h"
 
 using grpc::testing::RandomDist;
 using grpc::testing::InterarrivalTimer;
diff --git a/test/cpp/qps/qps_openloop_test.cc b/test/cpp/qps/qps_openloop_test.cc
index 9a7313f..5a6a924 100644
--- a/test/cpp/qps/qps_openloop_test.cc
+++ b/test/cpp/qps/qps_openloop_test.cc
@@ -31,12 +31,12 @@
  *
  */
 
+#include <signal.h>
+
 #include <set>
 
 #include <grpc/support/log.h>
 
-#include <signal.h>
-
 #include "test/cpp/qps/driver.h"
 #include "test/cpp/qps/report.h"
 #include "test/cpp/util/benchmark_config.h"
diff --git a/test/cpp/qps/qps_test.cc b/test/cpp/qps/qps_test.cc
index ba980a6..d0c4a79 100644
--- a/test/cpp/qps/qps_test.cc
+++ b/test/cpp/qps/qps_test.cc
@@ -31,12 +31,12 @@
  *
  */
 
+#include <signal.h>
+
 #include <set>
 
 #include <grpc/support/log.h>
 
-#include <signal.h>
-
 #include "test/cpp/qps/driver.h"
 #include "test/cpp/qps/report.h"
 #include "test/cpp/util/benchmark_config.h"
diff --git a/test/cpp/qps/qps_test_with_poll.cc b/test/cpp/qps/qps_test_with_poll.cc
index 90a8da8..31d2c1b 100644
--- a/test/cpp/qps/qps_test_with_poll.cc
+++ b/test/cpp/qps/qps_test_with_poll.cc
@@ -31,12 +31,12 @@
  *
  */
 
+#include <signal.h>
+
 #include <set>
 
 #include <grpc/support/log.h>
 
-#include <signal.h>
-
 #include "test/cpp/qps/driver.h"
 #include "test/cpp/qps/report.h"
 #include "test/cpp/util/benchmark_config.h"
diff --git a/test/cpp/qps/qps_worker.cc b/test/cpp/qps/qps_worker.cc
index f1cea5e..51e955a 100644
--- a/test/cpp/qps/qps_worker.cc
+++ b/test/cpp/qps/qps_worker.cc
@@ -47,16 +47,15 @@
 #include <grpc/support/log.h>
 #include <grpc/support/host_port.h>
 #include <grpc++/client_context.h>
-#include <grpc++/status.h>
 #include <grpc++/server.h>
 #include <grpc++/server_builder.h>
 #include <grpc++/server_credentials.h>
-#include <grpc++/stream.h>
+
 #include "test/core/util/grpc_profiler.h"
-#include "test/cpp/util/create_test_channel.h"
 #include "test/cpp/qps/qpstest.pb.h"
 #include "test/cpp/qps/client.h"
 #include "test/cpp/qps/server.h"
+#include "test/cpp/util/create_test_channel.h"
 
 namespace grpc {
 namespace testing {
diff --git a/test/cpp/qps/report.h b/test/cpp/qps/report.h
index aec3cbe..620abad 100644
--- a/test/cpp/qps/report.h
+++ b/test/cpp/qps/report.h
@@ -37,7 +37,8 @@
 #include <memory>
 #include <set>
 #include <vector>
-#include <grpc++/config.h>
+
+#include <grpc++/support/config.h>
 
 #include "test/cpp/qps/driver.h"
 #include "test/cpp/qps/qpstest.grpc.pb.h"
diff --git a/test/cpp/qps/server_async.cc b/test/cpp/qps/server_async.cc
index b4fc49c..77415f4 100644
--- a/test/cpp/qps/server_async.cc
+++ b/test/cpp/qps/server_async.cc
@@ -41,22 +41,20 @@
 #include <thread>
 
 #include <gflags/gflags.h>
+#include <grpc/grpc.h>
 #include <grpc/support/alloc.h>
 #include <grpc/support/host_port.h>
-#include <grpc++/async_unary_call.h>
-#include <grpc++/config.h>
+#include <grpc/support/log.h>
+#include <grpc++/support/config.h>
 #include <grpc++/server.h>
 #include <grpc++/server_builder.h>
 #include <grpc++/server_context.h>
 #include <grpc++/server_credentials.h>
-#include <grpc++/status.h>
-#include <grpc++/stream.h>
 #include <gtest/gtest.h>
+
 #include "test/cpp/qps/qpstest.grpc.pb.h"
 #include "test/cpp/qps/server.h"
 
-#include <grpc/grpc.h>
-#include <grpc/support/log.h>
 
 namespace grpc {
 namespace testing {
diff --git a/test/cpp/qps/server_sync.cc b/test/cpp/qps/server_sync.cc
index 4c3c9cb..29ec19c 100644
--- a/test/cpp/qps/server_sync.cc
+++ b/test/cpp/qps/server_sync.cc
@@ -32,28 +32,23 @@
  */
 
 #include <sys/signal.h>
+#include <unistd.h>
 #include <thread>
 
-#include <unistd.h>
-
 #include <gflags/gflags.h>
+#include <grpc/grpc.h>
 #include <grpc/support/alloc.h>
 #include <grpc/support/host_port.h>
-#include <grpc++/config.h>
-#include <grpc++/dynamic_thread_pool.h>
-#include <grpc++/fixed_size_thread_pool.h>
+#include <grpc/support/log.h>
 #include <grpc++/server.h>
 #include <grpc++/server_builder.h>
 #include <grpc++/server_context.h>
 #include <grpc++/server_credentials.h>
-#include <grpc++/status.h>
-#include <grpc++/stream.h>
+
 #include "test/cpp/qps/qpstest.grpc.pb.h"
 #include "test/cpp/qps/server.h"
 #include "test/cpp/qps/timer.h"
 
-#include <grpc/grpc.h>
-#include <grpc/support/log.h>
 
 namespace grpc {
 namespace testing {
@@ -93,12 +88,7 @@
 class SynchronousServer GRPC_FINAL : public grpc::testing::Server {
  public:
   SynchronousServer(const ServerConfig& config, int port)
-      : thread_pool_(), impl_(MakeImpl(port)) {
-    if (config.threads() > 0) {
-      thread_pool_.reset(new FixedSizeThreadPool(config.threads()));
-    } else {
-      thread_pool_.reset(new DynamicThreadPool(-config.threads()));
-    }
+      : impl_(MakeImpl(port)) {
   }
 
  private:
@@ -112,13 +102,10 @@
 
     builder.RegisterService(&service_);
 
-    builder.SetThreadPool(thread_pool_.get());
-
     return builder.BuildAndStart();
   }
 
   TestServiceImpl service_;
-  std::unique_ptr<ThreadPoolInterface> thread_pool_;
   std::unique_ptr<grpc::Server> impl_;
 };
 
diff --git a/test/cpp/qps/stats.h b/test/cpp/qps/stats.h
index 82dc03e..9387501 100644
--- a/test/cpp/qps/stats.h
+++ b/test/cpp/qps/stats.h
@@ -34,9 +34,10 @@
 #ifndef TEST_QPS_STATS_UTILS_H
 #define TEST_QPS_STATS_UTILS_H
 
-#include "test/cpp/qps/histogram.h"
 #include <string>
 
+#include "test/cpp/qps/histogram.h"
+
 namespace grpc {
 namespace testing {
 
diff --git a/test/cpp/qps/sync_streaming_ping_pong_test.cc b/test/cpp/qps/sync_streaming_ping_pong_test.cc
index d53905a..52e4393 100644
--- a/test/cpp/qps/sync_streaming_ping_pong_test.cc
+++ b/test/cpp/qps/sync_streaming_ping_pong_test.cc
@@ -31,12 +31,12 @@
  *
  */
 
+#include <signal.h>
+
 #include <set>
 
 #include <grpc/support/log.h>
 
-#include <signal.h>
-
 #include "test/cpp/qps/driver.h"
 #include "test/cpp/qps/report.h"
 #include "test/cpp/util/benchmark_config.h"
diff --git a/test/cpp/qps/sync_unary_ping_pong_test.cc b/test/cpp/qps/sync_unary_ping_pong_test.cc
index d276d13..fbd2135 100644
--- a/test/cpp/qps/sync_unary_ping_pong_test.cc
+++ b/test/cpp/qps/sync_unary_ping_pong_test.cc
@@ -31,12 +31,12 @@
  *
  */
 
+#include <signal.h>
+
 #include <set>
 
 #include <grpc/support/log.h>
 
-#include <signal.h>
-
 #include "test/cpp/qps/driver.h"
 #include "test/cpp/qps/report.h"
 #include "test/cpp/util/benchmark_config.h"
diff --git a/test/cpp/qps/timer.cc b/test/cpp/qps/timer.cc
index c1ba23d..8edb838 100644
--- a/test/cpp/qps/timer.cc
+++ b/test/cpp/qps/timer.cc
@@ -36,7 +36,6 @@
 #include <sys/time.h>
 #include <sys/resource.h>
 #include <grpc/support/time.h>
-#include <grpc++/config.h>
 
 Timer::Timer() : start_(Sample()) {}
 
diff --git a/test/cpp/qps/worker.cc b/test/cpp/qps/worker.cc
index 7cf4903..935e485 100644
--- a/test/cpp/qps/worker.cc
+++ b/test/cpp/qps/worker.cc
@@ -36,9 +36,9 @@
 #include <chrono>
 #include <thread>
 
+#include <gflags/gflags.h>
 #include <grpc/grpc.h>
 #include <grpc/support/time.h>
-#include <gflags/gflags.h>
 
 #include "test/cpp/qps/qps_worker.h"
 #include "test/cpp/util/test_config.h"
diff --git a/test/cpp/server/dynamic_thread_pool_test.cc b/test/cpp/server/dynamic_thread_pool_test.cc
deleted file mode 100644
index 63b603b..0000000
--- a/test/cpp/server/dynamic_thread_pool_test.cc
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <condition_variable>
-#include <functional>
-#include <mutex>
-
-#include <grpc++/dynamic_thread_pool.h>
-#include <gtest/gtest.h>
-
-namespace grpc {
-
-class DynamicThreadPoolTest : public ::testing::Test {
- public:
-  DynamicThreadPoolTest() : thread_pool_(0) {}
-
- protected:
-  DynamicThreadPool thread_pool_;
-};
-
-void Callback(std::mutex* mu, std::condition_variable* cv, bool* done) {
-  std::unique_lock<std::mutex> lock(*mu);
-  *done = true;
-  cv->notify_all();
-}
-
-TEST_F(DynamicThreadPoolTest, Add) {
-  std::mutex mu;
-  std::condition_variable cv;
-  bool done = false;
-  std::function<void()> callback = std::bind(Callback, &mu, &cv, &done);
-  thread_pool_.Add(callback);
-
-  // Wait for the callback to finish.
-  std::unique_lock<std::mutex> lock(mu);
-  while (!done) {
-    cv.wait(lock);
-  }
-}
-
-}  // namespace grpc
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  int result = RUN_ALL_TESTS();
-  return result;
-}
diff --git a/test/cpp/server/fixed_size_thread_pool_test.cc b/test/cpp/server/fixed_size_thread_pool_test.cc
deleted file mode 100644
index 442e974..0000000
--- a/test/cpp/server/fixed_size_thread_pool_test.cc
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <condition_variable>
-#include <functional>
-#include <mutex>
-
-#include <grpc++/fixed_size_thread_pool.h>
-#include <gtest/gtest.h>
-
-namespace grpc {
-
-class FixedSizeThreadPoolTest : public ::testing::Test {
- public:
-  FixedSizeThreadPoolTest() : thread_pool_(4) {}
-
- protected:
-  FixedSizeThreadPool thread_pool_;
-};
-
-void Callback(std::mutex* mu, std::condition_variable* cv, bool* done) {
-  std::unique_lock<std::mutex> lock(*mu);
-  *done = true;
-  cv->notify_all();
-}
-
-TEST_F(FixedSizeThreadPoolTest, Add) {
-  std::mutex mu;
-  std::condition_variable cv;
-  bool done = false;
-  std::function<void()> callback = std::bind(Callback, &mu, &cv, &done);
-  thread_pool_.Add(callback);
-
-  // Wait for the callback to finish.
-  std::unique_lock<std::mutex> lock(mu);
-  while (!done) {
-    cv.wait(lock);
-  }
-}
-
-}  // namespace grpc
-
-int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-  int result = RUN_ALL_TESTS();
-  return result;
-}
diff --git a/test/cpp/util/byte_buffer_test.cc b/test/cpp/util/byte_buffer_test.cc
index 5195575..f36c32c 100644
--- a/test/cpp/util/byte_buffer_test.cc
+++ b/test/cpp/util/byte_buffer_test.cc
@@ -31,13 +31,13 @@
  *
  */
 
-#include <grpc++/byte_buffer.h>
+#include <grpc++/support/byte_buffer.h>
 
 #include <cstring>
 #include <vector>
 
 #include <grpc/support/slice.h>
-#include <grpc++/slice.h>
+#include <grpc++/support/slice.h>
 #include <gtest/gtest.h>
 
 namespace grpc {
diff --git a/test/cpp/util/cli_call.cc b/test/cpp/util/cli_call.cc
index ac88910..d60cee9 100644
--- a/test/cpp/util/cli_call.cc
+++ b/test/cpp/util/cli_call.cc
@@ -35,16 +35,13 @@
 
 #include <iostream>
 
-#include <grpc++/byte_buffer.h>
-#include <grpc++/channel_interface.h>
-#include <grpc++/client_context.h>
-#include <grpc++/generic_stub.h>
-#include <grpc++/status.h>
-#include <grpc++/stream.h>
-
 #include <grpc/grpc.h>
 #include <grpc/support/log.h>
 #include <grpc/support/slice.h>
+#include <grpc++/support/byte_buffer.h>
+#include <grpc++/channel.h>
+#include <grpc++/client_context.h>
+#include <grpc++/generic/generic_stub.h>
 
 namespace grpc {
 namespace testing {
@@ -52,7 +49,7 @@
 void* tag(int i) { return (void*)(gpr_intptr)i; }
 }  // namespace
 
-Status CliCall::Call(std::shared_ptr<grpc::ChannelInterface> channel,
+Status CliCall::Call(std::shared_ptr<grpc::Channel> channel,
                      const grpc::string& method, const grpc::string& request,
                      grpc::string* response, const MetadataContainer& metadata,
                      MetadataContainer* server_initial_metadata,
diff --git a/test/cpp/util/cli_call.h b/test/cpp/util/cli_call.h
index 8d114c9..7a3dcf2 100644
--- a/test/cpp/util/cli_call.h
+++ b/test/cpp/util/cli_call.h
@@ -36,9 +36,8 @@
 
 #include <map>
 
-#include <grpc++/channel_interface.h>
-#include <grpc++/config.h>
-#include <grpc++/status.h>
+#include <grpc++/channel.h>
+#include <grpc++/support/status.h>
 
 namespace grpc {
 namespace testing {
@@ -46,7 +45,7 @@
 class CliCall GRPC_FINAL {
  public:
   typedef std::multimap<grpc::string, grpc::string> MetadataContainer;
-  static Status Call(std::shared_ptr<grpc::ChannelInterface> channel,
+  static Status Call(std::shared_ptr<grpc::Channel> channel,
                      const grpc::string& method, const grpc::string& request,
                      grpc::string* response, const MetadataContainer& metadata,
                      MetadataContainer* server_initial_metadata,
diff --git a/test/cpp/util/cli_call_test.cc b/test/cpp/util/cli_call_test.cc
index 848a3ae..35bfad2 100644
--- a/test/cpp/util/cli_call_test.cc
+++ b/test/cpp/util/cli_call_test.cc
@@ -31,24 +31,22 @@
  *
  */
 
-#include "test/core/util/test_config.h"
 #include "test/cpp/util/cli_call.h"
-#include "test/cpp/util/echo.grpc.pb.h"
-#include <grpc++/channel_arguments.h>
-#include <grpc++/channel_interface.h>
+
+#include <grpc/grpc.h>
+#include <grpc++/channel.h>
 #include <grpc++/client_context.h>
 #include <grpc++/create_channel.h>
 #include <grpc++/credentials.h>
-#include <grpc++/dynamic_thread_pool.h>
 #include <grpc++/server.h>
 #include <grpc++/server_builder.h>
 #include <grpc++/server_context.h>
 #include <grpc++/server_credentials.h>
-#include <grpc++/status.h>
-#include "test/core/util/port.h"
 #include <gtest/gtest.h>
 
-#include <grpc/grpc.h>
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/util/echo.grpc.pb.h"
 
 using grpc::cpp::test::util::EchoRequest;
 using grpc::cpp::test::util::EchoResponse;
@@ -75,7 +73,7 @@
 
 class CliCallTest : public ::testing::Test {
  protected:
-  CliCallTest() : thread_pool_(2) {}
+  CliCallTest() {}
 
   void SetUp() GRPC_OVERRIDE {
     int port = grpc_pick_unused_port_or_die();
@@ -85,7 +83,6 @@
     builder.AddListeningPort(server_address_.str(),
                              InsecureServerCredentials());
     builder.RegisterService(&service_);
-    builder.SetThreadPool(&thread_pool_);
     server_ = builder.BuildAndStart();
   }
 
@@ -97,12 +94,11 @@
     stub_ = std::move(grpc::cpp::test::util::TestService::NewStub(channel_));
   }
 
-  std::shared_ptr<ChannelInterface> channel_;
+  std::shared_ptr<Channel> channel_;
   std::unique_ptr<grpc::cpp::test::util::TestService::Stub> stub_;
   std::unique_ptr<Server> server_;
   std::ostringstream server_address_;
   TestServiceImpl service_;
-  DynamicThreadPool thread_pool_;
 };
 
 // Send a rpc with a normal stub and then a CliCall. Verify they match.
diff --git a/test/cpp/util/create_test_channel.cc b/test/cpp/util/create_test_channel.cc
index dc48fa2..161b4bd 100644
--- a/test/cpp/util/create_test_channel.cc
+++ b/test/cpp/util/create_test_channel.cc
@@ -33,11 +33,11 @@
 
 #include "test/cpp/util/create_test_channel.h"
 
-#include "test/core/end2end/data/ssl_test_data.h"
-#include <grpc++/channel_arguments.h>
 #include <grpc++/create_channel.h>
 #include <grpc++/credentials.h>
 
+#include "test/core/end2end/data/ssl_test_data.h"
+
 namespace grpc {
 
 // When ssl is enabled, if server is empty, override_hostname is used to
@@ -55,7 +55,7 @@
 //   CreateTestChannel("test.google.com:443", "", true, true, creds);
 //   same as above
 //   CreateTestChannel("", "test.google.com:443", true, true, creds);
-std::shared_ptr<ChannelInterface> CreateTestChannel(
+std::shared_ptr<Channel> CreateTestChannel(
     const grpc::string& server, const grpc::string& override_hostname,
     bool enable_ssl, bool use_prod_roots,
     const std::shared_ptr<Credentials>& creds) {
@@ -80,7 +80,7 @@
   }
 }
 
-std::shared_ptr<ChannelInterface> CreateTestChannel(
+std::shared_ptr<Channel> CreateTestChannel(
     const grpc::string& server, const grpc::string& override_hostname,
     bool enable_ssl, bool use_prod_roots) {
   return CreateTestChannel(server, override_hostname, enable_ssl,
@@ -88,8 +88,8 @@
 }
 
 // Shortcut for end2end and interop tests.
-std::shared_ptr<ChannelInterface> CreateTestChannel(const grpc::string& server,
-                                                    bool enable_ssl) {
+std::shared_ptr<Channel> CreateTestChannel(const grpc::string& server,
+                                           bool enable_ssl) {
   return CreateTestChannel(server, "foo.test.google.fr", enable_ssl, false);
 }
 
diff --git a/test/cpp/util/create_test_channel.h b/test/cpp/util/create_test_channel.h
index 5f2609d..1263d4e 100644
--- a/test/cpp/util/create_test_channel.h
+++ b/test/cpp/util/create_test_channel.h
@@ -36,20 +36,19 @@
 
 #include <memory>
 
-#include <grpc++/config.h>
 #include <grpc++/credentials.h>
 
 namespace grpc {
-class ChannelInterface;
+class Channel;
 
-std::shared_ptr<ChannelInterface> CreateTestChannel(const grpc::string& server,
-                                                    bool enable_ssl);
+std::shared_ptr<Channel> CreateTestChannel(const grpc::string& server,
+                                           bool enable_ssl);
 
-std::shared_ptr<ChannelInterface> CreateTestChannel(
+std::shared_ptr<Channel> CreateTestChannel(
     const grpc::string& server, const grpc::string& override_hostname,
     bool enable_ssl, bool use_prod_roots);
 
-std::shared_ptr<ChannelInterface> CreateTestChannel(
+std::shared_ptr<Channel> CreateTestChannel(
     const grpc::string& server, const grpc::string& override_hostname,
     bool enable_ssl, bool use_prod_roots,
     const std::shared_ptr<Credentials>& creds);
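
All three CreateTestChannel overloads now return std::shared_ptr<Channel>, so callers can pass the result straight to a generated NewStub. A sketch under that assumption (MakeEchoStub and the address are placeholders; the echo TestService stub is the one the end2end tests in this tree use):

// Illustrative only: helper return type feeding a generated stub.
#include <memory>

#include <grpc++/channel.h>
#include "test/cpp/util/create_test_channel.h"
#include "test/cpp/util/echo.grpc.pb.h"

std::unique_ptr<grpc::cpp::test::util::TestService::Stub> MakeEchoStub() {
  std::shared_ptr<grpc::Channel> channel =
      grpc::CreateTestChannel("localhost:50051", false);
  return grpc::cpp::test::util::TestService::NewStub(channel);
}
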
diff --git a/test/cpp/util/grpc_cli.cc b/test/cpp/util/grpc_cli.cc
index 3c3baeb..746d67d 100644
--- a/test/cpp/util/grpc_cli.cc
+++ b/test/cpp/util/grpc_cli.cc
@@ -64,14 +64,13 @@
 #include <sstream>
 
 #include <gflags/gflags.h>
-#include "test/cpp/util/cli_call.h"
-#include "test/cpp/util/test_config.h"
-#include <grpc++/channel_arguments.h>
-#include <grpc++/channel_interface.h>
+#include <grpc/grpc.h>
+#include <grpc++/channel.h>
 #include <grpc++/create_channel.h>
 #include <grpc++/credentials.h>
 
-#include <grpc/grpc.h>
+#include "test/cpp/util/cli_call.h"
+#include "test/cpp/util/test_config.h"
 
 DEFINE_bool(enable_ssl, true, "Whether to use ssl/tls.");
 DEFINE_bool(use_auth, false, "Whether to create default google credentials.");
@@ -154,7 +153,7 @@
       creds = grpc::SslCredentials(grpc::SslCredentialsOptions());
     }
   }
-  std::shared_ptr<grpc::ChannelInterface> channel =
+  std::shared_ptr<grpc::Channel> channel =
       grpc::CreateChannel(server_address, creds, grpc::ChannelArguments());
 
   grpc::string response;
diff --git a/test/cpp/util/slice_test.cc b/test/cpp/util/slice_test.cc
index eb32849..de7ff03 100644
--- a/test/cpp/util/slice_test.cc
+++ b/test/cpp/util/slice_test.cc
@@ -31,7 +31,7 @@
  *
  */
 
-#include <grpc++/slice.h>
+#include <grpc++/support/slice.h>
 
 #include <grpc/support/slice.h>
 #include <gtest/gtest.h>
diff --git a/test/cpp/util/status_test.cc b/test/cpp/util/status_test.cc
index 17b92ab..837a6ba 100644
--- a/test/cpp/util/status_test.cc
+++ b/test/cpp/util/status_test.cc
@@ -31,7 +31,8 @@
  *
  */
 
-#include <grpc++/status.h>
+#include <grpc++/support/status.h>
+
 #include <grpc/status.h>
 #include <grpc/support/log.h>
 
diff --git a/test/cpp/util/time_test.cc b/test/cpp/util/time_test.cc
index 4cb6ec4..1e501df 100644
--- a/test/cpp/util/time_test.cc
+++ b/test/cpp/util/time_test.cc
@@ -32,7 +32,7 @@
  */
 
 #include <grpc/support/time.h>
-#include <grpc++/time.h>
+#include <grpc++/support/time.h>
 #include <gtest/gtest.h>
 
 using std::chrono::duration_cast;
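
Taken together, the test updates above follow one mapping from the old flat grpc++ headers to the new top-level channel.h plus generic/ and support/ layout. A compact reference, derived only from the renames applied in this patch:

// Old public include -> new location (as used throughout the hunks above).
#include <grpc++/channel.h>                    // was <grpc++/channel_interface.h>
#include <grpc++/generic/generic_stub.h>       // was <grpc++/generic_stub.h>
#include <grpc++/support/channel_arguments.h>  // was <grpc++/channel_arguments.h>
#include <grpc++/support/status.h>             // was <grpc++/status.h>
#include <grpc++/support/slice.h>              // was <grpc++/slice.h>
#include <grpc++/support/byte_buffer.h>        // was <grpc++/byte_buffer.h>
#include <grpc++/support/time.h>               // was <grpc++/time.h>
#include <grpc++/support/config.h>             // was <grpc++/config.h>
#include <grpc++/support/sync_stream.h>        // was part of <grpc++/stream.h>
#include <grpc++/support/async_stream.h>       // was part of <grpc++/stream.h>
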
diff --git a/tools/doxygen/Doxyfile.c++ b/tools/doxygen/Doxyfile.c++
index 790e637..7965284 100644
--- a/tools/doxygen/Doxyfile.c++
+++ b/tools/doxygen/Doxyfile.c++
@@ -760,25 +760,16 @@
 # spaces.
 # Note: If this tag is empty the current directory is searched.
 
-INPUT                  = include/grpc++/async_generic_service.h \
-include/grpc++/async_unary_call.h \
-include/grpc++/auth_context.h \
-include/grpc++/byte_buffer.h \
-include/grpc++/channel_arguments.h \
-include/grpc++/channel_interface.h \
+INPUT                  = include/grpc++/channel.h \
 include/grpc++/client_context.h \
 include/grpc++/completion_queue.h \
-include/grpc++/config.h \
-include/grpc++/config_protobuf.h \
 include/grpc++/create_channel.h \
 include/grpc++/credentials.h \
-include/grpc++/dynamic_thread_pool.h \
-include/grpc++/fixed_size_thread_pool.h \
-include/grpc++/generic_stub.h \
+include/grpc++/generic/async_generic_service.h \
+include/grpc++/generic/generic_stub.h \
 include/grpc++/impl/call.h \
 include/grpc++/impl/client_unary_call.h \
 include/grpc++/impl/grpc_library.h \
-include/grpc++/impl/internal_stub.h \
 include/grpc++/impl/proto_utils.h \
 include/grpc++/impl/rpc_method.h \
 include/grpc++/impl/rpc_service_method.h \
@@ -794,13 +785,19 @@
 include/grpc++/server_builder.h \
 include/grpc++/server_context.h \
 include/grpc++/server_credentials.h \
-include/grpc++/slice.h \
-include/grpc++/status.h \
-include/grpc++/status_code_enum.h \
-include/grpc++/stream.h \
-include/grpc++/stub_options.h \
-include/grpc++/thread_pool_interface.h \
-include/grpc++/time.h
+include/grpc++/support/async_stream.h \
+include/grpc++/support/async_unary_call.h \
+include/grpc++/support/auth_context.h \
+include/grpc++/support/byte_buffer.h \
+include/grpc++/support/channel_arguments.h \
+include/grpc++/support/config.h \
+include/grpc++/support/config_protobuf.h \
+include/grpc++/support/slice.h \
+include/grpc++/support/status.h \
+include/grpc++/support/status_code_enum.h \
+include/grpc++/support/stub_options.h \
+include/grpc++/support/sync_stream.h \
+include/grpc++/support/time.h
 
 # This tag can be used to specify the character encoding of the source files
 # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
diff --git a/tools/doxygen/Doxyfile.c++.internal b/tools/doxygen/Doxyfile.c++.internal
index cd1279e..0ce703a 100644
--- a/tools/doxygen/Doxyfile.c++.internal
+++ b/tools/doxygen/Doxyfile.c++.internal
@@ -760,25 +760,16 @@
 # spaces.
 # Note: If this tag is empty the current directory is searched.
 
-INPUT                  = include/grpc++/async_generic_service.h \
-include/grpc++/async_unary_call.h \
-include/grpc++/auth_context.h \
-include/grpc++/byte_buffer.h \
-include/grpc++/channel_arguments.h \
-include/grpc++/channel_interface.h \
+INPUT                  = include/grpc++/channel.h \
 include/grpc++/client_context.h \
 include/grpc++/completion_queue.h \
-include/grpc++/config.h \
-include/grpc++/config_protobuf.h \
 include/grpc++/create_channel.h \
 include/grpc++/credentials.h \
-include/grpc++/dynamic_thread_pool.h \
-include/grpc++/fixed_size_thread_pool.h \
-include/grpc++/generic_stub.h \
+include/grpc++/generic/async_generic_service.h \
+include/grpc++/generic/generic_stub.h \
 include/grpc++/impl/call.h \
 include/grpc++/impl/client_unary_call.h \
 include/grpc++/impl/grpc_library.h \
-include/grpc++/impl/internal_stub.h \
 include/grpc++/impl/proto_utils.h \
 include/grpc++/impl/rpc_method.h \
 include/grpc++/impl/rpc_service_method.h \
@@ -794,18 +785,27 @@
 include/grpc++/server_builder.h \
 include/grpc++/server_context.h \
 include/grpc++/server_credentials.h \
-include/grpc++/slice.h \
-include/grpc++/status.h \
-include/grpc++/status_code_enum.h \
-include/grpc++/stream.h \
-include/grpc++/stub_options.h \
-include/grpc++/thread_pool_interface.h \
-include/grpc++/time.h \
+include/grpc++/support/async_stream.h \
+include/grpc++/support/async_unary_call.h \
+include/grpc++/support/auth_context.h \
+include/grpc++/support/byte_buffer.h \
+include/grpc++/support/channel_arguments.h \
+include/grpc++/support/config.h \
+include/grpc++/support/config_protobuf.h \
+include/grpc++/support/slice.h \
+include/grpc++/support/status.h \
+include/grpc++/support/status_code_enum.h \
+include/grpc++/support/stub_options.h \
+include/grpc++/support/sync_stream.h \
+include/grpc++/support/time.h \
 src/cpp/client/secure_credentials.h \
 src/cpp/common/secure_auth_context.h \
 src/cpp/server/secure_server_credentials.h \
-src/cpp/client/channel.h \
+src/cpp/client/create_channel_internal.h \
 src/cpp/common/create_auth_context.h \
+src/cpp/server/dynamic_thread_pool.h \
+src/cpp/server/fixed_size_thread_pool.h \
+src/cpp/server/thread_pool_interface.h \
 src/cpp/client/secure_channel_arguments.cc \
 src/cpp/client/secure_credentials.cc \
 src/cpp/common/auth_property_iterator.cc \
@@ -816,10 +816,10 @@
 src/cpp/client/channel_arguments.cc \
 src/cpp/client/client_context.cc \
 src/cpp/client/create_channel.cc \
+src/cpp/client/create_channel_internal.cc \
 src/cpp/client/credentials.cc \
 src/cpp/client/generic_stub.cc \
 src/cpp/client/insecure_credentials.cc \
-src/cpp/client/internal_stub.cc \
 src/cpp/common/call.cc \
 src/cpp/common/completion_queue.cc \
 src/cpp/common/rpc_method.cc \
diff --git a/tools/run_tests/sources_and_headers.json b/tools/run_tests/sources_and_headers.json
index 50f0785..0fe5f68 100644
--- a/tools/run_tests/sources_and_headers.json
+++ b/tools/run_tests/sources_and_headers.json
@@ -468,6 +468,20 @@
     ], 
     "headers": [], 
     "language": "c", 
+    "name": "grpc_channel_args_test", 
+    "src": [
+      "test/core/channel/channel_args_test.c"
+    ]
+  }, 
+  {
+    "deps": [
+      "gpr", 
+      "gpr_test_util", 
+      "grpc", 
+      "grpc_test_util"
+    ], 
+    "headers": [], 
+    "language": "c", 
     "name": "grpc_channel_stack_test", 
     "src": [
       "test/core/channel/channel_stack_test.c"
@@ -1189,21 +1203,6 @@
       "gpr_test_util", 
       "grpc", 
       "grpc++", 
-      "grpc_test_util"
-    ], 
-    "headers": [], 
-    "language": "c++", 
-    "name": "dynamic_thread_pool_test", 
-    "src": [
-      "test/cpp/server/dynamic_thread_pool_test.cc"
-    ]
-  }, 
-  {
-    "deps": [
-      "gpr", 
-      "gpr_test_util", 
-      "grpc", 
-      "grpc++", 
       "grpc++_test_util", 
       "grpc_test_util"
     ], 
@@ -1220,21 +1219,6 @@
       "gpr_test_util", 
       "grpc", 
       "grpc++", 
-      "grpc_test_util"
-    ], 
-    "headers": [], 
-    "language": "c++", 
-    "name": "fixed_size_thread_pool_test", 
-    "src": [
-      "test/cpp/server/fixed_size_thread_pool_test.cc"
-    ]
-  }, 
-  {
-    "deps": [
-      "gpr", 
-      "gpr_test_util", 
-      "grpc", 
-      "grpc++", 
       "grpc++_test_util", 
       "grpc_test_util"
     ], 
@@ -1627,8 +1611,7 @@
       "grpc", 
       "grpc++", 
       "grpc++_test_util", 
-      "grpc_test_util", 
-      "grpc_zookeeper"
+      "grpc_test_util"
     ], 
     "headers": [], 
     "language": "c++", 
@@ -13118,25 +13101,16 @@
       "grpc"
     ], 
     "headers": [
-      "include/grpc++/async_generic_service.h", 
-      "include/grpc++/async_unary_call.h", 
-      "include/grpc++/auth_context.h", 
-      "include/grpc++/byte_buffer.h", 
-      "include/grpc++/channel_arguments.h", 
-      "include/grpc++/channel_interface.h", 
+      "include/grpc++/channel.h", 
       "include/grpc++/client_context.h", 
       "include/grpc++/completion_queue.h", 
-      "include/grpc++/config.h", 
-      "include/grpc++/config_protobuf.h", 
       "include/grpc++/create_channel.h", 
       "include/grpc++/credentials.h", 
-      "include/grpc++/dynamic_thread_pool.h", 
-      "include/grpc++/fixed_size_thread_pool.h", 
-      "include/grpc++/generic_stub.h", 
+      "include/grpc++/generic/async_generic_service.h", 
+      "include/grpc++/generic/generic_stub.h", 
       "include/grpc++/impl/call.h", 
       "include/grpc++/impl/client_unary_call.h", 
       "include/grpc++/impl/grpc_library.h", 
-      "include/grpc++/impl/internal_stub.h", 
       "include/grpc++/impl/proto_utils.h", 
       "include/grpc++/impl/rpc_method.h", 
       "include/grpc++/impl/rpc_service_method.h", 
@@ -13152,41 +13126,41 @@
       "include/grpc++/server_builder.h", 
       "include/grpc++/server_context.h", 
       "include/grpc++/server_credentials.h", 
-      "include/grpc++/slice.h", 
-      "include/grpc++/status.h", 
-      "include/grpc++/status_code_enum.h", 
-      "include/grpc++/stream.h", 
-      "include/grpc++/stub_options.h", 
-      "include/grpc++/thread_pool_interface.h", 
-      "include/grpc++/time.h", 
-      "src/cpp/client/channel.h", 
+      "include/grpc++/support/async_stream.h", 
+      "include/grpc++/support/async_unary_call.h", 
+      "include/grpc++/support/auth_context.h", 
+      "include/grpc++/support/byte_buffer.h", 
+      "include/grpc++/support/channel_arguments.h", 
+      "include/grpc++/support/config.h", 
+      "include/grpc++/support/config_protobuf.h", 
+      "include/grpc++/support/slice.h", 
+      "include/grpc++/support/status.h", 
+      "include/grpc++/support/status_code_enum.h", 
+      "include/grpc++/support/stub_options.h", 
+      "include/grpc++/support/sync_stream.h", 
+      "include/grpc++/support/time.h", 
+      "src/cpp/client/create_channel_internal.h", 
       "src/cpp/client/secure_credentials.h", 
       "src/cpp/common/create_auth_context.h", 
       "src/cpp/common/secure_auth_context.h", 
-      "src/cpp/server/secure_server_credentials.h"
+      "src/cpp/server/dynamic_thread_pool.h", 
+      "src/cpp/server/fixed_size_thread_pool.h", 
+      "src/cpp/server/secure_server_credentials.h", 
+      "src/cpp/server/thread_pool_interface.h"
     ], 
     "language": "c++", 
     "name": "grpc++", 
     "src": [
-      "include/grpc++/async_generic_service.h", 
-      "include/grpc++/async_unary_call.h", 
-      "include/grpc++/auth_context.h", 
-      "include/grpc++/byte_buffer.h", 
-      "include/grpc++/channel_arguments.h", 
-      "include/grpc++/channel_interface.h", 
+      "include/grpc++/channel.h", 
       "include/grpc++/client_context.h", 
       "include/grpc++/completion_queue.h", 
-      "include/grpc++/config.h", 
-      "include/grpc++/config_protobuf.h", 
       "include/grpc++/create_channel.h", 
       "include/grpc++/credentials.h", 
-      "include/grpc++/dynamic_thread_pool.h", 
-      "include/grpc++/fixed_size_thread_pool.h", 
-      "include/grpc++/generic_stub.h", 
+      "include/grpc++/generic/async_generic_service.h", 
+      "include/grpc++/generic/generic_stub.h", 
       "include/grpc++/impl/call.h", 
       "include/grpc++/impl/client_unary_call.h", 
       "include/grpc++/impl/grpc_library.h", 
-      "include/grpc++/impl/internal_stub.h", 
       "include/grpc++/impl/proto_utils.h", 
       "include/grpc++/impl/rpc_method.h", 
       "include/grpc++/impl/rpc_service_method.h", 
@@ -13202,22 +13176,28 @@
       "include/grpc++/server_builder.h", 
       "include/grpc++/server_context.h", 
       "include/grpc++/server_credentials.h", 
-      "include/grpc++/slice.h", 
-      "include/grpc++/status.h", 
-      "include/grpc++/status_code_enum.h", 
-      "include/grpc++/stream.h", 
-      "include/grpc++/stub_options.h", 
-      "include/grpc++/thread_pool_interface.h", 
-      "include/grpc++/time.h", 
+      "include/grpc++/support/async_stream.h", 
+      "include/grpc++/support/async_unary_call.h", 
+      "include/grpc++/support/auth_context.h", 
+      "include/grpc++/support/byte_buffer.h", 
+      "include/grpc++/support/channel_arguments.h", 
+      "include/grpc++/support/config.h", 
+      "include/grpc++/support/config_protobuf.h", 
+      "include/grpc++/support/slice.h", 
+      "include/grpc++/support/status.h", 
+      "include/grpc++/support/status_code_enum.h", 
+      "include/grpc++/support/stub_options.h", 
+      "include/grpc++/support/sync_stream.h", 
+      "include/grpc++/support/time.h", 
       "src/cpp/client/channel.cc", 
-      "src/cpp/client/channel.h", 
       "src/cpp/client/channel_arguments.cc", 
       "src/cpp/client/client_context.cc", 
       "src/cpp/client/create_channel.cc", 
+      "src/cpp/client/create_channel_internal.cc", 
+      "src/cpp/client/create_channel_internal.h", 
       "src/cpp/client/credentials.cc", 
       "src/cpp/client/generic_stub.cc", 
       "src/cpp/client/insecure_credentials.cc", 
-      "src/cpp/client/internal_stub.cc", 
       "src/cpp/client/secure_channel_arguments.cc", 
       "src/cpp/client/secure_credentials.cc", 
       "src/cpp/client/secure_credentials.h", 
@@ -13233,7 +13213,9 @@
       "src/cpp/server/async_generic_service.cc", 
       "src/cpp/server/create_default_thread_pool.cc", 
       "src/cpp/server/dynamic_thread_pool.cc", 
+      "src/cpp/server/dynamic_thread_pool.h", 
       "src/cpp/server/fixed_size_thread_pool.cc", 
+      "src/cpp/server/fixed_size_thread_pool.h", 
       "src/cpp/server/insecure_server_credentials.cc", 
       "src/cpp/server/secure_server_credentials.cc", 
       "src/cpp/server/secure_server_credentials.h", 
@@ -13241,6 +13223,7 @@
       "src/cpp/server/server_builder.cc", 
       "src/cpp/server/server_context.cc", 
       "src/cpp/server/server_credentials.cc", 
+      "src/cpp/server/thread_pool_interface.h", 
       "src/cpp/util/byte_buffer.cc", 
       "src/cpp/util/slice.cc", 
       "src/cpp/util/status.cc", 
@@ -13292,25 +13275,16 @@
       "grpc_unsecure"
     ], 
     "headers": [
-      "include/grpc++/async_generic_service.h", 
-      "include/grpc++/async_unary_call.h", 
-      "include/grpc++/auth_context.h", 
-      "include/grpc++/byte_buffer.h", 
-      "include/grpc++/channel_arguments.h", 
-      "include/grpc++/channel_interface.h", 
+      "include/grpc++/channel.h", 
       "include/grpc++/client_context.h", 
       "include/grpc++/completion_queue.h", 
-      "include/grpc++/config.h", 
-      "include/grpc++/config_protobuf.h", 
       "include/grpc++/create_channel.h", 
       "include/grpc++/credentials.h", 
-      "include/grpc++/dynamic_thread_pool.h", 
-      "include/grpc++/fixed_size_thread_pool.h", 
-      "include/grpc++/generic_stub.h", 
+      "include/grpc++/generic/async_generic_service.h", 
+      "include/grpc++/generic/generic_stub.h", 
       "include/grpc++/impl/call.h", 
       "include/grpc++/impl/client_unary_call.h", 
       "include/grpc++/impl/grpc_library.h", 
-      "include/grpc++/impl/internal_stub.h", 
       "include/grpc++/impl/proto_utils.h", 
       "include/grpc++/impl/rpc_method.h", 
       "include/grpc++/impl/rpc_service_method.h", 
@@ -13326,38 +13300,38 @@
       "include/grpc++/server_builder.h", 
       "include/grpc++/server_context.h", 
       "include/grpc++/server_credentials.h", 
-      "include/grpc++/slice.h", 
-      "include/grpc++/status.h", 
-      "include/grpc++/status_code_enum.h", 
-      "include/grpc++/stream.h", 
-      "include/grpc++/stub_options.h", 
-      "include/grpc++/thread_pool_interface.h", 
-      "include/grpc++/time.h", 
-      "src/cpp/client/channel.h", 
-      "src/cpp/common/create_auth_context.h"
+      "include/grpc++/support/async_stream.h", 
+      "include/grpc++/support/async_unary_call.h", 
+      "include/grpc++/support/auth_context.h", 
+      "include/grpc++/support/byte_buffer.h", 
+      "include/grpc++/support/channel_arguments.h", 
+      "include/grpc++/support/config.h", 
+      "include/grpc++/support/config_protobuf.h", 
+      "include/grpc++/support/slice.h", 
+      "include/grpc++/support/status.h", 
+      "include/grpc++/support/status_code_enum.h", 
+      "include/grpc++/support/stub_options.h", 
+      "include/grpc++/support/sync_stream.h", 
+      "include/grpc++/support/time.h", 
+      "src/cpp/client/create_channel_internal.h", 
+      "src/cpp/common/create_auth_context.h", 
+      "src/cpp/server/dynamic_thread_pool.h", 
+      "src/cpp/server/fixed_size_thread_pool.h", 
+      "src/cpp/server/thread_pool_interface.h"
     ], 
     "language": "c++", 
     "name": "grpc++_unsecure", 
     "src": [
-      "include/grpc++/async_generic_service.h", 
-      "include/grpc++/async_unary_call.h", 
-      "include/grpc++/auth_context.h", 
-      "include/grpc++/byte_buffer.h", 
-      "include/grpc++/channel_arguments.h", 
-      "include/grpc++/channel_interface.h", 
+      "include/grpc++/channel.h", 
       "include/grpc++/client_context.h", 
       "include/grpc++/completion_queue.h", 
-      "include/grpc++/config.h", 
-      "include/grpc++/config_protobuf.h", 
       "include/grpc++/create_channel.h", 
       "include/grpc++/credentials.h", 
-      "include/grpc++/dynamic_thread_pool.h", 
-      "include/grpc++/fixed_size_thread_pool.h", 
-      "include/grpc++/generic_stub.h", 
+      "include/grpc++/generic/async_generic_service.h", 
+      "include/grpc++/generic/generic_stub.h", 
       "include/grpc++/impl/call.h", 
       "include/grpc++/impl/client_unary_call.h", 
       "include/grpc++/impl/grpc_library.h", 
-      "include/grpc++/impl/internal_stub.h", 
       "include/grpc++/impl/proto_utils.h", 
       "include/grpc++/impl/rpc_method.h", 
       "include/grpc++/impl/rpc_service_method.h", 
@@ -13373,22 +13347,28 @@
       "include/grpc++/server_builder.h", 
       "include/grpc++/server_context.h", 
       "include/grpc++/server_credentials.h", 
-      "include/grpc++/slice.h", 
-      "include/grpc++/status.h", 
-      "include/grpc++/status_code_enum.h", 
-      "include/grpc++/stream.h", 
-      "include/grpc++/stub_options.h", 
-      "include/grpc++/thread_pool_interface.h", 
-      "include/grpc++/time.h", 
+      "include/grpc++/support/async_stream.h", 
+      "include/grpc++/support/async_unary_call.h", 
+      "include/grpc++/support/auth_context.h", 
+      "include/grpc++/support/byte_buffer.h", 
+      "include/grpc++/support/channel_arguments.h", 
+      "include/grpc++/support/config.h", 
+      "include/grpc++/support/config_protobuf.h", 
+      "include/grpc++/support/slice.h", 
+      "include/grpc++/support/status.h", 
+      "include/grpc++/support/status_code_enum.h", 
+      "include/grpc++/support/stub_options.h", 
+      "include/grpc++/support/sync_stream.h", 
+      "include/grpc++/support/time.h", 
       "src/cpp/client/channel.cc", 
-      "src/cpp/client/channel.h", 
       "src/cpp/client/channel_arguments.cc", 
       "src/cpp/client/client_context.cc", 
       "src/cpp/client/create_channel.cc", 
+      "src/cpp/client/create_channel_internal.cc", 
+      "src/cpp/client/create_channel_internal.h", 
       "src/cpp/client/credentials.cc", 
       "src/cpp/client/generic_stub.cc", 
       "src/cpp/client/insecure_credentials.cc", 
-      "src/cpp/client/internal_stub.cc", 
       "src/cpp/common/call.cc", 
       "src/cpp/common/completion_queue.cc", 
       "src/cpp/common/create_auth_context.h", 
@@ -13398,12 +13378,15 @@
       "src/cpp/server/async_generic_service.cc", 
       "src/cpp/server/create_default_thread_pool.cc", 
       "src/cpp/server/dynamic_thread_pool.cc", 
+      "src/cpp/server/dynamic_thread_pool.h", 
       "src/cpp/server/fixed_size_thread_pool.cc", 
+      "src/cpp/server/fixed_size_thread_pool.h", 
       "src/cpp/server/insecure_server_credentials.cc", 
       "src/cpp/server/server.cc", 
       "src/cpp/server/server_builder.cc", 
       "src/cpp/server/server_context.cc", 
       "src/cpp/server/server_credentials.cc", 
+      "src/cpp/server/thread_pool_interface.h", 
       "src/cpp/util/byte_buffer.cc", 
       "src/cpp/util/slice.cc", 
       "src/cpp/util/status.cc", 
@@ -13413,8 +13396,8 @@
   {
     "deps": [], 
     "headers": [
-      "include/grpc++/config.h", 
-      "include/grpc++/config_protobuf.h", 
+      "include/grpc++/support/config.h", 
+      "include/grpc++/support/config_protobuf.h", 
       "src/compiler/config.h", 
       "src/compiler/cpp_generator.h", 
       "src/compiler/cpp_generator_helpers.h", 
@@ -13432,8 +13415,8 @@
     "language": "c++", 
     "name": "grpc_plugin_support", 
     "src": [
-      "include/grpc++/config.h", 
-      "include/grpc++/config_protobuf.h", 
+      "include/grpc++/support/config.h", 
+      "include/grpc++/support/config_protobuf.h", 
       "src/compiler/config.h", 
       "src/compiler/cpp_generator.cc", 
       "src/compiler/cpp_generator.h", 
diff --git a/tools/run_tests/tests.json b/tools/run_tests/tests.json
index 127b1df..0f5968e 100644
--- a/tools/run_tests/tests.json
+++ b/tools/run_tests/tests.json
@@ -577,6 +577,24 @@
     "exclude_configs": [], 
     "flaky": false, 
     "language": "c", 
+    "name": "grpc_channel_args_test", 
+    "platforms": [
+      "linux", 
+      "mac", 
+      "posix", 
+      "windows"
+    ]
+  }, 
+  {
+    "ci_platforms": [
+      "linux", 
+      "mac", 
+      "posix", 
+      "windows"
+    ], 
+    "exclude_configs": [], 
+    "flaky": false, 
+    "language": "c", 
     "name": "grpc_channel_stack_test", 
     "platforms": [
       "linux", 
@@ -1349,24 +1367,6 @@
     "exclude_configs": [], 
     "flaky": false, 
     "language": "c++", 
-    "name": "dynamic_thread_pool_test", 
-    "platforms": [
-      "linux", 
-      "mac", 
-      "posix", 
-      "windows"
-    ]
-  }, 
-  {
-    "ci_platforms": [
-      "linux", 
-      "mac", 
-      "posix", 
-      "windows"
-    ], 
-    "exclude_configs": [], 
-    "flaky": false, 
-    "language": "c++", 
     "name": "end2end_test", 
     "platforms": [
       "linux", 
@@ -1385,24 +1385,6 @@
     "exclude_configs": [], 
     "flaky": false, 
     "language": "c++", 
-    "name": "fixed_size_thread_pool_test", 
-    "platforms": [
-      "linux", 
-      "mac", 
-      "posix", 
-      "windows"
-    ]
-  }, 
-  {
-    "ci_platforms": [
-      "linux", 
-      "mac", 
-      "posix", 
-      "windows"
-    ], 
-    "exclude_configs": [], 
-    "flaky": false, 
-    "language": "c++", 
     "name": "generic_end2end_test", 
     "platforms": [
       "linux", 
diff --git a/vsprojects/Grpc.mak b/vsprojects/Grpc.mak
index 662de78..6bfa594 100644
--- a/vsprojects/Grpc.mak
+++ b/vsprojects/Grpc.mak
@@ -80,10 +80,10 @@
 build_libs: build_gpr build_gpr_test_util build_grpc build_grpc_test_util build_grpc_test_util_unsecure build_grpc_unsecure Debug\grpc_zookeeper.lib Debug\reconnect_server.lib build_grpc++ Debug\grpc++_test_config.lib Debug\grpc++_test_util.lib build_grpc++_unsecure Debug\interop_client_helper.lib Debug\interop_client_main.lib Debug\interop_server_helper.lib Debug\interop_server_main.lib Debug\qps.lib Debug\end2end_fixture_chttp2_fake_security.lib Debug\end2end_fixture_chttp2_fullstack.lib Debug\end2end_fixture_chttp2_fullstack_compression.lib Debug\end2end_fixture_chttp2_fullstack_with_proxy.lib Debug\end2end_fixture_chttp2_simple_ssl_fullstack.lib Debug\end2end_fixture_chttp2_simple_ssl_fullstack_with_proxy.lib Debug\end2end_fixture_chttp2_simple_ssl_with_oauth2_fullstack.lib Debug\end2end_fixture_chttp2_socket_pair.lib Debug\end2end_fixture_chttp2_socket_pair_one_byte_at_a_time.lib Debug\end2end_fixture_chttp2_socket_pair_with_grpc_trace.lib Debug\end2end_test_bad_hostname.lib Debug\end2end_test_cancel_after_accept.lib Debug\end2end_test_cancel_after_accept_and_writes_closed.lib Debug\end2end_test_cancel_after_invoke.lib Debug\end2end_test_cancel_before_invoke.lib Debug\end2end_test_cancel_in_a_vacuum.lib Debug\end2end_test_census_simple_request.lib Debug\end2end_test_channel_connectivity.lib Debug\end2end_test_default_host.lib Debug\end2end_test_disappearing_server.lib Debug\end2end_test_early_server_shutdown_finishes_inflight_calls.lib Debug\end2end_test_early_server_shutdown_finishes_tags.lib Debug\end2end_test_empty_batch.lib Debug\end2end_test_graceful_server_shutdown.lib Debug\end2end_test_invoke_large_request.lib Debug\end2end_test_max_concurrent_streams.lib Debug\end2end_test_max_message_length.lib Debug\end2end_test_no_op.lib Debug\end2end_test_ping_pong_streaming.lib Debug\end2end_test_registered_call.lib Debug\end2end_test_request_response_with_binary_metadata_and_payload.lib Debug\end2end_test_request_response_with_metadata_and_payload.lib Debug\end2end_test_request_response_with_payload.lib Debug\end2end_test_request_response_with_payload_and_call_creds.lib Debug\end2end_test_request_response_with_trailing_metadata_and_payload.lib Debug\end2end_test_request_with_compressed_payload.lib Debug\end2end_test_request_with_flags.lib Debug\end2end_test_request_with_large_metadata.lib Debug\end2end_test_request_with_payload.lib Debug\end2end_test_server_finishes_request.lib Debug\end2end_test_simple_delayed_request.lib Debug\end2end_test_simple_request.lib Debug\end2end_test_simple_request_with_high_initial_sequence_number.lib Debug\end2end_certs.lib Debug\bad_client_test.lib 
 buildtests: buildtests_c buildtests_cxx
 
-buildtests_c: alarm_heap_test.exe alarm_list_test.exe alarm_test.exe alpn_test.exe bin_encoder_test.exe chttp2_status_conversion_test.exe chttp2_stream_encoder_test.exe chttp2_stream_map_test.exe compression_test.exe fling_client.exe fling_server.exe gpr_cmdline_test.exe gpr_env_test.exe gpr_file_test.exe gpr_histogram_test.exe gpr_host_port_test.exe gpr_log_test.exe gpr_slice_buffer_test.exe gpr_slice_test.exe gpr_stack_lockfree_test.exe gpr_string_test.exe gpr_sync_test.exe gpr_thd_test.exe gpr_time_test.exe gpr_tls_test.exe gpr_useful_test.exe grpc_auth_context_test.exe grpc_base64_test.exe grpc_byte_buffer_reader_test.exe grpc_channel_stack_test.exe grpc_completion_queue_test.exe grpc_credentials_test.exe grpc_json_token_test.exe grpc_jwt_verifier_test.exe grpc_security_connector_test.exe grpc_stream_op_test.exe hpack_parser_test.exe hpack_table_test.exe httpcli_format_request_test.exe httpcli_parser_test.exe json_rewrite.exe json_rewrite_test.exe json_test.exe lame_client_test.exe message_compress_test.exe multi_init_test.exe multiple_server_queues_test.exe murmur_hash_test.exe no_server_test.exe resolve_address_test.exe secure_endpoint_test.exe sockaddr_utils_test.exe time_averaged_stats_test.exe timeout_encoding_test.exe timers_test.exe transport_metadata_test.exe transport_security_test.exe uri_parser_test.exe chttp2_fake_security_bad_hostname_test.exe chttp2_fake_security_cancel_after_accept_test.exe chttp2_fake_security_cancel_after_accept_and_writes_closed_test.exe chttp2_fake_security_cancel_after_invoke_test.exe chttp2_fake_security_cancel_before_invoke_test.exe chttp2_fake_security_cancel_in_a_vacuum_test.exe chttp2_fake_security_census_simple_request_test.exe chttp2_fake_security_channel_connectivity_test.exe chttp2_fake_security_default_host_test.exe chttp2_fake_security_disappearing_server_test.exe chttp2_fake_security_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_fake_security_early_server_shutdown_finishes_tags_test.exe chttp2_fake_security_empty_batch_test.exe chttp2_fake_security_graceful_server_shutdown_test.exe chttp2_fake_security_invoke_large_request_test.exe chttp2_fake_security_max_concurrent_streams_test.exe chttp2_fake_security_max_message_length_test.exe chttp2_fake_security_no_op_test.exe chttp2_fake_security_ping_pong_streaming_test.exe chttp2_fake_security_registered_call_test.exe chttp2_fake_security_request_response_with_binary_metadata_and_payload_test.exe chttp2_fake_security_request_response_with_metadata_and_payload_test.exe chttp2_fake_security_request_response_with_payload_test.exe chttp2_fake_security_request_response_with_payload_and_call_creds_test.exe chttp2_fake_security_request_response_with_trailing_metadata_and_payload_test.exe chttp2_fake_security_request_with_compressed_payload_test.exe chttp2_fake_security_request_with_flags_test.exe chttp2_fake_security_request_with_large_metadata_test.exe chttp2_fake_security_request_with_payload_test.exe chttp2_fake_security_server_finishes_request_test.exe chttp2_fake_security_simple_delayed_request_test.exe chttp2_fake_security_simple_request_test.exe chttp2_fake_security_simple_request_with_high_initial_sequence_number_test.exe chttp2_fullstack_bad_hostname_test.exe chttp2_fullstack_cancel_after_accept_test.exe chttp2_fullstack_cancel_after_accept_and_writes_closed_test.exe chttp2_fullstack_cancel_after_invoke_test.exe chttp2_fullstack_cancel_before_invoke_test.exe chttp2_fullstack_cancel_in_a_vacuum_test.exe chttp2_fullstack_census_simple_request_test.exe 
chttp2_fullstack_channel_connectivity_test.exe chttp2_fullstack_default_host_test.exe chttp2_fullstack_disappearing_server_test.exe chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_fullstack_early_server_shutdown_finishes_tags_test.exe chttp2_fullstack_empty_batch_test.exe chttp2_fullstack_graceful_server_shutdown_test.exe chttp2_fullstack_invoke_large_request_test.exe chttp2_fullstack_max_concurrent_streams_test.exe chttp2_fullstack_max_message_length_test.exe chttp2_fullstack_no_op_test.exe chttp2_fullstack_ping_pong_streaming_test.exe chttp2_fullstack_registered_call_test.exe chttp2_fullstack_request_response_with_binary_metadata_and_payload_test.exe chttp2_fullstack_request_response_with_metadata_and_payload_test.exe chttp2_fullstack_request_response_with_payload_test.exe chttp2_fullstack_request_response_with_payload_and_call_creds_test.exe chttp2_fullstack_request_response_with_trailing_metadata_and_payload_test.exe chttp2_fullstack_request_with_compressed_payload_test.exe chttp2_fullstack_request_with_flags_test.exe chttp2_fullstack_request_with_large_metadata_test.exe chttp2_fullstack_request_with_payload_test.exe chttp2_fullstack_server_finishes_request_test.exe chttp2_fullstack_simple_delayed_request_test.exe chttp2_fullstack_simple_request_test.exe chttp2_fullstack_simple_request_with_high_initial_sequence_number_test.exe chttp2_fullstack_compression_bad_hostname_test.exe chttp2_fullstack_compression_cancel_after_accept_test.exe chttp2_fullstack_compression_cancel_after_accept_and_writes_closed_test.exe chttp2_fullstack_compression_cancel_after_invoke_test.exe chttp2_fullstack_compression_cancel_before_invoke_test.exe chttp2_fullstack_compression_cancel_in_a_vacuum_test.exe chttp2_fullstack_compression_census_simple_request_test.exe chttp2_fullstack_compression_channel_connectivity_test.exe chttp2_fullstack_compression_default_host_test.exe chttp2_fullstack_compression_disappearing_server_test.exe chttp2_fullstack_compression_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_fullstack_compression_early_server_shutdown_finishes_tags_test.exe chttp2_fullstack_compression_empty_batch_test.exe chttp2_fullstack_compression_graceful_server_shutdown_test.exe chttp2_fullstack_compression_invoke_large_request_test.exe chttp2_fullstack_compression_max_concurrent_streams_test.exe chttp2_fullstack_compression_max_message_length_test.exe chttp2_fullstack_compression_no_op_test.exe chttp2_fullstack_compression_ping_pong_streaming_test.exe chttp2_fullstack_compression_registered_call_test.exe chttp2_fullstack_compression_request_response_with_binary_metadata_and_payload_test.exe chttp2_fullstack_compression_request_response_with_metadata_and_payload_test.exe chttp2_fullstack_compression_request_response_with_payload_test.exe chttp2_fullstack_compression_request_response_with_payload_and_call_creds_test.exe chttp2_fullstack_compression_request_response_with_trailing_metadata_and_payload_test.exe chttp2_fullstack_compression_request_with_compressed_payload_test.exe chttp2_fullstack_compression_request_with_flags_test.exe chttp2_fullstack_compression_request_with_large_metadata_test.exe chttp2_fullstack_compression_request_with_payload_test.exe chttp2_fullstack_compression_server_finishes_request_test.exe chttp2_fullstack_compression_simple_delayed_request_test.exe chttp2_fullstack_compression_simple_request_test.exe chttp2_fullstack_compression_simple_request_with_high_initial_sequence_number_test.exe chttp2_fullstack_with_proxy_bad_hostname_test.exe 
chttp2_fullstack_with_proxy_cancel_after_accept_test.exe chttp2_fullstack_with_proxy_cancel_after_accept_and_writes_closed_test.exe chttp2_fullstack_with_proxy_cancel_after_invoke_test.exe chttp2_fullstack_with_proxy_cancel_before_invoke_test.exe chttp2_fullstack_with_proxy_cancel_in_a_vacuum_test.exe chttp2_fullstack_with_proxy_census_simple_request_test.exe chttp2_fullstack_with_proxy_default_host_test.exe chttp2_fullstack_with_proxy_disappearing_server_test.exe chttp2_fullstack_with_proxy_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_fullstack_with_proxy_early_server_shutdown_finishes_tags_test.exe chttp2_fullstack_with_proxy_empty_batch_test.exe chttp2_fullstack_with_proxy_graceful_server_shutdown_test.exe chttp2_fullstack_with_proxy_invoke_large_request_test.exe chttp2_fullstack_with_proxy_max_message_length_test.exe chttp2_fullstack_with_proxy_no_op_test.exe chttp2_fullstack_with_proxy_ping_pong_streaming_test.exe chttp2_fullstack_with_proxy_registered_call_test.exe chttp2_fullstack_with_proxy_request_response_with_binary_metadata_and_payload_test.exe chttp2_fullstack_with_proxy_request_response_with_metadata_and_payload_test.exe chttp2_fullstack_with_proxy_request_response_with_payload_test.exe chttp2_fullstack_with_proxy_request_response_with_payload_and_call_creds_test.exe chttp2_fullstack_with_proxy_request_response_with_trailing_metadata_and_payload_test.exe chttp2_fullstack_with_proxy_request_with_large_metadata_test.exe chttp2_fullstack_with_proxy_request_with_payload_test.exe chttp2_fullstack_with_proxy_server_finishes_request_test.exe chttp2_fullstack_with_proxy_simple_delayed_request_test.exe chttp2_fullstack_with_proxy_simple_request_test.exe chttp2_fullstack_with_proxy_simple_request_with_high_initial_sequence_number_test.exe chttp2_simple_ssl_fullstack_bad_hostname_test.exe chttp2_simple_ssl_fullstack_cancel_after_accept_test.exe chttp2_simple_ssl_fullstack_cancel_after_accept_and_writes_closed_test.exe chttp2_simple_ssl_fullstack_cancel_after_invoke_test.exe chttp2_simple_ssl_fullstack_cancel_before_invoke_test.exe chttp2_simple_ssl_fullstack_cancel_in_a_vacuum_test.exe chttp2_simple_ssl_fullstack_census_simple_request_test.exe chttp2_simple_ssl_fullstack_channel_connectivity_test.exe chttp2_simple_ssl_fullstack_default_host_test.exe chttp2_simple_ssl_fullstack_disappearing_server_test.exe chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_tags_test.exe chttp2_simple_ssl_fullstack_empty_batch_test.exe chttp2_simple_ssl_fullstack_graceful_server_shutdown_test.exe chttp2_simple_ssl_fullstack_invoke_large_request_test.exe chttp2_simple_ssl_fullstack_max_concurrent_streams_test.exe chttp2_simple_ssl_fullstack_max_message_length_test.exe chttp2_simple_ssl_fullstack_no_op_test.exe chttp2_simple_ssl_fullstack_ping_pong_streaming_test.exe chttp2_simple_ssl_fullstack_registered_call_test.exe chttp2_simple_ssl_fullstack_request_response_with_binary_metadata_and_payload_test.exe chttp2_simple_ssl_fullstack_request_response_with_metadata_and_payload_test.exe chttp2_simple_ssl_fullstack_request_response_with_payload_test.exe chttp2_simple_ssl_fullstack_request_response_with_payload_and_call_creds_test.exe chttp2_simple_ssl_fullstack_request_response_with_trailing_metadata_and_payload_test.exe chttp2_simple_ssl_fullstack_request_with_compressed_payload_test.exe chttp2_simple_ssl_fullstack_request_with_flags_test.exe 
chttp2_simple_ssl_fullstack_request_with_large_metadata_test.exe chttp2_simple_ssl_fullstack_request_with_payload_test.exe chttp2_simple_ssl_fullstack_server_finishes_request_test.exe chttp2_simple_ssl_fullstack_simple_delayed_request_test.exe chttp2_simple_ssl_fullstack_simple_request_test.exe chttp2_simple_ssl_fullstack_simple_request_with_high_initial_sequence_number_test.exe chttp2_simple_ssl_fullstack_with_proxy_bad_hostname_test.exe chttp2_simple_ssl_fullstack_with_proxy_cancel_after_accept_test.exe chttp2_simple_ssl_fullstack_with_proxy_cancel_after_accept_and_writes_closed_test.exe chttp2_simple_ssl_fullstack_with_proxy_cancel_after_invoke_test.exe chttp2_simple_ssl_fullstack_with_proxy_cancel_before_invoke_test.exe chttp2_simple_ssl_fullstack_with_proxy_cancel_in_a_vacuum_test.exe chttp2_simple_ssl_fullstack_with_proxy_census_simple_request_test.exe chttp2_simple_ssl_fullstack_with_proxy_default_host_test.exe chttp2_simple_ssl_fullstack_with_proxy_disappearing_server_test.exe chttp2_simple_ssl_fullstack_with_proxy_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_simple_ssl_fullstack_with_proxy_early_server_shutdown_finishes_tags_test.exe chttp2_simple_ssl_fullstack_with_proxy_empty_batch_test.exe chttp2_simple_ssl_fullstack_with_proxy_graceful_server_shutdown_test.exe chttp2_simple_ssl_fullstack_with_proxy_invoke_large_request_test.exe chttp2_simple_ssl_fullstack_with_proxy_max_message_length_test.exe chttp2_simple_ssl_fullstack_with_proxy_no_op_test.exe chttp2_simple_ssl_fullstack_with_proxy_ping_pong_streaming_test.exe chttp2_simple_ssl_fullstack_with_proxy_registered_call_test.exe chttp2_simple_ssl_fullstack_with_proxy_request_response_with_binary_metadata_and_payload_test.exe chttp2_simple_ssl_fullstack_with_proxy_request_response_with_metadata_and_payload_test.exe chttp2_simple_ssl_fullstack_with_proxy_request_response_with_payload_test.exe chttp2_simple_ssl_fullstack_with_proxy_request_response_with_payload_and_call_creds_test.exe chttp2_simple_ssl_fullstack_with_proxy_request_response_with_trailing_metadata_and_payload_test.exe chttp2_simple_ssl_fullstack_with_proxy_request_with_large_metadata_test.exe chttp2_simple_ssl_fullstack_with_proxy_request_with_payload_test.exe chttp2_simple_ssl_fullstack_with_proxy_server_finishes_request_test.exe chttp2_simple_ssl_fullstack_with_proxy_simple_delayed_request_test.exe chttp2_simple_ssl_fullstack_with_proxy_simple_request_test.exe chttp2_simple_ssl_fullstack_with_proxy_simple_request_with_high_initial_sequence_number_test.exe chttp2_simple_ssl_with_oauth2_fullstack_bad_hostname_test.exe chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test.exe chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_and_writes_closed_test.exe chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_invoke_test.exe chttp2_simple_ssl_with_oauth2_fullstack_cancel_before_invoke_test.exe chttp2_simple_ssl_with_oauth2_fullstack_cancel_in_a_vacuum_test.exe chttp2_simple_ssl_with_oauth2_fullstack_census_simple_request_test.exe chttp2_simple_ssl_with_oauth2_fullstack_channel_connectivity_test.exe chttp2_simple_ssl_with_oauth2_fullstack_default_host_test.exe chttp2_simple_ssl_with_oauth2_fullstack_disappearing_server_test.exe chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_tags_test.exe chttp2_simple_ssl_with_oauth2_fullstack_empty_batch_test.exe chttp2_simple_ssl_with_oauth2_fullstack_graceful_server_shutdown_test.exe 
chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test.exe chttp2_simple_ssl_with_oauth2_fullstack_max_concurrent_streams_test.exe chttp2_simple_ssl_with_oauth2_fullstack_max_message_length_test.exe chttp2_simple_ssl_with_oauth2_fullstack_no_op_test.exe chttp2_simple_ssl_with_oauth2_fullstack_ping_pong_streaming_test.exe chttp2_simple_ssl_with_oauth2_fullstack_registered_call_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_binary_metadata_and_payload_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_metadata_and_payload_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_and_call_creds_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_trailing_metadata_and_payload_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_with_compressed_payload_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_with_flags_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_with_large_metadata_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_with_payload_test.exe chttp2_simple_ssl_with_oauth2_fullstack_server_finishes_request_test.exe chttp2_simple_ssl_with_oauth2_fullstack_simple_delayed_request_test.exe chttp2_simple_ssl_with_oauth2_fullstack_simple_request_test.exe chttp2_simple_ssl_with_oauth2_fullstack_simple_request_with_high_initial_sequence_number_test.exe chttp2_socket_pair_bad_hostname_test.exe chttp2_socket_pair_cancel_after_accept_test.exe chttp2_socket_pair_cancel_after_accept_and_writes_closed_test.exe chttp2_socket_pair_cancel_after_invoke_test.exe chttp2_socket_pair_cancel_before_invoke_test.exe chttp2_socket_pair_cancel_in_a_vacuum_test.exe chttp2_socket_pair_census_simple_request_test.exe chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_socket_pair_early_server_shutdown_finishes_tags_test.exe chttp2_socket_pair_empty_batch_test.exe chttp2_socket_pair_graceful_server_shutdown_test.exe chttp2_socket_pair_invoke_large_request_test.exe chttp2_socket_pair_max_concurrent_streams_test.exe chttp2_socket_pair_max_message_length_test.exe chttp2_socket_pair_no_op_test.exe chttp2_socket_pair_ping_pong_streaming_test.exe chttp2_socket_pair_registered_call_test.exe chttp2_socket_pair_request_response_with_binary_metadata_and_payload_test.exe chttp2_socket_pair_request_response_with_metadata_and_payload_test.exe chttp2_socket_pair_request_response_with_payload_test.exe chttp2_socket_pair_request_response_with_payload_and_call_creds_test.exe chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_test.exe chttp2_socket_pair_request_with_compressed_payload_test.exe chttp2_socket_pair_request_with_flags_test.exe chttp2_socket_pair_request_with_large_metadata_test.exe chttp2_socket_pair_request_with_payload_test.exe chttp2_socket_pair_server_finishes_request_test.exe chttp2_socket_pair_simple_request_test.exe chttp2_socket_pair_simple_request_with_high_initial_sequence_number_test.exe chttp2_socket_pair_one_byte_at_a_time_bad_hostname_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_test.exe chttp2_socket_pair_one_byte_at_a_time_census_simple_request_test.exe 
chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_test.exe chttp2_socket_pair_one_byte_at_a_time_empty_batch_test.exe chttp2_socket_pair_one_byte_at_a_time_graceful_server_shutdown_test.exe chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test.exe chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_test.exe chttp2_socket_pair_one_byte_at_a_time_max_message_length_test.exe chttp2_socket_pair_one_byte_at_a_time_no_op_test.exe chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_test.exe chttp2_socket_pair_one_byte_at_a_time_registered_call_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_binary_metadata_and_payload_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_and_call_creds_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_test.exe chttp2_socket_pair_one_byte_at_a_time_request_with_compressed_payload_test.exe chttp2_socket_pair_one_byte_at_a_time_request_with_flags_test.exe chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_test.exe chttp2_socket_pair_one_byte_at_a_time_request_with_payload_test.exe chttp2_socket_pair_one_byte_at_a_time_server_finishes_request_test.exe chttp2_socket_pair_one_byte_at_a_time_simple_request_test.exe chttp2_socket_pair_one_byte_at_a_time_simple_request_with_high_initial_sequence_number_test.exe chttp2_socket_pair_with_grpc_trace_bad_hostname_test.exe chttp2_socket_pair_with_grpc_trace_cancel_after_accept_test.exe chttp2_socket_pair_with_grpc_trace_cancel_after_accept_and_writes_closed_test.exe chttp2_socket_pair_with_grpc_trace_cancel_after_invoke_test.exe chttp2_socket_pair_with_grpc_trace_cancel_before_invoke_test.exe chttp2_socket_pair_with_grpc_trace_cancel_in_a_vacuum_test.exe chttp2_socket_pair_with_grpc_trace_census_simple_request_test.exe chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_tags_test.exe chttp2_socket_pair_with_grpc_trace_empty_batch_test.exe chttp2_socket_pair_with_grpc_trace_graceful_server_shutdown_test.exe chttp2_socket_pair_with_grpc_trace_invoke_large_request_test.exe chttp2_socket_pair_with_grpc_trace_max_concurrent_streams_test.exe chttp2_socket_pair_with_grpc_trace_max_message_length_test.exe chttp2_socket_pair_with_grpc_trace_no_op_test.exe chttp2_socket_pair_with_grpc_trace_ping_pong_streaming_test.exe chttp2_socket_pair_with_grpc_trace_registered_call_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_binary_metadata_and_payload_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_metadata_and_payload_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_payload_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_payload_and_call_creds_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_test.exe chttp2_socket_pair_with_grpc_trace_request_with_compressed_payload_test.exe chttp2_socket_pair_with_grpc_trace_request_with_flags_test.exe chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_test.exe chttp2_socket_pair_with_grpc_trace_request_with_payload_test.exe 
chttp2_socket_pair_with_grpc_trace_server_finishes_request_test.exe chttp2_socket_pair_with_grpc_trace_simple_request_test.exe chttp2_socket_pair_with_grpc_trace_simple_request_with_high_initial_sequence_number_test.exe chttp2_fullstack_bad_hostname_unsecure_test.exe chttp2_fullstack_cancel_after_accept_unsecure_test.exe chttp2_fullstack_cancel_after_accept_and_writes_closed_unsecure_test.exe chttp2_fullstack_cancel_after_invoke_unsecure_test.exe chttp2_fullstack_cancel_before_invoke_unsecure_test.exe chttp2_fullstack_cancel_in_a_vacuum_unsecure_test.exe chttp2_fullstack_census_simple_request_unsecure_test.exe chttp2_fullstack_channel_connectivity_unsecure_test.exe chttp2_fullstack_default_host_unsecure_test.exe chttp2_fullstack_disappearing_server_unsecure_test.exe chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_unsecure_test.exe chttp2_fullstack_early_server_shutdown_finishes_tags_unsecure_test.exe chttp2_fullstack_empty_batch_unsecure_test.exe chttp2_fullstack_graceful_server_shutdown_unsecure_test.exe chttp2_fullstack_invoke_large_request_unsecure_test.exe chttp2_fullstack_max_concurrent_streams_unsecure_test.exe chttp2_fullstack_max_message_length_unsecure_test.exe chttp2_fullstack_no_op_unsecure_test.exe chttp2_fullstack_ping_pong_streaming_unsecure_test.exe chttp2_fullstack_registered_call_unsecure_test.exe chttp2_fullstack_request_response_with_binary_metadata_and_payload_unsecure_test.exe chttp2_fullstack_request_response_with_metadata_and_payload_unsecure_test.exe chttp2_fullstack_request_response_with_payload_unsecure_test.exe chttp2_fullstack_request_response_with_trailing_metadata_and_payload_unsecure_test.exe chttp2_fullstack_request_with_compressed_payload_unsecure_test.exe chttp2_fullstack_request_with_flags_unsecure_test.exe chttp2_fullstack_request_with_large_metadata_unsecure_test.exe chttp2_fullstack_request_with_payload_unsecure_test.exe chttp2_fullstack_server_finishes_request_unsecure_test.exe chttp2_fullstack_simple_delayed_request_unsecure_test.exe chttp2_fullstack_simple_request_unsecure_test.exe chttp2_fullstack_simple_request_with_high_initial_sequence_number_unsecure_test.exe chttp2_fullstack_compression_bad_hostname_unsecure_test.exe chttp2_fullstack_compression_cancel_after_accept_unsecure_test.exe chttp2_fullstack_compression_cancel_after_accept_and_writes_closed_unsecure_test.exe chttp2_fullstack_compression_cancel_after_invoke_unsecure_test.exe chttp2_fullstack_compression_cancel_before_invoke_unsecure_test.exe chttp2_fullstack_compression_cancel_in_a_vacuum_unsecure_test.exe chttp2_fullstack_compression_census_simple_request_unsecure_test.exe chttp2_fullstack_compression_channel_connectivity_unsecure_test.exe chttp2_fullstack_compression_default_host_unsecure_test.exe chttp2_fullstack_compression_disappearing_server_unsecure_test.exe chttp2_fullstack_compression_early_server_shutdown_finishes_inflight_calls_unsecure_test.exe chttp2_fullstack_compression_early_server_shutdown_finishes_tags_unsecure_test.exe chttp2_fullstack_compression_empty_batch_unsecure_test.exe chttp2_fullstack_compression_graceful_server_shutdown_unsecure_test.exe chttp2_fullstack_compression_invoke_large_request_unsecure_test.exe chttp2_fullstack_compression_max_concurrent_streams_unsecure_test.exe chttp2_fullstack_compression_max_message_length_unsecure_test.exe chttp2_fullstack_compression_no_op_unsecure_test.exe chttp2_fullstack_compression_ping_pong_streaming_unsecure_test.exe chttp2_fullstack_compression_registered_call_unsecure_test.exe 
chttp2_fullstack_compression_request_response_with_binary_metadata_and_payload_unsecure_test.exe chttp2_fullstack_compression_request_response_with_metadata_and_payload_unsecure_test.exe chttp2_fullstack_compression_request_response_with_payload_unsecure_test.exe chttp2_fullstack_compression_request_response_with_trailing_metadata_and_payload_unsecure_test.exe chttp2_fullstack_compression_request_with_compressed_payload_unsecure_test.exe chttp2_fullstack_compression_request_with_flags_unsecure_test.exe chttp2_fullstack_compression_request_with_large_metadata_unsecure_test.exe chttp2_fullstack_compression_request_with_payload_unsecure_test.exe chttp2_fullstack_compression_server_finishes_request_unsecure_test.exe chttp2_fullstack_compression_simple_delayed_request_unsecure_test.exe chttp2_fullstack_compression_simple_request_unsecure_test.exe chttp2_fullstack_compression_simple_request_with_high_initial_sequence_number_unsecure_test.exe chttp2_fullstack_with_proxy_bad_hostname_unsecure_test.exe chttp2_fullstack_with_proxy_cancel_after_accept_unsecure_test.exe chttp2_fullstack_with_proxy_cancel_after_accept_and_writes_closed_unsecure_test.exe chttp2_fullstack_with_proxy_cancel_after_invoke_unsecure_test.exe chttp2_fullstack_with_proxy_cancel_before_invoke_unsecure_test.exe chttp2_fullstack_with_proxy_cancel_in_a_vacuum_unsecure_test.exe chttp2_fullstack_with_proxy_census_simple_request_unsecure_test.exe chttp2_fullstack_with_proxy_default_host_unsecure_test.exe chttp2_fullstack_with_proxy_disappearing_server_unsecure_test.exe chttp2_fullstack_with_proxy_early_server_shutdown_finishes_inflight_calls_unsecure_test.exe chttp2_fullstack_with_proxy_early_server_shutdown_finishes_tags_unsecure_test.exe chttp2_fullstack_with_proxy_empty_batch_unsecure_test.exe chttp2_fullstack_with_proxy_graceful_server_shutdown_unsecure_test.exe chttp2_fullstack_with_proxy_invoke_large_request_unsecure_test.exe chttp2_fullstack_with_proxy_max_message_length_unsecure_test.exe chttp2_fullstack_with_proxy_no_op_unsecure_test.exe chttp2_fullstack_with_proxy_ping_pong_streaming_unsecure_test.exe chttp2_fullstack_with_proxy_registered_call_unsecure_test.exe chttp2_fullstack_with_proxy_request_response_with_binary_metadata_and_payload_unsecure_test.exe chttp2_fullstack_with_proxy_request_response_with_metadata_and_payload_unsecure_test.exe chttp2_fullstack_with_proxy_request_response_with_payload_unsecure_test.exe chttp2_fullstack_with_proxy_request_response_with_trailing_metadata_and_payload_unsecure_test.exe chttp2_fullstack_with_proxy_request_with_large_metadata_unsecure_test.exe chttp2_fullstack_with_proxy_request_with_payload_unsecure_test.exe chttp2_fullstack_with_proxy_server_finishes_request_unsecure_test.exe chttp2_fullstack_with_proxy_simple_delayed_request_unsecure_test.exe chttp2_fullstack_with_proxy_simple_request_unsecure_test.exe chttp2_fullstack_with_proxy_simple_request_with_high_initial_sequence_number_unsecure_test.exe chttp2_socket_pair_bad_hostname_unsecure_test.exe chttp2_socket_pair_cancel_after_accept_unsecure_test.exe chttp2_socket_pair_cancel_after_accept_and_writes_closed_unsecure_test.exe chttp2_socket_pair_cancel_after_invoke_unsecure_test.exe chttp2_socket_pair_cancel_before_invoke_unsecure_test.exe chttp2_socket_pair_cancel_in_a_vacuum_unsecure_test.exe chttp2_socket_pair_census_simple_request_unsecure_test.exe chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_unsecure_test.exe chttp2_socket_pair_early_server_shutdown_finishes_tags_unsecure_test.exe 
chttp2_socket_pair_empty_batch_unsecure_test.exe chttp2_socket_pair_graceful_server_shutdown_unsecure_test.exe chttp2_socket_pair_invoke_large_request_unsecure_test.exe chttp2_socket_pair_max_concurrent_streams_unsecure_test.exe chttp2_socket_pair_max_message_length_unsecure_test.exe chttp2_socket_pair_no_op_unsecure_test.exe chttp2_socket_pair_ping_pong_streaming_unsecure_test.exe chttp2_socket_pair_registered_call_unsecure_test.exe chttp2_socket_pair_request_response_with_binary_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_request_response_with_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_request_response_with_payload_unsecure_test.exe chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_request_with_compressed_payload_unsecure_test.exe chttp2_socket_pair_request_with_flags_unsecure_test.exe chttp2_socket_pair_request_with_large_metadata_unsecure_test.exe chttp2_socket_pair_request_with_payload_unsecure_test.exe chttp2_socket_pair_server_finishes_request_unsecure_test.exe chttp2_socket_pair_simple_request_unsecure_test.exe chttp2_socket_pair_simple_request_with_high_initial_sequence_number_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_bad_hostname_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_census_simple_request_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_empty_batch_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_graceful_server_shutdown_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_max_message_length_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_no_op_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_registered_call_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_binary_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_request_with_compressed_payload_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_request_with_flags_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_request_with_payload_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_server_finishes_request_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_simple_request_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_simple_request_with_high_initial_sequence_number_unsecure_test.exe 
chttp2_socket_pair_with_grpc_trace_bad_hostname_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_cancel_after_accept_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_cancel_after_accept_and_writes_closed_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_cancel_after_invoke_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_cancel_before_invoke_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_cancel_in_a_vacuum_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_census_simple_request_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_inflight_calls_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_tags_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_empty_batch_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_graceful_server_shutdown_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_invoke_large_request_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_max_concurrent_streams_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_max_message_length_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_no_op_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_ping_pong_streaming_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_registered_call_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_binary_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_payload_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_request_with_compressed_payload_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_request_with_flags_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_request_with_payload_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_server_finishes_request_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_simple_request_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_simple_request_with_high_initial_sequence_number_unsecure_test.exe connection_prefix_bad_client_test.exe initial_settings_frame_bad_client_test.exe 
+buildtests_c: alarm_heap_test.exe alarm_list_test.exe alarm_test.exe alpn_test.exe bin_encoder_test.exe chttp2_status_conversion_test.exe chttp2_stream_encoder_test.exe chttp2_stream_map_test.exe compression_test.exe fling_client.exe fling_server.exe gpr_cmdline_test.exe gpr_env_test.exe gpr_file_test.exe gpr_histogram_test.exe gpr_host_port_test.exe gpr_log_test.exe gpr_slice_buffer_test.exe gpr_slice_test.exe gpr_stack_lockfree_test.exe gpr_string_test.exe gpr_sync_test.exe gpr_thd_test.exe gpr_time_test.exe gpr_tls_test.exe gpr_useful_test.exe grpc_auth_context_test.exe grpc_base64_test.exe grpc_byte_buffer_reader_test.exe grpc_channel_args_test.exe grpc_channel_stack_test.exe grpc_completion_queue_test.exe grpc_credentials_test.exe grpc_json_token_test.exe grpc_jwt_verifier_test.exe grpc_security_connector_test.exe grpc_stream_op_test.exe hpack_parser_test.exe hpack_table_test.exe httpcli_format_request_test.exe httpcli_parser_test.exe json_rewrite.exe json_rewrite_test.exe json_test.exe lame_client_test.exe message_compress_test.exe multi_init_test.exe multiple_server_queues_test.exe murmur_hash_test.exe no_server_test.exe resolve_address_test.exe secure_endpoint_test.exe sockaddr_utils_test.exe time_averaged_stats_test.exe timeout_encoding_test.exe timers_test.exe transport_metadata_test.exe transport_security_test.exe uri_parser_test.exe chttp2_fake_security_bad_hostname_test.exe chttp2_fake_security_cancel_after_accept_test.exe chttp2_fake_security_cancel_after_accept_and_writes_closed_test.exe chttp2_fake_security_cancel_after_invoke_test.exe chttp2_fake_security_cancel_before_invoke_test.exe chttp2_fake_security_cancel_in_a_vacuum_test.exe chttp2_fake_security_census_simple_request_test.exe chttp2_fake_security_channel_connectivity_test.exe chttp2_fake_security_default_host_test.exe chttp2_fake_security_disappearing_server_test.exe chttp2_fake_security_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_fake_security_early_server_shutdown_finishes_tags_test.exe chttp2_fake_security_empty_batch_test.exe chttp2_fake_security_graceful_server_shutdown_test.exe chttp2_fake_security_invoke_large_request_test.exe chttp2_fake_security_max_concurrent_streams_test.exe chttp2_fake_security_max_message_length_test.exe chttp2_fake_security_no_op_test.exe chttp2_fake_security_ping_pong_streaming_test.exe chttp2_fake_security_registered_call_test.exe chttp2_fake_security_request_response_with_binary_metadata_and_payload_test.exe chttp2_fake_security_request_response_with_metadata_and_payload_test.exe chttp2_fake_security_request_response_with_payload_test.exe chttp2_fake_security_request_response_with_payload_and_call_creds_test.exe chttp2_fake_security_request_response_with_trailing_metadata_and_payload_test.exe chttp2_fake_security_request_with_compressed_payload_test.exe chttp2_fake_security_request_with_flags_test.exe chttp2_fake_security_request_with_large_metadata_test.exe chttp2_fake_security_request_with_payload_test.exe chttp2_fake_security_server_finishes_request_test.exe chttp2_fake_security_simple_delayed_request_test.exe chttp2_fake_security_simple_request_test.exe chttp2_fake_security_simple_request_with_high_initial_sequence_number_test.exe chttp2_fullstack_bad_hostname_test.exe chttp2_fullstack_cancel_after_accept_test.exe chttp2_fullstack_cancel_after_accept_and_writes_closed_test.exe chttp2_fullstack_cancel_after_invoke_test.exe chttp2_fullstack_cancel_before_invoke_test.exe chttp2_fullstack_cancel_in_a_vacuum_test.exe chttp2_fullstack_census_simple_request_test.exe 
chttp2_fullstack_channel_connectivity_test.exe chttp2_fullstack_default_host_test.exe chttp2_fullstack_disappearing_server_test.exe chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_fullstack_early_server_shutdown_finishes_tags_test.exe chttp2_fullstack_empty_batch_test.exe chttp2_fullstack_graceful_server_shutdown_test.exe chttp2_fullstack_invoke_large_request_test.exe chttp2_fullstack_max_concurrent_streams_test.exe chttp2_fullstack_max_message_length_test.exe chttp2_fullstack_no_op_test.exe chttp2_fullstack_ping_pong_streaming_test.exe chttp2_fullstack_registered_call_test.exe chttp2_fullstack_request_response_with_binary_metadata_and_payload_test.exe chttp2_fullstack_request_response_with_metadata_and_payload_test.exe chttp2_fullstack_request_response_with_payload_test.exe chttp2_fullstack_request_response_with_payload_and_call_creds_test.exe chttp2_fullstack_request_response_with_trailing_metadata_and_payload_test.exe chttp2_fullstack_request_with_compressed_payload_test.exe chttp2_fullstack_request_with_flags_test.exe chttp2_fullstack_request_with_large_metadata_test.exe chttp2_fullstack_request_with_payload_test.exe chttp2_fullstack_server_finishes_request_test.exe chttp2_fullstack_simple_delayed_request_test.exe chttp2_fullstack_simple_request_test.exe chttp2_fullstack_simple_request_with_high_initial_sequence_number_test.exe chttp2_fullstack_compression_bad_hostname_test.exe chttp2_fullstack_compression_cancel_after_accept_test.exe chttp2_fullstack_compression_cancel_after_accept_and_writes_closed_test.exe chttp2_fullstack_compression_cancel_after_invoke_test.exe chttp2_fullstack_compression_cancel_before_invoke_test.exe chttp2_fullstack_compression_cancel_in_a_vacuum_test.exe chttp2_fullstack_compression_census_simple_request_test.exe chttp2_fullstack_compression_channel_connectivity_test.exe chttp2_fullstack_compression_default_host_test.exe chttp2_fullstack_compression_disappearing_server_test.exe chttp2_fullstack_compression_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_fullstack_compression_early_server_shutdown_finishes_tags_test.exe chttp2_fullstack_compression_empty_batch_test.exe chttp2_fullstack_compression_graceful_server_shutdown_test.exe chttp2_fullstack_compression_invoke_large_request_test.exe chttp2_fullstack_compression_max_concurrent_streams_test.exe chttp2_fullstack_compression_max_message_length_test.exe chttp2_fullstack_compression_no_op_test.exe chttp2_fullstack_compression_ping_pong_streaming_test.exe chttp2_fullstack_compression_registered_call_test.exe chttp2_fullstack_compression_request_response_with_binary_metadata_and_payload_test.exe chttp2_fullstack_compression_request_response_with_metadata_and_payload_test.exe chttp2_fullstack_compression_request_response_with_payload_test.exe chttp2_fullstack_compression_request_response_with_payload_and_call_creds_test.exe chttp2_fullstack_compression_request_response_with_trailing_metadata_and_payload_test.exe chttp2_fullstack_compression_request_with_compressed_payload_test.exe chttp2_fullstack_compression_request_with_flags_test.exe chttp2_fullstack_compression_request_with_large_metadata_test.exe chttp2_fullstack_compression_request_with_payload_test.exe chttp2_fullstack_compression_server_finishes_request_test.exe chttp2_fullstack_compression_simple_delayed_request_test.exe chttp2_fullstack_compression_simple_request_test.exe chttp2_fullstack_compression_simple_request_with_high_initial_sequence_number_test.exe chttp2_fullstack_with_proxy_bad_hostname_test.exe 
chttp2_fullstack_with_proxy_cancel_after_accept_test.exe chttp2_fullstack_with_proxy_cancel_after_accept_and_writes_closed_test.exe chttp2_fullstack_with_proxy_cancel_after_invoke_test.exe chttp2_fullstack_with_proxy_cancel_before_invoke_test.exe chttp2_fullstack_with_proxy_cancel_in_a_vacuum_test.exe chttp2_fullstack_with_proxy_census_simple_request_test.exe chttp2_fullstack_with_proxy_default_host_test.exe chttp2_fullstack_with_proxy_disappearing_server_test.exe chttp2_fullstack_with_proxy_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_fullstack_with_proxy_early_server_shutdown_finishes_tags_test.exe chttp2_fullstack_with_proxy_empty_batch_test.exe chttp2_fullstack_with_proxy_graceful_server_shutdown_test.exe chttp2_fullstack_with_proxy_invoke_large_request_test.exe chttp2_fullstack_with_proxy_max_message_length_test.exe chttp2_fullstack_with_proxy_no_op_test.exe chttp2_fullstack_with_proxy_ping_pong_streaming_test.exe chttp2_fullstack_with_proxy_registered_call_test.exe chttp2_fullstack_with_proxy_request_response_with_binary_metadata_and_payload_test.exe chttp2_fullstack_with_proxy_request_response_with_metadata_and_payload_test.exe chttp2_fullstack_with_proxy_request_response_with_payload_test.exe chttp2_fullstack_with_proxy_request_response_with_payload_and_call_creds_test.exe chttp2_fullstack_with_proxy_request_response_with_trailing_metadata_and_payload_test.exe chttp2_fullstack_with_proxy_request_with_large_metadata_test.exe chttp2_fullstack_with_proxy_request_with_payload_test.exe chttp2_fullstack_with_proxy_server_finishes_request_test.exe chttp2_fullstack_with_proxy_simple_delayed_request_test.exe chttp2_fullstack_with_proxy_simple_request_test.exe chttp2_fullstack_with_proxy_simple_request_with_high_initial_sequence_number_test.exe chttp2_simple_ssl_fullstack_bad_hostname_test.exe chttp2_simple_ssl_fullstack_cancel_after_accept_test.exe chttp2_simple_ssl_fullstack_cancel_after_accept_and_writes_closed_test.exe chttp2_simple_ssl_fullstack_cancel_after_invoke_test.exe chttp2_simple_ssl_fullstack_cancel_before_invoke_test.exe chttp2_simple_ssl_fullstack_cancel_in_a_vacuum_test.exe chttp2_simple_ssl_fullstack_census_simple_request_test.exe chttp2_simple_ssl_fullstack_channel_connectivity_test.exe chttp2_simple_ssl_fullstack_default_host_test.exe chttp2_simple_ssl_fullstack_disappearing_server_test.exe chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_tags_test.exe chttp2_simple_ssl_fullstack_empty_batch_test.exe chttp2_simple_ssl_fullstack_graceful_server_shutdown_test.exe chttp2_simple_ssl_fullstack_invoke_large_request_test.exe chttp2_simple_ssl_fullstack_max_concurrent_streams_test.exe chttp2_simple_ssl_fullstack_max_message_length_test.exe chttp2_simple_ssl_fullstack_no_op_test.exe chttp2_simple_ssl_fullstack_ping_pong_streaming_test.exe chttp2_simple_ssl_fullstack_registered_call_test.exe chttp2_simple_ssl_fullstack_request_response_with_binary_metadata_and_payload_test.exe chttp2_simple_ssl_fullstack_request_response_with_metadata_and_payload_test.exe chttp2_simple_ssl_fullstack_request_response_with_payload_test.exe chttp2_simple_ssl_fullstack_request_response_with_payload_and_call_creds_test.exe chttp2_simple_ssl_fullstack_request_response_with_trailing_metadata_and_payload_test.exe chttp2_simple_ssl_fullstack_request_with_compressed_payload_test.exe chttp2_simple_ssl_fullstack_request_with_flags_test.exe 
chttp2_simple_ssl_fullstack_request_with_large_metadata_test.exe chttp2_simple_ssl_fullstack_request_with_payload_test.exe chttp2_simple_ssl_fullstack_server_finishes_request_test.exe chttp2_simple_ssl_fullstack_simple_delayed_request_test.exe chttp2_simple_ssl_fullstack_simple_request_test.exe chttp2_simple_ssl_fullstack_simple_request_with_high_initial_sequence_number_test.exe chttp2_simple_ssl_fullstack_with_proxy_bad_hostname_test.exe chttp2_simple_ssl_fullstack_with_proxy_cancel_after_accept_test.exe chttp2_simple_ssl_fullstack_with_proxy_cancel_after_accept_and_writes_closed_test.exe chttp2_simple_ssl_fullstack_with_proxy_cancel_after_invoke_test.exe chttp2_simple_ssl_fullstack_with_proxy_cancel_before_invoke_test.exe chttp2_simple_ssl_fullstack_with_proxy_cancel_in_a_vacuum_test.exe chttp2_simple_ssl_fullstack_with_proxy_census_simple_request_test.exe chttp2_simple_ssl_fullstack_with_proxy_default_host_test.exe chttp2_simple_ssl_fullstack_with_proxy_disappearing_server_test.exe chttp2_simple_ssl_fullstack_with_proxy_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_simple_ssl_fullstack_with_proxy_early_server_shutdown_finishes_tags_test.exe chttp2_simple_ssl_fullstack_with_proxy_empty_batch_test.exe chttp2_simple_ssl_fullstack_with_proxy_graceful_server_shutdown_test.exe chttp2_simple_ssl_fullstack_with_proxy_invoke_large_request_test.exe chttp2_simple_ssl_fullstack_with_proxy_max_message_length_test.exe chttp2_simple_ssl_fullstack_with_proxy_no_op_test.exe chttp2_simple_ssl_fullstack_with_proxy_ping_pong_streaming_test.exe chttp2_simple_ssl_fullstack_with_proxy_registered_call_test.exe chttp2_simple_ssl_fullstack_with_proxy_request_response_with_binary_metadata_and_payload_test.exe chttp2_simple_ssl_fullstack_with_proxy_request_response_with_metadata_and_payload_test.exe chttp2_simple_ssl_fullstack_with_proxy_request_response_with_payload_test.exe chttp2_simple_ssl_fullstack_with_proxy_request_response_with_payload_and_call_creds_test.exe chttp2_simple_ssl_fullstack_with_proxy_request_response_with_trailing_metadata_and_payload_test.exe chttp2_simple_ssl_fullstack_with_proxy_request_with_large_metadata_test.exe chttp2_simple_ssl_fullstack_with_proxy_request_with_payload_test.exe chttp2_simple_ssl_fullstack_with_proxy_server_finishes_request_test.exe chttp2_simple_ssl_fullstack_with_proxy_simple_delayed_request_test.exe chttp2_simple_ssl_fullstack_with_proxy_simple_request_test.exe chttp2_simple_ssl_fullstack_with_proxy_simple_request_with_high_initial_sequence_number_test.exe chttp2_simple_ssl_with_oauth2_fullstack_bad_hostname_test.exe chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test.exe chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_and_writes_closed_test.exe chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_invoke_test.exe chttp2_simple_ssl_with_oauth2_fullstack_cancel_before_invoke_test.exe chttp2_simple_ssl_with_oauth2_fullstack_cancel_in_a_vacuum_test.exe chttp2_simple_ssl_with_oauth2_fullstack_census_simple_request_test.exe chttp2_simple_ssl_with_oauth2_fullstack_channel_connectivity_test.exe chttp2_simple_ssl_with_oauth2_fullstack_default_host_test.exe chttp2_simple_ssl_with_oauth2_fullstack_disappearing_server_test.exe chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_tags_test.exe chttp2_simple_ssl_with_oauth2_fullstack_empty_batch_test.exe chttp2_simple_ssl_with_oauth2_fullstack_graceful_server_shutdown_test.exe 
chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test.exe chttp2_simple_ssl_with_oauth2_fullstack_max_concurrent_streams_test.exe chttp2_simple_ssl_with_oauth2_fullstack_max_message_length_test.exe chttp2_simple_ssl_with_oauth2_fullstack_no_op_test.exe chttp2_simple_ssl_with_oauth2_fullstack_ping_pong_streaming_test.exe chttp2_simple_ssl_with_oauth2_fullstack_registered_call_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_binary_metadata_and_payload_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_metadata_and_payload_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_and_call_creds_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_trailing_metadata_and_payload_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_with_compressed_payload_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_with_flags_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_with_large_metadata_test.exe chttp2_simple_ssl_with_oauth2_fullstack_request_with_payload_test.exe chttp2_simple_ssl_with_oauth2_fullstack_server_finishes_request_test.exe chttp2_simple_ssl_with_oauth2_fullstack_simple_delayed_request_test.exe chttp2_simple_ssl_with_oauth2_fullstack_simple_request_test.exe chttp2_simple_ssl_with_oauth2_fullstack_simple_request_with_high_initial_sequence_number_test.exe chttp2_socket_pair_bad_hostname_test.exe chttp2_socket_pair_cancel_after_accept_test.exe chttp2_socket_pair_cancel_after_accept_and_writes_closed_test.exe chttp2_socket_pair_cancel_after_invoke_test.exe chttp2_socket_pair_cancel_before_invoke_test.exe chttp2_socket_pair_cancel_in_a_vacuum_test.exe chttp2_socket_pair_census_simple_request_test.exe chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_socket_pair_early_server_shutdown_finishes_tags_test.exe chttp2_socket_pair_empty_batch_test.exe chttp2_socket_pair_graceful_server_shutdown_test.exe chttp2_socket_pair_invoke_large_request_test.exe chttp2_socket_pair_max_concurrent_streams_test.exe chttp2_socket_pair_max_message_length_test.exe chttp2_socket_pair_no_op_test.exe chttp2_socket_pair_ping_pong_streaming_test.exe chttp2_socket_pair_registered_call_test.exe chttp2_socket_pair_request_response_with_binary_metadata_and_payload_test.exe chttp2_socket_pair_request_response_with_metadata_and_payload_test.exe chttp2_socket_pair_request_response_with_payload_test.exe chttp2_socket_pair_request_response_with_payload_and_call_creds_test.exe chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_test.exe chttp2_socket_pair_request_with_compressed_payload_test.exe chttp2_socket_pair_request_with_flags_test.exe chttp2_socket_pair_request_with_large_metadata_test.exe chttp2_socket_pair_request_with_payload_test.exe chttp2_socket_pair_server_finishes_request_test.exe chttp2_socket_pair_simple_request_test.exe chttp2_socket_pair_simple_request_with_high_initial_sequence_number_test.exe chttp2_socket_pair_one_byte_at_a_time_bad_hostname_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_test.exe chttp2_socket_pair_one_byte_at_a_time_census_simple_request_test.exe 
chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_test.exe chttp2_socket_pair_one_byte_at_a_time_empty_batch_test.exe chttp2_socket_pair_one_byte_at_a_time_graceful_server_shutdown_test.exe chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test.exe chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_test.exe chttp2_socket_pair_one_byte_at_a_time_max_message_length_test.exe chttp2_socket_pair_one_byte_at_a_time_no_op_test.exe chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_test.exe chttp2_socket_pair_one_byte_at_a_time_registered_call_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_binary_metadata_and_payload_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_and_call_creds_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_test.exe chttp2_socket_pair_one_byte_at_a_time_request_with_compressed_payload_test.exe chttp2_socket_pair_one_byte_at_a_time_request_with_flags_test.exe chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_test.exe chttp2_socket_pair_one_byte_at_a_time_request_with_payload_test.exe chttp2_socket_pair_one_byte_at_a_time_server_finishes_request_test.exe chttp2_socket_pair_one_byte_at_a_time_simple_request_test.exe chttp2_socket_pair_one_byte_at_a_time_simple_request_with_high_initial_sequence_number_test.exe chttp2_socket_pair_with_grpc_trace_bad_hostname_test.exe chttp2_socket_pair_with_grpc_trace_cancel_after_accept_test.exe chttp2_socket_pair_with_grpc_trace_cancel_after_accept_and_writes_closed_test.exe chttp2_socket_pair_with_grpc_trace_cancel_after_invoke_test.exe chttp2_socket_pair_with_grpc_trace_cancel_before_invoke_test.exe chttp2_socket_pair_with_grpc_trace_cancel_in_a_vacuum_test.exe chttp2_socket_pair_with_grpc_trace_census_simple_request_test.exe chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_inflight_calls_test.exe chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_tags_test.exe chttp2_socket_pair_with_grpc_trace_empty_batch_test.exe chttp2_socket_pair_with_grpc_trace_graceful_server_shutdown_test.exe chttp2_socket_pair_with_grpc_trace_invoke_large_request_test.exe chttp2_socket_pair_with_grpc_trace_max_concurrent_streams_test.exe chttp2_socket_pair_with_grpc_trace_max_message_length_test.exe chttp2_socket_pair_with_grpc_trace_no_op_test.exe chttp2_socket_pair_with_grpc_trace_ping_pong_streaming_test.exe chttp2_socket_pair_with_grpc_trace_registered_call_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_binary_metadata_and_payload_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_metadata_and_payload_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_payload_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_payload_and_call_creds_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_test.exe chttp2_socket_pair_with_grpc_trace_request_with_compressed_payload_test.exe chttp2_socket_pair_with_grpc_trace_request_with_flags_test.exe chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_test.exe chttp2_socket_pair_with_grpc_trace_request_with_payload_test.exe 
chttp2_socket_pair_with_grpc_trace_server_finishes_request_test.exe chttp2_socket_pair_with_grpc_trace_simple_request_test.exe chttp2_socket_pair_with_grpc_trace_simple_request_with_high_initial_sequence_number_test.exe chttp2_fullstack_bad_hostname_unsecure_test.exe chttp2_fullstack_cancel_after_accept_unsecure_test.exe chttp2_fullstack_cancel_after_accept_and_writes_closed_unsecure_test.exe chttp2_fullstack_cancel_after_invoke_unsecure_test.exe chttp2_fullstack_cancel_before_invoke_unsecure_test.exe chttp2_fullstack_cancel_in_a_vacuum_unsecure_test.exe chttp2_fullstack_census_simple_request_unsecure_test.exe chttp2_fullstack_channel_connectivity_unsecure_test.exe chttp2_fullstack_default_host_unsecure_test.exe chttp2_fullstack_disappearing_server_unsecure_test.exe chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_unsecure_test.exe chttp2_fullstack_early_server_shutdown_finishes_tags_unsecure_test.exe chttp2_fullstack_empty_batch_unsecure_test.exe chttp2_fullstack_graceful_server_shutdown_unsecure_test.exe chttp2_fullstack_invoke_large_request_unsecure_test.exe chttp2_fullstack_max_concurrent_streams_unsecure_test.exe chttp2_fullstack_max_message_length_unsecure_test.exe chttp2_fullstack_no_op_unsecure_test.exe chttp2_fullstack_ping_pong_streaming_unsecure_test.exe chttp2_fullstack_registered_call_unsecure_test.exe chttp2_fullstack_request_response_with_binary_metadata_and_payload_unsecure_test.exe chttp2_fullstack_request_response_with_metadata_and_payload_unsecure_test.exe chttp2_fullstack_request_response_with_payload_unsecure_test.exe chttp2_fullstack_request_response_with_trailing_metadata_and_payload_unsecure_test.exe chttp2_fullstack_request_with_compressed_payload_unsecure_test.exe chttp2_fullstack_request_with_flags_unsecure_test.exe chttp2_fullstack_request_with_large_metadata_unsecure_test.exe chttp2_fullstack_request_with_payload_unsecure_test.exe chttp2_fullstack_server_finishes_request_unsecure_test.exe chttp2_fullstack_simple_delayed_request_unsecure_test.exe chttp2_fullstack_simple_request_unsecure_test.exe chttp2_fullstack_simple_request_with_high_initial_sequence_number_unsecure_test.exe chttp2_fullstack_compression_bad_hostname_unsecure_test.exe chttp2_fullstack_compression_cancel_after_accept_unsecure_test.exe chttp2_fullstack_compression_cancel_after_accept_and_writes_closed_unsecure_test.exe chttp2_fullstack_compression_cancel_after_invoke_unsecure_test.exe chttp2_fullstack_compression_cancel_before_invoke_unsecure_test.exe chttp2_fullstack_compression_cancel_in_a_vacuum_unsecure_test.exe chttp2_fullstack_compression_census_simple_request_unsecure_test.exe chttp2_fullstack_compression_channel_connectivity_unsecure_test.exe chttp2_fullstack_compression_default_host_unsecure_test.exe chttp2_fullstack_compression_disappearing_server_unsecure_test.exe chttp2_fullstack_compression_early_server_shutdown_finishes_inflight_calls_unsecure_test.exe chttp2_fullstack_compression_early_server_shutdown_finishes_tags_unsecure_test.exe chttp2_fullstack_compression_empty_batch_unsecure_test.exe chttp2_fullstack_compression_graceful_server_shutdown_unsecure_test.exe chttp2_fullstack_compression_invoke_large_request_unsecure_test.exe chttp2_fullstack_compression_max_concurrent_streams_unsecure_test.exe chttp2_fullstack_compression_max_message_length_unsecure_test.exe chttp2_fullstack_compression_no_op_unsecure_test.exe chttp2_fullstack_compression_ping_pong_streaming_unsecure_test.exe chttp2_fullstack_compression_registered_call_unsecure_test.exe 
chttp2_fullstack_compression_request_response_with_binary_metadata_and_payload_unsecure_test.exe chttp2_fullstack_compression_request_response_with_metadata_and_payload_unsecure_test.exe chttp2_fullstack_compression_request_response_with_payload_unsecure_test.exe chttp2_fullstack_compression_request_response_with_trailing_metadata_and_payload_unsecure_test.exe chttp2_fullstack_compression_request_with_compressed_payload_unsecure_test.exe chttp2_fullstack_compression_request_with_flags_unsecure_test.exe chttp2_fullstack_compression_request_with_large_metadata_unsecure_test.exe chttp2_fullstack_compression_request_with_payload_unsecure_test.exe chttp2_fullstack_compression_server_finishes_request_unsecure_test.exe chttp2_fullstack_compression_simple_delayed_request_unsecure_test.exe chttp2_fullstack_compression_simple_request_unsecure_test.exe chttp2_fullstack_compression_simple_request_with_high_initial_sequence_number_unsecure_test.exe chttp2_fullstack_with_proxy_bad_hostname_unsecure_test.exe chttp2_fullstack_with_proxy_cancel_after_accept_unsecure_test.exe chttp2_fullstack_with_proxy_cancel_after_accept_and_writes_closed_unsecure_test.exe chttp2_fullstack_with_proxy_cancel_after_invoke_unsecure_test.exe chttp2_fullstack_with_proxy_cancel_before_invoke_unsecure_test.exe chttp2_fullstack_with_proxy_cancel_in_a_vacuum_unsecure_test.exe chttp2_fullstack_with_proxy_census_simple_request_unsecure_test.exe chttp2_fullstack_with_proxy_default_host_unsecure_test.exe chttp2_fullstack_with_proxy_disappearing_server_unsecure_test.exe chttp2_fullstack_with_proxy_early_server_shutdown_finishes_inflight_calls_unsecure_test.exe chttp2_fullstack_with_proxy_early_server_shutdown_finishes_tags_unsecure_test.exe chttp2_fullstack_with_proxy_empty_batch_unsecure_test.exe chttp2_fullstack_with_proxy_graceful_server_shutdown_unsecure_test.exe chttp2_fullstack_with_proxy_invoke_large_request_unsecure_test.exe chttp2_fullstack_with_proxy_max_message_length_unsecure_test.exe chttp2_fullstack_with_proxy_no_op_unsecure_test.exe chttp2_fullstack_with_proxy_ping_pong_streaming_unsecure_test.exe chttp2_fullstack_with_proxy_registered_call_unsecure_test.exe chttp2_fullstack_with_proxy_request_response_with_binary_metadata_and_payload_unsecure_test.exe chttp2_fullstack_with_proxy_request_response_with_metadata_and_payload_unsecure_test.exe chttp2_fullstack_with_proxy_request_response_with_payload_unsecure_test.exe chttp2_fullstack_with_proxy_request_response_with_trailing_metadata_and_payload_unsecure_test.exe chttp2_fullstack_with_proxy_request_with_large_metadata_unsecure_test.exe chttp2_fullstack_with_proxy_request_with_payload_unsecure_test.exe chttp2_fullstack_with_proxy_server_finishes_request_unsecure_test.exe chttp2_fullstack_with_proxy_simple_delayed_request_unsecure_test.exe chttp2_fullstack_with_proxy_simple_request_unsecure_test.exe chttp2_fullstack_with_proxy_simple_request_with_high_initial_sequence_number_unsecure_test.exe chttp2_socket_pair_bad_hostname_unsecure_test.exe chttp2_socket_pair_cancel_after_accept_unsecure_test.exe chttp2_socket_pair_cancel_after_accept_and_writes_closed_unsecure_test.exe chttp2_socket_pair_cancel_after_invoke_unsecure_test.exe chttp2_socket_pair_cancel_before_invoke_unsecure_test.exe chttp2_socket_pair_cancel_in_a_vacuum_unsecure_test.exe chttp2_socket_pair_census_simple_request_unsecure_test.exe chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_unsecure_test.exe chttp2_socket_pair_early_server_shutdown_finishes_tags_unsecure_test.exe 
chttp2_socket_pair_empty_batch_unsecure_test.exe chttp2_socket_pair_graceful_server_shutdown_unsecure_test.exe chttp2_socket_pair_invoke_large_request_unsecure_test.exe chttp2_socket_pair_max_concurrent_streams_unsecure_test.exe chttp2_socket_pair_max_message_length_unsecure_test.exe chttp2_socket_pair_no_op_unsecure_test.exe chttp2_socket_pair_ping_pong_streaming_unsecure_test.exe chttp2_socket_pair_registered_call_unsecure_test.exe chttp2_socket_pair_request_response_with_binary_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_request_response_with_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_request_response_with_payload_unsecure_test.exe chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_request_with_compressed_payload_unsecure_test.exe chttp2_socket_pair_request_with_flags_unsecure_test.exe chttp2_socket_pair_request_with_large_metadata_unsecure_test.exe chttp2_socket_pair_request_with_payload_unsecure_test.exe chttp2_socket_pair_server_finishes_request_unsecure_test.exe chttp2_socket_pair_simple_request_unsecure_test.exe chttp2_socket_pair_simple_request_with_high_initial_sequence_number_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_bad_hostname_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_census_simple_request_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_empty_batch_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_graceful_server_shutdown_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_max_message_length_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_no_op_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_registered_call_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_binary_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_request_with_compressed_payload_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_request_with_flags_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_request_with_large_metadata_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_request_with_payload_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_server_finishes_request_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_simple_request_unsecure_test.exe chttp2_socket_pair_one_byte_at_a_time_simple_request_with_high_initial_sequence_number_unsecure_test.exe 
chttp2_socket_pair_with_grpc_trace_bad_hostname_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_cancel_after_accept_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_cancel_after_accept_and_writes_closed_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_cancel_after_invoke_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_cancel_before_invoke_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_cancel_in_a_vacuum_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_census_simple_request_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_inflight_calls_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_early_server_shutdown_finishes_tags_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_empty_batch_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_graceful_server_shutdown_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_invoke_large_request_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_max_concurrent_streams_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_max_message_length_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_no_op_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_ping_pong_streaming_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_registered_call_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_binary_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_payload_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_request_response_with_trailing_metadata_and_payload_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_request_with_compressed_payload_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_request_with_flags_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_request_with_large_metadata_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_request_with_payload_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_server_finishes_request_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_simple_request_unsecure_test.exe chttp2_socket_pair_with_grpc_trace_simple_request_with_high_initial_sequence_number_unsecure_test.exe connection_prefix_bad_client_test.exe initial_settings_frame_bad_client_test.exe 
 	echo All C tests built.
 
-buildtests_cxx: async_end2end_test.exe auth_property_iterator_test.exe channel_arguments_test.exe cli_call_test.exe client_crash_test_server.exe credentials_test.exe cxx_byte_buffer_test.exe cxx_slice_test.exe cxx_time_test.exe dynamic_thread_pool_test.exe end2end_test.exe fixed_size_thread_pool_test.exe generic_end2end_test.exe grpc_cli.exe mock_test.exe reconnect_interop_client.exe reconnect_interop_server.exe secure_auth_context_test.exe server_crash_test_client.exe shutdown_test.exe status_test.exe thread_stress_test.exe zookeeper_test.exe 
+buildtests_cxx: async_end2end_test.exe auth_property_iterator_test.exe channel_arguments_test.exe cli_call_test.exe client_crash_test_server.exe credentials_test.exe cxx_byte_buffer_test.exe cxx_slice_test.exe cxx_time_test.exe end2end_test.exe generic_end2end_test.exe grpc_cli.exe mock_test.exe reconnect_interop_client.exe reconnect_interop_server.exe secure_auth_context_test.exe server_crash_test_client.exe shutdown_test.exe status_test.exe thread_stress_test.exe zookeeper_test.exe 
 	echo All C++ tests built.
 
 
@@ -327,6 +327,14 @@
 	echo Running grpc_byte_buffer_reader_test
 	$(OUT_DIR)\grpc_byte_buffer_reader_test.exe
 
+grpc_channel_args_test.exe: build_grpc_test_util build_grpc build_gpr_test_util build_gpr $(OUT_DIR)
+	echo Building grpc_channel_args_test
+	$(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\test\core\channel\channel_args_test.c 
+	$(LINK) $(LFLAGS) /OUT:"$(OUT_DIR)\grpc_channel_args_test.exe" Debug\grpc_test_util.lib Debug\grpc.lib Debug\gpr_test_util.lib Debug\gpr.lib $(LIBS) $(OUT_DIR)\channel_args_test.obj 
+grpc_channel_args_test: grpc_channel_args_test.exe
+	echo Running grpc_channel_args_test
+	$(OUT_DIR)\grpc_channel_args_test.exe
+
 grpc_channel_stack_test.exe: build_grpc_test_util build_grpc build_gpr_test_util build_gpr $(OUT_DIR)
 	echo Building grpc_channel_stack_test
 	$(CC) $(CFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\test\core\channel\channel_stack_test.c 
@@ -671,14 +679,6 @@
 	echo Running cxx_time_test
 	$(OUT_DIR)\cxx_time_test.exe
 
-dynamic_thread_pool_test.exe: build_grpc_test_util build_grpc++ build_grpc build_gpr_test_util build_gpr $(OUT_DIR)
-	echo Building dynamic_thread_pool_test
-    $(CC) $(CXXFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\test\cpp\server\dynamic_thread_pool_test.cc 
-	$(LINK) $(LFLAGS) /OUT:"$(OUT_DIR)\dynamic_thread_pool_test.exe" Debug\grpc_test_util.lib Debug\grpc++.lib Debug\grpc.lib Debug\gpr_test_util.lib Debug\gpr.lib $(CXX_LIBS) $(LIBS) $(OUT_DIR)\dynamic_thread_pool_test.obj 
-dynamic_thread_pool_test: dynamic_thread_pool_test.exe
-	echo Running dynamic_thread_pool_test
-	$(OUT_DIR)\dynamic_thread_pool_test.exe
-
 end2end_test.exe: Debug\grpc++_test_util.lib build_grpc_test_util build_grpc++ build_grpc build_gpr_test_util build_gpr $(OUT_DIR)
 	echo Building end2end_test
     $(CC) $(CXXFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\test\cpp\end2end\end2end_test.cc 
@@ -687,14 +687,6 @@
 	echo Running end2end_test
 	$(OUT_DIR)\end2end_test.exe
 
-fixed_size_thread_pool_test.exe: build_grpc_test_util build_grpc++ build_grpc build_gpr_test_util build_gpr $(OUT_DIR)
-	echo Building fixed_size_thread_pool_test
-    $(CC) $(CXXFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\test\cpp\server\fixed_size_thread_pool_test.cc 
-	$(LINK) $(LFLAGS) /OUT:"$(OUT_DIR)\fixed_size_thread_pool_test.exe" Debug\grpc_test_util.lib Debug\grpc++.lib Debug\grpc.lib Debug\gpr_test_util.lib Debug\gpr.lib $(CXX_LIBS) $(LIBS) $(OUT_DIR)\fixed_size_thread_pool_test.obj 
-fixed_size_thread_pool_test: fixed_size_thread_pool_test.exe
-	echo Running fixed_size_thread_pool_test
-	$(OUT_DIR)\fixed_size_thread_pool_test.exe
-
 generic_end2end_test.exe: Debug\grpc++_test_util.lib build_grpc_test_util build_grpc++ build_grpc build_gpr_test_util build_gpr $(OUT_DIR)
 	echo Building generic_end2end_test
     $(CC) $(CXXFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\test\cpp\end2end\generic_end2end_test.cc 
@@ -767,10 +759,10 @@
 	echo Running server_crash_test_client
 	$(OUT_DIR)\server_crash_test_client.exe
 
-shutdown_test.exe: Debug\grpc++_test_util.lib build_grpc_test_util build_grpc++ Debug\grpc_zookeeper.lib build_grpc build_gpr_test_util build_gpr $(OUT_DIR)
+shutdown_test.exe: Debug\grpc++_test_util.lib build_grpc_test_util build_grpc++ build_grpc build_gpr_test_util build_gpr $(OUT_DIR)
 	echo Building shutdown_test
     $(CC) $(CXXFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\test\cpp\end2end\shutdown_test.cc 
-	$(LINK) $(LFLAGS) /OUT:"$(OUT_DIR)\shutdown_test.exe" Debug\grpc++_test_util.lib Debug\grpc_test_util.lib Debug\grpc++.lib Debug\grpc_zookeeper.lib Debug\grpc.lib Debug\gpr_test_util.lib Debug\gpr.lib $(CXX_LIBS) $(LIBS) $(OUT_DIR)\shutdown_test.obj 
+	$(LINK) $(LFLAGS) /OUT:"$(OUT_DIR)\shutdown_test.exe" Debug\grpc++_test_util.lib Debug\grpc_test_util.lib Debug\grpc++.lib Debug\grpc.lib Debug\gpr_test_util.lib Debug\gpr.lib $(CXX_LIBS) $(LIBS) $(OUT_DIR)\shutdown_test.obj 
 shutdown_test: shutdown_test.exe
 	echo Running shutdown_test
 	$(OUT_DIR)\shutdown_test.exe
diff --git a/vsprojects/grpc++/grpc++.vcxproj b/vsprojects/grpc++/grpc++.vcxproj
index 929bc15..e178bbc 100644
--- a/vsprojects/grpc++/grpc++.vcxproj
+++ b/vsprojects/grpc++/grpc++.vcxproj
@@ -213,25 +213,16 @@
     </Link>
   </ItemDefinitionGroup>
   <ItemGroup>
-    <ClInclude Include="..\..\include\grpc++\async_generic_service.h" />
-    <ClInclude Include="..\..\include\grpc++\async_unary_call.h" />
-    <ClInclude Include="..\..\include\grpc++\auth_context.h" />
-    <ClInclude Include="..\..\include\grpc++\byte_buffer.h" />
-    <ClInclude Include="..\..\include\grpc++\channel_arguments.h" />
-    <ClInclude Include="..\..\include\grpc++\channel_interface.h" />
+    <ClInclude Include="..\..\include\grpc++\channel.h" />
     <ClInclude Include="..\..\include\grpc++\client_context.h" />
     <ClInclude Include="..\..\include\grpc++\completion_queue.h" />
-    <ClInclude Include="..\..\include\grpc++\config.h" />
-    <ClInclude Include="..\..\include\grpc++\config_protobuf.h" />
     <ClInclude Include="..\..\include\grpc++\create_channel.h" />
     <ClInclude Include="..\..\include\grpc++\credentials.h" />
-    <ClInclude Include="..\..\include\grpc++\dynamic_thread_pool.h" />
-    <ClInclude Include="..\..\include\grpc++\fixed_size_thread_pool.h" />
-    <ClInclude Include="..\..\include\grpc++\generic_stub.h" />
+    <ClInclude Include="..\..\include\grpc++\generic\async_generic_service.h" />
+    <ClInclude Include="..\..\include\grpc++\generic\generic_stub.h" />
     <ClInclude Include="..\..\include\grpc++\impl\call.h" />
     <ClInclude Include="..\..\include\grpc++\impl\client_unary_call.h" />
     <ClInclude Include="..\..\include\grpc++\impl\grpc_library.h" />
-    <ClInclude Include="..\..\include\grpc++\impl\internal_stub.h" />
     <ClInclude Include="..\..\include\grpc++\impl\proto_utils.h" />
     <ClInclude Include="..\..\include\grpc++\impl\rpc_method.h" />
     <ClInclude Include="..\..\include\grpc++\impl\rpc_service_method.h" />
@@ -247,20 +238,29 @@
     <ClInclude Include="..\..\include\grpc++\server_builder.h" />
     <ClInclude Include="..\..\include\grpc++\server_context.h" />
     <ClInclude Include="..\..\include\grpc++\server_credentials.h" />
-    <ClInclude Include="..\..\include\grpc++\slice.h" />
-    <ClInclude Include="..\..\include\grpc++\status.h" />
-    <ClInclude Include="..\..\include\grpc++\status_code_enum.h" />
-    <ClInclude Include="..\..\include\grpc++\stream.h" />
-    <ClInclude Include="..\..\include\grpc++\stub_options.h" />
-    <ClInclude Include="..\..\include\grpc++\thread_pool_interface.h" />
-    <ClInclude Include="..\..\include\grpc++\time.h" />
+    <ClInclude Include="..\..\include\grpc++\support\async_stream.h" />
+    <ClInclude Include="..\..\include\grpc++\support\async_unary_call.h" />
+    <ClInclude Include="..\..\include\grpc++\support\auth_context.h" />
+    <ClInclude Include="..\..\include\grpc++\support\byte_buffer.h" />
+    <ClInclude Include="..\..\include\grpc++\support\channel_arguments.h" />
+    <ClInclude Include="..\..\include\grpc++\support\config.h" />
+    <ClInclude Include="..\..\include\grpc++\support\config_protobuf.h" />
+    <ClInclude Include="..\..\include\grpc++\support\slice.h" />
+    <ClInclude Include="..\..\include\grpc++\support\status.h" />
+    <ClInclude Include="..\..\include\grpc++\support\status_code_enum.h" />
+    <ClInclude Include="..\..\include\grpc++\support\stub_options.h" />
+    <ClInclude Include="..\..\include\grpc++\support\sync_stream.h" />
+    <ClInclude Include="..\..\include\grpc++\support\time.h" />
   </ItemGroup>
   <ItemGroup>
     <ClInclude Include="..\..\src\cpp\client\secure_credentials.h" />
     <ClInclude Include="..\..\src\cpp\common\secure_auth_context.h" />
     <ClInclude Include="..\..\src\cpp\server\secure_server_credentials.h" />
-    <ClInclude Include="..\..\src\cpp\client\channel.h" />
+    <ClInclude Include="..\..\src\cpp\client\create_channel_internal.h" />
     <ClInclude Include="..\..\src\cpp\common\create_auth_context.h" />
+    <ClInclude Include="..\..\src\cpp\server\dynamic_thread_pool.h" />
+    <ClInclude Include="..\..\src\cpp\server\fixed_size_thread_pool.h" />
+    <ClInclude Include="..\..\src\cpp\server\thread_pool_interface.h" />
   </ItemGroup>
   <ItemGroup>
     <ClCompile Include="..\..\src\cpp\client\secure_channel_arguments.cc">
@@ -283,14 +283,14 @@
     </ClCompile>
     <ClCompile Include="..\..\src\cpp\client\create_channel.cc">
     </ClCompile>
+    <ClCompile Include="..\..\src\cpp\client\create_channel_internal.cc">
+    </ClCompile>
     <ClCompile Include="..\..\src\cpp\client\credentials.cc">
     </ClCompile>
     <ClCompile Include="..\..\src\cpp\client\generic_stub.cc">
     </ClCompile>
     <ClCompile Include="..\..\src\cpp\client\insecure_credentials.cc">
     </ClCompile>
-    <ClCompile Include="..\..\src\cpp\client\internal_stub.cc">
-    </ClCompile>
     <ClCompile Include="..\..\src\cpp\common\call.cc">
     </ClCompile>
     <ClCompile Include="..\..\src\cpp\common\completion_queue.cc">
diff --git a/vsprojects/grpc++/grpc++.vcxproj.filters b/vsprojects/grpc++/grpc++.vcxproj.filters
index 0408fb4..8b9d68a 100644
--- a/vsprojects/grpc++/grpc++.vcxproj.filters
+++ b/vsprojects/grpc++/grpc++.vcxproj.filters
@@ -31,6 +31,9 @@
     <ClCompile Include="..\..\src\cpp\client\create_channel.cc">
       <Filter>src\cpp\client</Filter>
     </ClCompile>
+    <ClCompile Include="..\..\src\cpp\client\create_channel_internal.cc">
+      <Filter>src\cpp\client</Filter>
+    </ClCompile>
     <ClCompile Include="..\..\src\cpp\client\credentials.cc">
       <Filter>src\cpp\client</Filter>
     </ClCompile>
@@ -40,9 +43,6 @@
     <ClCompile Include="..\..\src\cpp\client\insecure_credentials.cc">
       <Filter>src\cpp\client</Filter>
     </ClCompile>
-    <ClCompile Include="..\..\src\cpp\client\internal_stub.cc">
-      <Filter>src\cpp\client</Filter>
-    </ClCompile>
     <ClCompile Include="..\..\src\cpp\common\call.cc">
       <Filter>src\cpp\common</Filter>
     </ClCompile>
@@ -96,22 +96,7 @@
     </ClCompile>
   </ItemGroup>
   <ItemGroup>
-    <ClInclude Include="..\..\include\grpc++\async_generic_service.h">
-      <Filter>include\grpc++</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\async_unary_call.h">
-      <Filter>include\grpc++</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\auth_context.h">
-      <Filter>include\grpc++</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\byte_buffer.h">
-      <Filter>include\grpc++</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\channel_arguments.h">
-      <Filter>include\grpc++</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\channel_interface.h">
+    <ClInclude Include="..\..\include\grpc++\channel.h">
       <Filter>include\grpc++</Filter>
     </ClInclude>
     <ClInclude Include="..\..\include\grpc++\client_context.h">
@@ -120,26 +105,17 @@
     <ClInclude Include="..\..\include\grpc++\completion_queue.h">
       <Filter>include\grpc++</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\config.h">
-      <Filter>include\grpc++</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\config_protobuf.h">
-      <Filter>include\grpc++</Filter>
-    </ClInclude>
     <ClInclude Include="..\..\include\grpc++\create_channel.h">
       <Filter>include\grpc++</Filter>
     </ClInclude>
     <ClInclude Include="..\..\include\grpc++\credentials.h">
       <Filter>include\grpc++</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\dynamic_thread_pool.h">
-      <Filter>include\grpc++</Filter>
+    <ClInclude Include="..\..\include\grpc++\generic\async_generic_service.h">
+      <Filter>include\grpc++\generic</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\fixed_size_thread_pool.h">
-      <Filter>include\grpc++</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\generic_stub.h">
-      <Filter>include\grpc++</Filter>
+    <ClInclude Include="..\..\include\grpc++\generic\generic_stub.h">
+      <Filter>include\grpc++\generic</Filter>
     </ClInclude>
     <ClInclude Include="..\..\include\grpc++\impl\call.h">
       <Filter>include\grpc++\impl</Filter>
@@ -150,9 +126,6 @@
     <ClInclude Include="..\..\include\grpc++\impl\grpc_library.h">
       <Filter>include\grpc++\impl</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\impl\internal_stub.h">
-      <Filter>include\grpc++\impl</Filter>
-    </ClInclude>
     <ClInclude Include="..\..\include\grpc++\impl\proto_utils.h">
       <Filter>include\grpc++\impl</Filter>
     </ClInclude>
@@ -198,26 +171,44 @@
     <ClInclude Include="..\..\include\grpc++\server_credentials.h">
       <Filter>include\grpc++</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\slice.h">
-      <Filter>include\grpc++</Filter>
+    <ClInclude Include="..\..\include\grpc++\support\async_stream.h">
+      <Filter>include\grpc++\support</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\status.h">
-      <Filter>include\grpc++</Filter>
+    <ClInclude Include="..\..\include\grpc++\support\async_unary_call.h">
+      <Filter>include\grpc++\support</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\status_code_enum.h">
-      <Filter>include\grpc++</Filter>
+    <ClInclude Include="..\..\include\grpc++\support\auth_context.h">
+      <Filter>include\grpc++\support</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\stream.h">
-      <Filter>include\grpc++</Filter>
+    <ClInclude Include="..\..\include\grpc++\support\byte_buffer.h">
+      <Filter>include\grpc++\support</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\stub_options.h">
-      <Filter>include\grpc++</Filter>
+    <ClInclude Include="..\..\include\grpc++\support\channel_arguments.h">
+      <Filter>include\grpc++\support</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\thread_pool_interface.h">
-      <Filter>include\grpc++</Filter>
+    <ClInclude Include="..\..\include\grpc++\support\config.h">
+      <Filter>include\grpc++\support</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\time.h">
-      <Filter>include\grpc++</Filter>
+    <ClInclude Include="..\..\include\grpc++\support\config_protobuf.h">
+      <Filter>include\grpc++\support</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\include\grpc++\support\slice.h">
+      <Filter>include\grpc++\support</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\include\grpc++\support\status.h">
+      <Filter>include\grpc++\support</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\include\grpc++\support\status_code_enum.h">
+      <Filter>include\grpc++\support</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\include\grpc++\support\stub_options.h">
+      <Filter>include\grpc++\support</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\include\grpc++\support\sync_stream.h">
+      <Filter>include\grpc++\support</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\include\grpc++\support\time.h">
+      <Filter>include\grpc++\support</Filter>
     </ClInclude>
   </ItemGroup>
   <ItemGroup>
@@ -230,12 +221,21 @@
     <ClInclude Include="..\..\src\cpp\server\secure_server_credentials.h">
       <Filter>src\cpp\server</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\src\cpp\client\channel.h">
+    <ClInclude Include="..\..\src\cpp\client\create_channel_internal.h">
       <Filter>src\cpp\client</Filter>
     </ClInclude>
     <ClInclude Include="..\..\src\cpp\common\create_auth_context.h">
       <Filter>src\cpp\common</Filter>
     </ClInclude>
+    <ClInclude Include="..\..\src\cpp\server\dynamic_thread_pool.h">
+      <Filter>src\cpp\server</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\src\cpp\server\fixed_size_thread_pool.h">
+      <Filter>src\cpp\server</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\src\cpp\server\thread_pool_interface.h">
+      <Filter>src\cpp\server</Filter>
+    </ClInclude>
   </ItemGroup>
 
   <ItemGroup>
@@ -245,9 +245,15 @@
     <Filter Include="include\grpc++">
       <UniqueIdentifier>{784a0281-f547-aeb0-9f55-b26b7de9c769}</UniqueIdentifier>
     </Filter>
+    <Filter Include="include\grpc++\generic">
+      <UniqueIdentifier>{51dae921-3aa2-1976-2ee4-c5615de1af54}</UniqueIdentifier>
+    </Filter>
     <Filter Include="include\grpc++\impl">
       <UniqueIdentifier>{0da8cd95-314f-da1b-5ce7-7791a5be1f1a}</UniqueIdentifier>
     </Filter>
+    <Filter Include="include\grpc++\support">
+      <UniqueIdentifier>{a5c10dae-f715-2a30-1066-d22f8bc94cb2}</UniqueIdentifier>
+    </Filter>
     <Filter Include="src">
       <UniqueIdentifier>{328ff211-2886-406e-56f9-18ba1686f363}</UniqueIdentifier>
     </Filter>
diff --git a/vsprojects/grpc++_unsecure/grpc++_unsecure.vcxproj b/vsprojects/grpc++_unsecure/grpc++_unsecure.vcxproj
index 2ff252e..2f0b18d 100644
--- a/vsprojects/grpc++_unsecure/grpc++_unsecure.vcxproj
+++ b/vsprojects/grpc++_unsecure/grpc++_unsecure.vcxproj
@@ -213,25 +213,16 @@
     </Link>
   </ItemDefinitionGroup>
   <ItemGroup>
-    <ClInclude Include="..\..\include\grpc++\async_generic_service.h" />
-    <ClInclude Include="..\..\include\grpc++\async_unary_call.h" />
-    <ClInclude Include="..\..\include\grpc++\auth_context.h" />
-    <ClInclude Include="..\..\include\grpc++\byte_buffer.h" />
-    <ClInclude Include="..\..\include\grpc++\channel_arguments.h" />
-    <ClInclude Include="..\..\include\grpc++\channel_interface.h" />
+    <ClInclude Include="..\..\include\grpc++\channel.h" />
     <ClInclude Include="..\..\include\grpc++\client_context.h" />
     <ClInclude Include="..\..\include\grpc++\completion_queue.h" />
-    <ClInclude Include="..\..\include\grpc++\config.h" />
-    <ClInclude Include="..\..\include\grpc++\config_protobuf.h" />
     <ClInclude Include="..\..\include\grpc++\create_channel.h" />
     <ClInclude Include="..\..\include\grpc++\credentials.h" />
-    <ClInclude Include="..\..\include\grpc++\dynamic_thread_pool.h" />
-    <ClInclude Include="..\..\include\grpc++\fixed_size_thread_pool.h" />
-    <ClInclude Include="..\..\include\grpc++\generic_stub.h" />
+    <ClInclude Include="..\..\include\grpc++\generic\async_generic_service.h" />
+    <ClInclude Include="..\..\include\grpc++\generic\generic_stub.h" />
     <ClInclude Include="..\..\include\grpc++\impl\call.h" />
     <ClInclude Include="..\..\include\grpc++\impl\client_unary_call.h" />
     <ClInclude Include="..\..\include\grpc++\impl\grpc_library.h" />
-    <ClInclude Include="..\..\include\grpc++\impl\internal_stub.h" />
     <ClInclude Include="..\..\include\grpc++\impl\proto_utils.h" />
     <ClInclude Include="..\..\include\grpc++\impl\rpc_method.h" />
     <ClInclude Include="..\..\include\grpc++\impl\rpc_service_method.h" />
@@ -247,17 +238,26 @@
     <ClInclude Include="..\..\include\grpc++\server_builder.h" />
     <ClInclude Include="..\..\include\grpc++\server_context.h" />
     <ClInclude Include="..\..\include\grpc++\server_credentials.h" />
-    <ClInclude Include="..\..\include\grpc++\slice.h" />
-    <ClInclude Include="..\..\include\grpc++\status.h" />
-    <ClInclude Include="..\..\include\grpc++\status_code_enum.h" />
-    <ClInclude Include="..\..\include\grpc++\stream.h" />
-    <ClInclude Include="..\..\include\grpc++\stub_options.h" />
-    <ClInclude Include="..\..\include\grpc++\thread_pool_interface.h" />
-    <ClInclude Include="..\..\include\grpc++\time.h" />
+    <ClInclude Include="..\..\include\grpc++\support\async_stream.h" />
+    <ClInclude Include="..\..\include\grpc++\support\async_unary_call.h" />
+    <ClInclude Include="..\..\include\grpc++\support\auth_context.h" />
+    <ClInclude Include="..\..\include\grpc++\support\byte_buffer.h" />
+    <ClInclude Include="..\..\include\grpc++\support\channel_arguments.h" />
+    <ClInclude Include="..\..\include\grpc++\support\config.h" />
+    <ClInclude Include="..\..\include\grpc++\support\config_protobuf.h" />
+    <ClInclude Include="..\..\include\grpc++\support\slice.h" />
+    <ClInclude Include="..\..\include\grpc++\support\status.h" />
+    <ClInclude Include="..\..\include\grpc++\support\status_code_enum.h" />
+    <ClInclude Include="..\..\include\grpc++\support\stub_options.h" />
+    <ClInclude Include="..\..\include\grpc++\support\sync_stream.h" />
+    <ClInclude Include="..\..\include\grpc++\support\time.h" />
   </ItemGroup>
   <ItemGroup>
-    <ClInclude Include="..\..\src\cpp\client\channel.h" />
+    <ClInclude Include="..\..\src\cpp\client\create_channel_internal.h" />
     <ClInclude Include="..\..\src\cpp\common\create_auth_context.h" />
+    <ClInclude Include="..\..\src\cpp\server\dynamic_thread_pool.h" />
+    <ClInclude Include="..\..\src\cpp\server\fixed_size_thread_pool.h" />
+    <ClInclude Include="..\..\src\cpp\server\thread_pool_interface.h" />
   </ItemGroup>
   <ItemGroup>
     <ClCompile Include="..\..\src\cpp\common\insecure_create_auth_context.cc">
@@ -270,14 +270,14 @@
     </ClCompile>
     <ClCompile Include="..\..\src\cpp\client\create_channel.cc">
     </ClCompile>
+    <ClCompile Include="..\..\src\cpp\client\create_channel_internal.cc">
+    </ClCompile>
     <ClCompile Include="..\..\src\cpp\client\credentials.cc">
     </ClCompile>
     <ClCompile Include="..\..\src\cpp\client\generic_stub.cc">
     </ClCompile>
     <ClCompile Include="..\..\src\cpp\client\insecure_credentials.cc">
     </ClCompile>
-    <ClCompile Include="..\..\src\cpp\client\internal_stub.cc">
-    </ClCompile>
     <ClCompile Include="..\..\src\cpp\common\call.cc">
     </ClCompile>
     <ClCompile Include="..\..\src\cpp\common\completion_queue.cc">
diff --git a/vsprojects/grpc++_unsecure/grpc++_unsecure.vcxproj.filters b/vsprojects/grpc++_unsecure/grpc++_unsecure.vcxproj.filters
index b4fae77..911ca32 100644
--- a/vsprojects/grpc++_unsecure/grpc++_unsecure.vcxproj.filters
+++ b/vsprojects/grpc++_unsecure/grpc++_unsecure.vcxproj.filters
@@ -16,6 +16,9 @@
     <ClCompile Include="..\..\src\cpp\client\create_channel.cc">
       <Filter>src\cpp\client</Filter>
     </ClCompile>
+    <ClCompile Include="..\..\src\cpp\client\create_channel_internal.cc">
+      <Filter>src\cpp\client</Filter>
+    </ClCompile>
     <ClCompile Include="..\..\src\cpp\client\credentials.cc">
       <Filter>src\cpp\client</Filter>
     </ClCompile>
@@ -25,9 +28,6 @@
     <ClCompile Include="..\..\src\cpp\client\insecure_credentials.cc">
       <Filter>src\cpp\client</Filter>
     </ClCompile>
-    <ClCompile Include="..\..\src\cpp\client\internal_stub.cc">
-      <Filter>src\cpp\client</Filter>
-    </ClCompile>
     <ClCompile Include="..\..\src\cpp\common\call.cc">
       <Filter>src\cpp\common</Filter>
     </ClCompile>
@@ -81,22 +81,7 @@
     </ClCompile>
   </ItemGroup>
   <ItemGroup>
-    <ClInclude Include="..\..\include\grpc++\async_generic_service.h">
-      <Filter>include\grpc++</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\async_unary_call.h">
-      <Filter>include\grpc++</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\auth_context.h">
-      <Filter>include\grpc++</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\byte_buffer.h">
-      <Filter>include\grpc++</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\channel_arguments.h">
-      <Filter>include\grpc++</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\channel_interface.h">
+    <ClInclude Include="..\..\include\grpc++\channel.h">
       <Filter>include\grpc++</Filter>
     </ClInclude>
     <ClInclude Include="..\..\include\grpc++\client_context.h">
@@ -105,26 +90,17 @@
     <ClInclude Include="..\..\include\grpc++\completion_queue.h">
       <Filter>include\grpc++</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\config.h">
-      <Filter>include\grpc++</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\config_protobuf.h">
-      <Filter>include\grpc++</Filter>
-    </ClInclude>
     <ClInclude Include="..\..\include\grpc++\create_channel.h">
       <Filter>include\grpc++</Filter>
     </ClInclude>
     <ClInclude Include="..\..\include\grpc++\credentials.h">
       <Filter>include\grpc++</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\dynamic_thread_pool.h">
-      <Filter>include\grpc++</Filter>
+    <ClInclude Include="..\..\include\grpc++\generic\async_generic_service.h">
+      <Filter>include\grpc++\generic</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\fixed_size_thread_pool.h">
-      <Filter>include\grpc++</Filter>
-    </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\generic_stub.h">
-      <Filter>include\grpc++</Filter>
+    <ClInclude Include="..\..\include\grpc++\generic\generic_stub.h">
+      <Filter>include\grpc++\generic</Filter>
     </ClInclude>
     <ClInclude Include="..\..\include\grpc++\impl\call.h">
       <Filter>include\grpc++\impl</Filter>
@@ -135,9 +111,6 @@
     <ClInclude Include="..\..\include\grpc++\impl\grpc_library.h">
       <Filter>include\grpc++\impl</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\impl\internal_stub.h">
-      <Filter>include\grpc++\impl</Filter>
-    </ClInclude>
     <ClInclude Include="..\..\include\grpc++\impl\proto_utils.h">
       <Filter>include\grpc++\impl</Filter>
     </ClInclude>
@@ -183,35 +156,62 @@
     <ClInclude Include="..\..\include\grpc++\server_credentials.h">
       <Filter>include\grpc++</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\slice.h">
-      <Filter>include\grpc++</Filter>
+    <ClInclude Include="..\..\include\grpc++\support\async_stream.h">
+      <Filter>include\grpc++\support</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\status.h">
-      <Filter>include\grpc++</Filter>
+    <ClInclude Include="..\..\include\grpc++\support\async_unary_call.h">
+      <Filter>include\grpc++\support</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\status_code_enum.h">
-      <Filter>include\grpc++</Filter>
+    <ClInclude Include="..\..\include\grpc++\support\auth_context.h">
+      <Filter>include\grpc++\support</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\stream.h">
-      <Filter>include\grpc++</Filter>
+    <ClInclude Include="..\..\include\grpc++\support\byte_buffer.h">
+      <Filter>include\grpc++\support</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\stub_options.h">
-      <Filter>include\grpc++</Filter>
+    <ClInclude Include="..\..\include\grpc++\support\channel_arguments.h">
+      <Filter>include\grpc++\support</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\thread_pool_interface.h">
-      <Filter>include\grpc++</Filter>
+    <ClInclude Include="..\..\include\grpc++\support\config.h">
+      <Filter>include\grpc++\support</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\include\grpc++\time.h">
-      <Filter>include\grpc++</Filter>
+    <ClInclude Include="..\..\include\grpc++\support\config_protobuf.h">
+      <Filter>include\grpc++\support</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\include\grpc++\support\slice.h">
+      <Filter>include\grpc++\support</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\include\grpc++\support\status.h">
+      <Filter>include\grpc++\support</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\include\grpc++\support\status_code_enum.h">
+      <Filter>include\grpc++\support</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\include\grpc++\support\stub_options.h">
+      <Filter>include\grpc++\support</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\include\grpc++\support\sync_stream.h">
+      <Filter>include\grpc++\support</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\include\grpc++\support\time.h">
+      <Filter>include\grpc++\support</Filter>
     </ClInclude>
   </ItemGroup>
   <ItemGroup>
-    <ClInclude Include="..\..\src\cpp\client\channel.h">
+    <ClInclude Include="..\..\src\cpp\client\create_channel_internal.h">
       <Filter>src\cpp\client</Filter>
     </ClInclude>
     <ClInclude Include="..\..\src\cpp\common\create_auth_context.h">
       <Filter>src\cpp\common</Filter>
     </ClInclude>
+    <ClInclude Include="..\..\src\cpp\server\dynamic_thread_pool.h">
+      <Filter>src\cpp\server</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\src\cpp\server\fixed_size_thread_pool.h">
+      <Filter>src\cpp\server</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\src\cpp\server\thread_pool_interface.h">
+      <Filter>src\cpp\server</Filter>
+    </ClInclude>
   </ItemGroup>
 
   <ItemGroup>
@@ -221,9 +221,15 @@
     <Filter Include="include\grpc++">
       <UniqueIdentifier>{eceb50c0-bb49-3812-b6bd-b0af6df81da7}</UniqueIdentifier>
     </Filter>
+    <Filter Include="include\grpc++\generic">
+      <UniqueIdentifier>{83717d3c-57d9-2bfa-ed9c-2b08f86da12b}</UniqueIdentifier>
+    </Filter>
     <Filter Include="include\grpc++\impl">
       <UniqueIdentifier>{dadc0002-f2ac-451b-a9b8-33b8de10b5fc}</UniqueIdentifier>
     </Filter>
+    <Filter Include="include\grpc++\support">
+      <UniqueIdentifier>{0ebf8008-80b9-d6da-e1dc-854bf1ec2195}</UniqueIdentifier>
+    </Filter>
     <Filter Include="src">
       <UniqueIdentifier>{cce6a85d-1111-3834-6825-31e170d93cff}</UniqueIdentifier>
     </Filter>
diff --git a/vsprojects/grpc_plugin_support/grpc_plugin_support.vcxproj b/vsprojects/grpc_plugin_support/grpc_plugin_support.vcxproj
index 444d796..9f098d1 100644
--- a/vsprojects/grpc_plugin_support/grpc_plugin_support.vcxproj
+++ b/vsprojects/grpc_plugin_support/grpc_plugin_support.vcxproj
@@ -122,8 +122,8 @@
     </Link>
   </ItemDefinitionGroup>
   <ItemGroup>
-    <ClInclude Include="..\..\include\grpc++\config.h" />
-    <ClInclude Include="..\..\include\grpc++\config_protobuf.h" />
+    <ClInclude Include="..\..\include\grpc++\support\config.h" />
+    <ClInclude Include="..\..\include\grpc++\support\config_protobuf.h" />
     <ClInclude Include="..\..\src\compiler\config.h" />
     <ClInclude Include="..\..\src\compiler\cpp_generator.h" />
     <ClInclude Include="..\..\src\compiler\cpp_generator_helpers.h" />