Merge pull request #351 from yang-g/lsan

Fix a memory leak reported by LeakSanitizer (lsan).
diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 0000000..f744516
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,7 @@
+root = true
+[**]
+end_of_line = LF
+indent_style = space
+indent_size = 2
+insert_final_newline = true
+tab_width = 8
diff --git a/.gitignore b/.gitignore
index 002e3e66..6eb55b1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -21,4 +21,8 @@
 .run_tests_cache
 
 # emacs temp files
-*~
\ No newline at end of file
+*~
+
+# vim temp files
+.*.swp
+
diff --git a/INSTALL b/INSTALL
index bba923c..7a3d02f 100644
--- a/INSTALL
+++ b/INSTALL
@@ -85,11 +85,12 @@
 itself, notably the autoconf suite, curl, and unzip. If you have apt-get, you
 can install these dependencies this way:
 
-  # apt-get install unzip curl autotools-dev
+  # apt-get install unzip curl autoconf libtool
 
 Then, you can build and install protobuf 3.0.0:
 
   $ cd third_party/protobuf
+  $ ./autogen.sh
   $ ./configure
   $ make
   # make install
diff --git a/Makefile b/Makefile
index 9031853..a3f1aaf 100644
--- a/Makefile
+++ b/Makefile
@@ -122,7 +122,7 @@
 
 CFLAGS += -std=c89 -pedantic
 CXXFLAGS += -std=c++11
-CPPFLAGS += -g -fPIC -Wall -Werror -Wno-long-long
+CPPFLAGS += -g -fPIC -Wall -Wextra -Werror -Wno-long-long -Wno-unused-parameter
 LDFLAGS += -g -fPIC
 
 INCLUDES = . include gens
@@ -357,6 +357,9 @@
 httpcli_format_request_test: bins/$(CONFIG)/httpcli_format_request_test
 httpcli_parser_test: bins/$(CONFIG)/httpcli_parser_test
 httpcli_test: bins/$(CONFIG)/httpcli_test
+json_rewrite: bins/$(CONFIG)/json_rewrite
+json_rewrite_test: bins/$(CONFIG)/json_rewrite_test
+json_test: bins/$(CONFIG)/json_test
 lame_client_test: bins/$(CONFIG)/lame_client_test
 low_level_ping_pong_benchmark: bins/$(CONFIG)/low_level_ping_pong_benchmark
 message_compress_test: bins/$(CONFIG)/message_compress_test
@@ -380,14 +383,14 @@
 end2end_test: bins/$(CONFIG)/end2end_test
 interop_client: bins/$(CONFIG)/interop_client
 interop_server: bins/$(CONFIG)/interop_server
-tips_client: bins/$(CONFIG)/tips_client
-tips_client_test: bins/$(CONFIG)/tips_client_test
 qps_client: bins/$(CONFIG)/qps_client
 qps_server: bins/$(CONFIG)/qps_server
 ruby_plugin: bins/$(CONFIG)/ruby_plugin
 status_test: bins/$(CONFIG)/status_test
 sync_client_async_server_test: bins/$(CONFIG)/sync_client_async_server_test
 thread_pool_test: bins/$(CONFIG)/thread_pool_test
+tips_client: bins/$(CONFIG)/tips_client
+tips_client_test: bins/$(CONFIG)/tips_client_test
 chttp2_fake_security_cancel_after_accept_test: bins/$(CONFIG)/chttp2_fake_security_cancel_after_accept_test
 chttp2_fake_security_cancel_after_accept_and_writes_closed_test: bins/$(CONFIG)/chttp2_fake_security_cancel_after_accept_and_writes_closed_test
 chttp2_fake_security_cancel_after_invoke_test: bins/$(CONFIG)/chttp2_fake_security_cancel_after_invoke_test
@@ -565,9 +568,9 @@
 
 buildtests: buildtests_c buildtests_cxx
 
-buildtests_c: privatelibs_c bins/$(CONFIG)/alarm_heap_test bins/$(CONFIG)/alarm_list_test bins/$(CONFIG)/alarm_test bins/$(CONFIG)/alpn_test bins/$(CONFIG)/bin_encoder_test bins/$(CONFIG)/census_hash_table_test bins/$(CONFIG)/census_statistics_multiple_writers_circular_buffer_test bins/$(CONFIG)/census_statistics_multiple_writers_test bins/$(CONFIG)/census_statistics_performance_test bins/$(CONFIG)/census_statistics_quick_test bins/$(CONFIG)/census_statistics_small_log_test bins/$(CONFIG)/census_stub_test bins/$(CONFIG)/census_window_stats_test bins/$(CONFIG)/chttp2_status_conversion_test bins/$(CONFIG)/chttp2_stream_encoder_test bins/$(CONFIG)/chttp2_stream_map_test bins/$(CONFIG)/chttp2_transport_end2end_test bins/$(CONFIG)/dualstack_socket_test bins/$(CONFIG)/echo_client bins/$(CONFIG)/echo_server bins/$(CONFIG)/echo_test bins/$(CONFIG)/fd_posix_test bins/$(CONFIG)/fling_client bins/$(CONFIG)/fling_server bins/$(CONFIG)/fling_stream_test bins/$(CONFIG)/fling_test bins/$(CONFIG)/gpr_cancellable_test bins/$(CONFIG)/gpr_cmdline_test bins/$(CONFIG)/gpr_histogram_test bins/$(CONFIG)/gpr_host_port_test bins/$(CONFIG)/gpr_log_test bins/$(CONFIG)/gpr_slice_buffer_test bins/$(CONFIG)/gpr_slice_test bins/$(CONFIG)/gpr_string_test bins/$(CONFIG)/gpr_sync_test bins/$(CONFIG)/gpr_thd_test bins/$(CONFIG)/gpr_time_test bins/$(CONFIG)/gpr_useful_test bins/$(CONFIG)/grpc_base64_test bins/$(CONFIG)/grpc_byte_buffer_reader_test bins/$(CONFIG)/grpc_channel_stack_test bins/$(CONFIG)/grpc_completion_queue_test bins/$(CONFIG)/grpc_credentials_test bins/$(CONFIG)/grpc_json_token_test bins/$(CONFIG)/grpc_stream_op_test bins/$(CONFIG)/hpack_parser_test bins/$(CONFIG)/hpack_table_test bins/$(CONFIG)/httpcli_format_request_test bins/$(CONFIG)/httpcli_parser_test bins/$(CONFIG)/httpcli_test bins/$(CONFIG)/lame_client_test bins/$(CONFIG)/message_compress_test bins/$(CONFIG)/metadata_buffer_test bins/$(CONFIG)/murmur_hash_test bins/$(CONFIG)/no_server_test bins/$(CONFIG)/poll_kick_posix_test bins/$(CONFIG)/resolve_address_test bins/$(CONFIG)/secure_endpoint_test bins/$(CONFIG)/sockaddr_utils_test bins/$(CONFIG)/tcp_client_posix_test bins/$(CONFIG)/tcp_posix_test bins/$(CONFIG)/tcp_server_posix_test bins/$(CONFIG)/time_averaged_stats_test bins/$(CONFIG)/time_test bins/$(CONFIG)/timeout_encoding_test bins/$(CONFIG)/transport_metadata_test bins/$(CONFIG)/chttp2_fake_security_cancel_after_accept_test bins/$(CONFIG)/chttp2_fake_security_cancel_after_accept_and_writes_closed_test bins/$(CONFIG)/chttp2_fake_security_cancel_after_invoke_test bins/$(CONFIG)/chttp2_fake_security_cancel_before_invoke_test bins/$(CONFIG)/chttp2_fake_security_cancel_in_a_vacuum_test bins/$(CONFIG)/chttp2_fake_security_census_simple_request_test bins/$(CONFIG)/chttp2_fake_security_disappearing_server_test bins/$(CONFIG)/chttp2_fake_security_early_server_shutdown_finishes_inflight_calls_test bins/$(CONFIG)/chttp2_fake_security_early_server_shutdown_finishes_tags_test bins/$(CONFIG)/chttp2_fake_security_graceful_server_shutdown_test bins/$(CONFIG)/chttp2_fake_security_invoke_large_request_test bins/$(CONFIG)/chttp2_fake_security_max_concurrent_streams_test bins/$(CONFIG)/chttp2_fake_security_no_op_test bins/$(CONFIG)/chttp2_fake_security_ping_pong_streaming_test bins/$(CONFIG)/chttp2_fake_security_request_response_with_binary_metadata_and_payload_test bins/$(CONFIG)/chttp2_fake_security_request_response_with_metadata_and_payload_test bins/$(CONFIG)/chttp2_fake_security_request_response_with_payload_test 
bins/$(CONFIG)/chttp2_fake_security_request_response_with_trailing_metadata_and_payload_test bins/$(CONFIG)/chttp2_fake_security_simple_delayed_request_test bins/$(CONFIG)/chttp2_fake_security_simple_request_test bins/$(CONFIG)/chttp2_fake_security_thread_stress_test bins/$(CONFIG)/chttp2_fake_security_writes_done_hangs_with_pending_read_test bins/$(CONFIG)/chttp2_fullstack_cancel_after_accept_test bins/$(CONFIG)/chttp2_fullstack_cancel_after_accept_and_writes_closed_test bins/$(CONFIG)/chttp2_fullstack_cancel_after_invoke_test bins/$(CONFIG)/chttp2_fullstack_cancel_before_invoke_test bins/$(CONFIG)/chttp2_fullstack_cancel_in_a_vacuum_test bins/$(CONFIG)/chttp2_fullstack_census_simple_request_test bins/$(CONFIG)/chttp2_fullstack_disappearing_server_test bins/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_test bins/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_tags_test bins/$(CONFIG)/chttp2_fullstack_graceful_server_shutdown_test bins/$(CONFIG)/chttp2_fullstack_invoke_large_request_test bins/$(CONFIG)/chttp2_fullstack_max_concurrent_streams_test bins/$(CONFIG)/chttp2_fullstack_no_op_test bins/$(CONFIG)/chttp2_fullstack_ping_pong_streaming_test bins/$(CONFIG)/chttp2_fullstack_request_response_with_binary_metadata_and_payload_test bins/$(CONFIG)/chttp2_fullstack_request_response_with_metadata_and_payload_test bins/$(CONFIG)/chttp2_fullstack_request_response_with_payload_test bins/$(CONFIG)/chttp2_fullstack_request_response_with_trailing_metadata_and_payload_test bins/$(CONFIG)/chttp2_fullstack_simple_delayed_request_test bins/$(CONFIG)/chttp2_fullstack_simple_request_test bins/$(CONFIG)/chttp2_fullstack_thread_stress_test bins/$(CONFIG)/chttp2_fullstack_writes_done_hangs_with_pending_read_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_accept_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_accept_and_writes_closed_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_invoke_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_before_invoke_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_in_a_vacuum_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_census_simple_request_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_disappearing_server_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_inflight_calls_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_tags_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_graceful_server_shutdown_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_invoke_large_request_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_max_concurrent_streams_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_no_op_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_ping_pong_streaming_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_binary_metadata_and_payload_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_metadata_and_payload_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_payload_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_trailing_metadata_and_payload_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_delayed_request_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_request_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_thread_stress_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_writes_done_hangs_with_pending_read_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test 
bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_and_writes_closed_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_invoke_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_before_invoke_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_in_a_vacuum_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_census_simple_request_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_disappearing_server_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_inflight_calls_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_tags_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_graceful_server_shutdown_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_max_concurrent_streams_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_no_op_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_ping_pong_streaming_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_binary_metadata_and_payload_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_metadata_and_payload_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_trailing_metadata_and_payload_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_delayed_request_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_request_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_thread_stress_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_writes_done_hangs_with_pending_read_test bins/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_test bins/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_and_writes_closed_test bins/$(CONFIG)/chttp2_socket_pair_cancel_after_invoke_test bins/$(CONFIG)/chttp2_socket_pair_cancel_before_invoke_test bins/$(CONFIG)/chttp2_socket_pair_cancel_in_a_vacuum_test bins/$(CONFIG)/chttp2_socket_pair_census_simple_request_test bins/$(CONFIG)/chttp2_socket_pair_disappearing_server_test bins/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_test bins/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_tags_test bins/$(CONFIG)/chttp2_socket_pair_graceful_server_shutdown_test bins/$(CONFIG)/chttp2_socket_pair_invoke_large_request_test bins/$(CONFIG)/chttp2_socket_pair_max_concurrent_streams_test bins/$(CONFIG)/chttp2_socket_pair_no_op_test bins/$(CONFIG)/chttp2_socket_pair_ping_pong_streaming_test bins/$(CONFIG)/chttp2_socket_pair_request_response_with_binary_metadata_and_payload_test bins/$(CONFIG)/chttp2_socket_pair_request_response_with_metadata_and_payload_test bins/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_test bins/$(CONFIG)/chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_test bins/$(CONFIG)/chttp2_socket_pair_simple_delayed_request_test bins/$(CONFIG)/chttp2_socket_pair_simple_request_test bins/$(CONFIG)/chttp2_socket_pair_thread_stress_test bins/$(CONFIG)/chttp2_socket_pair_writes_done_hangs_with_pending_read_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_test 
bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_census_simple_request_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_disappearing_server_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_graceful_server_shutdown_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_no_op_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_binary_metadata_and_payload_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_delayed_request_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_thread_stress_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_writes_done_hangs_with_pending_read_test
+buildtests_c: privatelibs_c bins/$(CONFIG)/alarm_heap_test bins/$(CONFIG)/alarm_list_test bins/$(CONFIG)/alarm_test bins/$(CONFIG)/alpn_test bins/$(CONFIG)/bin_encoder_test bins/$(CONFIG)/census_hash_table_test bins/$(CONFIG)/census_statistics_multiple_writers_circular_buffer_test bins/$(CONFIG)/census_statistics_multiple_writers_test bins/$(CONFIG)/census_statistics_performance_test bins/$(CONFIG)/census_statistics_quick_test bins/$(CONFIG)/census_statistics_small_log_test bins/$(CONFIG)/census_stub_test bins/$(CONFIG)/census_window_stats_test bins/$(CONFIG)/chttp2_status_conversion_test bins/$(CONFIG)/chttp2_stream_encoder_test bins/$(CONFIG)/chttp2_stream_map_test bins/$(CONFIG)/chttp2_transport_end2end_test bins/$(CONFIG)/dualstack_socket_test bins/$(CONFIG)/echo_client bins/$(CONFIG)/echo_server bins/$(CONFIG)/echo_test bins/$(CONFIG)/fd_posix_test bins/$(CONFIG)/fling_client bins/$(CONFIG)/fling_server bins/$(CONFIG)/fling_stream_test bins/$(CONFIG)/fling_test bins/$(CONFIG)/gpr_cancellable_test bins/$(CONFIG)/gpr_cmdline_test bins/$(CONFIG)/gpr_histogram_test bins/$(CONFIG)/gpr_host_port_test bins/$(CONFIG)/gpr_log_test bins/$(CONFIG)/gpr_slice_buffer_test bins/$(CONFIG)/gpr_slice_test bins/$(CONFIG)/gpr_string_test bins/$(CONFIG)/gpr_sync_test bins/$(CONFIG)/gpr_thd_test bins/$(CONFIG)/gpr_time_test bins/$(CONFIG)/gpr_useful_test bins/$(CONFIG)/grpc_base64_test bins/$(CONFIG)/grpc_byte_buffer_reader_test bins/$(CONFIG)/grpc_channel_stack_test bins/$(CONFIG)/grpc_completion_queue_test bins/$(CONFIG)/grpc_credentials_test bins/$(CONFIG)/grpc_json_token_test bins/$(CONFIG)/grpc_stream_op_test bins/$(CONFIG)/hpack_parser_test bins/$(CONFIG)/hpack_table_test bins/$(CONFIG)/httpcli_format_request_test bins/$(CONFIG)/httpcli_parser_test bins/$(CONFIG)/httpcli_test bins/$(CONFIG)/json_rewrite bins/$(CONFIG)/json_rewrite_test bins/$(CONFIG)/json_test bins/$(CONFIG)/lame_client_test bins/$(CONFIG)/message_compress_test bins/$(CONFIG)/metadata_buffer_test bins/$(CONFIG)/murmur_hash_test bins/$(CONFIG)/no_server_test bins/$(CONFIG)/poll_kick_posix_test bins/$(CONFIG)/resolve_address_test bins/$(CONFIG)/secure_endpoint_test bins/$(CONFIG)/sockaddr_utils_test bins/$(CONFIG)/tcp_client_posix_test bins/$(CONFIG)/tcp_posix_test bins/$(CONFIG)/tcp_server_posix_test bins/$(CONFIG)/time_averaged_stats_test bins/$(CONFIG)/time_test bins/$(CONFIG)/timeout_encoding_test bins/$(CONFIG)/transport_metadata_test bins/$(CONFIG)/chttp2_fake_security_cancel_after_accept_test bins/$(CONFIG)/chttp2_fake_security_cancel_after_accept_and_writes_closed_test bins/$(CONFIG)/chttp2_fake_security_cancel_after_invoke_test bins/$(CONFIG)/chttp2_fake_security_cancel_before_invoke_test bins/$(CONFIG)/chttp2_fake_security_cancel_in_a_vacuum_test bins/$(CONFIG)/chttp2_fake_security_census_simple_request_test bins/$(CONFIG)/chttp2_fake_security_disappearing_server_test bins/$(CONFIG)/chttp2_fake_security_early_server_shutdown_finishes_inflight_calls_test bins/$(CONFIG)/chttp2_fake_security_early_server_shutdown_finishes_tags_test bins/$(CONFIG)/chttp2_fake_security_graceful_server_shutdown_test bins/$(CONFIG)/chttp2_fake_security_invoke_large_request_test bins/$(CONFIG)/chttp2_fake_security_max_concurrent_streams_test bins/$(CONFIG)/chttp2_fake_security_no_op_test bins/$(CONFIG)/chttp2_fake_security_ping_pong_streaming_test bins/$(CONFIG)/chttp2_fake_security_request_response_with_binary_metadata_and_payload_test bins/$(CONFIG)/chttp2_fake_security_request_response_with_metadata_and_payload_test 
bins/$(CONFIG)/chttp2_fake_security_request_response_with_payload_test bins/$(CONFIG)/chttp2_fake_security_request_response_with_trailing_metadata_and_payload_test bins/$(CONFIG)/chttp2_fake_security_simple_delayed_request_test bins/$(CONFIG)/chttp2_fake_security_simple_request_test bins/$(CONFIG)/chttp2_fake_security_thread_stress_test bins/$(CONFIG)/chttp2_fake_security_writes_done_hangs_with_pending_read_test bins/$(CONFIG)/chttp2_fullstack_cancel_after_accept_test bins/$(CONFIG)/chttp2_fullstack_cancel_after_accept_and_writes_closed_test bins/$(CONFIG)/chttp2_fullstack_cancel_after_invoke_test bins/$(CONFIG)/chttp2_fullstack_cancel_before_invoke_test bins/$(CONFIG)/chttp2_fullstack_cancel_in_a_vacuum_test bins/$(CONFIG)/chttp2_fullstack_census_simple_request_test bins/$(CONFIG)/chttp2_fullstack_disappearing_server_test bins/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_test bins/$(CONFIG)/chttp2_fullstack_early_server_shutdown_finishes_tags_test bins/$(CONFIG)/chttp2_fullstack_graceful_server_shutdown_test bins/$(CONFIG)/chttp2_fullstack_invoke_large_request_test bins/$(CONFIG)/chttp2_fullstack_max_concurrent_streams_test bins/$(CONFIG)/chttp2_fullstack_no_op_test bins/$(CONFIG)/chttp2_fullstack_ping_pong_streaming_test bins/$(CONFIG)/chttp2_fullstack_request_response_with_binary_metadata_and_payload_test bins/$(CONFIG)/chttp2_fullstack_request_response_with_metadata_and_payload_test bins/$(CONFIG)/chttp2_fullstack_request_response_with_payload_test bins/$(CONFIG)/chttp2_fullstack_request_response_with_trailing_metadata_and_payload_test bins/$(CONFIG)/chttp2_fullstack_simple_delayed_request_test bins/$(CONFIG)/chttp2_fullstack_simple_request_test bins/$(CONFIG)/chttp2_fullstack_thread_stress_test bins/$(CONFIG)/chttp2_fullstack_writes_done_hangs_with_pending_read_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_accept_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_accept_and_writes_closed_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_after_invoke_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_before_invoke_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_cancel_in_a_vacuum_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_census_simple_request_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_disappearing_server_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_inflight_calls_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_tags_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_graceful_server_shutdown_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_invoke_large_request_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_max_concurrent_streams_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_no_op_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_ping_pong_streaming_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_binary_metadata_and_payload_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_metadata_and_payload_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_payload_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_request_response_with_trailing_metadata_and_payload_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_delayed_request_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_simple_request_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_thread_stress_test bins/$(CONFIG)/chttp2_simple_ssl_fullstack_writes_done_hangs_with_pending_read_test 
bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_and_writes_closed_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_invoke_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_before_invoke_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_cancel_in_a_vacuum_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_census_simple_request_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_disappearing_server_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_inflight_calls_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_tags_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_graceful_server_shutdown_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_max_concurrent_streams_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_no_op_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_ping_pong_streaming_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_binary_metadata_and_payload_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_metadata_and_payload_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_trailing_metadata_and_payload_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_delayed_request_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_simple_request_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_thread_stress_test bins/$(CONFIG)/chttp2_simple_ssl_with_oauth2_fullstack_writes_done_hangs_with_pending_read_test bins/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_test bins/$(CONFIG)/chttp2_socket_pair_cancel_after_accept_and_writes_closed_test bins/$(CONFIG)/chttp2_socket_pair_cancel_after_invoke_test bins/$(CONFIG)/chttp2_socket_pair_cancel_before_invoke_test bins/$(CONFIG)/chttp2_socket_pair_cancel_in_a_vacuum_test bins/$(CONFIG)/chttp2_socket_pair_census_simple_request_test bins/$(CONFIG)/chttp2_socket_pair_disappearing_server_test bins/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_test bins/$(CONFIG)/chttp2_socket_pair_early_server_shutdown_finishes_tags_test bins/$(CONFIG)/chttp2_socket_pair_graceful_server_shutdown_test bins/$(CONFIG)/chttp2_socket_pair_invoke_large_request_test bins/$(CONFIG)/chttp2_socket_pair_max_concurrent_streams_test bins/$(CONFIG)/chttp2_socket_pair_no_op_test bins/$(CONFIG)/chttp2_socket_pair_ping_pong_streaming_test bins/$(CONFIG)/chttp2_socket_pair_request_response_with_binary_metadata_and_payload_test bins/$(CONFIG)/chttp2_socket_pair_request_response_with_metadata_and_payload_test bins/$(CONFIG)/chttp2_socket_pair_request_response_with_payload_test bins/$(CONFIG)/chttp2_socket_pair_request_response_with_trailing_metadata_and_payload_test bins/$(CONFIG)/chttp2_socket_pair_simple_delayed_request_test bins/$(CONFIG)/chttp2_socket_pair_simple_request_test bins/$(CONFIG)/chttp2_socket_pair_thread_stress_test bins/$(CONFIG)/chttp2_socket_pair_writes_done_hangs_with_pending_read_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_test 
bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_census_simple_request_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_disappearing_server_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_graceful_server_shutdown_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_no_op_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_binary_metadata_and_payload_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_request_response_with_trailing_metadata_and_payload_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_delayed_request_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_simple_request_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_thread_stress_test bins/$(CONFIG)/chttp2_socket_pair_one_byte_at_a_time_writes_done_hangs_with_pending_read_test
 
-buildtests_cxx: privatelibs_cxx bins/$(CONFIG)/channel_arguments_test bins/$(CONFIG)/credentials_test bins/$(CONFIG)/end2end_test bins/$(CONFIG)/interop_client bins/$(CONFIG)/interop_server bins/$(CONFIG)/tips_client bins/$(CONFIG)/tips_client_test bins/$(CONFIG)/qps_client bins/$(CONFIG)/qps_server bins/$(CONFIG)/status_test bins/$(CONFIG)/sync_client_async_server_test bins/$(CONFIG)/thread_pool_test
+buildtests_cxx: privatelibs_cxx bins/$(CONFIG)/channel_arguments_test bins/$(CONFIG)/credentials_test bins/$(CONFIG)/end2end_test bins/$(CONFIG)/interop_client bins/$(CONFIG)/interop_server bins/$(CONFIG)/qps_client bins/$(CONFIG)/qps_server bins/$(CONFIG)/status_test bins/$(CONFIG)/sync_client_async_server_test bins/$(CONFIG)/thread_pool_test bins/$(CONFIG)/tips_client bins/$(CONFIG)/tips_client_test
 
 test: test_c test_cxx
 
@@ -664,6 +667,8 @@
 	$(Q) ./bins/$(CONFIG)/httpcli_parser_test || ( echo test httpcli_parser_test failed ; exit 1 )
 	$(E) "[RUN]     Testing httpcli_test"
 	$(Q) ./bins/$(CONFIG)/httpcli_test || ( echo test httpcli_test failed ; exit 1 )
+	$(E) "[RUN]     Testing json_test"
+	$(Q) ./bins/$(CONFIG)/json_test || ( echo test json_test failed ; exit 1 )
 	$(E) "[RUN]     Testing lame_client_test"
 	$(Q) ./bins/$(CONFIG)/lame_client_test || ( echo test lame_client_test failed ; exit 1 )
 	$(E) "[RUN]     Testing message_compress_test"
@@ -969,8 +974,6 @@
 	$(Q) ./bins/$(CONFIG)/credentials_test || ( echo test credentials_test failed ; exit 1 )
 	$(E) "[RUN]     Testing end2end_test"
 	$(Q) ./bins/$(CONFIG)/end2end_test || ( echo test end2end_test failed ; exit 1 )
-	$(E) "[RUN]     Testing tips_client_test"
-	$(Q) ./bins/$(CONFIG)/tips_client_test || ( echo test tips_client_test failed ; exit 1 )
 	$(E) "[RUN]     Testing qps_client"
 	$(Q) ./bins/$(CONFIG)/qps_client || ( echo test qps_client failed ; exit 1 )
 	$(E) "[RUN]     Testing qps_server"
@@ -981,6 +984,8 @@
 	$(Q) ./bins/$(CONFIG)/sync_client_async_server_test || ( echo test sync_client_async_server_test failed ; exit 1 )
 	$(E) "[RUN]     Testing thread_pool_test"
 	$(Q) ./bins/$(CONFIG)/thread_pool_test || ( echo test thread_pool_test failed ; exit 1 )
+	$(E) "[RUN]     Testing tips_client_test"
+	$(Q) ./bins/$(CONFIG)/tips_client_test || ( echo test tips_client_test failed ; exit 1 )
 
 
 tools: privatelibs bins/$(CONFIG)/gen_hpack_tables bins/$(CONFIG)/grpc_fetch_oauth2
@@ -1001,28 +1006,36 @@
 # This prevents proper debugging after running make install.
 
 strip-static_c: static_c
+ifeq ($(CONFIG),opt)
 	$(E) "[STRIP]   Stripping libgpr.a"
 	$(Q) $(STRIP) libs/$(CONFIG)/libgpr.a
 	$(E) "[STRIP]   Stripping libgrpc.a"
 	$(Q) $(STRIP) libs/$(CONFIG)/libgrpc.a
 	$(E) "[STRIP]   Stripping libgrpc_unsecure.a"
 	$(Q) $(STRIP) libs/$(CONFIG)/libgrpc_unsecure.a
+endif
 
 strip-static_cxx: static_cxx
+ifeq ($(CONFIG),opt)
 	$(E) "[STRIP]   Stripping libgrpc++.a"
 	$(Q) $(STRIP) libs/$(CONFIG)/libgrpc++.a
+endif
 
 strip-shared_c: shared_c
+ifeq ($(CONFIG),opt)
 	$(E) "[STRIP]   Stripping libgpr.so"
 	$(Q) $(STRIP) libs/$(CONFIG)/libgpr.$(SHARED_EXT)
 	$(E) "[STRIP]   Stripping libgrpc.so"
 	$(Q) $(STRIP) libs/$(CONFIG)/libgrpc.$(SHARED_EXT)
 	$(E) "[STRIP]   Stripping libgrpc_unsecure.so"
 	$(Q) $(STRIP) libs/$(CONFIG)/libgrpc_unsecure.$(SHARED_EXT)
+endif
 
 strip-shared_cxx: shared_cxx
+ifeq ($(CONFIG),opt)
 	$(E) "[STRIP]   Stripping libgrpc++.so"
 	$(Q) $(STRIP) libs/$(CONFIG)/libgrpc++.$(SHARED_EXT)
+endif
 
 gens/examples/tips/empty.pb.cc: examples/tips/empty.proto $(PROTOC_PLUGINS)
 	$(E) "[PROTOC]  Generating protobuf CC file from $<"
@@ -1271,6 +1284,7 @@
 	$(Q) $(LD) $(LDFLAGS) -Llibs/$(CONFIG) -dynamiclib -o libs/$(CONFIG)/libgpr.$(SHARED_EXT) $(LIBGPR_OBJS) $(LDLIBS)
 else
 	$(Q) $(LD) $(LDFLAGS) -Llibs/$(CONFIG) -shared -Wl,-soname,libgpr.so.0 -o libs/$(CONFIG)/libgpr.$(SHARED_EXT) $(LIBGPR_OBJS) $(LDLIBS)
+	$(Q) ln -sf libgpr.$(SHARED_EXT) libs/$(CONFIG)/libgpr.so.0
 	$(Q) ln -sf libgpr.$(SHARED_EXT) libs/$(CONFIG)/libgpr.so
 endif
 endif
@@ -1408,6 +1422,10 @@
     src/core/iomgr/wakeup_fd_nospecial.c \
     src/core/iomgr/wakeup_fd_pipe.c \
     src/core/iomgr/wakeup_fd_posix.c \
+    src/core/json/json.c \
+    src/core/json/json_reader.c \
+    src/core/json/json_string.c \
+    src/core/json/json_writer.c \
     src/core/statistics/census_init.c \
     src/core/statistics/census_log.c \
     src/core/statistics/census_rpc_stats.c \
@@ -1449,7 +1467,6 @@
     src/core/transport/metadata.c \
     src/core/transport/stream_op.c \
     src/core/transport/transport.c \
-    third_party/cJSON/cJSON.c \
 
 PUBLIC_HEADERS_C += \
     include/grpc/grpc_security.h \
@@ -1531,6 +1548,10 @@
 src/core/iomgr/wakeup_fd_nospecial.c: $(OPENSSL_DEP)
 src/core/iomgr/wakeup_fd_pipe.c: $(OPENSSL_DEP)
 src/core/iomgr/wakeup_fd_posix.c: $(OPENSSL_DEP)
+src/core/json/json.c: $(OPENSSL_DEP)
+src/core/json/json_reader.c: $(OPENSSL_DEP)
+src/core/json/json_string.c: $(OPENSSL_DEP)
+src/core/json/json_writer.c: $(OPENSSL_DEP)
 src/core/statistics/census_init.c: $(OPENSSL_DEP)
 src/core/statistics/census_log.c: $(OPENSSL_DEP)
 src/core/statistics/census_rpc_stats.c: $(OPENSSL_DEP)
@@ -1572,7 +1593,6 @@
 src/core/transport/metadata.c: $(OPENSSL_DEP)
 src/core/transport/stream_op.c: $(OPENSSL_DEP)
 src/core/transport/transport.c: $(OPENSSL_DEP)
-third_party/cJSON/cJSON.c: $(OPENSSL_DEP)
 endif
 
 libs/$(CONFIG)/libgrpc.a: $(ZLIB_DEP) $(OPENSSL_DEP) $(LIBGRPC_OBJS)
@@ -1606,6 +1626,7 @@
 	$(Q) $(LD) $(LDFLAGS) -Llibs/$(CONFIG) -dynamiclib -o libs/$(CONFIG)/libgrpc.$(SHARED_EXT) $(LIBGRPC_OBJS) $(LDLIBS) $(LDLIBS_SECURE) $(OPENSSL_MERGE_LIBS) -lgpr
 else
 	$(Q) $(LD) $(LDFLAGS) -Llibs/$(CONFIG) -shared -Wl,-soname,libgrpc.so.0 -o libs/$(CONFIG)/libgrpc.$(SHARED_EXT) $(LIBGRPC_OBJS) $(LDLIBS) $(LDLIBS_SECURE) $(OPENSSL_MERGE_LIBS) -lgpr
+	$(Q) ln -sf libgrpc.$(SHARED_EXT) libs/$(CONFIG)/libgrpc.so.0
 	$(Q) ln -sf libgrpc.$(SHARED_EXT) libs/$(CONFIG)/libgrpc.so
 endif
 endif
@@ -1675,6 +1696,10 @@
 objs/$(CONFIG)/src/core/iomgr/wakeup_fd_nospecial.o: 
 objs/$(CONFIG)/src/core/iomgr/wakeup_fd_pipe.o: 
 objs/$(CONFIG)/src/core/iomgr/wakeup_fd_posix.o: 
+objs/$(CONFIG)/src/core/json/json.o: 
+objs/$(CONFIG)/src/core/json/json_reader.o: 
+objs/$(CONFIG)/src/core/json/json_string.o: 
+objs/$(CONFIG)/src/core/json/json_writer.o: 
 objs/$(CONFIG)/src/core/statistics/census_init.o: 
 objs/$(CONFIG)/src/core/statistics/census_log.o: 
 objs/$(CONFIG)/src/core/statistics/census_rpc_stats.o: 
@@ -1716,7 +1741,6 @@
 objs/$(CONFIG)/src/core/transport/metadata.o: 
 objs/$(CONFIG)/src/core/transport/stream_op.o: 
 objs/$(CONFIG)/src/core/transport/transport.o: 
-objs/$(CONFIG)/third_party/cJSON/cJSON.o: 
 
 
 LIBGRPC_TEST_UTIL_SRC = \
@@ -1839,6 +1863,10 @@
     src/core/iomgr/wakeup_fd_nospecial.c \
     src/core/iomgr/wakeup_fd_pipe.c \
     src/core/iomgr/wakeup_fd_posix.c \
+    src/core/json/json.c \
+    src/core/json/json_reader.c \
+    src/core/json/json_string.c \
+    src/core/json/json_writer.c \
     src/core/statistics/census_init.c \
     src/core/statistics/census_log.c \
     src/core/statistics/census_rpc_stats.c \
@@ -1880,7 +1908,6 @@
     src/core/transport/metadata.c \
     src/core/transport/stream_op.c \
     src/core/transport/transport.c \
-    third_party/cJSON/cJSON.c \
 
 PUBLIC_HEADERS_C += \
     include/grpc/byte_buffer.h \
@@ -1914,6 +1941,7 @@
 	$(Q) $(LD) $(LDFLAGS) -Llibs/$(CONFIG) -dynamiclib -o libs/$(CONFIG)/libgrpc_unsecure.$(SHARED_EXT) $(LIBGRPC_UNSECURE_OBJS) $(LDLIBS) -lgpr
 else
 	$(Q) $(LD) $(LDFLAGS) -Llibs/$(CONFIG) -shared -Wl,-soname,libgrpc_unsecure.so.0 -o libs/$(CONFIG)/libgrpc_unsecure.$(SHARED_EXT) $(LIBGRPC_UNSECURE_OBJS) $(LDLIBS) -lgpr
+	$(Q) ln -sf libgrpc_unsecure.$(SHARED_EXT) libs/$(CONFIG)/libgrpc_unsecure.so.0
 	$(Q) ln -sf libgrpc_unsecure.$(SHARED_EXT) libs/$(CONFIG)/libgrpc_unsecure.so
 endif
 endif
@@ -1966,6 +1994,10 @@
 objs/$(CONFIG)/src/core/iomgr/wakeup_fd_nospecial.o: 
 objs/$(CONFIG)/src/core/iomgr/wakeup_fd_pipe.o: 
 objs/$(CONFIG)/src/core/iomgr/wakeup_fd_posix.o: 
+objs/$(CONFIG)/src/core/json/json.o: 
+objs/$(CONFIG)/src/core/json/json_reader.o: 
+objs/$(CONFIG)/src/core/json/json_string.o: 
+objs/$(CONFIG)/src/core/json/json_writer.o: 
 objs/$(CONFIG)/src/core/statistics/census_init.o: 
 objs/$(CONFIG)/src/core/statistics/census_log.o: 
 objs/$(CONFIG)/src/core/statistics/census_rpc_stats.o: 
@@ -2007,7 +2039,6 @@
 objs/$(CONFIG)/src/core/transport/metadata.o: 
 objs/$(CONFIG)/src/core/transport/stream_op.o: 
 objs/$(CONFIG)/src/core/transport/transport.o: 
-objs/$(CONFIG)/third_party/cJSON/cJSON.o: 
 
 
 LIBGRPC++_SRC = \
@@ -2116,6 +2147,7 @@
 	$(Q) $(LDXX) $(LDFLAGS) -Llibs/$(CONFIG) -dynamiclib -o libs/$(CONFIG)/libgrpc++.$(SHARED_EXT) $(LIBGRPC++_OBJS) $(LDLIBS) $(LDLIBS_SECURE) $(OPENSSL_MERGE_LIBS) -lgrpc
 else
 	$(Q) $(LDXX) $(LDFLAGS) -Llibs/$(CONFIG) -shared -Wl,-soname,libgrpc++.so.0 -o libs/$(CONFIG)/libgrpc++.$(SHARED_EXT) $(LIBGRPC++_OBJS) $(LDLIBS) $(LDLIBS_SECURE) $(OPENSSL_MERGE_LIBS) -lgrpc
+	$(Q) ln -sf libgrpc++.$(SHARED_EXT) libs/$(CONFIG)/libgrpc++.so.0
 	$(Q) ln -sf libgrpc++.$(SHARED_EXT) libs/$(CONFIG)/libgrpc++.so
 endif
 endif
@@ -4849,6 +4881,99 @@
 endif
 
 
+JSON_REWRITE_SRC = \
+    test/core/json/json_rewrite.c \
+
+JSON_REWRITE_OBJS = $(addprefix objs/$(CONFIG)/, $(addsuffix .o, $(basename $(JSON_REWRITE_SRC))))
+
+ifeq ($(NO_SECURE),true)
+
+# You can't build secure targets if you don't have OpenSSL with ALPN.
+
+bins/$(CONFIG)/json_rewrite: openssl_dep_error
+
+else
+
+bins/$(CONFIG)/json_rewrite: $(JSON_REWRITE_OBJS) libs/$(CONFIG)/libgrpc.a libs/$(CONFIG)/libgpr.a
+	$(E) "[LD]      Linking $@"
+	$(Q) mkdir -p `dirname $@`
+	$(Q) $(LD) $(LDFLAGS) $(JSON_REWRITE_OBJS) libs/$(CONFIG)/libgrpc.a libs/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o bins/$(CONFIG)/json_rewrite
+
+endif
+
+objs/$(CONFIG)/test/core/json/json_rewrite.o:  libs/$(CONFIG)/libgrpc.a libs/$(CONFIG)/libgpr.a
+
+deps_json_rewrite: $(JSON_REWRITE_OBJS:.o=.dep)
+
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(JSON_REWRITE_OBJS:.o=.dep)
+endif
+endif
+
+
+JSON_REWRITE_TEST_SRC = \
+    test/core/json/json_rewrite_test.c \
+
+JSON_REWRITE_TEST_OBJS = $(addprefix objs/$(CONFIG)/, $(addsuffix .o, $(basename $(JSON_REWRITE_TEST_SRC))))
+
+ifeq ($(NO_SECURE),true)
+
+# You can't build secure targets if you don't have OpenSSL with ALPN.
+
+bins/$(CONFIG)/json_rewrite_test: openssl_dep_error
+
+else
+
+bins/$(CONFIG)/json_rewrite_test: $(JSON_REWRITE_TEST_OBJS) libs/$(CONFIG)/libgrpc_test_util.a libs/$(CONFIG)/libgrpc.a libs/$(CONFIG)/libgpr_test_util.a libs/$(CONFIG)/libgpr.a
+	$(E) "[LD]      Linking $@"
+	$(Q) mkdir -p `dirname $@`
+	$(Q) $(LD) $(LDFLAGS) $(JSON_REWRITE_TEST_OBJS) libs/$(CONFIG)/libgrpc_test_util.a libs/$(CONFIG)/libgrpc.a libs/$(CONFIG)/libgpr_test_util.a libs/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o bins/$(CONFIG)/json_rewrite_test
+
+endif
+
+objs/$(CONFIG)/test/core/json/json_rewrite_test.o:  libs/$(CONFIG)/libgrpc_test_util.a libs/$(CONFIG)/libgrpc.a libs/$(CONFIG)/libgpr_test_util.a libs/$(CONFIG)/libgpr.a
+
+deps_json_rewrite_test: $(JSON_REWRITE_TEST_OBJS:.o=.dep)
+
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(JSON_REWRITE_TEST_OBJS:.o=.dep)
+endif
+endif
+
+
+JSON_TEST_SRC = \
+    test/core/json/json_test.c \
+
+JSON_TEST_OBJS = $(addprefix objs/$(CONFIG)/, $(addsuffix .o, $(basename $(JSON_TEST_SRC))))
+
+ifeq ($(NO_SECURE),true)
+
+# You can't build secure targets if you don't have OpenSSL with ALPN.
+
+bins/$(CONFIG)/json_test: openssl_dep_error
+
+else
+
+bins/$(CONFIG)/json_test: $(JSON_TEST_OBJS) libs/$(CONFIG)/libgrpc_test_util.a libs/$(CONFIG)/libgrpc.a libs/$(CONFIG)/libgpr_test_util.a libs/$(CONFIG)/libgpr.a
+	$(E) "[LD]      Linking $@"
+	$(Q) mkdir -p `dirname $@`
+	$(Q) $(LD) $(LDFLAGS) $(JSON_TEST_OBJS) libs/$(CONFIG)/libgrpc_test_util.a libs/$(CONFIG)/libgrpc.a libs/$(CONFIG)/libgpr_test_util.a libs/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o bins/$(CONFIG)/json_test
+
+endif
+
+objs/$(CONFIG)/test/core/json/json_test.o:  libs/$(CONFIG)/libgrpc_test_util.a libs/$(CONFIG)/libgrpc.a libs/$(CONFIG)/libgpr_test_util.a libs/$(CONFIG)/libgpr.a
+
+deps_json_test: $(JSON_TEST_OBJS:.o=.dep)
+
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(JSON_TEST_OBJS:.o=.dep)
+endif
+endif
+
+
 LAME_CLIENT_TEST_SRC = \
     test/core/surface/lame_client_test.c \
 
@@ -5564,68 +5689,6 @@
 endif
 
 
-TIPS_CLIENT_SRC = \
-    examples/tips/client_main.cc \
-
-TIPS_CLIENT_OBJS = $(addprefix objs/$(CONFIG)/, $(addsuffix .o, $(basename $(TIPS_CLIENT_SRC))))
-
-ifeq ($(NO_SECURE),true)
-
-# You can't build secure targets if you don't have OpenSSL with ALPN.
-
-bins/$(CONFIG)/tips_client: openssl_dep_error
-
-else
-
-bins/$(CONFIG)/tips_client: $(TIPS_CLIENT_OBJS) libs/$(CONFIG)/libtips_client_lib.a libs/$(CONFIG)/libgrpc++_test_util.a libs/$(CONFIG)/libgrpc_test_util.a libs/$(CONFIG)/libgrpc++.a libs/$(CONFIG)/libgrpc.a libs/$(CONFIG)/libgpr_test_util.a libs/$(CONFIG)/libgpr.a
-	$(E) "[LD]      Linking $@"
-	$(Q) mkdir -p `dirname $@`
-	$(Q) $(LDXX) $(LDFLAGS) $(TIPS_CLIENT_OBJS) $(GTEST_LIB) libs/$(CONFIG)/libtips_client_lib.a libs/$(CONFIG)/libgrpc++_test_util.a libs/$(CONFIG)/libgrpc_test_util.a libs/$(CONFIG)/libgrpc++.a libs/$(CONFIG)/libgrpc.a libs/$(CONFIG)/libgpr_test_util.a libs/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS) $(LDLIBS_SECURE) -o bins/$(CONFIG)/tips_client
-
-endif
-
-objs/$(CONFIG)/examples/tips/client_main.o:  libs/$(CONFIG)/libtips_client_lib.a libs/$(CONFIG)/libgrpc++_test_util.a libs/$(CONFIG)/libgrpc_test_util.a libs/$(CONFIG)/libgrpc++.a libs/$(CONFIG)/libgrpc.a libs/$(CONFIG)/libgpr_test_util.a libs/$(CONFIG)/libgpr.a
-
-deps_tips_client: $(TIPS_CLIENT_OBJS:.o=.dep)
-
-ifneq ($(NO_SECURE),true)
-ifneq ($(NO_DEPS),true)
--include $(TIPS_CLIENT_OBJS:.o=.dep)
-endif
-endif
-
-
-TIPS_CLIENT_TEST_SRC = \
-    examples/tips/client_test.cc \
-
-TIPS_CLIENT_TEST_OBJS = $(addprefix objs/$(CONFIG)/, $(addsuffix .o, $(basename $(TIPS_CLIENT_TEST_SRC))))
-
-ifeq ($(NO_SECURE),true)
-
-# You can't build secure targets if you don't have OpenSSL with ALPN.
-
-bins/$(CONFIG)/tips_client_test: openssl_dep_error
-
-else
-
-bins/$(CONFIG)/tips_client_test: $(TIPS_CLIENT_TEST_OBJS) libs/$(CONFIG)/libtips_client_lib.a libs/$(CONFIG)/libgrpc++_test_util.a libs/$(CONFIG)/libgrpc_test_util.a libs/$(CONFIG)/libgrpc++.a libs/$(CONFIG)/libgrpc.a libs/$(CONFIG)/libgpr_test_util.a libs/$(CONFIG)/libgpr.a
-	$(E) "[LD]      Linking $@"
-	$(Q) mkdir -p `dirname $@`
-	$(Q) $(LDXX) $(LDFLAGS) $(TIPS_CLIENT_TEST_OBJS) $(GTEST_LIB) libs/$(CONFIG)/libtips_client_lib.a libs/$(CONFIG)/libgrpc++_test_util.a libs/$(CONFIG)/libgrpc_test_util.a libs/$(CONFIG)/libgrpc++.a libs/$(CONFIG)/libgrpc.a libs/$(CONFIG)/libgpr_test_util.a libs/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS) $(LDLIBS_SECURE) -o bins/$(CONFIG)/tips_client_test
-
-endif
-
-objs/$(CONFIG)/examples/tips/client_test.o:  libs/$(CONFIG)/libtips_client_lib.a libs/$(CONFIG)/libgrpc++_test_util.a libs/$(CONFIG)/libgrpc_test_util.a libs/$(CONFIG)/libgrpc++.a libs/$(CONFIG)/libgrpc.a libs/$(CONFIG)/libgpr_test_util.a libs/$(CONFIG)/libgpr.a
-
-deps_tips_client_test: $(TIPS_CLIENT_TEST_OBJS:.o=.dep)
-
-ifneq ($(NO_SECURE),true)
-ifneq ($(NO_DEPS),true)
--include $(TIPS_CLIENT_TEST_OBJS:.o=.dep)
-endif
-endif
-
-
 QPS_CLIENT_SRC = \
     gens/test/cpp/qps/qpstest.pb.cc \
     test/cpp/qps/client.cc \
@@ -5806,6 +5869,68 @@
 endif
 
 
+TIPS_CLIENT_SRC = \
+    examples/tips/client_main.cc \
+
+TIPS_CLIENT_OBJS = $(addprefix objs/$(CONFIG)/, $(addsuffix .o, $(basename $(TIPS_CLIENT_SRC))))
+
+ifeq ($(NO_SECURE),true)
+
+# You can't build secure targets if you don't have OpenSSL with ALPN.
+
+bins/$(CONFIG)/tips_client: openssl_dep_error
+
+else
+
+bins/$(CONFIG)/tips_client: $(TIPS_CLIENT_OBJS) libs/$(CONFIG)/libtips_client_lib.a libs/$(CONFIG)/libgrpc++_test_util.a libs/$(CONFIG)/libgrpc_test_util.a libs/$(CONFIG)/libgrpc++.a libs/$(CONFIG)/libgrpc.a libs/$(CONFIG)/libgpr_test_util.a libs/$(CONFIG)/libgpr.a
+	$(E) "[LD]      Linking $@"
+	$(Q) mkdir -p `dirname $@`
+	$(Q) $(LDXX) $(LDFLAGS) $(TIPS_CLIENT_OBJS) $(GTEST_LIB) libs/$(CONFIG)/libtips_client_lib.a libs/$(CONFIG)/libgrpc++_test_util.a libs/$(CONFIG)/libgrpc_test_util.a libs/$(CONFIG)/libgrpc++.a libs/$(CONFIG)/libgrpc.a libs/$(CONFIG)/libgpr_test_util.a libs/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS) $(LDLIBS_SECURE) -o bins/$(CONFIG)/tips_client
+
+endif
+
+objs/$(CONFIG)/examples/tips/client_main.o:  libs/$(CONFIG)/libtips_client_lib.a libs/$(CONFIG)/libgrpc++_test_util.a libs/$(CONFIG)/libgrpc_test_util.a libs/$(CONFIG)/libgrpc++.a libs/$(CONFIG)/libgrpc.a libs/$(CONFIG)/libgpr_test_util.a libs/$(CONFIG)/libgpr.a
+
+deps_tips_client: $(TIPS_CLIENT_OBJS:.o=.dep)
+
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(TIPS_CLIENT_OBJS:.o=.dep)
+endif
+endif
+
+
+TIPS_CLIENT_TEST_SRC = \
+    examples/tips/client_test.cc \
+
+TIPS_CLIENT_TEST_OBJS = $(addprefix objs/$(CONFIG)/, $(addsuffix .o, $(basename $(TIPS_CLIENT_TEST_SRC))))
+
+ifeq ($(NO_SECURE),true)
+
+# You can't build secure targets if you don't have OpenSSL with ALPN.
+
+bins/$(CONFIG)/tips_client_test: openssl_dep_error
+
+else
+
+bins/$(CONFIG)/tips_client_test: $(TIPS_CLIENT_TEST_OBJS) libs/$(CONFIG)/libtips_client_lib.a libs/$(CONFIG)/libgrpc++_test_util.a libs/$(CONFIG)/libgrpc_test_util.a libs/$(CONFIG)/libgrpc++.a libs/$(CONFIG)/libgrpc.a libs/$(CONFIG)/libgpr_test_util.a libs/$(CONFIG)/libgpr.a
+	$(E) "[LD]      Linking $@"
+	$(Q) mkdir -p `dirname $@`
+	$(Q) $(LDXX) $(LDFLAGS) $(TIPS_CLIENT_TEST_OBJS) $(GTEST_LIB) libs/$(CONFIG)/libtips_client_lib.a libs/$(CONFIG)/libgrpc++_test_util.a libs/$(CONFIG)/libgrpc_test_util.a libs/$(CONFIG)/libgrpc++.a libs/$(CONFIG)/libgrpc.a libs/$(CONFIG)/libgpr_test_util.a libs/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS) $(LDLIBS_SECURE) -o bins/$(CONFIG)/tips_client_test
+
+endif
+
+objs/$(CONFIG)/examples/tips/client_test.o:  libs/$(CONFIG)/libtips_client_lib.a libs/$(CONFIG)/libgrpc++_test_util.a libs/$(CONFIG)/libgrpc_test_util.a libs/$(CONFIG)/libgrpc++.a libs/$(CONFIG)/libgrpc.a libs/$(CONFIG)/libgpr_test_util.a libs/$(CONFIG)/libgpr.a
+
+deps_tips_client_test: $(TIPS_CLIENT_TEST_OBJS:.o=.dep)
+
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(TIPS_CLIENT_TEST_OBJS:.o=.dep)
+endif
+endif
+
+
 CHTTP2_FAKE_SECURITY_CANCEL_AFTER_ACCEPT_TEST_SRC = \
 
 CHTTP2_FAKE_SECURITY_CANCEL_AFTER_ACCEPT_TEST_OBJS = $(addprefix objs/$(CONFIG)/, $(addsuffix .o, $(basename $(CHTTP2_FAKE_SECURITY_CANCEL_AFTER_ACCEPT_TEST_SRC))))
diff --git a/README.md b/README.md
index fa39d3b..95825f6 100644
--- a/README.md
+++ b/README.md
@@ -24,7 +24,7 @@
 (a collection of methods), and generate client and server side interfaces
 which they use on the client-side and implement on the server side.
 
-By default, gRPC uses [Protocol Buffers](github.com/google/protobuf) as the
+By default, gRPC uses [Protocol Buffers](https://github.com/google/protobuf) as the
 Interface Definition Language (IDL) for describing both the service interface
 and the structure of the payload messages. It is possible to use other 
 alternatives if desired.
@@ -67,7 +67,7 @@
 A gRPC RPC comprises of a bidirectional stream of messages, initiated by the client. In the client-to-server direction, this stream begins with a mandatory `Call Header`, followed by optional `Initial-Metadata`, followed by zero or more `Payload Messages`. The server-to-client direction contains an optional `Initial-Metadata`, followed by zero or more `Payload Messages` terminated with a mandatory `Status` and optional `Status-Metadata` (a.k.a.,`Trailing-Metadata`).
 
 ## Implementation over HTTP/2
-The abstract protocol defined above is implemented over [HTTP/2](https://http2.github.io/). gRPC bidirectional streams are mapped to HTTP/2 streams. The contents of `Call Header` and `Initial Metadata` are sent as HTTP/2 headers and subject to HPAC compression. `Payload Messages` are serialized into a byte stream of length prefixed gRPC frames which are then fragmented into HTTP/2 frames at the sender and reassembled at the receiver. `Status` and `Trailing-Metadata` are sent as HTTP/2 trailing headers (a.k.a., trailers).     
+The abstract protocol defined above is implemented over [HTTP/2](https://http2.github.io/). gRPC bidirectional streams are mapped to HTTP/2 streams. The contents of `Call Header` and `Initial Metadata` are sent as HTTP/2 headers and subject to HPACK compression. `Payload Messages` are serialized into a byte stream of length prefixed gRPC frames which are then fragmented into HTTP/2 frames at the sender and reassembled at the receiver. `Status` and `Trailing-Metadata` are sent as HTTP/2 trailing headers (a.k.a., trailers).     
 
 ## Flow Control
-gRPC inherits the flow control mchanims in HTTP/2 and uses them to enable fine-grained control of the amount of memory used for buffering in-flight messages.
+gRPC inherits the flow control mechanisms in HTTP/2 and uses them to enable fine-grained control of the amount of memory used for buffering in-flight messages.
diff --git a/build.json b/build.json
index bf780b7..119e612 100644
--- a/build.json
+++ b/build.json
@@ -61,8 +61,12 @@
         "src/core/iomgr/tcp_posix.h",
         "src/core/iomgr/tcp_server.h",
         "src/core/iomgr/time_averaged_stats.h",
-        "src/core/iomgr/wakeup_fd_posix.h",
         "src/core/iomgr/wakeup_fd_pipe.h",
+        "src/core/iomgr/wakeup_fd_posix.h",
+        "src/core/json/json.h",
+        "src/core/json/json_common.h",
+        "src/core/json/json_reader.h",
+        "src/core/json/json_writer.h",
         "src/core/statistics/census_interface.h",
         "src/core/statistics/census_log.h",
         "src/core/statistics/census_rpc_stats.h",
@@ -144,6 +148,10 @@
         "src/core/iomgr/wakeup_fd_nospecial.c",
         "src/core/iomgr/wakeup_fd_pipe.c",
         "src/core/iomgr/wakeup_fd_posix.c",
+        "src/core/json/json.c",
+        "src/core/json/json_reader.c",
+        "src/core/json/json_string.c",
+        "src/core/json/json_writer.c",
         "src/core/statistics/census_init.c",
         "src/core/statistics/census_log.c",
         "src/core/statistics/census_rpc_stats.c",
@@ -184,8 +192,7 @@
         "src/core/transport/chttp2_transport.c",
         "src/core/transport/metadata.c",
         "src/core/transport/stream_op.c",
-        "src/core/transport/transport.c",
-        "third_party/cJSON/cJSON.c"
+        "src/core/transport/transport.c"
       ]
     }
   ],
@@ -1186,6 +1193,48 @@
       ]
     },
     {
+      "name": "json_rewrite",
+      "build": "test",
+      "language": "c",
+      "src": [
+        "test/core/json/json_rewrite.c"
+      ],
+      "deps": [
+        "grpc",
+        "gpr"
+      ],
+      "run": false
+    },
+    {
+      "name": "json_rewrite_test",
+      "build": "test",
+      "language": "c",
+      "src": [
+        "test/core/json/json_rewrite_test.c"
+      ],
+      "deps": [
+        "grpc_test_util",
+        "grpc",
+        "gpr_test_util",
+        "gpr"
+      ],
+      "run": false
+    },
+    {
+      "name": "json_test",
+      "build": "test",
+      "language": "c",
+      "src": [
+        "test/core/json/json_test.c"
+      ],
+      "deps": [
+        "grpc_test_util",
+        "grpc",
+        "gpr_test_util",
+        "gpr"
+      ]
+    },
+    {
       "name": "lame_client_test",
       "build": "test",
       "language": "c",
@@ -1519,41 +1568,6 @@
       "run": false
     },
     {
-      "name": "tips_client",
-      "build": "test",
-      "run": false,
-      "language": "c++",
-      "src": [
-        "examples/tips/client_main.cc"
-      ],
-      "deps": [
-        "tips_client_lib",
-        "grpc++_test_util",
-        "grpc_test_util",
-        "grpc++",
-        "grpc",
-        "gpr_test_util",
-        "gpr"
-      ]
-    },
-    {
-      "name": "tips_client_test",
-      "build": "test",
-      "language": "c++",
-      "src": [
-        "examples/tips/client_test.cc"
-      ],
-      "deps": [
-        "tips_client_lib",
-        "grpc++_test_util",
-        "grpc_test_util",
-        "grpc++",
-        "grpc",
-        "gpr_test_util",
-        "gpr"
-      ]
-    },
-    {
       "name": "qps_client",
       "build": "test",
       "language": "c++",
@@ -1649,6 +1663,41 @@
         "gpr_test_util",
         "gpr"
       ]
+    },
+    {
+      "name": "tips_client",
+      "build": "test",
+      "language": "c++",
+      "src": [
+        "examples/tips/client_main.cc"
+      ],
+      "deps": [
+        "tips_client_lib",
+        "grpc++_test_util",
+        "grpc_test_util",
+        "grpc++",
+        "grpc",
+        "gpr_test_util",
+        "gpr"
+      ],
+      "run": false
+    },
+    {
+      "name": "tips_client_test",
+      "build": "test",
+      "language": "c++",
+      "src": [
+        "examples/tips/client_test.cc"
+      ],
+      "deps": [
+        "tips_client_lib",
+        "grpc++_test_util",
+        "grpc_test_util",
+        "grpc++",
+        "grpc",
+        "gpr_test_util",
+        "gpr"
+      ]
     }
   ]
 }
diff --git a/examples/tips/client.cc b/examples/tips/client.cc
index 695ff80..f9d5319 100644
--- a/examples/tips/client.cc
+++ b/examples/tips/client.cc
@@ -36,7 +36,11 @@
 #include "examples/tips/client.h"
 
 using tech::pubsub::Topic;
+using tech::pubsub::DeleteTopicRequest;
+using tech::pubsub::GetTopicRequest;
 using tech::pubsub::PublisherService;
+using tech::pubsub::ListTopicsRequest;
+using tech::pubsub::ListTopicsResponse;
 
 namespace grpc {
 namespace examples {
@@ -55,6 +59,34 @@
   return stub_->CreateTopic(&context, request, &response);
 }
 
+Status Client::ListTopics() {
+  ListTopicsRequest request;
+  ListTopicsResponse response;
+  ClientContext context;
+
+  return stub_->ListTopics(&context, request, &response);
+}
+
+Status Client::GetTopic(grpc::string topic) {
+  GetTopicRequest request;
+  Topic response;
+  ClientContext context;
+
+  request.set_topic(topic);
+
+  return stub_->GetTopic(&context, request, &response);
+}
+
+Status Client::DeleteTopic(grpc::string topic) {
+  DeleteTopicRequest request;
+  proto2::Empty response;
+  ClientContext context;
+
+  request.set_topic(topic);
+
+  return stub_->DeleteTopic(&context, request, &response);
+}
+
 }  // namespace tips
 }  // namespace examples
 }  // namespace grpc
diff --git a/examples/tips/client.h b/examples/tips/client.h
index 6ae9d50..661ee5c 100644
--- a/examples/tips/client.h
+++ b/examples/tips/client.h
@@ -47,6 +47,9 @@
  public:
   Client(std::shared_ptr<grpc::ChannelInterface> channel);
   Status CreateTopic(grpc::string topic);
+  Status GetTopic(grpc::string topic);
+  Status DeleteTopic(grpc::string topic);
+  Status ListTopics();
 
  private:
   std::unique_ptr<tech::pubsub::PublisherService::Stub> stub_;
diff --git a/examples/tips/client_main.cc b/examples/tips/client_main.cc
index 23cabd1..5a3a0da 100644
--- a/examples/tips/client_main.cc
+++ b/examples/tips/client_main.cc
@@ -31,6 +31,13 @@
  *
  */
 
+#include <chrono>
+#include <fstream>
+#include <memory>
+#include <sstream>
+#include <string>
+#include <thread>
+
 #include <grpc/grpc.h>
 #include <grpc/support/log.h>
 #include <google/gflags.h>
@@ -45,6 +52,20 @@
 DEFINE_int32(server_port, 443, "Server port.");
 DEFINE_string(server_host,
               "pubsub-staging.googleapis.com", "Server host to connect to");
+DEFINE_string(service_account_key_file, "",
+              "Path to service account json key file.");
+DEFINE_string(oauth_scope, "", "Scope for OAuth tokens.");
+
+grpc::string GetServiceAccountJsonKey() {
+  static grpc::string json_key;
+  if (json_key.empty()) {
+    std::ifstream json_key_file(FLAGS_service_account_key_file);
+    std::stringstream key_stream;
+    key_stream << json_key_file.rdbuf();
+    json_key = key_stream.str();
+  }
+  return json_key;
+}
 
 int main(int argc, char** argv) {
   grpc_init();
@@ -56,8 +77,15 @@
   snprintf(host_port, host_port_buf_size, "%s:%d", FLAGS_server_host.c_str(),
            FLAGS_server_port);
 
-  std::unique_ptr<grpc::Credentials> creds =
-      grpc::CredentialsFactory::ComputeEngineCredentials();
+  std::unique_ptr<grpc::Credentials> creds;
+  if (FLAGS_service_account_key_file != "") {
+    grpc::string json_key = GetServiceAccountJsonKey();
+    creds = grpc::CredentialsFactory::ServiceAccountCredentials(
+        json_key, FLAGS_oauth_scope, std::chrono::hours(1));
+  } else {
+    creds = grpc::CredentialsFactory::ComputeEngineCredentials();
+  }
+
   std::shared_ptr<grpc::ChannelInterface> channel(
       grpc::CreateTestChannel(
           host_port,
@@ -67,8 +95,17 @@
           creds));
 
   grpc::examples::tips::Client client(channel);
-  grpc::Status s = client.CreateTopic("test");
-  gpr_log(GPR_INFO, "return code %d", s.code());
+
+  grpc::Status s = client.CreateTopic("/topics/stoked-keyword-656/testtopics");
+  gpr_log(GPR_INFO, "return code %d, %s", s.code(), s.details().c_str());
+  GPR_ASSERT(s.IsOk());
+
+  s = client.GetTopic("/topics/stoked-keyword-656/testtopics");
+  gpr_log(GPR_INFO, "return code %d, %s", s.code(), s.details().c_str());
+  GPR_ASSERT(s.IsOk());
+
+  s = client.DeleteTopic("/topics/stoked-keyword-656/testtopics");
+  gpr_log(GPR_INFO, "return code %d, %s", s.code(), s.details().c_str());
   GPR_ASSERT(s.IsOk());
 
   channel.reset();
diff --git a/include/grpc/byte_buffer_reader.h b/include/grpc/byte_buffer_reader.h
index 6386db6..a9cbb77 100644
--- a/include/grpc/byte_buffer_reader.h
+++ b/include/grpc/byte_buffer_reader.h
@@ -42,7 +42,7 @@
   /* Different current objects correspond to different types of byte buffers */
   union {
     /* Index into a slice buffer's array of slices */
-    int index;
+    unsigned index;
   } current;
 };
 
diff --git a/include/grpc/support/port_platform.h b/include/grpc/support/port_platform.h
index 58444d0..2bf5348 100644
--- a/include/grpc/support/port_platform.h
+++ b/include/grpc/support/port_platform.h
@@ -56,6 +56,8 @@
 #define GPR_CPU_LINUX 1
 #define GPR_GCC_SYNC 1
 #define GPR_POSIX_MULTIPOLL_WITH_POLL 1
+#define GPR_POSIX_WAKEUP_FD 1
+#define GPR_LINUX_EVENTFD 1
 #define GPR_POSIX_SOCKET 1
 #define GPR_POSIX_SOCKETADDR 1
 #define GPR_POSIX_SOCKETUTILS 1
@@ -68,7 +70,7 @@
 #define GPR_GCC_ATOMIC 1
 #define GPR_LINUX 1
 #define GPR_POSIX_MULTIPOLL_WITH_POLL 1
-#define GPR_POSIX_HAS_SPECIAL_WAKEUP_FD 1
+#define GPR_POSIX_WAKEUP_FD 1
 #define GPR_LINUX_EVENTFD 1
 #define GPR_POSIX_SOCKET 1
 #define GPR_POSIX_SOCKETADDR 1
@@ -86,6 +88,8 @@
 #define GPR_GCC_ATOMIC 1
 #define GPR_POSIX_LOG 1
 #define GPR_POSIX_MULTIPOLL_WITH_POLL 1
+#define GPR_POSIX_WAKEUP_FD 1
+#define GPR_POSIX_NO_SPECIAL_WAKEUP_FD 1
 #define GPR_POSIX_SOCKET 1
 #define GPR_POSIX_SOCKETADDR 1
 #define GPR_POSIX_SOCKETUTILS 1
@@ -155,7 +159,7 @@
 typedef uintptr_t gpr_uintptr;
 
 /* INT64_MAX is unavailable on some platforms. */
-#define GPR_INT64_MAX (~(gpr_uint64)0 >> 1)
+#define GPR_INT64_MAX (gpr_int64)(~(gpr_uint64)0 >> 1)
 
 /* maximum alignment needed for any type on this platform, rounded up to a
    power of two */
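
The GPR_INT64_MAX hunk above adds a cast to gpr_int64, which gives the
constant a signed 64-bit type instead of an unsigned one. A standalone
sketch of why the type matters (illustration only, not gRPC code; it uses
<stdint.h> types in place of the gpr_* typedefs):

  /* Without the cast the constant is unsigned, so a signed operand in a
   * comparison is converted to unsigned and the result flips for negative
   * values (which the newly enabled -Wextra also warns about). */
  #include <stdint.h>
  #include <stdio.h>

  #define MAX_UNCAST (~(uint64_t)0 >> 1)           /* type: uint64_t */
  #define MAX_CAST ((int64_t)(~(uint64_t)0 >> 1))  /* type: int64_t  */

  int main(void) {
    int64_t t = -1;
    printf("%d %d\n", t < MAX_UNCAST, t < MAX_CAST); /* prints "0 1" */
    return 0;
  }
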
diff --git a/include/grpc/support/slice_buffer.h b/include/grpc/support/slice_buffer.h
index 0ad735a..80c13e0 100644
--- a/include/grpc/support/slice_buffer.h
+++ b/include/grpc/support/slice_buffer.h
@@ -73,7 +73,7 @@
 void gpr_slice_buffer_addn(gpr_slice_buffer *sb, gpr_slice *slices, size_t n);
 /* add a very small (less than 8 bytes) amount of data to the end of a slice
    buffer: returns a pointer into which to add the data */
-gpr_uint8 *gpr_slice_buffer_tiny_add(gpr_slice_buffer *sb, int len);
+gpr_uint8 *gpr_slice_buffer_tiny_add(gpr_slice_buffer *sb, unsigned len);
 /* clear a slice buffer, unref all elements */
 void gpr_slice_buffer_reset_and_unref(gpr_slice_buffer *sb);
 
diff --git a/src/core/channel/channel_args.c b/src/core/channel/channel_args.c
index 5f16c7b..f48415e 100644
--- a/src/core/channel/channel_args.c
+++ b/src/core/channel/channel_args.c
@@ -105,7 +105,7 @@
 }
 
 int grpc_channel_args_is_census_enabled(const grpc_channel_args *a) {
-  int i;
+  unsigned i;
   if (a == NULL) return 0;
   for (i = 0; i < a->num_args; i++) {
     if (0 == strcmp(a->args[i].key, GRPC_ARG_ENABLE_CENSUS)) {
diff --git a/src/core/channel/channel_stack.c b/src/core/channel/channel_stack.c
index af47df8..e28bbd7 100644
--- a/src/core/channel/channel_stack.c
+++ b/src/core/channel/channel_stack.c
@@ -125,7 +125,8 @@
     call_size += ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_call_data);
   }
 
-  GPR_ASSERT(user_data - (char *)stack ==
+  GPR_ASSERT(user_data > (char *)stack);
+  GPR_ASSERT((gpr_uintptr)(user_data - (char *)stack) ==
              grpc_channel_stack_size(filters, filter_count));
 
   stack->call_stack_size = call_size;
diff --git a/src/core/channel/http_client_filter.c b/src/core/channel/http_client_filter.c
index 4735aa9..96acb38 100644
--- a/src/core/channel/http_client_filter.c
+++ b/src/core/channel/http_client_filter.c
@@ -134,7 +134,7 @@
 }
 
 static const char *scheme_from_args(const grpc_channel_args *args) {
-  int i;
+  unsigned i;
   if (args != NULL) {
     for (i = 0; i < args->num_args; ++i) {
       if (args->args[i].type == GRPC_ARG_STRING &&
diff --git a/src/core/channel/http_server_filter.c b/src/core/channel/http_server_filter.c
index 2658a6d..b70af43 100644
--- a/src/core/channel/http_server_filter.c
+++ b/src/core/channel/http_server_filter.c
@@ -319,8 +319,8 @@
       if (channeld->gettable_count == gettable_capacity) {
         gettable_capacity =
             GPR_MAX(gettable_capacity * 3 / 2, gettable_capacity + 1);
-        channeld->gettables =
-            gpr_realloc(channeld->gettables, gettable_capacity * sizeof(gettable));
+        channeld->gettables = gpr_realloc(channeld->gettables,
+                                          gettable_capacity * sizeof(gettable));
       }
       g = &channeld->gettables[channeld->gettable_count++];
       g->path = grpc_mdelem_from_strings(mdctx, ":path", p->path);
@@ -328,15 +328,25 @@
           grpc_mdelem_from_strings(mdctx, "content-type", p->content_type);
       slice = gpr_slice_from_copied_string(p->content);
       g->content = grpc_byte_buffer_create(&slice, 1);
+      gpr_slice_unref(slice);
     }
   }
 }
 
 /* Destructor for channel data */
 static void destroy_channel_elem(grpc_channel_element *elem) {
+  size_t i;
+
   /* grab pointers to our data from the channel element */
   channel_data *channeld = elem->channel_data;
 
+  for (i = 0; i < channeld->gettable_count; i++) {
+    grpc_mdelem_unref(channeld->gettables[i].path);
+    grpc_mdelem_unref(channeld->gettables[i].content_type);
+    grpc_byte_buffer_destroy(channeld->gettables[i].content);
+  }
+  gpr_free(channeld->gettables);
+
   grpc_mdelem_unref(channeld->te_trailers);
   grpc_mdelem_unref(channeld->status_ok);
   grpc_mdelem_unref(channeld->status_not_found);
@@ -350,6 +360,6 @@
 }
 
 const grpc_channel_filter grpc_http_server_filter = {
-    call_op,           channel_op,           sizeof(call_data),
-    init_call_elem,    destroy_call_elem,    sizeof(channel_data),
-    init_channel_elem, destroy_channel_elem, "http-server"};
+    call_op, channel_op, sizeof(call_data), init_call_elem, destroy_call_elem,
+    sizeof(channel_data), init_channel_elem, destroy_channel_elem,
+    "http-server"};
diff --git a/src/core/iomgr/fd_posix.c b/src/core/iomgr/fd_posix.c
index 9f70a26..b67c6cd 100644
--- a/src/core/iomgr/fd_posix.c
+++ b/src/core/iomgr/fd_posix.c
@@ -47,12 +47,63 @@
 
 enum descriptor_state { NOT_READY, READY, WAITING };
 
-static void destroy(grpc_fd *fd) {
-  grpc_iomgr_add_callback(fd->on_done, fd->on_done_user_data);
-  gpr_mu_destroy(&fd->set_state_mu);
+/* We need to keep a freelist not because of any concerns about malloc
+ * performance but instead so that implementations with multiple threads in
+ * (for example) epoll_wait can deal with the race between pollset removal
+ * and incoming poll notifications.
+ *
+ * The problem is that the poller ultimately holds a reference to this
+ * object, so it is very difficult to know when it is safe to free it, at
+ * least without some expensive synchronization.
+ *
+ * If we keep the object freelisted, in the worst case losing this race just
+ * becomes a spurious read notification on a reused fd.
+ */
+/* TODO(klempner): We could use some form of polling generation count to know
+ * when these are safe to free. */
+/* TODO(klempner): Consider disabling freelisting if we don't have multiple
+ * threads in poll on the same fd */
+/* TODO(klempner): Batch these allocations to reduce fragmentation */
+static grpc_fd *fd_freelist = NULL;
+static gpr_mu fd_freelist_mu;
+
+static void freelist_fd(grpc_fd *fd) {
   gpr_free(fd->watchers);
+  gpr_mu_lock(&fd_freelist_mu);
+  fd->freelist_next = fd_freelist;
+  fd_freelist = fd;
+  gpr_mu_unlock(&fd_freelist_mu);
+}
+
+static grpc_fd *alloc_fd(int fd) {
+  grpc_fd *r = NULL;
+  gpr_mu_lock(&fd_freelist_mu);
+  if (fd_freelist != NULL) {
+    r = fd_freelist;
+    fd_freelist = fd_freelist->freelist_next;
+  }
+  gpr_mu_unlock(&fd_freelist_mu);
+  if (r == NULL) {
+    r = gpr_malloc(sizeof(grpc_fd));
+    gpr_mu_init(&r->set_state_mu);
+    gpr_mu_init(&r->watcher_mu);
+  }
+  gpr_atm_rel_store(&r->refst, 1);
+  gpr_atm_rel_store(&r->readst.state, NOT_READY);
+  gpr_atm_rel_store(&r->writest.state, NOT_READY);
+  gpr_atm_rel_store(&r->shutdown, 0);
+  r->fd = fd;
+  r->watchers = NULL;
+  r->watcher_count = 0;
+  r->watcher_capacity = 0;
+  r->freelist_next = NULL;
+  return r;
+}
+
+static void destroy(grpc_fd *fd) {
+  gpr_mu_destroy(&fd->set_state_mu);
+  gpr_mu_destroy(&fd->watcher_mu);
   gpr_free(fd);
-  grpc_iomgr_unref();
 }
 
 static void ref_by(grpc_fd *fd, int n) {
@@ -61,25 +112,30 @@
 
 static void unref_by(grpc_fd *fd, int n) {
   if (gpr_atm_full_fetch_add(&fd->refst, -n) == n) {
+    grpc_iomgr_add_callback(fd->on_done, fd->on_done_user_data);
+    freelist_fd(fd);
+    grpc_iomgr_unref();
+  }
+}
+
+void grpc_fd_global_init(void) {
+  gpr_mu_init(&fd_freelist_mu);
+}
+
+void grpc_fd_global_shutdown(void) {
+  while (fd_freelist != NULL) {
+    grpc_fd *fd = fd_freelist;
+    fd_freelist = fd_freelist->freelist_next;
     destroy(fd);
   }
+  gpr_mu_destroy(&fd_freelist_mu);
 }
 
 static void do_nothing(void *ignored, int success) {}
 
 grpc_fd *grpc_fd_create(int fd) {
-  grpc_fd *r = gpr_malloc(sizeof(grpc_fd));
+  grpc_fd *r = alloc_fd(fd);
   grpc_iomgr_ref();
-  gpr_atm_rel_store(&r->refst, 1);
-  gpr_atm_rel_store(&r->readst.state, NOT_READY);
-  gpr_atm_rel_store(&r->writest.state, NOT_READY);
-  gpr_mu_init(&r->set_state_mu);
-  gpr_mu_init(&r->watcher_mu);
-  gpr_atm_rel_store(&r->shutdown, 0);
-  r->fd = fd;
-  r->watchers = NULL;
-  r->watcher_count = 0;
-  r->watcher_capacity = 0;
   grpc_pollset_add_fd(grpc_backup_pollset(), r);
   return r;
 }
diff --git a/src/core/iomgr/fd_posix.h b/src/core/iomgr/fd_posix.h
index 232de0c..f42ae19 100644
--- a/src/core/iomgr/fd_posix.h
+++ b/src/core/iomgr/fd_posix.h
@@ -69,6 +69,7 @@
 
   grpc_iomgr_cb_func on_done;
   void *on_done_user_data;
+  struct grpc_fd *freelist_next;
 } grpc_fd;
 
 /* Create a wrapped file descriptor.
@@ -135,4 +136,7 @@
 void grpc_fd_ref(grpc_fd *fd);
 void grpc_fd_unref(grpc_fd *fd);
 
+void grpc_fd_global_init(void);
+void grpc_fd_global_shutdown(void);
+
 #endif /* __GRPC_INTERNAL_IOMGR_FD_POSIX_H_ */
diff --git a/src/core/iomgr/iomgr.c b/src/core/iomgr/iomgr.c
index 7f266ab..8989b49 100644
--- a/src/core/iomgr/iomgr.c
+++ b/src/core/iomgr/iomgr.c
@@ -98,7 +98,6 @@
   gpr_timespec shutdown_deadline =
       gpr_time_add(gpr_now(), gpr_time_from_seconds(10));
 
-  grpc_iomgr_platform_shutdown();
 
   gpr_mu_lock(&g_mu);
   g_shutdown = 1;
@@ -129,6 +128,7 @@
 
   gpr_event_wait(&g_background_callback_executor_done, gpr_inf_future);
 
+  grpc_iomgr_platform_shutdown();
   grpc_alarm_list_shutdown();
   gpr_mu_destroy(&g_mu);
   gpr_cv_destroy(&g_cv);
diff --git a/src/core/iomgr/iomgr_posix.c b/src/core/iomgr/iomgr_posix.c
index 61fec6b..9297f08 100644
--- a/src/core/iomgr/iomgr_posix.c
+++ b/src/core/iomgr/iomgr_posix.c
@@ -32,7 +32,14 @@
  */
 
 #include "src/core/iomgr/iomgr_posix.h"
+#include "src/core/iomgr/fd_posix.h"
 
-void grpc_iomgr_platform_init(void) { grpc_pollset_global_init(); }
+void grpc_iomgr_platform_init(void) {
+  grpc_fd_global_init();
+  grpc_pollset_global_init();
+}
 
-void grpc_iomgr_platform_shutdown(void) { grpc_pollset_global_shutdown(); }
+void grpc_iomgr_platform_shutdown(void) {
+  grpc_pollset_global_shutdown();
+  grpc_fd_global_shutdown();
+}
diff --git a/src/core/iomgr/pollset_kick.c b/src/core/iomgr/pollset_kick.c
index 5ee1cef..238ec75 100644
--- a/src/core/iomgr/pollset_kick.c
+++ b/src/core/iomgr/pollset_kick.c
@@ -138,15 +138,18 @@
 }
 
 void grpc_pollset_kick_global_init_fallback_fd(void) {
+  gpr_mu_init(&fd_freelist_mu);
   grpc_wakeup_fd_global_init_force_fallback();
 }
 
 void grpc_pollset_kick_global_init(void) {
+  gpr_mu_init(&fd_freelist_mu);
   grpc_wakeup_fd_global_init();
 }
 
 void grpc_pollset_kick_global_destroy(void) {
   grpc_wakeup_fd_global_destroy();
+  gpr_mu_destroy(&fd_freelist_mu);
 }
 
 
diff --git a/src/core/iomgr/pollset_multipoller_with_poll_posix.c b/src/core/iomgr/pollset_multipoller_with_poll_posix.c
index 7c9a949..e882969 100644
--- a/src/core/iomgr/pollset_multipoller_with_poll_posix.c
+++ b/src/core/iomgr/pollset_multipoller_with_poll_posix.c
@@ -147,8 +147,6 @@
       grpc_fd_unref(h->fds[i]);
     } else {
       h->fds[nf++] = h->fds[i];
-      h->pfds[np].events =
-          grpc_fd_begin_poll(h->fds[i], pollset, POLLIN, POLLOUT);
       h->selfds[np] = h->fds[i];
       h->pfds[np].fd = h->fds[i]->fd;
       h->pfds[np].revents = 0;
@@ -168,6 +166,11 @@
   pollset->counter = 1;
   gpr_mu_unlock(&pollset->mu);
 
+  for (i = 1; i < np; i++) {
+    h->pfds[i].events =
+        grpc_fd_begin_poll(h->selfds[i], pollset, POLLIN, POLLOUT);
+  }
+
   r = poll(h->pfds, h->pfd_count, timeout);
   if (r < 0) {
     if (errno != EINTR) {
diff --git a/src/core/iomgr/pollset_posix.c b/src/core/iomgr/pollset_posix.c
index 39e2dc4..994dbe4 100644
--- a/src/core/iomgr/pollset_posix.c
+++ b/src/core/iomgr/pollset_posix.c
@@ -75,11 +75,14 @@
 }
 
 void grpc_pollset_kick(grpc_pollset *p) {
-  if (!p->counter) return;
-  grpc_pollset_kick_kick(&p->kick_state);
+  if (p->counter) {
+    grpc_pollset_kick_kick(&p->kick_state);
+  }
 }
 
-void grpc_pollset_force_kick(grpc_pollset *p) { grpc_pollset_kick(p); }
+void grpc_pollset_force_kick(grpc_pollset *p) {
+  grpc_pollset_kick_kick(&p->kick_state);
+}
 
 /* global state management */
 
@@ -244,11 +247,12 @@
   pfd[0].events = POLLIN;
   pfd[0].revents = 0;
   pfd[1].fd = fd->fd;
-  pfd[1].events = grpc_fd_begin_poll(fd, pollset, POLLIN, POLLOUT);
   pfd[1].revents = 0;
   pollset->counter = 1;
   gpr_mu_unlock(&pollset->mu);
 
+  pfd[1].events = grpc_fd_begin_poll(fd, pollset, POLLIN, POLLOUT);
+
   r = poll(pfd, GPR_ARRAY_SIZE(pfd), timeout);
   if (r < 0) {
     if (errno != EINTR) {
@@ -269,9 +273,9 @@
   }
 
   grpc_pollset_kick_post_poll(&pollset->kick_state);
+  grpc_fd_end_poll(fd, pollset);
 
   gpr_mu_lock(&pollset->mu);
-  grpc_fd_end_poll(fd, pollset);
   pollset->counter = 0;
   gpr_cv_broadcast(&pollset->cv);
   return 1;
diff --git a/src/core/iomgr/pollset_posix.h b/src/core/iomgr/pollset_posix.h
index f624337..cdcb995 100644
--- a/src/core/iomgr/pollset_posix.h
+++ b/src/core/iomgr/pollset_posix.h
@@ -78,7 +78,11 @@
    poll after an fd is orphaned) */
 void grpc_pollset_del_fd(grpc_pollset *pollset, struct grpc_fd *fd);
 
-/* Force any current pollers to break polling */
+/* Force any current pollers to break polling: it's the caller's responsibility
+   to ensure that the pollset indeed needs to be kicked - no verification is
+   done that the pollset is actually performing polling work. At worst this
+   will result in spurious wakeups if performed at the wrong moment.
+   Does not touch pollset->mu. */
 void grpc_pollset_force_kick(grpc_pollset *pollset);
 /* Returns the fd to listen on for kicks */
 int grpc_kick_read_fd(grpc_pollset *p);
diff --git a/src/core/iomgr/tcp_server.h b/src/core/iomgr/tcp_server.h
index c4d836e..2558a1e 100644
--- a/src/core/iomgr/tcp_server.h
+++ b/src/core/iomgr/tcp_server.h
@@ -68,7 +68,7 @@
 
    The file descriptor remains owned by the server, and will be cleaned
    up when grpc_tcp_server_destroy is called. */
-int grpc_tcp_server_get_fd(grpc_tcp_server *s, int index);
+int grpc_tcp_server_get_fd(grpc_tcp_server *s, unsigned index);
 
 void grpc_tcp_server_destroy(grpc_tcp_server *server);
 
diff --git a/src/core/iomgr/tcp_server_posix.c b/src/core/iomgr/tcp_server_posix.c
index 10daf45..d169d23 100644
--- a/src/core/iomgr/tcp_server_posix.c
+++ b/src/core/iomgr/tcp_server_posix.c
@@ -272,7 +272,7 @@
                              int addr_len) {
   int allocated_port1 = -1;
   int allocated_port2 = -1;
-  int i;
+  unsigned i;
   int fd;
   grpc_dualstack_mode dsmode;
   struct sockaddr_in6 addr6_v4mapped;
@@ -345,8 +345,8 @@
   return allocated_port1 >= 0 ? allocated_port1 : allocated_port2;
 }
 
-int grpc_tcp_server_get_fd(grpc_tcp_server *s, int index) {
-  return (0 <= index && index < s->nports) ? s->ports[index].fd : -1;
+int grpc_tcp_server_get_fd(grpc_tcp_server *s, unsigned index) {
+  return (index < s->nports) ? s->ports[index].fd : -1;
 }
 
 void grpc_tcp_server_start(grpc_tcp_server *s, grpc_pollset *pollset,
diff --git a/src/core/iomgr/wakeup_fd_eventfd.c b/src/core/iomgr/wakeup_fd_eventfd.c
index 3ee7f94..99c32bb 100644
--- a/src/core/iomgr/wakeup_fd_eventfd.c
+++ b/src/core/iomgr/wakeup_fd_eventfd.c
@@ -74,7 +74,7 @@
   return 1;
 }
 
-const grpc_wakeup_fd_vtable specialized_wakeup_fd_vtable = {
+const grpc_wakeup_fd_vtable grpc_specialized_wakeup_fd_vtable = {
   eventfd_create, eventfd_consume, eventfd_wakeup, eventfd_destroy,
   eventfd_check_availability
 };
diff --git a/src/core/iomgr/wakeup_fd_nospecial.c b/src/core/iomgr/wakeup_fd_nospecial.c
index 21e8074..c1038bf 100644
--- a/src/core/iomgr/wakeup_fd_nospecial.c
+++ b/src/core/iomgr/wakeup_fd_nospecial.c
@@ -38,16 +38,17 @@
 
 #include <grpc/support/port_platform.h>
 
-#ifndef GPR_POSIX_HAS_SPECIAL_WAKEUP_FD
+#ifdef GPR_POSIX_NO_SPECIAL_WAKEUP_FD
 
-#include "src/core/iomgr/wakeup_fd.h"
+#include "src/core/iomgr/wakeup_fd_posix.h"
+#include <stddef.h>
 
 static int check_availability_invalid(void) {
   return 0;
 }
 
-const grpc_wakeup_fd_vtable specialized_wakeup_fd_vtable = {
+const grpc_wakeup_fd_vtable grpc_specialized_wakeup_fd_vtable = {
   NULL, NULL, NULL, NULL, check_availability_invalid
 };
 
-#endif /* GPR_POSIX_HAS_SPECIAL_WAKEUP */
+#endif  /* GPR_POSIX_NO_SPECIAL_WAKEUP_FD */
diff --git a/src/core/iomgr/wakeup_fd_pipe.c b/src/core/iomgr/wakeup_fd_pipe.c
index f36e6ee..f895478 100644
--- a/src/core/iomgr/wakeup_fd_pipe.c
+++ b/src/core/iomgr/wakeup_fd_pipe.c
@@ -31,7 +31,10 @@
  *
  */
 
-/* TODO(klempner): Allow this code to be disabled. */
+#include <grpc/support/port_platform.h>
+
+#ifdef GPR_POSIX_WAKEUP_FD
+
 #include "src/core/iomgr/wakeup_fd_posix.h"
 
 #include <errno.h>
@@ -87,7 +90,8 @@
   return 1;
 }
 
-const grpc_wakeup_fd_vtable pipe_wakeup_fd_vtable = {
+const grpc_wakeup_fd_vtable grpc_pipe_wakeup_fd_vtable = {
   pipe_create, pipe_consume, pipe_wakeup, pipe_destroy, pipe_check_availability
 };
 
+#endif  /* GPR_POSIX_WAKEUP_FD */
diff --git a/src/core/iomgr/wakeup_fd_pipe.h b/src/core/iomgr/wakeup_fd_pipe.h
index fc2898f..a2fcde5 100644
--- a/src/core/iomgr/wakeup_fd_pipe.h
+++ b/src/core/iomgr/wakeup_fd_pipe.h
@@ -36,6 +36,6 @@
 
 #include "src/core/iomgr/wakeup_fd_posix.h"
 
-extern grpc_wakeup_fd_vtable pipe_wakeup_fd_vtable;
+extern grpc_wakeup_fd_vtable grpc_pipe_wakeup_fd_vtable;
 
 #endif  /* __GRPC_INTERNAL_IOMGR_WAKEUP_FD_PIPE_H_ */
diff --git a/src/core/iomgr/wakeup_fd_posix.c b/src/core/iomgr/wakeup_fd_posix.c
index 9107cf3..d3cc3ec 100644
--- a/src/core/iomgr/wakeup_fd_posix.c
+++ b/src/core/iomgr/wakeup_fd_posix.c
@@ -31,6 +31,10 @@
  *
  */
 
+#include <grpc/support/port_platform.h>
+
+#ifdef GPR_POSIX_WAKEUP_FD
+
 #include "src/core/iomgr/wakeup_fd_posix.h"
 #include "src/core/iomgr/wakeup_fd_pipe.h"
 #include <stddef.h>
@@ -38,15 +42,15 @@
 static const grpc_wakeup_fd_vtable *wakeup_fd_vtable = NULL;
 
 void grpc_wakeup_fd_global_init(void) {
-  if (specialized_wakeup_fd_vtable.check_availability()) {
-    wakeup_fd_vtable = &specialized_wakeup_fd_vtable;
+  if (grpc_specialized_wakeup_fd_vtable.check_availability()) {
+    wakeup_fd_vtable = &grpc_specialized_wakeup_fd_vtable;
   } else {
-    wakeup_fd_vtable = &pipe_wakeup_fd_vtable;
+    wakeup_fd_vtable = &grpc_pipe_wakeup_fd_vtable;
   }
 }
 
 void grpc_wakeup_fd_global_init_force_fallback(void) {
-  wakeup_fd_vtable = &pipe_wakeup_fd_vtable;
+  wakeup_fd_vtable = &grpc_pipe_wakeup_fd_vtable;
 }
 
 void grpc_wakeup_fd_global_destroy(void) {
@@ -68,3 +72,5 @@
 void grpc_wakeup_fd_destroy(grpc_wakeup_fd_info *fd_info) {
   wakeup_fd_vtable->destroy(fd_info);
 }
+
+#endif  /* GPR_POSIX_WAKEUP_FD */
diff --git a/src/core/iomgr/wakeup_fd_posix.h b/src/core/iomgr/wakeup_fd_posix.h
index c2769af..75bb9fc 100644
--- a/src/core/iomgr/wakeup_fd_posix.h
+++ b/src/core/iomgr/wakeup_fd_posix.h
@@ -62,29 +62,14 @@
 #ifndef __GRPC_INTERNAL_IOMGR_WAKEUP_FD_POSIX_H_
 #define __GRPC_INTERNAL_IOMGR_WAKEUP_FD_POSIX_H_
 
-typedef struct grpc_wakeup_fd_info grpc_wakeup_fd_info;
-
 void grpc_wakeup_fd_global_init(void);
 void grpc_wakeup_fd_global_destroy(void);
 
-
-void grpc_wakeup_fd_create(grpc_wakeup_fd_info *fd_info);
-void grpc_wakeup_fd_consume_wakeup(grpc_wakeup_fd_info *fd_info);
-void grpc_wakeup_fd_wakeup(grpc_wakeup_fd_info *fd_info);
-void grpc_wakeup_fd_destroy(grpc_wakeup_fd_info *fd_info);
-
-#define GRPC_WAKEUP_FD_GET_READ_FD(fd_info) ((fd_info)->read_fd)
-
 /* Force using the fallback implementation. This is intended for testing
  * purposes only.*/
 void grpc_wakeup_fd_global_init_force_fallback(void);
 
-/* Private structures; don't access their fields directly outside of wakeup fd
- * code. */
-struct grpc_wakeup_fd_info {
-  int read_fd;
-  int write_fd;
-};
+typedef struct grpc_wakeup_fd_info grpc_wakeup_fd_info;
 
 typedef struct grpc_wakeup_fd_vtable {
   void (*create)(grpc_wakeup_fd_info *fd_info);
@@ -95,8 +80,20 @@
   int (*check_availability)(void);
 } grpc_wakeup_fd_vtable;
 
+struct grpc_wakeup_fd_info {
+  int read_fd;
+  int write_fd;
+};
+
+#define GRPC_WAKEUP_FD_GET_READ_FD(fd_info) ((fd_info)->read_fd)
+
+void grpc_wakeup_fd_create(grpc_wakeup_fd_info *fd_info);
+void grpc_wakeup_fd_consume_wakeup(grpc_wakeup_fd_info *fd_info);
+void grpc_wakeup_fd_wakeup(grpc_wakeup_fd_info *fd_info);
+void grpc_wakeup_fd_destroy(grpc_wakeup_fd_info *fd_info);
+
 /* Defined in some specialized implementation's .c file, or by
  * wakeup_fd_nospecial.c if no such implementation exists. */
-extern const grpc_wakeup_fd_vtable specialized_wakeup_fd_vtable;
+extern const grpc_wakeup_fd_vtable grpc_specialized_wakeup_fd_vtable;
 
 #endif /* __GRPC_INTERNAL_IOMGR_WAKEUP_FD_POSIX_H_ */
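
The header reshuffle above keeps the same public surface. As a reference
point, a minimal sketch of how the wakeup-fd API is meant to be used
(illustration only, not gRPC code; wait_until_kicked is a hypothetical
name - within this tree, pollset_kick is the consumer of these calls):

  #include <poll.h>

  #include "src/core/iomgr/wakeup_fd_posix.h"

  /* Runs on the polling thread: block until some other thread kicks us. */
  void wait_until_kicked(grpc_wakeup_fd_info *wfd) {
    struct pollfd pfd;
    pfd.fd = GRPC_WAKEUP_FD_GET_READ_FD(wfd);
    pfd.events = POLLIN;
    pfd.revents = 0;
    poll(&pfd, 1, -1);                   /* readable once the fd is woken up */
    grpc_wakeup_fd_consume_wakeup(wfd);  /* drain it so it can be reused */
  }

  /* Setup and teardown, after grpc_wakeup_fd_global_init():
   *   grpc_wakeup_fd_info wfd;
   *   grpc_wakeup_fd_create(&wfd);
   *   ...hand &wfd to the polling thread, then from another thread...
   *   grpc_wakeup_fd_wakeup(&wfd);
   *   grpc_wakeup_fd_destroy(&wfd);
   */
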
diff --git a/src/core/json/json.c b/src/core/json/json.c
new file mode 100644
index 0000000..1cff4fa
--- /dev/null
+++ b/src/core/json/json.c
@@ -0,0 +1,64 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+
+#include "src/core/json/json.h"
+
+grpc_json *grpc_json_create(grpc_json_type type) {
+  grpc_json *json = gpr_malloc(sizeof(grpc_json));
+  memset(json, 0, sizeof(grpc_json));
+  json->type = type;
+
+  return json;
+}
+
+void grpc_json_destroy(grpc_json *json) {
+  while (json->child) {
+    grpc_json_destroy(json->child);
+  }
+
+  if (json->next) {
+    json->next->prev = json->prev;
+  }
+
+  if (json->prev) {
+    json->prev->next = json->next;
+  } else if (json->parent) {
+    json->parent->child = json->next;
+  }
+
+  gpr_free(json);
+}
diff --git a/src/core/json/json.h b/src/core/json/json.h
new file mode 100644
index 0000000..6676744
--- /dev/null
+++ b/src/core/json/json.h
@@ -0,0 +1,88 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __GRPC_SRC_CORE_JSON_JSON_H__
+#define __GRPC_SRC_CORE_JSON_JSON_H__
+
+#include <stdlib.h>
+
+#include "src/core/json/json_common.h"
+
+/* A tree-like structure to hold json values. The key and value pointers
+ * are not owned by it.
+ */
+typedef struct grpc_json {
+  struct grpc_json* next;
+  struct grpc_json* prev;
+  struct grpc_json* child;
+  struct grpc_json* parent;
+
+  grpc_json_type type;
+  const char* key;
+  const char* value;
+} grpc_json;
+
+/* The next two functions are going to parse the input string, and
+ * destroy it in the process, in order to use its space to store
+ * all of the keys and values for the returned object tree.
+ *
+ * They assume a UTF-8 input stream, and will output UTF-8 encoded
+ * strings in the tree. The input stream's UTF-8 isn't validated:
+ * what you put in is what you get out.
+ *
+ * All the keys and values in the grpc_json objects will be strings
+ * pointing at your input buffer.
+ *
+ * Delete the allocated tree afterward using grpc_json_destroy().
+ */
+grpc_json* grpc_json_parse_string_with_len(char* input, size_t size);
+grpc_json* grpc_json_parse_string(char* input);
+
+/* This function will create a new string using gpr_realloc, and will
+ * serialize the grpc_json tree into it. It'll be zero-terminated,
+ * but will be allocated in chunks of 256 bytes.
+ *
+ * The indent parameter controls the way the output is formatted.
+ * If indent is 0, then newlines will be suppressed as well, and the
+ * output will be as compact as possible.
+ */
+char* grpc_json_dump_to_string(grpc_json* json, int indent);
+
+/* Use these to create or delete a grpc_json object.
+ * Deletion is recursive. We will not attempt to free any of the strings
+ * in any of the objects of that tree.
+ */
+grpc_json* grpc_json_create(grpc_json_type type);
+void grpc_json_destroy(grpc_json* json);
+
+#endif /* __GRPC_SRC_CORE_JSON_JSON_H__ */
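
A minimal usage sketch of the API declared above (illustration only, not
part of the patch): the parser rewrites its input in place, so the buffer
must be writable, and the dumped string comes from gpr_realloc, so it is
released with gpr_free. The NULL check assumes parse failures are reported
with a NULL return.

  #include <stdio.h>

  #include <grpc/support/alloc.h>

  #include "src/core/json/json.h"

  int main(void) {
    char input[] = "{\"name\": \"grpc\", \"enabled\": true, \"codes\": [0, 1, 2]}";
    grpc_json *json;
    char *output;

    json = grpc_json_parse_string(input); /* rewrites 'input' in place */
    if (json == NULL) {
      fprintf(stderr, "parse error\n");
      return 1;
    }

    output = grpc_json_dump_to_string(json, 0); /* indent 0: compact output */
    printf("%s\n", output);

    gpr_free(output);        /* the dump buffer is gpr_realloc-allocated */
    grpc_json_destroy(json); /* frees the tree, not the strings it points at */
    return 0;
  }
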
diff --git a/src/core/json/json_common.h b/src/core/json/json_common.h
new file mode 100644
index 0000000..88a8155
--- /dev/null
+++ b/src/core/json/json_common.h
@@ -0,0 +1,49 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __GRPC_SRC_CORE_JSON_JSON_COMMON_H__
+#define __GRPC_SRC_CORE_JSON_JSON_COMMON_H__
+
+/* The various json types. */
+typedef enum {
+  GRPC_JSON_OBJECT,
+  GRPC_JSON_ARRAY,
+  GRPC_JSON_STRING,
+  GRPC_JSON_NUMBER,
+  GRPC_JSON_TRUE,
+  GRPC_JSON_FALSE,
+  GRPC_JSON_NULL,
+  GRPC_JSON_TOP_LEVEL
+} grpc_json_type;
+
+#endif /* __GRPC_SRC_CORE_JSON_JSON_COMMON_H__ */
diff --git a/src/core/json/json_reader.c b/src/core/json/json_reader.c
new file mode 100644
index 0000000..75aa87e
--- /dev/null
+++ b/src/core/json/json_reader.c
@@ -0,0 +1,653 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <string.h>
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/json/json_reader.h"
+
+static void json_reader_string_clear(grpc_json_reader* reader) {
+  reader->vtable->string_clear(reader->userdata);
+}
+
+static void json_reader_string_add_char(grpc_json_reader* reader,
+                                             gpr_uint32 c) {
+  reader->vtable->string_add_char(reader->userdata, c);
+}
+
+static void json_reader_string_add_utf32(grpc_json_reader* reader,
+                                              gpr_uint32 utf32) {
+  reader->vtable->string_add_utf32(reader->userdata, utf32);
+}
+
+static gpr_uint32
+    grpc_json_reader_read_char(grpc_json_reader* reader) {
+  return reader->vtable->read_char(reader->userdata);
+}
+
+static void json_reader_container_begins(grpc_json_reader* reader,
+                                              grpc_json_type type) {
+  reader->vtable->container_begins(reader->userdata, type);
+}
+
+static grpc_json_type
+    grpc_json_reader_container_ends(grpc_json_reader* reader) {
+  return reader->vtable->container_ends(reader->userdata);
+}
+
+static void json_reader_set_key(grpc_json_reader* reader) {
+  reader->vtable->set_key(reader->userdata);
+}
+
+static void json_reader_set_string(grpc_json_reader* reader) {
+  reader->vtable->set_string(reader->userdata);
+}
+
+static int json_reader_set_number(grpc_json_reader* reader) {
+  return reader->vtable->set_number(reader->userdata);
+}
+
+static void json_reader_set_true(grpc_json_reader* reader) {
+  reader->vtable->set_true(reader->userdata);
+}
+
+static void json_reader_set_false(grpc_json_reader* reader) {
+  reader->vtable->set_false(reader->userdata);
+}
+
+static void json_reader_set_null(grpc_json_reader* reader) {
+  reader->vtable->set_null(reader->userdata);
+}
+
+/* Call this function to initialize the reader structure. */
+void grpc_json_reader_init(grpc_json_reader* reader,
+                           grpc_json_reader_vtable* vtable, void* userdata) {
+  memset(reader, 0, sizeof(grpc_json_reader));
+  reader->vtable = vtable;
+  reader->userdata = userdata;
+  json_reader_string_clear(reader);
+  reader->state = GRPC_JSON_STATE_VALUE_BEGIN;
+}
+
+int grpc_json_reader_is_complete(grpc_json_reader* reader) {
+  return ((reader->depth == 0) && ((reader->state == GRPC_JSON_STATE_END) ||
+          (reader->state == GRPC_JSON_STATE_VALUE_END)));
+}
+
+grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader) {
+  gpr_uint32 c, success;
+
+  /* This state-machine is a strict implementation of ECMA-404 */
+  for (;;) {
+    c = grpc_json_reader_read_char(reader);
+    switch (c) {
+      /* Let's process the error cases first. */
+      case GRPC_JSON_READ_CHAR_ERROR:
+        return GRPC_JSON_READ_ERROR;
+
+      case GRPC_JSON_READ_CHAR_EAGAIN:
+        return GRPC_JSON_EAGAIN;
+
+      case GRPC_JSON_READ_CHAR_EOF:
+        if (grpc_json_reader_is_complete(reader)) {
+          return GRPC_JSON_DONE;
+        } else {
+          return GRPC_JSON_PARSE_ERROR;
+        }
+        break;
+
+      /* Processing whitespaces. */
+      case ' ':
+      case '\t':
+      case '\n':
+      case '\r':
+        switch (reader->state) {
+          case GRPC_JSON_STATE_OBJECT_KEY_BEGIN:
+          case GRPC_JSON_STATE_OBJECT_KEY_END:
+          case GRPC_JSON_STATE_VALUE_BEGIN:
+          case GRPC_JSON_STATE_VALUE_END:
+          case GRPC_JSON_STATE_END:
+            break;
+
+          case GRPC_JSON_STATE_OBJECT_KEY_STRING:
+          case GRPC_JSON_STATE_VALUE_STRING:
+            if (c != ' ') return GRPC_JSON_PARSE_ERROR;
+            if (reader->unicode_high_surrogate != 0) return GRPC_JSON_PARSE_ERROR;
+            json_reader_string_add_char(reader, c);
+            break;
+
+          case GRPC_JSON_STATE_VALUE_NUMBER:
+          case GRPC_JSON_STATE_VALUE_NUMBER_WITH_DECIMAL:
+          case GRPC_JSON_STATE_VALUE_NUMBER_ZERO:
+          case GRPC_JSON_STATE_VALUE_NUMBER_EPM:
+            success = json_reader_set_number(reader);
+            if (!success) return GRPC_JSON_PARSE_ERROR;
+            json_reader_string_clear(reader);
+            reader->state = GRPC_JSON_STATE_VALUE_END;
+            break;
+
+          default:
+            return GRPC_JSON_PARSE_ERROR;
+        }
+        break;
+
+      /* Value, object or array terminations. */
+      case ',':
+      case '}':
+      case ']':
+        switch (reader->state) {
+          case GRPC_JSON_STATE_OBJECT_KEY_STRING:
+          case GRPC_JSON_STATE_VALUE_STRING:
+            if (reader->unicode_high_surrogate != 0) return GRPC_JSON_PARSE_ERROR;
+            json_reader_string_add_char(reader, c);
+            break;
+
+          case GRPC_JSON_STATE_VALUE_NUMBER:
+          case GRPC_JSON_STATE_VALUE_NUMBER_WITH_DECIMAL:
+          case GRPC_JSON_STATE_VALUE_NUMBER_ZERO:
+          case GRPC_JSON_STATE_VALUE_NUMBER_EPM:
+            success = json_reader_set_number(reader);
+            if (!success) return GRPC_JSON_PARSE_ERROR;
+            json_reader_string_clear(reader);
+            reader->state = GRPC_JSON_STATE_VALUE_END;
+          /* The missing break here is intentional. */
+
+          case GRPC_JSON_STATE_VALUE_END:
+          case GRPC_JSON_STATE_OBJECT_KEY_BEGIN:
+          case GRPC_JSON_STATE_VALUE_BEGIN:
+            if (c == ',') {
+              if (reader->state != GRPC_JSON_STATE_VALUE_END) {
+                return GRPC_JSON_PARSE_ERROR;
+              }
+              if (reader->in_object) {
+                reader->state = GRPC_JSON_STATE_OBJECT_KEY_BEGIN;
+              } else {
+                reader->state = GRPC_JSON_STATE_VALUE_BEGIN;
+              }
+            } else {
+              if (reader->depth-- == 0) return GRPC_JSON_PARSE_ERROR;
+              if ((c == '}') && !reader->in_object) {
+                return GRPC_JSON_PARSE_ERROR;
+              }
+              if ((c == '}') &&
+                  (reader->state == GRPC_JSON_STATE_OBJECT_KEY_BEGIN) &&
+                  !reader->container_just_begun) {
+                return GRPC_JSON_PARSE_ERROR;
+              }
+              if ((c == ']') && !reader->in_array) return GRPC_JSON_PARSE_ERROR;
+              if ((c == ']') &&
+                  (reader->state == GRPC_JSON_STATE_VALUE_BEGIN) &&
+                  !reader->container_just_begun) {
+                return GRPC_JSON_PARSE_ERROR;
+              }
+              reader->state = GRPC_JSON_STATE_VALUE_END;
+              switch (grpc_json_reader_container_ends(reader)) {
+                case GRPC_JSON_OBJECT:
+                  reader->in_object = 1;
+                  reader->in_array = 0;
+                  break;
+                case GRPC_JSON_ARRAY:
+                  reader->in_object = 0;
+                  reader->in_array = 1;
+                  break;
+                case GRPC_JSON_TOP_LEVEL:
+                  if (reader->depth != 0) return GRPC_JSON_INTERNAL_ERROR;
+                  reader->in_object = 0;
+                  reader->in_array = 0;
+                  reader->state = GRPC_JSON_STATE_END;
+                  break;
+                default:
+                  return GRPC_JSON_INTERNAL_ERROR;
+              }
+            }
+            break;
+
+          default:
+            return GRPC_JSON_PARSE_ERROR;
+        }
+        break;
+
+      /* In-string escaping. */
+      case '\\':
+        switch (reader->state) {
+          case GRPC_JSON_STATE_OBJECT_KEY_STRING:
+            reader->escaped_string_was_key = 1;
+            reader->state = GRPC_JSON_STATE_STRING_ESCAPE;
+            break;
+
+          case GRPC_JSON_STATE_VALUE_STRING:
+            reader->escaped_string_was_key = 0;
+            reader->state = GRPC_JSON_STATE_STRING_ESCAPE;
+            break;
+
+          /* This is the \\ case. */
+          case GRPC_JSON_STATE_STRING_ESCAPE:
+            if (reader->unicode_high_surrogate != 0) return GRPC_JSON_PARSE_ERROR;
+            json_reader_string_add_char(reader, '\\');
+            if (reader->escaped_string_was_key) {
+              reader->state = GRPC_JSON_STATE_OBJECT_KEY_STRING;
+            } else {
+              reader->state = GRPC_JSON_STATE_VALUE_STRING;
+            }
+            break;
+
+          default:
+            return GRPC_JSON_PARSE_ERROR;
+        }
+        break;
+
+      default:
+        reader->container_just_begun = 0;
+        switch (reader->state) {
+          case GRPC_JSON_STATE_OBJECT_KEY_BEGIN:
+            if (c != '"') return GRPC_JSON_PARSE_ERROR;
+            reader->state = GRPC_JSON_STATE_OBJECT_KEY_STRING;
+            break;
+
+          case GRPC_JSON_STATE_OBJECT_KEY_STRING:
+            if (reader->unicode_high_surrogate != 0) return GRPC_JSON_PARSE_ERROR;
+            if (c == '"') {
+              reader->state = GRPC_JSON_STATE_OBJECT_KEY_END;
+              json_reader_set_key(reader);
+              json_reader_string_clear(reader);
+            } else {
+              if (c <= 0x001f) return GRPC_JSON_PARSE_ERROR;
+              json_reader_string_add_char(reader, c);
+            }
+            break;
+
+          case GRPC_JSON_STATE_VALUE_STRING:
+            if (reader->unicode_high_surrogate != 0) return GRPC_JSON_PARSE_ERROR;
+            if (c == '"') {
+              reader->state = GRPC_JSON_STATE_VALUE_END;
+              json_reader_set_string(reader);
+              json_reader_string_clear(reader);
+            } else {
+              if (c < 32) return GRPC_JSON_PARSE_ERROR;
+              json_reader_string_add_char(reader, c);
+            }
+            break;
+
+          case GRPC_JSON_STATE_OBJECT_KEY_END:
+            if (c != ':') return GRPC_JSON_PARSE_ERROR;
+            reader->state = GRPC_JSON_STATE_VALUE_BEGIN;
+            break;
+
+          case GRPC_JSON_STATE_VALUE_BEGIN:
+            switch (c) {
+              case 't':
+                reader->state = GRPC_JSON_STATE_VALUE_TRUE_R;
+                break;
+
+              case 'f':
+                reader->state = GRPC_JSON_STATE_VALUE_FALSE_A;
+                break;
+
+              case 'n':
+                reader->state = GRPC_JSON_STATE_VALUE_NULL_U;
+                break;
+
+              case '"':
+                reader->state = GRPC_JSON_STATE_VALUE_STRING;
+                break;
+
+              case '0':
+                json_reader_string_add_char(reader, c);
+                reader->state = GRPC_JSON_STATE_VALUE_NUMBER_ZERO;
+                break;
+
+              case '1':
+              case '2':
+              case '3':
+              case '4':
+              case '5':
+              case '6':
+              case '7':
+              case '8':
+              case '9':
+              case '-':
+                json_reader_string_add_char(reader, c);
+                reader->state = GRPC_JSON_STATE_VALUE_NUMBER;
+                break;
+
+              case '{':
+                reader->container_just_begun = 1;
+                json_reader_container_begins(reader, GRPC_JSON_OBJECT);
+                reader->depth++;
+                reader->state = GRPC_JSON_STATE_OBJECT_KEY_BEGIN;
+                reader->in_object = 1;
+                reader->in_array = 0;
+                break;
+
+              case '[':
+                reader->container_just_begun = 1;
+                json_reader_container_begins(reader, GRPC_JSON_ARRAY);
+                reader->depth++;
+                reader->in_object = 0;
+                reader->in_array = 1;
+                break;
+            }
+            break;
+
+          case GRPC_JSON_STATE_STRING_ESCAPE:
+            if (reader->escaped_string_was_key) {
+              reader->state = GRPC_JSON_STATE_OBJECT_KEY_STRING;
+            } else {
+              reader->state = GRPC_JSON_STATE_VALUE_STRING;
+            }
+            if (reader->unicode_high_surrogate && c != 'u')
+              return GRPC_JSON_PARSE_ERROR;
+            switch (c) {
+              case '"':
+              case '/':
+                json_reader_string_add_char(reader, c);
+                break;
+              case 'b':
+                json_reader_string_add_char(reader, '\b');
+                break;
+              case 'f':
+                json_reader_string_add_char(reader, '\f');
+                break;
+              case 'n':
+                json_reader_string_add_char(reader, '\n');
+                break;
+              case 'r':
+                json_reader_string_add_char(reader, '\r');
+                break;
+              case 't':
+                json_reader_string_add_char(reader, '\t');
+                break;
+              case 'u':
+                reader->state = GRPC_JSON_STATE_STRING_ESCAPE_U1;
+                reader->unicode_char = 0;
+                break;
+              default:
+                return GRPC_JSON_PARSE_ERROR;
+            }
+            break;
+
+          case GRPC_JSON_STATE_STRING_ESCAPE_U1:
+          case GRPC_JSON_STATE_STRING_ESCAPE_U2:
+          case GRPC_JSON_STATE_STRING_ESCAPE_U3:
+          case GRPC_JSON_STATE_STRING_ESCAPE_U4:
+            if ((c >= '0') && (c <= '9')) {
+              c -= '0';
+            } else if ((c >= 'A') && (c <= 'F')) {
+              c -= 'A' - 10;
+            } else if ((c >= 'a') && (c <= 'f')) {
+              c -= 'a' - 10;
+            } else {
+              return GRPC_JSON_PARSE_ERROR;
+            }
+            reader->unicode_char <<= 4;
+            reader->unicode_char |= c;
+
+            switch (reader->state) {
+              case GRPC_JSON_STATE_STRING_ESCAPE_U1:
+                reader->state = GRPC_JSON_STATE_STRING_ESCAPE_U2;
+                break;
+              case GRPC_JSON_STATE_STRING_ESCAPE_U2:
+                reader->state = GRPC_JSON_STATE_STRING_ESCAPE_U3;
+                break;
+              case GRPC_JSON_STATE_STRING_ESCAPE_U3:
+                reader->state = GRPC_JSON_STATE_STRING_ESCAPE_U4;
+                break;
+              case GRPC_JSON_STATE_STRING_ESCAPE_U4:
+                /* See grpc_json_writer_escape_string to have a description
+                 * of what's going on here.
+                 */
+                if ((reader->unicode_char & 0xfc00) == 0xd800) {
+                  /* high surrogate utf-16 */
+                  if (reader->unicode_high_surrogate != 0)
+                    return GRPC_JSON_PARSE_ERROR;
+                  reader->unicode_high_surrogate = reader->unicode_char;
+                } else if ((reader->unicode_char & 0xfc00) == 0xdc00) {
+                  /* low surrogate utf-16 */
+                  gpr_uint32 utf32;
+                  if (reader->unicode_high_surrogate == 0)
+                    return GRPC_JSON_PARSE_ERROR;
+                  utf32 = 0x10000;
+                  utf32 += (reader->unicode_high_surrogate - 0xd800) * 0x400;
+                  utf32 += reader->unicode_char - 0xdc00;
+                  json_reader_string_add_utf32(reader, utf32);
+                  reader->unicode_high_surrogate = 0;
+                } else {
+                  /* anything else */
+                  if (reader->unicode_high_surrogate != 0)
+                    return GRPC_JSON_PARSE_ERROR;
+                  json_reader_string_add_utf32(reader, reader->unicode_char);
+                }
+                if (reader->escaped_string_was_key) {
+                  reader->state = GRPC_JSON_STATE_OBJECT_KEY_STRING;
+                } else {
+                  reader->state = GRPC_JSON_STATE_VALUE_STRING;
+                }
+                break;
+              default:
+                return GRPC_JSON_INTERNAL_ERROR;
+            }
+            break;
+
+          case GRPC_JSON_STATE_VALUE_NUMBER:
+            json_reader_string_add_char(reader, c);
+            switch (c) {
+              case '0':
+              case '1':
+              case '2':
+              case '3':
+              case '4':
+              case '5':
+              case '6':
+              case '7':
+              case '8':
+              case '9':
+                break;
+              case 'e':
+              case 'E':
+                reader->state = GRPC_JSON_STATE_VALUE_NUMBER_E;
+                break;
+              case '.':
+                reader->state = GRPC_JSON_STATE_VALUE_NUMBER_DOT;
+                break;
+              default:
+                return GRPC_JSON_PARSE_ERROR;
+            }
+            break;
+
+          case GRPC_JSON_STATE_VALUE_NUMBER_WITH_DECIMAL:
+            json_reader_string_add_char(reader, c);
+            switch (c) {
+              case '0':
+              case '1':
+              case '2':
+              case '3':
+              case '4':
+              case '5':
+              case '6':
+              case '7':
+              case '8':
+              case '9':
+                break;
+              case 'e':
+              case 'E':
+                reader->state = GRPC_JSON_STATE_VALUE_NUMBER_E;
+                break;
+              default:
+                return GRPC_JSON_PARSE_ERROR;
+            }
+            break;
+
+          case GRPC_JSON_STATE_VALUE_NUMBER_ZERO:
+            if (c != '.') return GRPC_JSON_PARSE_ERROR;
+            json_reader_string_add_char(reader, c);
+            reader->state = GRPC_JSON_STATE_VALUE_NUMBER_DOT;
+            break;
+
+          case GRPC_JSON_STATE_VALUE_NUMBER_DOT:
+            json_reader_string_add_char(reader, c);
+            switch (c) {
+              case '0':
+              case '1':
+              case '2':
+              case '3':
+              case '4':
+              case '5':
+              case '6':
+              case '7':
+              case '8':
+              case '9':
+                reader->state = GRPC_JSON_STATE_VALUE_NUMBER_WITH_DECIMAL;
+                break;
+              default:
+                return GRPC_JSON_PARSE_ERROR;
+            }
+            break;
+
+          case GRPC_JSON_STATE_VALUE_NUMBER_E:
+            json_reader_string_add_char(reader, c);
+            switch (c) {
+              case '0':
+              case '1':
+              case '2':
+              case '3':
+              case '4':
+              case '5':
+              case '6':
+              case '7':
+              case '8':
+              case '9':
+              case '+':
+              case '-':
+                reader->state = GRPC_JSON_STATE_VALUE_NUMBER_EPM;
+                break;
+              default:
+                return GRPC_JSON_PARSE_ERROR;
+            }
+            break;
+
+          case GRPC_JSON_STATE_VALUE_NUMBER_EPM:
+            json_reader_string_add_char(reader, c);
+            switch (c) {
+              case '0':
+              case '1':
+              case '2':
+              case '3':
+              case '4':
+              case '5':
+              case '6':
+              case '7':
+              case '8':
+              case '9':
+                break;
+              default:
+                return GRPC_JSON_PARSE_ERROR;
+            }
+            break;
+
+          case GRPC_JSON_STATE_VALUE_TRUE_R:
+            if (c != 'r') return GRPC_JSON_PARSE_ERROR;
+            reader->state = GRPC_JSON_STATE_VALUE_TRUE_U;
+            break;
+
+          case GRPC_JSON_STATE_VALUE_TRUE_U:
+            if (c != 'u') return GRPC_JSON_PARSE_ERROR;
+            reader->state = GRPC_JSON_STATE_VALUE_TRUE_E;
+            break;
+
+          case GRPC_JSON_STATE_VALUE_TRUE_E:
+            if (c != 'e') return GRPC_JSON_PARSE_ERROR;
+            json_reader_set_true(reader);
+            reader->state = GRPC_JSON_STATE_VALUE_END;
+            break;
+
+          case GRPC_JSON_STATE_VALUE_FALSE_A:
+            if (c != 'a') return GRPC_JSON_PARSE_ERROR;
+            reader->state = GRPC_JSON_STATE_VALUE_FALSE_L;
+            break;
+
+          case GRPC_JSON_STATE_VALUE_FALSE_L:
+            if (c != 'l') return GRPC_JSON_PARSE_ERROR;
+            reader->state = GRPC_JSON_STATE_VALUE_FALSE_S;
+            break;
+
+          case GRPC_JSON_STATE_VALUE_FALSE_S:
+            if (c != 's') return GRPC_JSON_PARSE_ERROR;
+            reader->state = GRPC_JSON_STATE_VALUE_FALSE_E;
+            break;
+
+          case GRPC_JSON_STATE_VALUE_FALSE_E:
+            if (c != 'e') return GRPC_JSON_PARSE_ERROR;
+            json_reader_set_false(reader);
+            reader->state = GRPC_JSON_STATE_VALUE_END;
+            break;
+
+          case GRPC_JSON_STATE_VALUE_NULL_U:
+            if (c != 'u') return GRPC_JSON_PARSE_ERROR;
+            reader->state = GRPC_JSON_STATE_VALUE_NULL_L1;
+            break;
+
+          case GRPC_JSON_STATE_VALUE_NULL_L1:
+            if (c != 'l') return GRPC_JSON_PARSE_ERROR;
+            reader->state = GRPC_JSON_STATE_VALUE_NULL_L2;
+            break;
+
+          case GRPC_JSON_STATE_VALUE_NULL_L2:
+            if (c != 'l') return GRPC_JSON_PARSE_ERROR;
+            json_reader_set_null(reader);
+            reader->state = GRPC_JSON_STATE_VALUE_END;
+            break;
+
+          /* All of the VALUE_END cases are handled in the specialized case
+           * above. */
+          case GRPC_JSON_STATE_VALUE_END:
+            switch (c) {
+              case ',':
+              case '}':
+              case ']':
+                return GRPC_JSON_INTERNAL_ERROR;
+                break;
+
+              default:
+                return GRPC_JSON_PARSE_ERROR;
+            }
+            break;
+
+          case GRPC_JSON_STATE_END:
+            return GRPC_JSON_PARSE_ERROR;
+        }
+    }
+  }
+
+  return GRPC_JSON_INTERNAL_ERROR;
+}
diff --git a/src/core/json/json_reader.h b/src/core/json/json_reader.h
new file mode 100644
index 0000000..388ee36
--- /dev/null
+++ b/src/core/json/json_reader.h
@@ -0,0 +1,160 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __GRPC_SRC_CORE_JSON_JSON_READER_H__
+#define __GRPC_SRC_CORE_JSON_JSON_READER_H__
+
+#include <grpc/support/port_platform.h>
+#include "src/core/json/json_common.h"
+
+typedef enum {
+  GRPC_JSON_STATE_OBJECT_KEY_BEGIN,
+  GRPC_JSON_STATE_OBJECT_KEY_STRING,
+  GRPC_JSON_STATE_OBJECT_KEY_END,
+  GRPC_JSON_STATE_VALUE_BEGIN,
+  GRPC_JSON_STATE_VALUE_STRING,
+  GRPC_JSON_STATE_STRING_ESCAPE,
+  GRPC_JSON_STATE_STRING_ESCAPE_U1,
+  GRPC_JSON_STATE_STRING_ESCAPE_U2,
+  GRPC_JSON_STATE_STRING_ESCAPE_U3,
+  GRPC_JSON_STATE_STRING_ESCAPE_U4,
+  GRPC_JSON_STATE_VALUE_NUMBER,
+  GRPC_JSON_STATE_VALUE_NUMBER_WITH_DECIMAL,
+  GRPC_JSON_STATE_VALUE_NUMBER_ZERO,
+  GRPC_JSON_STATE_VALUE_NUMBER_DOT,
+  GRPC_JSON_STATE_VALUE_NUMBER_E,
+  GRPC_JSON_STATE_VALUE_NUMBER_EPM,
+  GRPC_JSON_STATE_VALUE_TRUE_R,
+  GRPC_JSON_STATE_VALUE_TRUE_U,
+  GRPC_JSON_STATE_VALUE_TRUE_E,
+  GRPC_JSON_STATE_VALUE_FALSE_A,
+  GRPC_JSON_STATE_VALUE_FALSE_L,
+  GRPC_JSON_STATE_VALUE_FALSE_S,
+  GRPC_JSON_STATE_VALUE_FALSE_E,
+  GRPC_JSON_STATE_VALUE_NULL_U,
+  GRPC_JSON_STATE_VALUE_NULL_L1,
+  GRPC_JSON_STATE_VALUE_NULL_L2,
+  GRPC_JSON_STATE_VALUE_END,
+  GRPC_JSON_STATE_END
+} grpc_json_reader_state;
+
+enum {
+  /* The first non-unicode value is 0x110000. But let's pick
+   * a value high enough to start our error codes from. These
+   * values are safe to return from the read_char function.
+   */
+  GRPC_JSON_READ_CHAR_EOF = 0x7ffffff0,
+  GRPC_JSON_READ_CHAR_EAGAIN,
+  GRPC_JSON_READ_CHAR_ERROR
+};
+
+struct grpc_json_reader;
+
+typedef struct grpc_json_reader_vtable {
+  /* Clears your internal string scratchpad. */
+  void (*string_clear)(void* userdata);
+  /* Adds a char to the string scratchpad. */
+  void (*string_add_char)(void* userdata, gpr_uint32 c);
+  /* Adds a utf32 char to the string scratchpad. */
+  void (*string_add_utf32)(void* userdata, gpr_uint32 c);
+  /* Reads a character from your input. May be utf-8, 16 or 32. */
+  gpr_uint32 (*read_char)(void* userdata);
+  /* Starts a container of type GRPC_JSON_ARRAY or GRPC_JSON_OBJECT. */
+  void (*container_begins)(void* userdata, grpc_json_type type);
+  /* Ends the current container. Must return the type of its parent. */
+  grpc_json_type (*container_ends)(void* userdata);
+  /* Your internal string scratchpad is an object's key. */
+  void (*set_key)(void* userdata);
+  /* Your internal string scratchpad is a string value. */
+  void (*set_string)(void* userdata);
+  /* Your internal string scratchpad is a numerical value. Return 1 if valid. */
+  int (*set_number)(void* userdata);
+  /* Sets the values true, false or null. */
+  void (*set_true)(void* userdata);
+  void (*set_false)(void* userdata);
+  void (*set_null)(void* userdata);
+} grpc_json_reader_vtable;
+
+typedef struct grpc_json_reader {
+  /* This structure is fully private, and initialized by grpc_json_reader_init.
+   * Its definition is only public so that you can put it on your stack.
+   */
+
+  void* userdata;
+  grpc_json_reader_vtable* vtable;
+  int depth;
+  int in_object;
+  int in_array;
+  int escaped_string_was_key;
+  int container_just_begun;
+  gpr_uint16 unicode_char, unicode_high_surrogate;
+  grpc_json_reader_state state;
+} grpc_json_reader;
+
+/* The return type of the parser. */
+typedef enum {
+  GRPC_JSON_DONE,          /* The parser finished successfully. */
+  GRPC_JSON_EAGAIN,        /* The parser yields to get more data. */
+  GRPC_JSON_READ_ERROR,    /* The parser passes through a read error. */
+  GRPC_JSON_PARSE_ERROR,   /* The parser found an error in the json stream. */
+  GRPC_JSON_INTERNAL_ERROR /* The parser got an internal error. */
+} grpc_json_reader_status;
+
+/* Call this function to start parsing the input. It will return the following:
+ *    . GRPC_JSON_DONE if the input hit EOF, and the parsing finished
+ *      successfully.
+ *    . GRPC_JSON_EAGAIN if the read_char function returned EAGAIN. Call the
+ *      parser again as needed. It is okay to call the parser in polling mode,
+ *      although a bit wasteful.
+ *    . GRPC_JSON_READ_ERROR if the read_char function returned an error. The
+ *      parser's state isn't corrupted, however, and the function can be called
+ *      again once the error has been corrected. But please prefer the EAGAIN
+ *      mechanism for consistency.
+ *    . GRPC_JSON_PARSE_ERROR if the input was invalid.
+ *    . GRPC_JSON_INTERNAL_ERROR if the parser somehow ended up in an invalid
+ *      internal state.
+ */
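+/* For instance, a minimal way to drive the parser looks like this (an
+ * illustrative sketch only; my_vtable, my_userdata and handle_error stand in
+ * for your own callbacks and error handling):
+ *
+ *   grpc_json_reader reader;
+ *   grpc_json_reader_status status;
+ *   grpc_json_reader_init(&reader, &my_vtable, &my_userdata);
+ *   do {
+ *     status = grpc_json_reader_run(&reader);
+ *   } while (status == GRPC_JSON_EAGAIN);
+ *   if (status != GRPC_JSON_DONE) handle_error(status);
+ */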
+grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader);
+
+/* Call this function to initialize the reader structure. */
+void grpc_json_reader_init(grpc_json_reader* reader,
+                           grpc_json_reader_vtable* vtable, void* userdata);
+
+/* You may call this from the read_char callback if you don't know where the
+ * end of your input stream is, and you'd like the json reader to hint that it
+ * has completed reading its input, so you can return an EOF to it. Note that
+ * there might still be trailing whitespace after that point.
+ */
+int grpc_json_reader_is_complete(grpc_json_reader* reader);
+
+#endif /* __GRPC_SRC_CORE_JSON_JSON_READER_H__ */
diff --git a/src/core/json/json_string.c b/src/core/json/json_string.c
new file mode 100644
index 0000000..d29e9e3
--- /dev/null
+++ b/src/core/json/json_string.c
@@ -0,0 +1,391 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <string.h>
+#include <stdlib.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+#include "src/core/json/json.h"
+#include "src/core/json/json_reader.h"
+#include "src/core/json/json_writer.h"
+
+/* The json reader will construct a bunch of grpc_json objects and
+ * link them all up together in a tree-like structure that will represent
+ * the json data in memory.
+ *
+ * It also uses its own input as a scratchpad to store all of the decoded,
+ * unescaped strings. So we need to keep track of all these pointers in
+ * that opaque structure the reader will carry for us.
+ *
+ * Note that this works because the act of parsing json always reduces its
+ * input size, and never expands it.
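+ *
+ * For instance, the escaped input "a\nb" occupies 6 bytes including the
+ * quotes, while the decoded string needs only 3 bytes plus a terminating
+ * zero, so the decoded data always fits within the original input.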
+ */
+typedef struct {
+  grpc_json* top;
+  grpc_json* current_container;
+  grpc_json* current_value;
+  gpr_uint8* input;
+  gpr_uint8* key;
+  gpr_uint8* string;
+  gpr_uint8* string_ptr;
+  size_t remaining_input;
+} json_reader_userdata;
+
+/* This json writer will put everything into one big string.
+ * That string is allocated in chunks of 256 bytes.
+ */
+typedef struct {
+  char* output;
+  size_t free_space;
+  size_t string_len;
+  size_t allocated;
+} json_writer_userdata;
+
+
+/* This function checks if there's enough space left in the output buffer,
+ * and will enlarge it if necessary. We're only allocating chunks of 256
+ * bytes at a time (or multiples thereof).
+ */
+static void json_writer_output_check(void* userdata, size_t needed) {
+  json_writer_userdata* state = userdata;
+  if (state->free_space >= needed) return;
+  needed -= state->free_space;
+  /* Round up to a multiple of 256 bytes (e.g. needed=300 becomes 512). */
+  needed = (needed + 0xff) & ~0xff;
+  state->output = gpr_realloc(state->output, state->allocated + needed);
+  state->free_space += needed;
+  state->allocated += needed;
+}
+
+/* These are needed by the writer's implementation. */
+static void json_writer_output_char(void* userdata, char c) {
+  json_writer_userdata* state = userdata;
+  json_writer_output_check(userdata, 1);
+  state->output[state->string_len++] = c;
+  state->free_space--;
+}
+
+static void json_writer_output_string_with_len(void* userdata,
+                                               const char* str, size_t len) {
+  json_writer_userdata* state = userdata;
+  json_writer_output_check(userdata, len);
+  memcpy(state->output + state->string_len, str, len);
+  state->string_len += len;
+  state->free_space -= len;
+}
+
+static void json_writer_output_string(void* userdata,
+                                      const char* str) {
+  size_t len = strlen(str);
+  json_writer_output_string_with_len(userdata, str, len);
+}
+
+/* The reader asks us to clear our scratchpad. In our case, we'll simply mark
+ * the end of the current string, and advance our output pointer.
+ */
+static void json_reader_string_clear(void* userdata) {
+  json_reader_userdata* state = userdata;
+  if (state->string) {
+    GPR_ASSERT(state->string_ptr < state->input);
+    *state->string_ptr++ = 0;
+  }
+  state->string = state->string_ptr;
+}
+
+static void json_reader_string_add_char(void* userdata, gpr_uint32 c) {
+  json_reader_userdata* state = userdata;
+  GPR_ASSERT(state->string_ptr < state->input);
+  GPR_ASSERT(c <= 0xff);
+  *state->string_ptr++ = (char)c;
+}
+
+/* We are converting a UTF-32 character into UTF-8 here,
+ * as described by RFC3629.
+ */
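+/* For example, U+20AC (the euro sign) falls in the 0x800 - 0xffff range and
+ * is therefore encoded on three bytes: 0xe0 | 0x02 = 0xe2, 0x80 | 0x02 = 0x82
+ * and 0x80 | 0x2c = 0xac, i.e. the UTF-8 sequence e2 82 ac.
+ */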
+static void json_reader_string_add_utf32(void* userdata, gpr_uint32 c) {
+  if (c <= 0x7f) {
+    json_reader_string_add_char(userdata, c);
+  } else if (c <= 0x7ff) {
+    int b1 = 0xc0 | ((c >> 6) & 0x1f);
+    int b2 = 0x80 | (c & 0x3f);
+    json_reader_string_add_char(userdata, b1);
+    json_reader_string_add_char(userdata, b2);
+  } else if (c <= 0xffff) {
+    int b1 = 0xe0 | ((c >> 12) & 0x0f);
+    int b2 = 0x80 | ((c >> 6) & 0x3f);
+    int b3 = 0x80 | (c & 0x3f);
+    json_reader_string_add_char(userdata, b1);
+    json_reader_string_add_char(userdata, b2);
+    json_reader_string_add_char(userdata, b3);
+  } else if (c <= 0x1fffff) {
+    int b1 = 0xf0 | ((c >> 18) & 0x07);
+    int b2 = 0x80 | ((c >> 12) & 0x3f);
+    int b3 = 0x80 | ((c >> 6) & 0x3f);
+    int b4 = 0x80 | (c & 0x3f);
+    json_reader_string_add_char(userdata, b1);
+    json_reader_string_add_char(userdata, b2);
+    json_reader_string_add_char(userdata, b3);
+    json_reader_string_add_char(userdata, b4);
+  }
+}
+
+/* We consider that the input may be a zero-terminated string. So we can
+ * end up hitting EOF before consuming the alleged string length.
+ */
+static gpr_uint32 json_reader_read_char(void* userdata) {
+  gpr_uint32 r;
+  json_reader_userdata* state = userdata;
+
+  if (state->remaining_input == 0) return GRPC_JSON_READ_CHAR_EOF;
+
+  r = *state->input++;
+  state->remaining_input--;
+
+  if (r == 0) {
+    state->remaining_input = 0;
+    return GRPC_JSON_READ_CHAR_EOF;
+  }
+
+  return r;
+}
+
+/* Helper function to create a new grpc_json object and link it into
+ * our tree-in-progress inside our opaque structure.
+ */
+static grpc_json* json_create_and_link(void* userdata,
+                                       grpc_json_type type) {
+  json_reader_userdata* state = userdata;
+  grpc_json* json = grpc_json_create(type);
+
+  json->parent = state->current_container;
+  json->prev = state->current_value;
+  state->current_value = json;
+
+  if (json->prev) {
+    json->prev->next = json;
+  }
+  if (json->parent) {
+    if (!json->parent->child) {
+      json->parent->child = json;
+    }
+    if (json->parent->type == GRPC_JSON_OBJECT) {
+      json->key = (char*) state->key;
+    }
+  }
+  if (!state->top) {
+    state->top = json;
+  }
+
+  return json;
+}
+
+static void json_reader_container_begins(void* userdata, grpc_json_type type) {
+  json_reader_userdata* state = userdata;
+  grpc_json* container;
+
+  GPR_ASSERT(type == GRPC_JSON_ARRAY || type == GRPC_JSON_OBJECT);
+
+  container = json_create_and_link(userdata, type);
+  state->current_container = container;
+  state->current_value = NULL;
+}
+
+/* It's important to remember that the reader is mostly stateless, so it
+ * doesn't try to remember what the container prior to the one that just
+ * ended was. Since we're keeping track of these for our own purposes, we
+ * are able to return that information back, which is useful for validating
+ * the input json stream.
+ *
+ * Also note that if we're at the top of the tree, and the last container
+ * ends, we have to return GRPC_JSON_TOP_LEVEL.
+ */
+static grpc_json_type json_reader_container_ends(void* userdata) {
+  grpc_json_type container_type = GRPC_JSON_TOP_LEVEL;
+  json_reader_userdata* state = userdata;
+
+  GPR_ASSERT(state->current_container);
+
+  state->current_value = state->current_container;
+  state->current_container = state->current_container->parent;
+
+  if (state->current_container) {
+    container_type = state->current_container->type;
+  }
+
+  return container_type;
+}
+
+/* The next 3 functions are the reader asking us to use our string scratchpad
+ * for one of these 3 purposes.
+ *
+ * Note that in the set_number case, we don't try to interpret the number.
+ * We keep it as a string, and leave it to the caller to evaluate it.
+ */
+static void json_reader_set_key(void* userdata) {
+  json_reader_userdata* state = userdata;
+  state->key = state->string;
+}
+
+static void json_reader_set_string(void* userdata) {
+  json_reader_userdata* state = userdata;
+  grpc_json* json = json_create_and_link(userdata, GRPC_JSON_STRING);
+  json->value = (char*) state->string;
+}
+
+static int json_reader_set_number(void* userdata) {
+  json_reader_userdata* state = userdata;
+  grpc_json* json = json_create_and_link(userdata, GRPC_JSON_NUMBER);
+  json->value = (char*) state->string;
+  return 1;
+}
+
+/* The object types true, false and null are self-sufficient, and don't need
+ * any more information besides their type.
+ */
+static void json_reader_set_true(void* userdata) {
+  json_create_and_link(userdata, GRPC_JSON_TRUE);
+}
+
+static void json_reader_set_false(void* userdata) {
+  json_create_and_link(userdata, GRPC_JSON_FALSE);
+}
+
+static void json_reader_set_null(void* userdata) {
+  json_create_and_link(userdata, GRPC_JSON_NULL);
+}
+
+static grpc_json_reader_vtable reader_vtable = {
+  json_reader_string_clear,
+  json_reader_string_add_char,
+  json_reader_string_add_utf32,
+  json_reader_read_char,
+  json_reader_container_begins,
+  json_reader_container_ends,
+  json_reader_set_key,
+  json_reader_set_string,
+  json_reader_set_number,
+  json_reader_set_true,
+  json_reader_set_false,
+  json_reader_set_null
+};
+
+/* And finally, let's define our public API. */
+grpc_json* grpc_json_parse_string_with_len(char* input, size_t size) {
+  grpc_json_reader reader;
+  json_reader_userdata state;
+  grpc_json *json = NULL;
+  grpc_json_reader_status status;
+
+  if (!input) return NULL;
+
+  state.top = state.current_container = state.current_value = NULL;
+  state.string = state.key = NULL;
+  state.string_ptr = state.input = (gpr_uint8*) input;
+  state.remaining_input = size;
+  grpc_json_reader_init(&reader, &reader_vtable, &state);
+
+  status = grpc_json_reader_run(&reader);
+  json = state.top;
+
+  if ((status != GRPC_JSON_DONE) && json) {
+    grpc_json_destroy(json);
+    json = NULL;
+  }
+
+  return json;
+}
+
+#define UNBOUND_JSON_STRING_LENGTH 0x7fffffff
+
+grpc_json* grpc_json_parse_string(char* input) {
+  return grpc_json_parse_string_with_len(input, UNBOUND_JSON_STRING_LENGTH);
+}
+
+static void json_dump_recursive(grpc_json_writer* writer,
+                                grpc_json* json, int in_object) {
+  while (json) {
+    if (in_object) grpc_json_writer_object_key(writer, json->key);
+
+    switch (json->type) {
+      case GRPC_JSON_OBJECT:
+      case GRPC_JSON_ARRAY:
+        grpc_json_writer_container_begins(writer, json->type);
+        if (json->child)
+          json_dump_recursive(writer, json->child,
+                              json->type == GRPC_JSON_OBJECT);
+        grpc_json_writer_container_ends(writer, json->type);
+        break;
+      case GRPC_JSON_STRING:
+        grpc_json_writer_value_string(writer, json->value);
+        break;
+      case GRPC_JSON_NUMBER:
+        grpc_json_writer_value_raw(writer, json->value);
+        break;
+      case GRPC_JSON_TRUE:
+        grpc_json_writer_value_raw_with_len(writer, "true", 4);
+        break;
+      case GRPC_JSON_FALSE:
+        grpc_json_writer_value_raw_with_len(writer, "false", 5);
+        break;
+      case GRPC_JSON_NULL:
+        grpc_json_writer_value_raw_with_len(writer, "null", 4);
+        break;
+      default:
+        abort();
+    }
+    json = json->next;
+  }
+}
+
+static grpc_json_writer_vtable writer_vtable = {
+  json_writer_output_char,
+  json_writer_output_string,
+  json_writer_output_string_with_len
+};
+
+char* grpc_json_dump_to_string(grpc_json* json, int indent) {
+  grpc_json_writer writer;
+  json_writer_userdata state;
+
+  state.output = NULL;
+  state.free_space = state.string_len = state.allocated = 0;
+  grpc_json_writer_init(&writer, indent, &writer_vtable, &state);
+
+  json_dump_recursive(&writer, json, 0);
+
+  json_writer_output_char(&state, 0);
+
+  return state.output;
+}
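+
+/* A minimal usage sketch of the above (illustrative only; error handling
+ * mostly omitted). The input buffer must be writable, since the parser
+ * unescapes strings in place:
+ *
+ *   char buffer[] = "{\"a\": [1, 2], \"b\": true}";
+ *   grpc_json* parsed = grpc_json_parse_string(buffer);
+ *   if (parsed) {
+ *     char* output = grpc_json_dump_to_string(parsed, 2);
+ *     gpr_log(GPR_INFO, "%s", output);
+ *     gpr_free(output);
+ *     grpc_json_destroy(parsed);
+ *   }
+ */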
diff --git a/src/core/json/json_writer.c b/src/core/json/json_writer.c
new file mode 100644
index 0000000..5605694
--- /dev/null
+++ b/src/core/json/json_writer.c
@@ -0,0 +1,252 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <string.h>
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/json/json_writer.h"
+
+static void json_writer_output_char(grpc_json_writer* writer, char c) {
+  writer->vtable->output_char(writer->userdata, c);
+}
+
+static void json_writer_output_string(grpc_json_writer* writer, const char* str) {
+  writer->vtable->output_string(writer->userdata, str);
+}
+
+static void json_writer_output_string_with_len(grpc_json_writer* writer, const char* str, size_t len) {
+  writer->vtable->output_string_with_len(writer->userdata, str, len);
+}
+
+void grpc_json_writer_init(grpc_json_writer* writer, int indent,
+                           grpc_json_writer_vtable* vtable, void* userdata) {
+  memset(writer, 0, sizeof(grpc_json_writer));
+  writer->container_empty = 1;
+  writer->indent = indent;
+  writer->vtable = vtable;
+  writer->userdata = userdata;
+}
+
+static void json_writer_output_indent(grpc_json_writer* writer) {
+  static const char spacesstr[] =
+      "                "
+      "                "
+      "                "
+      "                ";
+
+  unsigned spaces = writer->depth * writer->indent;
+
+  if (writer->indent == 0) return;
+
+  if (writer->got_key) {
+    json_writer_output_char(writer, ' ');
+    return;
+  }
+
+  while (spaces >= (sizeof(spacesstr) - 1)) {
+    json_writer_output_string_with_len(writer, spacesstr,
+                                            sizeof(spacesstr) - 1);
+    spaces -= (sizeof(spacesstr) - 1);
+  }
+
+  if (spaces == 0) return;
+
+  json_writer_output_string_with_len(
+      writer, spacesstr + sizeof(spacesstr) - 1 - spaces, spaces);
+}
+
+static void json_writer_value_end(grpc_json_writer* writer) {
+  if (writer->container_empty) {
+    writer->container_empty = 0;
+    if ((writer->indent == 0) || (writer->depth == 0)) return;
+    json_writer_output_char(writer, '\n');
+  } else {
+    json_writer_output_char(writer, ',');
+    if (writer->indent == 0) return;
+    json_writer_output_char(writer, '\n');
+  }
+}
+
+static void json_writer_escape_utf16(grpc_json_writer* writer, gpr_uint16 utf16) {
+  static const char hex[] = "0123456789abcdef";
+
+  json_writer_output_string_with_len(writer, "\\u", 2);
+  json_writer_output_char(writer, hex[(utf16 >> 12) & 0x0f]);
+  json_writer_output_char(writer, hex[(utf16 >> 8) & 0x0f]);
+  json_writer_output_char(writer, hex[(utf16 >> 4) & 0x0f]);
+  json_writer_output_char(writer, hex[(utf16) & 0x0f]);
+}
+
+static void json_writer_escape_string(grpc_json_writer* writer,
+                                      const char* string) {
+  json_writer_output_char(writer, '"');
+
+  for (;;) {
+    gpr_uint8 c = (gpr_uint8)*string++;
+    if (c == 0) {
+      break;
+    } else if ((c >= 32) && (c <= 127)) {
+      if ((c == '\\') || (c == '"')) json_writer_output_char(writer, '\\');
+      json_writer_output_char(writer, c);
+    } else if (c < 32) {
+      switch (c) {
+        case '\b':
+          json_writer_output_string_with_len(writer, "\\b", 2);
+          break;
+        case '\f':
+          json_writer_output_string_with_len(writer, "\\f", 2);
+          break;
+        case '\n':
+          json_writer_output_string_with_len(writer, "\\n", 2);
+          break;
+        case '\r':
+          json_writer_output_string_with_len(writer, "\\r", 2);
+          break;
+        case '\t':
+          json_writer_output_string_with_len(writer, "\\t", 2);
+          break;
+        default:
+          json_writer_escape_utf16(writer, c);
+          break;
+      }
+    } else {
+      gpr_uint32 utf32 = 0;
+      int extra = 0;
+      int i;
+      int valid = 1;
+      if ((c & 0xe0) == 0xc0) {
+        utf32 = c & 0x1f;
+        extra = 1;
+      } else if ((c & 0xf0) == 0xe0) {
+        utf32 = c & 0x0f;
+        extra = 2;
+      } else if ((c & 0xf8) == 0xf0) {
+        utf32 = c & 0x07;
+        extra = 3;
+      } else {
+        break;
+      }
+      for (i = 0; i < extra; i++) {
+        utf32 <<= 6;
+        c = *string++;
+        if ((c & 0xc0) != 0x80) {
+          valid = 0;
+          break;
+        }
+        utf32 |= c & 0x3f;
+      }
+      if (!valid) break;
+      /* The range 0xd800 - 0xdfff is permanently reserved for surrogates.
+       * Any other code point below 0x110000 may be assigned in the future,
+       * so if we don't want the software to break then, we have to allow
+       * everything else. The first non-unicode value is 0x110000. */
+      if (((utf32 >= 0xd800) && (utf32 <= 0xdfff)) ||
+          (utf32 >= 0x110000)) break;
+      if (utf32 >= 0x10000) {
+        /* If utf32 contains a character that is above 0xffff, it needs to be
+         * broken down into a utf-16 surrogate pair. A surrogate pair is first
+         * a high surrogate, followed by a low surrogate. Each surrogate holds
+         * 10 bits of usable data, thus allowing a total of 20 bits of data.
+         * The high surrogate marker is 0xd800, while the low surrogate marker
+         * is 0xdc00. The low 10 bits of each will be the usable data.
+         *
+         * After re-combining the 20 bits of data, one has to add 0x10000 to
+         * the resulting value, in order to obtain the original character.
+         * This is because the range 0x0000 - 0xffff can already be written
+         * directly, without any special trick.
+         *
+         * Since 0x10ffff is the highest allowed character, we're working in
+         * the range 0x00000 - 0xfffff after we subtract 0x10000 from it.
+         * That range is exactly 20 bits.
+         */
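+        /* For example, U+1F600 becomes 0xf600 after subtracting 0x10000;
+         * its high surrogate is 0xd800 | (0xf600 >> 10) = 0xd83d, and its
+         * low surrogate is 0xdc00 | (0xf600 & 0x3ff) = 0xde00, so it is
+         * emitted as \ud83d\ude00.
+         */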
+        utf32 -= 0x10000;
+        json_writer_escape_utf16(writer, 0xd800 | (utf32 >> 10));
+        json_writer_escape_utf16(writer, 0xdc00 | (utf32 & 0x3ff));
+      } else {
+        json_writer_escape_utf16(writer, utf32);
+      }
+    }
+  }
+
+  json_writer_output_char(writer, '"');
+}
+
+void grpc_json_writer_container_begins(grpc_json_writer* writer, grpc_json_type type) {
+  if (!writer->got_key) json_writer_value_end(writer);
+  json_writer_output_indent(writer);
+  json_writer_output_char(writer, type == GRPC_JSON_OBJECT ? '{' : '[');
+  writer->container_empty = 1;
+  writer->got_key = 0;
+  writer->depth++;
+}
+
+void grpc_json_writer_container_ends(grpc_json_writer* writer, grpc_json_type type) {
+  if (writer->indent && !writer->container_empty)
+    json_writer_output_char(writer, '\n');
+  writer->depth--;
+  if (!writer->container_empty) json_writer_output_indent(writer);
+  json_writer_output_char(writer, type == GRPC_JSON_OBJECT ? '}' : ']');
+  writer->container_empty = 0;
+  writer->got_key = 0;
+}
+
+void grpc_json_writer_object_key(grpc_json_writer* writer, const char* string) {
+  json_writer_value_end(writer);
+  json_writer_output_indent(writer);
+  json_writer_escape_string(writer, string);
+  json_writer_output_char(writer, ':');
+  writer->got_key = 1;
+}
+
+void grpc_json_writer_value_raw(grpc_json_writer* writer, const char* string) {
+  if (!writer->got_key) json_writer_value_end(writer);
+  json_writer_output_indent(writer);
+  json_writer_output_string(writer, string);
+  writer->got_key = 0;
+}
+
+void grpc_json_writer_value_raw_with_len(grpc_json_writer* writer, const char* string, size_t len) {
+  if (!writer->got_key) json_writer_value_end(writer);
+  json_writer_output_indent(writer);
+  json_writer_output_string_with_len(writer, string, len);
+  writer->got_key = 0;
+}
+
+void grpc_json_writer_value_string(grpc_json_writer* writer, const char* string) {
+  if (!writer->got_key) json_writer_value_end(writer);
+  json_writer_output_indent(writer);
+  json_writer_escape_string(writer, string);
+  writer->got_key = 0;
+}
diff --git a/src/core/json/json_writer.h b/src/core/json/json_writer.h
new file mode 100644
index 0000000..0568401
--- /dev/null
+++ b/src/core/json/json_writer.h
@@ -0,0 +1,93 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/* The idea of the writer is basically symmetrical to the reader. While the
+ * reader emits various calls to your code, the writer takes basically the
+ * same calls and emits json from them. It doesn't check the order of the
+ * calls you make on it, meaning you can theoretically force it to generate
+ * invalid json.
+ *
+ * Also, unlike the reader, the writer expects UTF-8 encoded input strings.
+ * These strings will be UTF-8 validated, and any invalid sequence will cut
+ * the conversion short, just before the invalid UTF-8 sequence, so the
+ * output is still a valid UTF-8 string overall.
+ */
+
+#ifndef __GRPC_SRC_CORE_JSON_JSON_WRITER_H__
+#define __GRPC_SRC_CORE_JSON_JSON_WRITER_H__
+
+#include <stdlib.h>
+
+#include "src/core/json/json_common.h"
+
+typedef struct grpc_json_writer_vtable {
+  /* Adds a character to the output stream. */
+  void (*output_char)(void* userdata, char);
+  /* Adds a zero-terminated string to the output stream. */
+  void (*output_string)(void* userdata, const char* str);
+  /* Adds a fixed-length string to the output stream. */
+  void (*output_string_with_len)(void* userdata, const char* str, size_t len);
+
+} grpc_json_writer_vtable;
+
+typedef struct grpc_json_writer {
+  void* userdata;
+  grpc_json_writer_vtable* vtable;
+  int indent;
+  int depth;
+  int container_empty;
+  int got_key;
+} grpc_json_writer;
+
+/* Call this to initialize your writer structure. The indent parameter
+ * specifies the number of spaces to use for indenting the output. If you
+ * use indent=0, the output will not have any newlines either, thus
+ * emitting condensed json.
+ */
+void grpc_json_writer_init(grpc_json_writer* writer, int indent,
+                           grpc_json_writer_vtable* vtable, void* userdata);
+
+/* Signals the beginning of a container. */
+void grpc_json_writer_container_begins(grpc_json_writer* writer, grpc_json_type type);
+/* Signals the end of a container. */
+void grpc_json_writer_container_ends(grpc_json_writer* writer, grpc_json_type type);
+/* Writes down an object key for the next value. */
+void grpc_json_writer_object_key(grpc_json_writer* writer, const char* string);
+/* Sets a raw value. Useful for numbers. */
+void grpc_json_writer_value_raw(grpc_json_writer* writer, const char* string);
+/* Sets a raw value with its length. Useful for values like true or false. */
+void grpc_json_writer_value_raw_with_len(grpc_json_writer* writer, const char* string, size_t len);
+/* Sets a string value. It'll be escaped, and utf-8 validated. */
+void grpc_json_writer_value_string(grpc_json_writer* writer, const char* string);
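+
+/* For instance, emitting the condensed object {"name":"grpc"} amounts to the
+ * following sequence of calls (illustrative sketch only; my_vtable and
+ * my_buffer stand in for your own callbacks and output buffer):
+ *
+ *   grpc_json_writer writer;
+ *   grpc_json_writer_init(&writer, 0, &my_vtable, &my_buffer);
+ *   grpc_json_writer_container_begins(&writer, GRPC_JSON_OBJECT);
+ *   grpc_json_writer_object_key(&writer, "name");
+ *   grpc_json_writer_value_string(&writer, "grpc");
+ *   grpc_json_writer_container_ends(&writer, GRPC_JSON_OBJECT);
+ */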
+
+#endif /* __GRPC_SRC_CORE_JSON_JSON_WRITER_H__ */
diff --git a/src/core/security/base64.c b/src/core/security/base64.c
index f6c3c52..6346794 100644
--- a/src/core/security/base64.c
+++ b/src/core/security/base64.c
@@ -113,7 +113,8 @@
     *current++ = GRPC_BASE64_PAD_CHAR;
   }
 
-  GPR_ASSERT((current - result) < result_projected_size);
+  GPR_ASSERT(current >= result);
+  GPR_ASSERT((gpr_uintptr)(current - result) < result_projected_size);
   result[current - result] = '\0';
   return result;
 }
diff --git a/src/core/security/credentials.c b/src/core/security/credentials.c
index 2f75556..7b7d8f3 100644
--- a/src/core/security/credentials.c
+++ b/src/core/security/credentials.c
@@ -42,7 +42,7 @@
 #include <grpc/support/sync.h>
 #include <grpc/support/time.h>
 
-#include "third_party/cJSON/cJSON.h"
+#include "src/core/json/json.h"
 
 #include <string.h>
 #include <stdio.h>
@@ -173,7 +173,9 @@
   gpr_free(creds);
 }
 
-static int ssl_has_request_metadata(const grpc_credentials *creds) { return 0; }
+static int ssl_has_request_metadata(const grpc_credentials *creds) {
+  return 0;
+}
 
 static int ssl_has_request_metadata_only(const grpc_credentials *creds) {
   return 0;
@@ -336,7 +338,7 @@
   char *null_terminated_body = NULL;
   char *new_access_token = NULL;
   grpc_credentials_status status = GRPC_CREDENTIALS_OK;
-  cJSON *json = NULL;
+  grpc_json *json = NULL;
 
   if (response->body_length > 0) {
     null_terminated_body = gpr_malloc(response->body_length + 1);
@@ -351,41 +353,48 @@
     status = GRPC_CREDENTIALS_ERROR;
     goto end;
   } else {
-    cJSON *access_token = NULL;
-    cJSON *token_type = NULL;
-    cJSON *expires_in = NULL;
-    json = cJSON_Parse(null_terminated_body);
+    grpc_json *access_token = NULL;
+    grpc_json *token_type = NULL;
+    grpc_json *expires_in = NULL;
+    grpc_json *ptr;
+    json = grpc_json_parse_string(null_terminated_body);
     if (json == NULL) {
       gpr_log(GPR_ERROR, "Could not parse JSON from %s", null_terminated_body);
       status = GRPC_CREDENTIALS_ERROR;
       goto end;
     }
-    if (json->type != cJSON_Object) {
+    if (json->type != GRPC_JSON_OBJECT) {
       gpr_log(GPR_ERROR, "Response should be a JSON object");
       status = GRPC_CREDENTIALS_ERROR;
       goto end;
     }
-    access_token = cJSON_GetObjectItem(json, "access_token");
-    if (access_token == NULL || access_token->type != cJSON_String) {
+    for (ptr = json->child; ptr; ptr = ptr->next) {
+      if (strcmp(ptr->key, "access_token") == 0) {
+        access_token = ptr;
+      } else if (strcmp(ptr->key, "token_type") == 0) {
+        token_type = ptr;
+      } else if (strcmp(ptr->key, "expires_in") == 0) {
+        expires_in = ptr;
+      }
+    }
+    if (access_token == NULL || access_token->type != GRPC_JSON_STRING) {
       gpr_log(GPR_ERROR, "Missing or invalid access_token in JSON.");
       status = GRPC_CREDENTIALS_ERROR;
       goto end;
     }
-    token_type = cJSON_GetObjectItem(json, "token_type");
-    if (token_type == NULL || token_type->type != cJSON_String) {
+    if (token_type == NULL || token_type->type != GRPC_JSON_STRING) {
       gpr_log(GPR_ERROR, "Missing or invalid token_type in JSON.");
       status = GRPC_CREDENTIALS_ERROR;
       goto end;
     }
-    expires_in = cJSON_GetObjectItem(json, "expires_in");
-    if (expires_in == NULL || expires_in->type != cJSON_Number) {
+    if (expires_in == NULL || expires_in->type != GRPC_JSON_NUMBER) {
       gpr_log(GPR_ERROR, "Missing or invalid expires_in in JSON.");
       status = GRPC_CREDENTIALS_ERROR;
       goto end;
     }
-    gpr_asprintf(&new_access_token, "%s %s", token_type->valuestring,
-                 access_token->valuestring);
-    token_lifetime->tv_sec = expires_in->valueint;
+    gpr_asprintf(&new_access_token, "%s %s", token_type->value,
+                 access_token->value);
+    token_lifetime->tv_sec = strtol(expires_in->value, NULL, 10);
     token_lifetime->tv_nsec = 0;
     if (*token_elem != NULL) grpc_mdelem_unref(*token_elem);
     *token_elem = grpc_mdelem_from_strings(ctx, GRPC_AUTHORIZATION_METADATA_KEY,
@@ -400,7 +409,7 @@
   }
   if (null_terminated_body != NULL) gpr_free(null_terminated_body);
   if (new_access_token != NULL) gpr_free(new_access_token);
-  if (json != NULL) cJSON_Delete(json);
+  if (json != NULL) grpc_json_destroy(json);
   return status;
 }
 
@@ -896,7 +905,9 @@
   gpr_free(c);
 }
 
-static int iam_has_request_metadata(const grpc_credentials *creds) { return 1; }
+static int iam_has_request_metadata(const grpc_credentials *creds) {
+  return 1;
+}
 
 static int iam_has_request_metadata_only(const grpc_credentials *creds) {
   return 1;
diff --git a/src/core/security/json_token.c b/src/core/security/json_token.c
index 82bd9b5..8e48686 100644
--- a/src/core/security/json_token.c
+++ b/src/core/security/json_token.c
@@ -44,7 +44,8 @@
 #include <openssl/bio.h>
 #include <openssl/evp.h>
 #include <openssl/pem.h>
-#include "third_party/cJSON/cJSON.h"
+
+#include "src/core/json/json.h"
 
 /* --- Constants. --- */
 
@@ -64,18 +65,20 @@
 
 /* --- grpc_auth_json_key. --- */
 
-static const char *json_get_string_property(cJSON *json,
+static const char *json_get_string_property(grpc_json *json,
                                             const char *prop_name) {
-  cJSON *child = NULL;
-  child = cJSON_GetObjectItem(json, prop_name);
-  if (child == NULL || child->type != cJSON_String) {
+  grpc_json *child;
+  for (child = json->child; child != NULL; child = child->next) {
+    if (strcmp(child->key, prop_name) == 0) break;
+  }
+  if (child == NULL || child->type != GRPC_JSON_STRING) {
     gpr_log(GPR_ERROR, "Invalid or missing %s property.", prop_name);
     return NULL;
   }
-  return child->valuestring;
+  return child->value;
 }
 
-static int set_json_key_string_property(cJSON *json, const char *prop_name,
+static int set_json_key_string_property(grpc_json *json, const char *prop_name,
                                         char **json_key_field) {
   const char *prop_value = json_get_string_property(json, prop_name);
   if (prop_value == NULL) return 0;
@@ -91,7 +94,8 @@
 grpc_auth_json_key grpc_auth_json_key_create_from_string(
     const char *json_string) {
   grpc_auth_json_key result;
-  cJSON *json = cJSON_Parse(json_string);
+  char *scratchpad = gpr_strdup(json_string);
+  grpc_json *json = grpc_json_parse_string(scratchpad);
   BIO *bio = NULL;
   const char *prop_value;
   int success = 0;
@@ -100,7 +104,7 @@
   result.type = GRPC_AUTH_JSON_KEY_TYPE_INVALID;
   if (json == NULL) {
     gpr_log(GPR_ERROR, "Invalid json string %s", json_string);
-    return result;
+    goto end;
   }
 
   prop_value = json_get_string_property(json, "type");
@@ -123,7 +127,8 @@
     goto end;
   }
   bio = BIO_new(BIO_s_mem());
-  if (BIO_puts(bio, prop_value) != strlen(prop_value)) {
+  success = BIO_puts(bio, prop_value);
+  if ((success < 0) || ((size_t)success != strlen(prop_value))) {
     gpr_log(GPR_ERROR, "Could not write into openssl BIO.");
     goto end;
   }
@@ -136,8 +141,9 @@
 
 end:
   if (bio != NULL) BIO_free(bio);
-  if (json != NULL) cJSON_Delete(json);
+  if (json != NULL) grpc_json_destroy(json);
   if (!success) grpc_auth_json_key_destruct(&result);
+  gpr_free(scratchpad);
   return result;
 }
 
@@ -164,49 +170,63 @@
 
 /* --- jwt encoding and signature. --- */
 
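+/* Helper linking a new grpc_json node of the given type into the tree being
+ * built: as the next sibling of brother (if any) and as a child of parent.
+ * The key and value pointers are referenced as-is, not copied. */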
+static grpc_json *create_child(grpc_json *brother, grpc_json *parent,
+                               const char *key, const char *value,
+                               grpc_json_type type) {
+  grpc_json *child = grpc_json_create(type);
+  if (brother) brother->next = child;
+  if (!parent->child) parent->child = child;
+  child->parent = parent;
+  child->value = value;
+  child->key = key;
+  return child;
+}
+
 static char *encoded_jwt_header(const char *algorithm) {
-  cJSON *json = cJSON_CreateObject();
-  cJSON *child = cJSON_CreateString(algorithm);
+  grpc_json *json = grpc_json_create(GRPC_JSON_OBJECT);
+  grpc_json *child = NULL;
   char *json_str = NULL;
   char *result = NULL;
-  cJSON_AddItemToObject(json, "alg", child);
-  child = cJSON_CreateString(GRPC_JWT_TYPE);
-  cJSON_AddItemToObject(json, "typ", child);
-  json_str = cJSON_PrintUnformatted(json);
+
+  child = create_child(NULL, json, "alg", algorithm, GRPC_JSON_STRING);
+  create_child(child, json, "typ", GRPC_JWT_TYPE, GRPC_JSON_STRING);
+
+  json_str = grpc_json_dump_to_string(json, 0);
   result = grpc_base64_encode(json_str, strlen(json_str), 1, 0);
-  free(json_str);
-  cJSON_Delete(json);
+  gpr_free(json_str);
+  grpc_json_destroy(json);
   return result;
 }
 
 static char *encoded_jwt_claim(const grpc_auth_json_key *json_key,
                                const char *scope, gpr_timespec token_lifetime) {
-  cJSON *json = cJSON_CreateObject();
-  cJSON *child = NULL;
+  grpc_json *json = grpc_json_create(GRPC_JSON_OBJECT);
+  grpc_json *child = NULL;
   char *json_str = NULL;
   char *result = NULL;
   gpr_timespec now = gpr_now();
   gpr_timespec expiration = gpr_time_add(now, token_lifetime);
+  /* log10(2^64) ~= 20 */
+  char now_str[24];
+  char expiration_str[24];
   if (gpr_time_cmp(token_lifetime, grpc_max_auth_token_lifetime) > 0) {
     gpr_log(GPR_INFO, "Cropping token lifetime to maximum allowed value.");
     expiration = gpr_time_add(now, grpc_max_auth_token_lifetime);
   }
-  child = cJSON_CreateString(json_key->client_email);
-  cJSON_AddItemToObject(json, "iss", child);
-  child = cJSON_CreateString(scope);
-  cJSON_AddItemToObject(json, "scope", child);
-  child = cJSON_CreateString(GRPC_JWT_AUDIENCE);
-  cJSON_AddItemToObject(json, "aud", child);
-  child = cJSON_CreateNumber(now.tv_sec);
-  cJSON_SetIntValue(child, now.tv_sec);
-  cJSON_AddItemToObject(json, "iat", child);
-  child = cJSON_CreateNumber(expiration.tv_sec);
-  cJSON_SetIntValue(child, expiration.tv_sec);
-  cJSON_AddItemToObject(json, "exp", child);
-  json_str = cJSON_PrintUnformatted(json);
+  sprintf(now_str, "%ld", now.tv_sec);
+  sprintf(expiration_str, "%ld", expiration.tv_sec);
+
+  child = create_child(NULL, json, "iss", json_key->client_email,
+                       GRPC_JSON_STRING);
+  child = create_child(child, json, "scope", scope, GRPC_JSON_STRING);
+  child = create_child(child, json, "aud", GRPC_JWT_AUDIENCE, GRPC_JSON_STRING);
+  child = create_child(child, json, "iat", now_str, GRPC_JSON_NUMBER);
+  create_child(child, json, "exp", expiration_str, GRPC_JSON_NUMBER);
+
+  json_str = grpc_json_dump_to_string(json, 0);
   result = grpc_base64_encode(json_str, strlen(json_str), 1, 0);
-  free(json_str);
-  cJSON_Delete(json);
+  gpr_free(json_str);
+  grpc_json_destroy(json);
   return result;
 }
 
@@ -216,12 +236,13 @@
   size_t result_len = str1_len + 1 /* dot */ + str2_len;
   char *result = gpr_malloc(result_len + 1 /* NULL terminated */);
   char *current = result;
-  strncpy(current, str1, str1_len);
+  memcpy(current, str1, str1_len);
   current += str1_len;
   *(current++) = '.';
-  strncpy(current, str2, str2_len);
+  memcpy(current, str2, str2_len);
   current += str2_len;
-  GPR_ASSERT((current - result) == result_len);
+  GPR_ASSERT(current >= result);
+  GPR_ASSERT((gpr_uintptr)(current - result) == result_len);
   *current = '\0';
   gpr_free(str1);
   gpr_free(str2);
diff --git a/src/core/security/secure_endpoint.c b/src/core/security/secure_endpoint.c
index 9f12cf5..31138d6 100644
--- a/src/core/security/secure_endpoint.c
+++ b/src/core/security/secure_endpoint.c
@@ -111,7 +111,7 @@
 
 static void on_read(void *user_data, gpr_slice *slices, size_t nslices,
                     grpc_endpoint_cb_status error) {
-  int i = 0;
+  unsigned i;
   gpr_uint8 keep_looping = 0;
   int input_buffer_count = 0;
   tsi_result result = TSI_OK;
@@ -221,7 +221,7 @@
                                                  size_t nslices,
                                                  grpc_endpoint_write_cb cb,
                                                  void *user_data) {
-  int i = 0;
+  unsigned i;
   int output_buffer_count = 0;
   tsi_result result = TSI_OK;
   secure_endpoint *ep = (secure_endpoint *)secure_ep;
diff --git a/src/core/security/server_secure_chttp2.c b/src/core/security/server_secure_chttp2.c
index 9dd4327..480c882 100644
--- a/src/core/security/server_secure_chttp2.c
+++ b/src/core/security/server_secure_chttp2.c
@@ -92,7 +92,7 @@
   grpc_resolved_addresses *resolved = NULL;
   grpc_tcp_server *tcp = NULL;
   size_t i;
-  int count = 0;
+  unsigned count = 0;
   int port_num = -1;
   int port_temp;
 
@@ -127,6 +127,7 @@
   if (count != resolved->naddrs) {
     gpr_log(GPR_ERROR, "Only %d addresses added out of total %d resolved",
             count, resolved->naddrs);
+    /* If this is an error, shouldn't we goto error; here? */
   }
   grpc_resolved_addresses_destroy(resolved);
 
diff --git a/src/core/statistics/census_log.c b/src/core/statistics/census_log.c
index 404e92c..aea0a33 100644
--- a/src/core/statistics/census_log.c
+++ b/src/core/statistics/census_log.c
@@ -173,7 +173,7 @@
 struct census_log {
   int discard_old_records;
   /* Number of cores (aka hardware-contexts) */
-  int num_cores;
+  unsigned num_cores;
   /* number of CENSUS_LOG_2_MAX_RECORD_SIZE blocks in log */
   gpr_int32 num_blocks;
   cl_block* blocks;                       /* Block metadata. */
@@ -183,7 +183,7 @@
   /* Keeps the state of the reader iterator. A value of 0 indicates that
      iterator has reached the end. census_log_init_reader() resets the
      value to num_core to restart iteration. */
-  gpr_int32 read_iterator_state;
+  gpr_uint32 read_iterator_state;
   /* Points to the block being read. If non-NULL, the block is locked for
      reading (block_being_read_->reader_lock is held). */
   cl_block* block_being_read;
diff --git a/src/core/statistics/census_rpc_stats.c b/src/core/statistics/census_rpc_stats.c
index dd3c07e..785c091 100644
--- a/src/core/statistics/census_rpc_stats.c
+++ b/src/core/statistics/census_rpc_stats.c
@@ -184,7 +184,7 @@
   gpr_mu_lock(&g_mu);
   if (store != NULL) {
     size_t n;
-    int i, j;
+    unsigned i, j;
     gpr_timespec now = gpr_now();
     census_ht_kv* kv = census_ht_get_all_elements(store, &n);
     if (kv != NULL) {
diff --git a/src/core/statistics/hash_table.c b/src/core/statistics/hash_table.c
index 1aee86d..1f7c242 100644
--- a/src/core/statistics/hash_table.c
+++ b/src/core/statistics/hash_table.c
@@ -292,7 +292,7 @@
 }
 
 void census_ht_destroy(census_ht* ht) {
-  int i;
+  unsigned i;
   for (i = 0; i < ht->num_buckets; ++i) {
     ht_delete_entry_chain(&ht->options, ht->buckets[i].next);
   }
diff --git a/src/core/statistics/window_stats.c b/src/core/statistics/window_stats.c
index be53d81..42ff020 100644
--- a/src/core/statistics/window_stats.c
+++ b/src/core/statistics/window_stats.c
@@ -150,7 +150,7 @@
     is->width = size_ns / granularity;
     /* Check for possible overflow issues, and maximize interval size if the
        user requested something large enough. */
-    if (GPR_INT64_MAX - is->width > size_ns) {
+    if ((GPR_INT64_MAX - is->width) > size_ns) {
       is->top = size_ns + is->width;
     } else {
       is->top = GPR_INT64_MAX;
diff --git a/src/core/support/cpu.h b/src/core/support/cpu.h
index 1c2dde7..f8ec2c6 100644
--- a/src/core/support/cpu.h
+++ b/src/core/support/cpu.h
@@ -38,12 +38,12 @@
 
 /* Return the number of CPU cores on the current system. Will return 0 if
    information is not available. */
-int gpr_cpu_num_cores(void);
+unsigned gpr_cpu_num_cores(void);
 
 /* Return the CPU on which the current thread is executing; N.B. This should
    be considered advisory only - it is possible that the thread is switched
    to a different CPU at any time. Returns a value in range
    [0, gpr_cpu_num_cores() - 1] */
-int gpr_cpu_current_cpu(void);
+unsigned gpr_cpu_current_cpu(void);
 
 #endif /* __GRPC_INTERNAL_SUPPORT_CPU_H__ */
diff --git a/src/core/support/cpu_linux.c b/src/core/support/cpu_linux.c
index d800628..eab8b7f 100644
--- a/src/core/support/cpu_linux.c
+++ b/src/core/support/cpu_linux.c
@@ -75,8 +75,9 @@
 
 #include <grpc/support/log.h>
 
-int gpr_cpu_num_cores(void) {
+unsigned gpr_cpu_num_cores(void) {
   static int ncpus = 0;
+  /* FIXME: !threadsafe */
   if (ncpus == 0) {
     ncpus = sysconf(_SC_NPROCESSORS_ONLN);
     if (ncpus < 1) {
@@ -87,7 +88,7 @@
   return ncpus;
 }
 
-int gpr_cpu_current_cpu(void) {
+unsigned gpr_cpu_current_cpu(void) {
   int cpu = sched_getcpu();
   if (cpu < 0) {
     gpr_log(GPR_ERROR, "Error determining current CPU: %s\n", strerror(errno));
diff --git a/src/core/support/cpu_posix.c b/src/core/support/cpu_posix.c
index 2ea8080..68e8cb9 100644
--- a/src/core/support/cpu_posix.c
+++ b/src/core/support/cpu_posix.c
@@ -45,7 +45,7 @@
 
 static __thread char magic_thread_local;
 
-int gpr_cpu_num_cores(void) {
+unsigned gpr_cpu_num_cores(void) {
   static int ncpus = 0;
   if (ncpus == 0) {
     ncpus = sysconf(_SC_NPROCESSORS_ONLN);
@@ -63,7 +63,7 @@
   return ((x >> 4) ^ (x >> 9) ^ (x >> 14)) % gpr_cpu_num_cores();
 }
 
-int gpr_cpu_current_cpu(void) {
+unsigned gpr_cpu_current_cpu(void) {
   /* NOTE: there's no way I know to return the actual cpu index portably...
      most code that's using this is using it to shard across work queues though,
      so here we use thread identity instead to achieve a similar though not
diff --git a/src/core/support/histogram.c b/src/core/support/histogram.c
index 425421c..cd360c5 100644
--- a/src/core/support/histogram.c
+++ b/src/core/support/histogram.c
@@ -77,7 +77,6 @@
 /* bounds checked version of the above */
 static size_t bucket_for(gpr_histogram *h, double x) {
   size_t bucket = bucket_for_unchecked(h, GPR_CLAMP(x, 0, h->max_possible));
-  GPR_ASSERT(bucket >= 0);
   GPR_ASSERT(bucket < h->num_buckets);
   return bucket;
 }
diff --git a/src/core/support/log_posix.c b/src/core/support/log_posix.c
index 1292c9e..ab2d2e5 100644
--- a/src/core/support/log_posix.c
+++ b/src/core/support/log_posix.c
@@ -64,7 +64,7 @@
   va_end(args);
   if (ret < 0) {
     message = NULL;
-  } else if (ret <= sizeof(buf) - 1) {
+  } else if ((size_t)ret <= sizeof(buf) - 1) {
     message = buf;
   } else {
     message = allocated = gpr_malloc(ret + 1);
diff --git a/src/core/support/slice_buffer.c b/src/core/support/slice_buffer.c
index 2ade049..22bda96 100644
--- a/src/core/support/slice_buffer.c
+++ b/src/core/support/slice_buffer.c
@@ -55,7 +55,7 @@
   gpr_free(sb->slices);
 }
 
-gpr_uint8 *gpr_slice_buffer_tiny_add(gpr_slice_buffer *sb, int n) {
+gpr_uint8 *gpr_slice_buffer_tiny_add(gpr_slice_buffer *sb, unsigned n) {
   gpr_slice *back;
   gpr_uint8 *out;
 
@@ -64,7 +64,7 @@
   if (sb->count == 0) goto add_new;
   back = &sb->slices[sb->count - 1];
   if (back->refcount) goto add_new;
-  if (back->data.inlined.length + n > sizeof(back->data.inlined.bytes))
+  if ((back->data.inlined.length + n) > sizeof(back->data.inlined.bytes))
     goto add_new;
   out = back->data.inlined.bytes + back->data.inlined.length;
   back->data.inlined.length += n;
diff --git a/src/core/support/string_posix.c b/src/core/support/string_posix.c
index 7b7e82e..5783281 100644
--- a/src/core/support/string_posix.c
+++ b/src/core/support/string_posix.c
@@ -57,7 +57,7 @@
   va_start(args, format);
   ret = vsnprintf(buf, sizeof(buf), format, args);
   va_end(args);
-  if (!(0 <= ret && ret < ~(size_t)0)) {
+  if (!(0 <= ret)) {
     *strp = NULL;
     return -1;
   }
@@ -79,7 +79,7 @@
   va_start(args, format);
   ret = vsnprintf(*strp, strp_buflen, format, args);
   va_end(args);
-  if (ret == strp_buflen - 1) {
+  if ((size_t)ret == strp_buflen - 1) {
     return ret;
   }
 
diff --git a/src/core/support/thd_posix.c b/src/core/support/thd_posix.c
index 1189e0c..bac1d9c 100644
--- a/src/core/support/thd_posix.c
+++ b/src/core/support/thd_posix.c
@@ -43,6 +43,7 @@
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
 #include <grpc/support/thd.h>
+#include <grpc/support/useful.h>
 
 struct thd_arg {
   void (*body)(void *arg); /* body of a thread */
diff --git a/src/core/surface/server_chttp2.c b/src/core/surface/server_chttp2.c
index 47fca82..5ba7d47 100644
--- a/src/core/surface/server_chttp2.c
+++ b/src/core/surface/server_chttp2.c
@@ -75,7 +75,7 @@
   grpc_resolved_addresses *resolved = NULL;
   grpc_tcp_server *tcp = NULL;
   size_t i;
-  int count = 0;
+  unsigned count = 0;
   int port_num = -1;
   int port_temp;
 
diff --git a/src/core/transport/chttp2_transport.c b/src/core/transport/chttp2_transport.c
index 5465d33..48a1005 100644
--- a/src/core/transport/chttp2_transport.c
+++ b/src/core/transport/chttp2_transport.c
@@ -237,6 +237,9 @@
   /* state for a stream that's not yet been created */
   grpc_stream_op_buffer new_stream_sopb;
 
+  /* stream ops that need to be destroyed, but outside of the lock */
+  grpc_stream_op_buffer nuke_later_sopb;
+
   /* active parser */
   void *parser_data;
   stream *incoming_stream;
@@ -370,6 +373,8 @@
   }
   gpr_free(t->pending_goaways);
 
+  grpc_sopb_destroy(&t->nuke_later_sopb);
+
   gpr_free(t);
 }
 
@@ -416,6 +421,7 @@
   t->cap_pending_goaways = 0;
   gpr_slice_buffer_init(&t->outbuf);
   gpr_slice_buffer_init(&t->qbuf);
+  grpc_sopb_init(&t->nuke_later_sopb);
   if (is_client) {
     gpr_slice_buffer_add(&t->qbuf,
                          gpr_slice_from_copied_string(CLIENT_CONNECT_STRING));
@@ -555,6 +561,11 @@
   return 0;
 }
 
+static void schedule_nuke_sopb(transport *t, grpc_stream_op_buffer *sopb) {
+  grpc_sopb_append(&t->nuke_later_sopb, sopb->ops, sopb->nops);
+  sopb->nops = 0;
+}
+
 static void destroy_stream(grpc_transport *gt, grpc_stream *gs) {
   transport *t = (transport *)gt;
   stream *s = (stream *)gs;
@@ -681,6 +692,11 @@
   int i;
   pending_goaway *goaways = NULL;
   grpc_endpoint *ep = t->ep;
+  grpc_stream_op_buffer nuke_now = t->nuke_later_sopb;
+
+  if (nuke_now.nops) {
+    memset(&t->nuke_later_sopb, 0, sizeof(t->nuke_later_sopb));
+  }
 
   /* see if we need to trigger a write - and if so, get the data ready */
   if (ep && !t->writing) {
@@ -750,6 +766,10 @@
     unref_transport(t);
   }
 
+  if (nuke_now.nops) {
+    grpc_sopb_destroy(&nuke_now);
+  }
+
   gpr_free(goaways);
 }
 
@@ -1006,9 +1026,9 @@
 
   if (s) {
     /* clear out any unreported input & output: nobody cares anymore */
-    grpc_sopb_reset(&s->parser.incoming_sopb);
     had_outgoing = s->outgoing_sopb.nops != 0;
-    grpc_sopb_reset(&s->outgoing_sopb);
+    schedule_nuke_sopb(t, &s->parser.incoming_sopb);
+    schedule_nuke_sopb(t, &s->outgoing_sopb);
     if (s->cancelled) {
       send_rst = 0;
     } else if (!s->read_closed || !s->sent_write_closed || had_outgoing) {
@@ -1518,7 +1538,7 @@
     dts_fh_0:
     case DTS_FH_0:
       GPR_ASSERT(cur < end);
-      t->incoming_frame_size = ((gpr_uint32) * cur) << 16;
+      t->incoming_frame_size = ((gpr_uint32)*cur) << 16;
       if (++cur == end) {
         t->deframe_state = DTS_FH_1;
         return 1;
@@ -1526,7 +1546,7 @@
     /* fallthrough */
     case DTS_FH_1:
       GPR_ASSERT(cur < end);
-      t->incoming_frame_size |= ((gpr_uint32) * cur) << 8;
+      t->incoming_frame_size |= ((gpr_uint32)*cur) << 8;
       if (++cur == end) {
         t->deframe_state = DTS_FH_2;
         return 1;
@@ -1558,7 +1578,7 @@
     /* fallthrough */
     case DTS_FH_5:
       GPR_ASSERT(cur < end);
-      t->incoming_stream_id = (((gpr_uint32) * cur) << 24) & 0x7f;
+      t->incoming_stream_id = (((gpr_uint32)*cur) << 24) & 0x7f;
       if (++cur == end) {
         t->deframe_state = DTS_FH_6;
         return 1;
@@ -1566,7 +1586,7 @@
     /* fallthrough */
     case DTS_FH_6:
       GPR_ASSERT(cur < end);
-      t->incoming_stream_id |= ((gpr_uint32) * cur) << 16;
+      t->incoming_stream_id |= ((gpr_uint32)*cur) << 16;
       if (++cur == end) {
         t->deframe_state = DTS_FH_7;
         return 1;
@@ -1574,7 +1594,7 @@
     /* fallthrough */
     case DTS_FH_7:
       GPR_ASSERT(cur < end);
-      t->incoming_stream_id |= ((gpr_uint32) * cur) << 8;
+      t->incoming_stream_id |= ((gpr_uint32)*cur) << 8;
       if (++cur == end) {
         t->deframe_state = DTS_FH_8;
         return 1;
@@ -1582,7 +1602,7 @@
     /* fallthrough */
     case DTS_FH_8:
       GPR_ASSERT(cur < end);
-      t->incoming_stream_id |= ((gpr_uint32) * cur);
+      t->incoming_stream_id |= ((gpr_uint32)*cur);
       t->deframe_state = DTS_FRAME;
       if (!init_frame_parser(t)) {
         return 0;
@@ -1738,9 +1758,9 @@
  */
 
 static const grpc_transport_vtable vtable = {
-    sizeof(stream),  init_stream,    send_batch,       set_allow_window_updates,
-    add_to_pollset,  destroy_stream, abort_stream,     goaway,
-    close_transport, send_ping,      destroy_transport};
+    sizeof(stream), init_stream, send_batch, set_allow_window_updates,
+    add_to_pollset, destroy_stream, abort_stream, goaway, close_transport,
+    send_ping, destroy_transport};
 
 void grpc_create_chttp2_transport(grpc_transport_setup_callback setup,
                                   void *arg,
diff --git a/src/core/tsi/fake_transport_security.c b/src/core/tsi/fake_transport_security.c
index 0b245e4..a96c7df 100644
--- a/src/core/tsi/fake_transport_security.c
+++ b/src/core/tsi/fake_transport_security.c
@@ -38,6 +38,7 @@
 
 #include <grpc/support/log.h>
 #include <grpc/support/port_platform.h>
+#include <grpc/support/useful.h>
 #include "src/core/tsi/transport_security.h"
 
 /* --- Constants. ---*/
@@ -412,7 +413,7 @@
     tsi_handshaker* self, const unsigned char* bytes, size_t* bytes_size) {
   tsi_result result = TSI_OK;
   tsi_fake_handshaker* impl = (tsi_fake_handshaker*)self;
-  int expected_msg = impl->next_message_to_send - 1;
+  tsi_fake_handshake_message expected_msg = impl->next_message_to_send - 1;
   tsi_fake_handshake_message received_msg;
 
   if (!impl->needs_incoming_message || impl->result == TSI_OK) {
diff --git a/src/core/tsi/ssl_transport_security.c b/src/core/tsi/ssl_transport_security.c
index 8801cc4..0f8cbcc 100644
--- a/src/core/tsi/ssl_transport_security.c
+++ b/src/core/tsi/ssl_transport_security.c
@@ -37,6 +37,7 @@
 
 #include <grpc/support/log.h>
 #include <grpc/support/sync.h>
+#include <grpc/support/useful.h>
 #include "src/core/tsi/transport_security.h"
 
 #include <openssl/bio.h>
@@ -565,7 +566,8 @@
     current += alpn_protocols_lengths[i];
   }
   /* Safety check. */
-  if ((current - *protocol_name_list) != *protocol_name_list_length) {
+  if ((current < *protocol_name_list) ||
+      ((gpr_uintptr)(current - *protocol_name_list) != *protocol_name_list_length)) {
     return TSI_INTERNAL_ERROR;
   }
   return TSI_OK;
@@ -1063,7 +1065,8 @@
   while ((client_current - in) < inlen) {
     unsigned char client_current_len = *(client_current++);
     const unsigned char* server_current = factory->alpn_protocol_list;
-    while ((server_current - factory->alpn_protocol_list) <
+    while ((server_current >= factory->alpn_protocol_list) &&
+           (gpr_uintptr)(server_current - factory->alpn_protocol_list) <
            factory->alpn_protocol_list_length) {
       unsigned char server_current_len = *(server_current++);
       if ((client_current_len == server_current_len) &&
diff --git a/src/cpp/server/thread_pool.cc b/src/cpp/server/thread_pool.cc
index a46d4c6..2027959 100644
--- a/src/cpp/server/thread_pool.cc
+++ b/src/cpp/server/thread_pool.cc
@@ -41,7 +41,10 @@
       for (;;) {
         std::unique_lock<std::mutex> lock(mu_);
         // Wait until work is available or we are shutting down.
-        cv_.wait(lock, [=]() { return shutdown_ || !callbacks_.empty(); });
+        auto have_work = [=]() { return shutdown_ || !callbacks_.empty(); };
+        if (!have_work()) {
+          cv_.wait(lock, have_work);
+        }
         // Drain callbacks before considering shutdown to ensure all work
         // gets completed.
         if (!callbacks_.empty()) {
@@ -71,7 +74,7 @@
 void ThreadPool::ScheduleCallback(const std::function<void()> &callback) {
   std::lock_guard<std::mutex> lock(mu_);
   callbacks_.push(callback);
-  cv_.notify_all();
+  cv_.notify_one();
 }
 
 }  // namespace grpc
diff --git a/src/node/examples/stock.proto b/src/node/examples/stock.proto
new file mode 100644
index 0000000..efe98d8
--- /dev/null
+++ b/src/node/examples/stock.proto
@@ -0,0 +1,62 @@
+// Copyright 2015, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package examples;
+
+// Protocol type definitions
+message StockRequest {
+  string symbol = 1;
+  int32 num_trades_to_watch = 2;
+};
+
+message StockReply {
+  float price = 1;
+  string symbol = 2;
+};
+
+
+// Interface exported by the server
+service Stock {
+  // Simple blocking RPC
+  rpc GetLastTradePrice(StockRequest) returns (StockReply) {
+  };
+  // Bidirectional streaming RPC
+  rpc GetLastTradePriceMultiple(stream StockRequest) returns
+    (stream StockReply) {
+  };
+  // Unidirectional server-to-client streaming RPC
+  rpc WatchFutureTrades(StockRequest) returns (stream StockReply) {
+  };
+  // Unidirectional client-to-server streaming RPC
+  rpc GetHighestTradePrice(stream StockRequest) returns (StockReply) {
+  };
+
+};
\ No newline at end of file
diff --git a/src/node/examples/stock_client.js b/src/node/examples/stock_client.js
new file mode 100644
index 0000000..8e99090
--- /dev/null
+++ b/src/node/examples/stock_client.js
@@ -0,0 +1,43 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+var grpc = require('..');
+var examples = grpc.load(__dirname + '/stock.proto').examples;
+
+/**
+ * This exports a client constructor for the Stock service. The usage looks like
+ *
+ * var StockClient = require('stock_client.js');
+ * var stockClient = new StockClient(server_address);
+ */
+module.exports = examples.Stock;
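Editor's note: a minimal usage sketch for the client exported above (not part of the patch). The lowerCamelCase method name and the address/symbol values are assumptions, following the conventions of the node interop client elsewhere in this change set.

  // Hypothetical usage sketch; address, symbol and method casing are assumptions.
  var StockClient = require('./stock_client.js');
  var client = new StockClient('localhost:50051');
  client.getLastTradePrice({symbol: 'GOOG'}, function(err, reply) {
    if (err) {
      console.error('GetLastTradePrice failed:', err);
      return;
    }
    console.log('Last trade price:', reply.price);
  });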
diff --git a/src/node/examples/stock_server.js b/src/node/examples/stock_server.js
new file mode 100644
index 0000000..c188181
--- /dev/null
+++ b/src/node/examples/stock_server.js
@@ -0,0 +1,83 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+var _ = require('underscore');
+var grpc = require('..');
+var examples = grpc.load(__dirname + '/stock.proto').examples;
+
+var StockServer = grpc.makeServerConstructor([examples.Stock.service]);
+
+function getLastTradePrice(call, callback) {
+  callback(null, {price: 88});
+}
+
+function watchFutureTrades(call) {
+  for (var i = 0; i < call.request.num_trades_to_watch; i++) {
+    call.write({price: 88.00 + i * 10.00});
+  }
+  call.end();
+}
+
+function getHighestTradePrice(call, callback) {
+  var trades = [];
+  call.on('data', function(data) {
+    trades.push({symbol: data.symbol, price: _.random(0, 100)});
+  });
+  call.on('end', function() {
+    if(_.isEmpty(trades)) {
+      callback(null, {});
+    } else {
+      callback(null, _.max(trades, function(trade){return trade.price;}));
+    }
+  });
+}
+
+function getLastTradePriceMultiple(call) {
+  call.on('data', function(data) {
+    call.write({price: 88});
+  });
+  call.on('end', function() {
+    call.end();
+  });
+}
+
+var stockServer = new StockServer({
+  'examples.Stock' : {
+    getLastTradePrice: getLastTradePrice,
+    getLastTradePriceMultiple: getLastTradePriceMultiple,
+    watchFutureTrades: watchFutureTrades,
+    getHighestTradePrice: getHighestTradePrice
+  }
+});
+
+module.exports = stockServer;
diff --git a/src/node/ext/byte_buffer.cc b/src/node/ext/byte_buffer.cc
index 1429514..695eced 100644
--- a/src/node/ext/byte_buffer.cc
+++ b/src/node/ext/byte_buffer.cc
@@ -39,13 +39,17 @@
 #include "grpc/grpc.h"
 #include "grpc/support/slice.h"
 
+#include "byte_buffer.h"
+
 namespace grpc {
 namespace node {
 
-#include "byte_buffer.h"
-
 using ::node::Buffer;
+using v8::Context;
+using v8::Function;
 using v8::Handle;
+using v8::Object;
+using v8::Number;
 using v8::Value;
 
 grpc_byte_buffer *BufferToByteBuffer(Handle<Value> buffer) {
@@ -73,7 +77,19 @@
     memcpy(result + offset, GPR_SLICE_START_PTR(next), GPR_SLICE_LENGTH(next));
     offset += GPR_SLICE_LENGTH(next);
   }
-  return NanEscapeScope(NanNewBufferHandle(result, length));
+  return NanEscapeScope(MakeFastBuffer(NanNewBufferHandle(result, length)));
+}
+
+Handle<Value> MakeFastBuffer(Handle<Value> slowBuffer) {
+  NanEscapableScope();
+  Handle<Object> globalObj = Context::GetCurrent()->Global();
+  Handle<Function> bufferConstructor = Handle<Function>::Cast(
+      globalObj->Get(NanNew("Buffer")));
+  Handle<Value> consArgs[3] = { slowBuffer,
+                                NanNew<Number>(Buffer::Length(slowBuffer)),
+                                NanNew<Number>(0) };
+  Handle<Object> fastBuffer = bufferConstructor->NewInstance(3, consArgs);
+  return NanEscapeScope(fastBuffer);
 }
 }  // namespace node
 }  // namespace grpc
diff --git a/src/node/ext/byte_buffer.h b/src/node/ext/byte_buffer.h
index ee2b4c0..5f1903a 100644
--- a/src/node/ext/byte_buffer.h
+++ b/src/node/ext/byte_buffer.h
@@ -50,6 +50,10 @@
 /* Convert a grpc_byte_buffer to a Node.js Buffer */
 v8::Handle<v8::Value> ByteBufferToBuffer(grpc_byte_buffer *buffer);
 
+/* Convert a ::node::Buffer to a fast Buffer, as defined in the Node
+   Buffer documentation */
+v8::Handle<v8::Value> MakeFastBuffer(v8::Handle<v8::Value> slowBuffer);
+
 }  // namespace node
 }  // namespace grpc
 
diff --git a/src/node/ext/call.cc b/src/node/ext/call.cc
index 6434c2f..3261b78 100644
--- a/src/node/ext/call.cc
+++ b/src/node/ext/call.cc
@@ -33,6 +33,7 @@
 
 #include <node.h>
 
+#include "grpc/support/log.h"
 #include "grpc/grpc.h"
 #include "grpc/support/time.h"
 #include "byte_buffer.h"
@@ -173,31 +174,43 @@
     return NanThrowTypeError("addMetadata can only be called on Call objects");
   }
   Call *call = ObjectWrap::Unwrap<Call>(args.This());
-  for (int i = 0; !args[i]->IsUndefined(); i++) {
-    if (!args[i]->IsObject()) {
+  if (!args[0]->IsObject()) {
+    return NanThrowTypeError("addMetadata's first argument must be an object");
+  }
+  Handle<Object> metadata = args[0]->ToObject();
+  Handle<Array> keys(metadata->GetOwnPropertyNames());
+  for (unsigned int i = 0; i < keys->Length(); i++) {
+    Handle<String> current_key(keys->Get(i)->ToString());
+    if (!metadata->Get(current_key)->IsArray()) {
       return NanThrowTypeError(
-          "addMetadata arguments must be objects with key and value");
+          "addMetadata's first argument's values must be arrays");
     }
-    Handle<Object> item = args[i]->ToObject();
-    Handle<Value> key = item->Get(NanNew("key"));
-    if (!key->IsString()) {
-      return NanThrowTypeError(
-          "objects passed to addMetadata must have key->string");
-    }
-    Handle<Value> value = item->Get(NanNew("value"));
-    if (!Buffer::HasInstance(value)) {
-      return NanThrowTypeError(
-          "objects passed to addMetadata must have value->Buffer");
-    }
-    grpc_metadata metadata;
-    NanUtf8String utf8_key(key);
-    metadata.key = *utf8_key;
-    metadata.value = Buffer::Data(value);
-    metadata.value_length = Buffer::Length(value);
-    grpc_call_error error =
-        grpc_call_add_metadata(call->wrapped_call, &metadata, 0);
-    if (error != GRPC_CALL_OK) {
-      return NanThrowError("addMetadata failed", error);
+    NanUtf8String utf8_key(current_key);
+    Handle<Array> values = Local<Array>::Cast(metadata->Get(current_key));
+    for (unsigned int j = 0; j < values->Length(); j++) {
+      Handle<Value> value = values->Get(j);
+      grpc_metadata metadata;
+      grpc_call_error error;
+      metadata.key = *utf8_key;
+      if (Buffer::HasInstance(value)) {
+        metadata.value = Buffer::Data(value);
+        metadata.value_length = Buffer::Length(value);
+        error = grpc_call_add_metadata(call->wrapped_call, &metadata, 0);
+      } else if (value->IsString()) {
+        Handle<String> string_value = value->ToString();
+        NanUtf8String utf8_value(string_value);
+        metadata.value = *utf8_value;
+        metadata.value_length = string_value->Length();
+        gpr_log(GPR_DEBUG, "adding metadata: %s, %s, %d", metadata.key,
+                metadata.value, (int)metadata.value_length);
+        error = grpc_call_add_metadata(call->wrapped_call, &metadata, 0);
+      } else {
+        return NanThrowTypeError(
+            "addMetadata values must be strings or buffers");
+      }
+      if (error != GRPC_CALL_OK) {
+        return NanThrowError("addMetadata failed", error);
+      }
     }
   }
   NanReturnUndefined();
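Editor's note: the rewritten addMetadata takes a single object mapping each metadata key to an array of string or Buffer values, replacing the per-{key, value} object form removed above. A JavaScript-side sketch of the new argument shape, mirroring the updated call_test.js later in this patch; the call object is assumed to be an existing grpc.Call.

  // Sketch of the new addMetadata argument shape (illustrative keys/values).
  call.addMetadata({
    'key1': ['a string value'],            // string values are accepted
    'key2': [new Buffer('binary value')]   // Buffer values are accepted
  });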
diff --git a/src/node/ext/event.cc b/src/node/ext/event.cc
index 2ca38b7..b944606 100644
--- a/src/node/ext/event.cc
+++ b/src/node/ext/event.cc
@@ -31,6 +31,8 @@
  *
  */
 
+#include <map>
+
 #include <node.h>
 #include <nan.h>
 #include "grpc/grpc.h"
@@ -43,6 +45,7 @@
 namespace grpc {
 namespace node {
 
+using ::node::Buffer;
 using v8::Array;
 using v8::Date;
 using v8::Handle;
@@ -53,6 +56,37 @@
 using v8::String;
 using v8::Value;
 
+Handle<Value> ParseMetadata(grpc_metadata *metadata_elements, size_t length) {
+  NanEscapableScope();
+  std::map<char*, size_t> size_map;
+  std::map<char*, size_t> index_map;
+
+  for (unsigned int i = 0; i < length; i++) {
+    char *key = metadata_elements[i].key;
+    if (size_map.count(key)) {
+      size_map[key] += 1;
+    } else {
+      size_map[key] = 1;
+    }
+    index_map[key] = 0;
+  }
+  Handle<Object> metadata_object = NanNew<Object>();
+  for (unsigned int i = 0; i < length; i++) {
+    grpc_metadata* elem = &metadata_elements[i];
+    Handle<String> key_string = String::New(elem->key);
+    Handle<Array> array;
+    if (metadata_object->Has(key_string)) {
+      array = Handle<Array>::Cast(metadata_object->Get(key_string));
+    } else {
+      array = NanNew<Array>(size_map[elem->key]);
+      metadata_object->Set(key_string, array);
+    }
+    array->Set(index_map[elem->key],
+               MakeFastBuffer(
+                   NanNewBufferHandle(elem->value, elem->value_length)));
+    index_map[elem->key] += 1;
+  }
+  return NanEscapeScope(metadata_object);
+}
+
 Handle<Value> GetEventData(grpc_event *event) {
   NanEscapableScope();
   size_t count;
@@ -72,18 +106,7 @@
     case GRPC_CLIENT_METADATA_READ:
       count = event->data.client_metadata_read.count;
       items = event->data.client_metadata_read.elements;
-      metadata = NanNew<Array>(static_cast<int>(count));
-      for (unsigned int i = 0; i < count; i++) {
-        Handle<Object> item_obj = NanNew<Object>();
-        item_obj->Set(NanNew<String, const char *>("key"),
-                      NanNew<String, char *>(items[i].key));
-        item_obj->Set(
-            NanNew<String, const char *>("value"),
-            NanNew<String, char *>(items[i].value,
-                                   static_cast<int>(items[i].value_length)));
-        metadata->Set(i, item_obj);
-      }
-      return NanEscapeScope(metadata);
+      return NanEscapeScope(ParseMetadata(items, count));
     case GRPC_FINISHED:
       status = NanNew<Object>();
       status->Set(NanNew("code"), NanNew<Number>(event->data.finished.status));
@@ -93,18 +116,7 @@
       }
       count = event->data.finished.metadata_count;
       items = event->data.finished.metadata_elements;
-      metadata = NanNew<Array>(static_cast<int>(count));
-      for (unsigned int i = 0; i < count; i++) {
-        Handle<Object> item_obj = NanNew<Object>();
-        item_obj->Set(NanNew<String, const char *>("key"),
-                      NanNew<String, char *>(items[i].key));
-        item_obj->Set(
-            NanNew<String, const char *>("value"),
-            NanNew<String, char *>(items[i].value,
-                                   static_cast<int>(items[i].value_length)));
-        metadata->Set(i, item_obj);
-      }
-      status->Set(NanNew("metadata"), metadata);
+      status->Set(NanNew("metadata"), ParseMetadata(items, count));
       return NanEscapeScope(status);
     case GRPC_SERVER_RPC_NEW:
       rpc_new = NanNew<Object>();
@@ -133,7 +145,7 @@
                                    static_cast<int>(items[i].value_length)));
         metadata->Set(i, item_obj);
       }
-      rpc_new->Set(NanNew<String, const char *>("metadata"), metadata);
+      rpc_new->Set(NanNew("metadata"), ParseMetadata(items, count));
       return NanEscapeScope(rpc_new);
     default:
       return NanEscapeScope(NanNull());
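Editor's note: with ParseMetadata in place, metadata events surface in JavaScript as an object keyed by metadata name, each value an array of Buffers (one entry per occurrence of the key). A sketch of reading it from a client-side invoke, following the pattern used in end_to_end_test.js later in this patch; call is assumed to be an existing grpc.Call.

  // Illustrative only: event.data for CLIENT_METADATA_READ is now an object
  // such as {'server_key': [<Buffer>], 'other-key': [<Buffer>, <Buffer>]}.
  call.invoke(function(event) {
    // GRPC_CLIENT_METADATA_READ: event.data is the parsed metadata object.
    console.log(event.data.server_key[0].toString());
  }, function(event) {
    // GRPC_FINISHED
  }, 0);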
diff --git a/src/node/interop/interop_client.js b/src/node/interop/interop_client.js
index 9306317..ce18f77 100644
--- a/src/node/interop/interop_client.js
+++ b/src/node/interop/interop_client.js
@@ -199,7 +199,6 @@
 
 /**
  * Run the empty_stream test.
- * NOTE: This does not work, but should with the new invoke API
  * @param {Client} client The client to test against
  * @param {function} done Callback to call when the test is completed. Included
  *     primarily for use with mocha
@@ -219,6 +218,44 @@
 }
 
 /**
+ * Run the cancel_after_begin test.
+ * @param {Client} client The client to test against
+ * @param {function} done Callback to call when the test is completed. Included
+ *     primarily for use with mocha
+ */
+function cancelAfterBegin(client, done) {
+  var call = client.streamingInputCall(function(err, resp) {
+    assert.strictEqual(err.code, grpc.status.CANCELLED);
+    done();
+  });
+  call.cancel();
+}
+
+/**
+ * Run the cancel_after_first_response test.
+ * @param {Client} client The client to test against
+ * @param {function} done Callback to call when the test is completed. Included
+ *     primarily for use with mocha
+ */
+function cancelAfterFirstResponse(client, done) {
+  var call = client.fullDuplexCall();
+  call.write({
+      response_type: testProto.PayloadType.COMPRESSABLE,
+      response_parameters: [
+        {size: 31415}
+      ],
+      payload: {body: zeroBuffer(27182)}
+  });
+  call.on('data', function(data) {
+    call.cancel();
+  });
+  call.on('status', function(status) {
+    assert.strictEqual(status.code, grpc.status.CANCELLED);
+    done();
+  });
+}
+
+/**
  * Map from test case names to test functions
  */
 var test_cases = {
@@ -227,7 +264,9 @@
   client_streaming: clientStreaming,
   server_streaming: serverStreaming,
   ping_pong: pingPong,
-  empty_stream: emptyStream
+  empty_stream: emptyStream,
+  cancel_after_begin: cancelAfterBegin,
+  cancel_after_first_response: cancelAfterFirstResponse
 };
 
 /**
diff --git a/src/node/interop/interop_server.js b/src/node/interop/interop_server.js
index ebf8478..54e9715 100644
--- a/src/node/interop/interop_server.js
+++ b/src/node/interop/interop_server.js
@@ -183,7 +183,7 @@
       fullDuplexCall: handleFullDuplex,
       halfDuplexCall: handleHalfDuplex
     }
-  }, options);
+  }, null, options);
   var port_num = server.bind('0.0.0.0:' + port, tls);
   return {server: server, port: port_num};
 }
diff --git a/src/node/src/server.js b/src/node/src/server.js
index 03cdbe6..e4f71ff 100644
--- a/src/node/src/server.js
+++ b/src/node/src/server.js
@@ -202,10 +202,13 @@
  * Constructs a server object that stores request handlers and delegates
  * incoming requests to those handlers
  * @constructor
- * @param {Array} options Options that should be passed to the internal server
+ * @param {function(string, Object<string, Array<Buffer>>):
+           Object<string, Array<Buffer|string>>=} getMetadata Callback that gets
+ *     metadata for a given method
+ * @param {Object=} options Options that should be passed to the internal server
  *     implementation
  */
-function Server(options) {
+function Server(getMetadata, options) {
   this.handlers = {};
   var handlers = this.handlers;
   var server = new grpc.Server(options);
@@ -240,15 +243,27 @@
       var handler = undefined;
       var deadline = data.absolute_deadline;
       var cancelled = false;
-      if (handlers.hasOwnProperty(data.method)) {
-        handler = handlers[data.method];
-      }
       call.serverAccept(function(event) {
         if (event.data.code === grpc.status.CANCELLED) {
           cancelled = true;
-          stream.emit('cancelled');
+          if (stream) {
+            stream.emit('cancelled');
+          }
         }
       }, 0);
+      if (handlers.hasOwnProperty(data.method)) {
+        handler = handlers[data.method];
+      } else {
+        call.serverEndInitialMetadata(0);
+        call.startWriteStatus(
+            grpc.status.UNIMPLEMENTED,
+            "This method is not available on this server.",
+            function() {});
+        return;
+      }
+      if (getMetadata) {
+        call.addMetadata(getMetadata(data.method, data.metadata));
+      }
       call.serverEndInitialMetadata(0);
       var stream = new GrpcServerStream(call, handler.serialize,
                                         handler.deserialize);
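Editor's note: the Server constructor now takes an optional getMetadata callback ahead of options. A minimal construction sketch mirroring the updated client_server_test.js later in this patch; echoHandler is a placeholder handler function.

  // Sketch of the new constructor signature; the handler is a placeholder.
  var server = new Server(function getMetadata(method, metadata) {
    // Attach the called method name as response metadata.
    return {method: [method]};
  });
  server.register('echo', echoHandler);
  var port_num = server.bind('0.0.0.0:0');
  server.start();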
diff --git a/src/node/src/surface_server.js b/src/node/src/surface_server.js
index af23ec2..a47d1fa 100644
--- a/src/node/src/surface_server.js
+++ b/src/node/src/surface_server.js
@@ -129,16 +129,18 @@
 
 /**
  * Creates a binary stream handler function from a unary handler function
- * @param {function(Object, function(Error, *))} handler Unary call handler
- * @return {function(stream)} Binary stream handler
+ * @param {function(Object, function(Error, *), metadata=)} handler Unary call
+ *     handler
+ * @return {function(stream, metadata=)} Binary stream handler
  */
 function makeUnaryHandler(handler) {
   /**
    * Handles a stream by reading a single data value, passing it to the handler,
    * and writing the response back to the stream.
    * @param {stream} stream Binary data stream
+   * @param {metadata=} metadata Incoming metadata array
    */
-  return function handleUnaryCall(stream) {
+  return function handleUnaryCall(stream, metadata) {
     stream.on('data', function handleUnaryData(value) {
       var call = {request: value};
       Object.defineProperty(call, 'cancelled', {
@@ -154,7 +156,7 @@
           stream.write(value);
           stream.end();
         }
-      });
+      }, metadata);
     });
   };
 }
@@ -162,17 +164,18 @@
 /**
  * Creates a binary stream handler function from a client stream handler
  * function
- * @param {function(Readable, function(Error, *))} handler Client stream call
- *     handler
- * @return {function(stream)} Binary stream handler
+ * @param {function(Readable, function(Error, *), metadata=)} handler Client
+ *     stream call handler
+ * @return {function(stream, metadata=)} Binary stream handler
  */
 function makeClientStreamHandler(handler) {
   /**
    * Handles a stream by passing a deserializing stream to the handler and
    * writing the response back to the stream.
    * @param {stream} stream Binary data stream
+   * @param {metadata=} metadata Incoming metadata array
    */
-  return function handleClientStreamCall(stream) {
+  return function handleClientStreamCall(stream, metadata) {
     var object_stream = new ServerReadableObjectStream(stream);
     handler(object_stream, function sendClientStreamData(err, value) {
         if (err) {
@@ -181,35 +184,36 @@
           stream.write(value);
           stream.end();
         }
-    });
+    }, metadata);
   };
 }
 
 /**
  * Creates a binary stream handler function from a server stream handler
  * function
- * @param {function(Writable)} handler Server stream call handler
- * @return {function(stream)} Binary stream handler
+ * @param {function(Writable, metadata=)} handler Server stream call handler
+ * @return {function(stream, metadata=)} Binary stream handler
  */
 function makeServerStreamHandler(handler) {
   /**
    * Handles a stream by attaching it to a serializing stream, and passing it to
    * the handler.
    * @param {stream} stream Binary data stream
+   * @param {metadata=} metadata Incoming metadata array
    */
-  return function handleServerStreamCall(stream) {
+  return function handleServerStreamCall(stream, metadata) {
     stream.on('data', function handleClientData(value) {
       var object_stream = new ServerWritableObjectStream(stream);
       object_stream.request = value;
-      handler(object_stream);
+      handler(object_stream, metadata);
     });
   };
 }
 
 /**
  * Creates a binary stream handler function from a bidi stream handler function
- * @param {function(Duplex)} handler Unary call handler
- * @return {function(stream)} Binary stream handler
+ * @param {function(Duplex, metadata=)} handler Bidi stream call handler
+ * @return {function(stream, metadata=)} Binary stream handler
  */
 function makeBidiStreamHandler(handler) {
   return handler;
@@ -252,10 +256,13 @@
    * @constructor
    * @param {Object} service_handlers Map from service names to map from method
    *     names to handlers
-   * @param {Object} options Options to pass to the underlying server
+   * @param {function(string, Object<string, Array<Buffer>>):
+             Object<string, Array<Buffer|string>>=} getMetadata Callback that
+   *     gets metadata for a given method
+   * @param {Object=} options Options to pass to the underlying server
    */
-  function SurfaceServer(service_handlers, options) {
-    var server = new Server(options);
+  function SurfaceServer(service_handlers, getMetadata, options) {
+    var server = new Server(getMetadata, options);
     this.inner_server = server;
     _.each(services, function(service) {
       var service_name = common.fullyQualifiedName(service);
diff --git a/src/node/test/call_test.js b/src/node/test/call_test.js
index b37c44a..dfa9aaa 100644
--- a/src/node/test/call_test.js
+++ b/src/node/test/call_test.js
@@ -99,25 +99,31 @@
     });
   });
   describe('addMetadata', function() {
-    it('should succeed with objects containing keys and values', function() {
+    it('should succeed with a map from strings to string arrays', function() {
       var call = new grpc.Call(channel, 'method', getDeadline(1));
       assert.doesNotThrow(function() {
-        call.addMetadata();
+        call.addMetadata({'key': ['value']});
       });
       assert.doesNotThrow(function() {
-        call.addMetadata({'key' : 'key',
-                          'value' : new Buffer('value')});
+        call.addMetadata({'key1': ['value1'], 'key2': ['value2']});
+      });
+    });
+    it('should succeed with a map from strings to buffer arrays', function() {
+      var call = new grpc.Call(channel, 'method', getDeadline(1));
+      assert.doesNotThrow(function() {
+        call.addMetadata({'key': [new Buffer('value')]});
       });
       assert.doesNotThrow(function() {
-        call.addMetadata({'key' : 'key1',
-                          'value' : new Buffer('value1')},
-                         {'key' : 'key2',
-                          'value' : new Buffer('value2')});
+        call.addMetadata({'key1': [new Buffer('value1')],
+                          'key2': [new Buffer('value2')]});
       });
     });
     it('should fail with other parameter types', function() {
       var call = new grpc.Call(channel, 'method', getDeadline(1));
       assert.throws(function() {
+        call.addMetadata();
+      });
+      assert.throws(function() {
         call.addMetadata(null);
       }, TypeError);
       assert.throws(function() {
@@ -133,7 +139,7 @@
                   function() {done();},
                   0);
       assert.throws(function() {
-        call.addMetadata({'key' : 'key', 'value' : new Buffer('value') });
+        call.addMetadata({'key': ['value']});
       }, function(err) {
         return err.code === grpc.callError.ALREADY_INVOKED;
       });
diff --git a/src/node/test/client_server_test.js b/src/node/test/client_server_test.js
index 9e1b2a7..1db9f69 100644
--- a/src/node/test/client_server_test.js
+++ b/src/node/test/client_server_test.js
@@ -84,6 +84,10 @@
   // do nothing
 }
 
+function metadataHandler(stream, metadata) {
+  stream.end();
+}
+
 /**
  * Serialize a string to a Buffer
  * @param {string} value The string to serialize
@@ -106,11 +110,14 @@
   var server;
   var channel;
   before(function() {
-    server = new Server();
+    server = new Server(function getMetadata(method, metadata) {
+      return {method: [method]};
+    });
     var port_num = server.bind('0.0.0.0:0');
     server.register('echo', echoHandler);
     server.register('error', errorHandler);
     server.register('cancellation', cancelHandler);
+    server.register('metadata', metadataHandler);
     server.start();
 
     channel = new grpc.Channel('localhost:' + port_num);
@@ -142,12 +149,19 @@
       done();
     });
   });
+  it('should receive metadata set by the server', function(done) {
+    var stream = client.makeRequest(channel, 'metadata');
+    stream.on('metadata', function(metadata) {
+      assert.strictEqual(metadata.method[0].toString(), 'metadata');
+    });
+    stream.on('status', function(status) {
+      assert.equal(status.code, client.status.OK);
+      done();
+    });
+    stream.end();
+  });
   it('should get an error status that the server throws', function(done) {
-    var stream = client.makeRequest(
-        channel,
-        'error',
-        null,
-        getDeadline(1));
+    var stream = client.makeRequest(channel, 'error');
 
     stream.on('data', function() {});
     stream.write(new Buffer('test'));
@@ -171,6 +185,14 @@
       done();
     });
   });
+  it('should get correct status for unimplemented method', function(done) {
+    var stream = client.makeRequest(channel, 'unimplemented_method');
+    stream.end();
+    stream.on('status', function(status) {
+      assert.equal(status.code, grpc.status.UNIMPLEMENTED);
+      done();
+    });
+  });
 });
 /* TODO(mlumish): explore options for reducing duplication between this test
  * and the insecure echo client test */
@@ -189,7 +211,7 @@
                                                               key_data,
                                                               pem_data);
 
-          server = new Server({'credentials' : server_creds});
+          server = new Server(null, {'credentials' : server_creds});
           var port_num = server.bind('0.0.0.0:0', true);
           server.register('echo', echoHandler);
           server.start();
diff --git a/src/node/test/end_to_end_test.js b/src/node/test/end_to_end_test.js
index f8cb660..1f53df2 100644
--- a/src/node/test/end_to_end_test.js
+++ b/src/node/test/end_to_end_test.js
@@ -68,16 +68,14 @@
     server.shutdown();
   });
   it('should start and end a request without error', function(complete) {
-    var done = multiDone(function() {
-      complete();
-    }, 2);
+    var done = multiDone(complete, 2);
     var deadline = new Date();
     deadline.setSeconds(deadline.getSeconds() + 3);
     var status_text = 'xyz';
     var call = new grpc.Call(channel,
                              'dummy_method',
                              deadline);
-      call.invoke(function(event) {
+    call.invoke(function(event) {
       assert.strictEqual(event.type,
                          grpc.completionType.CLIENT_METADATA_READ);
     },function(event) {
@@ -112,13 +110,58 @@
       assert.strictEqual(event.data, grpc.opError.OK);
     });
   });
+  it('should successfully send and receive metadata', function(complete) {
+    var done = multiDone(complete, 2);
+    var deadline = new Date();
+    deadline.setSeconds(deadline.getSeconds() + 3);
+    var status_text = 'xyz';
+    var call = new grpc.Call(channel,
+                             'dummy_method',
+                             deadline);
+    call.addMetadata({'client_key': ['client_value']});
+    call.invoke(function(event) {
+      assert.strictEqual(event.type,
+                         grpc.completionType.CLIENT_METADATA_READ);
+      assert.strictEqual(event.data.server_key[0].toString(), 'server_value');
+    },function(event) {
+      assert.strictEqual(event.type, grpc.completionType.FINISHED);
+      var status = event.data;
+      assert.strictEqual(status.code, grpc.status.OK);
+      assert.strictEqual(status.details, status_text);
+      done();
+    }, 0);
+
+    server.requestCall(function(event) {
+      assert.strictEqual(event.type, grpc.completionType.SERVER_RPC_NEW);
+      assert.strictEqual(event.data.metadata.client_key[0].toString(),
+                         'client_value');
+      var server_call = event.call;
+      assert.notEqual(server_call, null);
+      server_call.serverAccept(function(event) {
+        assert.strictEqual(event.type, grpc.completionType.FINISHED);
+      }, 0);
+      server_call.addMetadata({'server_key': ['server_value']});
+      server_call.serverEndInitialMetadata(0);
+      server_call.startWriteStatus(
+          grpc.status.OK,
+          status_text,
+          function(event) {
+            assert.strictEqual(event.type,
+                               grpc.completionType.FINISH_ACCEPTED);
+            assert.strictEqual(event.data, grpc.opError.OK);
+            done();
+          });
+    });
+    call.writesDone(function(event) {
+      assert.strictEqual(event.type,
+                         grpc.completionType.FINISH_ACCEPTED);
+      assert.strictEqual(event.data, grpc.opError.OK);
+    });
+  });
   it('should send and receive data without error', function(complete) {
     var req_text = 'client_request';
     var reply_text = 'server_response';
-    var done = multiDone(function() {
-      complete();
-      server.shutdown();
-    }, 6);
+    var done = multiDone(complete, 6);
     var deadline = new Date();
     deadline.setSeconds(deadline.getSeconds() + 3);
     var status_text = 'success';
diff --git a/src/node/test/interop_sanity_test.js b/src/node/test/interop_sanity_test.js
index 6cc7d44..7ecaad8 100644
--- a/src/node/test/interop_sanity_test.js
+++ b/src/node/test/interop_sanity_test.js
@@ -71,4 +71,12 @@
   it('should pass empty_stream', function(done) {
     interop_client.runTest(port, name_override, 'empty_stream', true, done);
   });
+  it('should pass cancel_after_begin', function(done) {
+    interop_client.runTest(port, name_override, 'cancel_after_begin', true,
+                           done);
+  });
+  it('should pass cancel_after_first_response', function(done) {
+    interop_client.runTest(port, name_override, 'cancel_after_first_response',
+                           true, done);
+  });
 });
diff --git a/src/node/test/server_test.js b/src/node/test/server_test.js
index 5fad9a5..a3e1edf 100644
--- a/src/node/test/server_test.js
+++ b/src/node/test/server_test.js
@@ -75,6 +75,9 @@
 
     channel = new grpc.Channel('localhost:' + port_num);
   });
+  after(function() {
+    server.shutdown();
+  });
   it('should echo inputs as responses', function(done) {
     done = multiDone(done, 4);
 
@@ -95,7 +98,6 @@
       var status = event.data;
       assert.strictEqual(status.code, grpc.status.OK);
       assert.strictEqual(status.details, status_text);
-      server.shutdown();
       done();
     }, 0);
     call.startWrite(
diff --git a/src/node/test/surface_test.js b/src/node/test/surface_test.js
index 85f4841..1038f9a 100644
--- a/src/node/test/surface_test.js
+++ b/src/node/test/surface_test.js
@@ -75,7 +75,7 @@
     }, /math.Math/);
   });
 });
-describe('Surface client', function() {
+describe('Cancelling surface client', function() {
   var client;
   var server;
   before(function() {
diff --git a/src/php/ext/grpc/byte_buffer.c b/src/php/ext/grpc/byte_buffer.c
index e2f63e3..29d6fa0 100644
--- a/src/php/ext/grpc/byte_buffer.c
+++ b/src/php/ext/grpc/byte_buffer.c
@@ -16,9 +16,10 @@
 #include "grpc/support/slice.h"
 
 grpc_byte_buffer *string_to_byte_buffer(char *string, size_t length) {
-  gpr_slice slice = gpr_slice_malloc(length);
-  memcpy(GPR_SLICE_START_PTR(slice), string, length);
-  return grpc_byte_buffer_create(&slice, 1);
+  gpr_slice slice = gpr_slice_from_copied_buffer(string, length);
+  grpc_byte_buffer *buffer = grpc_byte_buffer_create(&slice, 1);
+  gpr_slice_unref(slice);
+  return buffer;
 }
 
 void byte_buffer_to_string(grpc_byte_buffer *buffer, char **out_string,
diff --git a/src/php/lib/Grpc/ActiveCall.php b/src/php/lib/Grpc/ActiveCall.php
index 836a4b0..e0ea43a 100755
--- a/src/php/lib/Grpc/ActiveCall.php
+++ b/src/php/lib/Grpc/ActiveCall.php
@@ -28,9 +28,9 @@
     $this->flags = $flags;
 
     // Invoke the call.
-    $this->call->start_invoke($this->completion_queue,
-                              CLIENT_METADATA_READ,
-                              FINISHED, 0);
+    $this->call->invoke($this->completion_queue,
+                        CLIENT_METADATA_READ,
+                        FINISHED, 0);
     $metadata_event = $this->completion_queue->pluck(CLIENT_METADATA_READ,
                                                      Timeval::inf_future());
     $this->metadata = $metadata_event->data;
diff --git a/src/php/lib/Grpc/BaseStub.php b/src/php/lib/Grpc/BaseStub.php
index e1feb12..ff293c0 100755
--- a/src/php/lib/Grpc/BaseStub.php
+++ b/src/php/lib/Grpc/BaseStub.php
@@ -10,8 +10,8 @@
 
   private $channel;
 
-  public function __construct($hostname) {
-    $this->channel = new Channel($hostname, []);
+  public function __construct($hostname, $opts) {
+    $this->channel = new Channel($hostname, $opts);
   }
 
   /**
@@ -33,10 +33,10 @@
    * @param array $metadata A metadata map to send to the server
    * @return SimpleSurfaceActiveCall The active call object
    */
-  protected function _simpleRequest($method,
-                                    $argument,
-                                    callable $deserialize,
-                                    $metadata = array()) {
+  public function _simpleRequest($method,
+                                 $argument,
+                                 callable $deserialize,
+                                 $metadata = array()) {
     return new SimpleSurfaceActiveCall($this->channel,
                                        $method,
                                        $deserialize,
@@ -55,10 +55,10 @@
    * @param array $metadata A metadata map to send to the server
    * @return ClientStreamingSurfaceActiveCall The active call object
    */
-  protected function _clientStreamRequest($method,
-                                          $arguments,
-                                          callable $deserialize,
-                                          $metadata = array()) {
+  public function _clientStreamRequest($method,
+                                       $arguments,
+                                       callable $deserialize,
+                                       $metadata = array()) {
     return new ClientStreamingSurfaceActiveCall($this->channel,
                                                 $method,
                                                 $deserialize,
@@ -76,10 +76,10 @@
    * @param array $metadata A metadata map to send to the server
    * @return ServerStreamingSurfaceActiveCall The active call object
    */
-  protected function _serverStreamRequest($method,
-                                          $argument,
-                                          callable $deserialize,
-                                          $metadata = array()) {
+  public function _serverStreamRequest($method,
+                                       $argument,
+                                       callable $deserialize,
+                                       $metadata = array()) {
     return new ServerStreamingSurfaceActiveCall($this->channel,
                                                 $method,
                                                 $deserialize,
@@ -95,9 +95,9 @@
    * @param array $metadata A metadata map to send to the server
    * @return BidiStreamingSurfaceActiveCall The active call object
    */
-  protected function _bidiRequest($method,
-                                  callable $deserialize,
-                                  $metadata = array()) {
+  public function _bidiRequest($method,
+                               callable $deserialize,
+                               $metadata = array()) {
     return new BidiStreamingSurfaceActiveCall($this->channel,
                                               $method,
                                               $deserialize,
diff --git a/src/php/tests/interop/empty.php b/src/php/tests/interop/empty.php
index 0107f25..22b1180 100755
--- a/src/php/tests/interop/empty.php
+++ b/src/php/tests/interop/empty.php
@@ -1,9 +1,9 @@
 <?php
 // DO NOT EDIT! Generated by Protobuf-PHP protoc plugin 1.0
-// Source: net/proto2/proto/empty.proto
-//   Date: 2014-12-03 22:02:20
+// Source: test/cpp/interop/empty.proto
+//   Date: 2015-01-30 23:30:46
 
-namespace proto2 {
+namespace grpc\testing {
 
   class EmptyMessage extends \DrSlump\Protobuf\Message {
 
@@ -13,7 +13,7 @@
 
     public static function descriptor()
     {
-      $descriptor = new \DrSlump\Protobuf\Descriptor(__CLASS__, 'proto2.EmptyMessage');
+      $descriptor = new \DrSlump\Protobuf\Descriptor(__CLASS__, 'grpc.testing.EmptyMessage');
 
       foreach (self::$__extensions as $cb) {
         $descriptor->addField($cb(), true);
@@ -23,4 +23,3 @@
     }
   }
 }
-
diff --git a/src/php/tests/interop/interop_client.php b/src/php/tests/interop/interop_client.php
index 43da47f..2ff2be7 100755
--- a/src/php/tests/interop/interop_client.php
+++ b/src/php/tests/interop/interop_client.php
@@ -25,7 +25,7 @@
  * @param $stub Stub object that has service methods
  */
 function emptyUnary($stub) {
-  list($result, $status) = $stub->EmptyCall(new proto2\EmptyMessage())->wait();
+  list($result, $status) = $stub->EmptyCall(new grpc\testing\EmptyMessage())->wait();
   hardAssert($status->code == Grpc\STATUS_OK, 'Call did not complete successfully');
   hardAssert($result != null, 'Call completed with a null response');
 }
@@ -161,11 +161,12 @@
 $credentials = Grpc\Credentials::createSsl(
     file_get_contents(dirname(__FILE__) . '/../data/ca.pem'));
 $stub = new grpc\testing\TestServiceClient(
-    $server_address,
-    [
-        'grpc.ssl_target_name_override' => 'foo.test.google.com',
-        'credentials' => $credentials
-     ]);
+    new Grpc\BaseStub(
+        $server_address,
+        [
+            'grpc.ssl_target_name_override' => 'foo.test.google.com',
+            'credentials' => $credentials
+         ]));
 
 echo "Connecting to $server_address\n";
 echo "Running test case $args[test_case]\n";
diff --git a/src/php/tests/interop/messages.php b/src/php/tests/interop/messages.php
index beaec7c..129c96f 100755
--- a/src/php/tests/interop/messages.php
+++ b/src/php/tests/interop/messages.php
@@ -1,7 +1,7 @@
 <?php
 // DO NOT EDIT! Generated by Protobuf-PHP protoc plugin 1.0
-// Source: third_party/stubby/testing/proto/messages.proto
-//   Date: 2014-12-03 22:02:20
+// Source: test/cpp/interop/messages.proto
+//   Date: 2015-01-30 23:30:46
 
 namespace grpc\testing {
 
@@ -142,6 +142,12 @@
     /**  @var \grpc\testing\Payload */
     public $payload = null;
     
+    /**  @var boolean */
+    public $fill_username = null;
+    
+    /**  @var boolean */
+    public $fill_oauth_scope = null;
+    
 
     /** @var \Closure[] */
     protected static $__extensions = array();
@@ -176,6 +182,22 @@
       $f->reference = '\grpc\testing\Payload';
       $descriptor->addField($f);
 
+      // OPTIONAL BOOL fill_username = 4
+      $f = new \DrSlump\Protobuf\Field();
+      $f->number    = 4;
+      $f->name      = "fill_username";
+      $f->type      = \DrSlump\Protobuf::TYPE_BOOL;
+      $f->rule      = \DrSlump\Protobuf::RULE_OPTIONAL;
+      $descriptor->addField($f);
+
+      // OPTIONAL BOOL fill_oauth_scope = 5
+      $f = new \DrSlump\Protobuf\Field();
+      $f->number    = 5;
+      $f->name      = "fill_oauth_scope";
+      $f->type      = \DrSlump\Protobuf::TYPE_BOOL;
+      $f->rule      = \DrSlump\Protobuf::RULE_OPTIONAL;
+      $descriptor->addField($f);
+
       foreach (self::$__extensions as $cb) {
         $descriptor->addField($cb(), true);
       }
@@ -293,6 +315,80 @@
     public function setPayload(\grpc\testing\Payload $value){
       return $this->_set(3, $value);
     }
+    
+    /**
+     * Check if <fill_username> has a value
+     *
+     * @return boolean
+     */
+    public function hasFillUsername(){
+      return $this->_has(4);
+    }
+    
+    /**
+     * Clear <fill_username> value
+     *
+     * @return \grpc\testing\SimpleRequest
+     */
+    public function clearFillUsername(){
+      return $this->_clear(4);
+    }
+    
+    /**
+     * Get <fill_username> value
+     *
+     * @return boolean
+     */
+    public function getFillUsername(){
+      return $this->_get(4);
+    }
+    
+    /**
+     * Set <fill_username> value
+     *
+     * @param boolean $value
+     * @return \grpc\testing\SimpleRequest
+     */
+    public function setFillUsername( $value){
+      return $this->_set(4, $value);
+    }
+    
+    /**
+     * Check if <fill_oauth_scope> has a value
+     *
+     * @return boolean
+     */
+    public function hasFillOauthScope(){
+      return $this->_has(5);
+    }
+    
+    /**
+     * Clear <fill_oauth_scope> value
+     *
+     * @return \grpc\testing\SimpleRequest
+     */
+    public function clearFillOauthScope(){
+      return $this->_clear(5);
+    }
+    
+    /**
+     * Get <fill_oauth_scope> value
+     *
+     * @return boolean
+     */
+    public function getFillOauthScope(){
+      return $this->_get(5);
+    }
+    
+    /**
+     * Set <fill_oauth_scope> value
+     *
+     * @param boolean $value
+     * @return \grpc\testing\SimpleRequest
+     */
+    public function setFillOauthScope( $value){
+      return $this->_set(5, $value);
+    }
   }
 }
 
@@ -303,8 +399,11 @@
     /**  @var \grpc\testing\Payload */
     public $payload = null;
     
-    /**  @var int */
-    public $effective_gaia_user_id = null;
+    /**  @var string */
+    public $username = null;
+    
+    /**  @var string */
+    public $oauth_scope = null;
     
 
     /** @var \Closure[] */
@@ -323,11 +422,19 @@
       $f->reference = '\grpc\testing\Payload';
       $descriptor->addField($f);
 
-      // OPTIONAL INT64 effective_gaia_user_id = 2
+      // OPTIONAL STRING username = 2
       $f = new \DrSlump\Protobuf\Field();
       $f->number    = 2;
-      $f->name      = "effective_gaia_user_id";
-      $f->type      = \DrSlump\Protobuf::TYPE_INT64;
+      $f->name      = "username";
+      $f->type      = \DrSlump\Protobuf::TYPE_STRING;
+      $f->rule      = \DrSlump\Protobuf::RULE_OPTIONAL;
+      $descriptor->addField($f);
+
+      // OPTIONAL STRING oauth_scope = 3
+      $f = new \DrSlump\Protobuf\Field();
+      $f->number    = 3;
+      $f->name      = "oauth_scope";
+      $f->type      = \DrSlump\Protobuf::TYPE_STRING;
       $f->rule      = \DrSlump\Protobuf::RULE_OPTIONAL;
       $descriptor->addField($f);
 
@@ -376,109 +483,77 @@
     }
     
     /**
-     * Check if <effective_gaia_user_id> has a value
+     * Check if <username> has a value
      *
      * @return boolean
      */
-    public function hasEffectiveGaiaUserId(){
+    public function hasUsername(){
       return $this->_has(2);
     }
     
     /**
-     * Clear <effective_gaia_user_id> value
+     * Clear <username> value
      *
      * @return \grpc\testing\SimpleResponse
      */
-    public function clearEffectiveGaiaUserId(){
+    public function clearUsername(){
       return $this->_clear(2);
     }
     
     /**
-     * Get <effective_gaia_user_id> value
+     * Get <username> value
      *
-     * @return int
+     * @return string
      */
-    public function getEffectiveGaiaUserId(){
+    public function getUsername(){
       return $this->_get(2);
     }
     
     /**
-     * Set <effective_gaia_user_id> value
+     * Set <username> value
      *
-     * @param int $value
+     * @param string $value
      * @return \grpc\testing\SimpleResponse
      */
-    public function setEffectiveGaiaUserId( $value){
+    public function setUsername( $value){
       return $this->_set(2, $value);
     }
-  }
-}
-
-namespace grpc\testing {
-
-  class SimpleContext extends \DrSlump\Protobuf\Message {
-
-    /**  @var string */
-    public $value = null;
     
-
-    /** @var \Closure[] */
-    protected static $__extensions = array();
-
-    public static function descriptor()
-    {
-      $descriptor = new \DrSlump\Protobuf\Descriptor(__CLASS__, 'grpc.testing.SimpleContext');
-
-      // OPTIONAL STRING value = 1
-      $f = new \DrSlump\Protobuf\Field();
-      $f->number    = 1;
-      $f->name      = "value";
-      $f->type      = \DrSlump\Protobuf::TYPE_STRING;
-      $f->rule      = \DrSlump\Protobuf::RULE_OPTIONAL;
-      $descriptor->addField($f);
-
-      foreach (self::$__extensions as $cb) {
-        $descriptor->addField($cb(), true);
-      }
-
-      return $descriptor;
-    }
-
     /**
-     * Check if <value> has a value
+     * Check if <oauth_scope> has a value
      *
      * @return boolean
      */
-    public function hasValue(){
-      return $this->_has(1);
+    public function hasOauthScope(){
+      return $this->_has(3);
     }
     
     /**
-     * Clear <value> value
+     * Clear <oauth_scope> value
      *
-     * @return \grpc\testing\SimpleContext
+     * @return \grpc\testing\SimpleResponse
      */
-    public function clearValue(){
-      return $this->_clear(1);
+    public function clearOauthScope(){
+      return $this->_clear(3);
     }
     
     /**
-     * Get <value> value
+     * Get <oauth_scope> value
      *
      * @return string
      */
-    public function getValue(){
-      return $this->_get(1);
+    public function getOauthScope(){
+      return $this->_get(3);
     }
     
     /**
-     * Set <value> value
+     * Set <oauth_scope> value
      *
      * @param string $value
-     * @return \grpc\testing\SimpleContext
+     * @return \grpc\testing\SimpleResponse
      */
-    public function setValue( $value){
-      return $this->_set(1, $value);
+    public function setOauthScope( $value){
+      return $this->_set(3, $value);
     }
   }
 }
@@ -997,15 +1072,3 @@
   }
 }
 
-namespace {
-  \proto2\bridge\MessageSet::extension(function(){
-      // OPTIONAL MESSAGE grpc\testing\SimpleContext\message_set_extension = 71139615
-    $f = new \DrSlump\Protobuf\Field();
-    $f->number    = 71139615;
-    $f->name      = "grpc\testing\SimpleContext\message_set_extension";
-    $f->type      = \DrSlump\Protobuf::TYPE_MESSAGE;
-    $f->rule      = \DrSlump\Protobuf::RULE_OPTIONAL;
-    $f->reference = '\grpc\testing\SimpleContext';
-    return $f;
-  });
-}
\ No newline at end of file
diff --git a/src/php/tests/interop/test.php b/src/php/tests/interop/test.php
index fe6d0fb..014bbc9 100755
--- a/src/php/tests/interop/test.php
+++ b/src/php/tests/interop/test.php
@@ -1,52 +1,52 @@
 <?php
 // DO NOT EDIT! Generated by Protobuf-PHP protoc plugin 1.0
-// Source: third_party/stubby/testing/proto/test.proto
-//   Date: 2014-12-03 22:02:20
+// Source: test/cpp/interop/test.proto
+//   Date: 2015-01-30 23:30:46
 
 namespace grpc\testing {
 
-  class TestServiceClient extends \Grpc\BaseStub {
+  class TestServiceClient {
+
+    private $rpc_impl;
+
+    public function __construct($rpc_impl) {
+      $this->rpc_impl = $rpc_impl;
+    }
     /**
-     * @param proto2\EmptyMessage $input
-     * @return proto2\EmptyMessage
+     * @param grpc\testing\EmptyMessage $input
      */
-    public function EmptyCall(\proto2\EmptyMessage $argument, $metadata = array()) {
-      return $this->_simpleRequest('/TestService/EmptyCall', $argument, '\proto2\EmptyMessage::deserialize', $metadata);
+    public function EmptyCall(\grpc\testing\EmptyMessage $argument, $metadata = array()) {
+      return $this->rpc_impl->_simpleRequest('/grpc.testing.TestService/EmptyCall', $argument, '\grpc\testing\EmptyMessage::deserialize', $metadata);
     }
     /**
      * @param grpc\testing\SimpleRequest $input
-     * @return grpc\testing\SimpleResponse
      */
     public function UnaryCall(\grpc\testing\SimpleRequest $argument, $metadata = array()) {
-      return $this->_simpleRequest('/TestService/UnaryCall', $argument, '\grpc\testing\SimpleResponse::deserialize', $metadata);
+      return $this->rpc_impl->_simpleRequest('/grpc.testing.TestService/UnaryCall', $argument, '\grpc\testing\SimpleResponse::deserialize', $metadata);
     }
     /**
      * @param grpc\testing\StreamingOutputCallRequest $input
-     * @return grpc\testing\StreamingOutputCallResponse
      */
     public function StreamingOutputCall($argument, $metadata = array()) {
-      return $this->_serverStreamRequest('/TestService/StreamingOutputCall', $argument, '\grpc\testing\StreamingOutputCallResponse::deserialize', $metadata);
+      return $this->rpc_impl->_serverStreamRequest('/grpc.testing.TestService/StreamingOutputCall', $argument, '\grpc\testing\StreamingOutputCallResponse::deserialize', $metadata);
     }
     /**
      * @param grpc\testing\StreamingInputCallRequest $input
-     * @return grpc\testing\StreamingInputCallResponse
      */
     public function StreamingInputCall($arguments, $metadata = array()) {
-      return $this->_clientStreamRequest('/TestService/StreamingInputCall', $arguments, '\grpc\testing\StreamingInputCallResponse::deserialize', $metadata);
+      return $this->rpc_impl->_clientStreamRequest('/grpc.testing.TestService/StreamingInputCall', $arguments, '\grpc\testing\StreamingInputCallResponse::deserialize', $metadata);
     }
     /**
      * @param grpc\testing\StreamingOutputCallRequest $input
-     * @return grpc\testing\StreamingOutputCallResponse
      */
     public function FullDuplexCall($metadata = array()) {
-      return $this->_bidiRequest('/TestService/FullDuplexCall', '\grpc\testing\StreamingOutputCallResponse::deserialize', $metadata);
+      return $this->rpc_impl->_bidiRequest('/grpc.testing.TestService/FullDuplexCall', '\grpc\testing\StreamingOutputCallResponse::deserialize', $metadata);
     }
     /**
      * @param grpc\testing\StreamingOutputCallRequest $input
-     * @return grpc\testing\StreamingOutputCallResponse
      */
     public function HalfDuplexCall($metadata = array()) {
-      return $this->_bidiRequest('/TestService/HalfDuplexCall', '\grpc\testing\StreamingOutputCallResponse::deserialize', $metadata);
+      return $this->rpc_impl->_bidiRequest('/grpc.testing.TestService/HalfDuplexCall', '\grpc\testing\StreamingOutputCallResponse::deserialize', $metadata);
     }
   }
 }
diff --git a/src/python/setup.py b/src/python/setup.py
new file mode 100644
index 0000000..58dc3b1
--- /dev/null
+++ b/src/python/setup.py
@@ -0,0 +1,86 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""A setup module for the GRPC Python package."""
+
+from distutils import core as _core
+
+_EXTENSION_SOURCES = (
+    'src/_adapter/_c.c',
+    'src/_adapter/_call.c',
+    'src/_adapter/_channel.c',
+    'src/_adapter/_completion_queue.c',
+    'src/_adapter/_error.c',
+    'src/_adapter/_server.c',
+)
+
+_EXTENSION_INCLUDE_DIRECTORIES = (
+    'src',
+    # TODO(nathaniel): Can this path specification be made to work?
+    #'../../include',
+)
+
+_EXTENSION_LIBRARIES = (
+    'gpr',
+    'grpc',
+)
+
+_EXTENSION_LIBRARY_DIRECTORIES = (
+    # TODO(nathaniel): Can this path specification be made to work?
+    #'../../libs/dbg',
+)
+
+_EXTENSION_MODULE = _core.Extension(
+    '_adapter._c', sources=list(_EXTENSION_SOURCES),
+    include_dirs=_EXTENSION_INCLUDE_DIRECTORIES,
+    libraries=_EXTENSION_LIBRARIES,
+    library_dirs=_EXTENSION_LIBRARY_DIRECTORIES)
+
+_PACKAGES = (
+    '_adapter',
+    '_framework',
+    '_framework.base',
+    '_framework.base.packets',
+    '_framework.common',
+    '_framework.face',
+    '_framework.face.testing',
+    '_framework.foundation',
+    '_junkdrawer',
+)
+
+_PACKAGE_DIRECTORIES = {
+    '_adapter': 'src/_adapter',
+    '_framework': 'src/_framework',
+    '_junkdrawer': 'src/_junkdrawer',
+}
+
+_core.setup(
+    name='grpc', version='0.0.1',
+    ext_modules=[_EXTENSION_MODULE], packages=_PACKAGES,
+    package_dir=_PACKAGE_DIRECTORIES)
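The include and library directories above are left as TODOs, so building the extension presumably requires supplying the gRPC headers and the built gpr/grpc libraries at build time. A minimal sketch of such an invocation follows; the paths and flags are assumptions (standard distutils build_ext options pointed at the commented-out locations), not part of this commit.

import subprocess
import sys

# Hypothetical local build: hand distutils' build_ext the header and library
# locations that the TODOs in setup.py leave unset.
subprocess.check_call(
    [sys.executable, 'setup.py', 'build_ext',
     '--include-dirs=../../include',
     '--library-dirs=../../libs/dbg',
     '--inplace'],
    cwd='src/python')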
diff --git a/src/python/__init__.py b/src/python/src/__init__.py
similarity index 100%
rename from src/python/__init__.py
rename to src/python/src/__init__.py
diff --git a/src/python/_junkdrawer/__init__.py b/src/python/src/_adapter/__init__.py
similarity index 100%
copy from src/python/_junkdrawer/__init__.py
copy to src/python/src/_adapter/__init__.py
diff --git a/src/python/src/_adapter/_blocking_invocation_inline_service_test.py b/src/python/src/_adapter/_blocking_invocation_inline_service_test.py
new file mode 100644
index 0000000..873ce9a
--- /dev/null
+++ b/src/python/src/_adapter/_blocking_invocation_inline_service_test.py
@@ -0,0 +1,17 @@
+"""One of the tests of the Face layer of RPC Framework."""
+
+import unittest
+
+from _adapter import _face_test_case
+from _framework.face.testing import blocking_invocation_inline_service_test_case as test_case
+
+
+class BlockingInvocationInlineServiceTest(
+    _face_test_case.FaceTestCase,
+    test_case.BlockingInvocationInlineServiceTestCase,
+    unittest.TestCase):
+  pass
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/src/python/src/_adapter/_c.c b/src/python/src/_adapter/_c.c
new file mode 100644
index 0000000..d1f7fbb
--- /dev/null
+++ b/src/python/src/_adapter/_c.c
@@ -0,0 +1,77 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <Python.h>
+#include <grpc/grpc.h>
+
+#include "_adapter/_completion_queue.h"
+#include "_adapter/_channel.h"
+#include "_adapter/_call.h"
+#include "_adapter/_server.h"
+
+static PyObject *init(PyObject *self, PyObject *args) {
+  grpc_init();
+  Py_RETURN_NONE;
+}
+
+static PyObject *shutdown(PyObject *self, PyObject *args) {
+  grpc_shutdown();
+  Py_RETURN_NONE;
+}
+
+static PyMethodDef _c_methods[] = {
+    {"init", init, METH_VARARGS, "Initialize the module's static state."},
+    {"shut_down", shutdown, METH_VARARGS,
+     "Shut down the module's static state."},
+    {NULL},
+};
+
+PyMODINIT_FUNC init_c(void) {
+  PyObject *module;
+
+  module = Py_InitModule3("_c", _c_methods,
+                          "Wrappings of C structures and functions.");
+
+  if (pygrpc_add_completion_queue(module) == -1) {
+    return;
+  }
+  if (pygrpc_add_channel(module) == -1) {
+    return;
+  }
+  if (pygrpc_add_call(module) == -1) {
+    return;
+  }
+  if (pygrpc_add_server(module) == -1) {
+    return;
+  }
+}
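For orientation, a short sketch of what successful initialization of this module is expected to expose: init and shut_down wrap grpc_init/grpc_shutdown, and each pygrpc_add_* call above registers one wrapped type on the module. The check below is purely illustrative and is not part of the commit.

from _adapter import _c

_c.init()
try:
  # Each name is registered by the corresponding pygrpc_add_* call in init_c.
  for name in ('CompletionQueue', 'Channel', 'Call', 'Server'):
    assert hasattr(_c, name), 'missing wrapped type: %s' % name
finally:
  _c.shut_down()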
diff --git a/src/python/src/_adapter/_c_test.py b/src/python/src/_adapter/_c_test.py
new file mode 100644
index 0000000..bc0a622
--- /dev/null
+++ b/src/python/src/_adapter/_c_test.py
@@ -0,0 +1,141 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Tests for _adapter._c."""
+
+import threading
+import time
+import unittest
+
+from _adapter import _c
+from _adapter import _datatypes
+
+_TIMEOUT = 3
+_FUTURE = time.time() + 60 * 60 * 24
+_IDEMPOTENCE_DEMONSTRATION = 7
+
+
+class _CTest(unittest.TestCase):
+
+  def testUpAndDown(self):
+    _c.init()
+    _c.shut_down()
+
+  def testCompletionQueue(self):
+    _c.init()
+
+    completion_queue = _c.CompletionQueue()
+    event = completion_queue.get(0)
+    self.assertIsNone(event)
+    event = completion_queue.get(time.time())
+    self.assertIsNone(event)
+    event = completion_queue.get(time.time() + _TIMEOUT)
+    self.assertIsNone(event)
+    completion_queue.stop()
+    for _ in range(_IDEMPOTENCE_DEMONSTRATION):
+      event = completion_queue.get(time.time() + _TIMEOUT)
+      self.assertIs(event.kind, _datatypes.Event.Kind.STOP)
+
+    del completion_queue
+    del event
+
+    _c.shut_down()
+
+  def testChannel(self):
+    _c.init()
+
+    channel = _c.Channel('test host:12345')
+    del channel
+
+    _c.shut_down()
+
+  def testCall(self):
+    method = 'test method'
+    host = 'test host'
+
+    _c.init()
+
+    channel = _c.Channel('%s:%d' % (host, 12345))
+    call = _c.Call(channel, method, host, time.time() + _TIMEOUT)
+    del call
+    del channel
+
+    _c.shut_down()
+
+  def testServer(self):
+    _c.init()
+
+    completion_queue = _c.CompletionQueue()
+    server = _c.Server(completion_queue)
+    server.add_http2_addr('[::]:0')
+    server.start()
+    server.stop()
+    completion_queue.stop()
+    del server
+    del completion_queue
+
+    service_tag = object()
+    completion_queue = _c.CompletionQueue()
+    server = _c.Server(completion_queue)
+    server.add_http2_addr('[::]:0')
+    server.start()
+    server.service(service_tag)
+    server.stop()
+    completion_queue.stop()
+    event = completion_queue.get(time.time() + _TIMEOUT)
+    self.assertIs(event.kind, _datatypes.Event.Kind.SERVICE_ACCEPTED)
+    self.assertIs(event.tag, service_tag)
+    self.assertIsNone(event.service_acceptance)
+    for _ in range(_IDEMPOTENCE_DEMONSTRATION):
+      event = completion_queue.get(time.time() + _TIMEOUT)
+      self.assertIs(event.kind, _datatypes.Event.Kind.STOP)
+    del server
+    del completion_queue
+
+    completion_queue = _c.CompletionQueue()
+    server = _c.Server(completion_queue)
+    server.add_http2_addr('[::]:0')
+    server.start()
+    thread = threading.Thread(target=completion_queue.get, args=(_FUTURE,))
+    thread.start()
+    time.sleep(1)
+    server.stop()
+    completion_queue.stop()
+    for _ in range(_IDEMPOTENCE_DEMONSTRATION):
+      event = completion_queue.get(time.time() + _TIMEOUT)
+      self.assertIs(event.kind, _datatypes.Event.Kind.STOP)
+    thread.join()
+    del server
+    del completion_queue
+
+    _c.shut_down()
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/src/python/src/_adapter/_call.c b/src/python/src/_adapter/_call.c
new file mode 100644
index 0000000..1f91090
--- /dev/null
+++ b/src/python/src/_adapter/_call.c
@@ -0,0 +1,292 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "_adapter/_call.h"
+
+#include <math.h>
+#include <Python.h>
+#include <grpc/grpc.h>
+
+#include "_adapter/_channel.h"
+#include "_adapter/_completion_queue.h"
+#include "_adapter/_error.h"
+
+static int pygrpc_call_init(Call *self, PyObject *args, PyObject *kwds) {
+  const PyObject *channel;
+  const char *method;
+  const char *host;
+  const double deadline;
+
+  if (!PyArg_ParseTuple(args, "O!ssd", &pygrpc_ChannelType, &channel, &method,
+                        &host, &deadline)) {
+    self->c_call = NULL;
+    return -1;
+  }
+
+  /* TODO(nathaniel): Hoist the gpr_timespec <-> PyFloat arithmetic into its own
+   * function with its own test coverage.
+   */
+  self->c_call =
+      grpc_channel_create_call(((Channel *)channel)->c_channel, method, host,
+                               gpr_time_from_nanos(deadline * GPR_NS_PER_SEC));
+
+  return 0;
+}
+
+static void pygrpc_call_dealloc(Call *self) {
+  if (self->c_call != NULL) {
+    grpc_call_destroy(self->c_call);
+  }
+  self->ob_type->tp_free((PyObject *)self);
+}
+
+static const PyObject *pygrpc_call_invoke(Call *self, PyObject *args) {
+  const PyObject *completion_queue;
+  const PyObject *metadata_tag;
+  const PyObject *finish_tag;
+  grpc_call_error call_error;
+  const PyObject *result;
+
+  if (!(PyArg_ParseTuple(args, "O!OO", &pygrpc_CompletionQueueType,
+                         &completion_queue, &metadata_tag, &finish_tag))) {
+    return NULL;
+  }
+
+  call_error = grpc_call_invoke(
+      self->c_call, ((CompletionQueue *)completion_queue)->c_completion_queue,
+      (void *)metadata_tag, (void *)finish_tag, 0);
+
+  result = pygrpc_translate_call_error(call_error);
+  if (result != NULL) {
+    Py_INCREF(metadata_tag);
+    Py_INCREF(finish_tag);
+  }
+  return result;
+}
+
+static const PyObject *pygrpc_call_write(Call *self, PyObject *args) {
+  const char *bytes;
+  int length;
+  const PyObject *tag;
+  gpr_slice slice;
+  grpc_byte_buffer *byte_buffer;
+  grpc_call_error call_error;
+  const PyObject *result;
+
+  if (!(PyArg_ParseTuple(args, "s#O", &bytes, &length, &tag))) {
+    return NULL;
+  }
+
+  slice = gpr_slice_from_copied_buffer(bytes, length);
+  byte_buffer = grpc_byte_buffer_create(&slice, 1);
+  gpr_slice_unref(slice);
+
+  call_error = grpc_call_start_write(self->c_call, byte_buffer, (void *)tag, 0);
+
+  grpc_byte_buffer_destroy(byte_buffer);
+
+  result = pygrpc_translate_call_error(call_error);
+  if (result != NULL) {
+    Py_INCREF(tag);
+  }
+  return result;
+}
+
+static const PyObject *pygrpc_call_complete(Call *self, PyObject *args) {
+  const PyObject *tag;
+  grpc_call_error call_error;
+  const PyObject *result;
+
+  if (!(PyArg_ParseTuple(args, "O", &tag))) {
+    return NULL;
+  }
+
+  call_error = grpc_call_writes_done(self->c_call, (void *)tag);
+
+  result = pygrpc_translate_call_error(call_error);
+  if (result != NULL) {
+    Py_INCREF(tag);
+  }
+  return result;
+}
+
+static const PyObject *pygrpc_call_accept(Call *self, PyObject *args) {
+  const PyObject *completion_queue;
+  const PyObject *tag;
+  grpc_call_error call_error;
+  const PyObject *result;
+
+  if (!(PyArg_ParseTuple(args, "O!O", &pygrpc_CompletionQueueType,
+                         &completion_queue, &tag))) {
+    return NULL;
+  }
+
+  call_error = grpc_call_server_accept(
+      self->c_call, ((CompletionQueue *)completion_queue)->c_completion_queue,
+      (void *)tag);
+  result = pygrpc_translate_call_error(call_error);
+
+  if (result != NULL) {
+    Py_INCREF(tag);
+  }
+
+  return result;
+}
+
+static const PyObject *pygrpc_call_premetadata(Call *self, PyObject *args) {
+  /* TODO(b/18702680): Actually support metadata. */
+  return pygrpc_translate_call_error(
+      grpc_call_server_end_initial_metadata(self->c_call, 0));
+}
+
+static const PyObject *pygrpc_call_read(Call *self, PyObject *args) {
+  const PyObject *tag;
+  grpc_call_error call_error;
+  const PyObject *result;
+
+  if (!(PyArg_ParseTuple(args, "O", &tag))) {
+    return NULL;
+  }
+
+  call_error = grpc_call_start_read(self->c_call, (void *)tag);
+
+  result = pygrpc_translate_call_error(call_error);
+  if (result != NULL) {
+    Py_INCREF(tag);
+  }
+  return result;
+}
+
+static const PyObject *pygrpc_call_status(Call *self, PyObject *args) {
+  PyObject *status;
+  PyObject *code;
+  PyObject *details;
+  const PyObject *tag;
+  grpc_status_code c_code;
+  char *c_message;
+  grpc_call_error call_error;
+  const PyObject *result;
+
+  if (!(PyArg_ParseTuple(args, "OO", &status, &tag))) {
+    return NULL;
+  }
+
+  code = PyObject_GetAttrString(status, "code");
+  details = PyObject_GetAttrString(status, "details");
+  c_code = PyInt_AsLong(code);
+  c_message = PyBytes_AsString(details);
+  Py_DECREF(code);
+  Py_DECREF(details);
+
+  call_error = grpc_call_start_write_status(self->c_call, c_code, c_message,
+                                            (void *)tag);
+
+  result = pygrpc_translate_call_error(call_error);
+  if (result != NULL) {
+    Py_INCREF(tag);
+  }
+  return result;
+}
+
+static const PyObject *pygrpc_call_cancel(Call *self) {
+  return pygrpc_translate_call_error(grpc_call_cancel(self->c_call));
+}
+
+static PyMethodDef methods[] = {
+    {"invoke", (PyCFunction)pygrpc_call_invoke, METH_VARARGS,
+     "Invoke this call."},
+    {"write", (PyCFunction)pygrpc_call_write, METH_VARARGS,
+     "Write bytes to this call."},
+    {"complete", (PyCFunction)pygrpc_call_complete, METH_VARARGS,
+     "Complete writes to this call."},
+    {"accept", (PyCFunction)pygrpc_call_accept, METH_VARARGS, "Accept an RPC."},
+    {"premetadata", (PyCFunction)pygrpc_call_premetadata, METH_VARARGS,
+     "Indicate the end of leading metadata in the response."},
+    {"read", (PyCFunction)pygrpc_call_read, METH_VARARGS,
+     "Read bytes from this call."},
+    {"status", (PyCFunction)pygrpc_call_status, METH_VARARGS,
+     "Report this call's status."},
+    {"cancel", (PyCFunction)pygrpc_call_cancel, METH_NOARGS,
+     "Cancel this call."},
+    {NULL}};
+
+PyTypeObject pygrpc_CallType = {
+    PyObject_HEAD_INIT(NULL)0,       /*ob_size*/
+    "_grpc.Call",                    /*tp_name*/
+    sizeof(Call),                    /*tp_basicsize*/
+    0,                               /*tp_itemsize*/
+    (destructor)pygrpc_call_dealloc, /*tp_dealloc*/
+    0,                               /*tp_print*/
+    0,                               /*tp_getattr*/
+    0,                               /*tp_setattr*/
+    0,                               /*tp_compare*/
+    0,                               /*tp_repr*/
+    0,                               /*tp_as_number*/
+    0,                               /*tp_as_sequence*/
+    0,                               /*tp_as_mapping*/
+    0,                               /*tp_hash */
+    0,                               /*tp_call*/
+    0,                               /*tp_str*/
+    0,                               /*tp_getattro*/
+    0,                               /*tp_setattro*/
+    0,                               /*tp_as_buffer*/
+    Py_TPFLAGS_DEFAULT,              /*tp_flags*/
+    "Wrapping of grpc_call.",        /* tp_doc */
+    0,                               /* tp_traverse */
+    0,                               /* tp_clear */
+    0,                               /* tp_richcompare */
+    0,                               /* tp_weaklistoffset */
+    0,                               /* tp_iter */
+    0,                               /* tp_iternext */
+    methods,                         /* tp_methods */
+    0,                               /* tp_members */
+    0,                               /* tp_getset */
+    0,                               /* tp_base */
+    0,                               /* tp_dict */
+    0,                               /* tp_descr_get */
+    0,                               /* tp_descr_set */
+    0,                               /* tp_dictoffset */
+    (initproc)pygrpc_call_init,      /* tp_init */
+};
+
+int pygrpc_add_call(PyObject *module) {
+  pygrpc_CallType.tp_new = PyType_GenericNew;
+  if (PyType_Ready(&pygrpc_CallType) < 0) {
+    PyErr_SetString(PyExc_RuntimeError, "Error defining pygrpc_CallType!");
+    return -1;
+  }
+  if (PyModule_AddObject(module, "Call", (PyObject *)&pygrpc_CallType) == -1) {
+    PyErr_SetString(PyExc_ImportError, "Couldn't add Call type to module!");
+  }
+  return 0;
+}
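Taken together, the methods above map onto the grpc_call lifecycle (invoke, write, writes_done, read, status/cancel), with every accepted operation tagged so its completion can later be matched on a completion queue. A hedged client-side sketch follows; the endpoint, method name, and tags are hypothetical, and with no server running the resulting events would simply report errors or time out.

import time

from _adapter import _c
from _adapter import _datatypes

_c.init()
completion_queue = _c.CompletionQueue()
channel = _c.Channel('localhost:12345')            # hypothetical endpoint
call = _c.Call(channel, '/test.Service/Method',    # hypothetical method
               'localhost', time.time() + 5)

# Tags are arbitrary Python objects; they come back on the completion queue.
call.invoke(completion_queue, 'metadata-tag', 'finish-tag')
call.write('serialized request bytes', 'write-tag')
call.complete('complete-tag')
call.read('read-tag')

deadline = time.time() + 5
event = completion_queue.get(deadline)
while event is not None and event.kind is not _datatypes.Event.Kind.FINISH:
  event = completion_queue.get(deadline)

completion_queue.stop()
_c.shut_down()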
diff --git a/src/python/src/_adapter/_call.h b/src/python/src/_adapter/_call.h
new file mode 100644
index 0000000..a936e23
--- /dev/null
+++ b/src/python/src/_adapter/_call.h
@@ -0,0 +1,46 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _ADAPTER__CALL_H_
+#define _ADAPTER__CALL_H_
+
+#include <Python.h>
+#include <grpc/grpc.h>
+
+typedef struct { PyObject_HEAD grpc_call *c_call; } Call;
+
+PyTypeObject pygrpc_CallType;
+
+int pygrpc_add_call(PyObject *module);
+
+#endif /* _ADAPTER__CALL_H_ */
diff --git a/src/python/src/_adapter/_channel.c b/src/python/src/_adapter/_channel.c
new file mode 100644
index 0000000..d41ebd4
--- /dev/null
+++ b/src/python/src/_adapter/_channel.c
@@ -0,0 +1,109 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "_adapter/_channel.h"
+
+#include <Python.h>
+#include <grpc/grpc.h>
+
+static int pygrpc_channel_init(Channel *self, PyObject *args, PyObject *kwds) {
+  const char *hostport;
+
+  if (!(PyArg_ParseTuple(args, "s", &hostport))) {
+    self->c_channel = NULL;
+    return -1;
+  }
+
+  self->c_channel = grpc_channel_create(hostport, NULL);
+  return 0;
+}
+
+static void pygrpc_channel_dealloc(Channel *self) {
+  if (self->c_channel != NULL) {
+    grpc_channel_destroy(self->c_channel);
+  }
+  self->ob_type->tp_free((PyObject *)self);
+}
+
+PyTypeObject pygrpc_ChannelType = {
+    PyObject_HEAD_INIT(NULL)0,          /*ob_size*/
+    "_grpc.Channel",                    /*tp_name*/
+    sizeof(Channel),                    /*tp_basicsize*/
+    0,                                  /*tp_itemsize*/
+    (destructor)pygrpc_channel_dealloc, /*tp_dealloc*/
+    0,                                  /*tp_print*/
+    0,                                  /*tp_getattr*/
+    0,                                  /*tp_setattr*/
+    0,                                  /*tp_compare*/
+    0,                                  /*tp_repr*/
+    0,                                  /*tp_as_number*/
+    0,                                  /*tp_as_sequence*/
+    0,                                  /*tp_as_mapping*/
+    0,                                  /*tp_hash */
+    0,                                  /*tp_call*/
+    0,                                  /*tp_str*/
+    0,                                  /*tp_getattro*/
+    0,                                  /*tp_setattro*/
+    0,                                  /*tp_as_buffer*/
+    Py_TPFLAGS_DEFAULT,                 /*tp_flags*/
+    "Wrapping of grpc_channel.",        /* tp_doc */
+    0,                                  /* tp_traverse */
+    0,                                  /* tp_clear */
+    0,                                  /* tp_richcompare */
+    0,                                  /* tp_weaklistoffset */
+    0,                                  /* tp_iter */
+    0,                                  /* tp_iternext */
+    0,                                  /* tp_methods */
+    0,                                  /* tp_members */
+    0,                                  /* tp_getset */
+    0,                                  /* tp_base */
+    0,                                  /* tp_dict */
+    0,                                  /* tp_descr_get */
+    0,                                  /* tp_descr_set */
+    0,                                  /* tp_dictoffset */
+    (initproc)pygrpc_channel_init,      /* tp_init */
+};
+
+int pygrpc_add_channel(PyObject *module) {
+  pygrpc_ChannelType.tp_new = PyType_GenericNew;
+  if (PyType_Ready(&pygrpc_ChannelType) < 0) {
+    PyErr_SetString(PyExc_RuntimeError, "Error defining pygrpc_ChannelType!");
+    return -1;
+  }
+  if (PyModule_AddObject(module, "Channel", (PyObject *)&pygrpc_ChannelType) ==
+      -1) {
+    PyErr_SetString(PyExc_ImportError, "Couldn't add Channel type to module!");
+    return -1;
+  }
+  return 0;
+}
diff --git a/src/python/src/_adapter/_channel.h b/src/python/src/_adapter/_channel.h
new file mode 100644
index 0000000..6241ccd
--- /dev/null
+++ b/src/python/src/_adapter/_channel.h
@@ -0,0 +1,46 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _ADAPTER__CHANNEL_H_
+#define _ADAPTER__CHANNEL_H_
+
+#include <Python.h>
+#include <grpc/grpc.h>
+
+typedef struct { PyObject_HEAD grpc_channel *c_channel; } Channel;
+
+PyTypeObject pygrpc_ChannelType;
+
+int pygrpc_add_channel(PyObject *module);
+
+#endif /* _ADAPTER__CHANNEL_H_ */
diff --git a/src/python/src/_adapter/_common.py b/src/python/src/_adapter/_common.py
new file mode 100644
index 0000000..492849f
--- /dev/null
+++ b/src/python/src/_adapter/_common.py
@@ -0,0 +1,76 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""State used by both invocation-side and service-side code."""
+
+import enum
+
+
+@enum.unique
+class HighWrite(enum.Enum):
+  """The possible categories of high-level write state."""
+
+  OPEN = 'OPEN'
+  CLOSED = 'CLOSED'
+
+
+class WriteState(object):
+  """A description of the state of writing to an RPC.
+
+  Attributes:
+    low: A side-specific value describing the low-level state of writing.
+    high: A HighWrite value describing the high-level state of writing.
+    pending: A list of bytestrings for the RPC waiting to be written to the
+      other side of the RPC.
+  """
+
+  def __init__(self, low, high, pending):
+    self.low = low
+    self.high = high
+    self.pending = pending
+
+
+class CommonRPCState(object):
+  """A description of an RPC's state.
+
+  Attributes:
+    write: A WriteState describing the state of writing to the RPC.
+    sequence_number: The lowest-unused sequence number for use in generating
+      tickets locally describing the progress of the RPC.
+    deserializer: The behavior to be used to deserialize payload bytestreams
+      taken off the wire.
+    serializer: The behavior to be used to serialize payloads to be sent on the
+      wire.
+  """
+
+  def __init__(self, write, sequence_number, deserializer, serializer):
+    self.write = write
+    self.sequence_number = sequence_number
+    self.deserializer = deserializer
+    self.serializer = serializer
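As a small illustration of how these value objects compose (an assumption, not code from this commit), an invocation-side RPC might start out with an open, empty write state and identity (de)serializers:

from _adapter import _common

write_state = _common.WriteState(
    low=None,                      # side-specific low-level write state
    high=_common.HighWrite.OPEN,   # writes are still permitted
    pending=[])                    # nothing queued for the wire yet

rpc_state = _common.CommonRPCState(
    write=write_state,
    sequence_number=0,
    deserializer=lambda raw: raw,          # identity codecs for the sketch
    serializer=lambda message: message)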
diff --git a/src/python/src/_adapter/_completion_queue.c b/src/python/src/_adapter/_completion_queue.c
new file mode 100644
index 0000000..7c951d2
--- /dev/null
+++ b/src/python/src/_adapter/_completion_queue.c
@@ -0,0 +1,541 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "_adapter/_completion_queue.h"
+
+#include <Python.h>
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+
+#include "_adapter/_call.h"
+
+static PyObject *status_class;
+static PyObject *service_acceptance_class;
+static PyObject *event_class;
+
+static PyObject *ok_status_code;
+static PyObject *cancelled_status_code;
+static PyObject *unknown_status_code;
+static PyObject *invalid_argument_status_code;
+static PyObject *expired_status_code;
+static PyObject *not_found_status_code;
+static PyObject *already_exists_status_code;
+static PyObject *permission_denied_status_code;
+static PyObject *unauthenticated_status_code;
+static PyObject *resource_exhausted_status_code;
+static PyObject *failed_precondition_status_code;
+static PyObject *aborted_status_code;
+static PyObject *out_of_range_status_code;
+static PyObject *unimplemented_status_code;
+static PyObject *internal_error_status_code;
+static PyObject *unavailable_status_code;
+static PyObject *data_loss_status_code;
+
+static PyObject *stop_event_kind;
+static PyObject *write_event_kind;
+static PyObject *complete_event_kind;
+static PyObject *service_event_kind;
+static PyObject *read_event_kind;
+static PyObject *metadata_event_kind;
+static PyObject *finish_event_kind;
+
+static PyObject *pygrpc_as_py_time(gpr_timespec *timespec) {
+  return Py_BuildValue("f",
+                       timespec->tv_sec + ((double)timespec->tv_nsec) / 1.0E9);
+}
+
+static PyObject *pygrpc_status_code(grpc_status_code c_status_code) {
+  switch (c_status_code) {
+    case GRPC_STATUS_OK:
+      return ok_status_code;
+    case GRPC_STATUS_CANCELLED:
+      return cancelled_status_code;
+    case GRPC_STATUS_UNKNOWN:
+      return unknown_status_code;
+    case GRPC_STATUS_INVALID_ARGUMENT:
+      return invalid_argument_status_code;
+    case GRPC_STATUS_DEADLINE_EXCEEDED:
+      return expired_status_code;
+    case GRPC_STATUS_NOT_FOUND:
+      return not_found_status_code;
+    case GRPC_STATUS_ALREADY_EXISTS:
+      return already_exists_status_code;
+    case GRPC_STATUS_PERMISSION_DENIED:
+      return permission_denied_status_code;
+    case GRPC_STATUS_UNAUTHENTICATED:
+      return unauthenticated_status_code;
+    case GRPC_STATUS_RESOURCE_EXHAUSTED:
+      return resource_exhausted_status_code;
+    case GRPC_STATUS_FAILED_PRECONDITION:
+      return failed_precondition_status_code;
+    case GRPC_STATUS_ABORTED:
+      return aborted_status_code;
+    case GRPC_STATUS_OUT_OF_RANGE:
+      return out_of_range_status_code;
+    case GRPC_STATUS_UNIMPLEMENTED:
+      return unimplemented_status_code;
+    case GRPC_STATUS_INTERNAL:
+      return internal_error_status_code;
+    case GRPC_STATUS_UNAVAILABLE:
+      return unavailable_status_code;
+    case GRPC_STATUS_DATA_LOSS:
+      return data_loss_status_code;
+    default:
+      return NULL;
+  }
+}
+
+static PyObject *pygrpc_stop_event_args(grpc_event *c_event) {
+  return Py_BuildValue("(OOOOOOO)", stop_event_kind, Py_None, Py_None, Py_None,
+                       Py_None, Py_None, Py_None);
+}
+
+static PyObject *pygrpc_write_event_args(grpc_event *c_event) {
+  PyObject *write_accepted =
+      c_event->data.write_accepted == GRPC_OP_OK ? Py_True : Py_False;
+  return Py_BuildValue("(OOOOOOO)", write_event_kind, (PyObject *)c_event->tag,
+                       write_accepted, Py_None, Py_None, Py_None, Py_None);
+}
+
+static PyObject *pygrpc_complete_event_args(grpc_event *c_event) {
+  PyObject *complete_accepted =
+      c_event->data.finish_accepted == GRPC_OP_OK ? Py_True : Py_False;
+  return Py_BuildValue("(OOOOOOO)", complete_event_kind,
+                       (PyObject *)c_event->tag, Py_None, complete_accepted,
+                       Py_None, Py_None, Py_None);
+}
+
+static PyObject *pygrpc_service_event_args(grpc_event *c_event) {
+  if (c_event->data.server_rpc_new.method == NULL) {
+    return Py_BuildValue("(OOOOOOO)", service_event_kind, c_event->tag,
+                         Py_None, Py_None, Py_None, Py_None, Py_None);
+  } else {
+    PyObject *method = PyBytes_FromString(c_event->data.server_rpc_new.method);
+    PyObject *host = PyBytes_FromString(c_event->data.server_rpc_new.host);
+    PyObject *service_deadline =
+        pygrpc_as_py_time(&c_event->data.server_rpc_new.deadline);
+
+    Call *call;
+    PyObject *service_acceptance_args;
+    PyObject *service_acceptance;
+    PyObject *event_args;
+
+    call = PyObject_New(Call, &pygrpc_CallType);
+    call->c_call = c_event->call;
+
+    service_acceptance_args =
+        Py_BuildValue("(OOOO)", call, method, host, service_deadline);
+    Py_DECREF(call);
+    Py_DECREF(method);
+    Py_DECREF(host);
+    Py_DECREF(service_deadline);
+
+    service_acceptance =
+        PyObject_CallObject(service_acceptance_class, service_acceptance_args);
+    Py_DECREF(service_acceptance_args);
+
+    event_args = Py_BuildValue("(OOOOOOO)", service_event_kind,
+                               (PyObject *)c_event->tag, Py_None, Py_None,
+                               service_acceptance, Py_None, Py_None);
+    Py_DECREF(service_acceptance);
+    return event_args;
+  }
+}
+
+static PyObject *pygrpc_read_event_args(grpc_event *c_event) {
+  if (c_event->data.read == NULL) {
+    return Py_BuildValue("(OOOOOOO)", read_event_kind,
+                         (PyObject *)c_event->tag, Py_None, Py_None, Py_None,
+                         Py_None, Py_None);
+  } else {
+    size_t length;
+    size_t offset;
+    grpc_byte_buffer_reader *reader;
+    gpr_slice slice;
+    char *c_bytes;
+    PyObject *bytes;
+    PyObject *event_args;
+
+    length = grpc_byte_buffer_length(c_event->data.read);
+    reader = grpc_byte_buffer_reader_create(c_event->data.read);
+    c_bytes = gpr_malloc(length);
+    offset = 0;
+    while (grpc_byte_buffer_reader_next(reader, &slice)) {
+      memcpy(c_bytes + offset, GPR_SLICE_START_PTR(slice),
+             GPR_SLICE_LENGTH(slice));
+      offset += GPR_SLICE_LENGTH(slice);
+    }
+    grpc_byte_buffer_reader_destroy(reader);
+    bytes = PyBytes_FromStringAndSize(c_bytes, length);
+    gpr_free(c_bytes);
+    event_args =
+        Py_BuildValue("(OOOOOOO)", read_event_kind, (PyObject *)c_event->tag,
+                      Py_None, Py_None, Py_None, bytes, Py_None);
+    Py_DECREF(bytes);
+    return event_args;
+  }
+}
+
+static PyObject *pygrpc_metadata_event_args(grpc_event *c_event) {
+  /* TODO(nathaniel): Actual transmission of metadata. */
+  return Py_BuildValue("(OOOOOOO)", metadata_event_kind,
+                       (PyObject *)c_event->tag, Py_None, Py_None, Py_None,
+                       Py_None, Py_None);
+}
+
+static PyObject *pygrpc_finished_event_args(grpc_event *c_event) {
+  PyObject *code;
+  PyObject *details;
+  PyObject *status_args;
+  PyObject *status;
+  PyObject *event_args;
+
+  code = pygrpc_status_code(c_event->data.finished.status);
+  if (code == NULL) {
+    PyErr_SetString(PyExc_RuntimeError, "Unrecognized status code!");
+    return NULL;
+  }
+  if (c_event->data.finished.details == NULL) {
+    details = PyBytes_FromString("");
+  } else {
+    details = PyBytes_FromString(c_event->data.finished.details);
+  }
+  status_args = Py_BuildValue("(OO)", code, details);
+  Py_DECREF(details);
+  status = PyObject_CallObject(status_class, status_args);
+  Py_DECREF(status_args);
+  event_args =
+      Py_BuildValue("(OOOOOOO)", finish_event_kind, (PyObject *)c_event->tag,
+                    Py_None, Py_None, Py_None, Py_None, status);
+  Py_DECREF(status);
+  return event_args;
+}
+
+static int pygrpc_completion_queue_init(CompletionQueue *self, PyObject *args,
+                                        PyObject *kwds) {
+  self->c_completion_queue = grpc_completion_queue_create();
+  return 0;
+}
+
+static void pygrpc_completion_queue_dealloc(CompletionQueue *self) {
+  grpc_completion_queue_destroy(self->c_completion_queue);
+  self->ob_type->tp_free((PyObject *)self);
+}
+
+static PyObject *pygrpc_completion_queue_get(CompletionQueue *self,
+                                             PyObject *args) {
+  PyObject *deadline;
+  double double_deadline;
+  gpr_timespec deadline_timespec;
+  grpc_event *c_event;
+
+  PyObject *event_args;
+  PyObject *event;
+
+  if (!(PyArg_ParseTuple(args, "O", &deadline))) {
+    return NULL;
+  }
+
+  if (deadline == Py_None) {
+    deadline_timespec = gpr_inf_future;
+  } else {
+    double_deadline = PyFloat_AsDouble(deadline);
+    deadline_timespec = gpr_time_from_nanos((long)(double_deadline * 1.0E9));
+  }
+
+  /* TODO(nathaniel): Suppress clang-format in this block and remove the
+     unnecessary and unPythonic semicolons trailing the _ALLOW_THREADS macros.
+     (Right now clang-format only understands //-demarcated suppressions.) */
+  Py_BEGIN_ALLOW_THREADS;
+  c_event =
+      grpc_completion_queue_next(self->c_completion_queue, deadline_timespec);
+  Py_END_ALLOW_THREADS;
+
+  if (c_event == NULL) {
+    Py_RETURN_NONE;
+  }
+
+  switch (c_event->type) {
+    case GRPC_QUEUE_SHUTDOWN:
+      event_args = pygrpc_stop_event_args(c_event);
+      break;
+    case GRPC_WRITE_ACCEPTED:
+      event_args = pygrpc_write_event_args(c_event);
+      break;
+    case GRPC_FINISH_ACCEPTED:
+      event_args = pygrpc_complete_event_args(c_event);
+      break;
+    case GRPC_SERVER_RPC_NEW:
+      event_args = pygrpc_service_event_args(c_event);
+      break;
+    case GRPC_READ:
+      event_args = pygrpc_read_event_args(c_event);
+      break;
+    case GRPC_CLIENT_METADATA_READ:
+      event_args = pygrpc_metadata_event_args(c_event);
+      break;
+    case GRPC_FINISHED:
+      event_args = pygrpc_finished_event_args(c_event);
+      break;
+    default:
+      PyErr_SetString(PyExc_Exception, "Unrecognized event type!");
+      return NULL;
+  }
+
+  if (event_args == NULL) {
+    return NULL;
+  }
+
+  event = PyObject_CallObject(event_class, event_args);
+
+  Py_DECREF(event_args);
+  Py_XDECREF((PyObject *)c_event->tag);
+  grpc_event_finish(c_event);
+
+  return event;
+}
+
+static PyObject *pygrpc_completion_queue_stop(CompletionQueue *self) {
+  grpc_completion_queue_shutdown(self->c_completion_queue);
+
+  Py_RETURN_NONE;
+}
+
+static PyMethodDef methods[] = {
+    {"get", (PyCFunction)pygrpc_completion_queue_get, METH_VARARGS,
+     "Get the next event."},
+    {"stop", (PyCFunction)pygrpc_completion_queue_stop, METH_NOARGS,
+     "Stop this completion queue."},
+    {NULL}};
+
+PyTypeObject pygrpc_CompletionQueueType = {
+    PyObject_HEAD_INIT(NULL)0,                   /*ob_size*/
+    "_gprc.CompletionQueue",                     /*tp_name*/
+    sizeof(CompletionQueue),                     /*tp_basicsize*/
+    0,                                           /*tp_itemsize*/
+    (destructor)pygrpc_completion_queue_dealloc, /*tp_dealloc*/
+    0,                                           /*tp_print*/
+    0,                                           /*tp_getattr*/
+    0,                                           /*tp_setattr*/
+    0,                                           /*tp_compare*/
+    0,                                           /*tp_repr*/
+    0,                                           /*tp_as_number*/
+    0,                                           /*tp_as_sequence*/
+    0,                                           /*tp_as_mapping*/
+    0,                                           /*tp_hash */
+    0,                                           /*tp_call*/
+    0,                                           /*tp_str*/
+    0,                                           /*tp_getattro*/
+    0,                                           /*tp_setattro*/
+    0,                                           /*tp_as_buffer*/
+    Py_TPFLAGS_DEFAULT,                          /*tp_flags*/
+    "Wrapping of grpc_completion_queue.",        /* tp_doc */
+    0,                                           /* tp_traverse */
+    0,                                           /* tp_clear */
+    0,                                           /* tp_richcompare */
+    0,                                           /* tp_weaklistoffset */
+    0,                                           /* tp_iter */
+    0,                                           /* tp_iternext */
+    methods,                                     /* tp_methods */
+    0,                                           /* tp_members */
+    0,                                           /* tp_getset */
+    0,                                           /* tp_base */
+    0,                                           /* tp_dict */
+    0,                                           /* tp_descr_get */
+    0,                                           /* tp_descr_set */
+    0,                                           /* tp_dictoffset */
+    (initproc)pygrpc_completion_queue_init,      /* tp_init */
+};
+
+static int pygrpc_get_status_codes(PyObject *datatypes_module) {
+  PyObject *code_class = PyObject_GetAttrString(datatypes_module, "Code");
+  if (code_class == NULL) {
+    return -1;
+  }
+  ok_status_code = PyObject_GetAttrString(code_class, "OK");
+  if (ok_status_code == NULL) {
+    return -1;
+  }
+  cancelled_status_code = PyObject_GetAttrString(code_class, "CANCELLED");
+  if (cancelled_status_code == NULL) {
+    return -1;
+  }
+  unknown_status_code = PyObject_GetAttrString(code_class, "UNKNOWN");
+  if (unknown_status_code == NULL) {
+    return -1;
+  }
+  invalid_argument_status_code =
+      PyObject_GetAttrString(code_class, "INVALID_ARGUMENT");
+  if (invalid_argument_status_code == NULL) {
+    return -1;
+  }
+  expired_status_code = PyObject_GetAttrString(code_class, "EXPIRED");
+  if (expired_status_code == NULL) {
+    return -1;
+  }
+  not_found_status_code = PyObject_GetAttrString(code_class, "NOT_FOUND");
+  if (not_found_status_code == NULL) {
+    return -1;
+  }
+  already_exists_status_code =
+      PyObject_GetAttrString(code_class, "ALREADY_EXISTS");
+  if (already_exists_status_code == NULL) {
+    return -1;
+  }
+  permission_denied_status_code =
+      PyObject_GetAttrString(code_class, "PERMISSION_DENIED");
+  if (permission_denied_status_code == NULL) {
+    return -1;
+  }
+  unauthenticated_status_code =
+      PyObject_GetAttrString(code_class, "UNAUTHENTICATED");
+  if (unauthenticated_status_code == NULL) {
+    return -1;
+  }
+  resource_exhausted_status_code =
+      PyObject_GetAttrString(code_class, "RESOURCE_EXHAUSTED");
+  if (resource_exhausted_status_code == NULL) {
+    return -1;
+  }
+  failed_precondition_status_code =
+      PyObject_GetAttrString(code_class, "FAILED_PRECONDITION");
+  if (failed_precondition_status_code == NULL) {
+    return -1;
+  }
+  aborted_status_code = PyObject_GetAttrString(code_class, "ABORTED");
+  if (aborted_status_code == NULL) {
+    return -1;
+  }
+  out_of_range_status_code = PyObject_GetAttrString(code_class, "OUT_OF_RANGE");
+  if (out_of_range_status_code == NULL) {
+    return -1;
+  }
+  unimplemented_status_code =
+      PyObject_GetAttrString(code_class, "UNIMPLEMENTED");
+  if (unimplemented_status_code == NULL) {
+    return -1;
+  }
+  internal_error_status_code =
+      PyObject_GetAttrString(code_class, "INTERNAL_ERROR");
+  if (internal_error_status_code == NULL) {
+    return -1;
+  }
+  unavailable_status_code = PyObject_GetAttrString(code_class, "UNAVAILABLE");
+  if (unavailable_status_code == NULL) {
+    return -1;
+  }
+  data_loss_status_code = PyObject_GetAttrString(code_class, "DATA_LOSS");
+  if (data_loss_status_code == NULL) {
+    return -1;
+  }
+  Py_DECREF(code_class);
+  return 0;
+}
+
+static int pygrpc_get_event_kinds(PyObject *event_class) {
+  PyObject *kind_class = PyObject_GetAttrString(event_class, "Kind");
+  if (kind_class == NULL) {
+    return -1;
+  }
+  stop_event_kind = PyObject_GetAttrString(kind_class, "STOP");
+  if (stop_event_kind == NULL) {
+    return -1;
+  }
+  write_event_kind = PyObject_GetAttrString(kind_class, "WRITE_ACCEPTED");
+  if (write_event_kind == NULL) {
+    return -1;
+  }
+  complete_event_kind = PyObject_GetAttrString(kind_class, "COMPLETE_ACCEPTED");
+  if (complete_event_kind == NULL) {
+    return -1;
+  }
+  service_event_kind = PyObject_GetAttrString(kind_class, "SERVICE_ACCEPTED");
+  if (service_event_kind == NULL) {
+    return -1;
+  }
+  read_event_kind = PyObject_GetAttrString(kind_class, "READ_ACCEPTED");
+  if (read_event_kind == NULL) {
+    return -1;
+  }
+  metadata_event_kind = PyObject_GetAttrString(kind_class, "METADATA_ACCEPTED");
+  if (metadata_event_kind == NULL) {
+    return -1;
+  }
+  finish_event_kind = PyObject_GetAttrString(kind_class, "FINISH");
+  if (finish_event_kind == NULL) {
+    return -1;
+  }
+  Py_DECREF(kind_class);
+  return 0;
+}
+
+int pygrpc_add_completion_queue(PyObject *module) {
+  char *datatypes_module_path = "_adapter._datatypes";
+  PyObject *datatypes_module = PyImport_ImportModule(datatypes_module_path);
+  if (datatypes_module == NULL) {
+    PyErr_SetString(PyExc_ImportError, datatypes_module_path);
+    return -1;
+  }
+  status_class = PyObject_GetAttrString(datatypes_module, "Status");
+  service_acceptance_class =
+      PyObject_GetAttrString(datatypes_module, "ServiceAcceptance");
+  event_class = PyObject_GetAttrString(datatypes_module, "Event");
+  if (status_class == NULL || service_acceptance_class == NULL ||
+      event_class == NULL) {
+    PyErr_SetString(PyExc_ImportError, "Missing classes in _datatypes module!");
+    return -1;
+  }
+  if (pygrpc_get_status_codes(datatypes_module) == -1) {
+    PyErr_SetString(PyExc_ImportError, "Status codes import broken!");
+    return -1;
+  }
+  if (pygrpc_get_event_kinds(event_class) == -1) {
+    PyErr_SetString(PyExc_ImportError, "Event kinds import broken!");
+    return -1;
+  }
+  Py_DECREF(datatypes_module);
+
+  pygrpc_CompletionQueueType.tp_new = PyType_GenericNew;
+  if (PyType_Ready(&pygrpc_CompletionQueueType) < 0) {
+    PyErr_SetString(PyExc_RuntimeError,
+                    "Error defining pygrpc_CompletionQueueType!");
+    return -1;
+  }
+  if (PyModule_AddObject(module, "CompletionQueue",
+                         (PyObject *)&pygrpc_CompletionQueueType) == -1) {
+    PyErr_SetString(PyExc_ImportError,
+                    "Couldn't add CompletionQueue type to module!");
+    return -1;
+  }
+  return 0;
+}
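The seven-element tuples built by the pygrpc_*_event_args helpers above become _datatypes.Event instances, so a Python consumer dispatches on Event.kind. A hedged sketch of such a consumer; the drain helper is hypothetical and not part of the commit.

import time

from _adapter import _datatypes


def drain(completion_queue, timeout=3):
  """Pulls events until the queue reports STOP, bucketing them by kind."""
  by_kind = {}
  while True:
    event = completion_queue.get(time.time() + timeout)
    if event is None:
      continue  # deadline elapsed with nothing ready; poll again
    if event.kind is _datatypes.Event.Kind.STOP:
      return by_kind
    elif event.kind is _datatypes.Event.Kind.READ_ACCEPTED:
      # event.bytes carries the payload assembled from the byte buffer above.
      by_kind.setdefault(event.kind, []).append((event.tag, event.bytes))
    elif event.kind is _datatypes.Event.Kind.FINISH:
      # event.status is the Status built in pygrpc_finished_event_args.
      by_kind.setdefault(event.kind, []).append((event.tag, event.status))
    else:
      by_kind.setdefault(event.kind, []).append((event.tag, None))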
diff --git a/src/python/src/_adapter/_completion_queue.h b/src/python/src/_adapter/_completion_queue.h
new file mode 100644
index 0000000..8e5ee9f
--- /dev/null
+++ b/src/python/src/_adapter/_completion_queue.h
@@ -0,0 +1,48 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _ADAPTER__COMPLETION_QUEUE_H_
+#define _ADAPTER__COMPLETION_QUEUE_H_
+
+#include <Python.h>
+#include <grpc/grpc.h>
+
+typedef struct {
+  PyObject_HEAD grpc_completion_queue *c_completion_queue;
+} CompletionQueue;
+
+PyTypeObject pygrpc_CompletionQueueType;
+
+int pygrpc_add_completion_queue(PyObject *module);
+
+#endif /* _ADAPTER__COMPLETION_QUEUE_H_ */
diff --git a/src/python/src/_adapter/_datatypes.py b/src/python/src/_adapter/_datatypes.py
new file mode 100644
index 0000000..e271ec8
--- /dev/null
+++ b/src/python/src/_adapter/_datatypes.py
@@ -0,0 +1,86 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Datatypes passed between Python and C code."""
+
+import collections
+import enum
+
+
+@enum.unique
+class Code(enum.IntEnum):
+  """One Platform error codes (see status.h and codes.proto)."""
+
+  OK = 0
+  CANCELLED = 1
+  UNKNOWN = 2
+  INVALID_ARGUMENT = 3
+  EXPIRED = 4
+  NOT_FOUND = 5
+  ALREADY_EXISTS = 6
+  PERMISSION_DENIED = 7
+  UNAUTHENTICATED = 16
+  RESOURCE_EXHAUSTED = 8
+  FAILED_PRECONDITION = 9
+  ABORTED = 10
+  OUT_OF_RANGE = 11
+  UNIMPLEMENTED = 12
+  INTERNAL_ERROR = 13
+  UNAVAILABLE = 14
+  DATA_LOSS = 15
+
+
+class Status(collections.namedtuple('Status', ['code', 'details'])):
+  """Describes an RPC's overall status."""
+
+
+class ServiceAcceptance(
+    collections.namedtuple(
+        'ServiceAcceptance', ['call', 'method', 'host', 'deadline'])):
+  """Describes an RPC on the service side at the start of service."""
+
+
+class Event(
+    collections.namedtuple(
+        'Event',
+        ['kind', 'tag', 'write_accepted', 'complete_accepted',
+         'service_acceptance', 'bytes', 'status'])):
+  """Describes an event emitted from a completion queue."""
+
+  @enum.unique
+  class Kind(enum.Enum):
+    """Describes the kind of an event."""
+
+    STOP = object()
+    WRITE_ACCEPTED = object()
+    COMPLETE_ACCEPTED = object()
+    SERVICE_ACCEPTED = object()
+    READ_ACCEPTED = object()
+    METADATA_ACCEPTED = object()
+    FINISH = object()
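
For orientation, a minimal sketch of how these datatypes get populated, assuming the _adapter package is importable as in the tests later in this change (illustrative only, not part of the patch):

  from _adapter import _datatypes

  # A status as attached to a FINISH event.
  ok_status = _datatypes.Status(code=_datatypes.Code.OK, details='')

  # Fields not relevant to an event's kind are typically left None (as in the
  # tests below); here, a read completion carrying one byte of payload.
  read_event = _datatypes.Event(
      kind=_datatypes.Event.Kind.READ_ACCEPTED, tag=object(),
      write_accepted=None, complete_accepted=None, service_acceptance=None,
      bytes=b'\x07', status=None)

  assert read_event.kind is _datatypes.Event.Kind.READ_ACCEPTED
  assert ok_status.code is _datatypes.Code.OK
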
diff --git a/src/python/src/_adapter/_error.c b/src/python/src/_adapter/_error.c
new file mode 100644
index 0000000..8c04f4b
--- /dev/null
+++ b/src/python/src/_adapter/_error.c
@@ -0,0 +1,79 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "_adapter/_error.h"
+
+#include <Python.h>
+#include <grpc/grpc.h>
+
+const PyObject *pygrpc_translate_call_error(grpc_call_error call_error) {
+  switch (call_error) {
+    case GRPC_CALL_OK:
+      Py_RETURN_NONE;
+    case GRPC_CALL_ERROR:
+      PyErr_SetString(PyExc_Exception, "Defect: unknown defect!");
+      return NULL;
+    case GRPC_CALL_ERROR_NOT_ON_SERVER:
+      PyErr_SetString(PyExc_Exception,
+                      "Defect: client-only method called on server!");
+      return NULL;
+    case GRPC_CALL_ERROR_NOT_ON_CLIENT:
+      PyErr_SetString(PyExc_Exception,
+                      "Defect: server-only method called on client!");
+      return NULL;
+    case GRPC_CALL_ERROR_ALREADY_ACCEPTED:
+      PyErr_SetString(PyExc_Exception,
+                      "Defect: attempted to accept already-accepted call!");
+      return NULL;
+    case GRPC_CALL_ERROR_ALREADY_INVOKED:
+      PyErr_SetString(PyExc_Exception,
+                      "Defect: attempted to invoke already-invoked call!");
+      return NULL;
+    case GRPC_CALL_ERROR_NOT_INVOKED:
+      PyErr_SetString(PyExc_Exception, "Defect: Call not yet invoked!");
+      return NULL;
+    case GRPC_CALL_ERROR_ALREADY_FINISHED:
+      PyErr_SetString(PyExc_Exception, "Defect: Call already finished!");
+      return NULL;
+    case GRPC_CALL_ERROR_TOO_MANY_OPERATIONS:
+      PyErr_SetString(PyExc_Exception,
+                      "Defect: Attempted extra read or extra write on call!");
+      return NULL;
+    case GRPC_CALL_ERROR_INVALID_FLAGS:
+      PyErr_SetString(PyExc_Exception, "Defect: invalid flags!");
+      return NULL;
+    default:
+      PyErr_SetString(PyExc_Exception, "Defect: Unknown call error!");
+      return NULL;
+  }
+}
diff --git a/src/python/src/_adapter/_error.h b/src/python/src/_adapter/_error.h
new file mode 100644
index 0000000..6988b1c
--- /dev/null
+++ b/src/python/src/_adapter/_error.h
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _ADAPTER__ERROR_H_
+#define _ADAPTER__ERROR_H_
+
+#include <Python.h>
+#include <grpc/grpc.h>
+
+const PyObject *pygrpc_translate_call_error(grpc_call_error call_error);
+
+#endif /* _ADAPTER__ERROR_H_ */
diff --git a/src/python/src/_adapter/_event_invocation_synchronous_event_service_test.py b/src/python/src/_adapter/_event_invocation_synchronous_event_service_test.py
new file mode 100644
index 0000000..69d91ec
--- /dev/null
+++ b/src/python/src/_adapter/_event_invocation_synchronous_event_service_test.py
@@ -0,0 +1,46 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""One of the tests of the Face layer of RPC Framework."""
+
+import unittest
+
+from _adapter import _face_test_case
+from _framework.face.testing import event_invocation_synchronous_event_service_test_case as test_case
+
+
+class EventInvocationSynchronousEventServiceTest(
+    _face_test_case.FaceTestCase,
+    test_case.EventInvocationSynchronousEventServiceTestCase,
+    unittest.TestCase):
+  pass
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/src/python/src/_adapter/_face_test_case.py b/src/python/src/_adapter/_face_test_case.py
new file mode 100644
index 0000000..112dcfb
--- /dev/null
+++ b/src/python/src/_adapter/_face_test_case.py
@@ -0,0 +1,124 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Common construction and destruction for GRPC-backed Face-layer tests."""
+
+import unittest
+
+from _adapter import fore
+from _adapter import rear
+from _framework.base import util
+from _framework.base.packets import implementations as tickets_implementations
+from _framework.face import implementations as face_implementations
+from _framework.face.testing import coverage
+from _framework.face.testing import serial
+from _framework.face.testing import test_case
+from _framework.foundation import logging_pool
+
+_TIMEOUT = 3
+_MAXIMUM_TIMEOUT = 90
+_MAXIMUM_POOL_SIZE = 400
+
+
+class FaceTestCase(test_case.FaceTestCase, coverage.BlockingCoverage):
+  """Provides abstract Face-layer tests a GRPC-backed implementation."""
+
+  def set_up_implementation(
+      self,
+      name,
+      methods,
+      inline_value_in_value_out_methods,
+      inline_value_in_stream_out_methods,
+      inline_stream_in_value_out_methods,
+      inline_stream_in_stream_out_methods,
+      event_value_in_value_out_methods,
+      event_value_in_stream_out_methods,
+      event_stream_in_value_out_methods,
+      event_stream_in_stream_out_methods,
+      multi_method):
+    pool = logging_pool.pool(_MAXIMUM_POOL_SIZE)
+
+    servicer = face_implementations.servicer(
+        pool,
+        inline_value_in_value_out_methods=inline_value_in_value_out_methods,
+        inline_value_in_stream_out_methods=inline_value_in_stream_out_methods,
+        inline_stream_in_value_out_methods=inline_stream_in_value_out_methods,
+        inline_stream_in_stream_out_methods=inline_stream_in_stream_out_methods,
+        event_value_in_value_out_methods=event_value_in_value_out_methods,
+        event_value_in_stream_out_methods=event_value_in_stream_out_methods,
+        event_stream_in_value_out_methods=event_stream_in_value_out_methods,
+        event_stream_in_stream_out_methods=event_stream_in_stream_out_methods,
+        multi_method=multi_method)
+
+    serialization = serial.serialization(methods)
+
+    fore_link = fore.ForeLink(
+        pool, serialization.request_deserializers,
+        serialization.response_serializers)
+    port = fore_link.start()
+    rear_link = rear.RearLink(
+        'localhost', port, pool,
+        serialization.request_serializers, serialization.response_deserializers)
+    rear_link.start()
+    front = tickets_implementations.front(pool, pool, pool)
+    back = tickets_implementations.back(
+        servicer, pool, pool, pool, _TIMEOUT, _MAXIMUM_TIMEOUT)
+    fore_link.join_rear_link(back)
+    back.join_fore_link(fore_link)
+    rear_link.join_fore_link(front)
+    front.join_rear_link(rear_link)
+
+    server = face_implementations.server()
+    stub = face_implementations.stub(front, pool)
+    return server, stub, (rear_link, fore_link, front, back)
+
+  def tear_down_implementation(self, memo):
+    rear_link, fore_link, front, back = memo
+    # TODO(nathaniel): Waiting for the front and back to idle possibly should
+    # not be necessary - investigate as part of graceful shutdown work.
+    util.wait_for_idle(front)
+    util.wait_for_idle(back)
+    rear_link.stop()
+    fore_link.stop()
+
+  @unittest.skip('Service-side failure not transmitted by GRPC.')
+  def testFailedUnaryRequestUnaryResponse(self):
+    raise NotImplementedError()
+
+  @unittest.skip('Service-side failure not transmitted by GRPC.')
+  def testFailedUnaryRequestStreamResponse(self):
+    raise NotImplementedError()
+
+  @unittest.skip('Service-side failure not transmitted by GRPC.')
+  def testFailedStreamRequestUnaryResponse(self):
+    raise NotImplementedError()
+
+  @unittest.skip('Service-side failure not transmitted by GRPC.')
+  def testFailedStreamRequestStreamResponse(self):
+    raise NotImplementedError()
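
The four join_* calls in set_up_implementation above wire each link to its peer in both directions; as a sketch, that symmetric handshake could be expressed with a hypothetical helper like this (illustrative only, not part of the patch):

  def join_bidirectionally(fore_side, rear_side):
    """Points two link-like objects at each other so tickets flow both ways."""
    fore_side.join_rear_link(rear_side)
    rear_side.join_fore_link(fore_side)

  # Equivalent to the explicit calls in set_up_implementation:
  #   join_bidirectionally(fore_link, back)
  #   join_bidirectionally(front, rear_link)
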
diff --git a/src/python/src/_adapter/_future_invocation_asynchronous_event_service_test.py b/src/python/src/_adapter/_future_invocation_asynchronous_event_service_test.py
new file mode 100644
index 0000000..3db39dd
--- /dev/null
+++ b/src/python/src/_adapter/_future_invocation_asynchronous_event_service_test.py
@@ -0,0 +1,46 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""One of the tests of the Face layer of RPC Framework."""
+
+import unittest
+
+from _adapter import _face_test_case
+from _framework.face.testing import future_invocation_asynchronous_event_service_test_case as test_case
+
+
+class FutureInvocationAsynchronousEventServiceTest(
+    _face_test_case.FaceTestCase,
+    test_case.FutureInvocationAsynchronousEventServiceTestCase,
+    unittest.TestCase):
+  pass
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/src/python/src/_adapter/_links_test.py b/src/python/src/_adapter/_links_test.py
new file mode 100644
index 0000000..8341460
--- /dev/null
+++ b/src/python/src/_adapter/_links_test.py
@@ -0,0 +1,246 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Test of the GRPC-backed ForeLink and RearLink."""
+
+import threading
+import unittest
+
+from _adapter import _proto_scenarios
+from _adapter import _test_links
+from _adapter import fore
+from _adapter import rear
+from _framework.base import interfaces
+from _framework.base.packets import packets as tickets
+from _framework.foundation import logging_pool
+
+_IDENTITY = lambda x: x
+_TIMEOUT = 2
+
+
+class RoundTripTest(unittest.TestCase):
+
+  def setUp(self):
+    self.fore_link_pool = logging_pool.pool(80)
+    self.rear_link_pool = logging_pool.pool(80)
+
+  def tearDown(self):
+    self.rear_link_pool.shutdown(wait=True)
+    self.fore_link_pool.shutdown(wait=True)
+
+  def testZeroMessageRoundTrip(self):
+    test_operation_id = object()
+    test_method = 'test method'
+    test_fore_link = _test_links.ForeLink(None, None)
+    def rear_action(front_to_back_ticket, fore_link):
+      if front_to_back_ticket.kind in (
+          tickets.Kind.COMPLETION, tickets.Kind.ENTIRE):
+        back_to_front_ticket = tickets.BackToFrontPacket(
+            front_to_back_ticket.operation_id, 0, tickets.Kind.COMPLETION, None)
+        fore_link.accept_back_to_front_ticket(back_to_front_ticket)
+    test_rear_link = _test_links.RearLink(rear_action, None)
+
+    fore_link = fore.ForeLink(
+        self.fore_link_pool, {test_method: None}, {test_method: None})
+    fore_link.join_rear_link(test_rear_link)
+    test_rear_link.join_fore_link(fore_link)
+    port = fore_link.start()
+
+    rear_link = rear.RearLink(
+        'localhost', port, self.rear_link_pool, {test_method: None},
+        {test_method: None})
+    rear_link.join_fore_link(test_fore_link)
+    test_fore_link.join_rear_link(rear_link)
+    rear_link.start()
+
+    front_to_back_ticket = tickets.FrontToBackPacket(
+        test_operation_id, 0, tickets.Kind.ENTIRE, test_method,
+        interfaces.ServicedSubscription.Kind.FULL, None, None, _TIMEOUT)
+    rear_link.accept_front_to_back_ticket(front_to_back_ticket)
+
+    with test_fore_link.condition:
+      while (not test_fore_link.tickets or
+             test_fore_link.tickets[-1].kind is tickets.Kind.CONTINUATION):
+        test_fore_link.condition.wait()
+
+    rear_link.stop()
+    fore_link.stop()
+
+    with test_fore_link.condition:
+      self.assertIs(test_fore_link.tickets[-1].kind, tickets.Kind.COMPLETION)
+
+  def testEntireRoundTrip(self):
+    test_operation_id = object()
+    test_method = 'test method'
+    test_front_to_back_datum = b'\x07'
+    test_back_to_front_datum = b'\x08'
+    test_fore_link = _test_links.ForeLink(None, None)
+    rear_sequence_number = [0]
+    def rear_action(front_to_back_ticket, fore_link):
+      if front_to_back_ticket.payload is None:
+        payload = None
+      else:
+        payload = test_back_to_front_datum
+      terminal = front_to_back_ticket.kind in (
+          tickets.Kind.COMPLETION, tickets.Kind.ENTIRE)
+      if payload is not None or terminal:
+        back_to_front_ticket = tickets.BackToFrontPacket(
+            front_to_back_ticket.operation_id, rear_sequence_number[0],
+            tickets.Kind.COMPLETION if terminal else tickets.Kind.CONTINUATION,
+            payload)
+        rear_sequence_number[0] += 1
+        fore_link.accept_back_to_front_ticket(back_to_front_ticket)
+    test_rear_link = _test_links.RearLink(rear_action, None)
+
+    fore_link = fore.ForeLink(
+        self.fore_link_pool, {test_method: _IDENTITY},
+        {test_method: _IDENTITY})
+    fore_link.join_rear_link(test_rear_link)
+    test_rear_link.join_fore_link(fore_link)
+    port = fore_link.start()
+
+    rear_link = rear.RearLink(
+        'localhost', port, self.rear_link_pool, {test_method: _IDENTITY},
+        {test_method: _IDENTITY})
+    rear_link.join_fore_link(test_fore_link)
+    test_fore_link.join_rear_link(rear_link)
+    rear_link.start()
+
+    front_to_back_ticket = tickets.FrontToBackPacket(
+        test_operation_id, 0, tickets.Kind.ENTIRE, test_method,
+        interfaces.ServicedSubscription.Kind.FULL, None,
+        test_front_to_back_datum, _TIMEOUT)
+    rear_link.accept_front_to_back_ticket(front_to_back_ticket)
+
+    with test_fore_link.condition:
+      while (not test_fore_link.tickets or
+             test_fore_link.tickets[-1].kind is not tickets.Kind.COMPLETION):
+        test_fore_link.condition.wait()
+
+    rear_link.stop()
+    fore_link.stop()
+
+    with test_rear_link.condition:
+      front_to_back_payloads = tuple(
+          ticket.payload for ticket in test_rear_link.tickets
+          if ticket.payload is not None)
+    with test_fore_link.condition:
+      back_to_front_payloads = tuple(
+          ticket.payload for ticket in test_fore_link.tickets
+          if ticket.payload is not None)
+    self.assertTupleEqual((test_front_to_back_datum,), front_to_back_payloads)
+    self.assertTupleEqual((test_back_to_front_datum,), back_to_front_payloads)
+
+  def _perform_scenario_test(self, scenario):
+    test_operation_id = object()
+    test_method = scenario.method()
+    test_fore_link = _test_links.ForeLink(None, None)
+    rear_lock = threading.Lock()
+    rear_sequence_number = [0]
+    def rear_action(front_to_back_ticket, fore_link):
+      with rear_lock:
+        if front_to_back_ticket.payload is not None:
+          response = scenario.response_for_request(front_to_back_ticket.payload)
+        else:
+          response = None
+      terminal = front_to_back_ticket.kind in (
+          tickets.Kind.COMPLETION, tickets.Kind.ENTIRE)
+      if response is not None or terminal:
+        back_to_front_ticket = tickets.BackToFrontPacket(
+            front_to_back_ticket.operation_id, rear_sequence_number[0],
+            tickets.Kind.COMPLETION if terminal else tickets.Kind.CONTINUATION,
+            response)
+        rear_sequence_number[0] += 1
+        fore_link.accept_back_to_front_ticket(back_to_front_ticket)
+    test_rear_link = _test_links.RearLink(rear_action, None)
+
+    fore_link = fore.ForeLink(
+        self.fore_link_pool, {test_method: scenario.deserialize_request},
+        {test_method: scenario.serialize_response})
+    fore_link.join_rear_link(test_rear_link)
+    test_rear_link.join_fore_link(fore_link)
+    port = fore_link.start()
+
+    rear_link = rear.RearLink(
+        'localhost', port, self.rear_link_pool,
+        {test_method: scenario.serialize_request},
+        {test_method: scenario.deserialize_response})
+    rear_link.join_fore_link(test_fore_link)
+    test_fore_link.join_rear_link(rear_link)
+    rear_link.start()
+
+    commencement_ticket = tickets.FrontToBackPacket(
+        test_operation_id, 0, tickets.Kind.COMMENCEMENT, test_method,
+        interfaces.ServicedSubscription.Kind.FULL, None, None, _TIMEOUT)
+    fore_sequence_number = 1
+    rear_link.accept_front_to_back_ticket(commencement_ticket)
+    for request in scenario.requests():
+      continuation_ticket = tickets.FrontToBackPacket(
+          test_operation_id, fore_sequence_number, tickets.Kind.CONTINUATION,
+          None, None, None, request, None)
+      fore_sequence_number += 1
+      rear_link.accept_front_to_back_ticket(continuation_ticket)
+    completion_ticket = tickets.FrontToBackPacket(
+        test_operation_id, fore_sequence_number, tickets.Kind.COMPLETION, None,
+        None, None, None, None)
+    fore_sequence_number += 1
+    rear_link.accept_front_to_back_ticket(completion_ticket)
+
+    with test_fore_link.condition:
+      while (not test_fore_link.tickets or
+             test_fore_link.tickets[-1].kind is not tickets.Kind.COMPLETION):
+        test_fore_link.condition.wait()
+
+    rear_link.stop()
+    fore_link.stop()
+
+    with test_rear_link.condition:
+      requests = tuple(
+          ticket.payload for ticket in test_rear_link.tickets
+          if ticket.payload is not None)
+    with test_fore_link.condition:
+      responses = tuple(
+          ticket.payload for ticket in test_fore_link.tickets
+          if ticket.payload is not None)
+    self.assertTrue(scenario.verify_requests(requests))
+    self.assertTrue(scenario.verify_responses(responses))
+
+  def testEmptyScenario(self):
+    self._perform_scenario_test(_proto_scenarios.EmptyScenario())
+
+  def testBidirectionallyUnaryScenario(self):
+    self._perform_scenario_test(_proto_scenarios.BidirectionallyUnaryScenario())
+
+  def testBidirectionallyStreamingScenario(self):
+    self._perform_scenario_test(
+        _proto_scenarios.BidirectionallyStreamingScenario())
+
+
+if __name__ == '__main__':
+  unittest.main()
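
Each round-trip test above blocks on the test fore link's condition variable until the most recent ticket signals completion; as a sketch, that wait could be factored into a helper along these lines (hypothetical, not used by the tests):

  def wait_for_completion(test_fore_link, kind_enum):
    """Blocks until the latest received ticket is a COMPLETION ticket."""
    with test_fore_link.condition:
      while (not test_fore_link.tickets or
             test_fore_link.tickets[-1].kind is not kind_enum.COMPLETION):
        test_fore_link.condition.wait()

  # e.g. wait_for_completion(test_fore_link, tickets.Kind)
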
diff --git a/src/python/src/_adapter/_lonely_rear_link_test.py b/src/python/src/_adapter/_lonely_rear_link_test.py
new file mode 100644
index 0000000..7ccdb0b
--- /dev/null
+++ b/src/python/src/_adapter/_lonely_rear_link_test.py
@@ -0,0 +1,97 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""A test of invocation-side code unconnected to an RPC server."""
+
+import unittest
+
+from _adapter import _test_links
+from _adapter import rear
+from _framework.base import interfaces
+from _framework.base.packets import packets
+from _framework.foundation import logging_pool
+
+_IDENTITY = lambda x: x
+_TIMEOUT = 2
+
+
+class LonelyRearLinkTest(unittest.TestCase):
+
+  def setUp(self):
+    self.pool = logging_pool.pool(80)
+
+  def tearDown(self):
+    self.pool.shutdown(wait=True)
+
+  def testUpAndDown(self):
+    rear_link = rear.RearLink('nonexistent', 54321, self.pool, {}, {})
+
+    rear_link.start()
+    rear_link.stop()
+
+  def _perform_lonely_client_test_with_ticket_kind(
+      self, front_to_back_ticket_kind):
+    test_operation_id = object()
+    test_method = 'test method'
+    fore_link = _test_links.ForeLink(None, None)
+
+    rear_link = rear.RearLink(
+        'nonexistent', 54321, self.pool, {test_method: None},
+        {test_method: None})
+    rear_link.join_fore_link(fore_link)
+    rear_link.start()
+
+    front_to_back_ticket = packets.FrontToBackPacket(
+        test_operation_id, 0, front_to_back_ticket_kind, test_method,
+        interfaces.ServicedSubscription.Kind.FULL, None, None, _TIMEOUT)
+    rear_link.accept_front_to_back_ticket(front_to_back_ticket)
+
+    with fore_link.condition:
+      while True:
+        if (fore_link.tickets and
+            fore_link.tickets[-1].kind is not packets.Kind.CONTINUATION):
+          break
+        fore_link.condition.wait()
+
+    rear_link.stop()
+
+    with fore_link.condition:
+      self.assertIsNot(fore_link.tickets[-1].kind, packets.Kind.COMPLETION)
+
+  @unittest.skip('TODO(nathaniel): This seems to have broken in the last few weeks; fix it.')
+  def testLonelyClientCommencementPacket(self):
+    self._perform_lonely_client_test_with_ticket_kind(
+        packets.Kind.COMMENCEMENT)
+
+  def testLonelyClientEntirePacket(self):
+    self._perform_lonely_client_test_with_ticket_kind(packets.Kind.ENTIRE)
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/src/python/src/_adapter/_low.py b/src/python/src/_adapter/_low.py
new file mode 100644
index 0000000..6c24087
--- /dev/null
+++ b/src/python/src/_adapter/_low.py
@@ -0,0 +1,55 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""A Python interface for GRPC C core structures and behaviors."""
+
+import atexit
+import gc
+
+from _adapter import _c
+from _adapter import _datatypes
+
+def _shut_down():
+  # force garbage collection before shutting down grpc, to ensure all grpc
+  # objects are cleaned up
+  gc.collect()
+  _c.shut_down()
+
+_c.init()
+atexit.register(_shut_down)
+
+# pylint: disable=invalid-name
+Code = _datatypes.Code
+Status = _datatypes.Status
+Event = _datatypes.Event
+Call = _c.Call
+Channel = _c.Channel
+CompletionQueue = _c.CompletionQueue
+Server = _c.Server
+# pylint: enable=invalid-name
diff --git a/src/python/src/_adapter/_low_test.py b/src/python/src/_adapter/_low_test.py
new file mode 100644
index 0000000..57b3be6
--- /dev/null
+++ b/src/python/src/_adapter/_low_test.py
@@ -0,0 +1,371 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Tests for _adapter._low."""
+
+import time
+import unittest
+
+from _adapter import _low
+
+_STREAM_LENGTH = 300
+_TIMEOUT = 5
+_AFTER_DELAY = 2
+_FUTURE = time.time() + 60 * 60 * 24
+_BYTE_SEQUENCE = b'\abcdefghijklmnopqrstuvwxyz0123456789' * 200
+_BYTE_SEQUENCE_SEQUENCE = tuple(
+    bytes(bytearray((row + column) % 256 for column in range(row)))
+    for row in range(_STREAM_LENGTH))
+
+
+class LonelyClientTest(unittest.TestCase):
+
+  def testLonelyClient(self):
+    host = 'nosuchhostexists'
+    port = 54321
+    method = 'test method'
+    deadline = time.time() + _TIMEOUT
+    after_deadline = deadline + _AFTER_DELAY
+    metadata_tag = object()
+    finish_tag = object()
+
+    completion_queue = _low.CompletionQueue()
+    channel = _low.Channel('%s:%d' % (host, port))
+    client_call = _low.Call(channel, method, host, deadline)
+
+    client_call.invoke(completion_queue, metadata_tag, finish_tag)
+    first_event = completion_queue.get(after_deadline)
+    self.assertIsNotNone(first_event)
+    second_event = completion_queue.get(after_deadline)
+    self.assertIsNotNone(second_event)
+    kinds = [event.kind for event in (first_event, second_event)]
+    self.assertItemsEqual(
+        (_low.Event.Kind.METADATA_ACCEPTED, _low.Event.Kind.FINISH),
+        kinds)
+
+    self.assertIsNone(completion_queue.get(after_deadline))
+
+    completion_queue.stop()
+    stop_event = completion_queue.get(_FUTURE)
+    self.assertEqual(_low.Event.Kind.STOP, stop_event.kind)
+
+
+class EchoTest(unittest.TestCase):
+
+  def setUp(self):
+    self.host = 'localhost'
+
+    self.server_completion_queue = _low.CompletionQueue()
+    self.server = _low.Server(self.server_completion_queue)
+    port = self.server.add_http2_addr('[::]:0')
+    self.server.start()
+
+    self.client_completion_queue = _low.CompletionQueue()
+    self.channel = _low.Channel('%s:%d' % (self.host, port))
+
+  def tearDown(self):
+    self.server.stop()
+    # NOTE(nathaniel): Yep, this is weird; it's a consequence of
+    # grpc_server_destroy being the call that actually tells the server's
+    # completion queue to pump out all pending events/tags immediately rather
+    # than gracefully completing all outstanding RPCs while accepting no new
+    # ones.
+    # TODO(nathaniel): Deallocation of a Python object shouldn't have this kind
+    # of observable side effect let alone such an important one.
+    del self.server
+    self.server_completion_queue.stop()
+    self.client_completion_queue.stop()
+    while True:
+      event = self.server_completion_queue.get(_FUTURE)
+      if event is not None and event.kind is _low.Event.Kind.STOP:
+        break
+    while True:
+      event = self.client_completion_queue.get(_FUTURE)
+      if event is not None and event.kind is _low.Event.Kind.STOP:
+        break
+    self.server_completion_queue = None
+    self.client_completion_queue = None
+
+  def _perform_echo_test(self, test_data):
+    method = 'test method'
+    details = 'test details'
+    deadline = _FUTURE
+    metadata_tag = object()
+    finish_tag = object()
+    write_tag = object()
+    complete_tag = object()
+    service_tag = object()
+    read_tag = object()
+    status_tag = object()
+
+    server_data = []
+    client_data = []
+
+    client_call = _low.Call(self.channel, method, self.host, deadline)
+
+    client_call.invoke(self.client_completion_queue, metadata_tag, finish_tag)
+
+    self.server.service(service_tag)
+    service_accepted = self.server_completion_queue.get(_FUTURE)
+    self.assertIsNotNone(service_accepted)
+    self.assertIs(service_accepted.kind, _low.Event.Kind.SERVICE_ACCEPTED)
+    self.assertIs(service_accepted.tag, service_tag)
+    self.assertEqual(method, service_accepted.service_acceptance.method)
+    self.assertEqual(self.host, service_accepted.service_acceptance.host)
+    self.assertIsNotNone(service_accepted.service_acceptance.call)
+    server_call = service_accepted.service_acceptance.call
+    server_call.accept(self.server_completion_queue, finish_tag)
+    server_call.premetadata()
+
+    metadata_accepted = self.client_completion_queue.get(_FUTURE)
+    self.assertIsNotNone(metadata_accepted)
+    self.assertEqual(_low.Event.Kind.METADATA_ACCEPTED, metadata_accepted.kind)
+    self.assertEqual(metadata_tag, metadata_accepted.tag)
+    # TODO(nathaniel): Test transmission and reception of metadata.
+
+    for datum in test_data:
+      client_call.write(datum, write_tag)
+      write_accepted = self.client_completion_queue.get(_FUTURE)
+      self.assertIsNotNone(write_accepted)
+      self.assertIs(write_accepted.kind, _low.Event.Kind.WRITE_ACCEPTED)
+      self.assertIs(write_accepted.tag, write_tag)
+      self.assertIs(write_accepted.write_accepted, True)
+
+      server_call.read(read_tag)
+      read_accepted = self.server_completion_queue.get(_FUTURE)
+      self.assertIsNotNone(read_accepted)
+      self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind)
+      self.assertEqual(read_tag, read_accepted.tag)
+      self.assertIsNotNone(read_accepted.bytes)
+      server_data.append(read_accepted.bytes)
+
+      server_call.write(read_accepted.bytes, write_tag)
+      write_accepted = self.server_completion_queue.get(_FUTURE)
+      self.assertIsNotNone(write_accepted)
+      self.assertEqual(_low.Event.Kind.WRITE_ACCEPTED, write_accepted.kind)
+      self.assertEqual(write_tag, write_accepted.tag)
+      self.assertTrue(write_accepted.write_accepted)
+
+      client_call.read(read_tag)
+      read_accepted = self.client_completion_queue.get(_FUTURE)
+      self.assertIsNotNone(read_accepted)
+      self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind)
+      self.assertEqual(read_tag, read_accepted.tag)
+      self.assertIsNotNone(read_accepted.bytes)
+      client_data.append(read_accepted.bytes)
+
+    client_call.complete(complete_tag)
+    complete_accepted = self.client_completion_queue.get(_FUTURE)
+    self.assertIsNotNone(complete_accepted)
+    self.assertIs(complete_accepted.kind, _low.Event.Kind.COMPLETE_ACCEPTED)
+    self.assertIs(complete_accepted.tag, complete_tag)
+    self.assertIs(complete_accepted.complete_accepted, True)
+
+    server_call.read(read_tag)
+    read_accepted = self.server_completion_queue.get(_FUTURE)
+    self.assertIsNotNone(read_accepted)
+    self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind)
+    self.assertEqual(read_tag, read_accepted.tag)
+    self.assertIsNone(read_accepted.bytes)
+
+    server_call.status(_low.Status(_low.Code.OK, details), status_tag)
+    server_terminal_event_one = self.server_completion_queue.get(_FUTURE)
+    server_terminal_event_two = self.server_completion_queue.get(_FUTURE)
+    if server_terminal_event_one.kind == _low.Event.Kind.COMPLETE_ACCEPTED:
+      status_accepted = server_terminal_event_one
+      rpc_accepted = server_terminal_event_two
+    else:
+      status_accepted = server_terminal_event_two
+      rpc_accepted = server_terminal_event_one
+    self.assertIsNotNone(status_accepted)
+    self.assertIsNotNone(rpc_accepted)
+    self.assertEqual(_low.Event.Kind.COMPLETE_ACCEPTED, status_accepted.kind)
+    self.assertEqual(status_tag, status_accepted.tag)
+    self.assertTrue(status_accepted.complete_accepted)
+    self.assertEqual(_low.Event.Kind.FINISH, rpc_accepted.kind)
+    self.assertEqual(finish_tag, rpc_accepted.tag)
+    self.assertEqual(_low.Status(_low.Code.OK, ''), rpc_accepted.status)
+
+    client_call.read(read_tag)
+    client_terminal_event_one = self.client_completion_queue.get(_FUTURE)
+    client_terminal_event_two = self.client_completion_queue.get(_FUTURE)
+    if client_terminal_event_one.kind == _low.Event.Kind.READ_ACCEPTED:
+      read_accepted = client_terminal_event_one
+      finish_accepted = client_terminal_event_two
+    else:
+      read_accepted = client_terminal_event_two
+      finish_accepted = client_terminal_event_one
+    self.assertIsNotNone(read_accepted)
+    self.assertIsNotNone(finish_accepted)
+    self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind)
+    self.assertEqual(read_tag, read_accepted.tag)
+    self.assertIsNone(read_accepted.bytes)
+    self.assertEqual(_low.Event.Kind.FINISH, finish_accepted.kind)
+    self.assertEqual(finish_tag, finish_accepted.tag)
+    self.assertEqual(_low.Status(_low.Code.OK, details), finish_accepted.status)
+
+    server_timeout_none_event = self.server_completion_queue.get(0)
+    self.assertIsNone(server_timeout_none_event)
+    client_timeout_none_event = self.client_completion_queue.get(0)
+    self.assertIsNone(client_timeout_none_event)
+
+    self.assertSequenceEqual(test_data, server_data)
+    self.assertSequenceEqual(test_data, client_data)
+
+  def testNoEcho(self):
+    self._perform_echo_test(())
+
+  def testOneByteEcho(self):
+    self._perform_echo_test([b'\x07'])
+
+  def testOneManyByteEcho(self):
+    self._perform_echo_test([_BYTE_SEQUENCE])
+
+  def testManyOneByteEchoes(self):
+    self._perform_echo_test(_BYTE_SEQUENCE)
+
+  def testManyManyByteEchoes(self):
+    self._perform_echo_test(_BYTE_SEQUENCE_SEQUENCE)
+
+
+class CancellationTest(unittest.TestCase):
+
+  def setUp(self):
+    self.host = 'localhost'
+
+    self.server_completion_queue = _low.CompletionQueue()
+    self.server = _low.Server(self.server_completion_queue)
+    port = self.server.add_http2_addr('[::]:0')
+    self.server.start()
+
+    self.client_completion_queue = _low.CompletionQueue()
+    self.channel = _low.Channel('%s:%d' % (self.host, port))
+
+  def tearDown(self):
+    self.server.stop()
+    del self.server
+    self.server_completion_queue.stop()
+    self.client_completion_queue.stop()
+    while True:
+      event = self.server_completion_queue.get(0)
+      if event is not None and event.kind is _low.Event.Kind.STOP:
+        break
+    while True:
+      event = self.client_completion_queue.get(0)
+      if event is not None and event.kind is _low.Event.Kind.STOP:
+        break
+
+  def testCancellation(self):
+    method = 'test method'
+    deadline = _FUTURE
+    metadata_tag = object()
+    finish_tag = object()
+    write_tag = object()
+    service_tag = object()
+    read_tag = object()
+    test_data = _BYTE_SEQUENCE_SEQUENCE
+
+    server_data = []
+    client_data = []
+
+    client_call = _low.Call(self.channel, method, self.host, deadline)
+
+    client_call.invoke(self.client_completion_queue, metadata_tag, finish_tag)
+
+    self.server.service(service_tag)
+    service_accepted = self.server_completion_queue.get(_FUTURE)
+    server_call = service_accepted.service_acceptance.call
+
+    server_call.accept(self.server_completion_queue, finish_tag)
+    server_call.premetadata()
+
+    metadata_accepted = self.client_completion_queue.get(_FUTURE)
+    self.assertIsNotNone(metadata_accepted)
+
+    for datum in test_data:
+      client_call.write(datum, write_tag)
+      write_accepted = self.client_completion_queue.get(_FUTURE)
+
+      server_call.read(read_tag)
+      read_accepted = self.server_completion_queue.get(_FUTURE)
+      server_data.append(read_accepted.bytes)
+
+      server_call.write(read_accepted.bytes, write_tag)
+      write_accepted = self.server_completion_queue.get(_FUTURE)
+      self.assertIsNotNone(write_accepted)
+
+      client_call.read(read_tag)
+      read_accepted = self.client_completion_queue.get(_FUTURE)
+      client_data.append(read_accepted.bytes)
+
+    client_call.cancel()
+    # cancel() is idempotent.
+    client_call.cancel()
+    client_call.cancel()
+    client_call.cancel()
+
+    server_call.read(read_tag)
+
+    server_terminal_event_one = self.server_completion_queue.get(_FUTURE)
+    server_terminal_event_two = self.server_completion_queue.get(_FUTURE)
+    if server_terminal_event_one.kind == _low.Event.Kind.READ_ACCEPTED:
+      read_accepted = server_terminal_event_one
+      rpc_accepted = server_terminal_event_two
+    else:
+      read_accepted = server_terminal_event_two
+      rpc_accepted = server_terminal_event_one
+    self.assertIsNotNone(read_accepted)
+    self.assertIsNotNone(rpc_accepted)
+    self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind)
+    self.assertIsNone(read_accepted.bytes)
+    self.assertEqual(_low.Event.Kind.FINISH, rpc_accepted.kind)
+    self.assertEqual(_low.Status(_low.Code.CANCELLED, ''), rpc_accepted.status)
+
+    finish_event = self.client_completion_queue.get(_FUTURE)
+    self.assertEqual(_low.Event.Kind.FINISH, finish_event.kind)
+    self.assertEqual(_low.Status(_low.Code.CANCELLED, ''), finish_event.status)
+
+    server_timeout_none_event = self.server_completion_queue.get(0)
+    self.assertIsNone(server_timeout_none_event)
+    client_timeout_none_event = self.client_completion_queue.get(0)
+    self.assertIsNone(client_timeout_none_event)
+
+    self.assertSequenceEqual(test_data, server_data)
+    self.assertSequenceEqual(test_data, client_data)
+
+
+class ExpirationTest(unittest.TestCase):
+
+  @unittest.skip('TODO(nathaniel): Expiration test!')
+  def testExpiration(self):
+    pass
+
+
+if __name__ == '__main__':
+  unittest.main()
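
Both tearDown methods above spin on completion_queue.get until a STOP event is observed; a sketch of that drain loop as a standalone helper (hypothetical, mirroring the code above and assuming `from _adapter import _low` as at the top of _low_test.py):

  def drain_until_stop(completion_queue, deadline):
    """Discards events until the queue delivers its terminal STOP event."""
    while True:
      event = completion_queue.get(deadline)
      if event is not None and event.kind is _low.Event.Kind.STOP:
        break

  # e.g. drain_until_stop(self.server_completion_queue, _FUTURE)
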
diff --git a/src/python/src/_adapter/_proto_scenarios.py b/src/python/src/_adapter/_proto_scenarios.py
new file mode 100644
index 0000000..c452fb5
--- /dev/null
+++ b/src/python/src/_adapter/_proto_scenarios.py
@@ -0,0 +1,261 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Test scenarios using protocol buffers."""
+
+import abc
+import threading
+
+from _junkdrawer import math_pb2
+
+
+class ProtoScenario(object):
+  """An RPC test scenario using protocol buffers."""
+  __metaclass__ = abc.ABCMeta
+
+  @abc.abstractmethod
+  def method(self):
+    """Access the test method name.
+
+    Returns:
+      The test method name.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def serialize_request(self, request):
+    """Serialize a request protocol buffer.
+
+    Args:
+      request: A request protocol buffer.
+
+    Returns:
+      The bytestring serialization of the given request protocol buffer.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def deserialize_request(self, request_bytestring):
+    """Deserialize a request protocol buffer.
+
+    Args:
+      request_bytestring: The bytestring serialization of a request protocol
+        buffer.
+
+    Returns:
+      The request protocol buffer deserialized from the given byte string.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def serialize_response(self, response):
+    """Serialize a response protocol buffer.
+
+    Args:
+      response: A response protocol buffer.
+
+    Returns:
+      The bytestring serialization of the given response protocol buffer.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def deserialize_response(self, response_bytestring):
+    """Deserialize a response protocol buffer.
+
+    Args:
+      response_bytestring: The bytestring serialization of a response protocol
+        buffer.
+
+    Returns:
+      The response protocol buffer deserialized from the given byte string.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def requests(self):
+    """Access the sequence of requests for this scenario.
+
+    Returns:
+      A sequence of request protocol buffers.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def response_for_request(self, request):
+    """Access the response for a particular request.
+
+    Args:
+      request: A request protocol buffer.
+
+    Returns:
+      The response protocol buffer appropriate for the given request.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def verify_requests(self, experimental_requests):
+    """Verify the requests transmitted through the system under test.
+
+    Args:
+      experimental_requests: The request protocol buffers transmitted through
+        the system under test.
+
+    Returns:
+      True if the requests satisfy this test scenario; False otherwise.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def verify_responses(self, experimental_responses):
+    """Verify the responses transmitted through the system under test.
+
+    Args:
+      experimental_responses: The response protocol buffers transmitted through
+        the system under test.
+
+    Returns:
+      True if the responses satisfy this test scenario; False otherwise.
+    """
+    raise NotImplementedError()
+
+
+class EmptyScenario(ProtoScenario):
+  """A scenario that transmits no protocol buffers in either direction."""
+
+  def method(self):
+    return 'DivMany'
+
+  def serialize_request(self, request):
+    raise ValueError('This should not be necessary to call!')
+
+  def deserialize_request(self, request_bytestring):
+    raise ValueError('This should not be necessary to call!')
+
+  def serialize_response(self, response):
+    raise ValueError('This should not be necessary to call!')
+
+  def deserialize_response(self, response_bytestring):
+    raise ValueError('This should not be necessary to call!')
+
+  def requests(self):
+    return ()
+
+  def response_for_request(self, request):
+    raise ValueError('This should not be necessary to call!')
+
+  def verify_requests(self, experimental_requests):
+    return not experimental_requests
+
+  def verify_responses(self, experimental_responses):
+    return not experimental_responses
+
+
+class BidirectionallyUnaryScenario(ProtoScenario):
+  """A scenario that transmits no protocol buffers in either direction."""
+
+  _DIVIDEND = 59
+  _DIVISOR = 7
+  _QUOTIENT = 8
+  _REMAINDER = 3
+
+  _REQUEST = math_pb2.DivArgs(dividend=_DIVIDEND, divisor=_DIVISOR)
+  _RESPONSE = math_pb2.DivReply(quotient=_QUOTIENT, remainder=_REMAINDER)
+
+  def method(self):
+    return 'Div'
+
+  def serialize_request(self, request):
+    return request.SerializeToString()
+
+  def deserialize_request(self, request_bytestring):
+    return math_pb2.DivArgs.FromString(request_bytestring)
+
+  def serialize_response(self, response):
+    return response.SerializeToString()
+
+  def deserialize_response(self, response_bytestring):
+    return math_pb2.DivReply.FromString(response_bytestring)
+
+  def requests(self):
+    return [self._REQUEST]
+
+  def response_for_request(self, request):
+    return self._RESPONSE
+
+  def verify_requests(self, experimental_requests):
+    return tuple(experimental_requests) == (self._REQUEST,)
+
+  def verify_responses(self, experimental_responses):
+    return tuple(experimental_responses) == (self._RESPONSE,)
+
+
+class BidirectionallyStreamingScenario(ProtoScenario):
+  """A scenario that transmits no protocol buffers in either direction."""
+
+  _STREAM_LENGTH = 200
+  _REQUESTS = tuple(
+      math_pb2.DivArgs(dividend=59 + index, divisor=7 + index)
+      for index in range(_STREAM_LENGTH))
+
+  def __init__(self):
+    self._lock = threading.Lock()
+    self._responses = []
+
+  def method(self):
+    return 'DivMany'
+
+  def serialize_request(self, request):
+    return request.SerializeToString()
+
+  def deserialize_request(self, request_bytestring):
+    return math_pb2.DivArgs.FromString(request_bytestring)
+
+  def serialize_response(self, response):
+    return response.SerializeToString()
+
+  def deserialize_response(self, response_bytestring):
+    return math_pb2.DivReply.FromString(response_bytestring)
+
+  def requests(self):
+    return self._REQUESTS
+
+  def response_for_request(self, request):
+    quotient, remainder = divmod(request.dividend, request.divisor)
+    response = math_pb2.DivReply(quotient=quotient, remainder=remainder)
+    with self._lock:
+      self._responses.append(response)
+    return response
+
+  def verify_requests(self, experimental_requests):
+    return tuple(experimental_requests) == self._REQUESTS
+
+  def verify_responses(self, experimental_responses):
+    with self._lock:
+      return tuple(experimental_responses) == tuple(self._responses)
diff --git a/src/python/src/_adapter/_server.c b/src/python/src/_adapter/_server.c
new file mode 100644
index 0000000..a40d32f
--- /dev/null
+++ b/src/python/src/_adapter/_server.c
@@ -0,0 +1,167 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "_adapter/_server.h"
+
+#include <Python.h>
+#include <grpc/grpc.h>
+
+#include "_adapter/_completion_queue.h"
+#include "_adapter/_error.h"
+
+static int pygrpc_server_init(Server *self, PyObject *args, PyObject *kwds) {
+  const PyObject *completion_queue;
+  if (!(PyArg_ParseTuple(args, "O!", &pygrpc_CompletionQueueType,
+                         &completion_queue))) {
+    self->c_server = NULL;
+    return -1;
+  }
+
+  self->c_server = grpc_server_create(
+      ((CompletionQueue *)completion_queue)->c_completion_queue, NULL);
+  return 0;
+}
+
+static void pygrpc_server_dealloc(Server *self) {
+  if (self->c_server != NULL) {
+    grpc_server_destroy(self->c_server);
+  }
+  self->ob_type->tp_free((PyObject *)self);
+}
+
+static PyObject *pygrpc_server_add_http2_addr(Server *self, PyObject *args) {
+  const char *addr;
+  int port;
+  if (!(PyArg_ParseTuple(args, "s", &addr))) {
+    return NULL;
+  }
+
+  port = grpc_server_add_http2_port(self->c_server, addr);
+  if (port == 0) {
+    PyErr_SetString(PyExc_RuntimeError, "Couldn't add port to server!");
+    return NULL;
+  }
+
+  return PyInt_FromLong(port);
+}
+
+static PyObject *pygrpc_server_start(Server *self) {
+  grpc_server_start(self->c_server);
+
+  Py_RETURN_NONE;
+}
+
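+/* Requests a call from the core server. If the call error translates to
+   success, the tag is INCREF'd so it remains alive until the corresponding
+   completion-queue event is handled. */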
+static const PyObject *pygrpc_server_service(Server *self, PyObject *args) {
+  const PyObject *tag;
+  grpc_call_error call_error;
+  const PyObject *result;
+
+  if (!(PyArg_ParseTuple(args, "O", &tag))) {
+    return NULL;
+  }
+
+  call_error = grpc_server_request_call(self->c_server, (void *)tag);
+
+  result = pygrpc_translate_call_error(call_error);
+  if (result != NULL) {
+    Py_INCREF(tag);
+  }
+  return result;
+}
+
+static PyObject *pygrpc_server_stop(Server *self) {
+  grpc_server_shutdown(self->c_server);
+
+  Py_RETURN_NONE;
+}
+
+static PyMethodDef methods[] = {
+    {"add_http2_addr", (PyCFunction)pygrpc_server_add_http2_addr, METH_VARARGS,
+     "Add an HTTP2 address."},
+    {"start", (PyCFunction)pygrpc_server_start, METH_NOARGS,
+     "Starts the server."},
+    {"service", (PyCFunction)pygrpc_server_service, METH_VARARGS,
+     "Services a call."},
+    {"stop", (PyCFunction)pygrpc_server_stop, METH_NOARGS, "Stops the server."},
+    {NULL}};
+
+static PyTypeObject pygrpc_ServerType = {
+    PyObject_HEAD_INIT(NULL)0,         /*ob_size*/
+    "_gprc.Server",                    /*tp_name*/
+    sizeof(Server),                    /*tp_basicsize*/
+    0,                                 /*tp_itemsize*/
+    (destructor)pygrpc_server_dealloc, /*tp_dealloc*/
+    0,                                 /*tp_print*/
+    0,                                 /*tp_getattr*/
+    0,                                 /*tp_setattr*/
+    0,                                 /*tp_compare*/
+    0,                                 /*tp_repr*/
+    0,                                 /*tp_as_number*/
+    0,                                 /*tp_as_sequence*/
+    0,                                 /*tp_as_mapping*/
+    0,                                 /*tp_hash */
+    0,                                 /*tp_call*/
+    0,                                 /*tp_str*/
+    0,                                 /*tp_getattro*/
+    0,                                 /*tp_setattro*/
+    0,                                 /*tp_as_buffer*/
+    Py_TPFLAGS_DEFAULT,                /*tp_flags*/
+    "Wrapping of grpc_server.",        /* tp_doc */
+    0,                                 /* tp_traverse */
+    0,                                 /* tp_clear */
+    0,                                 /* tp_richcompare */
+    0,                                 /* tp_weaklistoffset */
+    0,                                 /* tp_iter */
+    0,                                 /* tp_iternext */
+    methods,                           /* tp_methods */
+    0,                                 /* tp_members */
+    0,                                 /* tp_getset */
+    0,                                 /* tp_base */
+    0,                                 /* tp_dict */
+    0,                                 /* tp_descr_get */
+    0,                                 /* tp_descr_set */
+    0,                                 /* tp_dictoffset */
+    (initproc)pygrpc_server_init,      /* tp_init */
+};
+
+int pygrpc_add_server(PyObject *module) {
+  pygrpc_ServerType.tp_new = PyType_GenericNew;
+  if (PyType_Ready(&pygrpc_ServerType) < 0) {
+    PyErr_SetString(PyExc_RuntimeError, "Error defining pygrpc_ServerType!");
+    return -1;
+  }
+  if (PyModule_AddObject(module, "Server", (PyObject *)&pygrpc_ServerType) ==
+      -1) {
+    PyErr_SetString(PyExc_ImportError, "Couldn't add Server type to module!");
+    return -1;
+  }
+  return 0;
+}
diff --git a/src/python/src/_adapter/_server.h b/src/python/src/_adapter/_server.h
new file mode 100644
index 0000000..0c517e3
--- /dev/null
+++ b/src/python/src/_adapter/_server.h
@@ -0,0 +1,44 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _ADAPTER__SERVER_H_
+#define _ADAPTER__SERVER_H_
+
+#include <Python.h>
+#include <grpc/grpc.h>
+
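+/* Python wrapper around a grpc_server; c_server is owned by the wrapper and is
+   destroyed in pygrpc_server_dealloc. */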
+typedef struct { PyObject_HEAD grpc_server *c_server; } Server;
+
+int pygrpc_add_server(PyObject *module);
+
+#endif /* _ADAPTER__SERVER_H_ */
diff --git a/src/python/src/_adapter/_test_links.py b/src/python/src/_adapter/_test_links.py
new file mode 100644
index 0000000..77d1b00
--- /dev/null
+++ b/src/python/src/_adapter/_test_links.py
@@ -0,0 +1,80 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Links suitable for use in tests."""
+
+import threading
+
+from _framework.base.packets import interfaces
+
+
+class ForeLink(interfaces.ForeLink):
+  """A ForeLink suitable for use in tests of RearLinks."""
+
+  def __init__(self, action, rear_link):
+    self.condition = threading.Condition()
+    self.tickets = []
+    self.action = action
+    self.rear_link = rear_link
+
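+  # Tickets are recorded and waiters notified while holding the condition; the
+  # configured action, if any, is invoked only after the condition is released.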
+  def accept_back_to_front_ticket(self, ticket):
+    with self.condition:
+      self.tickets.append(ticket)
+      self.condition.notify_all()
+      action, rear_link = self.action, self.rear_link
+
+    if action is not None:
+      action(ticket, rear_link)
+
+  def join_rear_link(self, rear_link):
+    with self.condition:
+      self.rear_link = rear_link
+
+
+class RearLink(interfaces.RearLink):
+  """A RearLink suitable for use in tests of ForeLinks."""
+
+  def __init__(self, action, fore_link):
+    self.condition = threading.Condition()
+    self.tickets = []
+    self.action = action
+    self.fore_link = fore_link
+
+  def accept_front_to_back_ticket(self, ticket):
+    with self.condition:
+      self.tickets.append(ticket)
+      self.condition.notify_all()
+      action, fore_link = self.action, self.fore_link
+
+    if action is not None:
+      action(ticket, fore_link)
+
+  def join_fore_link(self, fore_link):
+    with self.condition:
+      self.fore_link = fore_link
diff --git a/src/python/src/_adapter/fore.py b/src/python/src/_adapter/fore.py
new file mode 100644
index 0000000..c307e7c
--- /dev/null
+++ b/src/python/src/_adapter/fore.py
@@ -0,0 +1,310 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""The RPC-service-side bridge between RPC Framework and GRPC-on-the-wire."""
+
+import enum
+import logging
+import threading
+import time
+
+from _adapter import _common
+from _adapter import _low
+from _framework.base import interfaces
+from _framework.base.packets import interfaces as ticket_interfaces
+from _framework.base.packets import null
+from _framework.base.packets import packets as tickets
+
+
+@enum.unique
+class _LowWrite(enum.Enum):
+  """The possible categories of low-level write state."""
+
+  OPEN = 'OPEN'
+  ACTIVE = 'ACTIVE'
+  CLOSED = 'CLOSED'
+
+
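+# Only one low-level write may be in flight per call: when the low write state
+# is OPEN the payload is written immediately, otherwise it is buffered in
+# rpc_state.write.pending until _on_write_event drains it.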
+def _write(call, rpc_state, payload):
+  serialized_payload = rpc_state.serializer(payload)
+  if rpc_state.write.low is _LowWrite.OPEN:
+    call.write(serialized_payload, call)
+    rpc_state.write.low = _LowWrite.ACTIVE
+  else:
+    rpc_state.write.pending.append(serialized_payload)
+
+
+def _status(call, rpc_state):
+  call.status(_low.Status(_low.Code.OK, ''), call)
+  rpc_state.write.low = _LowWrite.CLOSED
+
+
+class ForeLink(ticket_interfaces.ForeLink):
+  """A service-side bridge between RPC Framework and the C-ish _low code."""
+
+  def __init__(
+      self, pool, request_deserializers, response_serializers, port=None):
+    """Constructor.
+
+    Args:
+      pool: A thread pool.
+      request_deserializers: A dict from RPC method names to request object
+        deserializer behaviors.
+      response_serializers: A dict from RPC method names to response object
+        serializer behaviors.
+      port: The port on which to serve, or None to have a port selected
+        automatically.
+    """
+    self._condition = threading.Condition()
+    self._pool = pool
+    self._request_deserializers = request_deserializers
+    self._response_serializers = response_serializers
+    self._port = port
+
+    self._rear_link = null.NULL_REAR_LINK
+    self._completion_queue = None
+    self._server = None
+    self._rpc_states = {}
+    self._spinning = False
+
+  def _on_stop_event(self):
+    self._spinning = False
+    self._condition.notify_all()
+
+  def _on_service_acceptance_event(self, event, server):
+    """Handle a service invocation event."""
+    service_acceptance = event.service_acceptance
+    if service_acceptance is None:
+      return
+
+    call = service_acceptance.call
+    call.accept(self._completion_queue, call)
+    # TODO(nathaniel): Metadata support.
+    call.premetadata()
+    call.read(call)
+    method = service_acceptance.method
+
+    self._rpc_states[call] = _common.CommonRPCState(
+        _common.WriteState(_LowWrite.OPEN, _common.HighWrite.OPEN, []), 1,
+        self._request_deserializers[method],
+        self._response_serializers[method])
+
+    ticket = tickets.FrontToBackPacket(
+        call, 0, tickets.Kind.COMMENCEMENT, method,
+        interfaces.ServicedSubscription.Kind.FULL, None, None,
+        service_acceptance.deadline - time.time())
+    self._rear_link.accept_front_to_back_ticket(ticket)
+
+    server.service(None)
+
+  def _on_read_event(self, event):
+    """Handle data arriving during an RPC."""
+    call = event.tag
+    rpc_state = self._rpc_states.get(call, None)
+    if rpc_state is None:
+      return
+
+    sequence_number = rpc_state.sequence_number
+    rpc_state.sequence_number += 1
+    if event.bytes is None:
+      ticket = tickets.FrontToBackPacket(
+          call, sequence_number, tickets.Kind.COMPLETION, None, None, None,
+          None, None)
+    else:
+      call.read(call)
+      ticket = tickets.FrontToBackPacket(
+          call, sequence_number, tickets.Kind.CONTINUATION, None, None, None,
+          rpc_state.deserializer(event.bytes), None)
+
+    self._rear_link.accept_front_to_back_ticket(ticket)
+
+  def _on_write_event(self, event):
+    call = event.tag
+    rpc_state = self._rpc_states.get(call, None)
+    if rpc_state is None:
+      return
+
+    if rpc_state.write.pending:
+      serialized_payload = rpc_state.write.pending.pop(0)
+      call.write(serialized_payload, call)
+    elif rpc_state.write.high is _common.HighWrite.CLOSED:
+      _status(call, rpc_state)
+    else:
+      rpc_state.write.low = _LowWrite.OPEN
+
+  def _on_complete_event(self, event):
+    if not event.complete_accepted:
+      logging.error('Complete not accepted! %s', (event,))
+      call = event.tag
+      rpc_state = self._rpc_states.pop(call, None)
+      if rpc_state is None:
+        return
+
+      sequence_number = rpc_state.sequence_number
+      rpc_state.sequence_number += 1
+      ticket = tickets.FrontToBackPacket(
+          call, sequence_number, tickets.Kind.TRANSMISSION_FAILURE, None, None,
+          None, None, None)
+      self._rear_link.accept_front_to_back_ticket(ticket)
+
+  def _on_finish_event(self, event):
+    """Handle termination of an RPC."""
+    call = event.tag
+    rpc_state = self._rpc_states.pop(call, None)
+    if rpc_state is None:
+      return
+
+    code = event.status.code
+    if code is _low.Code.OK:
+      return
+
+    sequence_number = rpc_state.sequence_number
+    rpc_state.sequence_number += 1
+    if code is _low.Code.CANCELLED:
+      ticket = tickets.FrontToBackPacket(
+          call, sequence_number, tickets.Kind.CANCELLATION, None, None, None,
+          None, None)
+    elif code is _low.Code.EXPIRED:
+      ticket = tickets.FrontToBackPacket(
+          call, sequence_number, tickets.Kind.EXPIRATION, None, None, None,
+          None, None)
+    else:
+      # TODO(nathaniel): Better mapping of codes to ticket-categories
+      ticket = tickets.FrontToBackPacket(
+          call, sequence_number, tickets.Kind.TRANSMISSION_FAILURE, None, None,
+          None, None, None)
+    self._rear_link.accept_front_to_back_ticket(ticket)
+
+  def _spin(self, completion_queue, server):
+    while True:
+      event = completion_queue.get(None)
+
+      with self._condition:
+        if event.kind is _low.Event.Kind.STOP:
+          self._on_stop_event()
+          return
+        elif self._server is None:
+          continue
+        elif event.kind is _low.Event.Kind.SERVICE_ACCEPTED:
+          self._on_service_acceptance_event(event, server)
+        elif event.kind is _low.Event.Kind.READ_ACCEPTED:
+          self._on_read_event(event)
+        elif event.kind is _low.Event.Kind.WRITE_ACCEPTED:
+          self._on_write_event(event)
+        elif event.kind is _low.Event.Kind.COMPLETE_ACCEPTED:
+          self._on_complete_event(event)
+        elif event.kind is _low.Event.Kind.FINISH:
+          self._on_finish_event(event)
+        else:
+          logging.error('Illegal event! %s', (event,))
+
+  def _continue(self, call, payload):
+    rpc_state = self._rpc_states.get(call, None)
+    if rpc_state is None:
+      return
+
+    _write(call, rpc_state, payload)
+
+  def _complete(self, call, payload):
+    """Handle completion of the writes of an RPC."""
+    rpc_state = self._rpc_states.get(call, None)
+    if rpc_state is None:
+      return
+
+    if rpc_state.write.low is _LowWrite.OPEN:
+      if payload is None:
+        _status(call, rpc_state)
+      else:
+        _write(call, rpc_state, payload)
+    elif rpc_state.write.low is _LowWrite.ACTIVE:
+      if payload is not None:
+        rpc_state.write.pending.append(rpc_state.serializer(payload))
+    else:
+      raise ValueError('Called to complete after having already completed!')
+    rpc_state.write.high = _common.HighWrite.CLOSED
+
+  def _cancel(self, call):
+    call.cancel()
+    self._rpc_states.pop(call, None)
+
+  def join_rear_link(self, rear_link):
+    """See ticket_interfaces.ForeLink.join_rear_link for specification."""
+    self._rear_link = null.NULL_REAR_LINK if rear_link is None else rear_link
+
+  def start(self):
+    """Starts this ForeLink.
+
+    This method must be called before attempting to exchange tickets with this
+    object.
+    """
+    with self._condition:
+      self._completion_queue = _low.CompletionQueue()
+      self._server = _low.Server(self._completion_queue)
+      port = self._server.add_http2_addr(
+          '[::]:%d' % (0 if self._port is None else self._port))
+      self._server.start()
+
+      self._server.service(None)
+
+      self._pool.submit(self._spin, self._completion_queue, self._server)
+      self._spinning = True
+
+      return port
+
+  # TODO(nathaniel): Expose graceful-shutdown semantics in which this object
+  # enters a state in which it finishes ongoing RPCs but refuses new ones.
+  def stop(self):
+    """Stops this ForeLink.
+
+    This method must be called for proper termination of this object, and no
+    attempts to exchange tickets with this object may be made after this method
+    has been called.
+    """
+    with self._condition:
+      self._server.stop()
+      # TODO(b/18904187): Yep, this is weird. Deleting a server shouldn't have a
+      # behaviorally significant side-effect.
+      self._server = None
+      self._completion_queue.stop()
+
+      while self._spinning:
+        self._condition.wait()
+
+  def accept_back_to_front_ticket(self, ticket):
+    """See ticket_interfaces.ForeLink.accept_back_to_front_ticket for spec."""
+    with self._condition:
+      if self._server is None:
+        return
+
+      if ticket.kind is tickets.Kind.CONTINUATION:
+        self._continue(ticket.operation_id, ticket.payload)
+      elif ticket.kind is tickets.Kind.COMPLETION:
+        self._complete(ticket.operation_id, ticket.payload)
+      else:
+        self._cancel(ticket.operation_id)
diff --git a/src/python/src/_adapter/rear.py b/src/python/src/_adapter/rear.py
new file mode 100644
index 0000000..5e0975a
--- /dev/null
+++ b/src/python/src/_adapter/rear.py
@@ -0,0 +1,344 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""The RPC-invocation-side bridge between RPC Framework and GRPC-on-the-wire."""
+
+import enum
+import logging
+import threading
+import time
+
+from _adapter import _common
+from _adapter import _low
+from _framework.base.packets import interfaces as ticket_interfaces
+from _framework.base.packets import null
+from _framework.base.packets import packets as tickets
+
+_INVOCATION_EVENT_KINDS = (
+    _low.Event.Kind.METADATA_ACCEPTED,
+    _low.Event.Kind.FINISH
+)
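+# Every invocation expects at least these two event kinds; they seed the
+# per-RPC "outstanding" set created in _invoke below.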
+
+
+@enum.unique
+class _LowWrite(enum.Enum):
+  """The possible categories of low-level write state."""
+
+  OPEN = 'OPEN'
+  ACTIVE = 'ACTIVE'
+  CLOSED = 'CLOSED'
+
+
+class _RPCState(object):
+  """The full state of any tracked RPC.
+
+  Attributes:
+    call: The _low.Call object for the RPC.
+    outstanding: The set of Event.Kind values describing expected future events
+      for the RPC.
+    active: A boolean indicating whether or not the RPC is active.
+    common: An _common.RPCState describing additional state for the RPC.
+  """
+
+  def __init__(self, call, outstanding, active, common):
+    self.call = call
+    self.outstanding = outstanding
+    self.active = active
+    self.common = common
+
+
+def _write(operation_id, call, outstanding, write_state, serialized_payload):
+  if write_state.low is _LowWrite.OPEN:
+    call.write(serialized_payload, operation_id)
+    outstanding.add(_low.Event.Kind.WRITE_ACCEPTED)
+    write_state.low = _LowWrite.ACTIVE
+  elif write_state.low is _LowWrite.ACTIVE:
+    write_state.pending.append(serialized_payload)
+  else:
+    raise ValueError('Write attempted after writes completed!')
+
+
+class RearLink(ticket_interfaces.RearLink):
+  """An invocation-side bridge between RPC Framework and the C-ish _low code."""
+
+  def __init__(
+      self, host, port, pool, request_serializers, response_deserializers):
+    """Constructor.
+
+    Args:
+      host: The host to which to connect for RPC service.
+      port: The port to which to connect for RPC service.
+      pool: A thread pool.
+      request_serializers: A dict from RPC method names to request object
+        serializer behaviors.
+      response_deserializers: A dict from RPC method names to response object
+        deserializer behaviors.
+    """
+    self._condition = threading.Condition()
+    self._host = host
+    self._port = port
+    self._pool = pool
+    self._request_serializers = request_serializers
+    self._response_deserializers = response_deserializers
+
+    self._fore_link = null.NULL_FORE_LINK
+    self._completion_queue = None
+    self._channel = None
+    self._rpc_states = {}
+    self._spinning = False
+
+  def _on_write_event(self, operation_id, event, rpc_state):
+    if event.write_accepted:
+      if rpc_state.common.write.pending:
+        rpc_state.call.write(
+            rpc_state.common.write.pending.pop(0), operation_id)
+        rpc_state.outstanding.add(_low.Event.Kind.WRITE_ACCEPTED)
+      elif rpc_state.common.write.high is _common.HighWrite.CLOSED:
+        rpc_state.call.complete(operation_id)
+        rpc_state.outstanding.add(_low.Event.Kind.COMPLETE_ACCEPTED)
+        rpc_state.common.write.low = _LowWrite.CLOSED
+      else:
+        rpc_state.common.write.low = _LowWrite.OPEN
+    else:
+      logging.error('RPC write not accepted! Event: %s', (event,))
+      rpc_state.active = False
+      ticket = tickets.BackToFrontPacket(
+          operation_id, rpc_state.common.sequence_number,
+          tickets.Kind.TRANSMISSION_FAILURE, None)
+      rpc_state.common.sequence_number += 1
+      self._fore_link.accept_back_to_front_ticket(ticket)
+
+  def _on_read_event(self, operation_id, event, rpc_state):
+    if event.bytes is not None:
+      rpc_state.call.read(operation_id)
+      rpc_state.outstanding.add(_low.Event.Kind.READ_ACCEPTED)
+
+      ticket = tickets.BackToFrontPacket(
+          operation_id, rpc_state.common.sequence_number,
+          tickets.Kind.CONTINUATION, rpc_state.common.deserializer(event.bytes))
+      rpc_state.common.sequence_number += 1
+      self._fore_link.accept_back_to_front_ticket(ticket)
+
+  def _on_complete_event(self, operation_id, event, rpc_state):
+    if not event.complete_accepted:
+      logging.error('RPC complete not accepted! Event: %s', (event,))
+      rpc_state.active = False
+      ticket = tickets.BackToFrontPacket(
+          operation_id, rpc_state.common.sequence_number,
+          tickets.Kind.TRANSMISSION_FAILURE, None)
+      rpc_state.common.sequence_number += 1
+      self._fore_link.accept_back_to_front_ticket(ticket)
+
+  # TODO(nathaniel): Metadata support.
+  def _on_metadata_event(self, operation_id, event, rpc_state):  # pylint: disable=unused-argument
+    rpc_state.call.read(operation_id)
+    rpc_state.outstanding.add(_low.Event.Kind.READ_ACCEPTED)
+
+  def _on_finish_event(self, operation_id, event, rpc_state):
+    """Handle termination of an RPC."""
+    # TODO(nathaniel): Cover all statuses.
+    if event.status.code is _low.Code.OK:
+      category = tickets.Kind.COMPLETION
+    elif event.status.code is _low.Code.CANCELLED:
+      category = tickets.Kind.CANCELLATION
+    elif event.status.code is _low.Code.EXPIRED:
+      category = tickets.Kind.EXPIRATION
+    else:
+      category = tickets.Kind.TRANSMISSION_FAILURE
+    ticket = tickets.BackToFrontPacket(
+        operation_id, rpc_state.common.sequence_number, category,
+        None)
+    rpc_state.common.sequence_number += 1
+    self._fore_link.accept_back_to_front_ticket(ticket)
+
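+  # Completion-queue event loop: each expected low-level event kind is recorded
+  # in the RPC's "outstanding" set when the corresponding call operation is
+  # issued; once an RPC has no outstanding events its state is dropped, and the
+  # loop exits when no tracked RPCs remain.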
+  def _spin(self, completion_queue):
+    while True:
+      event = completion_queue.get(None)
+      operation_id = event.tag
+
+      with self._condition:
+        rpc_state = self._rpc_states[operation_id]
+        rpc_state.outstanding.remove(event.kind)
+        if rpc_state.active and self._completion_queue is not None:
+          if event.kind is _low.Event.Kind.WRITE_ACCEPTED:
+            self._on_write_event(operation_id, event, rpc_state)
+          elif event.kind is _low.Event.Kind.METADATA_ACCEPTED:
+            self._on_metadata_event(operation_id, event, rpc_state)
+          elif event.kind is _low.Event.Kind.READ_ACCEPTED:
+            self._on_read_event(operation_id, event, rpc_state)
+          elif event.kind is _low.Event.Kind.COMPLETE_ACCEPTED:
+            self._on_complete_event(operation_id, event, rpc_state)
+          elif event.kind is _low.Event.Kind.FINISH:
+            self._on_finish_event(operation_id, event, rpc_state)
+          else:
+            logging.error('Illegal RPC event! %s', (event,))
+
+        if not rpc_state.outstanding:
+          self._rpc_states.pop(operation_id)
+        if not self._rpc_states:
+          self._spinning = False
+          self._condition.notify_all()
+          return
+
+  def _invoke(self, operation_id, name, high_state, payload, timeout):
+    """Invoke an RPC.
+
+    Args:
+      operation_id: Any object to be used as an operation ID for the RPC.
+      name: The RPC method name.
+      high_state: A _common.HighWrite value representing the "high write state"
+        of the RPC.
+      payload: A payload object for the RPC or None if no payload was given at
+        invocation-time.
+      timeout: A duration of time in seconds to allow for the RPC.
+    """
+    request_serializer = self._request_serializers[name]
+    call = _low.Call(self._channel, name, self._host, time.time() + timeout)
+    call.invoke(self._completion_queue, operation_id, operation_id)
+    outstanding = set(_INVOCATION_EVENT_KINDS)
+
+    if payload is None:
+      if high_state is _common.HighWrite.CLOSED:
+        call.complete(operation_id)
+        low_state = _LowWrite.CLOSED
+        outstanding.add(_low.Event.Kind.COMPLETE_ACCEPTED)
+      else:
+        low_state = _LowWrite.OPEN
+    else:
+      serialized_payload = request_serializer(payload)
+      call.write(serialized_payload, operation_id)
+      outstanding.add(_low.Event.Kind.WRITE_ACCEPTED)
+      low_state = _LowWrite.ACTIVE
+
+    write_state = _common.WriteState(low_state, high_state, [])
+    common_state = _common.CommonRPCState(
+        write_state, 0, self._response_deserializers[name], request_serializer)
+    self._rpc_states[operation_id] = _RPCState(
+        call, outstanding, True, common_state)
+
+    if not self._spinning:
+      self._pool.submit(self._spin, self._completion_queue)
+      self._spinning = True
+
+  def _commence(self, operation_id, name, payload, timeout):
+    self._invoke(operation_id, name, _common.HighWrite.OPEN, payload, timeout)
+
+  def _continue(self, operation_id, payload):
+    rpc_state = self._rpc_states.get(operation_id, None)
+    if rpc_state is None or not rpc_state.active:
+      return
+
+    _write(
+        operation_id, rpc_state.call, rpc_state.outstanding,
+        rpc_state.common.write, rpc_state.common.serializer(payload))
+
+  def _complete(self, operation_id, payload):
+    """Close writes associated with an ongoing RPC.
+
+    Args:
+      operation_id: Any object being used as an operation ID for the RPC.
+      payload: A payload object for the RPC (and thus the last payload object
+        for the RPC) or None if no payload was given along with the instruction
+        to indicate the end of writes for the RPC.
+    """
+    rpc_state = self._rpc_states.get(operation_id, None)
+    if rpc_state is None or not rpc_state.active:
+      return
+
+    write_state = rpc_state.common.write
+    if payload is None:
+      if write_state.low is _LowWrite.OPEN:
+        rpc_state.call.complete(operation_id)
+        rpc_state.outstanding.add(_low.Event.Kind.COMPLETE_ACCEPTED)
+        write_state.low = _LowWrite.CLOSED
+    else:
+      _write(
+          operation_id, rpc_state.call, rpc_state.outstanding, write_state,
+          rpc_state.common.serializer(payload))
+    write_state.high = _common.HighWrite.CLOSED
+
+  def _entire(self, operation_id, name, payload, timeout):
+    self._invoke(operation_id, name, _common.HighWrite.CLOSED, payload, timeout)
+
+  def _cancel(self, operation_id):
+    rpc_state = self._rpc_states.get(operation_id, None)
+    if rpc_state is not None and rpc_state.active:
+      rpc_state.call.cancel()
+      rpc_state.active = False
+
+  def join_fore_link(self, fore_link):
+    """See ticket_interfaces.RearLink.join_fore_link for specification."""
+    with self._condition:
+      self._fore_link = null.NULL_FORE_LINK if fore_link is None else fore_link
+
+  def start(self):
+    """Starts this RearLink.
+
+    This method must be called before attempting to exchange tickets with this
+    object.
+    """
+    with self._condition:
+      self._completion_queue = _low.CompletionQueue()
+      self._channel = _low.Channel('%s:%d' % (self._host, self._port))
+
+  def stop(self):
+    """Stops this RearLink.
+
+    This method must be called for proper termination of this object, and no
+    attempts to exchange tickets with this object may be made after this method
+    has been called.
+    """
+    with self._condition:
+      self._completion_queue.stop()
+      self._completion_queue = None
+
+      while self._spinning:
+        self._condition.wait()
+
+  def accept_front_to_back_ticket(self, ticket):
+    """See ticket_interfaces.RearLink.accept_front_to_back_ticket for spec."""
+    with self._condition:
+      if self._completion_queue is None:
+        return
+
+      if ticket.kind is tickets.Kind.COMMENCEMENT:
+        self._commence(
+            ticket.operation_id, ticket.name, ticket.payload, ticket.timeout)
+      elif ticket.kind is tickets.Kind.CONTINUATION:
+        self._continue(ticket.operation_id, ticket.payload)
+      elif ticket.kind is tickets.Kind.COMPLETION:
+        self._complete(ticket.operation_id, ticket.payload)
+      elif ticket.kind is tickets.Kind.ENTIRE:
+        self._entire(
+            ticket.operation_id, ticket.name, ticket.payload, ticket.timeout)
+      elif ticket.kind is tickets.Kind.CANCELLATION:
+        self._cancel(ticket.operation_id)
+      else:
+        # NOTE(nathaniel): All other categories are treated as cancellation.
+        self._cancel(ticket.operation_id)
diff --git a/src/python/_framework/__init__.py b/src/python/src/_framework/__init__.py
similarity index 100%
rename from src/python/_framework/__init__.py
rename to src/python/src/_framework/__init__.py
diff --git a/src/python/_framework/base/__init__.py b/src/python/src/_framework/base/__init__.py
similarity index 100%
rename from src/python/_framework/base/__init__.py
rename to src/python/src/_framework/base/__init__.py
diff --git a/src/python/_framework/base/exceptions.py b/src/python/src/_framework/base/exceptions.py
similarity index 100%
rename from src/python/_framework/base/exceptions.py
rename to src/python/src/_framework/base/exceptions.py
diff --git a/src/python/_framework/base/interfaces.py b/src/python/src/_framework/base/interfaces.py
similarity index 86%
rename from src/python/_framework/base/interfaces.py
rename to src/python/src/_framework/base/interfaces.py
index de7137c..70030e5 100644
--- a/src/python/_framework/base/interfaces.py
+++ b/src/python/src/_framework/base/interfaces.py
@@ -29,27 +29,24 @@
 
 """Interfaces defined and used by the base layer of RPC Framework."""
 
-# TODO(nathaniel): Use Python's new enum library for enumerated types rather
-# than constants merely placed close together.
-
 import abc
+import enum
 
 # stream is referenced from specification in this module.
 from _framework.foundation import stream  # pylint: disable=unused-import
 
-# Operation outcomes.
-COMPLETED = 'completed'
-CANCELLED = 'cancelled'
-EXPIRED = 'expired'
-RECEPTION_FAILURE = 'reception failure'
-TRANSMISSION_FAILURE = 'transmission failure'
-SERVICER_FAILURE = 'servicer failure'
-SERVICED_FAILURE = 'serviced failure'
 
-# Subscription categories.
-FULL = 'full'
-TERMINATION_ONLY = 'termination only'
-NONE = 'none'
+@enum.unique
+class Outcome(enum.Enum):
+  """Operation outcomes."""
+
+  COMPLETED = 'completed'
+  CANCELLED = 'cancelled'
+  EXPIRED = 'expired'
+  RECEPTION_FAILURE = 'reception failure'
+  TRANSMISSION_FAILURE = 'transmission failure'
+  SERVICER_FAILURE = 'servicer failure'
+  SERVICED_FAILURE = 'serviced failure'
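+  # Outcome values replace the former module-level string constants; for
+  # example, termination callbacks now receive Outcome.COMPLETED rather than
+  # the string 'completed'.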
 
 
 class OperationContext(object):
@@ -70,9 +67,7 @@
     """Adds a function to be called upon operation termination.
 
     Args:
-      callback: A callable that will be passed one of COMPLETED, CANCELLED,
-        EXPIRED, RECEPTION_FAILURE, TRANSMISSION_FAILURE, SERVICER_FAILURE, or
-        SERVICED_FAILURE.
+      callback: A callable that will be passed an Outcome value.
     """
     raise NotImplementedError()
 
@@ -167,11 +162,20 @@
   """A sum type representing a serviced's interest in an operation.
 
   Attributes:
-    category: One of FULL, TERMINATION_ONLY, or NONE.
-    ingestor: A ServicedIngestor. Must be present if category is FULL.
+    kind: A Kind value.
+    ingestor: A ServicedIngestor. Must be present if kind is Kind.FULL. Must
+      be None if kind is Kind.TERMINATION_ONLY or Kind.NONE.
   """
   __metaclass__ = abc.ABCMeta
 
+  @enum.unique
+  class Kind(enum.Enum):
+    """Kinds of subscription."""
+
+    FULL = 'full'
+    TERMINATION_ONLY = 'termination only'
+    NONE = 'none'
+
 
 class End(object):
   """Common type for entry-point objects on both sides of an operation."""
@@ -182,9 +186,8 @@
     """Reports the number of terminated operations broken down by outcome.
 
     Returns:
-      A dictionary from operation outcome constant (COMPLETED, CANCELLED,
-        EXPIRED, and so on) to an integer representing the number of operations
-        that terminated with that outcome.
+      A dictionary from Outcome value to an integer identifying the number
+        of operations that terminated with that outcome.
     """
     raise NotImplementedError()
 
diff --git a/src/python/_framework/base/interfaces_test.py b/src/python/src/_framework/base/interfaces_test.py
similarity index 89%
rename from src/python/_framework/base/interfaces_test.py
rename to src/python/src/_framework/base/interfaces_test.py
index 6eb07ea..8e26d88 100644
--- a/src/python/_framework/base/interfaces_test.py
+++ b/src/python/src/_framework/base/interfaces_test.py
@@ -49,13 +49,13 @@
 WAIT_ON_CONDITION = 'wait on condition'
 
 EMPTY_OUTCOME_DICT = {
-    interfaces.COMPLETED: 0,
-    interfaces.CANCELLED: 0,
-    interfaces.EXPIRED: 0,
-    interfaces.RECEPTION_FAILURE: 0,
-    interfaces.TRANSMISSION_FAILURE: 0,
-    interfaces.SERVICER_FAILURE: 0,
-    interfaces.SERVICED_FAILURE: 0,
+    interfaces.Outcome.COMPLETED: 0,
+    interfaces.Outcome.CANCELLED: 0,
+    interfaces.Outcome.EXPIRED: 0,
+    interfaces.Outcome.RECEPTION_FAILURE: 0,
+    interfaces.Outcome.TRANSMISSION_FAILURE: 0,
+    interfaces.Outcome.SERVICER_FAILURE: 0,
+    interfaces.Outcome.SERVICED_FAILURE: 0,
     }
 
 
@@ -169,7 +169,8 @@
         SYNCHRONOUS_ECHO, None, True, SMALL_TIMEOUT,
         util.none_serviced_subscription(), 'test trace ID')
     util.wait_for_idle(self.front)
-    self.assertEqual(1, self.front.operation_stats()[interfaces.COMPLETED])
+    self.assertEqual(
+        1, self.front.operation_stats()[interfaces.Outcome.COMPLETED])
 
     # Assuming nothing really pathological (such as pauses on the order of
     # SMALL_TIMEOUT interfering with this test) there are a two different ways
@@ -183,7 +184,7 @@
     first_back_possibility = EMPTY_OUTCOME_DICT
     # (2) The packet arrived at the back and the back completed the operation.
     second_back_possibility = dict(EMPTY_OUTCOME_DICT)
-    second_back_possibility[interfaces.COMPLETED] = 1
+    second_back_possibility[interfaces.Outcome.COMPLETED] = 1
     self.assertIn(
         back_operation_stats, (first_back_possibility, second_back_possibility))
     # It's true that if the packet had arrived at the back and the back had
@@ -204,8 +205,10 @@
 
     util.wait_for_idle(self.front)
     util.wait_for_idle(self.back)
-    self.assertEqual(1, self.front.operation_stats()[interfaces.COMPLETED])
-    self.assertEqual(1, self.back.operation_stats()[interfaces.COMPLETED])
+    self.assertEqual(
+        1, self.front.operation_stats()[interfaces.Outcome.COMPLETED])
+    self.assertEqual(
+        1, self.back.operation_stats()[interfaces.Outcome.COMPLETED])
     self.assertListEqual([(test_payload, True)], test_consumer.calls)
 
   def testBidirectionalStreamingEcho(self):
@@ -226,8 +229,10 @@
 
     util.wait_for_idle(self.front)
     util.wait_for_idle(self.back)
-    self.assertEqual(1, self.front.operation_stats()[interfaces.COMPLETED])
-    self.assertEqual(1, self.back.operation_stats()[interfaces.COMPLETED])
+    self.assertEqual(
+        1, self.front.operation_stats()[interfaces.Outcome.COMPLETED])
+    self.assertEqual(
+        1, self.back.operation_stats()[interfaces.Outcome.COMPLETED])
     self.assertListEqual(test_payloads, test_consumer.values())
 
   def testCancellation(self):
@@ -242,7 +247,8 @@
     operation.cancel()
 
     util.wait_for_idle(self.front)
-    self.assertEqual(1, self.front.operation_stats()[interfaces.CANCELLED])
+    self.assertEqual(
+        1, self.front.operation_stats()[interfaces.Outcome.CANCELLED])
     util.wait_for_idle(self.back)
     self.assertListEqual([], test_consumer.calls)
 
@@ -260,7 +266,7 @@
     # The back started processing based on the first packet and then stopped
     # upon receiving the cancellation packet.
     second_back_possibility = dict(EMPTY_OUTCOME_DICT)
-    second_back_possibility[interfaces.CANCELLED] = 1
+    second_back_possibility[interfaces.Outcome.CANCELLED] = 1
     self.assertIn(
         back_operation_stats, (first_back_possibility, second_back_possibility))
 
@@ -292,8 +298,10 @@
     duration = termination_time_cell[0] - start_time
     self.assertLessEqual(timeout, duration)
     self.assertLess(duration, timeout + allowance)
-    self.assertEqual(interfaces.EXPIRED, outcome_cell[0])
+    self.assertEqual(interfaces.Outcome.EXPIRED, outcome_cell[0])
     util.wait_for_idle(self.front)
-    self.assertEqual(1, self.front.operation_stats()[interfaces.EXPIRED])
+    self.assertEqual(
+        1, self.front.operation_stats()[interfaces.Outcome.EXPIRED])
     util.wait_for_idle(self.back)
-    self.assertLessEqual(1, self.back.operation_stats()[interfaces.EXPIRED])
+    self.assertLessEqual(
+        1, self.back.operation_stats()[interfaces.Outcome.EXPIRED])
diff --git a/src/python/_framework/base/packets/__init__.py b/src/python/src/_framework/base/packets/__init__.py
similarity index 100%
rename from src/python/_framework/base/packets/__init__.py
rename to src/python/src/_framework/base/packets/__init__.py
diff --git a/src/python/_framework/base/packets/_cancellation.py b/src/python/src/_framework/base/packets/_cancellation.py
similarity index 100%
rename from src/python/_framework/base/packets/_cancellation.py
rename to src/python/src/_framework/base/packets/_cancellation.py
diff --git a/src/python/_framework/base/packets/_constants.py b/src/python/src/_framework/base/packets/_constants.py
similarity index 100%
rename from src/python/_framework/base/packets/_constants.py
rename to src/python/src/_framework/base/packets/_constants.py
diff --git a/src/python/_framework/base/packets/_context.py b/src/python/src/_framework/base/packets/_context.py
similarity index 100%
rename from src/python/_framework/base/packets/_context.py
rename to src/python/src/_framework/base/packets/_context.py
diff --git a/src/python/_framework/base/packets/_emission.py b/src/python/src/_framework/base/packets/_emission.py
similarity index 100%
rename from src/python/_framework/base/packets/_emission.py
rename to src/python/src/_framework/base/packets/_emission.py
diff --git a/src/python/_framework/base/packets/_ends.py b/src/python/src/_framework/base/packets/_ends.py
similarity index 96%
rename from src/python/_framework/base/packets/_ends.py
rename to src/python/src/_framework/base/packets/_ends.py
index baaf5ca..b1d1645 100644
--- a/src/python/_framework/base/packets/_ends.py
+++ b/src/python/src/_framework/base/packets/_ends.py
@@ -51,13 +51,13 @@
 _IDLE_ACTION_EXCEPTION_LOG_MESSAGE = 'Exception calling idle action!'
 
 _OPERATION_OUTCOMES = (
-    base_interfaces.COMPLETED,
-    base_interfaces.CANCELLED,
-    base_interfaces.EXPIRED,
-    base_interfaces.RECEPTION_FAILURE,
-    base_interfaces.TRANSMISSION_FAILURE,
-    base_interfaces.SERVICER_FAILURE,
-    base_interfaces.SERVICED_FAILURE,
+    base_interfaces.Outcome.COMPLETED,
+    base_interfaces.Outcome.CANCELLED,
+    base_interfaces.Outcome.EXPIRED,
+    base_interfaces.Outcome.RECEPTION_FAILURE,
+    base_interfaces.Outcome.TRANSMISSION_FAILURE,
+    base_interfaces.Outcome.SERVICER_FAILURE,
+    base_interfaces.Outcome.SERVICED_FAILURE,
     )
 
 
@@ -193,10 +193,10 @@
   lock = threading.Lock()
   with lock:
     termination_manager = _termination.front_termination_manager(
-        work_pool, utility_pool, termination_action, subscription.category)
+        work_pool, utility_pool, termination_action, subscription.kind)
     transmission_manager = _transmission.front_transmission_manager(
         lock, transmission_pool, callback, operation_id, name,
-        subscription.category, trace_id, timeout, termination_manager)
+        subscription.kind, trace_id, timeout, termination_manager)
     operation_context = _context.OperationContext(
         lock, operation_id, packets.Kind.SERVICED_FAILURE,
         termination_manager, transmission_manager)
@@ -225,9 +225,10 @@
 
     transmission_manager.inmit(payload, complete)
 
-    returned_reception_manager = (
-        None if subscription.category == base_interfaces.NONE
-        else reception_manager)
+    if subscription.kind is base_interfaces.ServicedSubscription.Kind.NONE:
+      returned_reception_manager = None
+    else:
+      returned_reception_manager = reception_manager
 
     return _FrontManagement(
         returned_reception_manager, emission_manager, operation_context,
diff --git a/src/python/_framework/base/packets/_expiration.py b/src/python/src/_framework/base/packets/_expiration.py
similarity index 100%
rename from src/python/_framework/base/packets/_expiration.py
rename to src/python/src/_framework/base/packets/_expiration.py
diff --git a/src/python/_framework/base/packets/_ingestion.py b/src/python/src/_framework/base/packets/_ingestion.py
similarity index 99%
rename from src/python/_framework/base/packets/_ingestion.py
rename to src/python/src/_framework/base/packets/_ingestion.py
index ad5ed4c..abc1e7a 100644
--- a/src/python/_framework/base/packets/_ingestion.py
+++ b/src/python/src/_framework/base/packets/_ingestion.py
@@ -111,7 +111,7 @@
 
   def create_consumer(self, requirement):
     """See _ConsumerCreator.create_consumer for specification."""
-    if self._subscription.category == interfaces.FULL:
+    if self._subscription.kind is interfaces.ServicedSubscription.Kind.FULL:
       try:
         return _ConsumerCreation(
             self._subscription.ingestor.consumer(self._operation_context),
diff --git a/src/python/_framework/base/packets/_interfaces.py b/src/python/src/_framework/base/packets/_interfaces.py
similarity index 96%
rename from src/python/_framework/base/packets/_interfaces.py
rename to src/python/src/_framework/base/packets/_interfaces.py
index 5f6c059..d1bda95 100644
--- a/src/python/_framework/base/packets/_interfaces.py
+++ b/src/python/src/_framework/base/packets/_interfaces.py
@@ -58,10 +58,7 @@
     immediately.
 
     Args:
-      callback: A callable that will be passed one of base_interfaces.COMPLETED,
-        base_interfaces.CANCELLED, base_interfaces.EXPIRED,
-        base_interfaces.RECEPTION_FAILURE, base_interfaces.TRANSMISSION_FAILURE,
-        base_interfaces.SERVICER_FAILURE, or base_interfaces.SERVICED_FAILURE.
+      callback: A callable that will be passed a base_interfaces.Outcome value.
     """
     raise NotImplementedError()
 
diff --git a/src/python/_framework/base/packets/_reception.py b/src/python/src/_framework/base/packets/_reception.py
similarity index 100%
rename from src/python/_framework/base/packets/_reception.py
rename to src/python/src/_framework/base/packets/_reception.py
diff --git a/src/python/_framework/base/packets/_termination.py b/src/python/src/_framework/base/packets/_termination.py
similarity index 75%
rename from src/python/_framework/base/packets/_termination.py
rename to src/python/src/_framework/base/packets/_termination.py
index d586c21..ae3ba1c 100644
--- a/src/python/_framework/base/packets/_termination.py
+++ b/src/python/src/_framework/base/packets/_termination.py
@@ -29,6 +29,8 @@
 
 """State and behavior for operation termination."""
 
+import enum
+
 from _framework.base import interfaces
 from _framework.base.packets import _constants
 from _framework.base.packets import _interfaces
@@ -37,26 +39,32 @@
 
 _CALLBACK_EXCEPTION_LOG_MESSAGE = 'Exception calling termination callback!'
 
-# TODO(nathaniel): enum module.
-_EMISSION = 'emission'
-_TRANSMISSION = 'transmission'
-_INGESTION = 'ingestion'
-
-_FRONT_NOT_LISTENING_REQUIREMENTS = (_TRANSMISSION,)
-_BACK_NOT_LISTENING_REQUIREMENTS = (_EMISSION, _INGESTION,)
-_LISTENING_REQUIREMENTS = (_TRANSMISSION, _INGESTION,)
-
 _KINDS_TO_OUTCOMES = {
-    packets.Kind.COMPLETION: interfaces.COMPLETED,
-    packets.Kind.CANCELLATION: interfaces.CANCELLED,
-    packets.Kind.EXPIRATION: interfaces.EXPIRED,
-    packets.Kind.RECEPTION_FAILURE: interfaces.RECEPTION_FAILURE,
-    packets.Kind.TRANSMISSION_FAILURE: interfaces.TRANSMISSION_FAILURE,
-    packets.Kind.SERVICER_FAILURE: interfaces.SERVICER_FAILURE,
-    packets.Kind.SERVICED_FAILURE: interfaces.SERVICED_FAILURE,
+    packets.Kind.COMPLETION: interfaces.Outcome.COMPLETED,
+    packets.Kind.CANCELLATION: interfaces.Outcome.CANCELLED,
+    packets.Kind.EXPIRATION: interfaces.Outcome.EXPIRED,
+    packets.Kind.RECEPTION_FAILURE: interfaces.Outcome.RECEPTION_FAILURE,
+    packets.Kind.TRANSMISSION_FAILURE: interfaces.Outcome.TRANSMISSION_FAILURE,
+    packets.Kind.SERVICER_FAILURE: interfaces.Outcome.SERVICER_FAILURE,
+    packets.Kind.SERVICED_FAILURE: interfaces.Outcome.SERVICED_FAILURE,
     }
 
 
+@enum.unique
+class _Requirement(enum.Enum):
+  """Symbols indicating events required for termination."""
+
+  EMISSION = 'emission'
+  TRANSMISSION = 'transmission'
+  INGESTION = 'ingestion'
+
+_FRONT_NOT_LISTENING_REQUIREMENTS = (_Requirement.TRANSMISSION,)
+_BACK_NOT_LISTENING_REQUIREMENTS = (
+    _Requirement.EMISSION, _Requirement.INGESTION,)
+_LISTENING_REQUIREMENTS = (
+    _Requirement.TRANSMISSION, _Requirement.INGESTION,)
+
+
 class _TerminationManager(_interfaces.TerminationManager):
   """An implementation of _interfaces.TerminationManager."""
 
@@ -68,9 +76,8 @@
       work_pool: A thread pool in which customer work will be done.
       utility_pool: A thread pool in which work utility work will be done.
       action: An action to call on operation termination.
-      requirements: A combination of _EMISSION, _TRANSMISSION, and _INGESTION
-        identifying what must finish for the operation to be considered
-        completed.
+      requirements: A combination of _Requirement values identifying what
+        must finish for the operation to be considered completed.
       local_failure: A packets.Kind specifying what constitutes local failure of
         customer work.
     """
@@ -137,21 +144,21 @@
   def emission_complete(self):
     """See superclass method for specification."""
     if self._outstanding_requirements is not None:
-      self._outstanding_requirements.discard(_EMISSION)
+      self._outstanding_requirements.discard(_Requirement.EMISSION)
       if not self._outstanding_requirements:
         self._terminate(packets.Kind.COMPLETION)
 
   def transmission_complete(self):
     """See superclass method for specification."""
     if self._outstanding_requirements is not None:
-      self._outstanding_requirements.discard(_TRANSMISSION)
+      self._outstanding_requirements.discard(_Requirement.TRANSMISSION)
       if not self._outstanding_requirements:
         self._terminate(packets.Kind.COMPLETION)
 
   def ingestion_complete(self):
     """See superclass method for specification."""
     if self._outstanding_requirements is not None:
-      self._outstanding_requirements.discard(_INGESTION)
+      self._outstanding_requirements.discard(_Requirement.INGESTION)
       if not self._outstanding_requirements:
         self._terminate(packets.Kind.COMPLETION)
 
@@ -163,39 +170,46 @@
       self._terminate(kind)
 
 
-def front_termination_manager(work_pool, utility_pool, action, subscription):
+def front_termination_manager(
+    work_pool, utility_pool, action, subscription_kind):
   """Creates a TerminationManager appropriate for front-side use.
 
   Args:
     work_pool: A thread pool in which customer work will be done.
     utility_pool: A thread pool in which utility work will be done.
     action: An action to call on operation termination.
-    subscription: One of interfaces.FULL, interfaces.termination_only, or
-      interfaces.NONE.
+    subscription_kind: An interfaces.ServicedSubscription.Kind value.
 
   Returns:
     A TerminationManager appropriate for front-side use.
   """
+  if subscription_kind is interfaces.ServicedSubscription.Kind.NONE:
+    requirements = _FRONT_NOT_LISTENING_REQUIREMENTS
+  else:
+    requirements = _LISTENING_REQUIREMENTS
+
   return _TerminationManager(
-      work_pool, utility_pool, action,
-      _FRONT_NOT_LISTENING_REQUIREMENTS if subscription == interfaces.NONE else
-      _LISTENING_REQUIREMENTS, packets.Kind.SERVICED_FAILURE)
+      work_pool, utility_pool, action, requirements,
+      packets.Kind.SERVICED_FAILURE)
 
 
-def back_termination_manager(work_pool, utility_pool, action, subscription):
+def back_termination_manager(
+    work_pool, utility_pool, action, subscription_kind):
   """Creates a TerminationManager appropriate for back-side use.
 
   Args:
     work_pool: A thread pool in which customer work will be done.
     utility_pool: A thread pool in which utility work will be done.
     action: An action to call on operation termination.
-    subscription: One of interfaces.FULL, interfaces.termination_only, or
-      interfaces.NONE.
+    subscription_kind: An interfaces.ServicedSubscription.Kind value.
 
   Returns:
     A TerminationManager appropriate for back-side use.
   """
+  if subscription_kind is interfaces.ServicedSubscription.Kind.NONE:
+    requirements = _BACK_NOT_LISTENING_REQUIREMENTS
+  else:
+    requirements = _LISTENING_REQUIREMENTS
+
   return _TerminationManager(
-      work_pool, utility_pool, action,
-      _BACK_NOT_LISTENING_REQUIREMENTS if subscription == interfaces.NONE else
-      _LISTENING_REQUIREMENTS, packets.Kind.SERVICER_FAILURE)
+      work_pool, utility_pool, action, requirements,
+      packets.Kind.SERVICER_FAILURE)
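
Aside (not part of the patch): the termination manager above tracks completion as a shrinking set of _Requirement members and fires its terminal action once the set drains. A minimal, self-contained sketch of that pattern follows; the names _Tracker and satisfy are invented for illustration only.

import enum


@enum.unique
class _Requirement(enum.Enum):
  EMISSION = 'emission'
  TRANSMISSION = 'transmission'
  INGESTION = 'ingestion'


class _Tracker(object):
  """Tracks outstanding requirements; runs an action when none remain."""

  def __init__(self, requirements, action):
    self._outstanding = set(requirements)
    self._action = action

  def satisfy(self, requirement):
    # discard() is idempotent, so the action runs exactly once: when the
    # last outstanding requirement is satisfied.
    if self._outstanding:
      self._outstanding.discard(requirement)
      if not self._outstanding:
        self._action()


events = []
tracker = _Tracker(
    (_Requirement.TRANSMISSION, _Requirement.INGESTION),
    lambda: events.append('COMPLETION'))
tracker.satisfy(_Requirement.TRANSMISSION)
tracker.satisfy(_Requirement.INGESTION)
assert events == ['COMPLETION']

The real manager additionally distinguishes "already terminated" (the set is replaced by None) from "nothing outstanding", which this sketch elides.
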
diff --git a/src/python/_framework/base/packets/_transmission.py b/src/python/src/_framework/base/packets/_transmission.py
similarity index 92%
rename from src/python/_framework/base/packets/_transmission.py
rename to src/python/src/_framework/base/packets/_transmission.py
index 0061287..24fe6e6 100644
--- a/src/python/_framework/base/packets/_transmission.py
+++ b/src/python/src/_framework/base/packets/_transmission.py
@@ -91,20 +91,19 @@
 class _FrontPacketizer(_Packetizer):
   """Front-side packet-creating behavior."""
 
-  def __init__(self, name, subscription, trace_id, timeout):
+  def __init__(self, name, subscription_kind, trace_id, timeout):
     """Constructor.
 
     Args:
       name: The name of the operation.
-      subscription: One of interfaces.FULL, interfaces.TERMINATION_ONLY, or
-        interfaces.NONE describing the interest the front has in packets sent
-        from the back.
+      subscription_kind: An interfaces.ServicedSubscription.Kind value
+        describing the interest the front has in packets sent from the back.
       trace_id: A uuid.UUID identifying a set of related operations to which
         this operation belongs.
       timeout: A length of time in seconds to allow for the entire operation.
     """
     self._name = name
-    self._subscription = subscription
+    self._subscription_kind = subscription_kind
     self._trace_id = trace_id
     self._timeout = timeout
 
@@ -114,13 +113,13 @@
       return packets.FrontToBackPacket(
           operation_id, sequence_number,
           packets.Kind.COMPLETION if complete else packets.Kind.CONTINUATION,
-          self._name, self._subscription, self._trace_id, payload,
+          self._name, self._subscription_kind, self._trace_id, payload,
           self._timeout)
     else:
       return packets.FrontToBackPacket(
           operation_id, 0,
           packets.Kind.ENTIRE if complete else packets.Kind.COMMENCEMENT,
-          self._name, self._subscription, self._trace_id, payload,
+          self._name, self._subscription_kind, self._trace_id, payload,
           self._timeout)
 
   def packetize_abortion(self, operation_id, sequence_number, kind):
@@ -335,8 +334,8 @@
 
 
 def front_transmission_manager(
-    lock, pool, callback, operation_id, name, subscription, trace_id, timeout,
-    termination_manager):
+    lock, pool, callback, operation_id, name, subscription_kind, trace_id,
+    timeout, termination_manager):
   """Creates a TransmissionManager appropriate for front-side use.
 
   Args:
@@ -347,9 +346,8 @@
       of the operation.
     operation_id: The operation's ID.
     name: The name of the operation.
-    subscription: One of interfaces.FULL, interfaces.TERMINATION_ONLY, or
-      interfaces.NONE describing the interest the front has in packets sent
-      from the back.
+    subscription_kind: An interfaces.ServicedSubscription.Kind value
+      describing the interest the front has in packets sent from the back.
     trace_id: A uuid.UUID identifying a set of related operations to which
       this operation belongs.
     timeout: A length of time in seconds to allow for the entire operation.
@@ -361,12 +359,13 @@
   """
   return _TransmittingTransmissionManager(
       lock, pool, callback, operation_id, _FrontPacketizer(
-          name, subscription, trace_id, timeout),
+          name, subscription_kind, trace_id, timeout),
       termination_manager)
 
 
 def back_transmission_manager(
-    lock, pool, callback, operation_id, termination_manager, subscription):
+    lock, pool, callback, operation_id, termination_manager,
+    subscription_kind):
   """Creates a TransmissionManager appropriate for back-side use.
 
   Args:
@@ -378,14 +377,13 @@
     operation_id: The operation's ID.
     termination_manager: The _interfaces.TerminationManager associated with
       this operation.
-    subscription: One of interfaces.FULL, interfaces.TERMINATION_ONLY, or
-      interfaces.NONE describing the interest the front has in packets sent from
-      the back.
+    subscription_kind: An interfaces.ServicedSubscription.Kind value
+      describing the interest the front has in packets sent from the back.
 
   Returns:
     A TransmissionManager appropriate for back-side use.
   """
-  if subscription == interfaces.NONE:
+  if subscription_kind is interfaces.ServicedSubscription.Kind.NONE:
     return _EmptyTransmissionManager()
   else:
     return _TransmittingTransmissionManager(
diff --git a/src/python/_framework/base/packets/implementations.py b/src/python/src/_framework/base/packets/implementations.py
similarity index 100%
rename from src/python/_framework/base/packets/implementations.py
rename to src/python/src/_framework/base/packets/implementations.py
diff --git a/src/python/_framework/base/packets/implementations_test.py b/src/python/src/_framework/base/packets/implementations_test.py
similarity index 100%
rename from src/python/_framework/base/packets/implementations_test.py
rename to src/python/src/_framework/base/packets/implementations_test.py
diff --git a/src/python/_framework/base/packets/in_memory.py b/src/python/src/_framework/base/packets/in_memory.py
similarity index 100%
rename from src/python/_framework/base/packets/in_memory.py
rename to src/python/src/_framework/base/packets/in_memory.py
diff --git a/src/python/_framework/base/packets/interfaces.py b/src/python/src/_framework/base/packets/interfaces.py
similarity index 100%
rename from src/python/_framework/base/packets/interfaces.py
rename to src/python/src/_framework/base/packets/interfaces.py
diff --git a/src/python/_framework/base/packets/null.py b/src/python/src/_framework/base/packets/null.py
similarity index 100%
rename from src/python/_framework/base/packets/null.py
rename to src/python/src/_framework/base/packets/null.py
diff --git a/src/python/_framework/base/packets/packets.py b/src/python/src/_framework/base/packets/packets.py
similarity index 94%
rename from src/python/_framework/base/packets/packets.py
rename to src/python/src/_framework/base/packets/packets.py
index 1315ca6..f7503bd 100644
--- a/src/python/_framework/base/packets/packets.py
+++ b/src/python/src/_framework/base/packets/packets.py
@@ -71,10 +71,9 @@
       Kind.RECEPTION_FAILURE, or Kind.TRANSMISSION_FAILURE.
     name: The name of an operation. Must be present if kind is Kind.COMMENCEMENT
       or Kind.ENTIRE. Must be None for any other kind.
-    subscription: One of interfaces.FULL, interfaces.TERMINATION_ONLY, or
-      interfaces.NONE describing the interest the front has in packets sent from
-      the back. Must be present if kind is Kind.COMMENCEMENT or Kind.ENTIRE.
-      Must be None for any other kind.
+    subscription: An interfaces.ServicedSubscription.Kind value describing the
+      interest the front has in packets sent from the back. Must be present if
+      kind is Kind.COMMENCEMENT or Kind.ENTIRE. Must be None for any other kind.
     trace_id: A uuid.UUID identifying a set of related operations to which this
       operation belongs. May be None.
     payload: A customer payload object. Must be present if kind is
diff --git a/src/python/_framework/base/util.py b/src/python/src/_framework/base/util.py
similarity index 86%
rename from src/python/_framework/base/util.py
rename to src/python/src/_framework/base/util.py
index 6bbd18a..35ce044 100644
--- a/src/python/_framework/base/util.py
+++ b/src/python/src/_framework/base/util.py
@@ -36,13 +36,14 @@
 
 
 class _ServicedSubscription(
-    collections.namedtuple('_ServicedSubscription', ['category', 'ingestor']),
+    collections.namedtuple('_ServicedSubscription', ['kind', 'ingestor']),
     interfaces.ServicedSubscription):
   """See interfaces.ServicedSubscription for specification."""
 
-_NONE_SUBSCRIPTION = _ServicedSubscription(interfaces.NONE, None)
+_NONE_SUBSCRIPTION = _ServicedSubscription(
+    interfaces.ServicedSubscription.Kind.NONE, None)
 _TERMINATION_ONLY_SUBSCRIPTION = _ServicedSubscription(
-    interfaces.TERMINATION_ONLY, None)
+    interfaces.ServicedSubscription.Kind.TERMINATION_ONLY, None)
 
 
 def none_serviced_subscription():
@@ -72,12 +73,14 @@
   """Creates a "full" interfaces.ServicedSubscription object.
 
   Args:
-    ingestor: A ServicedIngestor.
+    ingestor: An interfaces.ServicedIngestor.
 
   Returns:
-    A ServicedSubscription object indicating a full subscription.
+    An interfaces.ServicedSubscription object indicating a full
+      subscription.
   """
-  return _ServicedSubscription(interfaces.FULL, ingestor)
+  return _ServicedSubscription(
+      interfaces.ServicedSubscription.Kind.FULL, ingestor)
 
 
 def wait_for_idle(end):
diff --git a/src/python/_framework/common/__init__.py b/src/python/src/_framework/common/__init__.py
similarity index 100%
rename from src/python/_framework/common/__init__.py
rename to src/python/src/_framework/common/__init__.py
diff --git a/src/python/_framework/common/cardinality.py b/src/python/src/_framework/common/cardinality.py
similarity index 100%
rename from src/python/_framework/common/cardinality.py
rename to src/python/src/_framework/common/cardinality.py
diff --git a/src/python/_framework/face/__init__.py b/src/python/src/_framework/face/__init__.py
similarity index 100%
rename from src/python/_framework/face/__init__.py
rename to src/python/src/_framework/face/__init__.py
diff --git a/src/python/_framework/face/_calls.py b/src/python/src/_framework/face/_calls.py
similarity index 95%
rename from src/python/_framework/face/_calls.py
rename to src/python/src/_framework/face/_calls.py
index ab58e63..9128aef 100644
--- a/src/python/_framework/face/_calls.py
+++ b/src/python/src/_framework/face/_calls.py
@@ -94,7 +94,7 @@
 
   def cancel(self):
     self._operation.cancel()
-    self._rendezvous.set_outcome(base_interfaces.CANCELLED)
+    self._rendezvous.set_outcome(base_interfaces.Outcome.CANCELLED)
 
 
 class _OperationFuture(future.Future):
@@ -150,15 +150,12 @@
     """Indicates to this object that the operation has terminated.
 
     Args:
-      operation_outcome: One of base_interfaces.COMPLETED,
-        base_interfaces.CANCELLED, base_interfaces.EXPIRED,
-        base_interfaces.RECEPTION_FAILURE, base_interfaces.TRANSMISSION_FAILURE,
-        base_interfaces.SERVICED_FAILURE, or base_interfaces.SERVICER_FAILURE
-        indicating the categorical outcome of the operation.
+      operation_outcome: A base_interfaces.Outcome value indicating the
+        outcome of the operation.
     """
     with self._condition:
       if (self._outcome is None and
-          operation_outcome != base_interfaces.COMPLETED):
+          operation_outcome is not base_interfaces.Outcome.COMPLETED):
         self._outcome = future.raised(
             _control.abortion_outcome_to_exception(operation_outcome))
         self._condition.notify_all()
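
Aside (not part of the patch): the _OperationFuture hunk above records only the first terminal outcome and wakes waiters under a condition variable. A stand-alone sketch of that "set once, notify all" pattern; the class and method names _OneShotOutcome, set_outcome, and outcome are invented for illustration.

import threading


class _OneShotOutcome(object):
  """Stores the first outcome it is given and wakes any waiting readers."""

  def __init__(self):
    self._condition = threading.Condition()
    self._outcome = None

  def set_outcome(self, outcome):
    with self._condition:
      if self._outcome is None:
        self._outcome = outcome
        self._condition.notify_all()

  def outcome(self):
    with self._condition:
      while self._outcome is None:
        self._condition.wait()
      return self._outcome


holder = _OneShotOutcome()
threading.Thread(target=holder.set_outcome, args=('COMPLETED',)).start()
assert holder.outcome() == 'COMPLETED'  # blocks until set_outcome has run
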
diff --git a/src/python/_framework/face/_control.py b/src/python/src/_framework/face/_control.py
similarity index 88%
rename from src/python/_framework/face/_control.py
rename to src/python/src/_framework/face/_control.py
index 2c22132..9f1bf6d 100644
--- a/src/python/_framework/face/_control.py
+++ b/src/python/src/_framework/face/_control.py
@@ -40,13 +40,17 @@
 INTERNAL_ERROR_LOG_MESSAGE = ':-( RPC Framework (Face) Internal Error! :-('
 
 _OPERATION_OUTCOME_TO_RPC_ABORTION = {
-    base_interfaces.CANCELLED: interfaces.CANCELLED,
-    base_interfaces.EXPIRED: interfaces.EXPIRED,
-    base_interfaces.RECEPTION_FAILURE: interfaces.NETWORK_FAILURE,
-    base_interfaces.TRANSMISSION_FAILURE: interfaces.NETWORK_FAILURE,
-    base_interfaces.SERVICED_FAILURE: interfaces.SERVICED_FAILURE,
-    base_interfaces.SERVICER_FAILURE: interfaces.SERVICER_FAILURE,
-    }
+    base_interfaces.Outcome.CANCELLED: interfaces.Abortion.CANCELLED,
+    base_interfaces.Outcome.EXPIRED: interfaces.Abortion.EXPIRED,
+    base_interfaces.Outcome.RECEPTION_FAILURE:
+        interfaces.Abortion.NETWORK_FAILURE,
+    base_interfaces.Outcome.TRANSMISSION_FAILURE:
+        interfaces.Abortion.NETWORK_FAILURE,
+    base_interfaces.Outcome.SERVICED_FAILURE:
+        interfaces.Abortion.SERVICED_FAILURE,
+    base_interfaces.Outcome.SERVICER_FAILURE:
+        interfaces.Abortion.SERVICER_FAILURE,
+}
 
 
 def _as_operation_termination_callback(rpc_abortion_callback):
@@ -59,13 +63,13 @@
 
 
 def _abortion_outcome_to_exception(abortion_outcome):
-  if abortion_outcome == base_interfaces.CANCELLED:
+  if abortion_outcome == base_interfaces.Outcome.CANCELLED:
     return exceptions.CancellationError()
-  elif abortion_outcome == base_interfaces.EXPIRED:
+  elif abortion_outcome == base_interfaces.Outcome.EXPIRED:
     return exceptions.ExpirationError()
-  elif abortion_outcome == base_interfaces.SERVICER_FAILURE:
+  elif abortion_outcome == base_interfaces.Outcome.SERVICER_FAILURE:
     return exceptions.ServicerError()
-  elif abortion_outcome == base_interfaces.SERVICED_FAILURE:
+  elif abortion_outcome == base_interfaces.Outcome.SERVICED_FAILURE:
     return exceptions.ServicedError()
   else:
     return exceptions.NetworkError()
@@ -133,7 +137,7 @@
 
   def set_outcome(self, outcome):
     with self._condition:
-      if outcome != base_interfaces.COMPLETED:
+      if outcome is not base_interfaces.Outcome.COMPLETED:
         self._abortion = outcome
         self._condition.notify()
 
diff --git a/src/python/_framework/face/_service.py b/src/python/src/_framework/face/_service.py
similarity index 100%
rename from src/python/_framework/face/_service.py
rename to src/python/src/_framework/face/_service.py
diff --git a/src/python/_framework/face/_test_case.py b/src/python/src/_framework/face/_test_case.py
similarity index 100%
rename from src/python/_framework/face/_test_case.py
rename to src/python/src/_framework/face/_test_case.py
diff --git a/src/python/_framework/face/blocking_invocation_inline_service_test.py b/src/python/src/_framework/face/blocking_invocation_inline_service_test.py
similarity index 100%
rename from src/python/_framework/face/blocking_invocation_inline_service_test.py
rename to src/python/src/_framework/face/blocking_invocation_inline_service_test.py
diff --git a/src/python/_framework/face/demonstration.py b/src/python/src/_framework/face/demonstration.py
similarity index 100%
rename from src/python/_framework/face/demonstration.py
rename to src/python/src/_framework/face/demonstration.py
diff --git a/src/python/_framework/face/event_invocation_synchronous_event_service_test.py b/src/python/src/_framework/face/event_invocation_synchronous_event_service_test.py
similarity index 100%
rename from src/python/_framework/face/event_invocation_synchronous_event_service_test.py
rename to src/python/src/_framework/face/event_invocation_synchronous_event_service_test.py
diff --git a/src/python/_framework/face/exceptions.py b/src/python/src/_framework/face/exceptions.py
similarity index 100%
rename from src/python/_framework/face/exceptions.py
rename to src/python/src/_framework/face/exceptions.py
diff --git a/src/python/_framework/face/future_invocation_asynchronous_event_service_test.py b/src/python/src/_framework/face/future_invocation_asynchronous_event_service_test.py
similarity index 100%
rename from src/python/_framework/face/future_invocation_asynchronous_event_service_test.py
rename to src/python/src/_framework/face/future_invocation_asynchronous_event_service_test.py
diff --git a/src/python/_framework/face/implementations.py b/src/python/src/_framework/face/implementations.py
similarity index 100%
rename from src/python/_framework/face/implementations.py
rename to src/python/src/_framework/face/implementations.py
diff --git a/src/python/_framework/face/interfaces.py b/src/python/src/_framework/face/interfaces.py
similarity index 93%
rename from src/python/_framework/face/interfaces.py
rename to src/python/src/_framework/face/interfaces.py
index 0cc7c70..2480454 100644
--- a/src/python/_framework/face/interfaces.py
+++ b/src/python/src/_framework/face/interfaces.py
@@ -30,6 +30,7 @@
 """Interfaces for the face layer of RPC Framework."""
 
 import abc
+import enum
 
 # exceptions, abandonment, and future are referenced from specification in this
 # module.
@@ -58,14 +59,15 @@
     raise NotImplementedError()
 
 
-# Constants that categorize RPC abortion.
-# TODO(nathaniel): Learn and use Python's enum library for this de facto
-# enumerated type
-CANCELLED = 'abortion: cancelled'
-EXPIRED = 'abortion: expired'
-NETWORK_FAILURE = 'abortion: network failure'
-SERVICED_FAILURE = 'abortion: serviced failure'
-SERVICER_FAILURE = 'abortion: servicer failure'
+@enum.unique
+class Abortion(enum.Enum):
+  """Categories of RPC abortion."""
+
+  CANCELLED = 'cancelled'
+  EXPIRED = 'expired'
+  NETWORK_FAILURE = 'network failure'
+  SERVICED_FAILURE = 'serviced failure'
+  SERVICER_FAILURE = 'servicer failure'
 
 
 class RpcContext(object):
@@ -93,9 +95,8 @@
     """Registers a callback to be called if the RPC is aborted.
 
     Args:
-      abortion_callback: A callable to be called and passed one of CANCELLED,
-        EXPIRED, NETWORK_FAILURE, SERVICED_FAILURE, or SERVICER_FAILURE in the
-        event of RPC abortion.
+      abortion_callback: A callable to be called and passed an Abortion value
+        in the event of RPC abortion.
     """
     raise NotImplementedError()
 
@@ -474,9 +475,8 @@
       request: The request value for the RPC.
       response_callback: A callback to be called to accept the response value
         of the RPC.
-      abortion_callback: A callback to be called to accept one of CANCELLED,
-        EXPIRED, NETWORK_FAILURE, or SERVICER_FAILURE in the event of RPC
-        abortion.
+      abortion_callback: A callback to be called and passed an Abortion value
+        in the event of RPC abortion.
       timeout: A duration of time in seconds to allow for the RPC.
 
     Returns:
@@ -494,9 +494,8 @@
       request: The request value for the RPC.
       response_consumer: A stream.Consumer to be called to accept the response
         values of the RPC.
-      abortion_callback: A callback to be called to accept one of CANCELLED,
-        EXPIRED, NETWORK_FAILURE, or SERVICER_FAILURE in the event of RPC
-        abortion.
+      abortion_callback: A callback to be called and passed an Abortion value
+        in the event of RPC abortion.
       timeout: A duration of time in seconds to allow for the RPC.
 
     Returns:
@@ -513,9 +512,8 @@
       name: The RPC method name.
       response_callback: A callback to be called to accept the response value
         of the RPC.
-      abortion_callback: A callback to be called to accept one of CANCELLED,
-        EXPIRED, NETWORK_FAILURE, or SERVICER_FAILURE in the event of RPC
-        abortion.
+      abortion_callback: A callback to be called and passed an Abortion value
+        in the event of RPC abortion.
       timeout: A duration of time in seconds to allow for the RPC.
 
     Returns:
@@ -533,9 +531,8 @@
       name: The RPC method name.
       response_consumer: A stream.Consumer to be called to accept the response
         values of the RPC.
-      abortion_callback: A callback to be called to accept one of CANCELLED,
-        EXPIRED, NETWORK_FAILURE, or SERVICER_FAILURE in the event of RPC
-        abortion.
+      abortion_callback: A callback to be called and passed an Abortion value
+        in the event of RPC abortion.
       timeout: A duration of time in seconds to allow for the RPC.
 
     Returns:
diff --git a/src/python/_framework/face/testing/__init__.py b/src/python/src/_framework/face/testing/__init__.py
similarity index 100%
rename from src/python/_framework/face/testing/__init__.py
rename to src/python/src/_framework/face/testing/__init__.py
diff --git a/src/python/_framework/face/testing/base_util.py b/src/python/src/_framework/face/testing/base_util.py
similarity index 100%
rename from src/python/_framework/face/testing/base_util.py
rename to src/python/src/_framework/face/testing/base_util.py
diff --git a/src/python/_framework/face/testing/blocking_invocation_inline_service_test_case.py b/src/python/src/_framework/face/testing/blocking_invocation_inline_service_test_case.py
similarity index 100%
rename from src/python/_framework/face/testing/blocking_invocation_inline_service_test_case.py
rename to src/python/src/_framework/face/testing/blocking_invocation_inline_service_test_case.py
diff --git a/src/python/_framework/face/testing/callback.py b/src/python/src/_framework/face/testing/callback.py
similarity index 100%
rename from src/python/_framework/face/testing/callback.py
rename to src/python/src/_framework/face/testing/callback.py
diff --git a/src/python/_framework/face/testing/control.py b/src/python/src/_framework/face/testing/control.py
similarity index 100%
rename from src/python/_framework/face/testing/control.py
rename to src/python/src/_framework/face/testing/control.py
diff --git a/src/python/_framework/face/testing/coverage.py b/src/python/src/_framework/face/testing/coverage.py
similarity index 100%
rename from src/python/_framework/face/testing/coverage.py
rename to src/python/src/_framework/face/testing/coverage.py
diff --git a/src/python/_framework/face/testing/digest.py b/src/python/src/_framework/face/testing/digest.py
similarity index 100%
rename from src/python/_framework/face/testing/digest.py
rename to src/python/src/_framework/face/testing/digest.py
diff --git a/src/python/_framework/face/testing/event_invocation_synchronous_event_service_test_case.py b/src/python/src/_framework/face/testing/event_invocation_synchronous_event_service_test_case.py
similarity index 93%
rename from src/python/_framework/face/testing/event_invocation_synchronous_event_service_test_case.py
rename to src/python/src/_framework/face/testing/event_invocation_synchronous_event_service_test_case.py
index dba73a9..cb786f5 100644
--- a/src/python/_framework/face/testing/event_invocation_synchronous_event_service_test_case.py
+++ b/src/python/src/_framework/face/testing/event_invocation_synchronous_event_service_test_case.py
@@ -176,7 +176,7 @@
               name, request, callback.complete, callback.abort, _TIMEOUT)
           callback.block_until_terminated()
 
-        self.assertEqual(interfaces.EXPIRED, callback.abortion())
+        self.assertEqual(interfaces.Abortion.EXPIRED, callback.abortion())
 
   def testExpiredUnaryRequestStreamResponse(self):
     for name, test_messages_sequence in (
@@ -190,7 +190,7 @@
               name, request, callback, callback.abort, _TIMEOUT)
           callback.block_until_terminated()
 
-        self.assertEqual(interfaces.EXPIRED, callback.abortion())
+        self.assertEqual(interfaces.Abortion.EXPIRED, callback.abortion())
 
   def testExpiredStreamRequestUnaryResponse(self):
     for name, test_messages_sequence in (
@@ -202,7 +202,7 @@
             name, callback.complete, callback.abort, _TIMEOUT)
         callback.block_until_terminated()
 
-        self.assertEqual(interfaces.EXPIRED, callback.abortion())
+        self.assertEqual(interfaces.Abortion.EXPIRED, callback.abortion())
 
   def testExpiredStreamRequestStreamResponse(self):
     for name, test_messages_sequence in (
@@ -217,7 +217,7 @@
           request_consumer.consume(request)
         callback.block_until_terminated()
 
-        self.assertEqual(interfaces.EXPIRED, callback.abortion())
+        self.assertEqual(interfaces.Abortion.EXPIRED, callback.abortion())
 
   def testFailedUnaryRequestUnaryResponse(self):
     for name, test_messages_sequence in (
@@ -231,7 +231,7 @@
               name, request, callback.complete, callback.abort, _TIMEOUT)
           callback.block_until_terminated()
 
-        self.assertEqual(interfaces.SERVICER_FAILURE, callback.abortion())
+        self.assertEqual(interfaces.Abortion.SERVICER_FAILURE, callback.abortion())
 
   def testFailedUnaryRequestStreamResponse(self):
     for name, test_messages_sequence in (
@@ -245,7 +245,7 @@
               name, request, callback, callback.abort, _TIMEOUT)
           callback.block_until_terminated()
 
-        self.assertEqual(interfaces.SERVICER_FAILURE, callback.abortion())
+        self.assertEqual(interfaces.Abortion.SERVICER_FAILURE, callback.abortion())
 
   def testFailedStreamRequestUnaryResponse(self):
     for name, test_messages_sequence in (
@@ -262,7 +262,7 @@
           request_consumer.terminate()
           callback.block_until_terminated()
 
-        self.assertEqual(interfaces.SERVICER_FAILURE, callback.abortion())
+        self.assertEqual(interfaces.Abortion.SERVICER_FAILURE, callback.abortion())
 
   def testFailedStreamRequestStreamResponse(self):
     for name, test_messages_sequence in (
@@ -279,7 +279,7 @@
           request_consumer.terminate()
           callback.block_until_terminated()
 
-        self.assertEqual(interfaces.SERVICER_FAILURE, callback.abortion())
+        self.assertEqual(interfaces.Abortion.SERVICER_FAILURE, callback.abortion())
 
   def testParallelInvocations(self):
     for name, test_messages_sequence in (
@@ -321,7 +321,7 @@
           call.cancel()
           callback.block_until_terminated()
 
-        self.assertEqual(interfaces.CANCELLED, callback.abortion())
+        self.assertEqual(interfaces.Abortion.CANCELLED, callback.abortion())
 
   def testCancelledUnaryRequestStreamResponse(self):
     for name, test_messages_sequence in (
@@ -335,7 +335,7 @@
         call.cancel()
         callback.block_until_terminated()
 
-        self.assertEqual(interfaces.CANCELLED, callback.abortion())
+        self.assertEqual(interfaces.Abortion.CANCELLED, callback.abortion())
 
   def testCancelledStreamRequestUnaryResponse(self):
     for name, test_messages_sequence in (
@@ -351,7 +351,7 @@
         call.cancel()
         callback.block_until_terminated()
 
-        self.assertEqual(interfaces.CANCELLED, callback.abortion())
+        self.assertEqual(interfaces.Abortion.CANCELLED, callback.abortion())
 
   def testCancelledStreamRequestStreamResponse(self):
     for name, test_messages_sequence in (
@@ -364,4 +364,4 @@
         call.cancel()
         callback.block_until_terminated()
 
-        self.assertEqual(interfaces.CANCELLED, callback.abortion())
+        self.assertEqual(interfaces.Abortion.CANCELLED, callback.abortion())
diff --git a/src/python/_framework/face/testing/future_invocation_asynchronous_event_service_test_case.py b/src/python/src/_framework/face/testing/future_invocation_asynchronous_event_service_test_case.py
similarity index 100%
rename from src/python/_framework/face/testing/future_invocation_asynchronous_event_service_test_case.py
rename to src/python/src/_framework/face/testing/future_invocation_asynchronous_event_service_test_case.py
diff --git a/src/python/_framework/face/testing/interfaces.py b/src/python/src/_framework/face/testing/interfaces.py
similarity index 100%
rename from src/python/_framework/face/testing/interfaces.py
rename to src/python/src/_framework/face/testing/interfaces.py
diff --git a/src/python/_framework/face/testing/serial.py b/src/python/src/_framework/face/testing/serial.py
similarity index 100%
rename from src/python/_framework/face/testing/serial.py
rename to src/python/src/_framework/face/testing/serial.py
diff --git a/src/python/_framework/face/testing/service.py b/src/python/src/_framework/face/testing/service.py
similarity index 100%
rename from src/python/_framework/face/testing/service.py
rename to src/python/src/_framework/face/testing/service.py
diff --git a/src/python/_framework/face/testing/stock_service.py b/src/python/src/_framework/face/testing/stock_service.py
similarity index 100%
rename from src/python/_framework/face/testing/stock_service.py
rename to src/python/src/_framework/face/testing/stock_service.py
diff --git a/src/python/_framework/face/testing/test_case.py b/src/python/src/_framework/face/testing/test_case.py
similarity index 100%
rename from src/python/_framework/face/testing/test_case.py
rename to src/python/src/_framework/face/testing/test_case.py
diff --git a/src/python/_framework/foundation/__init__.py b/src/python/src/_framework/foundation/__init__.py
similarity index 100%
rename from src/python/_framework/foundation/__init__.py
rename to src/python/src/_framework/foundation/__init__.py
diff --git a/src/python/_framework/foundation/_later_test.py b/src/python/src/_framework/foundation/_later_test.py
similarity index 100%
rename from src/python/_framework/foundation/_later_test.py
rename to src/python/src/_framework/foundation/_later_test.py
diff --git a/src/python/_framework/foundation/_logging_pool_test.py b/src/python/src/_framework/foundation/_logging_pool_test.py
similarity index 100%
rename from src/python/_framework/foundation/_logging_pool_test.py
rename to src/python/src/_framework/foundation/_logging_pool_test.py
diff --git a/src/python/_framework/foundation/_timer_future.py b/src/python/src/_framework/foundation/_timer_future.py
similarity index 100%
rename from src/python/_framework/foundation/_timer_future.py
rename to src/python/src/_framework/foundation/_timer_future.py
diff --git a/src/python/_framework/foundation/abandonment.py b/src/python/src/_framework/foundation/abandonment.py
similarity index 100%
rename from src/python/_framework/foundation/abandonment.py
rename to src/python/src/_framework/foundation/abandonment.py
diff --git a/src/python/_framework/foundation/callable_util.py b/src/python/src/_framework/foundation/callable_util.py
similarity index 100%
rename from src/python/_framework/foundation/callable_util.py
rename to src/python/src/_framework/foundation/callable_util.py
diff --git a/src/python/_framework/foundation/future.py b/src/python/src/_framework/foundation/future.py
similarity index 100%
rename from src/python/_framework/foundation/future.py
rename to src/python/src/_framework/foundation/future.py
diff --git a/src/python/_framework/foundation/later.py b/src/python/src/_framework/foundation/later.py
similarity index 100%
rename from src/python/_framework/foundation/later.py
rename to src/python/src/_framework/foundation/later.py
diff --git a/src/python/_framework/foundation/logging_pool.py b/src/python/src/_framework/foundation/logging_pool.py
similarity index 100%
rename from src/python/_framework/foundation/logging_pool.py
rename to src/python/src/_framework/foundation/logging_pool.py
diff --git a/src/python/_framework/foundation/stream.py b/src/python/src/_framework/foundation/stream.py
similarity index 100%
rename from src/python/_framework/foundation/stream.py
rename to src/python/src/_framework/foundation/stream.py
diff --git a/src/python/_framework/foundation/stream_testing.py b/src/python/src/_framework/foundation/stream_testing.py
similarity index 100%
rename from src/python/_framework/foundation/stream_testing.py
rename to src/python/src/_framework/foundation/stream_testing.py
diff --git a/src/python/_framework/foundation/stream_util.py b/src/python/src/_framework/foundation/stream_util.py
similarity index 100%
rename from src/python/_framework/foundation/stream_util.py
rename to src/python/src/_framework/foundation/stream_util.py
diff --git a/src/python/_junkdrawer/__init__.py b/src/python/src/_junkdrawer/__init__.py
similarity index 100%
rename from src/python/_junkdrawer/__init__.py
rename to src/python/src/_junkdrawer/__init__.py
diff --git a/src/python/src/_junkdrawer/math_pb2.py b/src/python/src/_junkdrawer/math_pb2.py
new file mode 100644
index 0000000..2016595
--- /dev/null
+++ b/src/python/src/_junkdrawer/math_pb2.py
@@ -0,0 +1,266 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# TODO(nathaniel): Remove this from source control after having made
+# generation from the math.proto source part of GRPC's build-and-test
+# process.
+
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: math.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='math.proto',
+  package='math',
+  serialized_pb=_b('\n\nmath.proto\x12\x04math\",\n\x07\x44ivArgs\x12\x10\n\x08\x64ividend\x18\x01 \x02(\x03\x12\x0f\n\x07\x64ivisor\x18\x02 \x02(\x03\"/\n\x08\x44ivReply\x12\x10\n\x08quotient\x18\x01 \x02(\x03\x12\x11\n\tremainder\x18\x02 \x02(\x03\"\x18\n\x07\x46ibArgs\x12\r\n\x05limit\x18\x01 \x01(\x03\"\x12\n\x03Num\x12\x0b\n\x03num\x18\x01 \x02(\x03\"\x19\n\x08\x46ibReply\x12\r\n\x05\x63ount\x18\x01 \x02(\x03\x32\xa4\x01\n\x04Math\x12&\n\x03\x44iv\x12\r.math.DivArgs\x1a\x0e.math.DivReply\"\x00\x12.\n\x07\x44ivMany\x12\r.math.DivArgs\x1a\x0e.math.DivReply\"\x00(\x01\x30\x01\x12#\n\x03\x46ib\x12\r.math.FibArgs\x1a\t.math.Num\"\x00\x30\x01\x12\x1f\n\x03Sum\x12\t.math.Num\x1a\t.math.Num\"\x00(\x01')
+)
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+
+
+
+_DIVARGS = _descriptor.Descriptor(
+  name='DivArgs',
+  full_name='math.DivArgs',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='dividend', full_name='math.DivArgs.dividend', index=0,
+      number=1, type=3, cpp_type=2, label=2,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='divisor', full_name='math.DivArgs.divisor', index=1,
+      number=2, type=3, cpp_type=2, label=2,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=20,
+  serialized_end=64,
+)
+
+
+_DIVREPLY = _descriptor.Descriptor(
+  name='DivReply',
+  full_name='math.DivReply',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='quotient', full_name='math.DivReply.quotient', index=0,
+      number=1, type=3, cpp_type=2, label=2,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='remainder', full_name='math.DivReply.remainder', index=1,
+      number=2, type=3, cpp_type=2, label=2,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=66,
+  serialized_end=113,
+)
+
+
+_FIBARGS = _descriptor.Descriptor(
+  name='FibArgs',
+  full_name='math.FibArgs',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='limit', full_name='math.FibArgs.limit', index=0,
+      number=1, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=115,
+  serialized_end=139,
+)
+
+
+_NUM = _descriptor.Descriptor(
+  name='Num',
+  full_name='math.Num',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='num', full_name='math.Num.num', index=0,
+      number=1, type=3, cpp_type=2, label=2,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=141,
+  serialized_end=159,
+)
+
+
+_FIBREPLY = _descriptor.Descriptor(
+  name='FibReply',
+  full_name='math.FibReply',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='count', full_name='math.FibReply.count', index=0,
+      number=1, type=3, cpp_type=2, label=2,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=161,
+  serialized_end=186,
+)
+
+DESCRIPTOR.message_types_by_name['DivArgs'] = _DIVARGS
+DESCRIPTOR.message_types_by_name['DivReply'] = _DIVREPLY
+DESCRIPTOR.message_types_by_name['FibArgs'] = _FIBARGS
+DESCRIPTOR.message_types_by_name['Num'] = _NUM
+DESCRIPTOR.message_types_by_name['FibReply'] = _FIBREPLY
+
+DivArgs = _reflection.GeneratedProtocolMessageType('DivArgs', (_message.Message,), dict(
+  DESCRIPTOR = _DIVARGS,
+  __module__ = 'math_pb2'
+  # @@protoc_insertion_point(class_scope:math.DivArgs)
+  ))
+_sym_db.RegisterMessage(DivArgs)
+
+DivReply = _reflection.GeneratedProtocolMessageType('DivReply', (_message.Message,), dict(
+  DESCRIPTOR = _DIVREPLY,
+  __module__ = 'math_pb2'
+  # @@protoc_insertion_point(class_scope:math.DivReply)
+  ))
+_sym_db.RegisterMessage(DivReply)
+
+FibArgs = _reflection.GeneratedProtocolMessageType('FibArgs', (_message.Message,), dict(
+  DESCRIPTOR = _FIBARGS,
+  __module__ = 'math_pb2'
+  # @@protoc_insertion_point(class_scope:math.FibArgs)
+  ))
+_sym_db.RegisterMessage(FibArgs)
+
+Num = _reflection.GeneratedProtocolMessageType('Num', (_message.Message,), dict(
+  DESCRIPTOR = _NUM,
+  __module__ = 'math_pb2'
+  # @@protoc_insertion_point(class_scope:math.Num)
+  ))
+_sym_db.RegisterMessage(Num)
+
+FibReply = _reflection.GeneratedProtocolMessageType('FibReply', (_message.Message,), dict(
+  DESCRIPTOR = _FIBREPLY,
+  __module__ = 'math_pb2'
+  # @@protoc_insertion_point(class_scope:math.FibReply)
+  ))
+_sym_db.RegisterMessage(FibReply)
+
+
+# @@protoc_insertion_point(module_scope)
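
Aside (not part of the patch): once a generated module like the one above is importable and the protobuf runtime is installed, the registered message classes behave like ordinary value types. A hedged usage sketch, assuming the module can be imported as math_pb2 from the _junkdrawer package.

from _junkdrawer import math_pb2  # assumed import path; adjust to your layout

args = math_pb2.DivArgs(dividend=7, divisor=2)
data = args.SerializeToString()   # wire-format bytes

parsed = math_pb2.DivArgs()
parsed.ParseFromString(data)
assert parsed.dividend == 7 and parsed.divisor == 2
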
diff --git a/src/python/_junkdrawer/stock_pb2.py b/src/python/src/_junkdrawer/stock_pb2.py
similarity index 100%
rename from src/python/_junkdrawer/stock_pb2.py
rename to src/python/src/_junkdrawer/stock_pb2.py
diff --git a/src/ruby/Rakefile b/src/ruby/Rakefile
index 6ba9a97..b27305d 100755
--- a/src/ruby/Rakefile
+++ b/src/ruby/Rakefile
@@ -13,9 +13,11 @@
 SPEC_SUITES = [
   { id: :wrapper, title: 'wrapper layer', files: %w(spec/*.rb) },
   { id: :idiomatic, title: 'idiomatic layer', dir: %w(spec/generic),
-    tag: '~bidi' },
+    tags: ['~bidi', '~server'] },
   { id: :bidi, title: 'bidi tests', dir: %w(spec/generic),
-    tag: 'bidi' }
+    tag: 'bidi' },
+  { id: :server, title: 'rpc server thread tests', dir: %w(spec/generic),
+    tag: 'server' }
 ]
 
 desc 'Run all RSpec tests'
@@ -33,12 +35,20 @@
 
         t.pattern = spec_files
         t.rspec_opts = "--tag #{suite[:tag]}" if suite[:tag]
+        if suite[:tags]
+          t.rspec_opts = suite[:tags].map { |x| "--tag #{x}" }.join(' ')
+        end
       end
     end
   end
 end
 
-task default: 'spec:suite:idiomatic'  # this should be spec:suite:bidi
-task 'spec:suite:wrapper' => :compile
+desc 'Compiles the extension then runs all the tests'
+task :all
+
+task default: :all
+task 'spec:suite:wrapper' => [:compile, :rubocop]
 task 'spec:suite:idiomatic' => 'spec:suite:wrapper'
-task 'spec:suite:bidi' => 'spec:suite:idiomatic'
+task 'spec:suite:bidi' => 'spec:suite:wrapper'
+task 'spec:suite:server' => 'spec:suite:wrapper'
+task all: ['spec:suite:idiomatic', 'spec:suite:bidi', 'spec:suite:server']
diff --git a/src/ruby/bin/apis/google/protobuf/empty.rb b/src/ruby/bin/apis/google/protobuf/empty.rb
new file mode 100644
index 0000000..33e8a92
--- /dev/null
+++ b/src/ruby/bin/apis/google/protobuf/empty.rb
@@ -0,0 +1,44 @@
+# Copyright 2014, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: google/protobuf/empty.proto
+
+require 'google/protobuf'
+
+Google::Protobuf::DescriptorPool.generated_pool.build do
+  add_message "google.protobuf.Empty" do
+  end
+end
+
+module Google
+  module Protobuf
+    Empty = Google::Protobuf::DescriptorPool.generated_pool.lookup("google.protobuf.Empty").msgclass
+  end
+end
diff --git a/src/ruby/bin/apis/pubsub_demo.rb b/src/ruby/bin/apis/pubsub_demo.rb
new file mode 100755
index 0000000..8ebac19
--- /dev/null
+++ b/src/ruby/bin/apis/pubsub_demo.rb
@@ -0,0 +1,278 @@
+#!/usr/bin/env ruby
+
+# Copyright 2014, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# pubsub_demo demos access to the Google PubSub API via its gRPC interface
+#
+# TODO: update the Usage once the usable auth gem is available
+# $ SSL_CERT_FILE=<path/to/ssl/certs> \
+#   path/to/pubsub_demo.rb \
+#   --service_account_key_file=<path_to_service_account> \
+#   [--action=<chosen_demo_action> ]
+#
+# There are options related to the chosen action, see #parse_args below.
+# - the possible actions are given by the method names of NamedAction class
+# - the default action is list_some_topics
+
+this_dir = File.expand_path(File.dirname(__FILE__))
+lib_dir = File.join(File.dirname(File.dirname(this_dir)), 'lib')
+$LOAD_PATH.unshift(lib_dir) unless $LOAD_PATH.include?(lib_dir)
+$LOAD_PATH.unshift(this_dir) unless $LOAD_PATH.include?(this_dir)
+
+require 'optparse'
+
+require 'grpc'
+require 'google/protobuf'
+
+require 'google/protobuf/empty'
+require 'tech/pubsub/proto/pubsub'
+require 'tech/pubsub/proto/pubsub_services'
+
+# loads the certificates used to access the test server securely.
+def load_prod_cert
+  fail 'could not find a production cert' if ENV['SSL_CERT_FILE'].nil?
+  p "loading prod certs from #{ENV['SSL_CERT_FILE']}"
+  File.open(ENV['SSL_CERT_FILE']).read
+end
+
+# creates a SSL Credentials from the production certificates.
+def ssl_creds
+  GRPC::Core::Credentials.new(load_prod_cert)
+end
+
+# Builds the metadata authentication update proc.
+#
+# TODO: replace this once the ruby usable auth repo is available.
+def auth_proc(opts)
+  if GRPC::Auth::GCECredentials.on_gce?
+    return GRPC::Auth::GCECredentials.new.updater_proc
+  end
+  fd = StringIO.new(File.read(opts.oauth_key_file))
+  GRPC::Auth::ServiceAccountCredentials.new(opts.oauth_scope, fd).updater_proc
+end
+
+# Creates a stub for accessing the publisher service.
+def publisher_stub(opts)
+  address = "#{opts.host}:#{opts.port}"
+  stub_clz = Tech::Pubsub::PublisherService::Stub # shorter
+  logger.info("... access PublisherService at #{address}")
+  stub_clz.new(address,
+               creds: ssl_creds, update_metadata: auth_proc(opts),
+               GRPC::Core::Channel::SSL_TARGET => opts.host)
+end
+
+# Creates a stub for accessing the subscriber service.
+def subscriber_stub(opts)
+  address = "#{opts.host}:#{opts.port}"
+  stub_clz = Tech::Pubsub::SubscriberService::Stub # shorter
+  logger.info("... access SubscriberService at #{address}")
+  stub_clz.new(address,
+               creds: ssl_creds, update_metadata: auth_proc(opts),
+               GRPC::Core::Channel::SSL_TARGET => opts.host)
+end
+
+# defines methods corresponding to each demo action.
+class NamedActions
+  include Tech::Pubsub
+
+  # Initializes NamedActions
+  #
+  # @param pub [Stub] a stub for accessing the publisher service
+  # @param sub [Stub] a stub for accessing the subscriber service
+  # @param args [Args] provides access to the command line
+  def initialize(pub, sub, args)
+    @pub = pub
+    @sub = sub
+    @args = args
+  end
+
+  # Removes the test topic if it exists
+  def remove_topic
+    name = test_topic_name
+    p "... removing Topic #{name}"
+    @pub.delete_topic(DeleteTopicRequest.new(topic: name))
+    p "removed Topic: #{name} OK"
+  rescue GRPC::BadStatus => e
+    p "Could not delete a topics: rpc failed with '#{e}'"
+  end
+
+  # Creates a test topic
+  def create_topic
+    name = test_topic_name
+    p "... creating Topic #{name}"
+    resp = @pub.create_topic(Topic.new(name: name))
+    p "created Topic: #{resp.name} OK"
+  rescue GRPC::BadStatus => e
+    p "Could not create a topics: rpc failed with '#{e}'"
+  end
+
+  # Lists topics in the project
+  def list_some_topics
+    p 'Listing topics'
+    p '--------------'
+    list_project_topics.topic.each { |t| p t.name }
+  rescue GRPC::BadStatus => e
+    p "Could not list topics: rpc failed with '#{e}'"
+  end
+
+  # Checks if a topics exists in a project
+  def check_exists
+    name = test_topic_name
+    p "... checking for topic #{name}"
+    exists = topic_exists?(name)
+    p "#{name} is a topic" if exists
+    p "#{name} is not a topic" unless exists
+  rescue GRPC::BadStatus => e
+    p "Could not check for a topics: rpc failed with '#{e}'"
+  end
+
+  # Publishes some messages, then pulls and acknowledges them
+  def random_pub_sub
+    topic_name, sub_name = test_topic_name, test_sub_name
+    create_topic_if_needed(topic_name)
+    @sub.create_subscription(Subscription.new(name: sub_name,
+                                              topic: topic_name))
+    msg_count = rand(10..30)
+    msg_count.times do |x|
+      msg = PubsubMessage.new(data: "message #{x}")
+      @pub.publish(PublishRequest.new(topic: topic_name, message: msg))
+    end
+    p "Sent #{msg_count} messages to #{topic_name}, checking for them now."
+    batch = @sub.pull_batch(PullBatchRequest.new(subscription: sub_name,
+                                                 max_events: msg_count))
+    ack_ids = batch.pull_responses.map { |x| x.ack_id }
+    p "Got #{ack_ids.size} messages; acknowledging them.."
+    @sub.acknowledge(AcknowledgeRequest.new(subscription: sub_name,
+                                            ack_id: ack_ids))
+    p "Test messages were acknowledged OK, deleting the subscription"
+    del_req = DeleteSubscriptionRequest.new(subscription: sub_name)
+    @sub.delete_subscription(del_req)
+  rescue GRPC::BadStatus => e
+    p "Could not do random pub sub: rpc failed with '#{e}'"
+  end
+
+  private
+
+  # test_topic_name is the topic name to use in this test.
+  def test_topic_name
+    unless @args.topic_name.nil?
+      return "/topics/#{@args.project_id}/#{@args.topic_name}"
+    end
+    now_text = Time.now.utc.strftime('%Y%m%d%H%M%S%L')
+    "/topics/#{@args.project_id}/#{ENV['USER']}-#{now_text}"
+  end
+
+  # test_sub_name is the subscription name to use in this test.
+  def test_sub_name
+    unless @args.sub_name.nil?
+      return "/subscriptions/#{@args.project_id}/#{@args.sub_name}"
+    end
+    now_text = Time.now.utc.strftime('%Y%m%d%H%M%S%L')
+    "/subscriptions/#{@args.project_id}/#{ENV['USER']}-#{now_text}"
+  end
+
+  # determines if the topic name exists
+  def topic_exists?(name)
+    topics = list_project_topics.topic.map { |t| t.name }
+    topics.include?(name)
+  end
+
+  def create_topic_if_needed(name)
+    return if topic_exists?(name)
+    @pub.create_topic(Topic.new(name: name))
+  end
+
+  def list_project_topics
+    q = "cloud.googleapis.com/project in (/projects/#{@args.project_id})"
+    @pub.list_topics(ListTopicsRequest.new(query: q))
+  end
+end
+
+# Args is used to hold the command line info.
+Args = Struct.new(:host, :oauth_scope, :oauth_key_file, :port, :action,
+                  :project_id, :topic_name, :sub_name)
+
+# validates the command line options, returning them as an Args struct.
+def parse_args
+  args = Args.new('pubsub-staging.googleapis.com',
+                  'https://www.googleapis.com/auth/pubsub',
+                  nil, 443, 'list_some_topics', 'stoked-keyword-656')
+  OptionParser.new do |opts|
+    opts.on('--oauth_scope scope',
+            'Scope for OAuth tokens') { |v| args['oauth_scope'] = v }
+    opts.on('--server_host SERVER_HOST', 'server hostname') do |v|
+      args.host = v
+    end
+    opts.on('--server_port SERVER_PORT', 'server port') do |v|
+      args.port = v
+    end
+    opts.on('--service_account_key_file PATH',
+            'Path to the service account json key file') do |v|
+      args.oauth_key_file = v
+    end
+
+    # instance_methods(false) gives only the methods defined in that class.
+    scenes = NamedActions.instance_methods(false).map { |t| t.to_s }
+    scene_list = scenes.join(',')
+    opts.on("--action CODE", scenes, {}, 'pick a demo action',
+            "  (#{scene_list})") do |v|
+      args.action = v
+    end
+
+    # Set the remaining values.
+    %w(project_id topic_name sub_name).each do |o|
+      opts.on("--#{o} VALUE", "#{o}") do |v|
+        args[o] = v
+      end
+    end
+  end.parse!
+  _check_args(args)
+end
+
+def _check_args(args)
+  %w(host port action).each do |a|
+    if args[a].nil?
+      raise OptionParser::MissingArgument.new("please specify --#{a}")
+    end
+  end
+  if args['oauth_key_file'].nil? || args['oauth_scope'].nil?
+    fail(OptionParser::MissingArgument,
+         'please specify both of --service_account_key_file and --oauth_scope')
+  end
+  args
+end
+
+def main
+  args = parse_args
+  pub, sub = publisher_stub(args), subscriber_stub(args)
+  NamedActions.new(pub, sub, args).method(args.action).call
+end
+
+main
diff --git a/src/ruby/bin/apis/tech/pubsub/proto/pubsub.rb b/src/ruby/bin/apis/tech/pubsub/proto/pubsub.rb
new file mode 100644
index 0000000..aa7893d
--- /dev/null
+++ b/src/ruby/bin/apis/tech/pubsub/proto/pubsub.rb
@@ -0,0 +1,174 @@
+# Copyright 2014, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: tech/pubsub/proto/pubsub.proto
+
+require 'google/protobuf'
+
+require 'google/protobuf/empty'
+Google::Protobuf::DescriptorPool.generated_pool.build do
+  add_message "tech.pubsub.Topic" do
+    optional :name, :string, 1
+  end
+  add_message "tech.pubsub.PubsubMessage" do
+    optional :data, :string, 1
+    optional :message_id, :string, 3
+  end
+  add_message "tech.pubsub.GetTopicRequest" do
+    optional :topic, :string, 1
+  end
+  add_message "tech.pubsub.PublishRequest" do
+    optional :topic, :string, 1
+    optional :message, :message, 2, "tech.pubsub.PubsubMessage"
+  end
+  add_message "tech.pubsub.PublishBatchRequest" do
+    optional :topic, :string, 1
+    repeated :messages, :message, 2, "tech.pubsub.PubsubMessage"
+  end
+  add_message "tech.pubsub.PublishBatchResponse" do
+    repeated :message_ids, :string, 1
+  end
+  add_message "tech.pubsub.ListTopicsRequest" do
+    optional :query, :string, 1
+    optional :max_results, :int32, 2
+    optional :page_token, :string, 3
+  end
+  add_message "tech.pubsub.ListTopicsResponse" do
+    repeated :topic, :message, 1, "tech.pubsub.Topic"
+    optional :next_page_token, :string, 2
+  end
+  add_message "tech.pubsub.DeleteTopicRequest" do
+    optional :topic, :string, 1
+  end
+  add_message "tech.pubsub.Subscription" do
+    optional :name, :string, 1
+    optional :topic, :string, 2
+    optional :query, :string, 3
+    optional :truncation_policy, :message, 4, "tech.pubsub.Subscription.TruncationPolicy"
+    optional :push_config, :message, 5, "tech.pubsub.PushConfig"
+    optional :ack_deadline_seconds, :int32, 6
+    optional :garbage_collect_seconds, :int64, 7
+  end
+  add_message "tech.pubsub.Subscription.TruncationPolicy" do
+    optional :max_bytes, :int64, 1
+    optional :max_age_seconds, :int64, 2
+  end
+  add_message "tech.pubsub.PushConfig" do
+    optional :push_endpoint, :string, 1
+  end
+  add_message "tech.pubsub.PubsubEvent" do
+    optional :subscription, :string, 1
+    optional :message, :message, 2, "tech.pubsub.PubsubMessage"
+    optional :truncated, :bool, 3
+    optional :deleted, :bool, 4
+  end
+  add_message "tech.pubsub.GetSubscriptionRequest" do
+    optional :subscription, :string, 1
+  end
+  add_message "tech.pubsub.ListSubscriptionsRequest" do
+    optional :query, :string, 1
+    optional :max_results, :int32, 3
+    optional :page_token, :string, 4
+  end
+  add_message "tech.pubsub.ListSubscriptionsResponse" do
+    repeated :subscription, :message, 1, "tech.pubsub.Subscription"
+    optional :next_page_token, :string, 2
+  end
+  add_message "tech.pubsub.TruncateSubscriptionRequest" do
+    optional :subscription, :string, 1
+  end
+  add_message "tech.pubsub.DeleteSubscriptionRequest" do
+    optional :subscription, :string, 1
+  end
+  add_message "tech.pubsub.ModifyPushConfigRequest" do
+    optional :subscription, :string, 1
+    optional :push_config, :message, 2, "tech.pubsub.PushConfig"
+  end
+  add_message "tech.pubsub.PullRequest" do
+    optional :subscription, :string, 1
+    optional :return_immediately, :bool, 2
+  end
+  add_message "tech.pubsub.PullResponse" do
+    optional :ack_id, :string, 1
+    optional :pubsub_event, :message, 2, "tech.pubsub.PubsubEvent"
+  end
+  add_message "tech.pubsub.PullBatchRequest" do
+    optional :subscription, :string, 1
+    optional :return_immediately, :bool, 2
+    optional :max_events, :int32, 3
+  end
+  add_message "tech.pubsub.PullBatchResponse" do
+    repeated :pull_responses, :message, 2, "tech.pubsub.PullResponse"
+  end
+  add_message "tech.pubsub.ModifyAckDeadlineRequest" do
+    optional :subscription, :string, 1
+    optional :ack_id, :string, 2
+    optional :ack_deadline_seconds, :int32, 3
+  end
+  add_message "tech.pubsub.AcknowledgeRequest" do
+    optional :subscription, :string, 1
+    repeated :ack_id, :string, 2
+  end
+  add_message "tech.pubsub.NackRequest" do
+    optional :subscription, :string, 1
+    repeated :ack_id, :string, 2
+  end
+end
+
+module Tech
+  module Pubsub
+    Topic = Google::Protobuf::DescriptorPool.generated_pool.lookup("tech.pubsub.Topic").msgclass
+    PubsubMessage = Google::Protobuf::DescriptorPool.generated_pool.lookup("tech.pubsub.PubsubMessage").msgclass
+    GetTopicRequest = Google::Protobuf::DescriptorPool.generated_pool.lookup("tech.pubsub.GetTopicRequest").msgclass
+    PublishRequest = Google::Protobuf::DescriptorPool.generated_pool.lookup("tech.pubsub.PublishRequest").msgclass
+    PublishBatchRequest = Google::Protobuf::DescriptorPool.generated_pool.lookup("tech.pubsub.PublishBatchRequest").msgclass
+    PublishBatchResponse = Google::Protobuf::DescriptorPool.generated_pool.lookup("tech.pubsub.PublishBatchResponse").msgclass
+    ListTopicsRequest = Google::Protobuf::DescriptorPool.generated_pool.lookup("tech.pubsub.ListTopicsRequest").msgclass
+    ListTopicsResponse = Google::Protobuf::DescriptorPool.generated_pool.lookup("tech.pubsub.ListTopicsResponse").msgclass
+    DeleteTopicRequest = Google::Protobuf::DescriptorPool.generated_pool.lookup("tech.pubsub.DeleteTopicRequest").msgclass
+    Subscription = Google::Protobuf::DescriptorPool.generated_pool.lookup("tech.pubsub.Subscription").msgclass
+    Subscription::TruncationPolicy = Google::Protobuf::DescriptorPool.generated_pool.lookup("tech.pubsub.Subscription.TruncationPolicy").msgclass
+    PushConfig = Google::Protobuf::DescriptorPool.generated_pool.lookup("tech.pubsub.PushConfig").msgclass
+    PubsubEvent = Google::Protobuf::DescriptorPool.generated_pool.lookup("tech.pubsub.PubsubEvent").msgclass
+    GetSubscriptionRequest = Google::Protobuf::DescriptorPool.generated_pool.lookup("tech.pubsub.GetSubscriptionRequest").msgclass
+    ListSubscriptionsRequest = Google::Protobuf::DescriptorPool.generated_pool.lookup("tech.pubsub.ListSubscriptionsRequest").msgclass
+    ListSubscriptionsResponse = Google::Protobuf::DescriptorPool.generated_pool.lookup("tech.pubsub.ListSubscriptionsResponse").msgclass
+    TruncateSubscriptionRequest = Google::Protobuf::DescriptorPool.generated_pool.lookup("tech.pubsub.TruncateSubscriptionRequest").msgclass
+    DeleteSubscriptionRequest = Google::Protobuf::DescriptorPool.generated_pool.lookup("tech.pubsub.DeleteSubscriptionRequest").msgclass
+    ModifyPushConfigRequest = Google::Protobuf::DescriptorPool.generated_pool.lookup("tech.pubsub.ModifyPushConfigRequest").msgclass
+    PullRequest = Google::Protobuf::DescriptorPool.generated_pool.lookup("tech.pubsub.PullRequest").msgclass
+    PullResponse = Google::Protobuf::DescriptorPool.generated_pool.lookup("tech.pubsub.PullResponse").msgclass
+    PullBatchRequest = Google::Protobuf::DescriptorPool.generated_pool.lookup("tech.pubsub.PullBatchRequest").msgclass
+    PullBatchResponse = Google::Protobuf::DescriptorPool.generated_pool.lookup("tech.pubsub.PullBatchResponse").msgclass
+    ModifyAckDeadlineRequest = Google::Protobuf::DescriptorPool.generated_pool.lookup("tech.pubsub.ModifyAckDeadlineRequest").msgclass
+    AcknowledgeRequest = Google::Protobuf::DescriptorPool.generated_pool.lookup("tech.pubsub.AcknowledgeRequest").msgclass
+    NackRequest = Google::Protobuf::DescriptorPool.generated_pool.lookup("tech.pubsub.NackRequest").msgclass
+  end
+end
diff --git a/src/ruby/bin/apis/tech/pubsub/proto/pubsub_services.rb b/src/ruby/bin/apis/tech/pubsub/proto/pubsub_services.rb
new file mode 100644
index 0000000..0023f48
--- /dev/null
+++ b/src/ruby/bin/apis/tech/pubsub/proto/pubsub_services.rb
@@ -0,0 +1,103 @@
+# Copyright 2014, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# Source: tech/pubsub/proto/pubsub.proto for package 'tech.pubsub'
+
+require 'grpc'
+require 'google/protobuf/empty'
+require 'tech/pubsub/proto/pubsub'
+
+module Tech
+  module Pubsub
+    module PublisherService
+
+      # TODO: add proto service documentation here
+      class Service
+
+        include GRPC::GenericService
+
+        self.marshal_class_method = :encode
+        self.unmarshal_class_method = :decode
+        self.service_name = 'tech.pubsub.PublisherService'
+
+        rpc :CreateTopic, Topic, Topic
+        rpc :Publish, PublishRequest, Google::Protobuf::Empty
+        rpc :PublishBatch, PublishBatchRequest, PublishBatchResponse
+        rpc :GetTopic, GetTopicRequest, Topic
+        rpc :ListTopics, ListTopicsRequest, ListTopicsResponse
+        rpc :DeleteTopic, DeleteTopicRequest, Google::Protobuf::Empty
+      end
+
+      Stub = Service.rpc_stub_class
+    end
+    module SubscriberService
+
+      # TODO: add proto service documentation here
+      class Service
+
+        include GRPC::GenericService
+
+        self.marshal_class_method = :encode
+        self.unmarshal_class_method = :decode
+        self.service_name = 'tech.pubsub.SubscriberService'
+
+        rpc :CreateSubscription, Subscription, Subscription
+        rpc :GetSubscription, GetSubscriptionRequest, Subscription
+        rpc :ListSubscriptions, ListSubscriptionsRequest, ListSubscriptionsResponse
+        rpc :DeleteSubscription, DeleteSubscriptionRequest, Google::Protobuf::Empty
+        rpc :TruncateSubscription, TruncateSubscriptionRequest, Google::Protobuf::Empty
+        rpc :ModifyPushConfig, ModifyPushConfigRequest, Google::Protobuf::Empty
+        rpc :Pull, PullRequest, PullResponse
+        rpc :PullBatch, PullBatchRequest, PullBatchResponse
+        rpc :ModifyAckDeadline, ModifyAckDeadlineRequest, Google::Protobuf::Empty
+        rpc :Acknowledge, AcknowledgeRequest, Google::Protobuf::Empty
+        rpc :Nack, NackRequest, Google::Protobuf::Empty
+      end
+
+      Stub = Service.rpc_stub_class
+    end
+    module PushEndpointService
+
+      # TODO: add proto service documentation here
+      class Service
+
+        include GRPC::GenericService
+
+        self.marshal_class_method = :encode
+        self.unmarshal_class_method = :decode
+        self.service_name = 'tech.pubsub.PushEndpointService'
+
+        rpc :HandlePubsubEvent, PubsubEvent, Google::Protobuf::Empty
+      end
+
+      Stub = Service.rpc_stub_class
+    end
+  end
+end
diff --git a/src/ruby/bin/interop/interop_client.rb b/src/ruby/bin/interop/interop_client.rb
index 86739b7..e29e22b 100755
--- a/src/ruby/bin/interop/interop_client.rb
+++ b/src/ruby/bin/interop/interop_client.rb
@@ -56,6 +56,8 @@
 
 require 'signet/ssl_config'
 
+include Google::RPC::Auth
+
 # loads the certificates used to access the test server securely.
 def load_test_certs
   this_dir = File.expand_path(File.dirname(__FILE__))
@@ -67,40 +69,54 @@
 # loads the certificates used to access the test server securely.
 def load_prod_cert
   fail 'could not find a production cert' if ENV['SSL_CERT_FILE'].nil?
-  p "loading prod certs from #{ENV['SSL_CERT_FILE']}"
+  logger.info("loading prod certs from #{ENV['SSL_CERT_FILE']}")
   File.open(ENV['SSL_CERT_FILE']).read
 end
 
-# creates a Credentials from the test certificates.
+# creates SSL Credentials from the test certificates.
 def test_creds
   certs = load_test_certs
   GRPC::Core::Credentials.new(certs[0])
 end
 
-RX_CERT = /-----BEGIN CERTIFICATE-----\n.*?-----END CERTIFICATE-----\n/m
-
-
-# creates a Credentials from the production certificates.
+# creates SSL Credentials from the production certificates.
 def prod_creds
   cert_text = load_prod_cert
   GRPC::Core::Credentials.new(cert_text)
 end
 
+# creates the SSL Credentials.
+def ssl_creds(use_test_ca)
+  return test_creds if use_test_ca
+  prod_creds
+end
+
 # creates a test stub that accesses host:port securely.
-def create_stub(host, port, is_secure, host_override, use_test_ca)
-  address = "#{host}:#{port}"
-  if is_secure
-    creds = nil
-    if use_test_ca
-      creds = test_creds
-    else
-      creds = prod_creds
+def create_stub(opts)
+  address = "#{opts.host}:#{opts.port}"
+  if opts.secure
+    stub_opts = {
+      :creds => ssl_creds(opts.use_test_ca),
+      GRPC::Core::Channel::SSL_TARGET => opts.host_override
+    }
+
+    # Add service account creds if specified
+    if %w(all service_account_creds).include?(opts.test_case)
+      unless opts.oauth_scope.nil?
+        fd = StringIO.new(File.read(opts.oauth_key_file))
+        logger.info("loading oauth certs from #{opts.oauth_key_file}")
+        auth_creds = ServiceAccountCredentials.new(opts.oauth_scope, fd)
+        stub_opts[:update_metadata] = auth_creds.updater_proc
+      end
     end
 
-    stub_opts = {
-      :creds => creds,
-      GRPC::Core::Channel::SSL_TARGET => host_override
-    }
+    # Add compute engine creds if specified
+    if %w(all compute_engine_creds).include?(opts.test_case)
+      unless opts.oauth_scope.nil?
+        stub_opts[:update_metadata] = GCECredentials.new.update_proc
+      end
+    end
+
     logger.info("... connecting securely to #{address}")
     Grpc::Testing::TestService::Stub.new(address, **stub_opts)
   else
@@ -158,9 +174,10 @@
   include Grpc::Testing::PayloadType
   attr_accessor :assertions # required by Minitest::Assertions
 
-  def initialize(stub)
+  def initialize(stub, args)
     @assertions = 0  # required by Minitest::Assertions
     @stub = stub
+    @args = args
   end
 
   def empty_unary
@@ -170,21 +187,37 @@
   end
 
   def large_unary
-    req_size, wanted_response_size = 271_828, 314_159
-    payload = Payload.new(type: :COMPRESSABLE, body: nulls(req_size))
-    req = SimpleRequest.new(response_type: :COMPRESSABLE,
-                            response_size: wanted_response_size,
-                            payload: payload)
-    resp = @stub.unary_call(req)
-    assert_equal(:COMPRESSABLE, resp.payload.type,
-                 'large_unary: payload had the wrong type')
-    assert_equal(wanted_response_size, resp.payload.body.length,
-                 'large_unary: payload had the wrong length')
-    assert_equal(nulls(wanted_response_size), resp.payload.body,
-                 'large_unary: payload content is invalid')
+    perform_large_unary
     p 'OK: large_unary'
   end
 
+  def service_account_creds
+    # ignore this test if the oauth options are not set
+    if @args.oauth_scope.nil? || @args.oauth_key_file.nil?
+      p 'NOT RUN: service_account_creds; no service_account settings'
+      return
+    end
+    json_key = File.read(@args.oauth_key_file)
+    wanted_email = MultiJson.load(json_key)['client_email']
+    resp = perform_large_unary(fill_username: true,
+                               fill_oauth_scope: true)
+    assert_equal(wanted_email, resp.username,
+                 'service_account_creds: incorrect username')
+    assert(@args.oauth_scope.include?(resp.oauth_scope),
+           'service_account_creds: incorrect oauth_scope')
+    p 'OK: service_account_creds'
+  end
+
+  def compute_engine_creds
+    resp = perform_large_unary(fill_username: true,
+                               fill_oauth_scope: true)
+    assert(@args.oauth_scope.include?(resp.oauth_scope),
+           'compute_engine_creds: incorrect oauth_scope')
+    assert_equal(@args.default_service_account, resp.username,
+                 'compute_engine_creds: incorrect username')
+    p 'OK: compute_engine_creds'
+  end
+
   def client_streaming
     msg_sizes = [27_182, 8, 1828, 45_904]
     wanted_aggregate_size = 74_922
@@ -230,64 +263,89 @@
       method(m).call
     end
   end
+
+  private
+
+  def perform_large_unary(fill_username: false, fill_oauth_scope: false)
+    req_size, wanted_response_size = 271_828, 314_159
+    payload = Payload.new(type: :COMPRESSABLE, body: nulls(req_size))
+    req = SimpleRequest.new(response_type: :COMPRESSABLE,
+                            response_size: wanted_response_size,
+                            payload: payload)
+    req.fill_username = fill_username
+    req.fill_oauth_scope = fill_oauth_scope
+    resp = @stub.unary_call(req)
+    assert_equal(:COMPRESSABLE, resp.payload.type,
+                 'large_unary: payload had the wrong type')
+    assert_equal(wanted_response_size, resp.payload.body.length,
+                 'large_unary: payload had the wrong length')
+    assert_equal(nulls(wanted_response_size), resp.payload.body,
+                 'large_unary: payload content is invalid')
+    resp
+  end
 end
 
+# Args is used to hold the command line info.
+Args = Struct.new(:default_service_account, :host, :host_override,
+                  :oauth_scope, :oauth_key_file, :port, :secure, :test_case,
+                  :use_test_ca)
+
-# validates the the command line options, returning them as a Hash.
+# validates the command line options, returning them as an Args struct.
-def parse_options
-  options = {
-    'secure' => false,
-    'server_host' => nil,
-    'server_host_override' => nil,
-    'server_port' => nil,
-    'test_case' => nil
-  }
+def parse_args
+  args = Args.new
+  args.host_override = 'foo.test.google.com'
   OptionParser.new do |opts|
-    opts.banner = 'Usage: --server_host <server_host> --server_port server_port'
+    opts.on('--oauth_scope scope',
+            'Scope for OAuth tokens') { |v| args['oauth_scope'] = v }
     opts.on('--server_host SERVER_HOST', 'server hostname') do |v|
-      options['server_host'] = v
+      args['host'] = v
+    end
+    opts.on('--default_service_account email_address',
+            'email address of the default service account') do |v|
+      args['default_service_account'] = v
+    end
+    opts.on('--service_account_key_file PATH',
+            'Path to the service account json key file') do |v|
+      args['oauth_key_file'] = v
     end
     opts.on('--server_host_override HOST_OVERRIDE',
             'override host via a HTTP header') do |v|
-      options['server_host_override'] = v
+      args['host_override'] = v
     end
-    opts.on('--server_port SERVER_PORT', 'server port') do |v|
-      options['server_port'] = v
-    end
+    opts.on('--server_port SERVER_PORT', 'server port') { |v| args['port'] = v }
     # instance_methods(false) gives only the methods defined in that class
     test_cases = NamedTests.instance_methods(false).map(&:to_s)
     test_case_list = test_cases.join(',')
     opts.on('--test_case CODE', test_cases, {}, 'select a test_case',
-            "  (#{test_case_list})") do |v|
-      options['test_case'] = v
-    end
+            "  (#{test_case_list})") { |v| args['test_case'] = v }
     opts.on('-s', '--use_tls', 'require a secure connection?') do |v|
-      options['secure'] = v
+      args['secure'] = v
     end
     opts.on('-t', '--use_test_ca',
             'if secure, use the test certificate?') do |v|
-      options['use_test_ca'] = v
+      args['use_test_ca'] = v
     end
   end.parse!
-  _check_options(options)
+  _check_args(args)
 end
 
-def _check_options(opts)
-  %w(server_host server_port test_case).each do |arg|
-    if opts[arg].nil?
+def _check_args(args)
+  %w(host port test_case).each do |a|
+    if args[a].nil?
-      fail(OptionParser::MissingArgument, "please specify --#{arg}")
+      fail(OptionParser::MissingArgument, "please specify --#{a}")
     end
   end
-  if opts['server_host_override'].nil?
-    opts['server_host_override'] = opts['server_host']
+  if args['oauth_key_file'].nil? ^ args['oauth_scope'].nil?
+    fail(OptionParser::MissingArgument,
+         'please specify --service_account_key_file and --oauth_scope together')
   end
-  opts
+  args
 end
 
 def main
-  opts = parse_options
-  stub = create_stub(opts['server_host'], opts['server_port'], opts['secure'],
-                     opts['server_host_override'], opts['use_test_ca'])
-  NamedTests.new(stub).method(opts['test_case']).call
+  opts = parse_args
+  stub = create_stub(opts)
+  NamedTests.new(stub, opts).method(opts['test_case']).call
 end
 
 main
diff --git a/src/ruby/bin/interop/test/cpp/interop/messages.rb b/src/ruby/bin/interop/test/cpp/interop/messages.rb
index 491608b..b86cd39 100644
--- a/src/ruby/bin/interop/test/cpp/interop/messages.rb
+++ b/src/ruby/bin/interop/test/cpp/interop/messages.rb
@@ -41,10 +41,13 @@
     optional :response_type, :enum, 1, "grpc.testing.PayloadType"
     optional :response_size, :int32, 2
     optional :payload, :message, 3, "grpc.testing.Payload"
+    optional :fill_username, :bool, 4
+    optional :fill_oauth_scope, :bool, 5
   end
   add_message "grpc.testing.SimpleResponse" do
     optional :payload, :message, 1, "grpc.testing.Payload"
-    optional :effective_gaia_user_id, :int64, 2
+    optional :username, :string, 2
+    optional :oauth_scope, :string, 3
   end
   add_message "grpc.testing.StreamingInputCallRequest" do
     optional :payload, :message, 1, "grpc.testing.Payload"
diff --git a/src/ruby/bin/math.rb b/src/ruby/bin/math.rb
old mode 100644
new mode 100755
diff --git a/src/ruby/bin/math_services.rb b/src/ruby/bin/math_services.rb
old mode 100644
new mode 100755
diff --git a/src/ruby/grpc.gemspec b/src/ruby/grpc.gemspec
index ffd084d..2ce242d 100755
--- a/src/ruby/grpc.gemspec
+++ b/src/ruby/grpc.gemspec
@@ -1,3 +1,4 @@
+# -*- ruby -*-
 # encoding: utf-8
 $LOAD_PATH.push File.expand_path('../lib', __FILE__)
 require 'grpc/version'
@@ -19,11 +20,14 @@
   s.require_paths = ['lib']
   s.platform      = Gem::Platform::RUBY
 
-  s.add_dependency 'xray'
-  s.add_dependency 'logging', '~> 1.8'
+  s.add_dependency 'faraday', '~> 0.9'
   s.add_dependency 'google-protobuf', '~> 3.0.0alpha.1.1'
-  s.add_dependency 'signet', '~> 0.5.1'
+  s.add_dependency 'logging', '~> 1.8'
+  s.add_dependency 'jwt', '~> 1.2.1'
   s.add_dependency 'minitest', '~> 5.4'  # reqd for interop tests
+  s.add_dependency 'multi_json', '1.10.1'
+  s.add_dependency 'signet', '~> 0.6.0'
+  s.add_dependency 'xray', '~> 1.1'
 
   s.add_development_dependency 'bundler', '~> 1.7'
   s.add_development_dependency 'rake', '~> 10.0'
diff --git a/src/ruby/lib/grpc.rb b/src/ruby/lib/grpc.rb
index 81c67ec..758ac0c 100644
--- a/src/ruby/lib/grpc.rb
+++ b/src/ruby/lib/grpc.rb
@@ -27,6 +27,8 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+require 'grpc/auth/compute_engine.rb'
+require 'grpc/auth/service_account.rb'
 require 'grpc/errors'
 require 'grpc/grpc'
 require 'grpc/logconfig'
diff --git a/src/ruby/lib/grpc/auth/compute_engine.rb b/src/ruby/lib/grpc/auth/compute_engine.rb
new file mode 100644
index 0000000..9004bef
--- /dev/null
+++ b/src/ruby/lib/grpc/auth/compute_engine.rb
@@ -0,0 +1,69 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+require 'faraday'
+require 'grpc/auth/signet'
+
+module Google
+  module RPC
+    # Module Auth provides classes that provide Google-specific authentication
+    # used to access Google gRPC services.
+    module Auth
+      # Extends Signet::OAuth2::Client so that the auth token is obtained from
+      # the GCE metadata server.
+      class GCECredentials < Signet::OAuth2::Client
+        COMPUTE_AUTH_TOKEN_URI = 'http://metadata/computeMetadata/v1/'\
+                                 'instance/service-accounts/default/token'
+        COMPUTE_CHECK_URI = 'http://metadata.google.internal'
+
+        # Detects if this appears to be a GCE instance, by checking if the
+        # metadata server is available
+        def self.on_gce?(options = {})
+          c = options[:connection] || Faraday.default_connection
+          resp = c.get(COMPUTE_CHECK_URI)
+          return false unless resp.status == 200
+          return false unless resp.headers.key?('Metadata-Flavor')
+          return resp.headers['Metadata-Flavor'] == 'Google'
+        rescue Faraday::ConnectionFailed
+          return false
+        end
+
+        # Overrides the super class method to change how access tokens are
+        # fetched.
+        def fetch_access_token(options = {})
+          c = options[:connection] || Faraday.default_connection
+          c.headers = { 'Metadata-Flavor' => 'Google' }
+          resp = c.get(COMPUTE_AUTH_TOKEN_URI)
+          Signet::OAuth2.parse_credentials(resp.body,
+                                           resp.headers['content-type'])
+        end
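+
+        # A minimal usage sketch (the metadata hash below is illustrative):
+        # check for the GCE environment, then let the credentials add an
+        # auth header when applied to a hash of call metadata, e.g.
+        #
+        #   if GCECredentials.on_gce?
+        #     md = GCECredentials.new.apply(foo: 'bar')
+        #   end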
+      end
+    end
+  end
+end
diff --git a/src/ruby/lib/grpc/auth/service_account.rb b/src/ruby/lib/grpc/auth/service_account.rb
new file mode 100644
index 0000000..35b5cbf
--- /dev/null
+++ b/src/ruby/lib/grpc/auth/service_account.rb
@@ -0,0 +1,68 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+require 'grpc/auth/signet'
+require 'multi_json'
+require 'openssl'
+
+# Reads the private key and client email fields from a service account JSON key.
+def read_json_key(json_key_io)
+  json_key = MultiJson.load(json_key_io.read)
+  fail 'missing client_email' unless json_key.key?('client_email')
+  fail 'missing private_key' unless json_key.key?('private_key')
+  [json_key['private_key'], json_key['client_email']]
+end
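+
+# For reference, the key file is expected to contain at least the two fields
+# checked above; the values here are illustrative:
+#
+#   { "private_key": "-----BEGIN PRIVATE KEY-----\n...",
+#     "client_email": "app@developer.gserviceaccount.com" }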
+
+module Google
+  module RPC
+    # Module Auth provides classes that provide Google-specific authentication
+    # used to access Google gRPC services.
+    module Auth
+      # Authenticates requests using Google's Service Account credentials.
+      # (cf https://developers.google.com/accounts/docs/OAuth2ServiceAccount)
+      class ServiceAccountCredentials < Signet::OAuth2::Client
+        TOKEN_CRED_URI = 'https://www.googleapis.com/oauth2/v3/token'
+        AUDIENCE = TOKEN_CRED_URI
+
+        # Initializes a ServiceAccountCredentials.
+        #
+        # @param scope [string|array] the scope(s) to access
+        # @param json_key_io [IO] an IO from which the JSON key can be read
+        def initialize(scope, json_key_io)
+          private_key, client_email = read_json_key(json_key_io)
+          super(token_credential_uri: TOKEN_CRED_URI,
+                audience: AUDIENCE,
+                scope: scope,
+                issuer: client_email,
+                signing_key: OpenSSL::PKey::RSA.new(private_key))
+        end
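+
+        # A minimal usage sketch (the scope and key file path are
+        # illustrative); the interop client in this change wires the
+        # resulting updater_proc into its stub options in the same way:
+        #
+        #   key_io = StringIO.new(File.read('/path/to/key.json'))
+        #   scope = 'https://www.googleapis.com/auth/pubsub'
+        #   creds = ServiceAccountCredentials.new(scope, key_io)
+        #   stub_opts = { update_metadata: creds.updater_proc }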
+      end
+    end
+  end
+end
diff --git a/src/ruby/lib/grpc/auth/signet.rb b/src/ruby/lib/grpc/auth/signet.rb
new file mode 100644
index 0000000..a8bce12
--- /dev/null
+++ b/src/ruby/lib/grpc/auth/signet.rb
@@ -0,0 +1,67 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+require 'signet/oauth_2/client'
+
+module Signet
+  # Signet::OAuth2 supports OAuth2 authentication.
+  module OAuth2
+    AUTH_METADATA_KEY = :Authorization
+    # Signet::OAuth2::Client creates an OAuth2 client
+    #
+    # Here, Client is re-opened to add the #apply and #apply! methods, which
+    # update a hash with the fetched authentication token
+    #
+    # Eventually, this change may be merged into signet itself, or some other
+    # package that provides Google-specific auth via signet, and this extension
+    # will be unnecessary.
+    class Client
+      # Updates a_hash with the authentication token
+      def apply!(a_hash, opts = {})
+        # fetch the access token if there is currently not one, or if the
+        # current one has expired
+        fetch_access_token!(opts) if access_token.nil? || expired?
+        a_hash[AUTH_METADATA_KEY] = "Bearer #{access_token}"
+      end
+
+      # Returns a clone of a_hash updated with the authentication token
+      def apply(a_hash, opts = {})
+        a_copy = a_hash.clone
+        apply!(a_copy, opts)
+        a_copy
+      end
+
+      # Returns a reference to the #apply method, suitable for passing as
+      # a closure
+      def updater_proc
+        lambda(&method(:apply))
+      end
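+
+      # Sketch of the intended use (the stub option name matches what the
+      # interop client in this change passes): hand the proc to a stub as
+      # :update_metadata so each call's metadata gains an Authorization entry.
+      #
+      #   stub_opts = { update_metadata: client.updater_proc }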
+    end
+  end
+end
diff --git a/src/ruby/spec/auth/apply_auth_examples.rb b/src/ruby/spec/auth/apply_auth_examples.rb
new file mode 100644
index 0000000..09b3930
--- /dev/null
+++ b/src/ruby/spec/auth/apply_auth_examples.rb
@@ -0,0 +1,163 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+spec_dir = File.expand_path(File.join(File.dirname(__FILE__)))
+$LOAD_PATH.unshift(spec_dir)
+$LOAD_PATH.uniq!
+
+require 'faraday'
+require 'spec_helper'
+
+def build_json_response(payload)
+  [200,
+   { 'Content-Type' => 'application/json; charset=utf-8' },
+   MultiJson.dump(payload)]
+end
+
+WANTED_AUTH_KEY = :Authorization
+
+shared_examples 'apply/apply! are OK' do
+  # tests that use these examples need to define
+  #
+  # @client which should be an auth client
+  #
+  # @make_auth_stubs, which should stub out the expected http behaviour of the
+  # auth client
+  describe '#fetch_access_token' do
+    it 'should set access_token to the fetched value' do
+      token = '1/abcdef1234567890'
+      stubs = make_auth_stubs with_access_token: token
+      c = Faraday.new do |b|
+        b.adapter(:test, stubs)
+      end
+
+      @client.fetch_access_token!(connection: c)
+      expect(@client.access_token).to eq(token)
+      stubs.verify_stubbed_calls
+    end
+  end
+
+  describe '#apply!' do
+    it 'should update the target hash with fetched access token' do
+      token = '1/abcdef1234567890'
+      stubs = make_auth_stubs with_access_token: token
+      c = Faraday.new do |b|
+        b.adapter(:test, stubs)
+      end
+
+      md = { foo: 'bar' }
+      @client.apply!(md, connection: c)
+      want = { :foo => 'bar', WANTED_AUTH_KEY => "Bearer #{token}" }
+      expect(md).to eq(want)
+      stubs.verify_stubbed_calls
+    end
+  end
+
+  describe 'updater_proc' do
+    it 'should provide a proc that updates a hash with the access token' do
+      token = '1/abcdef1234567890'
+      stubs = make_auth_stubs with_access_token: token
+      c = Faraday.new do |b|
+        b.adapter(:test, stubs)
+      end
+
+      md = { foo: 'bar' }
+      the_proc = @client.updater_proc
+      got = the_proc.call(md, connection: c)
+      want = { :foo => 'bar', WANTED_AUTH_KEY => "Bearer #{token}" }
+      expect(got).to eq(want)
+      stubs.verify_stubbed_calls
+    end
+  end
+
+  describe '#apply' do
+    it 'should not update the original hash with the access token' do
+      token = '1/abcdef1234567890'
+      stubs = make_auth_stubs with_access_token: token
+      c = Faraday.new do |b|
+        b.adapter(:test, stubs)
+      end
+
+      md = { foo: 'bar' }
+      @client.apply(md, connection: c)
+      want = { foo: 'bar' }
+      expect(md).to eq(want)
+      stubs.verify_stubbed_calls
+    end
+
+    it 'should add the token to the returned hash' do
+      token = '1/abcdef1234567890'
+      stubs = make_auth_stubs with_access_token: token
+      c = Faraday.new do |b|
+        b.adapter(:test, stubs)
+      end
+
+      md = { foo: 'bar' }
+      got = @client.apply(md, connection: c)
+      want = { :foo => 'bar', WANTED_AUTH_KEY => "Bearer #{token}" }
+      expect(got).to eq(want)
+      stubs.verify_stubbed_calls
+    end
+
+    it 'should not fetch a new token if the current is not expired' do
+      token = '1/abcdef1234567890'
+      stubs = make_auth_stubs with_access_token: token
+      c = Faraday.new do |b|
+        b.adapter(:test, stubs)
+      end
+
+      n = 5 # arbitrary
+      n.times do |_t|
+        md = { foo: 'bar' }
+        got = @client.apply(md, connection: c)
+        want = { :foo => 'bar', WANTED_AUTH_KEY => "Bearer #{token}" }
+        expect(got).to eq(want)
+      end
+      stubs.verify_stubbed_calls
+    end
+
+    it 'should fetch a new token if the current one is expired' do
+      token_1 = '1/abcdef1234567890'
+      token_2 = '2/abcdef1234567890'
+
+      [token_1, token_2].each do |t|
+        stubs = make_auth_stubs with_access_token: t
+        c = Faraday.new do |b|
+          b.adapter(:test, stubs)
+        end
+        md = { foo: 'bar' }
+        got = @client.apply(md, connection: c)
+        want = { :foo => 'bar', WANTED_AUTH_KEY => "Bearer #{t}" }
+        expect(got).to eq(want)
+        stubs.verify_stubbed_calls
+        @client.expires_at -= 3601 # default is to expire in 1hr
+      end
+    end
+  end
+end
diff --git a/src/ruby/spec/auth/compute_engine_spec.rb b/src/ruby/spec/auth/compute_engine_spec.rb
new file mode 100644
index 0000000..9e0b466
--- /dev/null
+++ b/src/ruby/spec/auth/compute_engine_spec.rb
@@ -0,0 +1,108 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+spec_dir = File.expand_path(File.join(File.dirname(__FILE__)))
+$LOAD_PATH.unshift(spec_dir)
+$LOAD_PATH.uniq!
+
+require 'apply_auth_examples'
+require 'faraday'
+require 'grpc/auth/compute_engine'
+require 'spec_helper'
+
+describe Google::RPC::Auth::GCECredentials do
+  MD_URI = '/computeMetadata/v1/instance/service-accounts/default/token'
+  GCECredentials = Google::RPC::Auth::GCECredentials
+
+  before(:example) do
+    @client = GCECredentials.new
+  end
+
+  def make_auth_stubs(with_access_token: '')
+    Faraday::Adapter::Test::Stubs.new do |stub|
+      stub.get(MD_URI) do |env|
+        headers = env[:request_headers]
+        expect(headers['Metadata-Flavor']).to eq('Google')
+        build_json_response(
+            'access_token' => with_access_token,
+            'token_type' => 'Bearer',
+            'expires_in' => 3600)
+      end
+    end
+  end
+
+  it_behaves_like 'apply/apply! are OK'
+
+  describe '#on_gce?' do
+    it 'should be true when Metadata-Flavor is Google' do
+      stubs = Faraday::Adapter::Test::Stubs.new do |stub|
+        stub.get('/') do |_env|
+          [200,
+           { 'Metadata-Flavor' => 'Google' },
+           '']
+        end
+      end
+      c = Faraday.new do |b|
+        b.adapter(:test, stubs)
+      end
+      expect(GCECredentials.on_gce?(connection: c)).to eq(true)
+      stubs.verify_stubbed_calls
+    end
+
+    it 'should be false when Metadata-Flavor is not Google' do
+      stubs = Faraday::Adapter::Test::Stubs.new do |stub|
+        stub.get('/') do |_env|
+          [200,
+           { 'Metadata-Flavor' => 'NotGoogle' },
+           '']
+        end
+      end
+      c = Faraday.new do |b|
+        b.adapter(:test, stubs)
+      end
+      expect(GCECredentials.on_gce?(connection: c)).to eq(false)
+      stubs.verify_stubbed_calls
+    end
+
+    it 'should be false if the response is not 200' do
+      stubs = Faraday::Adapter::Test::Stubs.new do |stub|
+        stub.get('/') do |_env|
+          [404,
+           { 'Metadata-Flavor' => 'Google' },
+           '']
+        end
+      end
+      c = Faraday.new do |b|
+        b.adapter(:test, stubs)
+      end
+      expect(GCECredentials.on_gce?(connection: c)).to eq(false)
+      stubs.verify_stubbed_calls
+    end
+  end
+end
diff --git a/src/ruby/spec/auth/service_account_spec.rb b/src/ruby/spec/auth/service_account_spec.rb
new file mode 100644
index 0000000..cbc6a73
--- /dev/null
+++ b/src/ruby/spec/auth/service_account_spec.rb
@@ -0,0 +1,75 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+spec_dir = File.expand_path(File.join(File.dirname(__FILE__)))
+$LOAD_PATH.unshift(spec_dir)
+$LOAD_PATH.uniq!
+
+require 'apply_auth_examples'
+require 'grpc/auth/service_account'
+require 'jwt'
+require 'multi_json'
+require 'openssl'
+require 'spec_helper'
+
+describe Google::RPC::Auth::ServiceAccountCredentials do
+  before(:example) do
+    @key = OpenSSL::PKey::RSA.new(2048)
+    cred_json = {
+      private_key_id: 'a_private_key_id',
+      private_key: @key.to_pem,
+      client_email: 'app@developer.gserviceaccount.com',
+      client_id: 'app.apps.googleusercontent.com',
+      type: 'service_account'
+    }
+    cred_json_text = MultiJson.dump(cred_json)
+    @client = Google::RPC::Auth::ServiceAccountCredentials.new(
+        'https://www.googleapis.com/auth/userinfo.profile',
+        StringIO.new(cred_json_text))
+  end
+
+  def make_auth_stubs(with_access_token: '')
+    Faraday::Adapter::Test::Stubs.new do |stub|
+      stub.post('/oauth2/v3/token') do |env|
+        params = Addressable::URI.form_unencode(env[:body])
+        _claim, _header = JWT.decode(params.assoc('assertion').last,
+                                     @key.public_key)
+        want = ['grant_type', 'urn:ietf:params:oauth:grant-type:jwt-bearer']
+        expect(params.assoc('grant_type')).to eq(want)
+        build_json_response(
+          'access_token' => with_access_token,
+          'token_type' => 'Bearer',
+          'expires_in' => 3600
+        )
+      end
+    end
+  end
+
+  it_behaves_like 'apply/apply! are OK'
+end
diff --git a/src/ruby/spec/auth/signet_spec.rb b/src/ruby/spec/auth/signet_spec.rb
new file mode 100644
index 0000000..1712edf
--- /dev/null
+++ b/src/ruby/spec/auth/signet_spec.rb
@@ -0,0 +1,70 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+spec_dir = File.expand_path(File.join(File.dirname(__FILE__)))
+$LOAD_PATH.unshift(spec_dir)
+$LOAD_PATH.uniq!
+
+require 'apply_auth_examples'
+require 'grpc/auth/signet'
+require 'jwt'
+require 'openssl'
+require 'spec_helper'
+
+describe Signet::OAuth2::Client do
+  before(:example) do
+    @key = OpenSSL::PKey::RSA.new(2048)
+    @client = Signet::OAuth2::Client.new(
+        token_credential_uri: 'https://accounts.google.com/o/oauth2/token',
+        scope: 'https://www.googleapis.com/auth/userinfo.profile',
+        issuer: 'app@example.com',
+        audience: 'https://accounts.google.com/o/oauth2/token',
+        signing_key: @key
+      )
+  end
+
+  def make_auth_stubs(with_access_token: '')
+    Faraday::Adapter::Test::Stubs.new do |stub|
+      stub.post('/o/oauth2/token') do |env|
+        params = Addressable::URI.form_unencode(env[:body])
+        _claim, _header = JWT.decode(params.assoc('assertion').last,
+                                     @key.public_key)
+        want = ['grant_type', 'urn:ietf:params:oauth:grant-type:jwt-bearer']
+        expect(params.assoc('grant_type')).to eq(want)
+        build_json_response(
+          'access_token' => with_access_token,
+          'token_type' => 'Bearer',
+          'expires_in' => 3600
+        )
+      end
+    end
+  end
+
+  it_behaves_like 'apply/apply! are OK'
+end
diff --git a/src/ruby/spec/channel_spec.rb b/src/ruby/spec/channel_spec.rb
index 189d1c6..82c7915 100644
--- a/src/ruby/spec/channel_spec.rb
+++ b/src/ruby/spec/channel_spec.rb
@@ -29,8 +29,6 @@
 
 require 'grpc'
 
-FAKE_HOST='localhost:0'
-
 def load_test_certs
   test_root = File.join(File.dirname(__FILE__), 'testdata')
   files = ['ca.pem', 'server1.key', 'server1.pem']
@@ -38,6 +36,8 @@
 end
 
 describe GRPC::Core::Channel do
+  FAKE_HOST = 'localhost:0'
+
   def create_test_cert
     GRPC::Core::Credentials.new(load_test_certs[0])
   end
diff --git a/src/ruby/spec/completion_queue_spec.rb b/src/ruby/spec/completion_queue_spec.rb
index 022a066..6117e06 100644
--- a/src/ruby/spec/completion_queue_spec.rb
+++ b/src/ruby/spec/completion_queue_spec.rb
@@ -30,6 +30,10 @@
 require 'grpc'
 
 describe GRPC::Core::CompletionQueue do
+  before(:example) do
+    @cq = GRPC::Core::CompletionQueue.new
+  end
+
   describe '#new' do
     it 'is constructed successfully' do
       expect { GRPC::Core::CompletionQueue.new }.not_to raise_error
@@ -38,39 +42,33 @@
 
   describe '#next' do
     it 'can be called without failing' do
-      ch = GRPC::Core::CompletionQueue.new
-      expect { ch.next(3) }.not_to raise_error
+      expect { @cq.next(3) }.not_to raise_error
     end
 
-    it 'can be called with the time constants' do
-      ch = GRPC::Core::CompletionQueue.new
-      # don't use INFINITE_FUTURE, as there we have no events.
-      non_blocking_consts = [:ZERO, :INFINITE_PAST]
-      m = GRPC::Core::TimeConsts
-      non_blocking_consts.each do |c|
-        a_time = m.const_get(c)
-        expect { ch.next(a_time) }.not_to raise_error
-      end
+    it 'can be called with a time constant' do
+      # don't use INFINITE_FUTURE, as there are no events and this blocks.
+      #
+      # don't use INFINITE_PAST, as this fails on docker, and does not need to
+      # be tested, as it's not used anywhere in the ruby implementation
+      a_time = GRPC::Core::TimeConsts::ZERO
+      expect { @cq.next(a_time) }.not_to raise_error
     end
   end
 
   describe '#pluck' do
     it 'can be called without failing' do
-      ch = GRPC::Core::CompletionQueue.new
       tag = Object.new
-      expect { ch.pluck(tag, 3) }.not_to raise_error
+      expect { @cq.pluck(tag, 3) }.not_to raise_error
     end
 
-    it 'can be called with the time constants' do
-      ch = GRPC::Core::CompletionQueue.new
-      # don't use INFINITE_FUTURE, as there we have no events.
-      non_blocking_consts = [:ZERO, :INFINITE_PAST]
-      m = GRPC::Core::TimeConsts
+    it 'can be called with a time constant' do
+      # don't use INFINITE_FUTURE, as there are no events and this blocks.
+      #
+      # don't use INFINITE_PAST, as this fails on docker, and does not need to
+      # be tested, as it's not used anywhere in the ruby implementation
       tag = Object.new
-      non_blocking_consts.each do |c|
-        a_time = m.const_get(c)
-        expect { ch.pluck(tag, a_time) }.not_to raise_error
-      end
+      a_time = GRPC::Core::TimeConsts::ZERO
+      expect { @cq.pluck(tag, a_time) }.not_to raise_error
     end
   end
 end
diff --git a/src/ruby/spec/generic/active_call_spec.rb b/src/ruby/spec/generic/active_call_spec.rb
index e81b216..599e68b 100644
--- a/src/ruby/spec/generic/active_call_spec.rb
+++ b/src/ruby/spec/generic/active_call_spec.rb
@@ -371,6 +371,6 @@
   end
 
   def deadline
-    Time.now + 0.25  # in 0.25 seconds; arbitrary
+    Time.now + 1  # in 1 second; arbitrary
   end
 end
diff --git a/src/ruby/spec/generic/rpc_server_spec.rb b/src/ruby/spec/generic/rpc_server_spec.rb
index e083bc1..0ec7957 100644
--- a/src/ruby/spec/generic/rpc_server_spec.rb
+++ b/src/ruby/spec/generic/rpc_server_spec.rb
@@ -323,7 +323,7 @@
     end
 
     describe 'when running' do
-      it 'should return NOT_FOUND status for requests on unknown methods' do
+      it 'should return NOT_FOUND status on unknown methods', server: true do
         @srv.handle(EchoService)
         t = Thread.new { @srv.run }
         @srv.wait_till_running
@@ -338,7 +338,7 @@
         t.join
       end
 
-      it 'should obtain responses for multiple sequential requests' do
+      it 'should handle multiple sequential requests', server: true do
         @srv.handle(EchoService)
         t = Thread.new { @srv.run }
         @srv.wait_till_running
@@ -350,7 +350,7 @@
         t.join
       end
 
-      it 'should obtain responses for multiple parallel requests' do
+      it 'should handle multiple parallel requests', server: true do
         @srv.handle(EchoService)
         Thread.new { @srv.run }
         @srv.wait_till_running
@@ -368,7 +368,7 @@
         threads.each(&:join)
       end
 
-      it 'should return UNAVAILABLE status if there too many jobs' do
+      it 'should return UNAVAILABLE on too many jobs', server: true do
         opts = {
           a_channel_arg: 'an_arg',
           server_override: @server,
diff --git a/src/ruby/spec/spec_helper.rb b/src/ruby/spec/spec_helper.rb
index 3322674..ea0a256 100644
--- a/src/ruby/spec/spec_helper.rb
+++ b/src/ruby/spec/spec_helper.rb
@@ -27,10 +27,22 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+spec_dir = File.expand_path(File.dirname(__FILE__))
+root_dir = File.expand_path(File.join(spec_dir, '..'))
+lib_dir = File.expand_path(File.join(root_dir, 'lib'))
+
+$LOAD_PATH.unshift(spec_dir)
+$LOAD_PATH.unshift(lib_dir)
+$LOAD_PATH.uniq!
+
+require 'faraday'
 require 'rspec'
 require 'logging'
 require 'rspec/logging_helper'
 
+# Allow Faraday to support test stubs
+Faraday::Adapter.load_middleware(:test)
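+#
+# The auth specs in this change rely on this: they build connections against
+# Faraday::Adapter::Test::Stubs, e.g. (the stubbed handler is illustrative):
+#
+#   stubs = Faraday::Adapter::Test::Stubs.new { |s| s.get('/') { [200, {}, ''] } }
+#   conn = Faraday.new { |b| b.adapter(:test, stubs) }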
+
 # Configure RSpec to capture log messages for each test. The output from the
 # logs will be stored in the @log_output variable. It is a StringIO instance.
 RSpec.configure do |config|
diff --git a/templates/Makefile.template b/templates/Makefile.template
index b80e80c..142d188 100644
--- a/templates/Makefile.template
+++ b/templates/Makefile.template
@@ -139,7 +139,7 @@
 
 CFLAGS += -std=c89 -pedantic
 CXXFLAGS += -std=c++11
-CPPFLAGS += -g -fPIC -Wall -Werror -Wno-long-long
+CPPFLAGS += -g -fPIC -Wall -Wextra -Werror -Wno-long-long -Wno-unused-parameter
 LDFLAGS += -g -fPIC
 
 INCLUDES = . include gens
@@ -481,6 +481,7 @@
 # This prevents proper debugging after running make install.
 
 strip-static_c: static_c
+ifeq ($(CONFIG),opt)
 % for lib in libs:
 % if lib.language == "c":
 % if lib.build == "all":
@@ -489,8 +490,10 @@
 % endif
 % endif
 % endfor
+endif
 
 strip-static_cxx: static_cxx
+ifeq ($(CONFIG),opt)
 % for lib in libs:
 % if lib.language == "c++":
 % if lib.build == "all":
@@ -499,8 +502,10 @@
 % endif
 % endif
 % endfor
+endif
 
 strip-shared_c: shared_c
+ifeq ($(CONFIG),opt)
 % for lib in libs:
 % if lib.language == "c":
 % if lib.build == "all":
@@ -509,8 +514,10 @@
 % endif
 % endif
 % endfor
+endif
 
 strip-shared_cxx: shared_cxx
+ifeq ($(CONFIG),opt)
 % for lib in libs:
 % if lib.language == "c++":
 % if lib.build == "all":
@@ -519,6 +526,7 @@
 % endif
 % endif
 % endfor
+endif
 
 % for p in protos:
 gens/${p}.pb.cc: ${p}.proto $(PROTOC_PLUGINS)
@@ -765,6 +773,7 @@
 	$(Q) ${ld} $(LDFLAGS) -Llibs/$(CONFIG) -dynamiclib -o ${out_libbase}.$(SHARED_EXT) ${common}${libs}
 else
 	$(Q) ${ld} $(LDFLAGS) -Llibs/$(CONFIG) -shared -Wl,-soname,lib${lib.name}.so.${settings.version.major} -o ${out_libbase}.$(SHARED_EXT) ${common}${libs}
+	$(Q) ln -sf lib${lib.name}.$(SHARED_EXT) ${out_libbase}.so.${settings.version.major}
 	$(Q) ln -sf lib${lib.name}.$(SHARED_EXT) ${out_libbase}.so
 endif
 endif
diff --git a/test/core/channel/channel_stack_test.c b/test/core/channel/channel_stack_test.c
index a400fb1..d53098b 100644
--- a/test/core/channel/channel_stack_test.c
+++ b/test/core/channel/channel_stack_test.c
@@ -80,7 +80,7 @@
   const grpc_channel_filter
       filter = {call_func,         channel_func,         sizeof(int),
                 call_init_func,    call_destroy_func,    sizeof(int),
-                channel_init_func, channel_destroy_func, };
+                channel_init_func, channel_destroy_func, "some_test_filter" };
   const grpc_channel_filter *filters = &filter;
   grpc_channel_stack *channel_stack;
   grpc_call_stack *call_stack;
diff --git a/test/core/channel/metadata_buffer_test.c b/test/core/channel/metadata_buffer_test.c
index 9d7e159..6081308 100644
--- a/test/core/channel/metadata_buffer_test.c
+++ b/test/core/channel/metadata_buffer_test.c
@@ -112,12 +112,12 @@
 static const grpc_channel_filter top_filter = {
     fail_call_op,      fail_channel_op,     sizeof(size_t),
     init_call_elem,    destroy_call_elem,   sizeof(channel_data),
-    init_channel_elem, destroy_channel_elem};
+    init_channel_elem, destroy_channel_elem, "top_filter" };
 
 static const grpc_channel_filter bottom_filter = {
     expect_call_op,    fail_channel_op,     sizeof(size_t),
     init_call_elem,    destroy_call_elem,   sizeof(channel_data),
-    init_channel_elem, destroy_channel_elem};
+    init_channel_elem, destroy_channel_elem, "bottom_filter" };
 
 static const grpc_channel_filter *filters[2] = {&top_filter, &bottom_filter};
 
diff --git a/test/core/compression/message_compress_test.c b/test/core/compression/message_compress_test.c
index d1e5935..5f55fa6 100644
--- a/test/core/compression/message_compress_test.c
+++ b/test/core/compression/message_compress_test.c
@@ -166,7 +166,7 @@
 }
 
 int main(int argc, char **argv) {
-  int i, j, k, m;
+  unsigned i, j, k, m;
   grpc_slice_split_mode uncompressed_split_modes[] = {
       GRPC_SLICE_SPLIT_IDENTITY, GRPC_SLICE_SPLIT_ONE_BYTE};
   grpc_slice_split_mode compressed_split_modes[] = {GRPC_SLICE_SPLIT_MERGE_ALL,
diff --git a/test/core/echo/client.c b/test/core/echo/client.c
index 2ad29df..bb478c4 100644
--- a/test/core/echo/client.c
+++ b/test/core/echo/client.c
@@ -66,7 +66,7 @@
   grpc_completion_queue *cq = NULL;
   int bytes_written = 0;
   int bytes_read = 0;
-  int i = 0;
+  unsigned i = 0;
   int waiting_finishes;
   gpr_slice read_slice;
 
@@ -78,7 +78,9 @@
 
   GPR_ASSERT(argc == 2);
   channel = grpc_channel_create(argv[1], NULL);
-  call = grpc_channel_create_call(channel, "/foo", "localhost", gpr_inf_future);
+  call = grpc_channel_create_call(channel, "/foo", "localhost",
+                                  gpr_time_add(gpr_time_from_seconds(5),
+                                               gpr_now()));
   GPR_ASSERT(grpc_call_invoke(call, cq, (void *)1, (void *)1, 0) ==
              GRPC_CALL_OK);
 
diff --git a/test/core/echo/echo_test.c b/test/core/echo/echo_test.c
index 6449b24..83b83ab 100644
--- a/test/core/echo/echo_test.c
+++ b/test/core/echo/echo_test.c
@@ -49,18 +49,19 @@
 #include "test/core/util/port.h"
 
 int test_client(const char *root, const char *host, int port) {
-  char *args[3];
   int status;
   pid_t cli;
   cli = fork();
   if (cli == 0) {
-    gpr_asprintf(&args[0], "%s/echo_client", root);
-    gpr_join_host_port(&args[1], host, port);
-    args[2] = 0;
-    execv(args[0], args);
+    char *binary_path;
+    char *binding;
+    gpr_asprintf(&binary_path, "%s/echo_client", root);
+    gpr_join_host_port(&binding, host, port);
 
-    gpr_free(args[0]);
-    gpr_free(args[1]);
+    execl(binary_path, binary_path, binding, NULL);
+
+    gpr_free(binary_path);
+    gpr_free(binding);
     return 1;
   }
   /* wait for client */
@@ -76,7 +77,6 @@
   char *lslash = strrchr(me, '/');
   char root[1024];
   int port = grpc_pick_unused_port_or_die();
-  char *args[3];
   int status;
   pid_t svr;
   int ret;
@@ -98,13 +98,15 @@
   /* start the server */
   svr = fork();
   if (svr == 0) {
-    gpr_asprintf(&args[0], "%s/echo_server", root);
-    gpr_join_host_port(&args[1], "::", port);
-    args[2] = 0;
-    execv(args[0], args);
+    char *binary_path;
+    char *binding;
+    gpr_asprintf(&binary_path, "%s/echo_server", root);
+    gpr_join_host_port(&binding, "::", port);
 
-    gpr_free(args[0]);
-    gpr_free(args[1]);
+    execl(binary_path, binary_path, "-bind", binding, NULL);
+
+    gpr_free(binary_path);
+    gpr_free(binding);
     return 1;
   }
   /* wait a little */
diff --git a/test/core/echo/server.c b/test/core/echo/server.c
index 6b67334..2764a9e 100644
--- a/test/core/echo/server.c
+++ b/test/core/echo/server.c
@@ -70,7 +70,7 @@
 static void assert_read_ok(call_state *s, grpc_byte_buffer *b) {
   grpc_byte_buffer_reader *bb_reader = NULL;
   gpr_slice read_slice;
-  int i;
+  unsigned i;
 
   bb_reader = grpc_byte_buffer_reader_create(b);
   while (grpc_byte_buffer_reader_next(bb_reader, &read_slice)) {
diff --git a/test/core/end2end/tests/cancel_after_accept.c b/test/core/end2end/tests/cancel_after_accept.c
index 33aed98..05a2dc8 100644
--- a/test/core/end2end/tests/cancel_after_accept.c
+++ b/test/core/end2end/tests/cancel_after_accept.c
@@ -149,7 +149,7 @@
 }
 
 void grpc_end2end_tests(grpc_end2end_test_config config) {
-  int i;
+  unsigned i;
 
   for (i = 0; i < GPR_ARRAY_SIZE(cancellation_modes); i++) {
     test_cancel_after_accept(config, cancellation_modes[i]);
diff --git a/test/core/end2end/tests/cancel_after_accept_and_writes_closed.c b/test/core/end2end/tests/cancel_after_accept_and_writes_closed.c
index f348488..db245a3 100644
--- a/test/core/end2end/tests/cancel_after_accept_and_writes_closed.c
+++ b/test/core/end2end/tests/cancel_after_accept_and_writes_closed.c
@@ -157,7 +157,7 @@
 }
 
 void grpc_end2end_tests(grpc_end2end_test_config config) {
-  int i;
+  unsigned i;
 
   for (i = 0; i < GPR_ARRAY_SIZE(cancellation_modes); i++) {
     test_cancel_after_accept_and_writes_closed(config, cancellation_modes[i]);
diff --git a/test/core/end2end/tests/cancel_after_invoke.c b/test/core/end2end/tests/cancel_after_invoke.c
index 3bb8672..5dfb3f7 100644
--- a/test/core/end2end/tests/cancel_after_invoke.c
+++ b/test/core/end2end/tests/cancel_after_invoke.c
@@ -132,7 +132,7 @@
 }
 
 void grpc_end2end_tests(grpc_end2end_test_config config) {
-  int i;
+  unsigned i;
 
   for (i = 0; i < GPR_ARRAY_SIZE(cancellation_modes); i++) {
     test_cancel_after_invoke(config, cancellation_modes[i]);
diff --git a/test/core/end2end/tests/cancel_in_a_vacuum.c b/test/core/end2end/tests/cancel_in_a_vacuum.c
index e4f9dee..5257ece 100644
--- a/test/core/end2end/tests/cancel_in_a_vacuum.c
+++ b/test/core/end2end/tests/cancel_in_a_vacuum.c
@@ -122,7 +122,7 @@
 }
 
 void grpc_end2end_tests(grpc_end2end_test_config config) {
-  int i;
+  unsigned i;
 
   for (i = 0; i < GPR_ARRAY_SIZE(cancellation_modes); i++) {
     test_cancel_in_a_vacuum(config, cancellation_modes[i]);
diff --git a/test/core/fling/client.c b/test/core/fling/client.c
index 7947ffe..d6561e9 100644
--- a/test/core/fling/client.c
+++ b/test/core/fling/client.c
@@ -103,7 +103,7 @@
 int main(int argc, char **argv) {
   gpr_slice slice = gpr_slice_from_copied_string("x");
   double start, stop;
-  int i;
+  unsigned i;
 
   char *fake_argv[1];
 
@@ -113,7 +113,7 @@
   char *target = "localhost:443";
   gpr_cmdline *cl;
   char *scenario_name = "ping-pong-request";
-  scenario sc = {NULL};
+  scenario sc = {NULL, NULL, NULL};
 
   GPR_ASSERT(argc >= 1);
   fake_argv[0] = argv[0];
diff --git a/test/core/iomgr/poll_kick_posix_test.c b/test/core/iomgr/poll_kick_posix_test.c
index 3c6d815..2c5b444 100644
--- a/test/core/iomgr/poll_kick_posix_test.c
+++ b/test/core/iomgr/poll_kick_posix_test.c
@@ -105,6 +105,7 @@
     grpc_pollset_kick_post_poll(&kick_state[i]);
     grpc_pollset_kick_destroy(&kick_state[i]);
   }
+  gpr_free(kick_state);
 }
 
 static void run_tests(void) {
diff --git a/test/core/iomgr/resolve_address_test.c b/test/core/iomgr/resolve_address_test.c
index 4cc101b..1f97724 100644
--- a/test/core/iomgr/resolve_address_test.c
+++ b/test/core/iomgr/resolve_address_test.c
@@ -85,7 +85,7 @@
 static void test_ipv6_without_port(void) {
   const char* const kCases[] = {"2001:db8::1", "2001:db8::1.2.3.4",
                                 "[2001:db8::1]", };
-  int i;
+  unsigned i;
   for (i = 0; i < sizeof(kCases) / sizeof(*kCases); i++) {
     gpr_event ev;
     gpr_event_init(&ev);
@@ -96,7 +96,7 @@
 
 static void test_invalid_ip_addresses(void) {
   const char* const kCases[] = {"293.283.1238.3:1", "[2001:db8::11111]:1", };
-  int i;
+  unsigned i;
   for (i = 0; i < sizeof(kCases) / sizeof(*kCases); i++) {
     gpr_event ev;
     gpr_event_init(&ev);
@@ -108,7 +108,7 @@
 static void test_unparseable_hostports(void) {
   const char* const kCases[] = {"[",         "[::1",        "[::1]bad",
                                 "[1.2.3.4]", "[localhost]", "[localhost]:1", };
-  int i;
+  unsigned i;
   for (i = 0; i < sizeof(kCases) / sizeof(*kCases); i++) {
     gpr_event ev;
     gpr_event_init(&ev);
diff --git a/test/core/iomgr/sockaddr_utils_test.c b/test/core/iomgr/sockaddr_utils_test.c
index 3e653da..6cbdc4e 100644
--- a/test/core/iomgr/sockaddr_utils_test.c
+++ b/test/core/iomgr/sockaddr_utils_test.c
@@ -182,7 +182,8 @@
   gpr_log(GPR_INFO, "  expect_sockaddr_str(%s)", expected);
   result = grpc_sockaddr_to_string(&str, (struct sockaddr *)addr, normalize);
   GPR_ASSERT(str != NULL);
-  GPR_ASSERT(result == strlen(str));
+  GPR_ASSERT(result >= 0);
+  GPR_ASSERT((size_t)result == strlen(str));
   GPR_ASSERT(strcmp(expected, str) == 0);
   gpr_free(str);
 }
@@ -194,7 +195,7 @@
 
   gpr_log(GPR_INFO, "%s", __FUNCTION__);
 
-  errno = 0xDEADBEEF;
+  errno = 0x7EADBEEF;
 
   input4 = make_addr4(kIPv4, sizeof(kIPv4));
   expect_sockaddr_str("192.0.2.1:12345", &input4, 0);
@@ -217,7 +218,7 @@
   expect_sockaddr_str("(sockaddr family=123)", &dummy, 0);
   expect_sockaddr_str("(sockaddr family=123)", &dummy, 1);
 
-  GPR_ASSERT(errno == 0xDEADBEEF);
+  GPR_ASSERT(errno == 0x7EADBEEF);
 }
 
 int main(int argc, char **argv) {
diff --git a/test/core/iomgr/tcp_posix_test.c b/test/core/iomgr/tcp_posix_test.c
index 24634b4..f52ae22 100644
--- a/test/core/iomgr/tcp_posix_test.c
+++ b/test/core/iomgr/tcp_posix_test.c
@@ -94,7 +94,7 @@
   ssize_t write_bytes;
   size_t total_bytes = 0;
   unsigned char *buf = malloc(bytes);
-  int i;
+  unsigned i;
   for (i = 0; i < bytes; ++i) {
     buf[i] = i % 256;
   }
@@ -115,15 +115,14 @@
   grpc_endpoint *ep;
   gpr_mu mu;
   gpr_cv cv;
-  size_t read_bytes;
+  ssize_t read_bytes;
   ssize_t target_read_bytes;
 };
 
 static ssize_t count_and_unref_slices(gpr_slice *slices, size_t nslices,
                                       int *current_data) {
   ssize_t num_bytes = 0;
-  int i;
-  int j;
+  unsigned i, j;
   unsigned char *buf;
   for (i = 0; i < nslices; ++i) {
     buf = GPR_SLICE_START_PTR(slices[i]);
@@ -254,8 +253,7 @@
   ssize_t nslices = num_bytes / slice_size + (num_bytes % slice_size ? 1 : 0);
   gpr_slice *slices = gpr_malloc(sizeof(gpr_slice) * nslices);
   ssize_t num_bytes_left = num_bytes;
-  int i;
-  int j;
+  unsigned i, j;
   unsigned char *buf;
   *num_blocks = nslices;
 
diff --git a/test/core/json/json_rewrite.c b/test/core/json/json_rewrite.c
new file mode 100644
index 0000000..a761a67
--- /dev/null
+++ b/test/core/json/json_rewrite.c
@@ -0,0 +1,261 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <grpc/support/cmdline.h>
+#include <grpc/support/alloc.h>
+
+#include "src/core/json/json_reader.h"
+#include "src/core/json/json_writer.h"
+
+typedef struct json_writer_userdata {
+  FILE* out;
+} json_writer_userdata;
+
+typedef struct stacked_container {
+  grpc_json_type type;
+  struct stacked_container* next;
+} stacked_container;
+
+typedef struct json_reader_userdata {
+  FILE* in;
+  grpc_json_writer* writer;
+  char* scratchpad;
+  char* ptr;
+  size_t free_space;
+  size_t allocated;
+  size_t string_len;
+  stacked_container* top;
+} json_reader_userdata;
+
+static void json_writer_output_char(void* userdata, char c) {
+  json_writer_userdata* state = userdata;
+  fputc(c, state->out);
+}
+
+static void json_writer_output_string(void* userdata, const char* str) {
+  json_writer_userdata* state = userdata;
+  fputs(str, state->out);
+}
+
+static void json_writer_output_string_with_len(void* userdata, const char* str,
+                                               size_t len) {
+  json_writer_userdata* state = userdata;
+  fwrite(str, len, 1, state->out);
+}
+
+grpc_json_writer_vtable writer_vtable = {
+  json_writer_output_char,
+  json_writer_output_string,
+  json_writer_output_string_with_len
+};
+
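+/* Make sure the scratchpad can hold 'needed' more bytes; it grows in
+   256-byte-aligned chunks so repeated single-character appends do not
+   trigger a realloc every time. */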
+static void check_string(json_reader_userdata* state, size_t needed) {
+  if (state->free_space >= needed) return;
+  needed -= state->free_space;
+  needed = (needed + 0xff) & ~0xff;
+  state->scratchpad = gpr_realloc(state->scratchpad, state->allocated + needed);
+  state->free_space += needed;
+  state->allocated += needed;
+}
+
+static void json_reader_string_clear(void* userdata) {
+  json_reader_userdata* state = userdata;
+  state->free_space = state->allocated;
+  state->string_len = 0;
+}
+
+static void json_reader_string_add_char(void* userdata, gpr_uint32 c) {
+  json_reader_userdata* state = userdata;
+  check_string(state, 1);
+  state->scratchpad[state->string_len++] = c;
+}
+
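+/* Append a UTF-32 code point to the scratchpad as its UTF-8 encoding
+   (1 to 4 bytes, depending on the code point's range). */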
+static void json_reader_string_add_utf32(void* userdata, gpr_uint32 c) {
+  if (c <= 0x7f) {
+    json_reader_string_add_char(userdata, c);
+  } else if (c <= 0x7ff) {
+    int b1 = 0xc0 | ((c >> 6) & 0x1f);
+    int b2 = 0x80 | (c & 0x3f);
+    json_reader_string_add_char(userdata, b1);
+    json_reader_string_add_char(userdata, b2);
+  } else if (c <= 0xffff) {
+    int b1 = 0xe0 | ((c >> 12) & 0x0f);
+    int b2 = 0x80 | ((c >> 6) & 0x3f);
+    int b3 = 0x80 | (c & 0x3f);
+    json_reader_string_add_char(userdata, b1);
+    json_reader_string_add_char(userdata, b2);
+    json_reader_string_add_char(userdata, b3);
+  } else if (c <= 0x1fffff) {
+    int b1 = 0xf0 | ((c >> 18) & 0x07);
+    int b2 = 0x80 | ((c >> 12) & 0x3f);
+    int b3 = 0x80 | ((c >> 6) & 0x3f);
+    int b4 = 0x80 | (c & 0x3f);
+    json_reader_string_add_char(userdata, b1);
+    json_reader_string_add_char(userdata, b2);
+    json_reader_string_add_char(userdata, b3);
+    json_reader_string_add_char(userdata, b4);
+  }
+}
+
+static gpr_uint32 json_reader_read_char(void* userdata) {
+  int r;
+  json_reader_userdata* state = userdata;
+
+  r = fgetc(state->in);
+  if (r == EOF) r = GRPC_JSON_READ_CHAR_EOF;
+  return r;
+}
+
+static void json_reader_container_begins(void* userdata, grpc_json_type type) {
+  json_reader_userdata* state = userdata;
+  stacked_container* container = gpr_malloc(sizeof(stacked_container));
+
+  container->type = type;
+  container->next = state->top;
+  state->top = container;
+
+  grpc_json_writer_container_begins(state->writer, type);
+}
+
+static grpc_json_type json_reader_container_ends(void* userdata) {
+  json_reader_userdata* state = userdata;
+  stacked_container* container = state->top;
+
+  grpc_json_writer_container_ends(state->writer, container->type);
+  state->top = container->next;
+  gpr_free(container);
+  return state->top ? state->top->type : GRPC_JSON_TOP_LEVEL;
+}
+
+static void json_reader_set_key(void* userdata) {
+  json_reader_userdata* state = userdata;
+  json_reader_string_add_char(userdata, 0);
+
+  grpc_json_writer_object_key(state->writer, state->scratchpad);
+}
+
+static void json_reader_set_string(void* userdata) {
+  json_reader_userdata* state = userdata;
+  json_reader_string_add_char(userdata, 0);
+
+  grpc_json_writer_value_string(state->writer, state->scratchpad);
+}
+
+static int json_reader_set_number(void* userdata) {
+  json_reader_userdata* state = userdata;
+
+  grpc_json_writer_value_raw_with_len(state->writer, state->scratchpad,
+                                      state->string_len);
+
+  return 1;
+}
+
+static void json_reader_set_true(void* userdata) {
+  json_reader_userdata* state = userdata;
+
+  grpc_json_writer_value_raw_with_len(state->writer, "true", 4);
+}
+
+static void json_reader_set_false(void* userdata) {
+  json_reader_userdata* state = userdata;
+
+  grpc_json_writer_value_raw_with_len(state->writer, "false", 5);
+}
+
+static void json_reader_set_null(void* userdata) {
+  json_reader_userdata* state = userdata;
+
+  grpc_json_writer_value_raw_with_len(state->writer, "null", 4);
+}
+
+static grpc_json_reader_vtable reader_vtable = {
+  json_reader_string_clear,
+  json_reader_string_add_char,
+  json_reader_string_add_utf32,
+  json_reader_read_char,
+  json_reader_container_begins,
+  json_reader_container_ends,
+  json_reader_set_key,
+  json_reader_set_string,
+  json_reader_set_number,
+  json_reader_set_true,
+  json_reader_set_false,
+  json_reader_set_null
+};
+
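+/* Stream JSON from 'in' to 'out' through the reader/writer pair,
+   re-indenting with 'indent' spaces; returns 1 if the whole input
+   parsed cleanly. */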
+int rewrite(FILE* in, FILE* out, int indent) {
+  grpc_json_writer writer;
+  grpc_json_reader reader;
+  grpc_json_reader_status status;
+  json_writer_userdata writer_user;
+  json_reader_userdata reader_user;
+
+  reader_user.writer = &writer;
+  reader_user.in = in;
+  reader_user.top = NULL;
+  reader_user.scratchpad = NULL;
+  reader_user.string_len = 0;
+  reader_user.free_space = 0;
+  reader_user.allocated = 0;
+
+  writer_user.out = out;
+
+  grpc_json_writer_init(&writer, indent, &writer_vtable, &writer_user);
+  grpc_json_reader_init(&reader, &reader_vtable, &reader_user);
+
+  status = grpc_json_reader_run(&reader);
+
+  free(reader_user.scratchpad);
+  while (reader_user.top) {
+    stacked_container* container = reader_user.top;
+    reader_user.top = container->next;
+    free(container);
+  }
+
+  return status == GRPC_JSON_DONE;
+}
+
+int main(int argc, char** argv) {
+  int indent = 2;
+  gpr_cmdline* cl;
+
+  cl = gpr_cmdline_create(NULL);
+  gpr_cmdline_add_int(cl, "indent", NULL, &indent);
+  gpr_cmdline_parse(cl, argc, argv);
+  gpr_cmdline_destroy(cl);
+
+  return rewrite(stdin, stdout, indent) ? 0 : 1;
+}
diff --git a/test/core/json/json_rewrite_test.c b/test/core/json/json_rewrite_test.c
new file mode 100644
index 0000000..4ce406c
--- /dev/null
+++ b/test/core/json/json_rewrite_test.c
@@ -0,0 +1,322 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/useful.h>
+#include <grpc/support/log.h>
+#include "test/core/util/test_config.h"
+
+#include "src/core/json/json_reader.h"
+#include "src/core/json/json_writer.h"
+
+typedef struct json_writer_userdata {
+  FILE* cmp;
+} json_writer_userdata;
+
+typedef struct stacked_container {
+  grpc_json_type type;
+  struct stacked_container* next;
+} stacked_container;
+
+typedef struct json_reader_userdata {
+  FILE* in;
+  grpc_json_writer* writer;
+  char* scratchpad;
+  char* ptr;
+  size_t free_space;
+  size_t allocated;
+  size_t string_len;
+  stacked_container* top;
+  int did_eagain;
+} json_reader_userdata;
+
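+/* Rather than writing output, compare every character the writer emits
+   against the next byte of the expected-output file. */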
+static void json_writer_output_char(void* userdata, char c) {
+  json_writer_userdata* state = userdata;
+  int cmp = fgetc(state->cmp);
+  GPR_ASSERT(cmp == c);
+}
+
+static void json_writer_output_string(void* userdata, const char* str) {
+  while (*str) {
+    json_writer_output_char(userdata, *str++);
+  }
+}
+
+static void json_writer_output_string_with_len(void* userdata, const char* str,
+                                               size_t len) {
+  size_t i;
+  for (i = 0; i < len; i++) {
+    json_writer_output_char(userdata, str[i]);
+  }
+}
+
+grpc_json_writer_vtable writer_vtable = {
+  json_writer_output_char,
+  json_writer_output_string,
+  json_writer_output_string_with_len
+};
+
+static void check_string(json_reader_userdata* state, size_t needed) {
+  if (state->free_space >= needed) return;
+  needed -= state->free_space;
+  needed = (needed + 0xff) & ~0xff;
+  state->scratchpad = gpr_realloc(state->scratchpad, state->allocated + needed);
+  state->free_space += needed;
+  state->allocated += needed;
+}
+
+static void json_reader_string_clear(void* userdata) {
+  json_reader_userdata* state = userdata;
+  state->free_space = state->allocated;
+  state->string_len = 0;
+}
+
+static void json_reader_string_add_char(void* userdata, gpr_uint32 c) {
+  json_reader_userdata* state = userdata;
+  check_string(state, 1);
+  state->scratchpad[state->string_len++] = c;
+}
+
+static void json_reader_string_add_utf32(void* userdata, gpr_uint32 c) {
+  if (c <= 0x7f) {
+    json_reader_string_add_char(userdata, c);
+  } else if (c <= 0x7ff) {
+    int b1 = 0xc0 | ((c >> 6) & 0x1f);
+    int b2 = 0x80 | (c & 0x3f);
+    json_reader_string_add_char(userdata, b1);
+    json_reader_string_add_char(userdata, b2);
+  } else if (c <= 0xffff) {
+    int b1 = 0xe0 | ((c >> 12) & 0x0f);
+    int b2 = 0x80 | ((c >> 6) & 0x3f);
+    int b3 = 0x80 | (c & 0x3f);
+    json_reader_string_add_char(userdata, b1);
+    json_reader_string_add_char(userdata, b2);
+    json_reader_string_add_char(userdata, b3);
+  } else if (c <= 0x1fffff) {
+    int b1 = 0xf0 | ((c >> 18) & 0x07);
+    int b2 = 0x80 | ((c >> 12) & 0x3f);
+    int b3 = 0x80 | ((c >> 6) & 0x3f);
+    int b4 = 0x80 | (c & 0x3f);
+    json_reader_string_add_char(userdata, b1);
+    json_reader_string_add_char(userdata, b2);
+    json_reader_string_add_char(userdata, b3);
+    json_reader_string_add_char(userdata, b4);
+  }
+}
+
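+/* Return EAGAIN on every other call so the test exercises the reader's
+   ability to suspend and later resume parsing mid-stream. */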
+static gpr_uint32 json_reader_read_char(void* userdata) {
+  int r;
+  json_reader_userdata* state = userdata;
+
+  if (!state->did_eagain) {
+    state->did_eagain = 1;
+    return GRPC_JSON_READ_CHAR_EAGAIN;
+  }
+
+  state->did_eagain = 0;
+
+  r = fgetc(state->in);
+  if (r == EOF) r = GRPC_JSON_READ_CHAR_EOF;
+  return r;
+}
+
+static void json_reader_container_begins(void* userdata, grpc_json_type type) {
+  json_reader_userdata* state = userdata;
+  stacked_container* container = gpr_malloc(sizeof(stacked_container));
+
+  container->type = type;
+  container->next = state->top;
+  state->top = container;
+
+  grpc_json_writer_container_begins(state->writer, type);
+}
+
+static grpc_json_type json_reader_container_ends(void* userdata) {
+  json_reader_userdata* state = userdata;
+  stacked_container* container = state->top;
+
+  grpc_json_writer_container_ends(state->writer, container->type);
+  state->top = container->next;
+  gpr_free(container);
+  return state->top ? state->top->type : GRPC_JSON_TOP_LEVEL;
+}
+
+static void json_reader_set_key(void* userdata) {
+  json_reader_userdata* state = userdata;
+  json_reader_string_add_char(userdata, 0);
+
+  grpc_json_writer_object_key(state->writer, state->scratchpad);
+}
+
+static void json_reader_set_string(void* userdata) {
+  json_reader_userdata* state = userdata;
+  json_reader_string_add_char(userdata, 0);
+
+  grpc_json_writer_value_string(state->writer, state->scratchpad);
+}
+
+static int json_reader_set_number(void* userdata) {
+  json_reader_userdata* state = userdata;
+
+  grpc_json_writer_value_raw_with_len(state->writer, state->scratchpad,
+                                      state->string_len);
+
+  return 1;
+}
+
+static void json_reader_set_true(void* userdata) {
+  json_reader_userdata* state = userdata;
+
+  grpc_json_writer_value_raw_with_len(state->writer, "true", 4);
+}
+
+static void json_reader_set_false(void* userdata) {
+  json_reader_userdata* state = userdata;
+
+  grpc_json_writer_value_raw_with_len(state->writer, "false", 5);
+}
+
+static void json_reader_set_null(void* userdata) {
+  json_reader_userdata* state = userdata;
+
+  grpc_json_writer_value_raw_with_len(state->writer, "null", 4);
+}
+
+static grpc_json_reader_vtable reader_vtable = {
+  json_reader_string_clear,
+  json_reader_string_add_char,
+  json_reader_string_add_utf32,
+  json_reader_read_char,
+  json_reader_container_begins,
+  json_reader_container_ends,
+  json_reader_set_key,
+  json_reader_set_string,
+  json_reader_set_number,
+  json_reader_set_true,
+  json_reader_set_false,
+  json_reader_set_null
+};
+
+int rewrite_and_compare(FILE* in, FILE* cmp, int indent) {
+  grpc_json_writer writer;
+  grpc_json_reader reader;
+  grpc_json_reader_status status;
+  json_writer_userdata writer_user;
+  json_reader_userdata reader_user;
+
+  GPR_ASSERT(in);
+  GPR_ASSERT(cmp);
+
+  reader_user.writer = &writer;
+  reader_user.in = in;
+  reader_user.top = NULL;
+  reader_user.scratchpad = NULL;
+  reader_user.string_len = 0;
+  reader_user.free_space = 0;
+  reader_user.allocated = 0;
+  reader_user.did_eagain = 0;
+
+  writer_user.cmp = cmp;
+
+  grpc_json_writer_init(&writer, indent, &writer_vtable, &writer_user);
+  grpc_json_reader_init(&reader, &reader_vtable, &reader_user);
+
+  do {
+    status = grpc_json_reader_run(&reader);
+  } while (status == GRPC_JSON_EAGAIN);
+
+  free(reader_user.scratchpad);
+  while (reader_user.top) {
+    stacked_container* container = reader_user.top;
+    reader_user.top = container->next;
+    free(container);
+  }
+
+  return status == GRPC_JSON_DONE;
+}
+
+typedef struct test_file {
+  const char* input;
+  const char* cmp;
+  int indent;
+} test_file;
+
+static test_file test_files[] = {
+  {
+    "test/core/json/rewrite_test_input.json",
+    "test/core/json/rewrite_test_output_condensed.json",
+    0
+  },
+  {
+    "test/core/json/rewrite_test_input.json",
+    "test/core/json/rewrite_test_output_indented.json",
+    2
+  },
+  {
+    "test/core/json/rewrite_test_output_indented.json",
+    "test/core/json/rewrite_test_output_condensed.json",
+    0
+  },
+  {
+    "test/core/json/rewrite_test_output_condensed.json",
+    "test/core/json/rewrite_test_output_indented.json",
+    2
+  },
+};
+
+void test_rewrites() {
+  unsigned i;
+
+  for (i = 0; i < GPR_ARRAY_SIZE(test_files); i++) {
+    test_file* test = test_files + i;
+    FILE* input = fopen(test->input, "rb");
+    FILE* cmp = fopen(test->cmp, "rb");
+    int status;
+    gpr_log(GPR_INFO, "Testing file %s against %s using indent=%i",
+            test->input, test->cmp, test->indent);
+    status = rewrite_and_compare(input, cmp, test->indent);
+    GPR_ASSERT(status);
+    fclose(input);
+    fclose(cmp);
+  }
+}
+
+int main(int argc, char** argv) {
+  grpc_test_init(argc, argv);
+  test_rewrites();
+  gpr_log(GPR_INFO, "json_rewrite_test success");
+  return 0;
+}
diff --git a/test/core/json/json_test.c b/test/core/json/json_test.c
new file mode 100644
index 0000000..11659a5
--- /dev/null
+++ b/test/core/json/json_test.c
@@ -0,0 +1,177 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/useful.h>
+#include <grpc/support/log.h>
+#include "src/core/json/json.h"
+#include "src/core/support/string.h"
+
+#include "test/core/util/test_config.h"
+
+typedef struct testing_pair {
+  const char* input;
+  const char* output;
+} testing_pair;
+
+static testing_pair testing_pairs[] = {
+  /* Testing valid parsing. */
+
+  /* Testing trivial parses, with de-indentation. */
+  { " 0 ", "0" },
+  { " 1 ", "1" },
+  { " \"a\" ", "\"a\"" },
+  { " true ", "true" },
+  /* Testing the parser's ability to decode trivial UTF-16. */
+  { "\"\\u0020\\\\\\u0010\\u000a\\u000D\"", "\" \\\\\\u0010\\n\\r\"" },
+  /* Testing various UTF-8 sequences. */
+  { "\"ßâñć௵⇒\"", "\"\\u00df\\u00e2\\u00f1\\u0107\\u0bf5\\u21d2\"" },
+  { "\"\\u00df\\u00e2\\u00f1\\u0107\\u0bf5\\u21d2\"", "\"\\u00df\\u00e2\\u00f1\\u0107\\u0bf5\\u21d2\"" },
+  /* Testing UTF-8 character "𝄞", U+1D11E. */
+  { "\"\xf0\x9d\x84\x9e\"", "\"\\ud834\\udd1e\"" },
+  { "\"\\ud834\\udd1e\"", "\"\\ud834\\udd1e\"" },
+  /* Testing nested empty containers. */
+  { " [ [ ] , { } , [ ] ] ", "[[],{},[]]", },
+  /* Testing escapes and control chars in key strings. */
+  { " { \"\\n\\\\a , b\": 1, \"\": 0 } ", "{\"\\n\\\\a , b\":1,\"\":0}" },
+  /* Testing the writer's ability to cut off invalid UTF-8 sequences. */
+  { "\"abc\xf0\x9d\x24\"", "\"abc\"" },
+  { "\"\xff\"", "\"\"" },
+  /* Testing valid number parsing. */
+  { "[0, 42 , 0.0123, 123.456]", "[0,42,0.0123,123.456]"},
+  { "[1e4,-53.235e-31, 0.3e+3]", "[1e4,-53.235e-31,0.3e+3]" },
+  /* Testing keywords parsing. */
+  { "[true, false, null]", "[true,false,null]" },
+
+
+  /* Testing invalid parsing. */
+
+  /* Testing plain invalid things, exercising the state machine. */
+  { "\\", NULL },
+  { "nu ll", NULL },
+  { "fals", NULL },
+  /* Testing unterminated string. */
+  { "\"\\x", NULL },
+  /* Testing invalid UTF-16 number. */
+  { "\"\\u123x", NULL },
+  /* Testing imbalanced surrogate pairs. */
+  { "\"\\ud834f", NULL },
+  { "\"\\ud834\\n", NULL },
+  { "\"\\udd1ef", NULL },
+  { "\"\\ud834\\ud834\"", NULL },
+  { "\"\\ud834\\u1234\"", NULL },
+  /* Testing embedded invalid whitechars. */
+  { "\"\n\"", NULL },
+  { "\"\t\"", NULL },
+  /* Testing empty json data. */
+  { "", NULL },
+  /* Testing extra characters after end of parsing. */
+  { "{},", NULL },
+  /* Testing imbalanced containers. */
+  { "{}}", NULL },
+  { "[]]", NULL },
+  { "{{}", NULL },
+  { "[[]", NULL },
+  { "[}", NULL },
+  { "{]", NULL },
+  /* Testing trailing comma. */
+  { "{,}", NULL },
+  { "[1,2,3,4,]", NULL },
+  /* Testing having a key syntax in an array. */
+  { "[\"x\":0]", NULL },
+  /* Testing invalid numbers. */
+  { "1.", NULL },
+  { "1e", NULL },
+  { ".12", NULL },
+  { "1.x", NULL },
+  { "1.12x", NULL },
+  { "1ex", NULL },
+  { "1e12x", NULL },
+  { ".12x", NULL },
+  { "000", NULL },
+};
+
+static void test_pairs() {
+  unsigned i;
+
+  for (i = 0; i < GPR_ARRAY_SIZE(testing_pairs); i++) {
+    testing_pair* pair = testing_pairs + i;
+    char* scratchpad = gpr_strdup(pair->input);
+    grpc_json* json;
+
+    gpr_log(GPR_INFO, "parsing string %i - should %s", i,
+            pair->output ? "succeed" : "fail");
+    json = grpc_json_parse_string(scratchpad);
+
+    if (pair->output) {
+      char* output;
+
+      GPR_ASSERT(json);
+      output = grpc_json_dump_to_string(json, 0);
+      GPR_ASSERT(output);
+      gpr_log(GPR_INFO, "succeeded with output = %s", output);
+      GPR_ASSERT(strcmp(output, pair->output) == 0);
+
+      grpc_json_destroy(json);
+      gpr_free(output);
+    } else {
+      gpr_log(GPR_INFO, "failed");
+      GPR_ASSERT(!json);
+    }
+
+    free(scratchpad);
+  }
+}
+
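+/* Detach and destroy the first child, relink its sibling, then destroy the
+   root: exercises grpc_json_destroy on a manually edited tree. */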
+static void test_atypical() {
+  char* scratchpad = gpr_strdup("[[],[]]");
+  grpc_json* json = grpc_json_parse_string(scratchpad);
+  grpc_json* brother;
+
+  GPR_ASSERT(json);
+  GPR_ASSERT(json->child);
+  brother = json->child->next;
+  grpc_json_destroy(json->child);
+  json->child = brother;
+  grpc_json_destroy(json);
+}
+
+int main(int argc, char **argv) {
+  grpc_test_init(argc, argv);
+  test_pairs();
+  test_atypical();
+  gpr_log(GPR_INFO, "json_test success");
+  return 0;
+}
diff --git a/test/core/json/rewrite_test_input.json b/test/core/json/rewrite_test_input.json
new file mode 100644
index 0000000..5688914
--- /dev/null
+++ b/test/core/json/rewrite_test_input.json
@@ -0,0 +1,203 @@
+{
+"unicode, escape and empty test": { "a\tb": "\u00eb", "empty": [{},[],{}] },
+"some more unicode tests": {
+  "typical utf-8 input (plane 0)": "ßâñć⇒",
+  "atypical utf-8 input (plane 1)": "𝄞"
+},
+
+"whitespace test": { "trying"   :  
+"to"  
+  ,
+  
+  "break"
+  :
+  "the"  , 
+  "parser": "a bit" }  ,  
+
+"#": "All these examples are from http://json.org/example",
+"test1":
+{
+    "glossary": {
+        "title": "example glossary",
+        "GlossDiv": {
+            "title": "S",
+            "GlossList": {
+                "GlossEntry": {
+                    "ID": "SGML",
+                    "SortAs": "SGML",
+                    "GlossTerm": "Standard Generalized Markup Language",
+                    "Acronym": "SGML",
+                    "Abbrev": "ISO 8879:1986",
+                    "GlossDef": {
+                        "para": "A meta-markup language, used to create markup languages such as DocBook.",
+                        "GlossSeeAlso": ["GML", "XML"]
+                    },
+                    "GlossSee": "markup"
+                }
+            }
+        }
+    }
+},
+
+"test2":
+{"menu": {
+  "id": "file",
+  "value": "File",
+  "popup": {
+    "menuitem": [
+      {"value": "New", "onclick": "CreateNewDoc()"},
+      {"value": "Open", "onclick": "OpenDoc()"},
+      {"value": "Close", "onclick": "CloseDoc()"}
+    ]
+  }
+}},
+
+"test3":
+{"widget": {
+    "debug": "on",
+    "window": {
+        "title": "Sample Konfabulator Widget",
+        "name": "main_window",
+        "width": 500,
+        "height": 500
+    },
+    "image": { 
+        "src": "Images/Sun.png",
+        "name": "sun1",
+        "hOffset": 250,
+        "vOffset": 250,
+        "alignment": "center"
+    },
+    "text": {
+        "data": "Click Here",
+        "size": 36,
+        "style": "bold",
+        "name": "text1",
+        "hOffset": 250,
+        "vOffset": 100,
+        "alignment": "center",
+        "onMouseUp": "sun1.opacity = (sun1.opacity / 100) * 90;"
+    }
+}},
+
+"test4":
+{"web-app": {
+  "servlet": [   
+    {
+      "servlet-name": "cofaxCDS",
+      "servlet-class": "org.cofax.cds.CDSServlet",
+      "init-param": {
+        "configGlossary:installationAt": "Philadelphia, PA",
+        "configGlossary:adminEmail": "ksm@pobox.com",
+        "configGlossary:poweredBy": "Cofax",
+        "configGlossary:poweredByIcon": "/images/cofax.gif",
+        "configGlossary:staticPath": "/content/static",
+        "templateProcessorClass": "org.cofax.WysiwygTemplate",
+        "templateLoaderClass": "org.cofax.FilesTemplateLoader",
+        "templatePath": "templates",
+        "templateOverridePath": "",
+        "defaultListTemplate": "listTemplate.htm",
+        "defaultFileTemplate": "articleTemplate.htm",
+        "useJSP": false,
+        "jspListTemplate": "listTemplate.jsp",
+        "jspFileTemplate": "articleTemplate.jsp",
+        "cachePackageTagsTrack": 200,
+        "cachePackageTagsStore": 200,
+        "cachePackageTagsRefresh": 60,
+        "cacheTemplatesTrack": 100,
+        "cacheTemplatesStore": 50,
+        "cacheTemplatesRefresh": 15,
+        "cachePagesTrack": 200,
+        "cachePagesStore": 100,
+        "cachePagesRefresh": 10,
+        "cachePagesDirtyRead": 10,
+        "searchEngineListTemplate": "forSearchEnginesList.htm",
+        "searchEngineFileTemplate": "forSearchEngines.htm",
+        "searchEngineRobotsDb": "WEB-INF/robots.db",
+        "useDataStore": true,
+        "dataStoreClass": "org.cofax.SqlDataStore",
+        "redirectionClass": "org.cofax.SqlRedirection",
+        "dataStoreName": "cofax",
+        "dataStoreDriver": "com.microsoft.jdbc.sqlserver.SQLServerDriver",
+        "dataStoreUrl": "jdbc:microsoft:sqlserver://LOCALHOST:1433;DatabaseName=goon",
+        "dataStoreUser": "sa",
+        "dataStorePassword": "dataStoreTestQuery",
+        "dataStoreTestQuery": "SET NOCOUNT ON;select test='test';",
+        "dataStoreLogFile": "/usr/local/tomcat/logs/datastore.log",
+        "dataStoreInitConns": 10,
+        "dataStoreMaxConns": 100,
+        "dataStoreConnUsageLimit": 100,
+        "dataStoreLogLevel": "debug",
+        "maxUrlLength": 500}},
+    {
+      "servlet-name": "cofaxEmail",
+      "servlet-class": "org.cofax.cds.EmailServlet",
+      "init-param": {
+      "mailHost": "mail1",
+      "mailHostOverride": "mail2"}},
+    {
+      "servlet-name": "cofaxAdmin",
+      "servlet-class": "org.cofax.cds.AdminServlet"},
+ 
+    {
+      "servlet-name": "fileServlet",
+      "servlet-class": "org.cofax.cds.FileServlet"},
+    {
+      "servlet-name": "cofaxTools",
+      "servlet-class": "org.cofax.cms.CofaxToolsServlet",
+      "init-param": {
+        "templatePath": "toolstemplates/",
+        "log": 1,
+        "logLocation": "/usr/local/tomcat/logs/CofaxTools.log",
+        "logMaxSize": "",
+        "dataLog": 1,
+        "dataLogLocation": "/usr/local/tomcat/logs/dataLog.log",
+        "dataLogMaxSize": "",
+        "removePageCache": "/content/admin/remove?cache=pages&id=",
+        "removeTemplateCache": "/content/admin/remove?cache=templates&id=",
+        "fileTransferFolder": "/usr/local/tomcat/webapps/content/fileTransferFolder",
+        "lookInContext": 1,
+        "adminGroupID": 4,
+        "betaServer": true}}],
+  "servlet-mapping": {
+    "cofaxCDS": "/",
+    "cofaxEmail": "/cofaxutil/aemail/*",
+    "cofaxAdmin": "/admin/*",
+    "fileServlet": "/static/*",
+    "cofaxTools": "/tools/*"},
+ 
+  "taglib": {
+    "taglib-uri": "cofax.tld",
+    "taglib-location": "/WEB-INF/tlds/cofax.tld"}}},
+
+"test5":
+{"menu": {
+    "header": "SVG Viewer",
+    "items": [
+        {"id": "Open"},
+        {"id": "OpenNew", "label": "Open New"},
+        null,
+        {"id": "ZoomIn", "label": "Zoom In"},
+        {"id": "ZoomOut", "label": "Zoom Out"},
+        {"id": "OriginalView", "label": "Original View"},
+        null,
+        {"id": "Quality"},
+        {"id": "Pause"},
+        {"id": "Mute"},
+        null,
+        {"id": "Find", "label": "Find..."},
+        {"id": "FindAgain", "label": "Find Again"},
+        {"id": "Copy"},
+        {"id": "CopyAgain", "label": "Copy Again"},
+        {"id": "CopySVG", "label": "Copy SVG"},
+        {"id": "ViewSVG", "label": "View SVG"},
+        {"id": "ViewSource", "label": "View Source"},
+        {"id": "SaveAs", "label": "Save As"},
+        null,
+        {"id": "Help"},
+        {"id": "About", "label": "About Adobe CVG Viewer..."}
+    ]
+}}
+
+
+}
diff --git a/test/core/json/rewrite_test_output_condensed.json b/test/core/json/rewrite_test_output_condensed.json
new file mode 100644
index 0000000..3adbbd9
--- /dev/null
+++ b/test/core/json/rewrite_test_output_condensed.json
@@ -0,0 +1 @@
+{"unicode, escape and empty test":{"a\tb":"\u00eb","empty":[{},[],{}]},"some more unicode tests":{"typical utf-8 input (plane 0)":"\u00df\u00e2\u00f1\u0107\u21d2","atypical utf-8 input (plane 1)":"\ud834\udd1e"},"whitespace test":{"trying":"to","break":"the","parser":"a bit"},"#":"All these examples are from http://json.org/example","test1":{"glossary":{"title":"example glossary","GlossDiv":{"title":"S","GlossList":{"GlossEntry":{"ID":"SGML","SortAs":"SGML","GlossTerm":"Standard Generalized Markup Language","Acronym":"SGML","Abbrev":"ISO 8879:1986","GlossDef":{"para":"A meta-markup language, used to create markup languages such as DocBook.","GlossSeeAlso":["GML","XML"]},"GlossSee":"markup"}}}}},"test2":{"menu":{"id":"file","value":"File","popup":{"menuitem":[{"value":"New","onclick":"CreateNewDoc()"},{"value":"Open","onclick":"OpenDoc()"},{"value":"Close","onclick":"CloseDoc()"}]}}},"test3":{"widget":{"debug":"on","window":{"title":"Sample Konfabulator Widget","name":"main_window","width":500,"height":500},"image":{"src":"Images/Sun.png","name":"sun1","hOffset":250,"vOffset":250,"alignment":"center"},"text":{"data":"Click Here","size":36,"style":"bold","name":"text1","hOffset":250,"vOffset":100,"alignment":"center","onMouseUp":"sun1.opacity = (sun1.opacity / 100) * 90;"}}},"test4":{"web-app":{"servlet":[{"servlet-name":"cofaxCDS","servlet-class":"org.cofax.cds.CDSServlet","init-param":{"configGlossary:installationAt":"Philadelphia, PA","configGlossary:adminEmail":"ksm@pobox.com","configGlossary:poweredBy":"Cofax","configGlossary:poweredByIcon":"/images/cofax.gif","configGlossary:staticPath":"/content/static","templateProcessorClass":"org.cofax.WysiwygTemplate","templateLoaderClass":"org.cofax.FilesTemplateLoader","templatePath":"templates","templateOverridePath":"","defaultListTemplate":"listTemplate.htm","defaultFileTemplate":"articleTemplate.htm","useJSP":false,"jspListTemplate":"listTemplate.jsp","jspFileTemplate":"articleTemplate.jsp","cachePackageTagsTrack":200,"cachePackageTagsStore":200,"cachePackageTagsRefresh":60,"cacheTemplatesTrack":100,"cacheTemplatesStore":50,"cacheTemplatesRefresh":15,"cachePagesTrack":200,"cachePagesStore":100,"cachePagesRefresh":10,"cachePagesDirtyRead":10,"searchEngineListTemplate":"forSearchEnginesList.htm","searchEngineFileTemplate":"forSearchEngines.htm","searchEngineRobotsDb":"WEB-INF/robots.db","useDataStore":true,"dataStoreClass":"org.cofax.SqlDataStore","redirectionClass":"org.cofax.SqlRedirection","dataStoreName":"cofax","dataStoreDriver":"com.microsoft.jdbc.sqlserver.SQLServerDriver","dataStoreUrl":"jdbc:microsoft:sqlserver://LOCALHOST:1433;DatabaseName=goon","dataStoreUser":"sa","dataStorePassword":"dataStoreTestQuery","dataStoreTestQuery":"SET NOCOUNT ON;select 
test='test';","dataStoreLogFile":"/usr/local/tomcat/logs/datastore.log","dataStoreInitConns":10,"dataStoreMaxConns":100,"dataStoreConnUsageLimit":100,"dataStoreLogLevel":"debug","maxUrlLength":500}},{"servlet-name":"cofaxEmail","servlet-class":"org.cofax.cds.EmailServlet","init-param":{"mailHost":"mail1","mailHostOverride":"mail2"}},{"servlet-name":"cofaxAdmin","servlet-class":"org.cofax.cds.AdminServlet"},{"servlet-name":"fileServlet","servlet-class":"org.cofax.cds.FileServlet"},{"servlet-name":"cofaxTools","servlet-class":"org.cofax.cms.CofaxToolsServlet","init-param":{"templatePath":"toolstemplates/","log":1,"logLocation":"/usr/local/tomcat/logs/CofaxTools.log","logMaxSize":"","dataLog":1,"dataLogLocation":"/usr/local/tomcat/logs/dataLog.log","dataLogMaxSize":"","removePageCache":"/content/admin/remove?cache=pages&id=","removeTemplateCache":"/content/admin/remove?cache=templates&id=","fileTransferFolder":"/usr/local/tomcat/webapps/content/fileTransferFolder","lookInContext":1,"adminGroupID":4,"betaServer":true}}],"servlet-mapping":{"cofaxCDS":"/","cofaxEmail":"/cofaxutil/aemail/*","cofaxAdmin":"/admin/*","fileServlet":"/static/*","cofaxTools":"/tools/*"},"taglib":{"taglib-uri":"cofax.tld","taglib-location":"/WEB-INF/tlds/cofax.tld"}}},"test5":{"menu":{"header":"SVG Viewer","items":[{"id":"Open"},{"id":"OpenNew","label":"Open New"},null,{"id":"ZoomIn","label":"Zoom In"},{"id":"ZoomOut","label":"Zoom Out"},{"id":"OriginalView","label":"Original View"},null,{"id":"Quality"},{"id":"Pause"},{"id":"Mute"},null,{"id":"Find","label":"Find..."},{"id":"FindAgain","label":"Find Again"},{"id":"Copy"},{"id":"CopyAgain","label":"Copy Again"},{"id":"CopySVG","label":"Copy SVG"},{"id":"ViewSVG","label":"View SVG"},{"id":"ViewSource","label":"View Source"},{"id":"SaveAs","label":"Save As"},null,{"id":"Help"},{"id":"About","label":"About Adobe CVG Viewer..."}]}}}
\ No newline at end of file
diff --git a/test/core/json/rewrite_test_output_indented.json b/test/core/json/rewrite_test_output_indented.json
new file mode 100644
index 0000000..7ac9f49
--- /dev/null
+++ b/test/core/json/rewrite_test_output_indented.json
@@ -0,0 +1,272 @@
+{
+  "unicode, escape and empty test": {
+    "a\tb": "\u00eb",
+    "empty": [
+      {},
+      [],
+      {}
+    ]
+  },
+  "some more unicode tests": {
+    "typical utf-8 input (plane 0)": "\u00df\u00e2\u00f1\u0107\u21d2",
+    "atypical utf-8 input (plane 1)": "\ud834\udd1e"
+  },
+  "whitespace test": {
+    "trying": "to",
+    "break": "the",
+    "parser": "a bit"
+  },
+  "#": "All these examples are from http://json.org/example",
+  "test1": {
+    "glossary": {
+      "title": "example glossary",
+      "GlossDiv": {
+        "title": "S",
+        "GlossList": {
+          "GlossEntry": {
+            "ID": "SGML",
+            "SortAs": "SGML",
+            "GlossTerm": "Standard Generalized Markup Language",
+            "Acronym": "SGML",
+            "Abbrev": "ISO 8879:1986",
+            "GlossDef": {
+              "para": "A meta-markup language, used to create markup languages such as DocBook.",
+              "GlossSeeAlso": [
+                "GML",
+                "XML"
+              ]
+            },
+            "GlossSee": "markup"
+          }
+        }
+      }
+    }
+  },
+  "test2": {
+    "menu": {
+      "id": "file",
+      "value": "File",
+      "popup": {
+        "menuitem": [
+          {
+            "value": "New",
+            "onclick": "CreateNewDoc()"
+          },
+          {
+            "value": "Open",
+            "onclick": "OpenDoc()"
+          },
+          {
+            "value": "Close",
+            "onclick": "CloseDoc()"
+          }
+        ]
+      }
+    }
+  },
+  "test3": {
+    "widget": {
+      "debug": "on",
+      "window": {
+        "title": "Sample Konfabulator Widget",
+        "name": "main_window",
+        "width": 500,
+        "height": 500
+      },
+      "image": {
+        "src": "Images/Sun.png",
+        "name": "sun1",
+        "hOffset": 250,
+        "vOffset": 250,
+        "alignment": "center"
+      },
+      "text": {
+        "data": "Click Here",
+        "size": 36,
+        "style": "bold",
+        "name": "text1",
+        "hOffset": 250,
+        "vOffset": 100,
+        "alignment": "center",
+        "onMouseUp": "sun1.opacity = (sun1.opacity / 100) * 90;"
+      }
+    }
+  },
+  "test4": {
+    "web-app": {
+      "servlet": [
+        {
+          "servlet-name": "cofaxCDS",
+          "servlet-class": "org.cofax.cds.CDSServlet",
+          "init-param": {
+            "configGlossary:installationAt": "Philadelphia, PA",
+            "configGlossary:adminEmail": "ksm@pobox.com",
+            "configGlossary:poweredBy": "Cofax",
+            "configGlossary:poweredByIcon": "/images/cofax.gif",
+            "configGlossary:staticPath": "/content/static",
+            "templateProcessorClass": "org.cofax.WysiwygTemplate",
+            "templateLoaderClass": "org.cofax.FilesTemplateLoader",
+            "templatePath": "templates",
+            "templateOverridePath": "",
+            "defaultListTemplate": "listTemplate.htm",
+            "defaultFileTemplate": "articleTemplate.htm",
+            "useJSP": false,
+            "jspListTemplate": "listTemplate.jsp",
+            "jspFileTemplate": "articleTemplate.jsp",
+            "cachePackageTagsTrack": 200,
+            "cachePackageTagsStore": 200,
+            "cachePackageTagsRefresh": 60,
+            "cacheTemplatesTrack": 100,
+            "cacheTemplatesStore": 50,
+            "cacheTemplatesRefresh": 15,
+            "cachePagesTrack": 200,
+            "cachePagesStore": 100,
+            "cachePagesRefresh": 10,
+            "cachePagesDirtyRead": 10,
+            "searchEngineListTemplate": "forSearchEnginesList.htm",
+            "searchEngineFileTemplate": "forSearchEngines.htm",
+            "searchEngineRobotsDb": "WEB-INF/robots.db",
+            "useDataStore": true,
+            "dataStoreClass": "org.cofax.SqlDataStore",
+            "redirectionClass": "org.cofax.SqlRedirection",
+            "dataStoreName": "cofax",
+            "dataStoreDriver": "com.microsoft.jdbc.sqlserver.SQLServerDriver",
+            "dataStoreUrl": "jdbc:microsoft:sqlserver://LOCALHOST:1433;DatabaseName=goon",
+            "dataStoreUser": "sa",
+            "dataStorePassword": "dataStoreTestQuery",
+            "dataStoreTestQuery": "SET NOCOUNT ON;select test='test';",
+            "dataStoreLogFile": "/usr/local/tomcat/logs/datastore.log",
+            "dataStoreInitConns": 10,
+            "dataStoreMaxConns": 100,
+            "dataStoreConnUsageLimit": 100,
+            "dataStoreLogLevel": "debug",
+            "maxUrlLength": 500
+          }
+        },
+        {
+          "servlet-name": "cofaxEmail",
+          "servlet-class": "org.cofax.cds.EmailServlet",
+          "init-param": {
+            "mailHost": "mail1",
+            "mailHostOverride": "mail2"
+          }
+        },
+        {
+          "servlet-name": "cofaxAdmin",
+          "servlet-class": "org.cofax.cds.AdminServlet"
+        },
+        {
+          "servlet-name": "fileServlet",
+          "servlet-class": "org.cofax.cds.FileServlet"
+        },
+        {
+          "servlet-name": "cofaxTools",
+          "servlet-class": "org.cofax.cms.CofaxToolsServlet",
+          "init-param": {
+            "templatePath": "toolstemplates/",
+            "log": 1,
+            "logLocation": "/usr/local/tomcat/logs/CofaxTools.log",
+            "logMaxSize": "",
+            "dataLog": 1,
+            "dataLogLocation": "/usr/local/tomcat/logs/dataLog.log",
+            "dataLogMaxSize": "",
+            "removePageCache": "/content/admin/remove?cache=pages&id=",
+            "removeTemplateCache": "/content/admin/remove?cache=templates&id=",
+            "fileTransferFolder": "/usr/local/tomcat/webapps/content/fileTransferFolder",
+            "lookInContext": 1,
+            "adminGroupID": 4,
+            "betaServer": true
+          }
+        }
+      ],
+      "servlet-mapping": {
+        "cofaxCDS": "/",
+        "cofaxEmail": "/cofaxutil/aemail/*",
+        "cofaxAdmin": "/admin/*",
+        "fileServlet": "/static/*",
+        "cofaxTools": "/tools/*"
+      },
+      "taglib": {
+        "taglib-uri": "cofax.tld",
+        "taglib-location": "/WEB-INF/tlds/cofax.tld"
+      }
+    }
+  },
+  "test5": {
+    "menu": {
+      "header": "SVG Viewer",
+      "items": [
+        {
+          "id": "Open"
+        },
+        {
+          "id": "OpenNew",
+          "label": "Open New"
+        },
+        null,
+        {
+          "id": "ZoomIn",
+          "label": "Zoom In"
+        },
+        {
+          "id": "ZoomOut",
+          "label": "Zoom Out"
+        },
+        {
+          "id": "OriginalView",
+          "label": "Original View"
+        },
+        null,
+        {
+          "id": "Quality"
+        },
+        {
+          "id": "Pause"
+        },
+        {
+          "id": "Mute"
+        },
+        null,
+        {
+          "id": "Find",
+          "label": "Find..."
+        },
+        {
+          "id": "FindAgain",
+          "label": "Find Again"
+        },
+        {
+          "id": "Copy"
+        },
+        {
+          "id": "CopyAgain",
+          "label": "Copy Again"
+        },
+        {
+          "id": "CopySVG",
+          "label": "Copy SVG"
+        },
+        {
+          "id": "ViewSVG",
+          "label": "View SVG"
+        },
+        {
+          "id": "ViewSource",
+          "label": "View Source"
+        },
+        {
+          "id": "SaveAs",
+          "label": "Save As"
+        },
+        null,
+        {
+          "id": "Help"
+        },
+        {
+          "id": "About",
+          "label": "About Adobe CVG Viewer..."
+        }
+      ]
+    }
+  }
+}
\ No newline at end of file
diff --git a/test/core/security/credentials_test.c b/test/core/security/credentials_test.c
index 1c83cc8..dd90a7e 100644
--- a/test/core/security/credentials_test.c
+++ b/test/core/security/credentials_test.c
@@ -55,23 +55,23 @@
    Maximum size for a string literal is 509 chars in C89, yay!  */
 static const char test_json_key_str_part1[] =
     "{ \"private_key\": \"-----BEGIN PRIVATE KEY-----"
-    "\nMIICeAIBADANBgkqhkiG9w0BAQEFAASCAmIwggJeAgEAAoGBAOEvJsnoHnyHkXcp\n7mJEqg"
-    "WGjiw71NfXByguekSKho65FxaGbsnSM9SMQAqVk7Q2rG+I0OpsT0LrWQtZ\nyjSeg/"
+    "\\nMIICeAIBADANBgkqhkiG9w0BAQEFAASCAmIwggJeAgEAAoGBAOEvJsnoHnyHkXcp\\n7mJEqg"
+    "WGjiw71NfXByguekSKho65FxaGbsnSM9SMQAqVk7Q2rG+I0OpsT0LrWQtZ\\nyjSeg/"
     "rWBQvS4hle4LfijkP3J5BG+"
-    "IXDMP8RfziNRQsenAXDNPkY4kJCvKux2xdD\nOnVF6N7dL3nTYZg+"
-    "uQrNsMTz9UxVAgMBAAECgYEAzbLewe1xe9vy+2GoSsfib+28\nDZgSE6Bu/"
-    "zuFoPrRc6qL9p2SsnV7txrunTyJkkOnPLND9ABAXybRTlcVKP/sGgza\n/"
+    "IXDMP8RfziNRQsenAXDNPkY4kJCvKux2xdD\\nOnVF6N7dL3nTYZg+"
+    "uQrNsMTz9UxVAgMBAAECgYEAzbLewe1xe9vy+2GoSsfib+28\\nDZgSE6Bu/"
+    "zuFoPrRc6qL9p2SsnV7txrunTyJkkOnPLND9ABAXybRTlcVKP/sGgza\\n/"
     "8HpCqFYM9V8f34SBWfD4fRFT+n/"
-    "73cfRUtGXdXpseva2lh8RilIQfPhNZAncenU\ngqXjDvpkypEusgXAykECQQD+";
+    "73cfRUtGXdXpseva2lh8RilIQfPhNZAncenU\\ngqXjDvpkypEusgXAykECQQD+";
 static const char test_json_key_str_part2[] =
-    "53XxNVnxBHsYb+AYEfklR96yVi8HywjVHP34+OQZ\nCslxoHQM8s+"
-    "dBnjfScLu22JqkPv04xyxmt0QAKm9+vTdAkEA4ib7YvEAn2jXzcCI\nEkoy2L/"
-    "XydR1GCHoacdfdAwiL2npOdnbvi4ZmdYRPY1LSTO058tQHKVXV7NLeCa3\nAARh2QJBAMKeDAG"
-    "W303SQv2cZTdbeaLKJbB5drz3eo3j7dDKjrTD9JupixFbzcGw\n8FZi5c8idxiwC36kbAL6HzA"
-    "ZoX+ofI0CQE6KCzPJTtYNqyShgKAZdJ8hwOcvCZtf\n6z8RJm0+"
+    "53XxNVnxBHsYb+AYEfklR96yVi8HywjVHP34+OQZ\\nCslxoHQM8s+"
+    "dBnjfScLu22JqkPv04xyxmt0QAKm9+vTdAkEA4ib7YvEAn2jXzcCI\\nEkoy2L/"
+    "XydR1GCHoacdfdAwiL2npOdnbvi4ZmdYRPY1LSTO058tQHKVXV7NLeCa3\\nAARh2QJBAMKeDAG"
+    "W303SQv2cZTdbeaLKJbB5drz3eo3j7dDKjrTD9JupixFbzcGw\\n8FZi5c8idxiwC36kbAL6HzA"
+    "ZoX+ofI0CQE6KCzPJTtYNqyShgKAZdJ8hwOcvCZtf\\n6z8RJm0+"
     "6YBd38lfh5j8mZd7aHFf6I17j5AQY7oPEc47TjJj/"
-    "5nZ68ECQQDvYuI3\nLyK5fS8g0SYbmPOL9TlcHDOqwG0mrX9qpg5DC2fniXNSrrZ64GTDKdzZY"
-    "Ap6LI9W\nIqv4vr6y38N79TTC\n-----END PRIVATE KEY-----\n\", ";
+    "5nZ68ECQQDvYuI3\\nLyK5fS8g0SYbmPOL9TlcHDOqwG0mrX9qpg5DC2fniXNSrrZ64GTDKdzZY"
+    "Ap6LI9W\\nIqv4vr6y38N79TTC\\n-----END PRIVATE KEY-----\\n\", ";
 static const char test_json_key_str_part3[] =
     "\"private_key_id\": \"e6b5137873db8d2ef81e06a47289e6434ec8a165\", "
     "\"client_email\": "
diff --git a/test/core/security/json_token_test.c b/test/core/security/json_token_test.c
index 3c26de6..2a9c8f8 100644
--- a/test/core/security/json_token_test.c
+++ b/test/core/security/json_token_test.c
@@ -41,7 +41,7 @@
 #include <grpc/support/log.h>
 #include <grpc/support/slice.h>
 #include "test/core/util/test_config.h"
-#include "third_party/cJSON/cJSON.h"
+#include "src/core/json/json.h"
 #include <openssl/evp.h>
 
 /* This JSON key was generated with the GCE console and revoked immediately.
@@ -49,23 +49,23 @@
    Maximum size for a string literal is 509 chars in C89, yay!  */
 static const char test_json_key_str_part1[] =
     "{ \"private_key\": \"-----BEGIN PRIVATE KEY-----"
-    "\nMIICeAIBADANBgkqhkiG9w0BAQEFAASCAmIwggJeAgEAAoGBAOEvJsnoHnyHkXcp\n7mJEqg"
-    "WGjiw71NfXByguekSKho65FxaGbsnSM9SMQAqVk7Q2rG+I0OpsT0LrWQtZ\nyjSeg/"
+    "\\nMIICeAIBADANBgkqhkiG9w0BAQEFAASCAmIwggJeAgEAAoGBAOEvJsnoHnyHkXcp\\n7mJEqg"
+    "WGjiw71NfXByguekSKho65FxaGbsnSM9SMQAqVk7Q2rG+I0OpsT0LrWQtZ\\nyjSeg/"
     "rWBQvS4hle4LfijkP3J5BG+"
-    "IXDMP8RfziNRQsenAXDNPkY4kJCvKux2xdD\nOnVF6N7dL3nTYZg+"
-    "uQrNsMTz9UxVAgMBAAECgYEAzbLewe1xe9vy+2GoSsfib+28\nDZgSE6Bu/"
-    "zuFoPrRc6qL9p2SsnV7txrunTyJkkOnPLND9ABAXybRTlcVKP/sGgza\n/"
+    "IXDMP8RfziNRQsenAXDNPkY4kJCvKux2xdD\\nOnVF6N7dL3nTYZg+"
+    "uQrNsMTz9UxVAgMBAAECgYEAzbLewe1xe9vy+2GoSsfib+28\\nDZgSE6Bu/"
+    "zuFoPrRc6qL9p2SsnV7txrunTyJkkOnPLND9ABAXybRTlcVKP/sGgza\\n/"
     "8HpCqFYM9V8f34SBWfD4fRFT+n/"
-    "73cfRUtGXdXpseva2lh8RilIQfPhNZAncenU\ngqXjDvpkypEusgXAykECQQD+";
+    "73cfRUtGXdXpseva2lh8RilIQfPhNZAncenU\\ngqXjDvpkypEusgXAykECQQD+";
 static const char test_json_key_str_part2[] =
-    "53XxNVnxBHsYb+AYEfklR96yVi8HywjVHP34+OQZ\nCslxoHQM8s+"
-    "dBnjfScLu22JqkPv04xyxmt0QAKm9+vTdAkEA4ib7YvEAn2jXzcCI\nEkoy2L/"
-    "XydR1GCHoacdfdAwiL2npOdnbvi4ZmdYRPY1LSTO058tQHKVXV7NLeCa3\nAARh2QJBAMKeDAG"
-    "W303SQv2cZTdbeaLKJbB5drz3eo3j7dDKjrTD9JupixFbzcGw\n8FZi5c8idxiwC36kbAL6HzA"
-    "ZoX+ofI0CQE6KCzPJTtYNqyShgKAZdJ8hwOcvCZtf\n6z8RJm0+"
+    "53XxNVnxBHsYb+AYEfklR96yVi8HywjVHP34+OQZ\\nCslxoHQM8s+"
+    "dBnjfScLu22JqkPv04xyxmt0QAKm9+vTdAkEA4ib7YvEAn2jXzcCI\\nEkoy2L/"
+    "XydR1GCHoacdfdAwiL2npOdnbvi4ZmdYRPY1LSTO058tQHKVXV7NLeCa3\\nAARh2QJBAMKeDAG"
+    "W303SQv2cZTdbeaLKJbB5drz3eo3j7dDKjrTD9JupixFbzcGw\\n8FZi5c8idxiwC36kbAL6HzA"
+    "ZoX+ofI0CQE6KCzPJTtYNqyShgKAZdJ8hwOcvCZtf\\n6z8RJm0+"
     "6YBd38lfh5j8mZd7aHFf6I17j5AQY7oPEc47TjJj/"
-    "5nZ68ECQQDvYuI3\nLyK5fS8g0SYbmPOL9TlcHDOqwG0mrX9qpg5DC2fniXNSrrZ64GTDKdzZY"
-    "Ap6LI9W\nIqv4vr6y38N79TTC\n-----END PRIVATE KEY-----\n\", ";
+    "5nZ68ECQQDvYuI3\\nLyK5fS8g0SYbmPOL9TlcHDOqwG0mrX9qpg5DC2fniXNSrrZ64GTDKdzZY"
+    "Ap6LI9W\\nIqv4vr6y38N79TTC\\n-----END PRIVATE KEY-----\\n\", ";
 static const char test_json_key_str_part3[] =
     "\"private_key_id\": \"e6b5137873db8d2ef81e06a47289e6434ec8a165\", "
     "\"client_email\": "
@@ -203,10 +203,11 @@
   grpc_auth_json_key_destruct(&json_key);
 }
 
-static cJSON *parse_json_part_from_jwt(const char *str, size_t len) {
+static grpc_json *parse_json_part_from_jwt(const char *str, size_t len,
+                                           char **scratchpad) {
   char *b64;
   char *decoded;
-  cJSON *json;
+  grpc_json *json;
   gpr_slice slice;
   b64 = gpr_malloc(len + 1);
   strncpy(b64, str, len);
@@ -217,59 +218,84 @@
   strncpy(decoded, (const char *)GPR_SLICE_START_PTR(slice),
           GPR_SLICE_LENGTH(slice));
   decoded[GPR_SLICE_LENGTH(slice)] = '\0';
-  json = cJSON_Parse(decoded);
+  json = grpc_json_parse_string(decoded);
   gpr_free(b64);
-  gpr_free(decoded);
+  *scratchpad = decoded;
   gpr_slice_unref(slice);
   return json;
 }
 
-static void check_jwt_header(cJSON *header) {
-  cJSON *child = cJSON_GetObjectItem(header, "alg");
-  GPR_ASSERT(child != NULL);
-  GPR_ASSERT(child->type == cJSON_String);
-  GPR_ASSERT(!strcmp(child->valuestring, "RS256"));
+static void check_jwt_header(grpc_json *header) {
+  grpc_json *ptr;
+  grpc_json *alg = NULL;
+  grpc_json *typ = NULL;
 
-  child = cJSON_GetObjectItem(header, "typ");
-  GPR_ASSERT(child != NULL);
-  GPR_ASSERT(child->type == cJSON_String);
-  GPR_ASSERT(!strcmp(child->valuestring, "JWT"));
+  for (ptr = header->child; ptr; ptr = ptr->next) {
+    if (strcmp(ptr->key, "alg") == 0) {
+      alg = ptr;
+    } else if (strcmp(ptr->key, "typ") == 0) {
+      typ = ptr;
+    }
+  }
+  GPR_ASSERT(alg != NULL);
+  GPR_ASSERT(alg->type == GRPC_JSON_STRING);
+  GPR_ASSERT(!strcmp(alg->value, "RS256"));
+
+  GPR_ASSERT(typ != NULL);
+  GPR_ASSERT(typ->type == GRPC_JSON_STRING);
+  GPR_ASSERT(!strcmp(typ->value, "JWT"));
 }
 
-static void check_jwt_claim(cJSON *claim) {
-  gpr_timespec exp = {0, 0};
+static void check_jwt_claim(grpc_json *claim) {
+  gpr_timespec expiration = {0, 0};
   gpr_timespec issue_time = {0, 0};
   gpr_timespec parsed_lifetime;
-  cJSON *child = cJSON_GetObjectItem(claim, "iss");
-  GPR_ASSERT(child != NULL);
-  GPR_ASSERT(child->type == cJSON_String);
+  grpc_json *iss = NULL;
+  grpc_json *scope = NULL;
+  grpc_json *aud = NULL;
+  grpc_json *exp = NULL;
+  grpc_json *iat = NULL;
+  grpc_json *ptr;
+
+  for (ptr = claim->child; ptr; ptr = ptr->next) {
+    if (strcmp(ptr->key, "iss") == 0) {
+      iss = ptr;
+    } else if (strcmp(ptr->key, "scope") == 0) {
+      scope = ptr;
+    } else if (strcmp(ptr->key, "aud") == 0) {
+      aud = ptr;
+    } else if (strcmp(ptr->key, "exp") == 0) {
+      exp = ptr;
+    } else if (strcmp(ptr->key, "iat") == 0) {
+      iat = ptr;
+    }
+  }
+
+  GPR_ASSERT(iss != NULL);
+  GPR_ASSERT(iss->type == GRPC_JSON_STRING);
   GPR_ASSERT(
       !strcmp(
-          child->valuestring,
+          iss->value,
           "777-abaslkan11hlb6nmim3bpspl31ud@developer.gserviceaccount.com"));
 
-  child = cJSON_GetObjectItem(claim, "scope");
-  GPR_ASSERT(child != NULL);
-  GPR_ASSERT(child->type == cJSON_String);
-  GPR_ASSERT(!strcmp(child->valuestring, test_scope));
+  GPR_ASSERT(scope != NULL);
+  GPR_ASSERT(scope->type == GRPC_JSON_STRING);
+  GPR_ASSERT(!strcmp(scope->value, test_scope));
 
-  child = cJSON_GetObjectItem(claim, "aud");
-  GPR_ASSERT(child != NULL);
-  GPR_ASSERT(child->type == cJSON_String);
-  GPR_ASSERT(!strcmp(child->valuestring,
+  GPR_ASSERT(aud != NULL);
+  GPR_ASSERT(aud->type == GRPC_JSON_STRING);
+  GPR_ASSERT(!strcmp(aud->value,
                      "https://www.googleapis.com/oauth2/v3/token"));
 
-  child = cJSON_GetObjectItem(claim, "exp");
-  GPR_ASSERT(child != NULL);
-  GPR_ASSERT(child->type == cJSON_Number);
-  exp.tv_sec = child->valueint;
+  GPR_ASSERT(exp != NULL);
+  GPR_ASSERT(exp->type == GRPC_JSON_NUMBER);
+  expiration.tv_sec = strtol(exp->value, NULL, 10);
 
-  child = cJSON_GetObjectItem(claim, "iat");
-  GPR_ASSERT(child != NULL);
-  GPR_ASSERT(child->type == cJSON_Number);
-  issue_time.tv_sec = child->valueint;
+  GPR_ASSERT(iat != NULL);
+  GPR_ASSERT(iat->type == GRPC_JSON_NUMBER);
+  issue_time.tv_sec = strtol(iat->value, NULL, 10);
 
-  parsed_lifetime = gpr_time_sub(exp, issue_time);
+  parsed_lifetime = gpr_time_sub(expiration, issue_time);
   GPR_ASSERT(parsed_lifetime.tv_sec == grpc_max_auth_token_lifetime.tv_sec);
 }
 
@@ -300,8 +326,9 @@
 
 static void test_jwt_encode_and_sign(void) {
   char *json_string = test_json_key_str(NULL);
-  cJSON *parsed_header = NULL;
-  cJSON *parsed_claim = NULL;
+  grpc_json *parsed_header = NULL;
+  grpc_json *parsed_claim = NULL;
+  char *scratchpad;
   grpc_auth_json_key json_key =
       grpc_auth_json_key_create_from_string(json_string);
   const char *b64_signature;
@@ -310,17 +337,21 @@
                                        grpc_max_auth_token_lifetime);
   const char *dot = strchr(jwt, '.');
   GPR_ASSERT(dot != NULL);
-  parsed_header = parse_json_part_from_jwt(jwt, dot - jwt);
+  parsed_header = parse_json_part_from_jwt(jwt, dot - jwt, &scratchpad);
   GPR_ASSERT(parsed_header != NULL);
   check_jwt_header(parsed_header);
   offset = dot - jwt + 1;
+  grpc_json_destroy(parsed_header);
+  gpr_free(scratchpad);
 
   dot = strchr(jwt + offset, '.');
   GPR_ASSERT(dot != NULL);
-  parsed_claim = parse_json_part_from_jwt(jwt + offset, dot - (jwt + offset));
+  parsed_claim = parse_json_part_from_jwt(jwt + offset, dot - (jwt + offset), &scratchpad);
   GPR_ASSERT(parsed_claim != NULL);
   check_jwt_claim(parsed_claim);
   offset = dot - jwt + 1;
+  grpc_json_destroy(parsed_claim);
+  gpr_free(scratchpad);
 
   dot = strchr(jwt + offset, '.');
   GPR_ASSERT(dot == NULL); /* no more part. */
@@ -328,8 +359,6 @@
   check_jwt_signature(b64_signature, json_key.private_key, jwt, offset - 1);
 
   gpr_free(json_string);
-  cJSON_Delete(parsed_header);
-  cJSON_Delete(parsed_claim);
   grpc_auth_json_key_destruct(&json_key);
   gpr_free(jwt);
 }
diff --git a/test/core/security/secure_endpoint_test.c b/test/core/security/secure_endpoint_test.c
index 5d87502..456515b 100644
--- a/test/core/security/secure_endpoint_test.c
+++ b/test/core/security/secure_endpoint_test.c
@@ -59,7 +59,7 @@
     f.client_ep =
         grpc_secure_endpoint_create(fake_read_protector, tcp.client, NULL, 0);
   } else {
-    int i;
+    unsigned i;
     tsi_result result;
     size_t still_pending_size;
     size_t total_buffer_size = 8192;
@@ -81,9 +81,8 @@
         message_bytes += processed_message_size;
         message_size -= processed_message_size;
         cur += protected_buffer_size_to_send;
+        GPR_ASSERT(buffer_size >= protected_buffer_size_to_send);
         buffer_size -= protected_buffer_size_to_send;
-
-        GPR_ASSERT(buffer_size >= 0);
       }
       gpr_slice_unref(plain);
     }
@@ -94,8 +93,8 @@
                                                  &still_pending_size);
       GPR_ASSERT(result == TSI_OK);
       cur += protected_buffer_size_to_send;
+      GPR_ASSERT(buffer_size >= protected_buffer_size_to_send);
       buffer_size -= protected_buffer_size_to_send;
-      GPR_ASSERT(buffer_size >= 0);
     } while (still_pending_size > 0);
     encrypted_leftover = gpr_slice_from_copied_buffer(
         (const char *)encrypted_buffer, total_buffer_size - buffer_size);
diff --git a/test/core/statistics/census_log_tests.c b/test/core/statistics/census_log_tests.c
index ca2d1de..c7b2b2e 100644
--- a/test/core/statistics/census_log_tests.c
+++ b/test/core/statistics/census_log_tests.c
@@ -415,8 +415,8 @@
 /* Tries reading beyond pending write. */
 void test_read_beyond_pending_record(void) {
   /* Start a write. */
-  gpr_int32 incomplete_record_size = 10;
-  gpr_int32 complete_record_size = 20;
+  gpr_uint32 incomplete_record_size = 10;
+  gpr_uint32 complete_record_size = 20;
   size_t bytes_available;
   void* complete_record;
   const void* record_read;
@@ -457,7 +457,7 @@
   size_t bytes_available;
   const void* record_read;
   void* record_written;
-  gpr_int32 block_read = 0;
+  gpr_uint32 block_read = 0;
   printf("Starting test: detached while reading\n");
   setup_test(0);
   /* Start a write. */
diff --git a/test/core/statistics/hash_table_test.c b/test/core/statistics/hash_table_test.c
index f8df257..e8e4d8b 100644
--- a/test/core/statistics/hash_table_test.c
+++ b/test/core/statistics/hash_table_test.c
@@ -190,7 +190,7 @@
   census_ht* ht = census_ht_create(&opt);
   char key_str[1000][GPR_LTOA_MIN_BUFSIZE];
   gpr_uint64 val = 0;
-  int i = 0;
+  unsigned i = 0;
   for (i = 0; i < 1000; i++) {
     census_ht_key key;
     key.ptr = key_str[i];
diff --git a/test/core/support/host_port_test.c b/test/core/support/host_port_test.c
index 5b06b70..6d14fab 100644
--- a/test/core/support/host_port_test.c
+++ b/test/core/support/host_port_test.c
@@ -43,7 +43,8 @@
   char *buf;
   int len;
   len = gpr_join_host_port(&buf, host, port);
-  GPR_ASSERT(strlen(expected) == len);
+  GPR_ASSERT(len >= 0);
+  GPR_ASSERT(strlen(expected) == (size_t)len);
   GPR_ASSERT(strcmp(expected, buf) == 0);
   gpr_free(buf);
 }
diff --git a/test/core/support/slice_test.c b/test/core/support/slice_test.c
index 2a7056f..469d7de 100644
--- a/test/core/support/slice_test.c
+++ b/test/core/support/slice_test.c
@@ -103,10 +103,10 @@
   GPR_ASSERT(do_nothing_with_len_1_calls == 1);
 }
 
-static void test_slice_sub_works(int length) {
+static void test_slice_sub_works(unsigned length) {
   gpr_slice slice;
   gpr_slice sub;
-  int i, j, k;
+  unsigned i, j, k;
 
   LOG_TEST_NAME();
   gpr_log(GPR_INFO, "length=%d", length);
@@ -212,7 +212,7 @@
 }
 
 int main(int argc, char **argv) {
-  int length;
+  unsigned length;
   grpc_test_init(argc, argv);
   test_slice_malloc_returns_something_sensible();
   test_slice_new_returns_something_sensible();
diff --git a/test/core/surface/completion_queue_test.c b/test/core/surface/completion_queue_test.c
index 71f9cc2..dc459d6 100644
--- a/test/core/surface/completion_queue_test.c
+++ b/test/core/surface/completion_queue_test.c
@@ -214,7 +214,7 @@
   grpc_event *ev;
   grpc_completion_queue *cc;
   void *tags[128];
-  int i, j;
+  unsigned i, j;
   int on_finish_called = 0;
 
   LOG_TEST();
diff --git a/test/core/transport/chttp2/timeout_encoding_test.c b/test/core/transport/chttp2/timeout_encoding_test.c
index 0ad90db..56a1e6e 100644
--- a/test/core/transport/chttp2/timeout_encoding_test.c
+++ b/test/core/transport/chttp2/timeout_encoding_test.c
@@ -94,7 +94,7 @@
   long test_vals[] = {1,       12,       123,       1234,     12345,   123456,
                       1234567, 12345678, 123456789, 98765432, 9876543, 987654,
                       98765,   9876,     987,       98,       9};
-  int i;
+  unsigned i;
   char *input;
   for (i = 0; i < GPR_ARRAY_SIZE(test_vals); i++) {
     gpr_asprintf(&input, "%ld%c", test_vals[i], ext);
diff --git a/test/core/transport/stream_op_test.c b/test/core/transport/stream_op_test.c
index 0d1122c..e6649ec 100644
--- a/test/core/transport/stream_op_test.c
+++ b/test/core/transport/stream_op_test.c
@@ -61,7 +61,7 @@
   gpr_slice test_slice_3 = gpr_slice_malloc(3);
   gpr_slice test_slice_4 = gpr_slice_malloc(4);
   char x;
-  int i;
+  unsigned i;
 
   grpc_stream_op_buffer buf;
   grpc_stream_op_buffer buf2;
diff --git a/test/core/transport/transport_end2end_tests.c b/test/core/transport/transport_end2end_tests.c
index 25276c0..2cd033b 100644
--- a/test/core/transport/transport_end2end_tests.c
+++ b/test/core/transport/transport_end2end_tests.c
@@ -913,7 +913,7 @@
                                                      100000, 1000000, };
 
 void grpc_transport_end2end_tests(grpc_transport_test_config *config) {
-  int i;
+  unsigned i;
 
   g_metadata_context = grpc_mdctx_create();
 
diff --git a/test/cpp/qps/client.cc b/test/cpp/qps/client.cc
index affc492..d2c83aa 100644
--- a/test/cpp/qps/client.cc
+++ b/test/cpp/qps/client.cc
@@ -44,6 +44,7 @@
 #include <google/gflags.h>
 #include <grpc++/client_context.h>
 #include <grpc++/status.h>
+#include "test/core/util/grpc_profiler.h"
 #include "test/cpp/util/create_test_channel.h"
 #include "test/cpp/qps/qpstest.pb.h"
 
@@ -129,6 +130,8 @@
   grpc::Status status_beg = stub_stats->CollectServerStats(
       &context_stats_begin, stats_request, &server_stats_begin);
 
+  grpc_profiler_start("qps_client.prof");
+
   for (int i = 0; i < client_threads; i++) {
     gpr_histogram *hist = gpr_histogram_create(0.01, 60e9);
     GPR_ASSERT(hist != NULL);
@@ -172,6 +175,9 @@
   for (auto &t : threads) {
     t.join();
   }
+
+  grpc_profiler_stop();
+
   for (int i = 0; i < client_threads; i++) {
     gpr_histogram *h = thread_stats[i];
     gpr_log(GPR_INFO, "latency at thread %d (50/90/95/99/99.9): %f/%f/%f/%f/%f",
diff --git a/test/cpp/qps/server.cc b/test/cpp/qps/server.cc
index eb810b8..c35d9eb 100644
--- a/test/cpp/qps/server.cc
+++ b/test/cpp/qps/server.cc
@@ -33,6 +33,7 @@
 
 #include <sys/time.h>
 #include <sys/resource.h>
+#include <sys/signal.h>
 #include <thread>
 
 #include <google/gflags.h>
@@ -43,6 +44,7 @@
 #include <grpc++/server_builder.h>
 #include <grpc++/server_context.h>
 #include <grpc++/status.h>
+#include "test/core/util/grpc_profiler.h"
 #include "test/cpp/qps/qpstest.pb.h"
 
 #include <grpc/grpc.h>
@@ -63,11 +65,15 @@
 using grpc::testing::TestService;
 using grpc::Status;
 
+static bool got_sigint = false;
+
+static void sigint_handler(int x) { got_sigint = true; }
+
 static double time_double(struct timeval* tv) {
   return tv->tv_sec + 1e-6 * tv->tv_usec;
 }
 
-bool SetPayload(PayloadType type, int size, Payload* payload) {
+static bool SetPayload(PayloadType type, int size, Payload* payload) {
   PayloadType response_type = type;
   // TODO(yangg): Support UNCOMPRESSABLE payload.
   if (type != PayloadType::COMPRESSABLE) {
@@ -79,7 +85,9 @@
   return true;
 }
 
-class TestServiceImpl : public TestService::Service {
+namespace {
+
+class TestServiceImpl final : public TestService::Service {
  public:
   Status CollectServerStats(ServerContext* context, const StatsRequest*,
                             ServerStats* response) {
@@ -104,7 +112,9 @@
   }
 };
 
-void RunServer() {
+}  // namespace
+
+static void RunServer() {
   char* server_address = NULL;
   gpr_join_host_port(&server_address, "::", FLAGS_port);
 
@@ -118,10 +128,15 @@
   builder.RegisterService(service.service());
   std::unique_ptr<Server> server(builder.BuildAndStart());
   gpr_log(GPR_INFO, "Server listening on %s\n", server_address);
-  while (true) {
+
+  grpc_profiler_start("qps_server.prof");
+
+  while (!got_sigint) {
     std::this_thread::sleep_for(std::chrono::seconds(5));
   }
 
+  grpc_profiler_stop();
+
   gpr_free(server_address);
 }
 
@@ -129,6 +144,8 @@
   grpc_init();
   google::ParseCommandLineFlags(&argc, &argv, true);
 
+  signal(SIGINT, sigint_handler);
+
   GPR_ASSERT(FLAGS_port != 0);
   GPR_ASSERT(!FLAGS_enable_ssl);
   RunServer();
@@ -136,3 +153,4 @@
   grpc_shutdown();
   return 0;
 }
+
diff --git a/third_party/cJSON/LICENSE b/third_party/cJSON/LICENSE
deleted file mode 100644
index fa0a438..0000000
--- a/third_party/cJSON/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-  Copyright (c) 2009 Dave Gamble
- 
-  Permission is hereby granted, free of charge, to any person obtaining a copy
-  of this software and associated documentation files (the "Software"), to deal
-  in the Software without restriction, including without limitation the rights
-  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-  copies of the Software, and to permit persons to whom the Software is
-  furnished to do so, subject to the following conditions:
- 
-  The above copyright notice and this permission notice shall be included in
-  all copies or substantial portions of the Software.
- 
-  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-  THE SOFTWARE.
-
diff --git a/third_party/cJSON/README b/third_party/cJSON/README
deleted file mode 100644
index 7531c04..0000000
--- a/third_party/cJSON/README
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
-  Copyright (c) 2009 Dave Gamble
-
-  Permission is hereby granted, free of charge, to any person obtaining a copy
-  of this software and associated documentation files (the "Software"), to deal
-  in the Software without restriction, including without limitation the rights
-  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-  copies of the Software, and to permit persons to whom the Software is
-  furnished to do so, subject to the following conditions:
-
-  The above copyright notice and this permission notice shall be included in
-  all copies or substantial portions of the Software.
-
-  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-  THE SOFTWARE.
-*/
-
-Welcome to cJSON.
-
-cJSON aims to be the dumbest possible parser that you can get your job done with.
-It's a single file of C, and a single header file.
-
-JSON is described best here: http://www.json.org/
-It's like XML, but fat-free. You use it to move data around, store things, or just
-generally represent your program's state.
-
-
-First up, how do I build?
-Add cJSON.c to your project, and put cJSON.h somewhere in the header search path.
-For example, to build the test app:
-
-gcc cJSON.c test.c -o test -lm
-./test
-
-
-As a library, cJSON exists to take away as much legwork as it can, but not get in your way.
-As a point of pragmatism (i.e. ignoring the truth), I'm going to say that you can use it
-in one of two modes: Auto and Manual. Let's have a quick run-through.
-
-
-I lifted some JSON from this page: http://www.json.org/fatfree.html
-That page inspired me to write cJSON, which is a parser that tries to share the same
-philosophy as JSON itself. Simple, dumb, out of the way.
-
-Some JSON:
-{
-    "name": "Jack (\"Bee\") Nimble", 
-    "format": {
-        "type":       "rect", 
-        "width":      1920, 
-        "height":     1080, 
-        "interlace":  false, 
-        "frame rate": 24
-    }
-}
-
-Assume that you got this from a file, a webserver, or magic JSON elves, whatever,
-you have a char * to it. Everything is a cJSON struct.
-Get it parsed:
-	cJSON *root = cJSON_Parse(my_json_string);
-
-This is an object. We're in C. We don't have objects. But we do have structs.
-What's the framerate?
-
-	cJSON *format = cJSON_GetObjectItem(root,"format");
-	int framerate = cJSON_GetObjectItem(format,"frame rate")->valueint;
-
-
-Want to change the framerate?
-	cJSON_GetObjectItem(format,"frame rate")->valueint=25;
-	
-Back to disk?
-	char *rendered=cJSON_Print(root);
-
-Finished? Delete the root (this takes care of everything else).
-	cJSON_Delete(root);
-
-That's AUTO mode. If you're going to use Auto mode, you really ought to check pointers
-before you dereference them. If you want to see how you'd build this struct in code?
-	cJSON *root,*fmt;
-	root=cJSON_CreateObject();	
-	cJSON_AddItemToObject(root, "name", cJSON_CreateString("Jack (\"Bee\") Nimble"));
-	cJSON_AddItemToObject(root, "format", fmt=cJSON_CreateObject());
-	cJSON_AddStringToObject(fmt,"type",		"rect");
-	cJSON_AddNumberToObject(fmt,"width",		1920);
-	cJSON_AddNumberToObject(fmt,"height",		1080);
-	cJSON_AddFalseToObject (fmt,"interlace");
-	cJSON_AddNumberToObject(fmt,"frame rate",	24);
-
-Hopefully we can agree that's not a lot of code? There's no overhead, no unnecessary setup.
-Look at test.c for a bunch of nice examples, mostly all ripped off the json.org site, and
-a few from elsewhere.
-
-What about manual mode? First up you need some detail.
-Let's cover how the cJSON objects represent the JSON data.
-cJSON doesn't distinguish arrays from objects in handling; just type.
-Each cJSON has, potentially, a child, siblings, value, a name.
-
-The root object has: Object Type and a Child
-The Child has name "name", with value "Jack ("Bee") Nimble", and a sibling:
-Sibling has type Object, name "format", and a child.
-That child has type String, name "type", value "rect", and a sibling:
-Sibling has type Number, name "width", value 1920, and a sibling:
-Sibling has type Number, name "height", value 1080, and a sibling:
-Sibling hs type False, name "interlace", and a sibling:
-Sibling has type Number, name "frame rate", value 24
-
-Here's the structure:
-typedef struct cJSON {
-	struct cJSON *next,*prev;
-	struct cJSON *child;
-
-	int type;
-
-	char *valuestring;
-	int valueint;
-	double valuedouble;
-
-	char *string;
-} cJSON;
-
-By default all values are 0 unless set by virtue of being meaningful.
-
-next/prev is a doubly linked list of siblings. next takes you to your sibling,
-prev takes you back from your sibling to you.
-Only objects and arrays have a "child", and it's the head of the doubly linked list.
-A "child" entry will have prev==0, but next potentially points on. The last sibling has next=0.
-The type expresses Null/True/False/Number/String/Array/Object, all of which are #defined in
-cJSON.h
-
-A Number has valueint and valuedouble. If you're expecting an int, read valueint, if not read
-valuedouble.
-
-Any entry which is in the linked list which is the child of an object will have a "string"
-which is the "name" of the entry. When I said "name" in the above example, that's "string".
-"string" is the JSON name for the 'variable name' if you will.
-
-Now you can trivially walk the lists, recursively, and parse as you please.
-You can invoke cJSON_Parse to get cJSON to parse for you, and then you can take
-the root object, and traverse the structure (which is, formally, an N-tree),
-and tokenise as you please. If you wanted to build a callback style parser, this is how
-you'd do it (just an example, since these things are very specific):
-
-void parse_and_callback(cJSON *item,const char *prefix)
-{
-	while (item)
-	{
-		char *newprefix=malloc(strlen(prefix)+strlen(item->name)+2);
-		sprintf(newprefix,"%s/%s",prefix,item->name);
-		int dorecurse=callback(newprefix, item->type, item);
-		if (item->child && dorecurse) parse_and_callback(item->child,newprefix);
-		item=item->next;
-		free(newprefix);
-	}
-}
-
-The prefix process will build you a separated list, to simplify your callback handling.
-The 'dorecurse' flag would let the callback decide to handle sub-arrays on it's own, or
-let you invoke it per-item. For the item above, your callback might look like this:
-
-int callback(const char *name,int type,cJSON *item)
-{
-	if (!strcmp(name,"name"))	{ /* populate name */ }
-	else if (!strcmp(name,"format/type")	{ /* handle "rect" */ }
-	else if (!strcmp(name,"format/width")	{ /* 800 */ }
-	else if (!strcmp(name,"format/height")	{ /* 600 */ }
-	else if (!strcmp(name,"format/interlace")	{ /* false */ }
-	else if (!strcmp(name,"format/frame rate")	{ /* 24 */ }
-	return 1;
-}
-
-Alternatively, you might like to parse iteratively.
-You'd use:
-
-void parse_object(cJSON *item)
-{
-	int i; for (i=0;i<cJSON_GetArraySize(item);i++)
-	{
-		cJSON *subitem=cJSON_GetArrayItem(item,i);
-		// handle subitem.	
-	}
-}
-
-Or, for PROPER manual mode:
-
-void parse_object(cJSON *item)
-{
-	cJSON *subitem=item->child;
-	while (subitem)
-	{
-		// handle subitem
-		if (subitem->child) parse_object(subitem->child);
-		
-		subitem=subitem->next;
-	}
-}
-
-Of course, this should look familiar, since this is just a stripped-down version
-of the callback-parser.
-
-This should cover most uses you'll find for parsing. The rest should be possible
-to infer.. and if in doubt, read the source! There's not a lot of it! ;)
-
-
-In terms of constructing JSON data, the example code above is the right way to do it.
-You can, of course, hand your sub-objects to other functions to populate.
-Also, if you find a use for it, you can manually build the objects.
-For instance, suppose you wanted to build an array of objects?
-
-cJSON *objects[24];
-
-cJSON *Create_array_of_anything(cJSON **items,int num)
-{
-	int i;cJSON *prev, *root=cJSON_CreateArray();
-	for (i=0;i<24;i++)
-	{
-		if (!i)	root->child=objects[i];
-		else	prev->next=objects[i], objects[i]->prev=prev;
-		prev=objects[i];
-	}
-	return root;
-}
-	
-and simply: Create_array_of_anything(objects,24);
-
-cJSON doesn't make any assumptions about what order you create things in.
-You can attach the objects, as above, and later add children to each
-of those objects.
-
-As soon as you call cJSON_Print, it renders the structure to text.
-
-
-
-The test.c code shows how to handle a bunch of typical cases. If you uncomment
-the code, it'll load, parse and print a bunch of test files, also from json.org,
-which are more complex than I'd care to try and stash into a const char array[].
-
-
-Enjoy cJSON!
-
-
-- Dave Gamble, Aug 2009
diff --git a/third_party/cJSON/cJSON.c b/third_party/cJSON/cJSON.c
deleted file mode 100644
index fe446d6..0000000
--- a/third_party/cJSON/cJSON.c
+++ /dev/null
@@ -1,596 +0,0 @@
-/*
-  Copyright (c) 2009 Dave Gamble
-
-  Permission is hereby granted, free of charge, to any person obtaining a copy
-  of this software and associated documentation files (the "Software"), to deal
-  in the Software without restriction, including without limitation the rights
-  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-  copies of the Software, and to permit persons to whom the Software is
-  furnished to do so, subject to the following conditions:
-
-  The above copyright notice and this permission notice shall be included in
-  all copies or substantial portions of the Software.
-
-  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-  THE SOFTWARE.
-*/
-
-/* cJSON */
-/* JSON parser in C. */
-
-#include <string.h>
-#include <stdio.h>
-#include <math.h>
-#include <stdlib.h>
-#include <float.h>
-#include <limits.h>
-#include <ctype.h>
-#include "cJSON.h"
-
-static const char *ep;
-
-const char *cJSON_GetErrorPtr(void) {return ep;}
-
-static int cJSON_strcasecmp(const char *s1,const char *s2)
-{
-	if (!s1) return (s1==s2)?0:1;if (!s2) return 1;
-	for(; tolower(*s1) == tolower(*s2); ++s1, ++s2)	if(*s1 == 0)	return 0;
-	return tolower(*(const unsigned char *)s1) - tolower(*(const unsigned char *)s2);
-}
-
-static void *(*cJSON_malloc)(size_t sz) = malloc;
-static void (*cJSON_free)(void *ptr) = free;
-
-static char* cJSON_strdup(const char* str)
-{
-      size_t len;
-      char* copy;
-
-      len = strlen(str) + 1;
-      if (!(copy = (char*)cJSON_malloc(len))) return 0;
-      memcpy(copy,str,len);
-      return copy;
-}
-
-void cJSON_InitHooks(cJSON_Hooks* hooks)
-{
-    if (!hooks) { /* Reset hooks */
-        cJSON_malloc = malloc;
-        cJSON_free = free;
-        return;
-    }
-
-	cJSON_malloc = (hooks->malloc_fn)?hooks->malloc_fn:malloc;
-	cJSON_free	 = (hooks->free_fn)?hooks->free_fn:free;
-}
-
-/* Internal constructor. */
-static cJSON *cJSON_New_Item(void)
-{
-	cJSON* node = (cJSON*)cJSON_malloc(sizeof(cJSON));
-	if (node) memset(node,0,sizeof(cJSON));
-	return node;
-}
-
-/* Delete a cJSON structure. */
-void cJSON_Delete(cJSON *c)
-{
-	cJSON *next;
-	while (c)
-	{
-		next=c->next;
-		if (!(c->type&cJSON_IsReference) && c->child) cJSON_Delete(c->child);
-		if (!(c->type&cJSON_IsReference) && c->valuestring) cJSON_free(c->valuestring);
-		if (c->string) cJSON_free(c->string);
-		cJSON_free(c);
-		c=next;
-	}
-}
-
-/* Parse the input text to generate a number, and populate the result into item. */
-static const char *parse_number(cJSON *item,const char *num)
-{
-	double n=0,sign=1,scale=0;int subscale=0,signsubscale=1;
-
-	if (*num=='-') sign=-1,num++;	/* Has sign? */
-	if (*num=='0') num++;			/* is zero */
-	if (*num>='1' && *num<='9')	do	n=(n*10.0)+(*num++ -'0');	while (*num>='0' && *num<='9');	/* Number? */
-	if (*num=='.' && num[1]>='0' && num[1]<='9') {num++;		do	n=(n*10.0)+(*num++ -'0'),scale--; while (*num>='0' && *num<='9');}	/* Fractional part? */
-	if (*num=='e' || *num=='E')		/* Exponent? */
-	{	num++;if (*num=='+') num++;	else if (*num=='-') signsubscale=-1,num++;		/* With sign? */
-		while (*num>='0' && *num<='9') subscale=(subscale*10)+(*num++ - '0');	/* Number? */
-	}
-
-	n=sign*n*pow(10.0,(scale+subscale*signsubscale));	/* number = +/- number.fraction * 10^+/- exponent */
-	
-	item->valuedouble=n;
-	item->valueint=(int)n;
-	item->type=cJSON_Number;
-	return num;
-}
-
-/* Render the number nicely from the given item into a string. */
-static char *print_number(cJSON *item)
-{
-	char *str;
-	double d=item->valuedouble;
-	if (fabs(((double)item->valueint)-d)<=DBL_EPSILON && d<=INT_MAX && d>=INT_MIN)
-	{
-		str=(char*)cJSON_malloc(21);	/* 2^64+1 can be represented in 21 chars. */
-		if (str) sprintf(str,"%d",item->valueint);
-	}
-	else
-	{
-		str=(char*)cJSON_malloc(64);	/* This is a nice tradeoff. */
-		if (str)
-		{
-			if (fabs(floor(d)-d)<=DBL_EPSILON && fabs(d)<1.0e60)sprintf(str,"%.0f",d);
-			else if (fabs(d)<1.0e-6 || fabs(d)>1.0e9)			sprintf(str,"%e",d);
-			else												sprintf(str,"%f",d);
-		}
-	}
-	return str;
-}
-
-static unsigned parse_hex4(const char *str)
-{
-	unsigned h=0;
-	if (*str>='0' && *str<='9') h+=(*str)-'0'; else if (*str>='A' && *str<='F') h+=10+(*str)-'A'; else if (*str>='a' && *str<='f') h+=10+(*str)-'a'; else return 0;
-	h=h<<4;str++;
-	if (*str>='0' && *str<='9') h+=(*str)-'0'; else if (*str>='A' && *str<='F') h+=10+(*str)-'A'; else if (*str>='a' && *str<='f') h+=10+(*str)-'a'; else return 0;
-	h=h<<4;str++;
-	if (*str>='0' && *str<='9') h+=(*str)-'0'; else if (*str>='A' && *str<='F') h+=10+(*str)-'A'; else if (*str>='a' && *str<='f') h+=10+(*str)-'a'; else return 0;
-	h=h<<4;str++;
-	if (*str>='0' && *str<='9') h+=(*str)-'0'; else if (*str>='A' && *str<='F') h+=10+(*str)-'A'; else if (*str>='a' && *str<='f') h+=10+(*str)-'a'; else return 0;
-	return h;
-}
-
-/* Parse the input text into an unescaped cstring, and populate item. */
-static const unsigned char firstByteMark[7] = { 0x00, 0x00, 0xC0, 0xE0, 0xF0, 0xF8, 0xFC };
-static const char *parse_string(cJSON *item,const char *str)
-{
-	const char *ptr=str+1;char *ptr2;char *out;int len=0;unsigned uc,uc2;
-	if (*str!='\"') {ep=str;return 0;}	/* not a string! */
-	
-	while (*ptr!='\"' && *ptr && ++len) if (*ptr++ == '\\') ptr++;	/* Skip escaped quotes. */
-	
-	out=(char*)cJSON_malloc(len+1);	/* This is how long we need for the string, roughly. */
-	if (!out) return 0;
-	
-	ptr=str+1;ptr2=out;
-	while (*ptr!='\"' && *ptr)
-	{
-		if (*ptr!='\\') *ptr2++=*ptr++;
-		else
-		{
-			ptr++;
-			switch (*ptr)
-			{
-				case 'b': *ptr2++='\b';	break;
-				case 'f': *ptr2++='\f';	break;
-				case 'n': *ptr2++='\n';	break;
-				case 'r': *ptr2++='\r';	break;
-				case 't': *ptr2++='\t';	break;
-				case 'u':	 /* transcode utf16 to utf8. */
-					uc=parse_hex4(ptr+1);ptr+=4;	/* get the unicode char. */
-
-					if ((uc>=0xDC00 && uc<=0xDFFF) || uc==0)	break;	/* check for invalid.	*/
-
-					if (uc>=0xD800 && uc<=0xDBFF)	/* UTF16 surrogate pairs.	*/
-					{
-						if (ptr[1]!='\\' || ptr[2]!='u')	break;	/* missing second-half of surrogate.	*/
-						uc2=parse_hex4(ptr+3);ptr+=6;
-						if (uc2<0xDC00 || uc2>0xDFFF)		break;	/* invalid second-half of surrogate.	*/
-						uc=0x10000 + (((uc&0x3FF)<<10) | (uc2&0x3FF));
-					}
-
-					len=4;if (uc<0x80) len=1;else if (uc<0x800) len=2;else if (uc<0x10000) len=3; ptr2+=len;
-					
-					switch (len) {
-						case 4: *--ptr2 =((uc | 0x80) & 0xBF); uc >>= 6;
-						case 3: *--ptr2 =((uc | 0x80) & 0xBF); uc >>= 6;
-						case 2: *--ptr2 =((uc | 0x80) & 0xBF); uc >>= 6;
-						case 1: *--ptr2 =(uc | firstByteMark[len]);
-					}
-					ptr2+=len;
-					break;
-				default:  *ptr2++=*ptr; break;
-			}
-			ptr++;
-		}
-	}
-	*ptr2=0;
-	if (*ptr=='\"') ptr++;
-	item->valuestring=out;
-	item->type=cJSON_String;
-	return ptr;
-}
-
-/* Render the cstring provided to an escaped version that can be printed. */
-static char *print_string_ptr(const char *str)
-{
-	const char *ptr;char *ptr2,*out;int len=0;unsigned char token;
-	
-	if (!str) return cJSON_strdup("");
-	ptr=str;while ((token=*ptr) && ++len) {if (strchr("\"\\\b\f\n\r\t",token)) len++; else if (token<32) len+=5;ptr++;}
-	
-	out=(char*)cJSON_malloc(len+3);
-	if (!out) return 0;
-
-	ptr2=out;ptr=str;
-	*ptr2++='\"';
-	while (*ptr)
-	{
-		if ((unsigned char)*ptr>31 && *ptr!='\"' && *ptr!='\\') *ptr2++=*ptr++;
-		else
-		{
-			*ptr2++='\\';
-			switch (token=*ptr++)
-			{
-				case '\\':	*ptr2++='\\';	break;
-				case '\"':	*ptr2++='\"';	break;
-				case '\b':	*ptr2++='b';	break;
-				case '\f':	*ptr2++='f';	break;
-				case '\n':	*ptr2++='n';	break;
-				case '\r':	*ptr2++='r';	break;
-				case '\t':	*ptr2++='t';	break;
-				default: sprintf(ptr2,"u%04x",token);ptr2+=5;	break;	/* escape and print */
-			}
-		}
-	}
-	*ptr2++='\"';*ptr2++=0;
-	return out;
-}
-/* Invote print_string_ptr (which is useful) on an item. */
-static char *print_string(cJSON *item)	{return print_string_ptr(item->valuestring);}
-
-/* Predeclare these prototypes. */
-static const char *parse_value(cJSON *item,const char *value);
-static char *print_value(cJSON *item,int depth,int fmt);
-static const char *parse_array(cJSON *item,const char *value);
-static char *print_array(cJSON *item,int depth,int fmt);
-static const char *parse_object(cJSON *item,const char *value);
-static char *print_object(cJSON *item,int depth,int fmt);
-
-/* Utility to jump whitespace and cr/lf */
-static const char *skip(const char *in) {while (in && *in && (unsigned char)*in<=32) in++; return in;}
-
-/* Parse an object - create a new root, and populate. */
-cJSON *cJSON_ParseWithOpts(const char *value,const char **return_parse_end,int require_null_terminated)
-{
-	const char *end=0;
-	cJSON *c=cJSON_New_Item();
-	ep=0;
-	if (!c) return 0;       /* memory fail */
-
-	end=parse_value(c,skip(value));
-	if (!end)	{cJSON_Delete(c);return 0;}	/* parse failure. ep is set. */
-
-	/* if we require null-terminated JSON without appended garbage, skip and then check for a null terminator */
-	if (require_null_terminated) {end=skip(end);if (*end) {cJSON_Delete(c);ep=end;return 0;}}
-	if (return_parse_end) *return_parse_end=end;
-	return c;
-}
-/* Default options for cJSON_Parse */
-cJSON *cJSON_Parse(const char *value) {return cJSON_ParseWithOpts(value,0,0);}
-
-/* Render a cJSON item/entity/structure to text. */
-char *cJSON_Print(cJSON *item)				{return print_value(item,0,1);}
-char *cJSON_PrintUnformatted(cJSON *item)	{return print_value(item,0,0);}
-
-/* Parser core - when encountering text, process appropriately. */
-static const char *parse_value(cJSON *item,const char *value)
-{
-	if (!value)						return 0;	/* Fail on null. */
-	if (!strncmp(value,"null",4))	{ item->type=cJSON_NULL;  return value+4; }
-	if (!strncmp(value,"false",5))	{ item->type=cJSON_False; return value+5; }
-	if (!strncmp(value,"true",4))	{ item->type=cJSON_True; item->valueint=1;	return value+4; }
-	if (*value=='\"')				{ return parse_string(item,value); }
-	if (*value=='-' || (*value>='0' && *value<='9'))	{ return parse_number(item,value); }
-	if (*value=='[')				{ return parse_array(item,value); }
-	if (*value=='{')				{ return parse_object(item,value); }
-
-	ep=value;return 0;	/* failure. */
-}
-
-/* Render a value to text. */
-static char *print_value(cJSON *item,int depth,int fmt)
-{
-	char *out=0;
-	if (!item) return 0;
-	switch ((item->type)&255)
-	{
-		case cJSON_NULL:	out=cJSON_strdup("null");	break;
-		case cJSON_False:	out=cJSON_strdup("false");break;
-		case cJSON_True:	out=cJSON_strdup("true"); break;
-		case cJSON_Number:	out=print_number(item);break;
-		case cJSON_String:	out=print_string(item);break;
-		case cJSON_Array:	out=print_array(item,depth,fmt);break;
-		case cJSON_Object:	out=print_object(item,depth,fmt);break;
-	}
-	return out;
-}
-
-/* Build an array from input text. */
-static const char *parse_array(cJSON *item,const char *value)
-{
-	cJSON *child;
-	if (*value!='[')	{ep=value;return 0;}	/* not an array! */
-
-	item->type=cJSON_Array;
-	value=skip(value+1);
-	if (*value==']') return value+1;	/* empty array. */
-
-	item->child=child=cJSON_New_Item();
-	if (!item->child) return 0;		 /* memory fail */
-	value=skip(parse_value(child,skip(value)));	/* skip any spacing, get the value. */
-	if (!value) return 0;
-
-	while (*value==',')
-	{
-		cJSON *new_item;
-		if (!(new_item=cJSON_New_Item())) return 0; 	/* memory fail */
-		child->next=new_item;new_item->prev=child;child=new_item;
-		value=skip(parse_value(child,skip(value+1)));
-		if (!value) return 0;	/* memory fail */
-	}
-
-	if (*value==']') return value+1;	/* end of array */
-	ep=value;return 0;	/* malformed. */
-}
-
-/* Render an array to text */
-static char *print_array(cJSON *item,int depth,int fmt)
-{
-	char **entries;
-	char *out=0,*ptr,*ret;int len=5;
-	cJSON *child=item->child;
-	int numentries=0,i=0,fail=0;
-	
-	/* How many entries in the array? */
-	while (child) numentries++,child=child->next;
-	/* Explicitly handle numentries==0 */
-	if (!numentries)
-	{
-		out=(char*)cJSON_malloc(3);
-		if (out) strcpy(out,"[]");
-		return out;
-	}
-	/* Allocate an array to hold the values for each */
-	entries=(char**)cJSON_malloc(numentries*sizeof(char*));
-	if (!entries) return 0;
-	memset(entries,0,numentries*sizeof(char*));
-	/* Retrieve all the results: */
-	child=item->child;
-	while (child && !fail)
-	{
-		ret=print_value(child,depth+1,fmt);
-		entries[i++]=ret;
-		if (ret) len+=strlen(ret)+2+(fmt?1:0); else fail=1;
-		child=child->next;
-	}
-	
-	/* If we didn't fail, try to malloc the output string */
-	if (!fail) out=(char*)cJSON_malloc(len);
-	/* If that fails, we fail. */
-	if (!out) fail=1;
-
-	/* Handle failure. */
-	if (fail)
-	{
-		for (i=0;i<numentries;i++) if (entries[i]) cJSON_free(entries[i]);
-		cJSON_free(entries);
-		return 0;
-	}
-	
-	/* Compose the output array. */
-	*out='[';
-	ptr=out+1;*ptr=0;
-	for (i=0;i<numentries;i++)
-	{
-		strcpy(ptr,entries[i]);ptr+=strlen(entries[i]);
-		if (i!=numentries-1) {*ptr++=',';if(fmt)*ptr++=' ';*ptr=0;}
-		cJSON_free(entries[i]);
-	}
-	cJSON_free(entries);
-	*ptr++=']';*ptr++=0;
-	return out;	
-}
-
-/* Build an object from the text. */
-static const char *parse_object(cJSON *item,const char *value)
-{
-	cJSON *child;
-	if (*value!='{')	{ep=value;return 0;}	/* not an object! */
-	
-	item->type=cJSON_Object;
-	value=skip(value+1);
-	if (*value=='}') return value+1;	/* empty array. */
-	
-	item->child=child=cJSON_New_Item();
-	if (!item->child) return 0;
-	value=skip(parse_string(child,skip(value)));
-	if (!value) return 0;
-	child->string=child->valuestring;child->valuestring=0;
-	if (*value!=':') {ep=value;return 0;}	/* fail! */
-	value=skip(parse_value(child,skip(value+1)));	/* skip any spacing, get the value. */
-	if (!value) return 0;
-	
-	while (*value==',')
-	{
-		cJSON *new_item;
-		if (!(new_item=cJSON_New_Item()))	return 0; /* memory fail */
-		child->next=new_item;new_item->prev=child;child=new_item;
-		value=skip(parse_string(child,skip(value+1)));
-		if (!value) return 0;
-		child->string=child->valuestring;child->valuestring=0;
-		if (*value!=':') {ep=value;return 0;}	/* fail! */
-		value=skip(parse_value(child,skip(value+1)));	/* skip any spacing, get the value. */
-		if (!value) return 0;
-	}
-	
-	if (*value=='}') return value+1;	/* end of array */
-	ep=value;return 0;	/* malformed. */
-}
-
-/* Render an object to text. */
-static char *print_object(cJSON *item,int depth,int fmt)
-{
-	char **entries=0,**names=0;
-	char *out=0,*ptr,*ret,*str;int len=7,i=0,j;
-	cJSON *child=item->child;
-	int numentries=0,fail=0;
-	/* Count the number of entries. */
-	while (child) numentries++,child=child->next;
-	/* Explicitly handle empty object case */
-	if (!numentries)
-	{
-		out=(char*)cJSON_malloc(fmt?depth+4:3);
-		if (!out)	return 0;
-		ptr=out;*ptr++='{';
-		if (fmt) {*ptr++='\n';for (i=0;i<depth-1;i++) *ptr++='\t';}
-		*ptr++='}';*ptr++=0;
-		return out;
-	}
-	/* Allocate space for the names and the objects */
-	entries=(char**)cJSON_malloc(numentries*sizeof(char*));
-	if (!entries) return 0;
-	names=(char**)cJSON_malloc(numentries*sizeof(char*));
-	if (!names) {cJSON_free(entries);return 0;}
-	memset(entries,0,sizeof(char*)*numentries);
-	memset(names,0,sizeof(char*)*numentries);
-
-	/* Collect all the results into our arrays: */
-	child=item->child;depth++;if (fmt) len+=depth;
-	while (child)
-	{
-		names[i]=str=print_string_ptr(child->string);
-		entries[i++]=ret=print_value(child,depth,fmt);
-		if (str && ret) len+=strlen(ret)+strlen(str)+2+(fmt?2+depth:0); else fail=1;
-		child=child->next;
-	}
-	
-	/* Try to allocate the output string */
-	if (!fail) out=(char*)cJSON_malloc(len);
-	if (!out) fail=1;
-
-	/* Handle failure */
-	if (fail)
-	{
-		for (i=0;i<numentries;i++) {if (names[i]) cJSON_free(names[i]);if (entries[i]) cJSON_free(entries[i]);}
-		cJSON_free(names);cJSON_free(entries);
-		return 0;
-	}
-	
-	/* Compose the output: */
-	*out='{';ptr=out+1;if (fmt)*ptr++='\n';*ptr=0;
-	for (i=0;i<numentries;i++)
-	{
-		if (fmt) for (j=0;j<depth;j++) *ptr++='\t';
-		strcpy(ptr,names[i]);ptr+=strlen(names[i]);
-		*ptr++=':';if (fmt) *ptr++='\t';
-		strcpy(ptr,entries[i]);ptr+=strlen(entries[i]);
-		if (i!=numentries-1) *ptr++=',';
-		if (fmt) *ptr++='\n';*ptr=0;
-		cJSON_free(names[i]);cJSON_free(entries[i]);
-	}
-	
-	cJSON_free(names);cJSON_free(entries);
-	if (fmt) for (i=0;i<depth-1;i++) *ptr++='\t';
-	*ptr++='}';*ptr++=0;
-	return out;	
-}
-
-/* Get Array size/item / object item. */
-int    cJSON_GetArraySize(cJSON *array)							{cJSON *c=array->child;int i=0;while(c)i++,c=c->next;return i;}
-cJSON *cJSON_GetArrayItem(cJSON *array,int item)				{cJSON *c=array->child;  while (c && item>0) item--,c=c->next; return c;}
-cJSON *cJSON_GetObjectItem(cJSON *object,const char *string)	{cJSON *c=object->child; while (c && cJSON_strcasecmp(c->string,string)) c=c->next; return c;}
-
-/* Utility for array list handling. */
-static void suffix_object(cJSON *prev,cJSON *item) {prev->next=item;item->prev=prev;}
-/* Utility for handling references. */
-static cJSON *create_reference(cJSON *item) {cJSON *ref=cJSON_New_Item();if (!ref) return 0;memcpy(ref,item,sizeof(cJSON));ref->string=0;ref->type|=cJSON_IsReference;ref->next=ref->prev=0;return ref;}
-
-/* Add item to array/object. */
-void   cJSON_AddItemToArray(cJSON *array, cJSON *item)						{cJSON *c=array->child;if (!item) return; if (!c) {array->child=item;} else {while (c && c->next) c=c->next; suffix_object(c,item);}}
-void   cJSON_AddItemToObject(cJSON *object,const char *string,cJSON *item)	{if (!item) return; if (item->string) cJSON_free(item->string);item->string=cJSON_strdup(string);cJSON_AddItemToArray(object,item);}
-void	cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item)						{cJSON_AddItemToArray(array,create_reference(item));}
-void	cJSON_AddItemReferenceToObject(cJSON *object,const char *string,cJSON *item)	{cJSON_AddItemToObject(object,string,create_reference(item));}
-
-cJSON *cJSON_DetachItemFromArray(cJSON *array,int which)			{cJSON *c=array->child;while (c && which>0) c=c->next,which--;if (!c) return 0;
-	if (c->prev) c->prev->next=c->next;if (c->next) c->next->prev=c->prev;if (c==array->child) array->child=c->next;c->prev=c->next=0;return c;}
-void   cJSON_DeleteItemFromArray(cJSON *array,int which)			{cJSON_Delete(cJSON_DetachItemFromArray(array,which));}
-cJSON *cJSON_DetachItemFromObject(cJSON *object,const char *string) {int i=0;cJSON *c=object->child;while (c && cJSON_strcasecmp(c->string,string)) i++,c=c->next;if (c) return cJSON_DetachItemFromArray(object,i);return 0;}
-void   cJSON_DeleteItemFromObject(cJSON *object,const char *string) {cJSON_Delete(cJSON_DetachItemFromObject(object,string));}
-
-/* Replace array/object items with new ones. */
-void   cJSON_ReplaceItemInArray(cJSON *array,int which,cJSON *newitem)		{cJSON *c=array->child;while (c && which>0) c=c->next,which--;if (!c) return;
-	newitem->next=c->next;newitem->prev=c->prev;if (newitem->next) newitem->next->prev=newitem;
-	if (c==array->child) array->child=newitem; else newitem->prev->next=newitem;c->next=c->prev=0;cJSON_Delete(c);}
-void   cJSON_ReplaceItemInObject(cJSON *object,const char *string,cJSON *newitem){int i=0;cJSON *c=object->child;while(c && cJSON_strcasecmp(c->string,string))i++,c=c->next;if(c){newitem->string=cJSON_strdup(string);cJSON_ReplaceItemInArray(object,i,newitem);}}
-
-/* Create basic types: */
-cJSON *cJSON_CreateNull(void)					{cJSON *item=cJSON_New_Item();if(item)item->type=cJSON_NULL;return item;}
-cJSON *cJSON_CreateTrue(void)					{cJSON *item=cJSON_New_Item();if(item)item->type=cJSON_True;return item;}
-cJSON *cJSON_CreateFalse(void)					{cJSON *item=cJSON_New_Item();if(item)item->type=cJSON_False;return item;}
-cJSON *cJSON_CreateBool(int b)					{cJSON *item=cJSON_New_Item();if(item)item->type=b?cJSON_True:cJSON_False;return item;}
-cJSON *cJSON_CreateNumber(double num)			{cJSON *item=cJSON_New_Item();if(item){item->type=cJSON_Number;item->valuedouble=num;item->valueint=(int)num;}return item;}
-cJSON *cJSON_CreateString(const char *string)	{cJSON *item=cJSON_New_Item();if(item){item->type=cJSON_String;item->valuestring=cJSON_strdup(string);}return item;}
-cJSON *cJSON_CreateArray(void)					{cJSON *item=cJSON_New_Item();if(item)item->type=cJSON_Array;return item;}
-cJSON *cJSON_CreateObject(void)					{cJSON *item=cJSON_New_Item();if(item)item->type=cJSON_Object;return item;}
-
-/* Create Arrays: */
-cJSON *cJSON_CreateIntArray(const int *numbers,int count)		{int i;cJSON *n=0,*p=0,*a=cJSON_CreateArray();for(i=0;a && i<count;i++){n=cJSON_CreateNumber(numbers[i]);if(!i)a->child=n;else suffix_object(p,n);p=n;}return a;}
-cJSON *cJSON_CreateFloatArray(const float *numbers,int count)	{int i;cJSON *n=0,*p=0,*a=cJSON_CreateArray();for(i=0;a && i<count;i++){n=cJSON_CreateNumber(numbers[i]);if(!i)a->child=n;else suffix_object(p,n);p=n;}return a;}
-cJSON *cJSON_CreateDoubleArray(const double *numbers,int count)	{int i;cJSON *n=0,*p=0,*a=cJSON_CreateArray();for(i=0;a && i<count;i++){n=cJSON_CreateNumber(numbers[i]);if(!i)a->child=n;else suffix_object(p,n);p=n;}return a;}
-cJSON *cJSON_CreateStringArray(const char **strings,int count)	{int i;cJSON *n=0,*p=0,*a=cJSON_CreateArray();for(i=0;a && i<count;i++){n=cJSON_CreateString(strings[i]);if(!i)a->child=n;else suffix_object(p,n);p=n;}return a;}
-
-/* Duplication */
-cJSON *cJSON_Duplicate(cJSON *item,int recurse)
-{
-	cJSON *newitem,*cptr,*nptr=0,*newchild;
-	/* Bail on bad ptr */
-	if (!item) return 0;
-	/* Create new item */
-	newitem=cJSON_New_Item();
-	if (!newitem) return 0;
-	/* Copy over all vars */
-	newitem->type=item->type&(~cJSON_IsReference),newitem->valueint=item->valueint,newitem->valuedouble=item->valuedouble;
-	if (item->valuestring)	{newitem->valuestring=cJSON_strdup(item->valuestring);	if (!newitem->valuestring)	{cJSON_Delete(newitem);return 0;}}
-	if (item->string)		{newitem->string=cJSON_strdup(item->string);			if (!newitem->string)		{cJSON_Delete(newitem);return 0;}}
-	/* If non-recursive, then we're done! */
-	if (!recurse) return newitem;
-	/* Walk the ->next chain for the child. */
-	cptr=item->child;
-	while (cptr)
-	{
-		newchild=cJSON_Duplicate(cptr,1);		/* Duplicate (with recurse) each item in the ->next chain */
-		if (!newchild) {cJSON_Delete(newitem);return 0;}
-		if (nptr)	{nptr->next=newchild,newchild->prev=nptr;nptr=newchild;}	/* If newitem->child already set, then crosswire ->prev and ->next and move on */
-		else		{newitem->child=newchild;nptr=newchild;}					/* Set newitem->child and move to it */
-		cptr=cptr->next;
-	}
-	return newitem;
-}
-
-void cJSON_Minify(char *json)
-{
-	char *into=json;
-	while (*json)
-	{
-		if (*json==' ') json++;
-		else if (*json=='\t') json++;	/* Whitespace characters. */
-		else if (*json=='\r') json++;
-		else if (*json=='\n') json++;
-		else if (*json=='/' && json[1]=='/')  while (*json && *json!='\n') json++;	/* double-slash comments, to end of line. */
-		else if (*json=='/' && json[1]=='*') {while (*json && !(*json=='*' && json[1]=='/')) json++;json+=2;}	/* multiline comments. */
-		else if (*json=='\"'){*into++=*json++;while (*json && *json!='\"'){if (*json=='\\') *into++=*json++;*into++=*json++;}*into++=*json++;} /* string literals, which are \" sensitive. */
-		else *into++=*json++;			/* All other characters. */
-	}
-	*into=0;	/* and null-terminate. */
-}
diff --git a/third_party/cJSON/cJSON.h b/third_party/cJSON/cJSON.h
deleted file mode 100644
index 9bfc54f..0000000
--- a/third_party/cJSON/cJSON.h
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
-  Copyright (c) 2009 Dave Gamble
-
-  Permission is hereby granted, free of charge, to any person obtaining a copy
-  of this software and associated documentation files (the "Software"), to deal
-  in the Software without restriction, including without limitation the rights
-  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-  copies of the Software, and to permit persons to whom the Software is
-  furnished to do so, subject to the following conditions:
-
-  The above copyright notice and this permission notice shall be included in
-  all copies or substantial portions of the Software.
-
-  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-  THE SOFTWARE.
-*/
-
-#ifndef cJSON__h
-#define cJSON__h
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-/* cJSON Types: */
-#define cJSON_False 0
-#define cJSON_True 1
-#define cJSON_NULL 2
-#define cJSON_Number 3
-#define cJSON_String 4
-#define cJSON_Array 5
-#define cJSON_Object 6
-
-#define cJSON_IsReference 256
-
-/* The cJSON structure: */
-typedef struct cJSON {
-	struct cJSON *next,*prev;	/* next/prev allow you to walk array/object chains. Alternatively, use GetArraySize/GetArrayItem/GetObjectItem */
-	struct cJSON *child;		/* An array or object item will have a child pointer pointing to a chain of the items in the array/object. */
-
-	int type;					/* The type of the item, as above. */
-
-	char *valuestring;			/* The item's string, if type==cJSON_String */
-	int valueint;				/* The item's number, if type==cJSON_Number */
-	double valuedouble;			/* The item's number, if type==cJSON_Number */
-
-	char *string;				/* The item's name string, if this item is the child of, or is in the list of subitems of an object. */
-} cJSON;
-
-typedef struct cJSON_Hooks {
-      void *(*malloc_fn)(size_t sz);
-      void (*free_fn)(void *ptr);
-} cJSON_Hooks;
-
-/* Supply malloc, realloc and free functions to cJSON */
-extern void cJSON_InitHooks(cJSON_Hooks* hooks);
-
-
-/* Supply a block of JSON, and this returns a cJSON object you can interrogate. Call cJSON_Delete when finished. */
-extern cJSON *cJSON_Parse(const char *value);
-/* Render a cJSON entity to text for transfer/storage. Free the char* when finished. */
-extern char  *cJSON_Print(cJSON *item);
-/* Render a cJSON entity to text for transfer/storage without any formatting. Free the char* when finished. */
-extern char  *cJSON_PrintUnformatted(cJSON *item);
-/* Delete a cJSON entity and all subentities. */
-extern void   cJSON_Delete(cJSON *c);
-
-/* Returns the number of items in an array (or object). */
-extern int	  cJSON_GetArraySize(cJSON *array);
-/* Retrieve item number "item" from array "array". Returns NULL if unsuccessful. */
-extern cJSON *cJSON_GetArrayItem(cJSON *array,int item);
-/* Get item "string" from object. Case insensitive. */
-extern cJSON *cJSON_GetObjectItem(cJSON *object,const char *string);
-
-/* For analysing failed parses. This returns a pointer to the parse error. You'll probably need to look a few chars back to make sense of it. Defined when cJSON_Parse() returns 0. 0 when cJSON_Parse() succeeds. */
-extern const char *cJSON_GetErrorPtr(void);
-
-/* These calls create a cJSON item of the appropriate type. */
-extern cJSON *cJSON_CreateNull(void);
-extern cJSON *cJSON_CreateTrue(void);
-extern cJSON *cJSON_CreateFalse(void);
-extern cJSON *cJSON_CreateBool(int b);
-extern cJSON *cJSON_CreateNumber(double num);
-extern cJSON *cJSON_CreateString(const char *string);
-extern cJSON *cJSON_CreateArray(void);
-extern cJSON *cJSON_CreateObject(void);
-
-/* These utilities create an Array of count items. */
-extern cJSON *cJSON_CreateIntArray(const int *numbers,int count);
-extern cJSON *cJSON_CreateFloatArray(const float *numbers,int count);
-extern cJSON *cJSON_CreateDoubleArray(const double *numbers,int count);
-extern cJSON *cJSON_CreateStringArray(const char **strings,int count);
-
-/* Append item to the specified array/object. */
-extern void cJSON_AddItemToArray(cJSON *array, cJSON *item);
-extern void	cJSON_AddItemToObject(cJSON *object,const char *string,cJSON *item);
-/* Append reference to item to the specified array/object. Use this when you want to add an existing cJSON to a new cJSON, but don't want to corrupt your existing cJSON. */
-extern void cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item);
-extern void	cJSON_AddItemReferenceToObject(cJSON *object,const char *string,cJSON *item);
-
-/* Remove/Detatch items from Arrays/Objects. */
-extern cJSON *cJSON_DetachItemFromArray(cJSON *array,int which);
-extern void   cJSON_DeleteItemFromArray(cJSON *array,int which);
-extern cJSON *cJSON_DetachItemFromObject(cJSON *object,const char *string);
-extern void   cJSON_DeleteItemFromObject(cJSON *object,const char *string);
-
-/* Update array items. */
-extern void cJSON_ReplaceItemInArray(cJSON *array,int which,cJSON *newitem);
-extern void cJSON_ReplaceItemInObject(cJSON *object,const char *string,cJSON *newitem);
-
-/* Duplicate a cJSON item */
-extern cJSON *cJSON_Duplicate(cJSON *item,int recurse);
-/* Duplicate will create a new, identical cJSON item to the one you pass, in new memory that will
-need to be released. With recurse!=0, it will duplicate any children connected to the item.
-The item->next and ->prev pointers are always zero on return from Duplicate. */
-
-/* ParseWithOpts allows you to require (and check) that the JSON is null terminated, and to retrieve the pointer to the final byte parsed. */
-extern cJSON *cJSON_ParseWithOpts(const char *value,const char **return_parse_end,int require_null_terminated);
-
-extern void cJSON_Minify(char *json);
-
-/* Macros for creating things quickly. */
-#define cJSON_AddNullToObject(object,name)		cJSON_AddItemToObject(object, name, cJSON_CreateNull())
-#define cJSON_AddTrueToObject(object,name)		cJSON_AddItemToObject(object, name, cJSON_CreateTrue())
-#define cJSON_AddFalseToObject(object,name)		cJSON_AddItemToObject(object, name, cJSON_CreateFalse())
-#define cJSON_AddBoolToObject(object,name,b)	cJSON_AddItemToObject(object, name, cJSON_CreateBool(b))
-#define cJSON_AddNumberToObject(object,name,n)	cJSON_AddItemToObject(object, name, cJSON_CreateNumber(n))
-#define cJSON_AddStringToObject(object,name,s)	cJSON_AddItemToObject(object, name, cJSON_CreateString(s))
-
-/* When assigning an integer value, it needs to be propagated to valuedouble too. */
-#define cJSON_SetIntValue(object,val)			((object)?(object)->valueint=(object)->valuedouble=(val):(val))
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/third_party/cJSON/test.c b/third_party/cJSON/test.c
deleted file mode 100644
index b308a92..0000000
--- a/third_party/cJSON/test.c
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
-  Copyright (c) 2009 Dave Gamble
- 
-  Permission is hereby granted, free of charge, to any person obtaining a copy
-  of this software and associated documentation files (the "Software"), to deal
-  in the Software without restriction, including without limitation the rights
-  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-  copies of the Software, and to permit persons to whom the Software is
-  furnished to do so, subject to the following conditions:
- 
-  The above copyright notice and this permission notice shall be included in
-  all copies or substantial portions of the Software.
- 
-  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-  THE SOFTWARE.
-*/
-
-#include <stdio.h>
-#include <stdlib.h>
-#include "cJSON.h"
-
-/* Parse text to JSON, then render back to text, and print! */
-void doit(char *text)
-{
-	char *out;cJSON *json;
-	
-	json=cJSON_Parse(text);
-	if (!json) {printf("Error before: [%s]\n",cJSON_GetErrorPtr());}
-	else
-	{
-		out=cJSON_Print(json);
-		cJSON_Delete(json);
-		printf("%s\n",out);
-		free(out);
-	}
-}
-
-/* Read a file, parse, render back, etc. */
-void dofile(char *filename)
-{
-	FILE *f=fopen(filename,"rb");fseek(f,0,SEEK_END);long len=ftell(f);fseek(f,0,SEEK_SET);
-	char *data=(char*)malloc(len+1);fread(data,1,len,f);fclose(f);
-	doit(data);
-	free(data);
-}
-
-/* Used by some code below as an example datatype. */
-struct record {const char *precision;double lat,lon;const char *address,*city,*state,*zip,*country; };
-
-/* Create a bunch of objects as demonstration. */
-void create_objects()
-{
-	cJSON *root,*fmt,*img,*thm,*fld;char *out;int i;	/* declare a few. */
-
-	/* Here we construct some JSON standards, from the JSON site. */
-	
-	/* Our "Video" datatype: */
-	root=cJSON_CreateObject();	
-	cJSON_AddItemToObject(root, "name", cJSON_CreateString("Jack (\"Bee\") Nimble"));
-	cJSON_AddItemToObject(root, "format", fmt=cJSON_CreateObject());
-	cJSON_AddStringToObject(fmt,"type",		"rect");
-	cJSON_AddNumberToObject(fmt,"width",		1920);
-	cJSON_AddNumberToObject(fmt,"height",		1080);
-	cJSON_AddFalseToObject (fmt,"interlace");
-	cJSON_AddNumberToObject(fmt,"frame rate",	24);
-	
-	out=cJSON_Print(root);	cJSON_Delete(root);	printf("%s\n",out);	free(out);	/* Print to text, Delete the cJSON, print it, release the string. */
-
-	/* Our "days of the week" array: */
-	const char *strings[7]={"Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"};
-	root=cJSON_CreateStringArray(strings,7);
-
-	out=cJSON_Print(root);	cJSON_Delete(root);	printf("%s\n",out);	free(out);
-
-	/* Our matrix: */
-	int numbers[3][3]={{0,-1,0},{1,0,0},{0,0,1}};
-	root=cJSON_CreateArray();
-	for (i=0;i<3;i++) cJSON_AddItemToArray(root,cJSON_CreateIntArray(numbers[i],3));
-
-/*	cJSON_ReplaceItemInArray(root,1,cJSON_CreateString("Replacement")); */
-	
-	out=cJSON_Print(root);	cJSON_Delete(root);	printf("%s\n",out);	free(out);
-
-
-	/* Our "gallery" item: */
-	int ids[4]={116,943,234,38793};
-	root=cJSON_CreateObject();
-	cJSON_AddItemToObject(root, "Image", img=cJSON_CreateObject());
-	cJSON_AddNumberToObject(img,"Width",800);
-	cJSON_AddNumberToObject(img,"Height",600);
-	cJSON_AddStringToObject(img,"Title","View from 15th Floor");
-	cJSON_AddItemToObject(img, "Thumbnail", thm=cJSON_CreateObject());
-	cJSON_AddStringToObject(thm, "Url", "http:/*www.example.com/image/481989943");
-	cJSON_AddNumberToObject(thm,"Height",125);
-	cJSON_AddStringToObject(thm,"Width","100");
-	cJSON_AddItemToObject(img,"IDs", cJSON_CreateIntArray(ids,4));
-
-	out=cJSON_Print(root);	cJSON_Delete(root);	printf("%s\n",out);	free(out);
-
-	/* Our array of "records": */
-	struct record fields[2]={
-		{"zip",37.7668,-1.223959e+2,"","SAN FRANCISCO","CA","94107","US"},
-		{"zip",37.371991,-1.22026e+2,"","SUNNYVALE","CA","94085","US"}};
-
-	root=cJSON_CreateArray();
-	for (i=0;i<2;i++)
-	{
-		cJSON_AddItemToArray(root,fld=cJSON_CreateObject());
-		cJSON_AddStringToObject(fld, "precision", fields[i].precision);
-		cJSON_AddNumberToObject(fld, "Latitude", fields[i].lat);
-		cJSON_AddNumberToObject(fld, "Longitude", fields[i].lon);
-		cJSON_AddStringToObject(fld, "Address", fields[i].address);
-		cJSON_AddStringToObject(fld, "City", fields[i].city);
-		cJSON_AddStringToObject(fld, "State", fields[i].state);
-		cJSON_AddStringToObject(fld, "Zip", fields[i].zip);
-		cJSON_AddStringToObject(fld, "Country", fields[i].country);
-	}
-	
-/*	cJSON_ReplaceItemInObject(cJSON_GetArrayItem(root,1),"City",cJSON_CreateIntArray(ids,4)); */
-	
-	out=cJSON_Print(root);	cJSON_Delete(root);	printf("%s\n",out);	free(out);
-
-}
-
-int main (int argc, const char * argv[]) {
-	/* a bunch of json: */
-	char text1[]="{\n\"name\": \"Jack (\\\"Bee\\\") Nimble\", \n\"format\": {\"type\":       \"rect\", \n\"width\":      1920, \n\"height\":     1080, \n\"interlace\":  false,\"frame rate\": 24\n}\n}";	
-	char text2[]="[\"Sunday\", \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\"]";
-	char text3[]="[\n    [0, -1, 0],\n    [1, 0, 0],\n    [0, 0, 1]\n	]\n";
-	char text4[]="{\n		\"Image\": {\n			\"Width\":  800,\n			\"Height\": 600,\n			\"Title\":  \"View from 15th Floor\",\n			\"Thumbnail\": {\n				\"Url\":    \"http:/*www.example.com/image/481989943\",\n				\"Height\": 125,\n				\"Width\":  \"100\"\n			},\n			\"IDs\": [116, 943, 234, 38793]\n		}\n	}";
-	char text5[]="[\n	 {\n	 \"precision\": \"zip\",\n	 \"Latitude\":  37.7668,\n	 \"Longitude\": -122.3959,\n	 \"Address\":   \"\",\n	 \"City\":      \"SAN FRANCISCO\",\n	 \"State\":     \"CA\",\n	 \"Zip\":       \"94107\",\n	 \"Country\":   \"US\"\n	 },\n	 {\n	 \"precision\": \"zip\",\n	 \"Latitude\":  37.371991,\n	 \"Longitude\": -122.026020,\n	 \"Address\":   \"\",\n	 \"City\":      \"SUNNYVALE\",\n	 \"State\":     \"CA\",\n	 \"Zip\":       \"94085\",\n	 \"Country\":   \"US\"\n	 }\n	 ]";
-
-	/* Process each json textblock by parsing, then rebuilding: */
-	doit(text1);
-	doit(text2);	
-	doit(text3);
-	doit(text4);
-	doit(text5);
-
-	/* Parse standard testfiles: */
-/*	dofile("../../tests/test1"); */
-/*	dofile("../../tests/test2"); */
-/*	dofile("../../tests/test3"); */
-/*	dofile("../../tests/test4"); */
-/*	dofile("../../tests/test5"); */
-
-	/* Now some samplecode for building objects concisely: */
-	create_objects();
-	
-	return 0;
-}
diff --git a/third_party/cJSON/tests/test1 b/third_party/cJSON/tests/test1
deleted file mode 100644
index eacfbf5..0000000
--- a/third_party/cJSON/tests/test1
+++ /dev/null
@@ -1,22 +0,0 @@
-{
-    "glossary": {
-        "title": "example glossary",
-		"GlossDiv": {
-            "title": "S",
-			"GlossList": {
-                "GlossEntry": {
-                    "ID": "SGML",
-					"SortAs": "SGML",
-					"GlossTerm": "Standard Generalized Markup Language",
-					"Acronym": "SGML",
-					"Abbrev": "ISO 8879:1986",
-					"GlossDef": {
-                        "para": "A meta-markup language, used to create markup languages such as DocBook.",
-						"GlossSeeAlso": ["GML", "XML"]
-                    },
-					"GlossSee": "markup"
-                }
-            }
-        }
-    }
-}
diff --git a/third_party/cJSON/tests/test2 b/third_party/cJSON/tests/test2
deleted file mode 100644
index 5600991..0000000
--- a/third_party/cJSON/tests/test2
+++ /dev/null
@@ -1,11 +0,0 @@
-{"menu": {
-  "id": "file",
-  "value": "File",
-  "popup": {
-    "menuitem": [
-      {"value": "New", "onclick": "CreateNewDoc()"},
-      {"value": "Open", "onclick": "OpenDoc()"},
-      {"value": "Close", "onclick": "CloseDoc()"}
-    ]
-  }
-}}
diff --git a/third_party/cJSON/tests/test3 b/third_party/cJSON/tests/test3
deleted file mode 100644
index 5662b37..0000000
--- a/third_party/cJSON/tests/test3
+++ /dev/null
@@ -1,26 +0,0 @@
-{"widget": {
-    "debug": "on",
-    "window": {
-        "title": "Sample Konfabulator Widget",
-        "name": "main_window",
-        "width": 500,
-        "height": 500
-    },
-    "image": { 
-        "src": "Images/Sun.png",
-        "name": "sun1",
-        "hOffset": 250,
-        "vOffset": 250,
-        "alignment": "center"
-    },
-    "text": {
-        "data": "Click Here",
-        "size": 36,
-        "style": "bold",
-        "name": "text1",
-        "hOffset": 250,
-        "vOffset": 100,
-        "alignment": "center",
-        "onMouseUp": "sun1.opacity = (sun1.opacity / 100) * 90;"
-    }
-}}    
\ No newline at end of file
diff --git a/third_party/cJSON/tests/test4 b/third_party/cJSON/tests/test4
deleted file mode 100644
index d540b57..0000000
--- a/third_party/cJSON/tests/test4
+++ /dev/null
@@ -1,88 +0,0 @@
-{"web-app": {
-  "servlet": [   
-    {
-      "servlet-name": "cofaxCDS",
-      "servlet-class": "org.cofax.cds.CDSServlet",
-      "init-param": {
-        "configGlossary:installationAt": "Philadelphia, PA",
-        "configGlossary:adminEmail": "ksm@pobox.com",
-        "configGlossary:poweredBy": "Cofax",
-        "configGlossary:poweredByIcon": "/images/cofax.gif",
-        "configGlossary:staticPath": "/content/static",
-        "templateProcessorClass": "org.cofax.WysiwygTemplate",
-        "templateLoaderClass": "org.cofax.FilesTemplateLoader",
-        "templatePath": "templates",
-        "templateOverridePath": "",
-        "defaultListTemplate": "listTemplate.htm",
-        "defaultFileTemplate": "articleTemplate.htm",
-        "useJSP": false,
-        "jspListTemplate": "listTemplate.jsp",
-        "jspFileTemplate": "articleTemplate.jsp",
-        "cachePackageTagsTrack": 200,
-        "cachePackageTagsStore": 200,
-        "cachePackageTagsRefresh": 60,
-        "cacheTemplatesTrack": 100,
-        "cacheTemplatesStore": 50,
-        "cacheTemplatesRefresh": 15,
-        "cachePagesTrack": 200,
-        "cachePagesStore": 100,
-        "cachePagesRefresh": 10,
-        "cachePagesDirtyRead": 10,
-        "searchEngineListTemplate": "forSearchEnginesList.htm",
-        "searchEngineFileTemplate": "forSearchEngines.htm",
-        "searchEngineRobotsDb": "WEB-INF/robots.db",
-        "useDataStore": true,
-        "dataStoreClass": "org.cofax.SqlDataStore",
-        "redirectionClass": "org.cofax.SqlRedirection",
-        "dataStoreName": "cofax",
-        "dataStoreDriver": "com.microsoft.jdbc.sqlserver.SQLServerDriver",
-        "dataStoreUrl": "jdbc:microsoft:sqlserver://LOCALHOST:1433;DatabaseName=goon",
-        "dataStoreUser": "sa",
-        "dataStorePassword": "dataStoreTestQuery",
-        "dataStoreTestQuery": "SET NOCOUNT ON;select test='test';",
-        "dataStoreLogFile": "/usr/local/tomcat/logs/datastore.log",
-        "dataStoreInitConns": 10,
-        "dataStoreMaxConns": 100,
-        "dataStoreConnUsageLimit": 100,
-        "dataStoreLogLevel": "debug",
-        "maxUrlLength": 500}},
-    {
-      "servlet-name": "cofaxEmail",
-      "servlet-class": "org.cofax.cds.EmailServlet",
-      "init-param": {
-      "mailHost": "mail1",
-      "mailHostOverride": "mail2"}},
-    {
-      "servlet-name": "cofaxAdmin",
-      "servlet-class": "org.cofax.cds.AdminServlet"},
- 
-    {
-      "servlet-name": "fileServlet",
-      "servlet-class": "org.cofax.cds.FileServlet"},
-    {
-      "servlet-name": "cofaxTools",
-      "servlet-class": "org.cofax.cms.CofaxToolsServlet",
-      "init-param": {
-        "templatePath": "toolstemplates/",
-        "log": 1,
-        "logLocation": "/usr/local/tomcat/logs/CofaxTools.log",
-        "logMaxSize": "",
-        "dataLog": 1,
-        "dataLogLocation": "/usr/local/tomcat/logs/dataLog.log",
-        "dataLogMaxSize": "",
-        "removePageCache": "/content/admin/remove?cache=pages&id=",
-        "removeTemplateCache": "/content/admin/remove?cache=templates&id=",
-        "fileTransferFolder": "/usr/local/tomcat/webapps/content/fileTransferFolder",
-        "lookInContext": 1,
-        "adminGroupID": 4,
-        "betaServer": true}}],
-  "servlet-mapping": {
-    "cofaxCDS": "/",
-    "cofaxEmail": "/cofaxutil/aemail/*",
-    "cofaxAdmin": "/admin/*",
-    "fileServlet": "/static/*",
-    "cofaxTools": "/tools/*"},
- 
-  "taglib": {
-    "taglib-uri": "cofax.tld",
-    "taglib-location": "/WEB-INF/tlds/cofax.tld"}}}
\ No newline at end of file
diff --git a/third_party/cJSON/tests/test5 b/third_party/cJSON/tests/test5
deleted file mode 100644
index 49980ca..0000000
--- a/third_party/cJSON/tests/test5
+++ /dev/null
@@ -1,27 +0,0 @@
-{"menu": {
-    "header": "SVG Viewer",
-    "items": [
-        {"id": "Open"},
-        {"id": "OpenNew", "label": "Open New"},
-        null,
-        {"id": "ZoomIn", "label": "Zoom In"},
-        {"id": "ZoomOut", "label": "Zoom Out"},
-        {"id": "OriginalView", "label": "Original View"},
-        null,
-        {"id": "Quality"},
-        {"id": "Pause"},
-        {"id": "Mute"},
-        null,
-        {"id": "Find", "label": "Find..."},
-        {"id": "FindAgain", "label": "Find Again"},
-        {"id": "Copy"},
-        {"id": "CopyAgain", "label": "Copy Again"},
-        {"id": "CopySVG", "label": "Copy SVG"},
-        {"id": "ViewSVG", "label": "View SVG"},
-        {"id": "ViewSource", "label": "View Source"},
-        {"id": "SaveAs", "label": "Save As"},
-        null,
-        {"id": "Help"},
-        {"id": "About", "label": "About Adobe CVG Viewer..."}
-    ]
-}}
diff --git a/tools/buildgen/build-cleaner.py b/tools/buildgen/build-cleaner.py
index f930736..4992beb 100755
--- a/tools/buildgen/build-cleaner.py
+++ b/tools/buildgen/build-cleaner.py
@@ -33,9 +33,9 @@
   for name in ['public_headers', 'headers', 'src']:
     if name not in indict: continue
     inlist = indict[name]
-    protos = set(x for x in inlist if os.path.splitext(x)[1] == '.proto')
+    protos = list(x for x in inlist if os.path.splitext(x)[1] == '.proto')
     others = set(x for x in inlist if x not in protos)
-    indict[name] = sorted(protos) + sorted(others)
+    indict[name] = protos + sorted(others)
   return rebuild_as_ordered_dict(indict, _ELEM_KEYS)
 
 for filename in sys.argv[1:]:
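
The build-cleaner change keeps .proto entries in their listed order and only sorts the remaining files. A quick stand-alone illustration of the new ordering, using a made-up input list:

  import os

  inlist = ['b.proto', 'z.c', 'a.proto', 'a.c']      # hypothetical build entry
  protos = list(x for x in inlist if os.path.splitext(x)[1] == '.proto')
  others = set(x for x in inlist if x not in protos)

  # .proto files keep their original relative order; everything else is sorted.
  print(protos + sorted(others))   # ['b.proto', 'a.proto', 'a.c', 'z.c']
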
diff --git a/tools/dockerfile/grpc_cxx/Dockerfile b/tools/dockerfile/grpc_cxx/Dockerfile
index 141a20a..43da9fe 100644
--- a/tools/dockerfile/grpc_cxx/Dockerfile
+++ b/tools/dockerfile/grpc_cxx/Dockerfile
@@ -21,4 +21,6 @@
   && make interop_client \
   && make interop_server
 
+ADD service_account service_account
+
 CMD ["/var/local/git/grpc/bins/opt/interop_server", "--enable_ssl", "--port=8010"]
diff --git a/tools/dockerfile/grpc_java/Dockerfile b/tools/dockerfile/grpc_java/Dockerfile
index f234f51..a5508ca 100644
--- a/tools/dockerfile/grpc_java/Dockerfile
+++ b/tools/dockerfile/grpc_java/Dockerfile
@@ -1,13 +1,11 @@
 # Dockerfile for the gRPC Java dev image
 FROM grpc/java_base
 
-RUN  cd /var/local/git/grpc-java/lib/okhttp && \
-  mvn -pl okhttp -am install
-RUN  cd /var/local/git/grpc-java/lib/netty && \
-  mvn -pl codec-http2 -am -DskipTests install
+RUN git clone --recursive --depth 1 git@github.com:google/grpc-java.git /var/local/git/grpc-java
+RUN cd /var/local/git/grpc-java/lib/netty && \
+  mvn -pl codec-http2 -am -DskipTests install clean
 RUN cd /var/local/git/grpc-java && \
-  protoc --version>ver.txt && \
-  mvn install
+  ./gradlew build
 
 # Specify the default command such that the interop server runs on its known testing port
 CMD ["/var/local/git/grpc-java/run-test-server.sh", "--use_tls=true", "--port=8030"]
diff --git a/tools/dockerfile/grpc_java_base/Dockerfile b/tools/dockerfile/grpc_java_base/Dockerfile
index 3271d1b..73382ed 100644
--- a/tools/dockerfile/grpc_java_base/Dockerfile
+++ b/tools/dockerfile/grpc_java_base/Dockerfile
@@ -9,35 +9,36 @@
 RUN echo "deb http://ppa.launchpad.net/webupd8team/java/ubuntu trusty main" | tee /etc/apt/sources.list.d/webupd8team-java.list
 RUN echo "deb-src http://ppa.launchpad.net/webupd8team/java/ubuntu trusty main" | tee -a /etc/apt/sources.list.d/webupd8team-java.list
 RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys EEA14886
-RUN apt-get update && apt-get -y install oracle-java8-installer
+RUN apt-get update && apt-get -y install oracle-java8-installer && \
+  apt-get clean && rm -r /var/cache/oracle-jdk8-installer/
 
 # Install maven
-RUN wget http://mirror.olnevhost.net/pub/apache/maven/binaries/apache-maven-3.2.1-bin.tar.gz && \
-  tar xvf apache-maven-3.2.1-bin.tar.gz -C /var/local
+RUN wget -O - http://mirror.olnevhost.net/pub/apache/maven/binaries/apache-maven-3.2.1-bin.tar.gz | \
+  tar xz -C /var/local
 
 ENV JAVA_HOME /usr/lib/jvm/java-8-oracle
 ENV M2_HOME /var/local/apache-maven-3.2.1
 ENV PATH $PATH:$JAVA_HOME/bin:$M2_HOME/bin
 ENV LD_LIBRARY_PATH /usr/local/lib
 
+# Get the protobuf source from GitHub and install it
+RUN wget -O - https://github.com/google/protobuf/releases/download/v2.6.1/protobuf-2.6.1.tar.bz2 | \
+  tar xj && \
+  cd protobuf-2.6.1 && \
+  ./configure --prefix=/usr && \
+  make -j12 && make check && make install && \
+  rm -r "$(pwd)"
+
 # Install a GitHub SSH service credential that gives access to the GitHub repo while it's private
 # TODO: remove this once the repo is public
-ADD .ssh .ssh
-RUN chmod 600 .ssh/github.rsa
-RUN mkdir -p $HOME/.ssh && echo 'Host github.com' > $HOME/.ssh/config
-RUN echo "    IdentityFile /.ssh/github.rsa" >> $HOME/.ssh/config
-RUN echo 'StrictHostKeyChecking no' >> $HOME/.ssh/config
+COPY .ssh/github.rsa /root/.ssh/id_rsa
+RUN echo 'Host github.com\nStrictHostKeyChecking no' > /root/.ssh/config
 
-# Get the protobuf source from GitHub and install it
-RUN git clone --recursive --branch v2.6.1 git@github.com:google/protobuf.git /var/local/git/protobuf
-RUN cd /var/local/git/protobuf && \
-  ./autogen.sh && \
-  ./configure --prefix=/usr && \
-  make -j12 && make check && make install && make clean
-
-RUN cd /var/local/git/grpc-java/lib/okhttp && \
-  mvn -pl okhttp -am validate
-RUN cd /var/local/git/grpc-java/lib/netty && \
-  mvn -pl codec-http2 -am validate
-RUN cd /var/local/git/grpc-java && \
-  mvn validate
+# Trigger download of as many Maven and Gradle artifacts as possible. We don't build grpc-java
+# because we don't want to install netty
+RUN git clone --recursive --depth 1 git@github.com:google/grpc-java.git && \
+  cd grpc-java/lib/netty && \
+  mvn -pl codec-http2 -am -DskipTests verify && \
+  cd ../.. && \
+  ./gradlew && \
+  rm -r "$(pwd)"
diff --git a/tools/dockerfile/grpc_node/Dockerfile b/tools/dockerfile/grpc_node/Dockerfile
new file mode 100644
index 0000000..baec0e2
--- /dev/null
+++ b/tools/dockerfile/grpc_node/Dockerfile
@@ -0,0 +1,14 @@
+# Dockerfile for gRPC Node
+FROM grpc/node_base
+
+# Update the C library
+RUN cd /var/local/git/grpc \
+  && git pull --recurse-submodules \
+  && git submodule update --init --recursive
+
+# Install the C core.
+RUN make install_c -C /var/local/git/grpc
+
+RUN cd /var/local/git/grpc/src/node && npm install && node-gyp rebuild
+
+CMD ["/usr/bin/nodejs", "/var/local/git/grpc/src/node/interop/interop_server.js", "--use_tls=true", "--port 8040"]
\ No newline at end of file
diff --git a/tools/dockerfile/grpc_node_base/Dockerfile b/tools/dockerfile/grpc_node_base/Dockerfile
new file mode 100644
index 0000000..28bd7b2
--- /dev/null
+++ b/tools/dockerfile/grpc_node_base/Dockerfile
@@ -0,0 +1,22 @@
+# Base Dockerfile for gRPC Node.
+#
+# Includes Node installation dependencies
+FROM grpc/base
+
+RUN curl -sL https://deb.nodesource.com/setup | bash -
+
+RUN apt-get update && apt-get install -y nodejs
+
+RUN npm install -g node-gyp
+
+# Get the source from GitHub; this gets the protobuf library as well
+RUN git clone git@github.com:google/grpc.git /var/local/git/grpc
+RUN cd /var/local/git/grpc && \
+  git pull --recurse-submodules && \
+  git submodule update --init --recursive
+
+# Build the C core
+RUN make static_c shared_c -j12 -C /var/local/git/grpc
+
+# Define the default command.
+CMD ["bash"]
\ No newline at end of file
diff --git a/tools/dockerfile/grpc_ruby/Dockerfile b/tools/dockerfile/grpc_ruby/Dockerfile
index f01f81d..c84548c 100644
--- a/tools/dockerfile/grpc_ruby/Dockerfile
+++ b/tools/dockerfile/grpc_ruby/Dockerfile
@@ -12,14 +12,8 @@
 # Build the C core.
 RUN make install_c -C /var/local/git/grpc
 
-# Install the grpc gem locally with its dependencies and build the extension
-RUN /bin/bash -l -c 'cd /var/local/git/grpc/src/ruby && bundle && rake compile:grpc && gem build grpc.gemspec && gem install grpc'
-
-# TODO add a command to run the unittest tests when the bug below is fixed
-# - the tests fail due to an error in the C threading library:
-#   they fail with 'ruby: __pthread_mutex_cond_lock_adjust for unknown reasons' at the end of a testcase
-# - however, the interop server and client run OK, so this bug can be investigated
-# RUN /bin/bash -l -c 'cd /var/local/git/grpc/src/ruby && bundle && rake'
+# Build ruby gRPC and run its tests
+RUN /bin/bash -l -c 'cd /var/local/git/grpc/src/ruby && bundle && rake'
 
 # Add a cacerts directory containing the Google root pem file, allowing the ruby client to access the production test instance
 ADD cacerts cacerts
diff --git a/tools/dockerfile/grpc_ruby_base/Dockerfile b/tools/dockerfile/grpc_ruby_base/Dockerfile
index b2af9d7..ec4544d 100644
--- a/tools/dockerfile/grpc_ruby_base/Dockerfile
+++ b/tools/dockerfile/grpc_ruby_base/Dockerfile
@@ -39,7 +39,6 @@
 RUN /bin/bash -l -c "rvm install ruby-2.1"
 RUN /bin/bash -l -c "rvm use --default ruby-2.1"
 RUN /bin/bash -l -c "echo 'gem: --no-ri --no-rdoc' > ~/.gemrc"
-RUN /bin/bash -l -c "echo 'source /home/grpc_ruby/.rvm/scripts/rvm' >> ~/.bashrc"
 RUN /bin/bash -l -c "echo 'rvm --default use ruby-2.1' >> ~/.bashrc"
 RUN /bin/bash -l -c "gem install bundler --no-ri --no-rdoc"
 
diff --git a/tools/gce_setup/grpc_docker.sh b/tools/gce_setup/grpc_docker.sh
index 6bb7326..a97cc88 100755
--- a/tools/gce_setup/grpc_docker.sh
+++ b/tools/gce_setup/grpc_docker.sh
@@ -350,7 +350,7 @@
 
   [[ -n $1 ]] && {  # client_type
     case $1 in
-      cxx|go|java|nodejs|php|python|ruby)
+      cxx|go|java|node|php|python|ruby)
         grpc_gen_test_cmd="grpc_interop_gen_$1_cmd"
         declare -F $grpc_gen_test_cmd >> /dev/null || {
           echo "-f: test_func for $1 => $grpc_gen_test_cmd is not defined" 1>&2
@@ -381,7 +381,7 @@
       cxx)    grpc_port=8010 ;;
       go)     grpc_port=8020 ;;
       java)   grpc_port=8030 ;;
-      nodejs) grpc_port=8040 ;;
+      node)   grpc_port=8040 ;;
       python) grpc_port=8050 ;;
       ruby)   grpc_port=8060 ;;
       *) echo "bad server_type: $1" 1>&2; return 1 ;;
@@ -421,7 +421,7 @@
 
   [[ -n $1 ]] && {  # client_type
     case $1 in
-      cxx|go|java|nodejs|php|python|ruby)
+      cxx|go|java|node|php|python|ruby)
         grpc_gen_test_cmd="grpc_cloud_prod_gen_$1_cmd"
         declare -F $grpc_gen_test_cmd >> /dev/null || {
           echo "-f: test_func for $1 => $grpc_gen_test_cmd is not defined" 1>&2
@@ -440,6 +440,55 @@
   }
 }
 
+# checks the positional args and assigns them to variables visible in the caller
+#
+# these are the positional args passed to grpc_cloud_prod_auth_test after option flags
+# are removed
+#
+# three args are expected, in order
+# - test_case
+# - host <the gce docker instance on which to run the test>
+# - client to run
+grpc_cloud_prod_auth_test_args() {
+  grpc_gen_test_cmd="grpc_cloud_prod_auth_"
+  [[ -n $1 ]] && {  # test_case
+    test_case=$1
+    grpc_gen_test_cmd+="$1"
+    shift
+  } || {
+    echo "$FUNCNAME: missing arg: test_case" 1>&2
+    return 1
+  }
+
+  [[ -n $1 ]] && {  # host
+    host=$1
+    shift
+  } || {
+    echo "$FUNCNAME: missing arg: host" 1>&2
+    return 1
+  }
+
+  [[ -n $1 ]] && {  # client_type
+    case $1 in
+      cxx|go|java|nodejs|php|python|ruby)
+        grpc_gen_test_cmd+="_gen_$1_cmd"
+        declare -F $grpc_gen_test_cmd >> /dev/null || {
+          echo "-f: test_func for $1 => $grpc_gen_test_cmd is not defined" 1>&2
+          return 2
+        }
+        shift
+        ;;
+      *)
+        echo "bad client_type: $1" 1>&2
+        return 1
+        ;;
+    esac
+  } || {
+    echo "$FUNCNAME: missing arg: client_type" 1>&2
+    return 1
+  }
+}
+
 _grpc_sync_scripts_args() {
   grpc_gce_script_root='tools/gce_setup'
 
@@ -555,7 +604,7 @@
       cxx)    grpc_port=8010 ;;
       go)     grpc_port=8020 ;;
       java)   grpc_port=8030 ;;
-      nodejs) grpc_port=8040 ;;
+      node)   grpc_port=8040 ;;
       python) grpc_port=8050 ;;
       ruby)   grpc_port=8060 ;;
       *) echo "bad server_type: $1" 1>&2; return 1 ;;
@@ -627,7 +676,7 @@
 #   cxx:    8010
 #   go:     8020
 #   java:   8030
-#   nodejs: 8040
+#   node:   8040
 #   python: 8050
 #   ruby:   8060
 #
@@ -715,6 +764,52 @@
   gcloud compute $project_opt ssh $zone_opt $host --command "$cmd"
 }
 
+# Runs a test command on a docker instance.
+#
+# call-seq:
+#   grpc_cloud_prod_auth_test <test_name> <host> <client_type>
+#
+# requirements:
+#   host is a GCE instance running docker with access to the gRPC docker images
+#   test_name is one of the named gRPC tests [http://go/grpc_interop_tests]
+#   client_type is one of [cxx,go,java,php,python,ruby]
+#
+# it assumes:
+#   that each grpc-imp has a docker image named grpc/<imp>, e.g., grpc/java
+#   a test is run using $ docker run 'path/to/interop_test_bin --flags'
+#   the required images are available on <host>
+#
+# each client_type should have an associated bash func:
+#   grpc_cloud_prod_auth_<test_case>_gen_<client_type>_cmd
+# the func provides the dockerized command for running client_type's test.
+# If no such func is available, tests for that client type cannot be run.
+grpc_cloud_prod_auth_test() {
+  _grpc_ensure_gcloud_ssh || return 1;
+  # declare vars local so that they don't pollute the shell environment
+  # where this func is used.
+
+  local grpc_zone grpc_project dry_run  # set by _grpc_set_project_and_zone
+  #  grpc_cloud_prod_test_args
+  local test_case host grpc_gen_test_cmd
+
+  # set the project zone and check that all necessary args are provided
+  _grpc_set_project_and_zone -f grpc_cloud_prod_auth_test_args "$@" || return 1
+  gce_has_instance $grpc_project $host || return 1;
+
+  local test_case_flag=" --test_case=$test_case"
+  cmd=$($grpc_gen_test_cmd $test_case_flag)
+  [[ -n $cmd ]] || return 1
+
+  local project_opt="--project $grpc_project"
+  local zone_opt="--zone $grpc_zone"
+  local ssh_cmd="bash -l -c \"$cmd\""
+  echo "will run:"
+  echo "  $ssh_cmd"
+  echo "on $host"
+  [[ $dry_run == 1 ]] && return 0  # don't run the command on a dry run
+  gcloud compute $project_opt ssh $zone_opt $host --command "$cmd"
+}
+
 # constructs the full dockerized ruby interop test cmd.
 #
 # call-seq:
@@ -827,6 +922,13 @@
     echo $the_cmd
 }
 
+grpc_interop_gen_node_cmd() {
+  local cmd_prefix="sudo docker run grpc/node";
+  local test_script="/usr/bin/nodejs /var/local/git/grpc/src/node/interop/interop_client.js --use_tls=true";
+  local the_cmd="$cmd_prefix $test_script $@";
+  echo $the_cmd
+}
+
 # constructs the full dockerized cpp interop test cmd.
 #
 #
@@ -841,4 +943,34 @@
     echo $the_cmd
 }
 
-# TODO(grpc-team): add grpc_interop_gen_xxx_cmd for python|cxx|nodejs
+# constructs the full dockerized cpp service account creds auth interop test cmd.
+#
+#
+# call-seq:
+#   flags= .... # generic flags to include in the command
+#   cmd=$($grpc_gen_test_cmd $flags)
+grpc_cloud_prod_auth_service_account_creds_gen_cxx_cmd() {
+    local cmd_prefix="sudo docker run grpc/cxx";
+    local test_script="/var/local/git/grpc/bins/opt/interop_client --enable_ssl";
+    local gfe_flags=" --use_prod_roots --server_port=443 --server_host=grpc-test.sandbox.google.com --server_host_override=grpc-test.sandbox.google.com"
+    local added_gfe_flags=" --service_account_key_file=/service_account/stubbyCloudTestingTest-7dd63462c60c.json --oauth_scope=https://www.googleapis.com/auth/xapi.zoo"
+    local the_cmd="$cmd_prefix $test_script $gfe_flags $added_gfe_flags $@";
+    echo $the_cmd
+}
+
+# constructs the full dockerized cpp compute engine creds auth interop test cmd.
+#
+#
+# call-seq:
+#   flags= .... # generic flags to include in the command
+#   cmd=$($grpc_gen_test_cmd $flags)
+grpc_cloud_prod_auth_compute_engine_creds_gen_cxx_cmd() {
+    local cmd_prefix="sudo docker run grpc/cxx";
+    local test_script="/var/local/git/grpc/bins/opt/interop_client --enable_ssl";
+    local gfe_flags=" --use_prod_roots --server_port=443 --server_host=grpc-test.sandbox.google.com --server_host_override=grpc-test.sandbox.google.com"
+    local added_gfe_flags=" --default_service_account=155450119199-r5aaqa2vqoa9g5mv2m6s3m1l293rlmel@developer.gserviceaccount.com --oauth_scope=https://www.googleapis.com/auth/xapi.zoo"
+    local the_cmd="$cmd_prefix $test_script $gfe_flags $added_gfe_flags $@";
+    echo $the_cmd
+}
+
+# TODO(grpc-team): add grpc_interop_gen_xxx_cmd for python|nodejs
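
For reference, the per-language server ports used by the interop helpers above (with nodejs renamed to node) can be summarised as a small mapping; this is only a restatement of the case statements in this script, not code from the tree:

  # server_type -> interop server port, as hard-coded in grpc_docker.sh
  INTEROP_SERVER_PORTS = {
      'cxx': 8010,
      'go': 8020,
      'java': 8030,
      'node': 8040,   # formerly listed as "nodejs"
      'python': 8050,
      'ruby': 8060,
  }

  def server_port(server_type):
      if server_type not in INTEROP_SERVER_PORTS:
          raise ValueError('bad server_type: %s' % server_type)
      return INTEROP_SERVER_PORTS[server_type]
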
diff --git a/tools/gce_setup/interop_test_runner.sh b/tools/gce_setup/interop_test_runner.sh
index 1c0d820..edc8bba 100755
--- a/tools/gce_setup/interop_test_runner.sh
+++ b/tools/gce_setup/interop_test_runner.sh
@@ -3,8 +3,8 @@
 main() {
   source grpc_docker.sh
   test_cases=(large_unary empty_unary ping_pong client_streaming server_streaming)
-  clients=(cxx java go ruby)
-  servers=(cxx java go ruby)
+  clients=(cxx java go ruby node)
+  servers=(cxx java go ruby node)
   for test_case in "${test_cases[@]}"
   do
     for client in "${clients[@]}"
diff --git a/tools/gce_setup/new_grpc_docker_builder.sh b/tools/gce_setup/new_grpc_docker_builder.sh
index 9a3988f..5d4fc36 100755
--- a/tools/gce_setup/new_grpc_docker_builder.sh
+++ b/tools/gce_setup/new_grpc_docker_builder.sh
@@ -87,6 +87,7 @@
   local the_image='container-vm-v20140925'
   local scopes='compute-rw storage-full'
   scopes+=' https://www.googleapis.com/auth/gerritcodereview'
+  scopes+=' https://www.googleapis.com/auth/xapi.zoo'
   gcloud --project $project compute instances create $instance \
     $address_flag \
     --image $the_image \
diff --git a/tools/gce_setup/shared_startup_funcs.sh b/tools/gce_setup/shared_startup_funcs.sh
index 69f6ba8..3300eb2 100755
--- a/tools/gce_setup/shared_startup_funcs.sh
+++ b/tools/gce_setup/shared_startup_funcs.sh
@@ -367,7 +367,7 @@
 grpc_docker_pull_known() {
   local addr=$1
   [[ -n $addr ]] || addr="0.0.0.0:5000"
-  local known="base cxx php_base php ruby_base ruby java_base java go"
+  local known="base cxx php_base php ruby_base ruby java_base java go node_base node"
   echo "... pulling docker images for '$known'"
   for i in $known
   do
@@ -416,6 +416,9 @@
   [[ $image_label == "grpc/ruby" ]] && {
     grpc_docker_sync_roots_pem $dockerfile_dir/cacerts || return 1;
   }
+  [[ $image_label == "grpc/cxx" ]] && {
+    grpc_docker_sync_service_account $dockerfile_dir/service_account || return 1;
+  }
 
 
   # TODO(temiola): maybe make cache/no-cache a func option?
@@ -503,3 +506,31 @@
   }
   gsutil cp $src $gcs_certs_path $local_certs_path
 }
+
+# grpc_docker_sync_service_account.
+#
+# Copies the service account from GCS to the target dir
+#
+# call-seq:
+#   grpc_docker_sync_service_account <target_dir>
+grpc_docker_sync_service_account() {
+  local target_dir=$1
+  [[ -n $target_dir ]] || { echo "$FUNCNAME: missing arg: target_dir" >&2; return 1; }
+
+  # determine the admin root; the parent of the dockerfile root,
+  local gs_dockerfile_root=$(load_metadata "attributes/gs_dockerfile_root")
+  [[ -n $gs_dockerfile_root ]] || {
+    echo "$FUNCNAME: missing metadata: gs_dockerfile_root" >&2
+    return 1
+  }
+  local gcs_admin_root=$(dirname $gs_dockerfile_root)
+
+  # cp the file from gsutil to a known local area
+  local gcs_acct_path=$gcs_admin_root/service_account/stubbyCloudTestingTest-7dd63462c60c.json
+  local local_acct_path=$target_dir/stubbyCloudTestingTest-7dd63462c60c.json
+  mkdir -p $target_dir || {
+    echo "$FUNCNAME: could not create dir: $target_dir" 1>&2
+    return 1
+  }
+  gsutil cp $src $gcs_acct_path $local_acct_path
+}
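
grpc_docker_sync_service_account follows the pattern of the existing certs sync helper: read the dockerfile root from instance metadata, take its parent as the admin root, and gsutil-copy the service-account JSON into the target directory. A loose Python rendering of those steps (the metadata lookup is replaced by a plain argument, so this is a sketch rather than a drop-in equivalent):

  import os
  import subprocess

  def sync_service_account(target_dir, gs_dockerfile_root):
      """Copy the service account key from GCS into target_dir."""
      gcs_admin_root = os.path.dirname(gs_dockerfile_root)   # parent of the dockerfile root
      name = 'stubbyCloudTestingTest-7dd63462c60c.json'
      gcs_acct_path = '%s/service_account/%s' % (gcs_admin_root, name)
      if not os.path.isdir(target_dir):                      # mkdir -p equivalent
          os.makedirs(target_dir)
      subprocess.check_call(['gsutil', 'cp', gcs_acct_path,
                             os.path.join(target_dir, name)])
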
diff --git a/tools/run_tests/build_python.sh b/tools/run_tests/build_python.sh
index 6899ac7..4abb412 100755
--- a/tools/run_tests/build_python.sh
+++ b/tools/run_tests/build_python.sh
@@ -7,4 +7,5 @@
 
 root=`pwd`
 virtualenv python2.7_virtual_environment
-python2.7_virtual_environment/bin/pip install enum34==1.0.4 futures==2.2.0
+python2.7_virtual_environment/bin/pip install enum34==1.0.4 futures==2.2.0 protobuf==2.6.1
+python2.7_virtual_environment/bin/pip install src/python
diff --git a/tools/run_tests/jobset.py b/tools/run_tests/jobset.py
index 8f16a4f..19ae52e 100755
--- a/tools/run_tests/jobset.py
+++ b/tools/run_tests/jobset.py
@@ -86,19 +86,49 @@
   raise Exception('%s not found' % filename)
 
 
+class JobSpec(object):
+  """Specifies what to run for a job."""
+
+  def __init__(self, cmdline, shortname=None, environ={}, hash_targets=[]):
+    """
+    Arguments:
+      cmdline: a list of arguments to pass as the command line
+      environ: a dictionary of environment variables to set in the child process
+      hash_targets: which files to include in the hash representing the job's version
+                    (or empty, indicating the job should not be hashed)
+    """
+    self.cmdline = cmdline
+    self.environ = environ
+    self.shortname = cmdline[0] if shortname is None else shortname
+    self.hash_targets = hash_targets or []
+
+  def identity(self):
+    return '%r %r %r' % (self.cmdline, self.environ, self.hash_targets)
+
+  def __hash__(self):
+    return hash(self.identity())
+
+  def __cmp__(self, other):
+    return self.identity() == other.identity()
+
+
 class Job(object):
   """Manages one job."""
 
-  def __init__(self, cmdline, bin_hash, newline_on_success):
-    self._cmdline = cmdline
+  def __init__(self, spec, bin_hash, newline_on_success):
+    self._spec = spec
     self._bin_hash = bin_hash
     self._tempfile = tempfile.TemporaryFile()
-    self._process = subprocess.Popen(args=cmdline,
+    env = os.environ.copy()
+    for k, v in spec.environ.iteritems():
+      env[k] = v
+    self._process = subprocess.Popen(args=spec.cmdline,
                                      stderr=subprocess.STDOUT,
-                                     stdout=self._tempfile)
+                                     stdout=self._tempfile,
+                                     env=env)
     self._state = _RUNNING
     self._newline_on_success = newline_on_success
-    message('START', ' '.join(self._cmdline))
+    message('START', spec.shortname)
 
   def state(self, update_cache):
     """Poll current state of the job. Prints messages at completion."""
@@ -108,12 +138,13 @@
         self._tempfile.seek(0)
         stdout = self._tempfile.read()
         message('FAILED', '%s [ret=%d]' % (
-            ' '.join(self._cmdline), self._process.returncode), stdout)
+            self._spec.shortname, self._process.returncode), stdout)
       else:
         self._state = _SUCCESS
-        message('PASSED', '%s' % ' '.join(self._cmdline),
+        message('PASSED', self._spec.shortname,
                 do_newline=self._newline_on_success)
-        update_cache.finished(self._cmdline, self._bin_hash)
+        if self._bin_hash:
+          update_cache.finished(self._spec.identity(), self._bin_hash)
     return self._state
 
   def kill(self):
@@ -135,16 +166,26 @@
     self._newline_on_success = newline_on_success
     self._cache = cache
 
-  def start(self, cmdline):
+  def start(self, spec):
     """Start a job. Return True on success, False on failure."""
     while len(self._running) >= self._maxjobs:
       if self.cancelled(): return False
       self.reap()
     if self.cancelled(): return False
-    with open(which(cmdline[0])) as f:
-      bin_hash = hashlib.sha1(f.read()).hexdigest()
-    if self._cache.should_run(cmdline, bin_hash):
-      self._running.add(Job(cmdline, bin_hash, self._newline_on_success))
+    if spec.hash_targets:
+      bin_hash = hashlib.sha1()
+      for fn in spec.hash_targets:
+        with open(which(fn)) as f:
+          bin_hash.update(f.read())
+      bin_hash = bin_hash.hexdigest()
+      should_run = self._cache.should_run(spec.identity(), bin_hash)
+    else:
+      bin_hash = None
+      should_run = True
+    if should_run:
+      self._running.add(Job(spec,
+                            bin_hash,
+                            self._newline_on_success))
     return True
 
   def reap(self):
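
The net effect of the jobset.py changes: a job is described by a JobSpec (command line, extra environment, optional hash_targets), and the result cache is keyed by the spec's identity() plus a digest over the hash targets, instead of a hash of argv[0] alone. A condensed sketch of that decision, assuming a cache object with the should_run(identity, digest) interface shown above:

  import hashlib

  def should_start(spec, cache):
      """Return (run, digest) for a JobSpec, mirroring Jobset.start()."""
      if not spec.hash_targets:
          return True, None                 # unhashed jobs (e.g. script runners) always run
      digest = hashlib.sha1()
      for fn in spec.hash_targets:          # typically just the test binary
          with open(fn, 'rb') as f:
              digest.update(f.read())
      digest = digest.hexdigest()
      return cache.should_run(spec.identity(), digest), digest
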
diff --git a/tools/run_tests/run_python.sh b/tools/run_tests/run_python.sh
index ef40602..6e9405a 100755
--- a/tools/run_tests/run_python.sh
+++ b/tools/run_tests/run_python.sh
@@ -6,6 +6,19 @@
 cd $(dirname $0)/../..
 
 root=`pwd`
-PYTHONPATH=third_party/protobuf/python python2.7_virtual_environment/bin/python2.7 -B -m unittest discover -s src/python -p '*.py'
-# TODO(nathaniel): Get this working again (requires 3.X-friendly protobuf)
+# TODO(issue 215): Properly itemize these in run_tests.py so that they can be parallelized.
+python2.7_virtual_environment/bin/python2.7 -B -m _adapter._blocking_invocation_inline_service_test
+python2.7_virtual_environment/bin/python2.7 -B -m _adapter._c_test
+python2.7_virtual_environment/bin/python2.7 -B -m _adapter._event_invocation_synchronous_event_service_test
+python2.7_virtual_environment/bin/python2.7 -B -m _adapter._future_invocation_asynchronous_event_service_test
+python2.7_virtual_environment/bin/python2.7 -B -m _adapter._links_test
+python2.7_virtual_environment/bin/python2.7 -B -m _adapter._lonely_rear_link_test
+python2.7_virtual_environment/bin/python2.7 -B -m _adapter._low_test
+python2.7_virtual_environment/bin/python2.7 -B -m _framework.base.packets.implementations_test
+python2.7_virtual_environment/bin/python2.7 -B -m _framework.face.blocking_invocation_inline_service_test
+python2.7_virtual_environment/bin/python2.7 -B -m _framework.face.event_invocation_synchronous_event_service_test
+python2.7_virtual_environment/bin/python2.7 -B -m _framework.face.future_invocation_asynchronous_event_service_test
+python2.7_virtual_environment/bin/python2.7 -B -m _framework.foundation._later_test
+python2.7_virtual_environment/bin/python2.7 -B -m _framework.foundation._logging_pool_test
+# TODO(nathaniel): Get tests working under 3.4 (requires 3.X-friendly protobuf)
 # python3.4 -B -m unittest discover -s src/python -p '*.py'
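
run_python.sh now invokes each test module serially under the 2.7 virtualenv interpreter; the TODO above is about itemizing them in run_tests.py. If that were done, each module would naturally map onto one command line per job, along the lines of this hypothetical list (module names taken from the script):

  # Hypothetical itemization of the serial runs above (see TODO(issue 215)).
  PYTHON = 'python2.7_virtual_environment/bin/python2.7'
  TEST_MODULES = [
      '_adapter._blocking_invocation_inline_service_test',
      '_adapter._c_test',
      '_adapter._links_test',
      '_framework.foundation._later_test',
      # ...and the remaining modules listed in run_python.sh
  ]

  # Each entry would become its own JobSpec(cmdline=..., shortname=mod) in run_tests.py.
  cmdlines = [[PYTHON, '-B', '-m', mod] for mod in TEST_MODULES]
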
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index a699399..8cc029e 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -17,13 +17,17 @@
 # SimpleConfig: just compile with CONFIG=config, and run the binary to test
 class SimpleConfig(object):
 
-  def __init__(self, config):
+  def __init__(self, config, environ={}):
     self.build_config = config
     self.maxjobs = 2 * multiprocessing.cpu_count()
     self.allow_hashing = (config != 'gcov')
+    self.environ = environ
 
-  def run_command(self, binary):
-    return [binary]
+  def job_spec(self, binary, hash_targets):
+    return jobset.JobSpec(cmdline=[binary],
+                          environ=self.environ,
+                          hash_targets=hash_targets
+                              if self.allow_hashing else None)
 
 
 # ValgrindConfig: compile with some CONFIG=config, but use valgrind to run
@@ -35,14 +39,14 @@
     self.maxjobs = 2 * multiprocessing.cpu_count()
     self.allow_hashing = False
 
-  def run_command(self, binary):
-    return ['valgrind', binary, '--tool=%s' % self.tool]
+  def job_spec(self, binary, hash_targets):
+    return jobset.JobSpec(cmdline=['valgrind', '--tool=%s' % self.tool, binary],
+                   hash_targets=None)
 
 
 class CLanguage(object):
 
   def __init__(self, make_target, test_lang):
-    self.allow_hashing = True
     self.make_target = make_target
     with open('tools/run_tests/tests.json') as f:
       js = json.load(f)
@@ -50,8 +54,12 @@
                        for tgt in js
                        if tgt['language'] == test_lang]
 
-  def test_binaries(self, config):
-    return ['bins/%s/%s' % (config, binary) for binary in self.binaries]
+  def test_specs(self, config):
+    out = []
+    for name in self.binaries:
+      binary = 'bins/%s/%s' % (config.build_config, name)
+      out.append(config.job_spec(binary, [binary]))
+    return out
 
   def make_targets(self):
     return ['buildtests_%s' % self.make_target]
@@ -59,13 +67,11 @@
   def build_steps(self):
     return []
 
+
 class NodeLanguage(object):
 
-  def __init__(self):
-    self.allow_hashing = False
-
-  def test_binaries(self, config):
-    return ['tools/run_tests/run_node.sh']
+  def test_specs(self, config):
+    return [config.job_spec('tools/run_tests/run_node.sh', None)]
 
   def make_targets(self):
     return ['static_c']
@@ -73,13 +79,11 @@
   def build_steps(self):
     return [['tools/run_tests/build_node.sh']]
 
+
 class PhpLanguage(object):
 
-  def __init__(self):
-    self.allow_hashing = False
-
-  def test_binaries(self, config):
-    return ['src/php/bin/run_tests.sh']
+  def test_specs(self, config):
+    return [config.job_spec('src/php/bin/run_tests.sh', None)]
 
   def make_targets(self):
     return ['static_c']
@@ -90,11 +94,8 @@
 
 class PythonLanguage(object):
 
-  def __init__(self):
-    self.allow_hashing = False
-
-  def test_binaries(self, config):
-    return ['tools/run_tests/run_python.sh']
+  def test_specs(self, config):
+    return [config.job_spec('tools/run_tests/run_python.sh', None)]
 
   def make_targets(self):
     return[]
@@ -109,7 +110,8 @@
     'opt': SimpleConfig('opt'),
     'tsan': SimpleConfig('tsan'),
     'msan': SimpleConfig('msan'),
-    'asan': SimpleConfig('asan'),
+    'asan': SimpleConfig('asan', environ={
+        'ASAN_OPTIONS': 'detect_leaks=1:color=always'}),
     'gcov': SimpleConfig('gcov'),
     'memcheck': ValgrindConfig('valgrind', 'memcheck'),
     'helgrind': ValgrindConfig('dbg', 'helgrind')
@@ -123,7 +125,7 @@
     'node': NodeLanguage(),
     'php': PhpLanguage(),
     'python': PythonLanguage(),
-}
+    }
 
 # parse command line
 argp = argparse.ArgumentParser(description='Run grpc tests.')
@@ -155,14 +157,20 @@
 
 make_targets = []
 languages = set(_LANGUAGES[l] for l in args.language)
-build_steps = [['make',
-                '-j', '%d' % (multiprocessing.cpu_count() + 1),
-                'CONFIG=%s' % cfg] + list(set(
-                    itertools.chain.from_iterable(l.make_targets()
-                                                  for l in languages)))
-               for cfg in build_configs] + list(
-                   itertools.chain.from_iterable(l.build_steps()
-                                                 for l in languages))
+build_steps = [jobset.JobSpec(['make',
+                               '-j', '%d' % (multiprocessing.cpu_count() + 1),
+                               'CONFIG=%s' % cfg] + list(set(
+                                   itertools.chain.from_iterable(
+                                       l.make_targets() for l in languages))))
+               for cfg in build_configs] + list(set(
+                   jobset.JobSpec(cmdline)
+                   for l in languages
+                   for cmdline in l.build_steps()))
+one_run = set(
+    spec
+    for config in run_configs
+    for language in args.language
+    for spec in _LANGUAGES[language].test_specs(config))
 
 runs_per_test = args.runs_per_test
 forever = args.forever
@@ -175,7 +183,6 @@
     self._last_successful_run = {}
 
   def should_run(self, cmdline, bin_hash):
-    cmdline = ' '.join(cmdline)
     if cmdline not in self._last_successful_run:
       return True
     if self._last_successful_run[cmdline] != bin_hash:
@@ -183,7 +190,7 @@
     return False
 
   def finished(self, cmdline, bin_hash):
-    self._last_successful_run[' '.join(cmdline)] = bin_hash
+    self._last_successful_run[cmdline] = bin_hash
 
   def dump(self):
     return [{'cmdline': k, 'hash': v}
@@ -209,12 +216,6 @@
     return 1
 
   # run all the tests
-  one_run = dict(
-      (' '.join(config.run_command(x)), config.run_command(x))
-      for config in run_configs
-      for language in args.language
-      for x in _LANGUAGES[language].test_binaries(config.build_config)
-      ).values()
   all_runs = itertools.chain.from_iterable(
       itertools.repeat(one_run, runs_per_test))
   if not jobset.run(all_runs, check_cancelled,
@@ -226,12 +227,8 @@
   return 0
 
 
-test_cache = (None
-              if not all(x.allow_hashing
-                         for x in itertools.chain(languages, run_configs))
-              else TestCache())
-if test_cache:
-  test_cache.maybe_load()
+test_cache = TestCache()
+test_cache.maybe_load()
 
 if forever:
   success = True
@@ -248,7 +245,7 @@
                      'All tests are now passing properly',
                      do_newline=True)
     jobset.message('IDLE', 'No change detected')
-    if test_cache: test_cache.save()
+    test_cache.save()
     while not have_files_changed():
       time.sleep(1)
 else:
@@ -259,5 +256,5 @@
     jobset.message('SUCCESS', 'All tests passed', do_newline=True)
   else:
     jobset.message('FAILED', 'Some tests failed', do_newline=True)
-  if test_cache: test_cache.save()
+  test_cache.save()
   sys.exit(result)
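
Two smaller consequences of the run_tests.py rewrite are worth spelling out: configurations can now carry extra environment (the asan config injects ASAN_OPTIONS), and one_run is a set of JobSpec objects repeated runs_per_test times. A compact sketch of how a spec's environment reaches the child process, with the subprocess details simplified relative to jobset.Job:

  import os
  import subprocess

  def launch(spec):
      """Start a job with the spec's extra environment layered over os.environ."""
      env = os.environ.copy()
      env.update(spec.environ)   # e.g. {'ASAN_OPTIONS': 'detect_leaks=1:color=always'}
      return subprocess.Popen(args=spec.cmdline,
                              stderr=subprocess.STDOUT,
                              stdout=subprocess.PIPE,
                              env=env)
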
diff --git a/tools/run_tests/tests.json b/tools/run_tests/tests.json
index a610e92..fd15182 100644
--- a/tools/run_tests/tests.json
+++ b/tools/run_tests/tests.json
@@ -187,6 +187,14 @@
   }, 
   {
     "language": "c", 
+    "name": "json_rewrite_test"
+  }, 
+  {
+    "language": "c", 
+    "name": "json_test"
+  }, 
+  {
+    "language": "c", 
     "name": "lame_client_test"
   }, 
   {
@@ -263,10 +271,6 @@
   }, 
   {
     "language": "c++", 
-    "name": "tips_client_test"
-  }, 
-  {
-    "language": "c++", 
     "name": "status_test"
   }, 
   {
@@ -278,6 +282,10 @@
     "name": "thread_pool_test"
   }, 
   {
+    "language": "c++", 
+    "name": "tips_client_test"
+  }, 
+  {
     "language": "c", 
     "name": "chttp2_fake_security_cancel_after_accept_test"
   }, 
diff --git a/vsprojects/vs2013/grpc.vcxproj b/vsprojects/vs2013/grpc.vcxproj
index 9808a45..21a1f06 100644
--- a/vsprojects/vs2013/grpc.vcxproj
+++ b/vsprojects/vs2013/grpc.vcxproj
@@ -134,8 +134,12 @@
     <ClInclude Include="..\..\src\core\iomgr\tcp_posix.h" />
     <ClInclude Include="..\..\src\core\iomgr\tcp_server.h" />
     <ClInclude Include="..\..\src\core\iomgr\time_averaged_stats.h" />
-    <ClInclude Include="..\..\src\core\iomgr\wakeup_fd_posix.h" />
     <ClInclude Include="..\..\src\core\iomgr\wakeup_fd_pipe.h" />
+    <ClInclude Include="..\..\src\core\iomgr\wakeup_fd_posix.h" />
+    <ClInclude Include="..\..\src\core\json\json.h" />
+    <ClInclude Include="..\..\src\core\json\json_common.h" />
+    <ClInclude Include="..\..\src\core\json\json_reader.h" />
+    <ClInclude Include="..\..\src\core\json\json_writer.h" />
     <ClInclude Include="..\..\src\core\statistics\census_interface.h" />
     <ClInclude Include="..\..\src\core\statistics\census_log.h" />
     <ClInclude Include="..\..\src\core\statistics\census_rpc_stats.h" />
@@ -286,6 +290,14 @@
     </ClCompile>
     <ClCompile Include="..\..\src\core\iomgr\wakeup_fd_posix.c">
     </ClCompile>
+    <ClCompile Include="..\..\src\core\json\json.c">
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\json\json_reader.c">
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\json\json_string.c">
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\json\json_writer.c">
+    </ClCompile>
     <ClCompile Include="..\..\src\core\statistics\census_init.c">
     </ClCompile>
     <ClCompile Include="..\..\src\core\statistics\census_log.c">
@@ -368,8 +380,6 @@
     </ClCompile>
     <ClCompile Include="..\..\src\core\transport\transport.c">
     </ClCompile>
-    <ClCompile Include="..\..\third_party\cJSON\cJSON.c">
-    </ClCompile>
   </ItemGroup>
   <ItemGroup>
     <ProjectReference Include="gpr.vcxproj">
diff --git a/vsprojects/vs2013/grpc.vcxproj.filters b/vsprojects/vs2013/grpc.vcxproj.filters
index d080ce9..3af681a 100644
--- a/vsprojects/vs2013/grpc.vcxproj.filters
+++ b/vsprojects/vs2013/grpc.vcxproj.filters
@@ -169,6 +169,18 @@
     <ClCompile Include="..\..\src\core\iomgr\wakeup_fd_posix.c">
       <Filter>src\core\iomgr</Filter>
     </ClCompile>
+    <ClCompile Include="..\..\src\core\json\json.c">
+      <Filter>src\core\json</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\json\json_reader.c">
+      <Filter>src\core\json</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\json\json_string.c">
+      <Filter>src\core\json</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\json\json_writer.c">
+      <Filter>src\core\json</Filter>
+    </ClCompile>
     <ClCompile Include="..\..\src\core\statistics\census_init.c">
       <Filter>src\core\statistics</Filter>
     </ClCompile>
@@ -292,9 +304,6 @@
     <ClCompile Include="..\..\src\core\transport\transport.c">
       <Filter>src\core\transport</Filter>
     </ClCompile>
-    <ClCompile Include="..\..\third_party\cJSON\cJSON.c">
-      <Filter>third_party\cJSON</Filter>
-    </ClCompile>
   </ItemGroup>
   <ItemGroup>
     <ClInclude Include="..\..\include\grpc\grpc_security.h">
@@ -476,11 +485,23 @@
     <ClInclude Include="..\..\src\core\iomgr\time_averaged_stats.h">
       <Filter>src\core\iomgr</Filter>
     </ClInclude>
+    <ClInclude Include="..\..\src\core\iomgr\wakeup_fd_pipe.h">
+      <Filter>src\core\iomgr</Filter>
+    </ClInclude>
     <ClInclude Include="..\..\src\core\iomgr\wakeup_fd_posix.h">
       <Filter>src\core\iomgr</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\src\core\iomgr\wakeup_fd_pipe.h">
-      <Filter>src\core\iomgr</Filter>
+    <ClInclude Include="..\..\src\core\json\json.h">
+      <Filter>src\core\json</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\src\core\json\json_common.h">
+      <Filter>src\core\json</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\src\core\json\json_reader.h">
+      <Filter>src\core\json</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\src\core\json\json_writer.h">
+      <Filter>src\core\json</Filter>
     </ClInclude>
     <ClInclude Include="..\..\src\core\statistics\census_interface.h">
       <Filter>src\core\statistics</Filter>
@@ -617,6 +638,9 @@
     <Filter Include="src\core\iomgr">
       <UniqueIdentifier>{1baf3894-af37-e647-bdbc-95dc17ed0073}</UniqueIdentifier>
     </Filter>
+    <Filter Include="src\core\json">
+      <UniqueIdentifier>{e665cc0e-b994-d7c5-cc18-2007392019f0}</UniqueIdentifier>
+    </Filter>
     <Filter Include="src\core\security">
       <UniqueIdentifier>{1d850ac6-e639-4eab-5338-4ba40272fcc9}</UniqueIdentifier>
     </Filter>
@@ -635,12 +659,6 @@
     <Filter Include="src\core\tsi">
       <UniqueIdentifier>{0b0f9ab1-efa4-7f03-e446-6fb9b5227e84}</UniqueIdentifier>
     </Filter>
-    <Filter Include="third_party">
-      <UniqueIdentifier>{aaab30a4-2a15-732e-c141-3fbc0f0f5a7a}</UniqueIdentifier>
-    </Filter>
-    <Filter Include="third_party\cJSON">
-      <UniqueIdentifier>{332d0840-2c7a-bb09-8e58-585a6fb3959f}</UniqueIdentifier>
-    </Filter>
   </ItemGroup>
 </Project>
 
diff --git a/vsprojects/vs2013/grpc_unsecure.vcxproj b/vsprojects/vs2013/grpc_unsecure.vcxproj
index 9808a45..21a1f06 100644
--- a/vsprojects/vs2013/grpc_unsecure.vcxproj
+++ b/vsprojects/vs2013/grpc_unsecure.vcxproj
@@ -134,8 +134,12 @@
     <ClInclude Include="..\..\src\core\iomgr\tcp_posix.h" />
     <ClInclude Include="..\..\src\core\iomgr\tcp_server.h" />
     <ClInclude Include="..\..\src\core\iomgr\time_averaged_stats.h" />
-    <ClInclude Include="..\..\src\core\iomgr\wakeup_fd_posix.h" />
     <ClInclude Include="..\..\src\core\iomgr\wakeup_fd_pipe.h" />
+    <ClInclude Include="..\..\src\core\iomgr\wakeup_fd_posix.h" />
+    <ClInclude Include="..\..\src\core\json\json.h" />
+    <ClInclude Include="..\..\src\core\json\json_common.h" />
+    <ClInclude Include="..\..\src\core\json\json_reader.h" />
+    <ClInclude Include="..\..\src\core\json\json_writer.h" />
     <ClInclude Include="..\..\src\core\statistics\census_interface.h" />
     <ClInclude Include="..\..\src\core\statistics\census_log.h" />
     <ClInclude Include="..\..\src\core\statistics\census_rpc_stats.h" />
@@ -286,6 +290,14 @@
     </ClCompile>
     <ClCompile Include="..\..\src\core\iomgr\wakeup_fd_posix.c">
     </ClCompile>
+    <ClCompile Include="..\..\src\core\json\json.c">
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\json\json_reader.c">
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\json\json_string.c">
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\json\json_writer.c">
+    </ClCompile>
     <ClCompile Include="..\..\src\core\statistics\census_init.c">
     </ClCompile>
     <ClCompile Include="..\..\src\core\statistics\census_log.c">
@@ -368,8 +380,6 @@
     </ClCompile>
     <ClCompile Include="..\..\src\core\transport\transport.c">
     </ClCompile>
-    <ClCompile Include="..\..\third_party\cJSON\cJSON.c">
-    </ClCompile>
   </ItemGroup>
   <ItemGroup>
     <ProjectReference Include="gpr.vcxproj">
diff --git a/vsprojects/vs2013/grpc_unsecure.vcxproj.filters b/vsprojects/vs2013/grpc_unsecure.vcxproj.filters
index 5b12fab..4dadb61 100644
--- a/vsprojects/vs2013/grpc_unsecure.vcxproj.filters
+++ b/vsprojects/vs2013/grpc_unsecure.vcxproj.filters
@@ -130,6 +130,18 @@
     <ClCompile Include="..\..\src\core\iomgr\wakeup_fd_posix.c">
       <Filter>src\core\iomgr</Filter>
     </ClCompile>
+    <ClCompile Include="..\..\src\core\json\json.c">
+      <Filter>src\core\json</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\json\json_reader.c">
+      <Filter>src\core\json</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\json\json_string.c">
+      <Filter>src\core\json</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\json\json_writer.c">
+      <Filter>src\core\json</Filter>
+    </ClCompile>
     <ClCompile Include="..\..\src\core\statistics\census_init.c">
       <Filter>src\core\statistics</Filter>
     </ClCompile>
@@ -253,9 +265,6 @@
     <ClCompile Include="..\..\src\core\transport\transport.c">
       <Filter>src\core\transport</Filter>
     </ClCompile>
-    <ClCompile Include="..\..\third_party\cJSON\cJSON.c">
-      <Filter>third_party\cJSON</Filter>
-    </ClCompile>
   </ItemGroup>
   <ItemGroup>
     <ClInclude Include="..\..\include\grpc\byte_buffer.h">
@@ -401,11 +410,23 @@
     <ClInclude Include="..\..\src\core\iomgr\time_averaged_stats.h">
       <Filter>src\core\iomgr</Filter>
     </ClInclude>
+    <ClInclude Include="..\..\src\core\iomgr\wakeup_fd_pipe.h">
+      <Filter>src\core\iomgr</Filter>
+    </ClInclude>
     <ClInclude Include="..\..\src\core\iomgr\wakeup_fd_posix.h">
       <Filter>src\core\iomgr</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\src\core\iomgr\wakeup_fd_pipe.h">
-      <Filter>src\core\iomgr</Filter>
+    <ClInclude Include="..\..\src\core\json\json.h">
+      <Filter>src\core\json</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\src\core\json\json_common.h">
+      <Filter>src\core\json</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\src\core\json\json_reader.h">
+      <Filter>src\core\json</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\src\core\json\json_writer.h">
+      <Filter>src\core\json</Filter>
     </ClInclude>
     <ClInclude Include="..\..\src\core\statistics\census_interface.h">
       <Filter>src\core\statistics</Filter>
@@ -542,6 +563,9 @@
     <Filter Include="src\core\iomgr">
       <UniqueIdentifier>{a9df8b24-ecea-ff6d-8999-d8fa54cd70bf}</UniqueIdentifier>
     </Filter>
+    <Filter Include="src\core\json">
+      <UniqueIdentifier>{443ffc61-1bea-2477-6e54-1ddf8c139264}</UniqueIdentifier>
+    </Filter>
     <Filter Include="src\core\statistics">
       <UniqueIdentifier>{e084164c-a069-00e3-db35-4e0b1cd6f0b7}</UniqueIdentifier>
     </Filter>
@@ -554,12 +578,6 @@
     <Filter Include="src\core\transport\chttp2">
       <UniqueIdentifier>{5fcd6206-f774-9ae6-4b85-305d6a723843}</UniqueIdentifier>
     </Filter>
-    <Filter Include="third_party">
-      <UniqueIdentifier>{025c051e-8eba-125b-67f9-173f95176eb2}</UniqueIdentifier>
-    </Filter>
-    <Filter Include="third_party\cJSON">
-      <UniqueIdentifier>{7d75397e-988a-baac-897e-2ea7b43d5dd9}</UniqueIdentifier>
-    </Filter>
   </ItemGroup>
 </Project>
 
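Note on the project/filters split shown above: the .vcxproj files decide what actually gets compiled, while the matching .vcxproj.filters files only control how sources are grouped in Solution Explorer, so the new json sources have to be listed in both. A minimal sketch of the paired entries (the GUID below is a placeholder, not taken from this diff):

  <!-- grpc_unsecure.vcxproj: adds the file to the build -->
  <ClCompile Include="..\..\src\core\json\json.c" />

  <!-- grpc_unsecure.vcxproj.filters: groups it under src\core\json in Solution Explorer -->
  <ClCompile Include="..\..\src\core\json\json.c">
    <Filter>src\core\json</Filter>
  </ClCompile>
  <Filter Include="src\core\json">
    <UniqueIdentifier>{00000000-0000-0000-0000-000000000000}</UniqueIdentifier>
  </Filter>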
diff --git a/vsprojects/vs2013/ssl.props b/vsprojects/vs2013/ssl.props
new file mode 100644
index 0000000..283bd17
--- /dev/null
+++ b/vsprojects/vs2013/ssl.props
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ImportGroup Label="PropertySheets" />
+  <PropertyGroup Label="UserMacros" />
+  <PropertyGroup />
+  <ItemDefinitionGroup>
+    <Link>
+      <AdditionalLibraryDirectories>..\..\third_party\openssl\out32;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+      <AdditionalDependencies>ssleay32.lib;libeay32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemGroup />
+</Project>
\ No newline at end of file
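The new ssl.props property sheet centralizes the OpenSSL link settings (the out32 library directory plus ssleay32.lib and libeay32.lib); appending %(AdditionalLibraryDirectories) and %(AdditionalDependencies) preserves whatever the importing project already sets. The import itself is not part of this hunk, but a consuming .vcxproj would typically pull the sheet in like this (placement shown is illustrative):

  <ImportGroup Label="PropertySheets">
    <Import Project="ssl.props" />
  </ImportGroup>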
diff --git a/vsprojects/vs2013/winsock.props b/vsprojects/vs2013/winsock.props
new file mode 100644
index 0000000..1e84104
--- /dev/null
+++ b/vsprojects/vs2013/winsock.props
@@ -0,0 +1,12 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ImportGroup Label="PropertySheets" />
+  <PropertyGroup Label="UserMacros" />
+  <PropertyGroup />
+  <ItemDefinitionGroup>
+    <Link>
+      <AdditionalDependencies>ws2_32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemGroup />
+</Project>
\ No newline at end of file
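winsock.props follows the same pattern but only appends ws2_32.lib, the Winsock 2 import library that the Windows networking code links against. Because both sheets append to the existing item metadata rather than overwrite it, a project can import them together; again a hedged sketch, since the actual import lines live in the .vcxproj files and are not shown in this diff:

  <ImportGroup Label="PropertySheets">
    <Import Project="winsock.props" />
    <Import Project="ssl.props" />
  </ImportGroup>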