Introducing iomgr.

Move eventmanager and platform dependent endpoint functionality into a single
library called 'iomgr'.

This is primarily to prepare for a Windows port - where posix socket semantics
lead to poor quality code.

Mostly this is a code movement CL, with some small changes to help prepare the
way for porting:

- em style fd objects can only be held internally in iomgr, and own their memory
- added grpc_iomgr_create_endpoint_pair() to accommodate the common pattern of
creating a tcp endpoint from the output of socketpair - this will help keep
our tests portable
- separated em alarm interface into a separate file, as this part of event
manager is needed higher up the stack
- made the eventmanager bits a true singleton, simplifying APIs across the
stack as there's no longer a reason to carry a pointer there.

Initial design document is here:
https://docs.google.com/document/d/1VmafcHvvrP5kwtQkz84R5yXF7u7fW-9Pn0bkSUQHDt8/edit?disco=AAAAARNByxg
	Change on 2014/12/09 by ctiller <ctiller@google.com>
-------------
Created by MOE: http://code.google.com/p/moe-java
MOE_MIGRATED_REVID=81716456
diff --git a/Makefile b/Makefile
index 69e3c67..a01d6f9 100644
--- a/Makefile
+++ b/Makefile
@@ -84,7 +84,7 @@
 
 buildtests: buildtests_c buildtests_cxx
 
-buildtests_c: privatelibs_c bins/grpc_byte_buffer_reader_test bins/gpr_cancellable_test bins/gpr_log_test bins/gpr_cmdline_test bins/gpr_histogram_test bins/gpr_host_port_test bins/gpr_slice_buffer_test bins/gpr_slice_test bins/gpr_string_test bins/gpr_sync_test bins/gpr_thd_test bins/gpr_time_test bins/murmur_hash_test bins/grpc_em_test bins/grpc_em_pipe_test bins/grpc_stream_op_test bins/alpn_test bins/chttp2_stream_encoder_test bins/hpack_table_test bins/chttp2_stream_map_test bins/hpack_parser_test bins/transport_metadata_test bins/chttp2_status_conversion_test bins/chttp2_transport_end2end_test bins/grpc_tcp_test bins/dualstack_socket_test bins/no_server_test bins/resolve_address_test bins/socket_utils_test bins/tcp_server_test bins/tcp_client_test bins/grpc_channel_stack_test bins/metadata_buffer_test bins/grpc_completion_queue_test bins/census_window_stats_test bins/census_statistics_quick_test bins/census_statistics_performance_test bins/census_statistics_multiple_writers_test bins/census_statistics_multiple_writers_circular_buffer_test bins/census_stub_test bins/census_hash_table_test bins/fling_server bins/fling_client bins/fling_test bins/echo_server bins/echo_client bins/echo_test bins/message_compress_test bins/bin_encoder_test bins/secure_endpoint_test bins/httpcli_format_request_test bins/httpcli_parser_test bins/httpcli_test bins/grpc_credentials_test bins/fling_stream_test bins/lame_client_test bins/chttp2_fake_security_cancel_after_accept_test bins/chttp2_fake_security_cancel_after_accept_and_writes_closed_test bins/chttp2_fake_security_cancel_after_invoke_test bins/chttp2_fake_security_cancel_before_invoke_test bins/chttp2_fake_security_cancel_in_a_vacuum_test bins/chttp2_fake_security_early_server_shutdown_finishes_inflight_calls_test bins/chttp2_fake_security_early_server_shutdown_finishes_tags_test bins/chttp2_fake_security_invoke_large_request_test bins/chttp2_fake_security_max_concurrent_streams_test bins/chttp2_fake_security_no_op_test 
bins/chttp2_fake_security_ping_pong_streaming_test bins/chttp2_fake_security_request_response_with_metadata_and_payload_test bins/chttp2_fake_security_request_response_with_payload_test bins/chttp2_fake_security_simple_delayed_request_test bins/chttp2_fake_security_simple_request_test bins/chttp2_fake_security_thread_stress_test bins/chttp2_fake_security_writes_done_hangs_with_pending_read_test bins/chttp2_fullstack_cancel_after_accept_test bins/chttp2_fullstack_cancel_after_accept_and_writes_closed_test bins/chttp2_fullstack_cancel_after_invoke_test bins/chttp2_fullstack_cancel_before_invoke_test bins/chttp2_fullstack_cancel_in_a_vacuum_test bins/chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_test bins/chttp2_fullstack_early_server_shutdown_finishes_tags_test bins/chttp2_fullstack_invoke_large_request_test bins/chttp2_fullstack_max_concurrent_streams_test bins/chttp2_fullstack_no_op_test bins/chttp2_fullstack_ping_pong_streaming_test bins/chttp2_fullstack_request_response_with_metadata_and_payload_test bins/chttp2_fullstack_request_response_with_payload_test bins/chttp2_fullstack_simple_delayed_request_test bins/chttp2_fullstack_simple_request_test bins/chttp2_fullstack_thread_stress_test bins/chttp2_fullstack_writes_done_hangs_with_pending_read_test bins/chttp2_simple_ssl_fullstack_cancel_after_accept_test bins/chttp2_simple_ssl_fullstack_cancel_after_accept_and_writes_closed_test bins/chttp2_simple_ssl_fullstack_cancel_after_invoke_test bins/chttp2_simple_ssl_fullstack_cancel_before_invoke_test bins/chttp2_simple_ssl_fullstack_cancel_in_a_vacuum_test bins/chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_inflight_calls_test bins/chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_tags_test bins/chttp2_simple_ssl_fullstack_invoke_large_request_test bins/chttp2_simple_ssl_fullstack_max_concurrent_streams_test bins/chttp2_simple_ssl_fullstack_no_op_test bins/chttp2_simple_ssl_fullstack_ping_pong_streaming_test 
bins/chttp2_simple_ssl_fullstack_request_response_with_metadata_and_payload_test bins/chttp2_simple_ssl_fullstack_request_response_with_payload_test bins/chttp2_simple_ssl_fullstack_simple_delayed_request_test bins/chttp2_simple_ssl_fullstack_simple_request_test bins/chttp2_simple_ssl_fullstack_thread_stress_test bins/chttp2_simple_ssl_fullstack_writes_done_hangs_with_pending_read_test bins/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test bins/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_and_writes_closed_test bins/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_invoke_test bins/chttp2_simple_ssl_with_oauth2_fullstack_cancel_before_invoke_test bins/chttp2_simple_ssl_with_oauth2_fullstack_cancel_in_a_vacuum_test bins/chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_inflight_calls_test bins/chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_tags_test bins/chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test bins/chttp2_simple_ssl_with_oauth2_fullstack_max_concurrent_streams_test bins/chttp2_simple_ssl_with_oauth2_fullstack_no_op_test bins/chttp2_simple_ssl_with_oauth2_fullstack_ping_pong_streaming_test bins/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_metadata_and_payload_test bins/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_test bins/chttp2_simple_ssl_with_oauth2_fullstack_simple_delayed_request_test bins/chttp2_simple_ssl_with_oauth2_fullstack_simple_request_test bins/chttp2_simple_ssl_with_oauth2_fullstack_thread_stress_test bins/chttp2_simple_ssl_with_oauth2_fullstack_writes_done_hangs_with_pending_read_test bins/chttp2_socket_pair_cancel_after_accept_test bins/chttp2_socket_pair_cancel_after_accept_and_writes_closed_test bins/chttp2_socket_pair_cancel_after_invoke_test bins/chttp2_socket_pair_cancel_before_invoke_test bins/chttp2_socket_pair_cancel_in_a_vacuum_test 
bins/chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_test bins/chttp2_socket_pair_early_server_shutdown_finishes_tags_test bins/chttp2_socket_pair_invoke_large_request_test bins/chttp2_socket_pair_max_concurrent_streams_test bins/chttp2_socket_pair_no_op_test bins/chttp2_socket_pair_ping_pong_streaming_test bins/chttp2_socket_pair_request_response_with_metadata_and_payload_test bins/chttp2_socket_pair_request_response_with_payload_test bins/chttp2_socket_pair_simple_delayed_request_test bins/chttp2_socket_pair_simple_request_test bins/chttp2_socket_pair_thread_stress_test bins/chttp2_socket_pair_writes_done_hangs_with_pending_read_test bins/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test bins/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_test bins/chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_test bins/chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_test bins/chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_test bins/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_test bins/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_test bins/chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test bins/chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_test bins/chttp2_socket_pair_one_byte_at_a_time_no_op_test bins/chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_test bins/chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_test bins/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_test bins/chttp2_socket_pair_one_byte_at_a_time_simple_delayed_request_test bins/chttp2_socket_pair_one_byte_at_a_time_simple_request_test bins/chttp2_socket_pair_one_byte_at_a_time_thread_stress_test bins/chttp2_socket_pair_one_byte_at_a_time_writes_done_hangs_with_pending_read_test
+buildtests_c: privatelibs_c bins/grpc_byte_buffer_reader_test bins/gpr_cancellable_test bins/gpr_log_test bins/gpr_cmdline_test bins/gpr_histogram_test bins/gpr_host_port_test bins/gpr_slice_buffer_test bins/gpr_slice_test bins/gpr_string_test bins/gpr_sync_test bins/gpr_thd_test bins/gpr_time_test bins/murmur_hash_test bins/grpc_stream_op_test bins/alpn_test bins/chttp2_stream_encoder_test bins/hpack_table_test bins/chttp2_stream_map_test bins/hpack_parser_test bins/transport_metadata_test bins/chttp2_status_conversion_test bins/chttp2_transport_end2end_test bins/tcp_posix_test bins/dualstack_socket_test bins/no_server_test bins/resolve_address_test bins/sockaddr_utils_test bins/tcp_server_posix_test bins/tcp_client_posix_test bins/grpc_channel_stack_test bins/metadata_buffer_test bins/grpc_completion_queue_test bins/census_window_stats_test bins/census_statistics_quick_test bins/census_statistics_performance_test bins/census_statistics_multiple_writers_test bins/census_statistics_multiple_writers_circular_buffer_test bins/census_stub_test bins/census_hash_table_test bins/fling_server bins/fling_client bins/fling_test bins/echo_server bins/echo_client bins/echo_test bins/message_compress_test bins/bin_encoder_test bins/secure_endpoint_test bins/httpcli_format_request_test bins/httpcli_parser_test bins/httpcli_test bins/grpc_credentials_test bins/fling_stream_test bins/lame_client_test bins/chttp2_fake_security_cancel_after_accept_test bins/chttp2_fake_security_cancel_after_accept_and_writes_closed_test bins/chttp2_fake_security_cancel_after_invoke_test bins/chttp2_fake_security_cancel_before_invoke_test bins/chttp2_fake_security_cancel_in_a_vacuum_test bins/chttp2_fake_security_early_server_shutdown_finishes_inflight_calls_test bins/chttp2_fake_security_early_server_shutdown_finishes_tags_test bins/chttp2_fake_security_invoke_large_request_test bins/chttp2_fake_security_max_concurrent_streams_test bins/chttp2_fake_security_no_op_test 
bins/chttp2_fake_security_ping_pong_streaming_test bins/chttp2_fake_security_request_response_with_metadata_and_payload_test bins/chttp2_fake_security_request_response_with_payload_test bins/chttp2_fake_security_simple_delayed_request_test bins/chttp2_fake_security_simple_request_test bins/chttp2_fake_security_thread_stress_test bins/chttp2_fake_security_writes_done_hangs_with_pending_read_test bins/chttp2_fullstack_cancel_after_accept_test bins/chttp2_fullstack_cancel_after_accept_and_writes_closed_test bins/chttp2_fullstack_cancel_after_invoke_test bins/chttp2_fullstack_cancel_before_invoke_test bins/chttp2_fullstack_cancel_in_a_vacuum_test bins/chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_test bins/chttp2_fullstack_early_server_shutdown_finishes_tags_test bins/chttp2_fullstack_invoke_large_request_test bins/chttp2_fullstack_max_concurrent_streams_test bins/chttp2_fullstack_no_op_test bins/chttp2_fullstack_ping_pong_streaming_test bins/chttp2_fullstack_request_response_with_metadata_and_payload_test bins/chttp2_fullstack_request_response_with_payload_test bins/chttp2_fullstack_simple_delayed_request_test bins/chttp2_fullstack_simple_request_test bins/chttp2_fullstack_thread_stress_test bins/chttp2_fullstack_writes_done_hangs_with_pending_read_test bins/chttp2_simple_ssl_fullstack_cancel_after_accept_test bins/chttp2_simple_ssl_fullstack_cancel_after_accept_and_writes_closed_test bins/chttp2_simple_ssl_fullstack_cancel_after_invoke_test bins/chttp2_simple_ssl_fullstack_cancel_before_invoke_test bins/chttp2_simple_ssl_fullstack_cancel_in_a_vacuum_test bins/chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_inflight_calls_test bins/chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_tags_test bins/chttp2_simple_ssl_fullstack_invoke_large_request_test bins/chttp2_simple_ssl_fullstack_max_concurrent_streams_test bins/chttp2_simple_ssl_fullstack_no_op_test bins/chttp2_simple_ssl_fullstack_ping_pong_streaming_test 
bins/chttp2_simple_ssl_fullstack_request_response_with_metadata_and_payload_test bins/chttp2_simple_ssl_fullstack_request_response_with_payload_test bins/chttp2_simple_ssl_fullstack_simple_delayed_request_test bins/chttp2_simple_ssl_fullstack_simple_request_test bins/chttp2_simple_ssl_fullstack_thread_stress_test bins/chttp2_simple_ssl_fullstack_writes_done_hangs_with_pending_read_test bins/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test bins/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_and_writes_closed_test bins/chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_invoke_test bins/chttp2_simple_ssl_with_oauth2_fullstack_cancel_before_invoke_test bins/chttp2_simple_ssl_with_oauth2_fullstack_cancel_in_a_vacuum_test bins/chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_inflight_calls_test bins/chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_tags_test bins/chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test bins/chttp2_simple_ssl_with_oauth2_fullstack_max_concurrent_streams_test bins/chttp2_simple_ssl_with_oauth2_fullstack_no_op_test bins/chttp2_simple_ssl_with_oauth2_fullstack_ping_pong_streaming_test bins/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_metadata_and_payload_test bins/chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_test bins/chttp2_simple_ssl_with_oauth2_fullstack_simple_delayed_request_test bins/chttp2_simple_ssl_with_oauth2_fullstack_simple_request_test bins/chttp2_simple_ssl_with_oauth2_fullstack_thread_stress_test bins/chttp2_simple_ssl_with_oauth2_fullstack_writes_done_hangs_with_pending_read_test bins/chttp2_socket_pair_cancel_after_accept_test bins/chttp2_socket_pair_cancel_after_accept_and_writes_closed_test bins/chttp2_socket_pair_cancel_after_invoke_test bins/chttp2_socket_pair_cancel_before_invoke_test bins/chttp2_socket_pair_cancel_in_a_vacuum_test 
bins/chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_test bins/chttp2_socket_pair_early_server_shutdown_finishes_tags_test bins/chttp2_socket_pair_invoke_large_request_test bins/chttp2_socket_pair_max_concurrent_streams_test bins/chttp2_socket_pair_no_op_test bins/chttp2_socket_pair_ping_pong_streaming_test bins/chttp2_socket_pair_request_response_with_metadata_and_payload_test bins/chttp2_socket_pair_request_response_with_payload_test bins/chttp2_socket_pair_simple_delayed_request_test bins/chttp2_socket_pair_simple_request_test bins/chttp2_socket_pair_thread_stress_test bins/chttp2_socket_pair_writes_done_hangs_with_pending_read_test bins/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test bins/chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_test bins/chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_test bins/chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_test bins/chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_test bins/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_test bins/chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_test bins/chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test bins/chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_test bins/chttp2_socket_pair_one_byte_at_a_time_no_op_test bins/chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_test bins/chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_test bins/chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_test bins/chttp2_socket_pair_one_byte_at_a_time_simple_delayed_request_test bins/chttp2_socket_pair_one_byte_at_a_time_simple_request_test bins/chttp2_socket_pair_one_byte_at_a_time_thread_stress_test bins/chttp2_socket_pair_one_byte_at_a_time_writes_done_hangs_with_pending_read_test
 
 buildtests_cxx: privatelibs_cxx bins/thread_pool_test bins/status_test
 
@@ -117,10 +117,6 @@
 	$(Q) ./bins/gpr_time_test || ( echo test gpr_time_test failed ; exit 1 )
 	$(E) "[RUN]     Testing murmur_hash_test"
 	$(Q) ./bins/murmur_hash_test || ( echo test murmur_hash_test failed ; exit 1 )
-	$(E) "[RUN]     Testing grpc_em_test"
-	$(Q) ./bins/grpc_em_test || ( echo test grpc_em_test failed ; exit 1 )
-	$(E) "[RUN]     Testing grpc_em_pipe_test"
-	$(Q) ./bins/grpc_em_pipe_test || ( echo test grpc_em_pipe_test failed ; exit 1 )
 	$(E) "[RUN]     Testing grpc_stream_op_test"
 	$(Q) ./bins/grpc_stream_op_test || ( echo test grpc_stream_op_test failed ; exit 1 )
 	$(E) "[RUN]     Testing alpn_test"
@@ -139,20 +135,20 @@
 	$(Q) ./bins/chttp2_status_conversion_test || ( echo test chttp2_status_conversion_test failed ; exit 1 )
 	$(E) "[RUN]     Testing chttp2_transport_end2end_test"
 	$(Q) ./bins/chttp2_transport_end2end_test || ( echo test chttp2_transport_end2end_test failed ; exit 1 )
-	$(E) "[RUN]     Testing grpc_tcp_test"
-	$(Q) ./bins/grpc_tcp_test || ( echo test grpc_tcp_test failed ; exit 1 )
+	$(E) "[RUN]     Testing tcp_posix_test"
+	$(Q) ./bins/tcp_posix_test || ( echo test tcp_posix_test failed ; exit 1 )
 	$(E) "[RUN]     Testing dualstack_socket_test"
 	$(Q) ./bins/dualstack_socket_test || ( echo test dualstack_socket_test failed ; exit 1 )
 	$(E) "[RUN]     Testing no_server_test"
 	$(Q) ./bins/no_server_test || ( echo test no_server_test failed ; exit 1 )
 	$(E) "[RUN]     Testing resolve_address_test"
 	$(Q) ./bins/resolve_address_test || ( echo test resolve_address_test failed ; exit 1 )
-	$(E) "[RUN]     Testing socket_utils_test"
-	$(Q) ./bins/socket_utils_test || ( echo test socket_utils_test failed ; exit 1 )
-	$(E) "[RUN]     Testing tcp_server_test"
-	$(Q) ./bins/tcp_server_test || ( echo test tcp_server_test failed ; exit 1 )
-	$(E) "[RUN]     Testing tcp_client_test"
-	$(Q) ./bins/tcp_client_test || ( echo test tcp_client_test failed ; exit 1 )
+	$(E) "[RUN]     Testing sockaddr_utils_test"
+	$(Q) ./bins/sockaddr_utils_test || ( echo test sockaddr_utils_test failed ; exit 1 )
+	$(E) "[RUN]     Testing tcp_server_posix_test"
+	$(Q) ./bins/tcp_server_posix_test || ( echo test tcp_server_posix_test failed ; exit 1 )
+	$(E) "[RUN]     Testing tcp_client_posix_test"
+	$(Q) ./bins/tcp_client_posix_test || ( echo test tcp_client_posix_test failed ; exit 1 )
 	$(E) "[RUN]     Testing grpc_channel_stack_test"
 	$(Q) ./bins/grpc_channel_stack_test || ( echo test grpc_channel_stack_test failed ; exit 1 )
 	$(E) "[RUN]     Testing metadata_buffer_test"
@@ -477,7 +473,7 @@
 
 dep: dep_c dep_cxx
 
-dep_c: deps_libgpr deps_libgrpc deps_libgrpc_test_util deps_libend2end_fixture_chttp2_fake_security deps_libend2end_fixture_chttp2_fullstack deps_libend2end_fixture_chttp2_simple_ssl_fullstack deps_libend2end_fixture_chttp2_simple_ssl_with_oauth2_fullstack deps_libend2end_fixture_chttp2_socket_pair deps_libend2end_fixture_chttp2_socket_pair_one_byte_at_a_time deps_libend2end_test_cancel_after_accept deps_libend2end_test_cancel_after_accept_and_writes_closed deps_libend2end_test_cancel_after_invoke deps_libend2end_test_cancel_before_invoke deps_libend2end_test_cancel_in_a_vacuum deps_libend2end_test_early_server_shutdown_finishes_inflight_calls deps_libend2end_test_early_server_shutdown_finishes_tags deps_libend2end_test_invoke_large_request deps_libend2end_test_max_concurrent_streams deps_libend2end_test_no_op deps_libend2end_test_ping_pong_streaming deps_libend2end_test_request_response_with_metadata_and_payload deps_libend2end_test_request_response_with_payload deps_libend2end_test_simple_delayed_request deps_libend2end_test_simple_request deps_libend2end_test_thread_stress deps_libend2end_test_writes_done_hangs_with_pending_read deps_libend2end_certs deps_libgrpc_unsecure deps_gen_hpack_tables deps_grpc_byte_buffer_reader_test deps_gpr_cancellable_test deps_gpr_log_test deps_gpr_cmdline_test deps_gpr_histogram_test deps_gpr_host_port_test deps_gpr_slice_buffer_test deps_gpr_slice_test deps_gpr_string_test deps_gpr_sync_test deps_gpr_thd_test deps_gpr_time_test deps_murmur_hash_test deps_grpc_em_test deps_grpc_em_pipe_test deps_grpc_stream_op_test deps_alpn_test deps_chttp2_stream_encoder_test deps_hpack_table_test deps_chttp2_stream_map_test deps_hpack_parser_test deps_transport_metadata_test deps_chttp2_status_conversion_test deps_chttp2_transport_end2end_test deps_grpc_tcp_test deps_dualstack_socket_test deps_no_server_test deps_resolve_address_test deps_socket_utils_test deps_tcp_server_test deps_tcp_client_test deps_grpc_channel_stack_test 
deps_metadata_buffer_test deps_grpc_completion_queue_test deps_grpc_completion_queue_benchmark deps_census_window_stats_test deps_census_statistics_quick_test deps_census_statistics_performance_test deps_census_statistics_multiple_writers_test deps_census_statistics_multiple_writers_circular_buffer_test deps_census_stub_test deps_census_hash_table_test deps_fling_server deps_fling_client deps_fling_test deps_echo_server deps_echo_client deps_echo_test deps_low_level_ping_pong_benchmark deps_message_compress_test deps_bin_encoder_test deps_secure_endpoint_test deps_httpcli_format_request_test deps_httpcli_parser_test deps_httpcli_test deps_grpc_credentials_test deps_fling_stream_test deps_lame_client_test deps_chttp2_fake_security_cancel_after_accept_test deps_chttp2_fake_security_cancel_after_accept_and_writes_closed_test deps_chttp2_fake_security_cancel_after_invoke_test deps_chttp2_fake_security_cancel_before_invoke_test deps_chttp2_fake_security_cancel_in_a_vacuum_test deps_chttp2_fake_security_early_server_shutdown_finishes_inflight_calls_test deps_chttp2_fake_security_early_server_shutdown_finishes_tags_test deps_chttp2_fake_security_invoke_large_request_test deps_chttp2_fake_security_max_concurrent_streams_test deps_chttp2_fake_security_no_op_test deps_chttp2_fake_security_ping_pong_streaming_test deps_chttp2_fake_security_request_response_with_metadata_and_payload_test deps_chttp2_fake_security_request_response_with_payload_test deps_chttp2_fake_security_simple_delayed_request_test deps_chttp2_fake_security_simple_request_test deps_chttp2_fake_security_thread_stress_test deps_chttp2_fake_security_writes_done_hangs_with_pending_read_test deps_chttp2_fullstack_cancel_after_accept_test deps_chttp2_fullstack_cancel_after_accept_and_writes_closed_test deps_chttp2_fullstack_cancel_after_invoke_test deps_chttp2_fullstack_cancel_before_invoke_test deps_chttp2_fullstack_cancel_in_a_vacuum_test deps_chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_test 
deps_chttp2_fullstack_early_server_shutdown_finishes_tags_test deps_chttp2_fullstack_invoke_large_request_test deps_chttp2_fullstack_max_concurrent_streams_test deps_chttp2_fullstack_no_op_test deps_chttp2_fullstack_ping_pong_streaming_test deps_chttp2_fullstack_request_response_with_metadata_and_payload_test deps_chttp2_fullstack_request_response_with_payload_test deps_chttp2_fullstack_simple_delayed_request_test deps_chttp2_fullstack_simple_request_test deps_chttp2_fullstack_thread_stress_test deps_chttp2_fullstack_writes_done_hangs_with_pending_read_test deps_chttp2_simple_ssl_fullstack_cancel_after_accept_test deps_chttp2_simple_ssl_fullstack_cancel_after_accept_and_writes_closed_test deps_chttp2_simple_ssl_fullstack_cancel_after_invoke_test deps_chttp2_simple_ssl_fullstack_cancel_before_invoke_test deps_chttp2_simple_ssl_fullstack_cancel_in_a_vacuum_test deps_chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_inflight_calls_test deps_chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_tags_test deps_chttp2_simple_ssl_fullstack_invoke_large_request_test deps_chttp2_simple_ssl_fullstack_max_concurrent_streams_test deps_chttp2_simple_ssl_fullstack_no_op_test deps_chttp2_simple_ssl_fullstack_ping_pong_streaming_test deps_chttp2_simple_ssl_fullstack_request_response_with_metadata_and_payload_test deps_chttp2_simple_ssl_fullstack_request_response_with_payload_test deps_chttp2_simple_ssl_fullstack_simple_delayed_request_test deps_chttp2_simple_ssl_fullstack_simple_request_test deps_chttp2_simple_ssl_fullstack_thread_stress_test deps_chttp2_simple_ssl_fullstack_writes_done_hangs_with_pending_read_test deps_chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test deps_chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_and_writes_closed_test deps_chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_invoke_test deps_chttp2_simple_ssl_with_oauth2_fullstack_cancel_before_invoke_test 
deps_chttp2_simple_ssl_with_oauth2_fullstack_cancel_in_a_vacuum_test deps_chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_inflight_calls_test deps_chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_tags_test deps_chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test deps_chttp2_simple_ssl_with_oauth2_fullstack_max_concurrent_streams_test deps_chttp2_simple_ssl_with_oauth2_fullstack_no_op_test deps_chttp2_simple_ssl_with_oauth2_fullstack_ping_pong_streaming_test deps_chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_metadata_and_payload_test deps_chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_test deps_chttp2_simple_ssl_with_oauth2_fullstack_simple_delayed_request_test deps_chttp2_simple_ssl_with_oauth2_fullstack_simple_request_test deps_chttp2_simple_ssl_with_oauth2_fullstack_thread_stress_test deps_chttp2_simple_ssl_with_oauth2_fullstack_writes_done_hangs_with_pending_read_test deps_chttp2_socket_pair_cancel_after_accept_test deps_chttp2_socket_pair_cancel_after_accept_and_writes_closed_test deps_chttp2_socket_pair_cancel_after_invoke_test deps_chttp2_socket_pair_cancel_before_invoke_test deps_chttp2_socket_pair_cancel_in_a_vacuum_test deps_chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_test deps_chttp2_socket_pair_early_server_shutdown_finishes_tags_test deps_chttp2_socket_pair_invoke_large_request_test deps_chttp2_socket_pair_max_concurrent_streams_test deps_chttp2_socket_pair_no_op_test deps_chttp2_socket_pair_ping_pong_streaming_test deps_chttp2_socket_pair_request_response_with_metadata_and_payload_test deps_chttp2_socket_pair_request_response_with_payload_test deps_chttp2_socket_pair_simple_delayed_request_test deps_chttp2_socket_pair_simple_request_test deps_chttp2_socket_pair_thread_stress_test deps_chttp2_socket_pair_writes_done_hangs_with_pending_read_test deps_chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test 
deps_chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_test deps_chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_test deps_chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_test deps_chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_test deps_chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_test deps_chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_test deps_chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test deps_chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_test deps_chttp2_socket_pair_one_byte_at_a_time_no_op_test deps_chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_test deps_chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_test deps_chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_test deps_chttp2_socket_pair_one_byte_at_a_time_simple_delayed_request_test deps_chttp2_socket_pair_one_byte_at_a_time_simple_request_test deps_chttp2_socket_pair_one_byte_at_a_time_thread_stress_test deps_chttp2_socket_pair_one_byte_at_a_time_writes_done_hangs_with_pending_read_test
+dep_c: deps_libgpr deps_libgrpc deps_libgrpc_test_util deps_libend2end_fixture_chttp2_fake_security deps_libend2end_fixture_chttp2_fullstack deps_libend2end_fixture_chttp2_simple_ssl_fullstack deps_libend2end_fixture_chttp2_simple_ssl_with_oauth2_fullstack deps_libend2end_fixture_chttp2_socket_pair deps_libend2end_fixture_chttp2_socket_pair_one_byte_at_a_time deps_libend2end_test_cancel_after_accept deps_libend2end_test_cancel_after_accept_and_writes_closed deps_libend2end_test_cancel_after_invoke deps_libend2end_test_cancel_before_invoke deps_libend2end_test_cancel_in_a_vacuum deps_libend2end_test_early_server_shutdown_finishes_inflight_calls deps_libend2end_test_early_server_shutdown_finishes_tags deps_libend2end_test_invoke_large_request deps_libend2end_test_max_concurrent_streams deps_libend2end_test_no_op deps_libend2end_test_ping_pong_streaming deps_libend2end_test_request_response_with_metadata_and_payload deps_libend2end_test_request_response_with_payload deps_libend2end_test_simple_delayed_request deps_libend2end_test_simple_request deps_libend2end_test_thread_stress deps_libend2end_test_writes_done_hangs_with_pending_read deps_libend2end_certs deps_libgrpc_unsecure deps_gen_hpack_tables deps_grpc_byte_buffer_reader_test deps_gpr_cancellable_test deps_gpr_log_test deps_gpr_cmdline_test deps_gpr_histogram_test deps_gpr_host_port_test deps_gpr_slice_buffer_test deps_gpr_slice_test deps_gpr_string_test deps_gpr_sync_test deps_gpr_thd_test deps_gpr_time_test deps_murmur_hash_test deps_grpc_stream_op_test deps_alpn_test deps_chttp2_stream_encoder_test deps_hpack_table_test deps_chttp2_stream_map_test deps_hpack_parser_test deps_transport_metadata_test deps_chttp2_status_conversion_test deps_chttp2_transport_end2end_test deps_tcp_posix_test deps_dualstack_socket_test deps_no_server_test deps_resolve_address_test deps_sockaddr_utils_test deps_tcp_server_posix_test deps_tcp_client_posix_test deps_grpc_channel_stack_test deps_metadata_buffer_test 
deps_grpc_completion_queue_test deps_grpc_completion_queue_benchmark deps_census_window_stats_test deps_census_statistics_quick_test deps_census_statistics_performance_test deps_census_statistics_multiple_writers_test deps_census_statistics_multiple_writers_circular_buffer_test deps_census_stub_test deps_census_hash_table_test deps_fling_server deps_fling_client deps_fling_test deps_echo_server deps_echo_client deps_echo_test deps_low_level_ping_pong_benchmark deps_message_compress_test deps_bin_encoder_test deps_secure_endpoint_test deps_httpcli_format_request_test deps_httpcli_parser_test deps_httpcli_test deps_grpc_credentials_test deps_fling_stream_test deps_lame_client_test deps_chttp2_fake_security_cancel_after_accept_test deps_chttp2_fake_security_cancel_after_accept_and_writes_closed_test deps_chttp2_fake_security_cancel_after_invoke_test deps_chttp2_fake_security_cancel_before_invoke_test deps_chttp2_fake_security_cancel_in_a_vacuum_test deps_chttp2_fake_security_early_server_shutdown_finishes_inflight_calls_test deps_chttp2_fake_security_early_server_shutdown_finishes_tags_test deps_chttp2_fake_security_invoke_large_request_test deps_chttp2_fake_security_max_concurrent_streams_test deps_chttp2_fake_security_no_op_test deps_chttp2_fake_security_ping_pong_streaming_test deps_chttp2_fake_security_request_response_with_metadata_and_payload_test deps_chttp2_fake_security_request_response_with_payload_test deps_chttp2_fake_security_simple_delayed_request_test deps_chttp2_fake_security_simple_request_test deps_chttp2_fake_security_thread_stress_test deps_chttp2_fake_security_writes_done_hangs_with_pending_read_test deps_chttp2_fullstack_cancel_after_accept_test deps_chttp2_fullstack_cancel_after_accept_and_writes_closed_test deps_chttp2_fullstack_cancel_after_invoke_test deps_chttp2_fullstack_cancel_before_invoke_test deps_chttp2_fullstack_cancel_in_a_vacuum_test deps_chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_test 
deps_chttp2_fullstack_early_server_shutdown_finishes_tags_test deps_chttp2_fullstack_invoke_large_request_test deps_chttp2_fullstack_max_concurrent_streams_test deps_chttp2_fullstack_no_op_test deps_chttp2_fullstack_ping_pong_streaming_test deps_chttp2_fullstack_request_response_with_metadata_and_payload_test deps_chttp2_fullstack_request_response_with_payload_test deps_chttp2_fullstack_simple_delayed_request_test deps_chttp2_fullstack_simple_request_test deps_chttp2_fullstack_thread_stress_test deps_chttp2_fullstack_writes_done_hangs_with_pending_read_test deps_chttp2_simple_ssl_fullstack_cancel_after_accept_test deps_chttp2_simple_ssl_fullstack_cancel_after_accept_and_writes_closed_test deps_chttp2_simple_ssl_fullstack_cancel_after_invoke_test deps_chttp2_simple_ssl_fullstack_cancel_before_invoke_test deps_chttp2_simple_ssl_fullstack_cancel_in_a_vacuum_test deps_chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_inflight_calls_test deps_chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_tags_test deps_chttp2_simple_ssl_fullstack_invoke_large_request_test deps_chttp2_simple_ssl_fullstack_max_concurrent_streams_test deps_chttp2_simple_ssl_fullstack_no_op_test deps_chttp2_simple_ssl_fullstack_ping_pong_streaming_test deps_chttp2_simple_ssl_fullstack_request_response_with_metadata_and_payload_test deps_chttp2_simple_ssl_fullstack_request_response_with_payload_test deps_chttp2_simple_ssl_fullstack_simple_delayed_request_test deps_chttp2_simple_ssl_fullstack_simple_request_test deps_chttp2_simple_ssl_fullstack_thread_stress_test deps_chttp2_simple_ssl_fullstack_writes_done_hangs_with_pending_read_test deps_chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test deps_chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_and_writes_closed_test deps_chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_invoke_test deps_chttp2_simple_ssl_with_oauth2_fullstack_cancel_before_invoke_test 
deps_chttp2_simple_ssl_with_oauth2_fullstack_cancel_in_a_vacuum_test deps_chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_inflight_calls_test deps_chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_tags_test deps_chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test deps_chttp2_simple_ssl_with_oauth2_fullstack_max_concurrent_streams_test deps_chttp2_simple_ssl_with_oauth2_fullstack_no_op_test deps_chttp2_simple_ssl_with_oauth2_fullstack_ping_pong_streaming_test deps_chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_metadata_and_payload_test deps_chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_test deps_chttp2_simple_ssl_with_oauth2_fullstack_simple_delayed_request_test deps_chttp2_simple_ssl_with_oauth2_fullstack_simple_request_test deps_chttp2_simple_ssl_with_oauth2_fullstack_thread_stress_test deps_chttp2_simple_ssl_with_oauth2_fullstack_writes_done_hangs_with_pending_read_test deps_chttp2_socket_pair_cancel_after_accept_test deps_chttp2_socket_pair_cancel_after_accept_and_writes_closed_test deps_chttp2_socket_pair_cancel_after_invoke_test deps_chttp2_socket_pair_cancel_before_invoke_test deps_chttp2_socket_pair_cancel_in_a_vacuum_test deps_chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_test deps_chttp2_socket_pair_early_server_shutdown_finishes_tags_test deps_chttp2_socket_pair_invoke_large_request_test deps_chttp2_socket_pair_max_concurrent_streams_test deps_chttp2_socket_pair_no_op_test deps_chttp2_socket_pair_ping_pong_streaming_test deps_chttp2_socket_pair_request_response_with_metadata_and_payload_test deps_chttp2_socket_pair_request_response_with_payload_test deps_chttp2_socket_pair_simple_delayed_request_test deps_chttp2_socket_pair_simple_request_test deps_chttp2_socket_pair_thread_stress_test deps_chttp2_socket_pair_writes_done_hangs_with_pending_read_test deps_chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test 
deps_chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_test deps_chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_test deps_chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_test deps_chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_test deps_chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_test deps_chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_test deps_chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test deps_chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_test deps_chttp2_socket_pair_one_byte_at_a_time_no_op_test deps_chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_test deps_chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_test deps_chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_test deps_chttp2_socket_pair_one_byte_at_a_time_simple_delayed_request_test deps_chttp2_socket_pair_one_byte_at_a_time_simple_request_test deps_chttp2_socket_pair_one_byte_at_a_time_thread_stress_test deps_chttp2_socket_pair_one_byte_at_a_time_writes_done_hangs_with_pending_read_test
 
 dep_cxx: deps_libgrpc++ deps_libgrpc++_test_util deps_thread_pool_test deps_status_test
 
@@ -523,7 +519,7 @@
 	$(E) "[INSTALL] Installing libgrpc++.so"
 	$(Q) $(INSTALL) libs/libgrpc++.so.$(VERSION) $(prefix)/lib/libgrpc++.so.$(VERSION)
 
-clean: clean_libgpr clean_libgrpc clean_libgrpc_test_util clean_libgrpc++ clean_libgrpc++_test_util clean_libend2end_fixture_chttp2_fake_security clean_libend2end_fixture_chttp2_fullstack clean_libend2end_fixture_chttp2_simple_ssl_fullstack clean_libend2end_fixture_chttp2_simple_ssl_with_oauth2_fullstack clean_libend2end_fixture_chttp2_socket_pair clean_libend2end_fixture_chttp2_socket_pair_one_byte_at_a_time clean_libend2end_test_cancel_after_accept clean_libend2end_test_cancel_after_accept_and_writes_closed clean_libend2end_test_cancel_after_invoke clean_libend2end_test_cancel_before_invoke clean_libend2end_test_cancel_in_a_vacuum clean_libend2end_test_early_server_shutdown_finishes_inflight_calls clean_libend2end_test_early_server_shutdown_finishes_tags clean_libend2end_test_invoke_large_request clean_libend2end_test_max_concurrent_streams clean_libend2end_test_no_op clean_libend2end_test_ping_pong_streaming clean_libend2end_test_request_response_with_metadata_and_payload clean_libend2end_test_request_response_with_payload clean_libend2end_test_simple_delayed_request clean_libend2end_test_simple_request clean_libend2end_test_thread_stress clean_libend2end_test_writes_done_hangs_with_pending_read clean_libend2end_certs clean_libgrpc_unsecure clean_gen_hpack_tables clean_grpc_byte_buffer_reader_test clean_gpr_cancellable_test clean_gpr_log_test clean_gpr_cmdline_test clean_gpr_histogram_test clean_gpr_host_port_test clean_gpr_slice_buffer_test clean_gpr_slice_test clean_gpr_string_test clean_gpr_sync_test clean_gpr_thd_test clean_gpr_time_test clean_murmur_hash_test clean_grpc_em_test clean_grpc_em_pipe_test clean_grpc_stream_op_test clean_alpn_test clean_chttp2_stream_encoder_test clean_hpack_table_test clean_chttp2_stream_map_test clean_hpack_parser_test clean_transport_metadata_test clean_chttp2_status_conversion_test clean_chttp2_transport_end2end_test clean_grpc_tcp_test clean_dualstack_socket_test clean_no_server_test clean_resolve_address_test 
clean_socket_utils_test clean_tcp_server_test clean_tcp_client_test clean_grpc_channel_stack_test clean_metadata_buffer_test clean_grpc_completion_queue_test clean_grpc_completion_queue_benchmark clean_census_window_stats_test clean_census_statistics_quick_test clean_census_statistics_performance_test clean_census_statistics_multiple_writers_test clean_census_statistics_multiple_writers_circular_buffer_test clean_census_stub_test clean_census_hash_table_test clean_fling_server clean_fling_client clean_fling_test clean_echo_server clean_echo_client clean_echo_test clean_low_level_ping_pong_benchmark clean_message_compress_test clean_bin_encoder_test clean_secure_endpoint_test clean_httpcli_format_request_test clean_httpcli_parser_test clean_httpcli_test clean_grpc_credentials_test clean_fling_stream_test clean_lame_client_test clean_thread_pool_test clean_status_test clean_chttp2_fake_security_cancel_after_accept_test clean_chttp2_fake_security_cancel_after_accept_and_writes_closed_test clean_chttp2_fake_security_cancel_after_invoke_test clean_chttp2_fake_security_cancel_before_invoke_test clean_chttp2_fake_security_cancel_in_a_vacuum_test clean_chttp2_fake_security_early_server_shutdown_finishes_inflight_calls_test clean_chttp2_fake_security_early_server_shutdown_finishes_tags_test clean_chttp2_fake_security_invoke_large_request_test clean_chttp2_fake_security_max_concurrent_streams_test clean_chttp2_fake_security_no_op_test clean_chttp2_fake_security_ping_pong_streaming_test clean_chttp2_fake_security_request_response_with_metadata_and_payload_test clean_chttp2_fake_security_request_response_with_payload_test clean_chttp2_fake_security_simple_delayed_request_test clean_chttp2_fake_security_simple_request_test clean_chttp2_fake_security_thread_stress_test clean_chttp2_fake_security_writes_done_hangs_with_pending_read_test clean_chttp2_fullstack_cancel_after_accept_test clean_chttp2_fullstack_cancel_after_accept_and_writes_closed_test 
clean_chttp2_fullstack_cancel_after_invoke_test clean_chttp2_fullstack_cancel_before_invoke_test clean_chttp2_fullstack_cancel_in_a_vacuum_test clean_chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_test clean_chttp2_fullstack_early_server_shutdown_finishes_tags_test clean_chttp2_fullstack_invoke_large_request_test clean_chttp2_fullstack_max_concurrent_streams_test clean_chttp2_fullstack_no_op_test clean_chttp2_fullstack_ping_pong_streaming_test clean_chttp2_fullstack_request_response_with_metadata_and_payload_test clean_chttp2_fullstack_request_response_with_payload_test clean_chttp2_fullstack_simple_delayed_request_test clean_chttp2_fullstack_simple_request_test clean_chttp2_fullstack_thread_stress_test clean_chttp2_fullstack_writes_done_hangs_with_pending_read_test clean_chttp2_simple_ssl_fullstack_cancel_after_accept_test clean_chttp2_simple_ssl_fullstack_cancel_after_accept_and_writes_closed_test clean_chttp2_simple_ssl_fullstack_cancel_after_invoke_test clean_chttp2_simple_ssl_fullstack_cancel_before_invoke_test clean_chttp2_simple_ssl_fullstack_cancel_in_a_vacuum_test clean_chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_inflight_calls_test clean_chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_tags_test clean_chttp2_simple_ssl_fullstack_invoke_large_request_test clean_chttp2_simple_ssl_fullstack_max_concurrent_streams_test clean_chttp2_simple_ssl_fullstack_no_op_test clean_chttp2_simple_ssl_fullstack_ping_pong_streaming_test clean_chttp2_simple_ssl_fullstack_request_response_with_metadata_and_payload_test clean_chttp2_simple_ssl_fullstack_request_response_with_payload_test clean_chttp2_simple_ssl_fullstack_simple_delayed_request_test clean_chttp2_simple_ssl_fullstack_simple_request_test clean_chttp2_simple_ssl_fullstack_thread_stress_test clean_chttp2_simple_ssl_fullstack_writes_done_hangs_with_pending_read_test clean_chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test 
clean_chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_and_writes_closed_test clean_chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_invoke_test clean_chttp2_simple_ssl_with_oauth2_fullstack_cancel_before_invoke_test clean_chttp2_simple_ssl_with_oauth2_fullstack_cancel_in_a_vacuum_test clean_chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_inflight_calls_test clean_chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_tags_test clean_chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test clean_chttp2_simple_ssl_with_oauth2_fullstack_max_concurrent_streams_test clean_chttp2_simple_ssl_with_oauth2_fullstack_no_op_test clean_chttp2_simple_ssl_with_oauth2_fullstack_ping_pong_streaming_test clean_chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_metadata_and_payload_test clean_chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_test clean_chttp2_simple_ssl_with_oauth2_fullstack_simple_delayed_request_test clean_chttp2_simple_ssl_with_oauth2_fullstack_simple_request_test clean_chttp2_simple_ssl_with_oauth2_fullstack_thread_stress_test clean_chttp2_simple_ssl_with_oauth2_fullstack_writes_done_hangs_with_pending_read_test clean_chttp2_socket_pair_cancel_after_accept_test clean_chttp2_socket_pair_cancel_after_accept_and_writes_closed_test clean_chttp2_socket_pair_cancel_after_invoke_test clean_chttp2_socket_pair_cancel_before_invoke_test clean_chttp2_socket_pair_cancel_in_a_vacuum_test clean_chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_test clean_chttp2_socket_pair_early_server_shutdown_finishes_tags_test clean_chttp2_socket_pair_invoke_large_request_test clean_chttp2_socket_pair_max_concurrent_streams_test clean_chttp2_socket_pair_no_op_test clean_chttp2_socket_pair_ping_pong_streaming_test clean_chttp2_socket_pair_request_response_with_metadata_and_payload_test clean_chttp2_socket_pair_request_response_with_payload_test 
clean_chttp2_socket_pair_simple_delayed_request_test clean_chttp2_socket_pair_simple_request_test clean_chttp2_socket_pair_thread_stress_test clean_chttp2_socket_pair_writes_done_hangs_with_pending_read_test clean_chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test clean_chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_test clean_chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_test clean_chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_test clean_chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_test clean_chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_test clean_chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_test clean_chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test clean_chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_test clean_chttp2_socket_pair_one_byte_at_a_time_no_op_test clean_chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_test clean_chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_test clean_chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_test clean_chttp2_socket_pair_one_byte_at_a_time_simple_delayed_request_test clean_chttp2_socket_pair_one_byte_at_a_time_simple_request_test clean_chttp2_socket_pair_one_byte_at_a_time_thread_stress_test clean_chttp2_socket_pair_one_byte_at_a_time_writes_done_hangs_with_pending_read_test
+clean: clean_libgpr clean_libgrpc clean_libgrpc_test_util clean_libgrpc++ clean_libgrpc++_test_util clean_libend2end_fixture_chttp2_fake_security clean_libend2end_fixture_chttp2_fullstack clean_libend2end_fixture_chttp2_simple_ssl_fullstack clean_libend2end_fixture_chttp2_simple_ssl_with_oauth2_fullstack clean_libend2end_fixture_chttp2_socket_pair clean_libend2end_fixture_chttp2_socket_pair_one_byte_at_a_time clean_libend2end_test_cancel_after_accept clean_libend2end_test_cancel_after_accept_and_writes_closed clean_libend2end_test_cancel_after_invoke clean_libend2end_test_cancel_before_invoke clean_libend2end_test_cancel_in_a_vacuum clean_libend2end_test_early_server_shutdown_finishes_inflight_calls clean_libend2end_test_early_server_shutdown_finishes_tags clean_libend2end_test_invoke_large_request clean_libend2end_test_max_concurrent_streams clean_libend2end_test_no_op clean_libend2end_test_ping_pong_streaming clean_libend2end_test_request_response_with_metadata_and_payload clean_libend2end_test_request_response_with_payload clean_libend2end_test_simple_delayed_request clean_libend2end_test_simple_request clean_libend2end_test_thread_stress clean_libend2end_test_writes_done_hangs_with_pending_read clean_libend2end_certs clean_libgrpc_unsecure clean_gen_hpack_tables clean_grpc_byte_buffer_reader_test clean_gpr_cancellable_test clean_gpr_log_test clean_gpr_cmdline_test clean_gpr_histogram_test clean_gpr_host_port_test clean_gpr_slice_buffer_test clean_gpr_slice_test clean_gpr_string_test clean_gpr_sync_test clean_gpr_thd_test clean_gpr_time_test clean_murmur_hash_test clean_grpc_stream_op_test clean_alpn_test clean_chttp2_stream_encoder_test clean_hpack_table_test clean_chttp2_stream_map_test clean_hpack_parser_test clean_transport_metadata_test clean_chttp2_status_conversion_test clean_chttp2_transport_end2end_test clean_tcp_posix_test clean_dualstack_socket_test clean_no_server_test clean_resolve_address_test clean_sockaddr_utils_test clean_tcp_server_posix_test 
clean_tcp_client_posix_test clean_grpc_channel_stack_test clean_metadata_buffer_test clean_grpc_completion_queue_test clean_grpc_completion_queue_benchmark clean_census_window_stats_test clean_census_statistics_quick_test clean_census_statistics_performance_test clean_census_statistics_multiple_writers_test clean_census_statistics_multiple_writers_circular_buffer_test clean_census_stub_test clean_census_hash_table_test clean_fling_server clean_fling_client clean_fling_test clean_echo_server clean_echo_client clean_echo_test clean_low_level_ping_pong_benchmark clean_message_compress_test clean_bin_encoder_test clean_secure_endpoint_test clean_httpcli_format_request_test clean_httpcli_parser_test clean_httpcli_test clean_grpc_credentials_test clean_fling_stream_test clean_lame_client_test clean_thread_pool_test clean_status_test clean_chttp2_fake_security_cancel_after_accept_test clean_chttp2_fake_security_cancel_after_accept_and_writes_closed_test clean_chttp2_fake_security_cancel_after_invoke_test clean_chttp2_fake_security_cancel_before_invoke_test clean_chttp2_fake_security_cancel_in_a_vacuum_test clean_chttp2_fake_security_early_server_shutdown_finishes_inflight_calls_test clean_chttp2_fake_security_early_server_shutdown_finishes_tags_test clean_chttp2_fake_security_invoke_large_request_test clean_chttp2_fake_security_max_concurrent_streams_test clean_chttp2_fake_security_no_op_test clean_chttp2_fake_security_ping_pong_streaming_test clean_chttp2_fake_security_request_response_with_metadata_and_payload_test clean_chttp2_fake_security_request_response_with_payload_test clean_chttp2_fake_security_simple_delayed_request_test clean_chttp2_fake_security_simple_request_test clean_chttp2_fake_security_thread_stress_test clean_chttp2_fake_security_writes_done_hangs_with_pending_read_test clean_chttp2_fullstack_cancel_after_accept_test clean_chttp2_fullstack_cancel_after_accept_and_writes_closed_test clean_chttp2_fullstack_cancel_after_invoke_test 
clean_chttp2_fullstack_cancel_before_invoke_test clean_chttp2_fullstack_cancel_in_a_vacuum_test clean_chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_test clean_chttp2_fullstack_early_server_shutdown_finishes_tags_test clean_chttp2_fullstack_invoke_large_request_test clean_chttp2_fullstack_max_concurrent_streams_test clean_chttp2_fullstack_no_op_test clean_chttp2_fullstack_ping_pong_streaming_test clean_chttp2_fullstack_request_response_with_metadata_and_payload_test clean_chttp2_fullstack_request_response_with_payload_test clean_chttp2_fullstack_simple_delayed_request_test clean_chttp2_fullstack_simple_request_test clean_chttp2_fullstack_thread_stress_test clean_chttp2_fullstack_writes_done_hangs_with_pending_read_test clean_chttp2_simple_ssl_fullstack_cancel_after_accept_test clean_chttp2_simple_ssl_fullstack_cancel_after_accept_and_writes_closed_test clean_chttp2_simple_ssl_fullstack_cancel_after_invoke_test clean_chttp2_simple_ssl_fullstack_cancel_before_invoke_test clean_chttp2_simple_ssl_fullstack_cancel_in_a_vacuum_test clean_chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_inflight_calls_test clean_chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_tags_test clean_chttp2_simple_ssl_fullstack_invoke_large_request_test clean_chttp2_simple_ssl_fullstack_max_concurrent_streams_test clean_chttp2_simple_ssl_fullstack_no_op_test clean_chttp2_simple_ssl_fullstack_ping_pong_streaming_test clean_chttp2_simple_ssl_fullstack_request_response_with_metadata_and_payload_test clean_chttp2_simple_ssl_fullstack_request_response_with_payload_test clean_chttp2_simple_ssl_fullstack_simple_delayed_request_test clean_chttp2_simple_ssl_fullstack_simple_request_test clean_chttp2_simple_ssl_fullstack_thread_stress_test clean_chttp2_simple_ssl_fullstack_writes_done_hangs_with_pending_read_test clean_chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test clean_chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_and_writes_closed_test 
clean_chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_invoke_test clean_chttp2_simple_ssl_with_oauth2_fullstack_cancel_before_invoke_test clean_chttp2_simple_ssl_with_oauth2_fullstack_cancel_in_a_vacuum_test clean_chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_inflight_calls_test clean_chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_tags_test clean_chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test clean_chttp2_simple_ssl_with_oauth2_fullstack_max_concurrent_streams_test clean_chttp2_simple_ssl_with_oauth2_fullstack_no_op_test clean_chttp2_simple_ssl_with_oauth2_fullstack_ping_pong_streaming_test clean_chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_metadata_and_payload_test clean_chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_test clean_chttp2_simple_ssl_with_oauth2_fullstack_simple_delayed_request_test clean_chttp2_simple_ssl_with_oauth2_fullstack_simple_request_test clean_chttp2_simple_ssl_with_oauth2_fullstack_thread_stress_test clean_chttp2_simple_ssl_with_oauth2_fullstack_writes_done_hangs_with_pending_read_test clean_chttp2_socket_pair_cancel_after_accept_test clean_chttp2_socket_pair_cancel_after_accept_and_writes_closed_test clean_chttp2_socket_pair_cancel_after_invoke_test clean_chttp2_socket_pair_cancel_before_invoke_test clean_chttp2_socket_pair_cancel_in_a_vacuum_test clean_chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_test clean_chttp2_socket_pair_early_server_shutdown_finishes_tags_test clean_chttp2_socket_pair_invoke_large_request_test clean_chttp2_socket_pair_max_concurrent_streams_test clean_chttp2_socket_pair_no_op_test clean_chttp2_socket_pair_ping_pong_streaming_test clean_chttp2_socket_pair_request_response_with_metadata_and_payload_test clean_chttp2_socket_pair_request_response_with_payload_test clean_chttp2_socket_pair_simple_delayed_request_test clean_chttp2_socket_pair_simple_request_test 
clean_chttp2_socket_pair_thread_stress_test clean_chttp2_socket_pair_writes_done_hangs_with_pending_read_test clean_chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test clean_chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_test clean_chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_test clean_chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_test clean_chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_test clean_chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_test clean_chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_test clean_chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test clean_chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_test clean_chttp2_socket_pair_one_byte_at_a_time_no_op_test clean_chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_test clean_chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_test clean_chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_test clean_chttp2_socket_pair_one_byte_at_a_time_simple_delayed_request_test clean_chttp2_socket_pair_one_byte_at_a_time_simple_request_test clean_chttp2_socket_pair_one_byte_at_a_time_thread_stress_test clean_chttp2_socket_pair_one_byte_at_a_time_writes_done_hangs_with_pending_read_test
 	$(Q) $(RM) -r deps objs libs bins gens
 
 
@@ -627,25 +623,46 @@
     src/core/compression/algorithm.c \
     src/core/compression/message_compress.c \
     src/core/endpoint/endpoint.c \
-    src/core/endpoint/resolve_address.c \
-    src/core/endpoint/socket_utils.c \
-    src/core/endpoint/socket_utils_linux.c \
-    src/core/endpoint/socket_utils_posix.c \
-    src/core/endpoint/tcp.c \
-    src/core/endpoint/tcp_client.c \
-    src/core/endpoint/tcp_server.c \
-    src/core/eventmanager/em.c \
-    src/core/eventmanager/em_posix.c \
+    src/core/endpoint/secure_endpoint.c \
+    src/core/httpcli/format_request.c \
+    src/core/httpcli/httpcli.c \
+    src/core/httpcli/httpcli_security_context.c \
+    src/core/httpcli/parser.c \
+    src/core/iomgr/endpoint_pair_posix.c \
+    src/core/iomgr/iomgr_libevent.c \
+    src/core/iomgr/iomgr_libevent_use_threads.c \
+    src/core/iomgr/resolve_address_posix.c \
+    src/core/iomgr/sockaddr_utils.c \
+    src/core/iomgr/socket_utils_common_posix.c \
+    src/core/iomgr/socket_utils_linux.c \
+    src/core/iomgr/socket_utils_posix.c \
+    src/core/iomgr/tcp_client_posix.c \
+    src/core/iomgr/tcp_posix.c \
+    src/core/iomgr/tcp_server_posix.c \
+    src/core/security/auth.c \
+    src/core/security/credentials.c \
+    src/core/security/google_root_certs.c \
+    src/core/security/secure_transport_setup.c \
+    src/core/security/security_context.c \
+    src/core/security/server_secure_chttp2.c \
+    src/core/statistics/census_init.c \
+    src/core/statistics/census_rpc_stats.c \
+    src/core/statistics/census_tracing.c \
+    src/core/statistics/hash_table.c \
+    src/core/statistics/log.c \
+    src/core/statistics/window_stats.c \
     src/core/surface/byte_buffer.c \
     src/core/surface/byte_buffer_reader.c \
     src/core/surface/call.c \
     src/core/surface/channel.c \
     src/core/surface/channel_create.c \
     src/core/surface/client.c \
-    src/core/surface/lame_client.c \
     src/core/surface/completion_queue.c \
     src/core/surface/event_string.c \
     src/core/surface/init.c \
+    src/core/surface/lame_client.c \
+    src/core/surface/secure_channel_create.c \
+    src/core/surface/secure_server_create.c \
     src/core/surface/server.c \
     src/core/surface/server_chttp2.c \
     src/core/surface/server_create.c \
@@ -665,33 +682,14 @@
     src/core/transport/chttp2/stream_encoder.c \
     src/core/transport/chttp2/stream_map.c \
     src/core/transport/chttp2/timeout_encoding.c \
-    src/core/transport/chttp2/varint.c \
     src/core/transport/chttp2_transport.c \
+    src/core/transport/chttp2/varint.c \
     src/core/transport/metadata.c \
     src/core/transport/stream_op.c \
     src/core/transport/transport.c \
-    src/core/statistics/census_init.c \
-    src/core/statistics/census_rpc_stats.c \
-    src/core/statistics/census_tracing.c \
-    src/core/statistics/log.c \
-    src/core/statistics/window_stats.c \
-    src/core/statistics/hash_table.c \
-    src/core/httpcli/format_request.c \
-    src/core/httpcli/httpcli.c \
-    src/core/httpcli/httpcli_security_context.c \
-    src/core/httpcli/parser.c \
-    src/core/security/auth.c \
-    src/core/security/credentials.c \
-    src/core/security/google_root_certs.c \
-    src/core/security/secure_transport_setup.c \
-    src/core/security/security_context.c \
-    src/core/security/server_secure_chttp2.c \
-    src/core/surface/secure_channel_create.c \
-    src/core/surface/secure_server_create.c \
-    src/core/endpoint/secure_endpoint.c \
-    src/core/tsi/transport_security.c \
     src/core/tsi/fake_transport_security.c \
     src/core/tsi/ssl_transport_security.c \
+    src/core/tsi/transport_security.c \
     third_party/cJSON/cJSON.c \
 
 PUBLIC_HEADERS_C += \
@@ -1546,25 +1544,40 @@
     src/core/compression/algorithm.c \
     src/core/compression/message_compress.c \
     src/core/endpoint/endpoint.c \
-    src/core/endpoint/resolve_address.c \
-    src/core/endpoint/socket_utils.c \
-    src/core/endpoint/socket_utils_linux.c \
-    src/core/endpoint/socket_utils_posix.c \
-    src/core/endpoint/tcp.c \
-    src/core/endpoint/tcp_client.c \
-    src/core/endpoint/tcp_server.c \
-    src/core/eventmanager/em.c \
-    src/core/eventmanager/em_posix.c \
+    src/core/endpoint/secure_endpoint.c \
+    src/core/httpcli/format_request.c \
+    src/core/httpcli/httpcli.c \
+    src/core/httpcli/httpcli_security_context.c \
+    src/core/httpcli/parser.c \
+    src/core/iomgr/endpoint_pair_posix.c \
+    src/core/iomgr/iomgr_libevent.c \
+    src/core/iomgr/iomgr_libevent_use_threads.c \
+    src/core/iomgr/resolve_address_posix.c \
+    src/core/iomgr/sockaddr_utils.c \
+    src/core/iomgr/socket_utils_common_posix.c \
+    src/core/iomgr/socket_utils_linux.c \
+    src/core/iomgr/socket_utils_posix.c \
+    src/core/iomgr/tcp_client_posix.c \
+    src/core/iomgr/tcp_posix.c \
+    src/core/iomgr/tcp_server_posix.c \
+    src/core/statistics/census_init.c \
+    src/core/statistics/census_rpc_stats.c \
+    src/core/statistics/census_tracing.c \
+    src/core/statistics/hash_table.c \
+    src/core/statistics/log.c \
+    src/core/statistics/window_stats.c \
     src/core/surface/byte_buffer.c \
     src/core/surface/byte_buffer_reader.c \
     src/core/surface/call.c \
     src/core/surface/channel.c \
     src/core/surface/channel_create.c \
     src/core/surface/client.c \
-    src/core/surface/lame_client.c \
     src/core/surface/completion_queue.c \
     src/core/surface/event_string.c \
     src/core/surface/init.c \
+    src/core/surface/lame_client.c \
+    src/core/surface/secure_channel_create.c \
+    src/core/surface/secure_server_create.c \
     src/core/surface/server.c \
     src/core/surface/server_chttp2.c \
     src/core/surface/server_create.c \
@@ -1584,24 +1597,11 @@
     src/core/transport/chttp2/stream_encoder.c \
     src/core/transport/chttp2/stream_map.c \
     src/core/transport/chttp2/timeout_encoding.c \
-    src/core/transport/chttp2/varint.c \
     src/core/transport/chttp2_transport.c \
+    src/core/transport/chttp2/varint.c \
     src/core/transport/metadata.c \
     src/core/transport/stream_op.c \
     src/core/transport/transport.c \
-    src/core/statistics/census_init.c \
-    src/core/statistics/census_rpc_stats.c \
-    src/core/statistics/census_tracing.c \
-    src/core/statistics/log.c \
-    src/core/statistics/window_stats.c \
-    src/core/statistics/hash_table.c \
-    src/core/httpcli/format_request.c \
-    src/core/httpcli/httpcli.c \
-    src/core/httpcli/httpcli_security_context.c \
-    src/core/httpcli/parser.c \
-    src/core/surface/secure_channel_create.c \
-    src/core/surface/secure_server_create.c \
-    src/core/endpoint/secure_endpoint.c \
     third_party/cJSON/cJSON.c \
 
 PUBLIC_HEADERS_C += \
@@ -1979,54 +1979,6 @@
 	$(Q) $(RM) bins/murmur_hash_test
 
 
-GRPC_EM_TEST_SRC = \
-    test/core/eventmanager/em_test.c \
-
-GRPC_EM_TEST_OBJS = $(addprefix objs/, $(addsuffix .o, $(basename $(GRPC_EM_TEST_SRC))))
-GRPC_EM_TEST_DEPS = $(addprefix deps/, $(addsuffix .dep, $(basename $(GRPC_EM_TEST_SRC))))
-
-bins/grpc_em_test: $(GRPC_EM_TEST_OBJS) libs/libgrpc_test_util.a libs/libgrpc.a libs/libgpr.a
-	$(E) "[LD]      Linking $@"
-	$(Q) mkdir -p `dirname $@`
-	$(Q) $(LD) $(LDFLAGS) $(GRPC_EM_TEST_OBJS) -Llibs -lgrpc_test_util -lgrpc -lgpr $(LDLIBS) $(LDLIBS_SECURE) -o bins/grpc_em_test
-
-deps_grpc_em_test: $(GRPC_EM_TEST_DEPS)
-
-ifneq ($(MAKECMDGOALS),clean)
--include $(GRPC_EM_TEST_DEPS)
-endif
-
-clean_grpc_em_test:
-	$(E) "[CLEAN]   Cleaning grpc_em_test files"
-	$(Q) $(RM) $(GRPC_EM_TEST_OBJS)
-	$(Q) $(RM) $(GRPC_EM_TEST_DEPS)
-	$(Q) $(RM) bins/grpc_em_test
-
-
-GRPC_EM_PIPE_TEST_SRC = \
-    test/core/eventmanager/em_pipe_test.c \
-
-GRPC_EM_PIPE_TEST_OBJS = $(addprefix objs/, $(addsuffix .o, $(basename $(GRPC_EM_PIPE_TEST_SRC))))
-GRPC_EM_PIPE_TEST_DEPS = $(addprefix deps/, $(addsuffix .dep, $(basename $(GRPC_EM_PIPE_TEST_SRC))))
-
-bins/grpc_em_pipe_test: $(GRPC_EM_PIPE_TEST_OBJS) libs/libgrpc_test_util.a libs/libgrpc.a libs/libgpr.a
-	$(E) "[LD]      Linking $@"
-	$(Q) mkdir -p `dirname $@`
-	$(Q) $(LD) $(LDFLAGS) $(GRPC_EM_PIPE_TEST_OBJS) -Llibs -lgrpc_test_util -lgrpc -lgpr $(LDLIBS) $(LDLIBS_SECURE) -o bins/grpc_em_pipe_test
-
-deps_grpc_em_pipe_test: $(GRPC_EM_PIPE_TEST_DEPS)
-
-ifneq ($(MAKECMDGOALS),clean)
--include $(GRPC_EM_PIPE_TEST_DEPS)
-endif
-
-clean_grpc_em_pipe_test:
-	$(E) "[CLEAN]   Cleaning grpc_em_pipe_test files"
-	$(Q) $(RM) $(GRPC_EM_PIPE_TEST_OBJS)
-	$(Q) $(RM) $(GRPC_EM_PIPE_TEST_DEPS)
-	$(Q) $(RM) bins/grpc_em_pipe_test
-
-
 GRPC_STREAM_OP_TEST_SRC = \
     test/core/transport/stream_op_test.c \
 
@@ -2243,28 +2195,28 @@
 	$(Q) $(RM) bins/chttp2_transport_end2end_test
 
 
-GRPC_TCP_TEST_SRC = \
-    test/core/endpoint/tcp_test.c \
+TCP_POSIX_TEST_SRC = \
+    test/core/iomgr/tcp_posix_test.c \
 
-GRPC_TCP_TEST_OBJS = $(addprefix objs/, $(addsuffix .o, $(basename $(GRPC_TCP_TEST_SRC))))
-GRPC_TCP_TEST_DEPS = $(addprefix deps/, $(addsuffix .dep, $(basename $(GRPC_TCP_TEST_SRC))))
+TCP_POSIX_TEST_OBJS = $(addprefix objs/, $(addsuffix .o, $(basename $(TCP_POSIX_TEST_SRC))))
+TCP_POSIX_TEST_DEPS = $(addprefix deps/, $(addsuffix .dep, $(basename $(TCP_POSIX_TEST_SRC))))
 
-bins/grpc_tcp_test: $(GRPC_TCP_TEST_OBJS) libs/libgrpc_test_util.a libs/libgrpc.a libs/libgpr.a
+bins/tcp_posix_test: $(TCP_POSIX_TEST_OBJS) libs/libgrpc_test_util.a libs/libgrpc.a libs/libgpr.a
 	$(E) "[LD]      Linking $@"
 	$(Q) mkdir -p `dirname $@`
-	$(Q) $(LD) $(LDFLAGS) $(GRPC_TCP_TEST_OBJS) -Llibs -lgrpc_test_util -lgrpc -lgpr $(LDLIBS) $(LDLIBS_SECURE) -o bins/grpc_tcp_test
+	$(Q) $(LD) $(LDFLAGS) $(TCP_POSIX_TEST_OBJS) -Llibs -lgrpc_test_util -lgrpc -lgpr $(LDLIBS) $(LDLIBS_SECURE) -o bins/tcp_posix_test
 
-deps_grpc_tcp_test: $(GRPC_TCP_TEST_DEPS)
+deps_tcp_posix_test: $(TCP_POSIX_TEST_DEPS)
 
 ifneq ($(MAKECMDGOALS),clean)
--include $(GRPC_TCP_TEST_DEPS)
+-include $(TCP_POSIX_TEST_DEPS)
 endif
 
-clean_grpc_tcp_test:
-	$(E) "[CLEAN]   Cleaning grpc_tcp_test files"
-	$(Q) $(RM) $(GRPC_TCP_TEST_OBJS)
-	$(Q) $(RM) $(GRPC_TCP_TEST_DEPS)
-	$(Q) $(RM) bins/grpc_tcp_test
+clean_tcp_posix_test:
+	$(E) "[CLEAN]   Cleaning tcp_posix_test files"
+	$(Q) $(RM) $(TCP_POSIX_TEST_OBJS)
+	$(Q) $(RM) $(TCP_POSIX_TEST_DEPS)
+	$(Q) $(RM) bins/tcp_posix_test
 
 
 DUALSTACK_SOCKET_TEST_SRC = \
@@ -2316,7 +2268,7 @@
 
 
 RESOLVE_ADDRESS_TEST_SRC = \
-    test/core/endpoint/resolve_address_test.c \
+    test/core/iomgr/resolve_address_test.c \
 
 RESOLVE_ADDRESS_TEST_OBJS = $(addprefix objs/, $(addsuffix .o, $(basename $(RESOLVE_ADDRESS_TEST_SRC))))
 RESOLVE_ADDRESS_TEST_DEPS = $(addprefix deps/, $(addsuffix .dep, $(basename $(RESOLVE_ADDRESS_TEST_SRC))))
@@ -2339,76 +2291,76 @@
 	$(Q) $(RM) bins/resolve_address_test
 
 
-SOCKET_UTILS_TEST_SRC = \
-    test/core/endpoint/socket_utils_test.c \
+SOCKADDR_UTILS_TEST_SRC = \
+    test/core/iomgr/sockaddr_utils_test.c \
 
-SOCKET_UTILS_TEST_OBJS = $(addprefix objs/, $(addsuffix .o, $(basename $(SOCKET_UTILS_TEST_SRC))))
-SOCKET_UTILS_TEST_DEPS = $(addprefix deps/, $(addsuffix .dep, $(basename $(SOCKET_UTILS_TEST_SRC))))
+SOCKADDR_UTILS_TEST_OBJS = $(addprefix objs/, $(addsuffix .o, $(basename $(SOCKADDR_UTILS_TEST_SRC))))
+SOCKADDR_UTILS_TEST_DEPS = $(addprefix deps/, $(addsuffix .dep, $(basename $(SOCKADDR_UTILS_TEST_SRC))))
 
-bins/socket_utils_test: $(SOCKET_UTILS_TEST_OBJS) libs/libgrpc_test_util.a libs/libgrpc.a libs/libgpr.a
+bins/sockaddr_utils_test: $(SOCKADDR_UTILS_TEST_OBJS) libs/libgrpc_test_util.a libs/libgrpc.a libs/libgpr.a
 	$(E) "[LD]      Linking $@"
 	$(Q) mkdir -p `dirname $@`
-	$(Q) $(LD) $(LDFLAGS) $(SOCKET_UTILS_TEST_OBJS) -Llibs -lgrpc_test_util -lgrpc -lgpr $(LDLIBS) $(LDLIBS_SECURE) -o bins/socket_utils_test
+	$(Q) $(LD) $(LDFLAGS) $(SOCKADDR_UTILS_TEST_OBJS) -Llibs -lgrpc_test_util -lgrpc -lgpr $(LDLIBS) $(LDLIBS_SECURE) -o bins/sockaddr_utils_test
 
-deps_socket_utils_test: $(SOCKET_UTILS_TEST_DEPS)
+deps_sockaddr_utils_test: $(SOCKADDR_UTILS_TEST_DEPS)
 
 ifneq ($(MAKECMDGOALS),clean)
--include $(SOCKET_UTILS_TEST_DEPS)
+-include $(SOCKADDR_UTILS_TEST_DEPS)
 endif
 
-clean_socket_utils_test:
-	$(E) "[CLEAN]   Cleaning socket_utils_test files"
-	$(Q) $(RM) $(SOCKET_UTILS_TEST_OBJS)
-	$(Q) $(RM) $(SOCKET_UTILS_TEST_DEPS)
-	$(Q) $(RM) bins/socket_utils_test
+clean_sockaddr_utils_test:
+	$(E) "[CLEAN]   Cleaning sockaddr_utils_test files"
+	$(Q) $(RM) $(SOCKADDR_UTILS_TEST_OBJS)
+	$(Q) $(RM) $(SOCKADDR_UTILS_TEST_DEPS)
+	$(Q) $(RM) bins/sockaddr_utils_test
 
 
-TCP_SERVER_TEST_SRC = \
-    test/core/endpoint/tcp_server_test.c \
+TCP_SERVER_POSIX_TEST_SRC = \
+    test/core/iomgr/tcp_server_posix_test.c \
 
-TCP_SERVER_TEST_OBJS = $(addprefix objs/, $(addsuffix .o, $(basename $(TCP_SERVER_TEST_SRC))))
-TCP_SERVER_TEST_DEPS = $(addprefix deps/, $(addsuffix .dep, $(basename $(TCP_SERVER_TEST_SRC))))
+TCP_SERVER_POSIX_TEST_OBJS = $(addprefix objs/, $(addsuffix .o, $(basename $(TCP_SERVER_POSIX_TEST_SRC))))
+TCP_SERVER_POSIX_TEST_DEPS = $(addprefix deps/, $(addsuffix .dep, $(basename $(TCP_SERVER_POSIX_TEST_SRC))))
 
-bins/tcp_server_test: $(TCP_SERVER_TEST_OBJS) libs/libgrpc_test_util.a libs/libgrpc.a libs/libgpr.a
+bins/tcp_server_posix_test: $(TCP_SERVER_POSIX_TEST_OBJS) libs/libgrpc_test_util.a libs/libgrpc.a libs/libgpr.a
 	$(E) "[LD]      Linking $@"
 	$(Q) mkdir -p `dirname $@`
-	$(Q) $(LD) $(LDFLAGS) $(TCP_SERVER_TEST_OBJS) -Llibs -lgrpc_test_util -lgrpc -lgpr $(LDLIBS) $(LDLIBS_SECURE) -o bins/tcp_server_test
+	$(Q) $(LD) $(LDFLAGS) $(TCP_SERVER_POSIX_TEST_OBJS) -Llibs -lgrpc_test_util -lgrpc -lgpr $(LDLIBS) $(LDLIBS_SECURE) -o bins/tcp_server_posix_test
 
-deps_tcp_server_test: $(TCP_SERVER_TEST_DEPS)
+deps_tcp_server_posix_test: $(TCP_SERVER_POSIX_TEST_DEPS)
 
 ifneq ($(MAKECMDGOALS),clean)
--include $(TCP_SERVER_TEST_DEPS)
+-include $(TCP_SERVER_POSIX_TEST_DEPS)
 endif
 
-clean_tcp_server_test:
-	$(E) "[CLEAN]   Cleaning tcp_server_test files"
-	$(Q) $(RM) $(TCP_SERVER_TEST_OBJS)
-	$(Q) $(RM) $(TCP_SERVER_TEST_DEPS)
-	$(Q) $(RM) bins/tcp_server_test
+clean_tcp_server_posix_test:
+	$(E) "[CLEAN]   Cleaning tcp_server_posix_test files"
+	$(Q) $(RM) $(TCP_SERVER_POSIX_TEST_OBJS)
+	$(Q) $(RM) $(TCP_SERVER_POSIX_TEST_DEPS)
+	$(Q) $(RM) bins/tcp_server_posix_test
 
 
-TCP_CLIENT_TEST_SRC = \
-    test/core/endpoint/tcp_client_test.c \
+TCP_CLIENT_POSIX_TEST_SRC = \
+    test/core/iomgr/tcp_client_posix_test.c \
 
-TCP_CLIENT_TEST_OBJS = $(addprefix objs/, $(addsuffix .o, $(basename $(TCP_CLIENT_TEST_SRC))))
-TCP_CLIENT_TEST_DEPS = $(addprefix deps/, $(addsuffix .dep, $(basename $(TCP_CLIENT_TEST_SRC))))
+TCP_CLIENT_POSIX_TEST_OBJS = $(addprefix objs/, $(addsuffix .o, $(basename $(TCP_CLIENT_POSIX_TEST_SRC))))
+TCP_CLIENT_POSIX_TEST_DEPS = $(addprefix deps/, $(addsuffix .dep, $(basename $(TCP_CLIENT_POSIX_TEST_SRC))))
 
-bins/tcp_client_test: $(TCP_CLIENT_TEST_OBJS) libs/libgrpc_test_util.a libs/libgrpc.a libs/libgpr.a
+bins/tcp_client_posix_test: $(TCP_CLIENT_POSIX_TEST_OBJS) libs/libgrpc_test_util.a libs/libgrpc.a libs/libgpr.a
 	$(E) "[LD]      Linking $@"
 	$(Q) mkdir -p `dirname $@`
-	$(Q) $(LD) $(LDFLAGS) $(TCP_CLIENT_TEST_OBJS) -Llibs -lgrpc_test_util -lgrpc -lgpr $(LDLIBS) $(LDLIBS_SECURE) -o bins/tcp_client_test
+	$(Q) $(LD) $(LDFLAGS) $(TCP_CLIENT_POSIX_TEST_OBJS) -Llibs -lgrpc_test_util -lgrpc -lgpr $(LDLIBS) $(LDLIBS_SECURE) -o bins/tcp_client_posix_test
 
-deps_tcp_client_test: $(TCP_CLIENT_TEST_DEPS)
+deps_tcp_client_posix_test: $(TCP_CLIENT_POSIX_TEST_DEPS)
 
 ifneq ($(MAKECMDGOALS),clean)
--include $(TCP_CLIENT_TEST_DEPS)
+-include $(TCP_CLIENT_POSIX_TEST_DEPS)
 endif
 
-clean_tcp_client_test:
-	$(E) "[CLEAN]   Cleaning tcp_client_test files"
-	$(Q) $(RM) $(TCP_CLIENT_TEST_OBJS)
-	$(Q) $(RM) $(TCP_CLIENT_TEST_DEPS)
-	$(Q) $(RM) bins/tcp_client_test
+clean_tcp_client_posix_test:
+	$(E) "[CLEAN]   Cleaning tcp_client_posix_test files"
+	$(Q) $(RM) $(TCP_CLIENT_POSIX_TEST_OBJS)
+	$(Q) $(RM) $(TCP_CLIENT_POSIX_TEST_DEPS)
+	$(Q) $(RM) bins/tcp_client_posix_test
 
 
 GRPC_CHANNEL_STACK_TEST_SRC = \
@@ -5457,4 +5409,4 @@
 
 
 
-.PHONY: all strip tools buildtests buildtests_c buildtests_cxx test test_c test_cxx install install_c install_cxx install-headers install-headers_c install-headers_cxx install-shared install-shared_c install-shared_cxx install-static install-static_c install-static_cxx strip strip-shared strip-static strip_c strip-shared_c strip-static_c strip_cxx strip-shared_cxx strip-static_cxx clean deps_libgpr clean_libgpr deps_libgrpc clean_libgrpc deps_libgrpc_test_util clean_libgrpc_test_util deps_libgrpc++ clean_libgrpc++ deps_libgrpc++_test_util clean_libgrpc++_test_util deps_libend2end_fixture_chttp2_fake_security clean_libend2end_fixture_chttp2_fake_security deps_libend2end_fixture_chttp2_fullstack clean_libend2end_fixture_chttp2_fullstack deps_libend2end_fixture_chttp2_simple_ssl_fullstack clean_libend2end_fixture_chttp2_simple_ssl_fullstack deps_libend2end_fixture_chttp2_simple_ssl_with_oauth2_fullstack clean_libend2end_fixture_chttp2_simple_ssl_with_oauth2_fullstack deps_libend2end_fixture_chttp2_socket_pair clean_libend2end_fixture_chttp2_socket_pair deps_libend2end_fixture_chttp2_socket_pair_one_byte_at_a_time clean_libend2end_fixture_chttp2_socket_pair_one_byte_at_a_time deps_libend2end_test_cancel_after_accept clean_libend2end_test_cancel_after_accept deps_libend2end_test_cancel_after_accept_and_writes_closed clean_libend2end_test_cancel_after_accept_and_writes_closed deps_libend2end_test_cancel_after_invoke clean_libend2end_test_cancel_after_invoke deps_libend2end_test_cancel_before_invoke clean_libend2end_test_cancel_before_invoke deps_libend2end_test_cancel_in_a_vacuum clean_libend2end_test_cancel_in_a_vacuum deps_libend2end_test_early_server_shutdown_finishes_inflight_calls clean_libend2end_test_early_server_shutdown_finishes_inflight_calls deps_libend2end_test_early_server_shutdown_finishes_tags clean_libend2end_test_early_server_shutdown_finishes_tags deps_libend2end_test_invoke_large_request clean_libend2end_test_invoke_large_request 
deps_libend2end_test_max_concurrent_streams clean_libend2end_test_max_concurrent_streams deps_libend2end_test_no_op clean_libend2end_test_no_op deps_libend2end_test_ping_pong_streaming clean_libend2end_test_ping_pong_streaming deps_libend2end_test_request_response_with_metadata_and_payload clean_libend2end_test_request_response_with_metadata_and_payload deps_libend2end_test_request_response_with_payload clean_libend2end_test_request_response_with_payload deps_libend2end_test_simple_delayed_request clean_libend2end_test_simple_delayed_request deps_libend2end_test_simple_request clean_libend2end_test_simple_request deps_libend2end_test_thread_stress clean_libend2end_test_thread_stress deps_libend2end_test_writes_done_hangs_with_pending_read clean_libend2end_test_writes_done_hangs_with_pending_read deps_libend2end_certs clean_libend2end_certs deps_libgrpc_unsecure clean_libgrpc_unsecure deps_gen_hpack_tables clean_gen_hpack_tables deps_grpc_byte_buffer_reader_test clean_grpc_byte_buffer_reader_test deps_gpr_cancellable_test clean_gpr_cancellable_test deps_gpr_log_test clean_gpr_log_test deps_gpr_cmdline_test clean_gpr_cmdline_test deps_gpr_histogram_test clean_gpr_histogram_test deps_gpr_host_port_test clean_gpr_host_port_test deps_gpr_slice_buffer_test clean_gpr_slice_buffer_test deps_gpr_slice_test clean_gpr_slice_test deps_gpr_string_test clean_gpr_string_test deps_gpr_sync_test clean_gpr_sync_test deps_gpr_thd_test clean_gpr_thd_test deps_gpr_time_test clean_gpr_time_test deps_murmur_hash_test clean_murmur_hash_test deps_grpc_em_test clean_grpc_em_test deps_grpc_em_pipe_test clean_grpc_em_pipe_test deps_grpc_stream_op_test clean_grpc_stream_op_test deps_alpn_test clean_alpn_test deps_chttp2_stream_encoder_test clean_chttp2_stream_encoder_test deps_hpack_table_test clean_hpack_table_test deps_chttp2_stream_map_test clean_chttp2_stream_map_test deps_hpack_parser_test clean_hpack_parser_test deps_transport_metadata_test clean_transport_metadata_test 
deps_chttp2_status_conversion_test clean_chttp2_status_conversion_test deps_chttp2_transport_end2end_test clean_chttp2_transport_end2end_test deps_grpc_tcp_test clean_grpc_tcp_test deps_dualstack_socket_test clean_dualstack_socket_test deps_no_server_test clean_no_server_test deps_resolve_address_test clean_resolve_address_test deps_socket_utils_test clean_socket_utils_test deps_tcp_server_test clean_tcp_server_test deps_tcp_client_test clean_tcp_client_test deps_grpc_channel_stack_test clean_grpc_channel_stack_test deps_metadata_buffer_test clean_metadata_buffer_test deps_grpc_completion_queue_test clean_grpc_completion_queue_test deps_grpc_completion_queue_benchmark clean_grpc_completion_queue_benchmark deps_census_window_stats_test clean_census_window_stats_test deps_census_statistics_quick_test clean_census_statistics_quick_test deps_census_statistics_performance_test clean_census_statistics_performance_test deps_census_statistics_multiple_writers_test clean_census_statistics_multiple_writers_test deps_census_statistics_multiple_writers_circular_buffer_test clean_census_statistics_multiple_writers_circular_buffer_test deps_census_stub_test clean_census_stub_test deps_census_hash_table_test clean_census_hash_table_test deps_fling_server clean_fling_server deps_fling_client clean_fling_client deps_fling_test clean_fling_test deps_echo_server clean_echo_server deps_echo_client clean_echo_client deps_echo_test clean_echo_test deps_low_level_ping_pong_benchmark clean_low_level_ping_pong_benchmark deps_message_compress_test clean_message_compress_test deps_bin_encoder_test clean_bin_encoder_test deps_secure_endpoint_test clean_secure_endpoint_test deps_httpcli_format_request_test clean_httpcli_format_request_test deps_httpcli_parser_test clean_httpcli_parser_test deps_httpcli_test clean_httpcli_test deps_grpc_credentials_test clean_grpc_credentials_test deps_fling_stream_test clean_fling_stream_test deps_lame_client_test clean_lame_client_test deps_thread_pool_test 
clean_thread_pool_test deps_status_test clean_status_test deps_chttp2_fake_security_cancel_after_accept_test clean_chttp2_fake_security_cancel_after_accept_test deps_chttp2_fake_security_cancel_after_accept_and_writes_closed_test clean_chttp2_fake_security_cancel_after_accept_and_writes_closed_test deps_chttp2_fake_security_cancel_after_invoke_test clean_chttp2_fake_security_cancel_after_invoke_test deps_chttp2_fake_security_cancel_before_invoke_test clean_chttp2_fake_security_cancel_before_invoke_test deps_chttp2_fake_security_cancel_in_a_vacuum_test clean_chttp2_fake_security_cancel_in_a_vacuum_test deps_chttp2_fake_security_early_server_shutdown_finishes_inflight_calls_test clean_chttp2_fake_security_early_server_shutdown_finishes_inflight_calls_test deps_chttp2_fake_security_early_server_shutdown_finishes_tags_test clean_chttp2_fake_security_early_server_shutdown_finishes_tags_test deps_chttp2_fake_security_invoke_large_request_test clean_chttp2_fake_security_invoke_large_request_test deps_chttp2_fake_security_max_concurrent_streams_test clean_chttp2_fake_security_max_concurrent_streams_test deps_chttp2_fake_security_no_op_test clean_chttp2_fake_security_no_op_test deps_chttp2_fake_security_ping_pong_streaming_test clean_chttp2_fake_security_ping_pong_streaming_test deps_chttp2_fake_security_request_response_with_metadata_and_payload_test clean_chttp2_fake_security_request_response_with_metadata_and_payload_test deps_chttp2_fake_security_request_response_with_payload_test clean_chttp2_fake_security_request_response_with_payload_test deps_chttp2_fake_security_simple_delayed_request_test clean_chttp2_fake_security_simple_delayed_request_test deps_chttp2_fake_security_simple_request_test clean_chttp2_fake_security_simple_request_test deps_chttp2_fake_security_thread_stress_test clean_chttp2_fake_security_thread_stress_test deps_chttp2_fake_security_writes_done_hangs_with_pending_read_test clean_chttp2_fake_security_writes_done_hangs_with_pending_read_test 
deps_chttp2_fullstack_cancel_after_accept_test clean_chttp2_fullstack_cancel_after_accept_test deps_chttp2_fullstack_cancel_after_accept_and_writes_closed_test clean_chttp2_fullstack_cancel_after_accept_and_writes_closed_test deps_chttp2_fullstack_cancel_after_invoke_test clean_chttp2_fullstack_cancel_after_invoke_test deps_chttp2_fullstack_cancel_before_invoke_test clean_chttp2_fullstack_cancel_before_invoke_test deps_chttp2_fullstack_cancel_in_a_vacuum_test clean_chttp2_fullstack_cancel_in_a_vacuum_test deps_chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_test clean_chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_test deps_chttp2_fullstack_early_server_shutdown_finishes_tags_test clean_chttp2_fullstack_early_server_shutdown_finishes_tags_test deps_chttp2_fullstack_invoke_large_request_test clean_chttp2_fullstack_invoke_large_request_test deps_chttp2_fullstack_max_concurrent_streams_test clean_chttp2_fullstack_max_concurrent_streams_test deps_chttp2_fullstack_no_op_test clean_chttp2_fullstack_no_op_test deps_chttp2_fullstack_ping_pong_streaming_test clean_chttp2_fullstack_ping_pong_streaming_test deps_chttp2_fullstack_request_response_with_metadata_and_payload_test clean_chttp2_fullstack_request_response_with_metadata_and_payload_test deps_chttp2_fullstack_request_response_with_payload_test clean_chttp2_fullstack_request_response_with_payload_test deps_chttp2_fullstack_simple_delayed_request_test clean_chttp2_fullstack_simple_delayed_request_test deps_chttp2_fullstack_simple_request_test clean_chttp2_fullstack_simple_request_test deps_chttp2_fullstack_thread_stress_test clean_chttp2_fullstack_thread_stress_test deps_chttp2_fullstack_writes_done_hangs_with_pending_read_test clean_chttp2_fullstack_writes_done_hangs_with_pending_read_test deps_chttp2_simple_ssl_fullstack_cancel_after_accept_test clean_chttp2_simple_ssl_fullstack_cancel_after_accept_test deps_chttp2_simple_ssl_fullstack_cancel_after_accept_and_writes_closed_test 
clean_chttp2_simple_ssl_fullstack_cancel_after_accept_and_writes_closed_test deps_chttp2_simple_ssl_fullstack_cancel_after_invoke_test clean_chttp2_simple_ssl_fullstack_cancel_after_invoke_test deps_chttp2_simple_ssl_fullstack_cancel_before_invoke_test clean_chttp2_simple_ssl_fullstack_cancel_before_invoke_test deps_chttp2_simple_ssl_fullstack_cancel_in_a_vacuum_test clean_chttp2_simple_ssl_fullstack_cancel_in_a_vacuum_test deps_chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_inflight_calls_test clean_chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_inflight_calls_test deps_chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_tags_test clean_chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_tags_test deps_chttp2_simple_ssl_fullstack_invoke_large_request_test clean_chttp2_simple_ssl_fullstack_invoke_large_request_test deps_chttp2_simple_ssl_fullstack_max_concurrent_streams_test clean_chttp2_simple_ssl_fullstack_max_concurrent_streams_test deps_chttp2_simple_ssl_fullstack_no_op_test clean_chttp2_simple_ssl_fullstack_no_op_test deps_chttp2_simple_ssl_fullstack_ping_pong_streaming_test clean_chttp2_simple_ssl_fullstack_ping_pong_streaming_test deps_chttp2_simple_ssl_fullstack_request_response_with_metadata_and_payload_test clean_chttp2_simple_ssl_fullstack_request_response_with_metadata_and_payload_test deps_chttp2_simple_ssl_fullstack_request_response_with_payload_test clean_chttp2_simple_ssl_fullstack_request_response_with_payload_test deps_chttp2_simple_ssl_fullstack_simple_delayed_request_test clean_chttp2_simple_ssl_fullstack_simple_delayed_request_test deps_chttp2_simple_ssl_fullstack_simple_request_test clean_chttp2_simple_ssl_fullstack_simple_request_test deps_chttp2_simple_ssl_fullstack_thread_stress_test clean_chttp2_simple_ssl_fullstack_thread_stress_test deps_chttp2_simple_ssl_fullstack_writes_done_hangs_with_pending_read_test clean_chttp2_simple_ssl_fullstack_writes_done_hangs_with_pending_read_test 
deps_chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test clean_chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test deps_chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_and_writes_closed_test clean_chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_and_writes_closed_test deps_chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_invoke_test clean_chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_invoke_test deps_chttp2_simple_ssl_with_oauth2_fullstack_cancel_before_invoke_test clean_chttp2_simple_ssl_with_oauth2_fullstack_cancel_before_invoke_test deps_chttp2_simple_ssl_with_oauth2_fullstack_cancel_in_a_vacuum_test clean_chttp2_simple_ssl_with_oauth2_fullstack_cancel_in_a_vacuum_test deps_chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_inflight_calls_test clean_chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_inflight_calls_test deps_chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_tags_test clean_chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_tags_test deps_chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test clean_chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test deps_chttp2_simple_ssl_with_oauth2_fullstack_max_concurrent_streams_test clean_chttp2_simple_ssl_with_oauth2_fullstack_max_concurrent_streams_test deps_chttp2_simple_ssl_with_oauth2_fullstack_no_op_test clean_chttp2_simple_ssl_with_oauth2_fullstack_no_op_test deps_chttp2_simple_ssl_with_oauth2_fullstack_ping_pong_streaming_test clean_chttp2_simple_ssl_with_oauth2_fullstack_ping_pong_streaming_test deps_chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_metadata_and_payload_test clean_chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_metadata_and_payload_test deps_chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_test clean_chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_test 
deps_chttp2_simple_ssl_with_oauth2_fullstack_simple_delayed_request_test clean_chttp2_simple_ssl_with_oauth2_fullstack_simple_delayed_request_test deps_chttp2_simple_ssl_with_oauth2_fullstack_simple_request_test clean_chttp2_simple_ssl_with_oauth2_fullstack_simple_request_test deps_chttp2_simple_ssl_with_oauth2_fullstack_thread_stress_test clean_chttp2_simple_ssl_with_oauth2_fullstack_thread_stress_test deps_chttp2_simple_ssl_with_oauth2_fullstack_writes_done_hangs_with_pending_read_test clean_chttp2_simple_ssl_with_oauth2_fullstack_writes_done_hangs_with_pending_read_test deps_chttp2_socket_pair_cancel_after_accept_test clean_chttp2_socket_pair_cancel_after_accept_test deps_chttp2_socket_pair_cancel_after_accept_and_writes_closed_test clean_chttp2_socket_pair_cancel_after_accept_and_writes_closed_test deps_chttp2_socket_pair_cancel_after_invoke_test clean_chttp2_socket_pair_cancel_after_invoke_test deps_chttp2_socket_pair_cancel_before_invoke_test clean_chttp2_socket_pair_cancel_before_invoke_test deps_chttp2_socket_pair_cancel_in_a_vacuum_test clean_chttp2_socket_pair_cancel_in_a_vacuum_test deps_chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_test clean_chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_test deps_chttp2_socket_pair_early_server_shutdown_finishes_tags_test clean_chttp2_socket_pair_early_server_shutdown_finishes_tags_test deps_chttp2_socket_pair_invoke_large_request_test clean_chttp2_socket_pair_invoke_large_request_test deps_chttp2_socket_pair_max_concurrent_streams_test clean_chttp2_socket_pair_max_concurrent_streams_test deps_chttp2_socket_pair_no_op_test clean_chttp2_socket_pair_no_op_test deps_chttp2_socket_pair_ping_pong_streaming_test clean_chttp2_socket_pair_ping_pong_streaming_test deps_chttp2_socket_pair_request_response_with_metadata_and_payload_test clean_chttp2_socket_pair_request_response_with_metadata_and_payload_test deps_chttp2_socket_pair_request_response_with_payload_test 
clean_chttp2_socket_pair_request_response_with_payload_test deps_chttp2_socket_pair_simple_delayed_request_test clean_chttp2_socket_pair_simple_delayed_request_test deps_chttp2_socket_pair_simple_request_test clean_chttp2_socket_pair_simple_request_test deps_chttp2_socket_pair_thread_stress_test clean_chttp2_socket_pair_thread_stress_test deps_chttp2_socket_pair_writes_done_hangs_with_pending_read_test clean_chttp2_socket_pair_writes_done_hangs_with_pending_read_test deps_chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test clean_chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test deps_chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_test clean_chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_test deps_chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_test clean_chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_test deps_chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_test clean_chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_test deps_chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_test clean_chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_test deps_chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_test clean_chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_test deps_chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_test clean_chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_test deps_chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test clean_chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test deps_chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_test clean_chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_test deps_chttp2_socket_pair_one_byte_at_a_time_no_op_test clean_chttp2_socket_pair_one_byte_at_a_time_no_op_test 
deps_chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_test clean_chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_test deps_chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_test clean_chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_test deps_chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_test clean_chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_test deps_chttp2_socket_pair_one_byte_at_a_time_simple_delayed_request_test clean_chttp2_socket_pair_one_byte_at_a_time_simple_delayed_request_test deps_chttp2_socket_pair_one_byte_at_a_time_simple_request_test clean_chttp2_socket_pair_one_byte_at_a_time_simple_request_test deps_chttp2_socket_pair_one_byte_at_a_time_thread_stress_test clean_chttp2_socket_pair_one_byte_at_a_time_thread_stress_test deps_chttp2_socket_pair_one_byte_at_a_time_writes_done_hangs_with_pending_read_test clean_chttp2_socket_pair_one_byte_at_a_time_writes_done_hangs_with_pending_read_test
+.PHONY: all strip tools buildtests buildtests_c buildtests_cxx test test_c test_cxx install install_c install_cxx install-headers install-headers_c install-headers_cxx install-shared install-shared_c install-shared_cxx install-static install-static_c install-static_cxx strip strip-shared strip-static strip_c strip-shared_c strip-static_c strip_cxx strip-shared_cxx strip-static_cxx clean deps_libgpr clean_libgpr deps_libgrpc clean_libgrpc deps_libgrpc_test_util clean_libgrpc_test_util deps_libgrpc++ clean_libgrpc++ deps_libgrpc++_test_util clean_libgrpc++_test_util deps_libend2end_fixture_chttp2_fake_security clean_libend2end_fixture_chttp2_fake_security deps_libend2end_fixture_chttp2_fullstack clean_libend2end_fixture_chttp2_fullstack deps_libend2end_fixture_chttp2_simple_ssl_fullstack clean_libend2end_fixture_chttp2_simple_ssl_fullstack deps_libend2end_fixture_chttp2_simple_ssl_with_oauth2_fullstack clean_libend2end_fixture_chttp2_simple_ssl_with_oauth2_fullstack deps_libend2end_fixture_chttp2_socket_pair clean_libend2end_fixture_chttp2_socket_pair deps_libend2end_fixture_chttp2_socket_pair_one_byte_at_a_time clean_libend2end_fixture_chttp2_socket_pair_one_byte_at_a_time deps_libend2end_test_cancel_after_accept clean_libend2end_test_cancel_after_accept deps_libend2end_test_cancel_after_accept_and_writes_closed clean_libend2end_test_cancel_after_accept_and_writes_closed deps_libend2end_test_cancel_after_invoke clean_libend2end_test_cancel_after_invoke deps_libend2end_test_cancel_before_invoke clean_libend2end_test_cancel_before_invoke deps_libend2end_test_cancel_in_a_vacuum clean_libend2end_test_cancel_in_a_vacuum deps_libend2end_test_early_server_shutdown_finishes_inflight_calls clean_libend2end_test_early_server_shutdown_finishes_inflight_calls deps_libend2end_test_early_server_shutdown_finishes_tags clean_libend2end_test_early_server_shutdown_finishes_tags deps_libend2end_test_invoke_large_request clean_libend2end_test_invoke_large_request 
deps_libend2end_test_max_concurrent_streams clean_libend2end_test_max_concurrent_streams deps_libend2end_test_no_op clean_libend2end_test_no_op deps_libend2end_test_ping_pong_streaming clean_libend2end_test_ping_pong_streaming deps_libend2end_test_request_response_with_metadata_and_payload clean_libend2end_test_request_response_with_metadata_and_payload deps_libend2end_test_request_response_with_payload clean_libend2end_test_request_response_with_payload deps_libend2end_test_simple_delayed_request clean_libend2end_test_simple_delayed_request deps_libend2end_test_simple_request clean_libend2end_test_simple_request deps_libend2end_test_thread_stress clean_libend2end_test_thread_stress deps_libend2end_test_writes_done_hangs_with_pending_read clean_libend2end_test_writes_done_hangs_with_pending_read deps_libend2end_certs clean_libend2end_certs deps_libgrpc_unsecure clean_libgrpc_unsecure deps_gen_hpack_tables clean_gen_hpack_tables deps_grpc_byte_buffer_reader_test clean_grpc_byte_buffer_reader_test deps_gpr_cancellable_test clean_gpr_cancellable_test deps_gpr_log_test clean_gpr_log_test deps_gpr_cmdline_test clean_gpr_cmdline_test deps_gpr_histogram_test clean_gpr_histogram_test deps_gpr_host_port_test clean_gpr_host_port_test deps_gpr_slice_buffer_test clean_gpr_slice_buffer_test deps_gpr_slice_test clean_gpr_slice_test deps_gpr_string_test clean_gpr_string_test deps_gpr_sync_test clean_gpr_sync_test deps_gpr_thd_test clean_gpr_thd_test deps_gpr_time_test clean_gpr_time_test deps_murmur_hash_test clean_murmur_hash_test deps_grpc_stream_op_test clean_grpc_stream_op_test deps_alpn_test clean_alpn_test deps_chttp2_stream_encoder_test clean_chttp2_stream_encoder_test deps_hpack_table_test clean_hpack_table_test deps_chttp2_stream_map_test clean_chttp2_stream_map_test deps_hpack_parser_test clean_hpack_parser_test deps_transport_metadata_test clean_transport_metadata_test deps_chttp2_status_conversion_test clean_chttp2_status_conversion_test 
deps_chttp2_transport_end2end_test clean_chttp2_transport_end2end_test deps_tcp_posix_test clean_tcp_posix_test deps_dualstack_socket_test clean_dualstack_socket_test deps_no_server_test clean_no_server_test deps_resolve_address_test clean_resolve_address_test deps_sockaddr_utils_test clean_sockaddr_utils_test deps_tcp_server_posix_test clean_tcp_server_posix_test deps_tcp_client_posix_test clean_tcp_client_posix_test deps_grpc_channel_stack_test clean_grpc_channel_stack_test deps_metadata_buffer_test clean_metadata_buffer_test deps_grpc_completion_queue_test clean_grpc_completion_queue_test deps_grpc_completion_queue_benchmark clean_grpc_completion_queue_benchmark deps_census_window_stats_test clean_census_window_stats_test deps_census_statistics_quick_test clean_census_statistics_quick_test deps_census_statistics_performance_test clean_census_statistics_performance_test deps_census_statistics_multiple_writers_test clean_census_statistics_multiple_writers_test deps_census_statistics_multiple_writers_circular_buffer_test clean_census_statistics_multiple_writers_circular_buffer_test deps_census_stub_test clean_census_stub_test deps_census_hash_table_test clean_census_hash_table_test deps_fling_server clean_fling_server deps_fling_client clean_fling_client deps_fling_test clean_fling_test deps_echo_server clean_echo_server deps_echo_client clean_echo_client deps_echo_test clean_echo_test deps_low_level_ping_pong_benchmark clean_low_level_ping_pong_benchmark deps_message_compress_test clean_message_compress_test deps_bin_encoder_test clean_bin_encoder_test deps_secure_endpoint_test clean_secure_endpoint_test deps_httpcli_format_request_test clean_httpcli_format_request_test deps_httpcli_parser_test clean_httpcli_parser_test deps_httpcli_test clean_httpcli_test deps_grpc_credentials_test clean_grpc_credentials_test deps_fling_stream_test clean_fling_stream_test deps_lame_client_test clean_lame_client_test deps_thread_pool_test clean_thread_pool_test deps_status_test 
clean_status_test deps_chttp2_fake_security_cancel_after_accept_test clean_chttp2_fake_security_cancel_after_accept_test deps_chttp2_fake_security_cancel_after_accept_and_writes_closed_test clean_chttp2_fake_security_cancel_after_accept_and_writes_closed_test deps_chttp2_fake_security_cancel_after_invoke_test clean_chttp2_fake_security_cancel_after_invoke_test deps_chttp2_fake_security_cancel_before_invoke_test clean_chttp2_fake_security_cancel_before_invoke_test deps_chttp2_fake_security_cancel_in_a_vacuum_test clean_chttp2_fake_security_cancel_in_a_vacuum_test deps_chttp2_fake_security_early_server_shutdown_finishes_inflight_calls_test clean_chttp2_fake_security_early_server_shutdown_finishes_inflight_calls_test deps_chttp2_fake_security_early_server_shutdown_finishes_tags_test clean_chttp2_fake_security_early_server_shutdown_finishes_tags_test deps_chttp2_fake_security_invoke_large_request_test clean_chttp2_fake_security_invoke_large_request_test deps_chttp2_fake_security_max_concurrent_streams_test clean_chttp2_fake_security_max_concurrent_streams_test deps_chttp2_fake_security_no_op_test clean_chttp2_fake_security_no_op_test deps_chttp2_fake_security_ping_pong_streaming_test clean_chttp2_fake_security_ping_pong_streaming_test deps_chttp2_fake_security_request_response_with_metadata_and_payload_test clean_chttp2_fake_security_request_response_with_metadata_and_payload_test deps_chttp2_fake_security_request_response_with_payload_test clean_chttp2_fake_security_request_response_with_payload_test deps_chttp2_fake_security_simple_delayed_request_test clean_chttp2_fake_security_simple_delayed_request_test deps_chttp2_fake_security_simple_request_test clean_chttp2_fake_security_simple_request_test deps_chttp2_fake_security_thread_stress_test clean_chttp2_fake_security_thread_stress_test deps_chttp2_fake_security_writes_done_hangs_with_pending_read_test clean_chttp2_fake_security_writes_done_hangs_with_pending_read_test deps_chttp2_fullstack_cancel_after_accept_test 
clean_chttp2_fullstack_cancel_after_accept_test deps_chttp2_fullstack_cancel_after_accept_and_writes_closed_test clean_chttp2_fullstack_cancel_after_accept_and_writes_closed_test deps_chttp2_fullstack_cancel_after_invoke_test clean_chttp2_fullstack_cancel_after_invoke_test deps_chttp2_fullstack_cancel_before_invoke_test clean_chttp2_fullstack_cancel_before_invoke_test deps_chttp2_fullstack_cancel_in_a_vacuum_test clean_chttp2_fullstack_cancel_in_a_vacuum_test deps_chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_test clean_chttp2_fullstack_early_server_shutdown_finishes_inflight_calls_test deps_chttp2_fullstack_early_server_shutdown_finishes_tags_test clean_chttp2_fullstack_early_server_shutdown_finishes_tags_test deps_chttp2_fullstack_invoke_large_request_test clean_chttp2_fullstack_invoke_large_request_test deps_chttp2_fullstack_max_concurrent_streams_test clean_chttp2_fullstack_max_concurrent_streams_test deps_chttp2_fullstack_no_op_test clean_chttp2_fullstack_no_op_test deps_chttp2_fullstack_ping_pong_streaming_test clean_chttp2_fullstack_ping_pong_streaming_test deps_chttp2_fullstack_request_response_with_metadata_and_payload_test clean_chttp2_fullstack_request_response_with_metadata_and_payload_test deps_chttp2_fullstack_request_response_with_payload_test clean_chttp2_fullstack_request_response_with_payload_test deps_chttp2_fullstack_simple_delayed_request_test clean_chttp2_fullstack_simple_delayed_request_test deps_chttp2_fullstack_simple_request_test clean_chttp2_fullstack_simple_request_test deps_chttp2_fullstack_thread_stress_test clean_chttp2_fullstack_thread_stress_test deps_chttp2_fullstack_writes_done_hangs_with_pending_read_test clean_chttp2_fullstack_writes_done_hangs_with_pending_read_test deps_chttp2_simple_ssl_fullstack_cancel_after_accept_test clean_chttp2_simple_ssl_fullstack_cancel_after_accept_test deps_chttp2_simple_ssl_fullstack_cancel_after_accept_and_writes_closed_test 
clean_chttp2_simple_ssl_fullstack_cancel_after_accept_and_writes_closed_test deps_chttp2_simple_ssl_fullstack_cancel_after_invoke_test clean_chttp2_simple_ssl_fullstack_cancel_after_invoke_test deps_chttp2_simple_ssl_fullstack_cancel_before_invoke_test clean_chttp2_simple_ssl_fullstack_cancel_before_invoke_test deps_chttp2_simple_ssl_fullstack_cancel_in_a_vacuum_test clean_chttp2_simple_ssl_fullstack_cancel_in_a_vacuum_test deps_chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_inflight_calls_test clean_chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_inflight_calls_test deps_chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_tags_test clean_chttp2_simple_ssl_fullstack_early_server_shutdown_finishes_tags_test deps_chttp2_simple_ssl_fullstack_invoke_large_request_test clean_chttp2_simple_ssl_fullstack_invoke_large_request_test deps_chttp2_simple_ssl_fullstack_max_concurrent_streams_test clean_chttp2_simple_ssl_fullstack_max_concurrent_streams_test deps_chttp2_simple_ssl_fullstack_no_op_test clean_chttp2_simple_ssl_fullstack_no_op_test deps_chttp2_simple_ssl_fullstack_ping_pong_streaming_test clean_chttp2_simple_ssl_fullstack_ping_pong_streaming_test deps_chttp2_simple_ssl_fullstack_request_response_with_metadata_and_payload_test clean_chttp2_simple_ssl_fullstack_request_response_with_metadata_and_payload_test deps_chttp2_simple_ssl_fullstack_request_response_with_payload_test clean_chttp2_simple_ssl_fullstack_request_response_with_payload_test deps_chttp2_simple_ssl_fullstack_simple_delayed_request_test clean_chttp2_simple_ssl_fullstack_simple_delayed_request_test deps_chttp2_simple_ssl_fullstack_simple_request_test clean_chttp2_simple_ssl_fullstack_simple_request_test deps_chttp2_simple_ssl_fullstack_thread_stress_test clean_chttp2_simple_ssl_fullstack_thread_stress_test deps_chttp2_simple_ssl_fullstack_writes_done_hangs_with_pending_read_test clean_chttp2_simple_ssl_fullstack_writes_done_hangs_with_pending_read_test 
deps_chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test clean_chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_test deps_chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_and_writes_closed_test clean_chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_accept_and_writes_closed_test deps_chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_invoke_test clean_chttp2_simple_ssl_with_oauth2_fullstack_cancel_after_invoke_test deps_chttp2_simple_ssl_with_oauth2_fullstack_cancel_before_invoke_test clean_chttp2_simple_ssl_with_oauth2_fullstack_cancel_before_invoke_test deps_chttp2_simple_ssl_with_oauth2_fullstack_cancel_in_a_vacuum_test clean_chttp2_simple_ssl_with_oauth2_fullstack_cancel_in_a_vacuum_test deps_chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_inflight_calls_test clean_chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_inflight_calls_test deps_chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_tags_test clean_chttp2_simple_ssl_with_oauth2_fullstack_early_server_shutdown_finishes_tags_test deps_chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test clean_chttp2_simple_ssl_with_oauth2_fullstack_invoke_large_request_test deps_chttp2_simple_ssl_with_oauth2_fullstack_max_concurrent_streams_test clean_chttp2_simple_ssl_with_oauth2_fullstack_max_concurrent_streams_test deps_chttp2_simple_ssl_with_oauth2_fullstack_no_op_test clean_chttp2_simple_ssl_with_oauth2_fullstack_no_op_test deps_chttp2_simple_ssl_with_oauth2_fullstack_ping_pong_streaming_test clean_chttp2_simple_ssl_with_oauth2_fullstack_ping_pong_streaming_test deps_chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_metadata_and_payload_test clean_chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_metadata_and_payload_test deps_chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_test clean_chttp2_simple_ssl_with_oauth2_fullstack_request_response_with_payload_test 
deps_chttp2_simple_ssl_with_oauth2_fullstack_simple_delayed_request_test clean_chttp2_simple_ssl_with_oauth2_fullstack_simple_delayed_request_test deps_chttp2_simple_ssl_with_oauth2_fullstack_simple_request_test clean_chttp2_simple_ssl_with_oauth2_fullstack_simple_request_test deps_chttp2_simple_ssl_with_oauth2_fullstack_thread_stress_test clean_chttp2_simple_ssl_with_oauth2_fullstack_thread_stress_test deps_chttp2_simple_ssl_with_oauth2_fullstack_writes_done_hangs_with_pending_read_test clean_chttp2_simple_ssl_with_oauth2_fullstack_writes_done_hangs_with_pending_read_test deps_chttp2_socket_pair_cancel_after_accept_test clean_chttp2_socket_pair_cancel_after_accept_test deps_chttp2_socket_pair_cancel_after_accept_and_writes_closed_test clean_chttp2_socket_pair_cancel_after_accept_and_writes_closed_test deps_chttp2_socket_pair_cancel_after_invoke_test clean_chttp2_socket_pair_cancel_after_invoke_test deps_chttp2_socket_pair_cancel_before_invoke_test clean_chttp2_socket_pair_cancel_before_invoke_test deps_chttp2_socket_pair_cancel_in_a_vacuum_test clean_chttp2_socket_pair_cancel_in_a_vacuum_test deps_chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_test clean_chttp2_socket_pair_early_server_shutdown_finishes_inflight_calls_test deps_chttp2_socket_pair_early_server_shutdown_finishes_tags_test clean_chttp2_socket_pair_early_server_shutdown_finishes_tags_test deps_chttp2_socket_pair_invoke_large_request_test clean_chttp2_socket_pair_invoke_large_request_test deps_chttp2_socket_pair_max_concurrent_streams_test clean_chttp2_socket_pair_max_concurrent_streams_test deps_chttp2_socket_pair_no_op_test clean_chttp2_socket_pair_no_op_test deps_chttp2_socket_pair_ping_pong_streaming_test clean_chttp2_socket_pair_ping_pong_streaming_test deps_chttp2_socket_pair_request_response_with_metadata_and_payload_test clean_chttp2_socket_pair_request_response_with_metadata_and_payload_test deps_chttp2_socket_pair_request_response_with_payload_test 
clean_chttp2_socket_pair_request_response_with_payload_test deps_chttp2_socket_pair_simple_delayed_request_test clean_chttp2_socket_pair_simple_delayed_request_test deps_chttp2_socket_pair_simple_request_test clean_chttp2_socket_pair_simple_request_test deps_chttp2_socket_pair_thread_stress_test clean_chttp2_socket_pair_thread_stress_test deps_chttp2_socket_pair_writes_done_hangs_with_pending_read_test clean_chttp2_socket_pair_writes_done_hangs_with_pending_read_test deps_chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test clean_chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_test deps_chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_test clean_chttp2_socket_pair_one_byte_at_a_time_cancel_after_accept_and_writes_closed_test deps_chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_test clean_chttp2_socket_pair_one_byte_at_a_time_cancel_after_invoke_test deps_chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_test clean_chttp2_socket_pair_one_byte_at_a_time_cancel_before_invoke_test deps_chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_test clean_chttp2_socket_pair_one_byte_at_a_time_cancel_in_a_vacuum_test deps_chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_test clean_chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_inflight_calls_test deps_chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_test clean_chttp2_socket_pair_one_byte_at_a_time_early_server_shutdown_finishes_tags_test deps_chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test clean_chttp2_socket_pair_one_byte_at_a_time_invoke_large_request_test deps_chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_test clean_chttp2_socket_pair_one_byte_at_a_time_max_concurrent_streams_test deps_chttp2_socket_pair_one_byte_at_a_time_no_op_test clean_chttp2_socket_pair_one_byte_at_a_time_no_op_test 
deps_chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_test clean_chttp2_socket_pair_one_byte_at_a_time_ping_pong_streaming_test deps_chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_test clean_chttp2_socket_pair_one_byte_at_a_time_request_response_with_metadata_and_payload_test deps_chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_test clean_chttp2_socket_pair_one_byte_at_a_time_request_response_with_payload_test deps_chttp2_socket_pair_one_byte_at_a_time_simple_delayed_request_test clean_chttp2_socket_pair_one_byte_at_a_time_simple_delayed_request_test deps_chttp2_socket_pair_one_byte_at_a_time_simple_request_test clean_chttp2_socket_pair_one_byte_at_a_time_simple_request_test deps_chttp2_socket_pair_one_byte_at_a_time_thread_stress_test clean_chttp2_socket_pair_one_byte_at_a_time_thread_stress_test deps_chttp2_socket_pair_one_byte_at_a_time_writes_done_hangs_with_pending_read_test clean_chttp2_socket_pair_one_byte_at_a_time_writes_done_hangs_with_pending_read_test
diff --git a/build.json b/build.json
index e185d66..b66c5ea 100644
--- a/build.json
+++ b/build.json
@@ -110,25 +110,46 @@
         "src/core/compression/algorithm.c",
         "src/core/compression/message_compress.c",
         "src/core/endpoint/endpoint.c",
-        "src/core/endpoint/resolve_address.c",
-        "src/core/endpoint/socket_utils.c",
-        "src/core/endpoint/socket_utils_linux.c",
-        "src/core/endpoint/socket_utils_posix.c",
-        "src/core/endpoint/tcp.c",
-        "src/core/endpoint/tcp_client.c",
-        "src/core/endpoint/tcp_server.c",
-        "src/core/eventmanager/em.c",
-        "src/core/eventmanager/em_posix.c",
+        "src/core/endpoint/secure_endpoint.c",
+        "src/core/httpcli/format_request.c",
+        "src/core/httpcli/httpcli.c",
+        "src/core/httpcli/httpcli_security_context.c",
+        "src/core/httpcli/parser.c",
+        "src/core/iomgr/endpoint_pair_posix.c",
+        "src/core/iomgr/iomgr_libevent.c",
+        "src/core/iomgr/iomgr_libevent_use_threads.c",
+        "src/core/iomgr/resolve_address_posix.c",
+        "src/core/iomgr/sockaddr_utils.c",
+        "src/core/iomgr/socket_utils_common_posix.c",
+        "src/core/iomgr/socket_utils_linux.c",
+        "src/core/iomgr/socket_utils_posix.c",
+        "src/core/iomgr/tcp_client_posix.c",
+        "src/core/iomgr/tcp_posix.c",
+        "src/core/iomgr/tcp_server_posix.c",
+        "src/core/security/auth.c",
+        "src/core/security/credentials.c",
+        "src/core/security/google_root_certs.c",
+        "src/core/security/secure_transport_setup.c",
+        "src/core/security/security_context.c",
+        "src/core/security/server_secure_chttp2.c",
+        "src/core/statistics/census_init.c",
+        "src/core/statistics/census_rpc_stats.c",
+        "src/core/statistics/census_tracing.c",
+        "src/core/statistics/hash_table.c",
+        "src/core/statistics/log.c",
+        "src/core/statistics/window_stats.c",
         "src/core/surface/byte_buffer.c",
         "src/core/surface/byte_buffer_reader.c",
         "src/core/surface/call.c",
         "src/core/surface/channel.c",
         "src/core/surface/channel_create.c",
         "src/core/surface/client.c",
-        "src/core/surface/lame_client.c",
         "src/core/surface/completion_queue.c",
         "src/core/surface/event_string.c",
         "src/core/surface/init.c",
+        "src/core/surface/lame_client.c",
+        "src/core/surface/secure_channel_create.c",
+        "src/core/surface/secure_server_create.c",
         "src/core/surface/server.c",
         "src/core/surface/server_chttp2.c",
         "src/core/surface/server_create.c",
@@ -148,33 +169,14 @@
         "src/core/transport/chttp2/stream_encoder.c",
         "src/core/transport/chttp2/stream_map.c",
         "src/core/transport/chttp2/timeout_encoding.c",
-        "src/core/transport/chttp2/varint.c",
         "src/core/transport/chttp2_transport.c",
+        "src/core/transport/chttp2/varint.c",
         "src/core/transport/metadata.c",
         "src/core/transport/stream_op.c",
         "src/core/transport/transport.c",
-        "src/core/statistics/census_init.c",
-        "src/core/statistics/census_rpc_stats.c",
-        "src/core/statistics/census_tracing.c",
-        "src/core/statistics/log.c",
-        "src/core/statistics/window_stats.c",
-        "src/core/statistics/hash_table.c",
-        "src/core/httpcli/format_request.c",
-        "src/core/httpcli/httpcli.c",
-        "src/core/httpcli/httpcli_security_context.c",
-        "src/core/httpcli/parser.c",
-        "src/core/security/auth.c",
-        "src/core/security/credentials.c",
-        "src/core/security/google_root_certs.c",
-        "src/core/security/secure_transport_setup.c",
-        "src/core/security/security_context.c",
-        "src/core/security/server_secure_chttp2.c",
-        "src/core/surface/secure_channel_create.c",
-        "src/core/surface/secure_server_create.c",
-        "src/core/endpoint/secure_endpoint.c",
-        "src/core/tsi/transport_security.c",
         "src/core/tsi/fake_transport_security.c",
         "src/core/tsi/ssl_transport_security.c",
+        "src/core/tsi/transport_security.c",
         "third_party/cJSON/cJSON.c"
       ],
       "public_headers": [
@@ -199,17 +201,24 @@
         "src/core/compression/algorithm.h",
         "src/core/compression/message_compress.h",
         "src/core/endpoint/endpoint.h",
-        "src/core/endpoint/resolve_address.h",
-        "src/core/endpoint/secure_endpoint.h",
-        "src/core/endpoint/socket_utils.h",
-        "src/core/endpoint/tcp_client.h",
-        "src/core/endpoint/tcp.h",
-        "src/core/endpoint/tcp_server.h",
-        "src/core/eventmanager/em.h",
         "src/core/httpcli/format_request.h",
         "src/core/httpcli/httpcli.h",
         "src/core/httpcli/httpcli_security_context.h",
         "src/core/httpcli/parser.h",
+        "src/core/iomgr/alarm.h",
+        "src/core/iomgr/endpoint_pair.h",
+        "src/core/iomgr/iomgr_completion_queue_interface.h",
+        "src/core/iomgr/iomgr.h",
+        "src/core/iomgr/iomgr_libevent.h",
+        "src/core/iomgr/resolve_address.h",
+        "src/core/iomgr/sockaddr.h",
+        "src/core/iomgr/sockaddr_posix.h",
+        "src/core/iomgr/sockaddr_utils.h",
+        "src/core/iomgr/sockaddr_win32.h",
+        "src/core/iomgr/socket_utils_posix.h",
+        "src/core/iomgr/tcp_client.h",
+        "src/core/iomgr/tcp_posix.h",
+        "src/core/iomgr/tcp_server.h",
         "src/core/security/auth.h",
         "src/core/security/credentials.h",
         "src/core/security/google_root_certs.h",
@@ -223,9 +232,9 @@
         "src/core/surface/call.h",
         "src/core/surface/channel.h",
         "src/core/surface/client.h",
-        "src/core/surface/lame_client.h",
         "src/core/surface/completion_queue.h",
         "src/core/surface/event_string.h",
+        "src/core/surface/lame_client.h",
         "src/core/surface/server.h",
         "src/core/surface/surface_em.h",
         "src/core/surface/surface_trace.h",
@@ -239,8 +248,8 @@
         "src/core/transport/chttp2/frame_window_update.h",
         "src/core/transport/chttp2/hpack_parser.h",
         "src/core/transport/chttp2/hpack_table.h",
-        "src/core/transport/chttp2/huffsyms.h",
         "src/core/transport/chttp2/http2_errors.h",
+        "src/core/transport/chttp2/huffsyms.h",
         "src/core/transport/chttp2/status_conversion.h",
         "src/core/transport/chttp2/stream_encoder.h",
         "src/core/transport/chttp2/stream_map.h",
@@ -349,8 +358,6 @@
         "grpc"
       ]
     },
-
-
     {
       "name": "grpc_byte_buffer_reader_test",
       "build": "test",
@@ -508,30 +515,6 @@
       ]
     },
     {
-      "name": "grpc_em_test",
-      "build": "test",
-      "src": [
-        "test/core/eventmanager/em_test.c"
-      ],
-      "deps": [
-        "grpc_test_util",
-        "grpc",
-        "gpr"
-      ]
-    },
-    {
-      "name": "grpc_em_pipe_test",
-      "build": "test",
-      "src": [
-        "test/core/eventmanager/em_pipe_test.c"
-      ],
-      "deps": [
-        "grpc_test_util",
-        "grpc",
-        "gpr"
-      ]
-    },
-    {
       "name": "grpc_stream_op_test",
       "build": "test",
       "src": [
@@ -640,10 +623,10 @@
       ]
     },
     {
-      "name": "grpc_tcp_test",
+      "name": "tcp_posix_test",
       "build": "test",
       "src": [
-        "test/core/endpoint/tcp_test.c"
+        "test/core/iomgr/tcp_posix_test.c"
       ],
       "deps": [
         "grpc_test_util",
@@ -679,7 +662,7 @@
       "name": "resolve_address_test",
       "build": "test",
       "src": [
-        "test/core/endpoint/resolve_address_test.c"
+        "test/core/iomgr/resolve_address_test.c"
       ],
       "deps": [
         "grpc_test_util",
@@ -688,10 +671,10 @@
       ]
     },
     {
-      "name": "socket_utils_test",
+      "name": "sockaddr_utils_test",
       "build": "test",
       "src": [
-        "test/core/endpoint/socket_utils_test.c"
+        "test/core/iomgr/sockaddr_utils_test.c"
       ],
       "deps": [
         "grpc_test_util",
@@ -700,10 +683,10 @@
       ]
     },
     {
-      "name": "tcp_server_test",
+      "name": "tcp_server_posix_test",
       "build": "test",
       "src": [
-        "test/core/endpoint/tcp_server_test.c"
+        "test/core/iomgr/tcp_server_posix_test.c"
       ],
       "deps": [
         "grpc_test_util",
@@ -712,10 +695,10 @@
       ]
     },
     {
-      "name": "tcp_client_test",
+      "name": "tcp_client_posix_test",
       "build": "test",
       "src": [
-        "test/core/endpoint/tcp_client_test.c"
+        "test/core/iomgr/tcp_client_posix_test.c"
       ],
       "deps": [
         "grpc_test_util",
@@ -1051,7 +1034,6 @@
         "gpr"
       ]
     },
-
     {
       "name": "thread_pool_test",
       "build": "test",
diff --git a/include/grpc/support/port_platform.h b/include/grpc/support/port_platform.h
index 13ce474..5e3ca91 100644
--- a/include/grpc/support/port_platform.h
+++ b/include/grpc/support/port_platform.h
@@ -45,40 +45,44 @@
 #if defined(_WIN64) || defined(WIN64)
 #define GPR_WIN32 1
 #define GPR_ARCH_64 1
-#define GPR_POSIX_SOCKETUTILS 1
 #elif defined(_WIN32) || defined(WIN32)
+#define GPR_ARCH_32 1
 #define GPR_WIN32 1
-#define GPR_ARCH_32 1
-#define GPR_POSIX_SOCKETUTILS 1
 #elif defined(ANDROID) || defined(__ANDROID__)
-#define GPR_POSIX_TIME 1
-#define GPR_POSIX_SYNC 1
-#define GPR_POSIX_STRING 1
-#define GPR_POSIX_SOCKET 1
-#define GPR_POSIX_SOCKETUTILS 1
 #define GPR_ANDROID 1
-#define GPR_GCC_SYNC 1
 #define GPR_ARCH_32 1
-#elif defined(__linux__)
-#define GPR_POSIX_TIME 1
-#define GPR_POSIX_SYNC 1
-#define GPR_POSIX_STRING 1
+#define GPR_GCC_SYNC 1
+#define GPR_LIBEVENT 1
 #define GPR_POSIX_SOCKET 1
-#define GPR_LINUX 1
+#define GPR_POSIX_SOCKETADDR 1
+#define GPR_POSIX_SOCKETUTILS 1
+#define GPR_POSIX_STRING 1
+#define GPR_POSIX_SYNC 1
+#define GPR_POSIX_TIME 1
+#elif defined(__linux__)
 #define GPR_GCC_ATOMIC 1
+#define GPR_LIBEVENT 1
+#define GPR_LINUX 1
+#define GPR_POSIX_SOCKET 1
+#define GPR_POSIX_SOCKETADDR 1
+#define GPR_POSIX_STRING 1
+#define GPR_POSIX_SYNC 1
+#define GPR_POSIX_TIME 1
 #ifdef _LP64
 #define GPR_ARCH_64 1
 #else /* _LP64 */
 #define GPR_ARCH_32 1
 #endif /* _LP64 */
 #elif defined(__APPLE__)
-#define GPR_POSIX_TIME 1
-#define GPR_POSIX_SYNC 1
-#define GPR_POSIX_STRING 1
+#define GPR_GCC_ATOMIC 1
+#define GPR_LIBEVENT 1
 #define GPR_POSIX_LOG 1
 #define GPR_POSIX_SOCKET 1
+#define GPR_POSIX_SOCKETADDR 1
 #define GPR_POSIX_SOCKETUTILS 1
-#define GPR_GCC_ATOMIC 1
+#define GPR_POSIX_STRING 1
+#define GPR_POSIX_SYNC 1
+#define GPR_POSIX_TIME 1
 #ifdef _LP64
 #define GPR_ARCH_64 1
 #else /* _LP64 */
diff --git a/src/core/channel/client_setup.c b/src/core/channel/client_setup.c
index ea25670..29fe915 100644
--- a/src/core/channel/client_setup.c
+++ b/src/core/channel/client_setup.c
@@ -34,6 +34,7 @@
 #include "src/core/channel/client_setup.h"
 #include "src/core/channel/channel_args.h"
 #include "src/core/channel/channel_stack.h"
+#include "src/core/iomgr/alarm.h"
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
 #include <grpc/support/time.h>
@@ -45,8 +46,7 @@
   void *user_data;
   grpc_channel_args *args;
   grpc_mdctx *mdctx;
-  grpc_em *em;
-  grpc_em_alarm backoff_alarm;
+  grpc_alarm backoff_alarm;
   gpr_timespec current_backoff_interval;
   int in_alarm;
 
@@ -115,7 +115,7 @@
   /* effectively cancels the current request (if any) */
   s->active_request = NULL;
   if (s->in_alarm) {
-    grpc_em_alarm_cancel(&s->backoff_alarm);
+    grpc_alarm_cancel(&s->backoff_alarm);
   }
   if (--s->refs == 0) {
     gpr_mu_unlock(&s->mu);
@@ -133,7 +133,7 @@
     grpc_channel_stack *newly_minted_channel, const grpc_channel_args *args,
     grpc_mdctx *mdctx,
     void (*initiate)(void *user_data, grpc_client_setup_request *request),
-    void (*done)(void *user_data), void *user_data, grpc_em *em) {
+    void (*done)(void *user_data), void *user_data) {
   grpc_client_setup *s = gpr_malloc(sizeof(grpc_client_setup));
 
   s->base.vtable = &setup_vtable;
@@ -143,7 +143,6 @@
   s->initiate = initiate;
   s->done = done;
   s->user_data = user_data;
-  s->em = em;
   s->active_request = NULL;
   s->args = grpc_channel_args_copy(args);
   s->current_backoff_interval = gpr_time_from_micros(1000000);
@@ -164,7 +163,7 @@
 }
 
 static void backoff_alarm_done(void *arg /* grpc_client_setup */,
-                               grpc_em_cb_status status) {
+                               grpc_iomgr_cb_status status) {
   grpc_client_setup *s = arg;
   grpc_client_setup_request *r = gpr_malloc(sizeof(grpc_client_setup_request));
   r->setup = s;
@@ -215,9 +214,9 @@
     gpr_timespec max_backoff = gpr_time_from_micros(120000000);
     GPR_ASSERT(!s->in_alarm);
     s->in_alarm = 1;
-    grpc_em_alarm_init(&s->backoff_alarm, s->em, backoff_alarm_done, s);
-    grpc_em_alarm_add(&s->backoff_alarm,
-                      gpr_time_add(s->current_backoff_interval, gpr_now()));
+    grpc_alarm_init(&s->backoff_alarm, backoff_alarm_done, s);
+    grpc_alarm_add(&s->backoff_alarm,
+                   gpr_time_add(s->current_backoff_interval, gpr_now()));
     s->current_backoff_interval =
         gpr_time_add(s->current_backoff_interval, s->current_backoff_interval);
     if (gpr_time_cmp(s->current_backoff_interval, max_backoff) > 0) {
diff --git a/src/core/channel/client_setup.h b/src/core/channel/client_setup.h
index 862c132..a508785 100644
--- a/src/core/channel/client_setup.h
+++ b/src/core/channel/client_setup.h
@@ -35,7 +35,6 @@
 #define __GRPC_INTERNAL_CHANNEL_CLIENT_SETUP_H__
 
 #include "src/core/channel/client_channel.h"
-#include "src/core/eventmanager/em.h"
 #include "src/core/transport/metadata.h"
 #include <grpc/support/time.h>
 
@@ -48,7 +47,7 @@
     grpc_channel_stack *newly_minted_channel, const grpc_channel_args *args,
     grpc_mdctx *mdctx,
     void (*initiate)(void *user_data, grpc_client_setup_request *request),
-    void (*done)(void *user_data), void *user_data, grpc_em *em);
+    void (*done)(void *user_data), void *user_data);
 
 /* Check that r is the active request: needs to be performed at each callback.
    If this races, we'll have two connection attempts running at once and the
diff --git a/src/core/eventmanager/em.c b/src/core/eventmanager/em.c
deleted file mode 100644
index 0dc6c6a..0000000
--- a/src/core/eventmanager/em.c
+++ /dev/null
@@ -1,728 +0,0 @@
-/*
- *
- * Copyright 2014, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include "src/core/eventmanager/em.h"
-
-#include <unistd.h>
-#include <fcntl.h>
-
-#include <grpc/support/atm.h>
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/sync.h>
-#include <grpc/support/time.h>
-#include <event2/event.h>
-#include <event2/thread.h>
-
-int evthread_use_threads(void);
-
-static void grpc_em_fd_impl_destroy(struct grpc_em_fd_impl *impl);
-
-#define ALARM_TRIGGER_INIT ((gpr_atm)0)
-#define ALARM_TRIGGER_INCREMENT ((gpr_atm)1)
-#define DONE_SHUTDOWN ((void *)1)
-
-#define POLLER_ID_INVALID ((gpr_atm)-1)
-
-typedef struct grpc_em_fd_impl {
-  grpc_em_task task; /* Base class, callbacks, queues, etc */
-  int fd;            /* File descriptor */
-
-  /* Note that the shutdown event is only needed as a workaround for libevent
-     not properly handling event_active on an in flight event. */
-  struct event *shutdown_ev; /* activated to trigger shutdown */
-
-  /* protect shutdown_started|read_state|write_state and ensure barriers
-     between notify_on_[read|write] and read|write callbacks */
-  gpr_mu mu;
-  int shutdown_started; /* 0 -> shutdown not started, 1 -> started */
-  grpc_em_fd_state read_state;
-  grpc_em_fd_state write_state;
-
-  /* descriptor delete list. These are destroyed during polling. */
-  struct grpc_em_fd_impl *next;
-} grpc_em_fd_impl;
-
-/* ================== grpc_em implementation ===================== */
-
-/* If anything is in the work queue, process one item and return 1.
-   Return 0 if there were no work items to complete.
-   Requires em->mu locked, may unlock and relock during the call. */
-static int maybe_do_queue_work(grpc_em *em) {
-  grpc_em_activation_data *work = em->q;
-
-  if (work == NULL) return 0;
-
-  if (work->next == work) {
-    em->q = NULL;
-  } else {
-    em->q = work->next;
-    em->q->prev = work->prev;
-    em->q->next->prev = em->q->prev->next = em->q;
-  }
-  work->next = work->prev = NULL;
-  gpr_mu_unlock(&em->mu);
-
-  work->cb(work->arg, work->status);
-
-  gpr_mu_lock(&em->mu);
-  return 1;
-}
-
-/* Break out of the event loop on timeout */
-static void timer_callback(int fd, short events, void *context) {
-  event_base_loopbreak((struct event_base *)context);
-}
-
-static void free_fd_list(grpc_em_fd_impl *impl) {
-  while (impl != NULL) {
-    grpc_em_fd_impl *current = impl;
-    impl = impl->next;
-    grpc_em_fd_impl_destroy(current);
-    gpr_free(current);
-  }
-}
-
-/* Spend some time doing polling and libevent maintenance work if no other
-   thread is. This includes both polling for events and destroying/closing file
-   descriptor objects.
-   Returns 1 if polling was performed, 0 otherwise.
-   Requires em->mu locked, may unlock and relock during the call. */
-static int maybe_do_polling_work(grpc_em *em, struct timeval delay) {
-  int status;
-
-  if (em->num_pollers) return 0;
-
-  em->num_pollers = 1;
-
-  free_fd_list(em->fds_to_free);
-  em->fds_to_free = NULL;
-
-  gpr_mu_unlock(&em->mu);
-
-  event_add(em->timeout_ev, &delay);
-  status = event_base_loop(em->event_base, EVLOOP_ONCE);
-  if (status < 0) {
-    gpr_log(GPR_ERROR, "event polling loop stops with error status %d", status);
-  }
-  event_del(em->timeout_ev);
-
-  gpr_mu_lock(&em->mu);
-  if (em->fds_to_free) {
-    free_fd_list(em->fds_to_free);
-    em->fds_to_free = NULL;
-  }
-
-  em->num_pollers = 0;
-  gpr_cv_broadcast(&em->cv);
-  return 1;
-}
-
-int grpc_em_work(grpc_em *em, gpr_timespec deadline) {
-  gpr_timespec delay_timespec = gpr_time_sub(deadline, gpr_now());
-  /* poll for no longer than one second */
-  gpr_timespec max_delay = {1, 0};
-  struct timeval delay;
-
-  GPR_ASSERT(em);
-
-  if (gpr_time_cmp(delay_timespec, gpr_time_0) <= 0) {
-    return 0;
-  }
-
-  if (gpr_time_cmp(delay_timespec, max_delay) > 0) {
-    delay_timespec = max_delay;
-  }
-
-  delay = gpr_timeval_from_timespec(delay_timespec);
-
-  if (maybe_do_queue_work(em) || maybe_do_polling_work(em, delay)) {
-    em->last_poll_completed = gpr_now();
-    return 1;
-  }
-
-  return 0;
-}
-
-static void backup_poller_thread(void *p) {
-  grpc_em *em = p;
-  int backup_poller_engaged = 0;
-  /* allow no pollers for 100 milliseconds, then engage backup polling */
-  gpr_timespec allow_no_pollers = gpr_time_from_micros(100 * 1000);
-
-  gpr_mu_lock(&em->mu);
-  while (!em->shutdown_backup_poller) {
-    if (em->num_pollers == 0) {
-      gpr_timespec now = gpr_now();
-      gpr_timespec time_until_engage = gpr_time_sub(
-          allow_no_pollers, gpr_time_sub(now, em->last_poll_completed));
-      if (gpr_time_cmp(time_until_engage, gpr_time_0) <= 0) {
-        if (!backup_poller_engaged) {
-          gpr_log(GPR_DEBUG, "No pollers for a while - engaging backup poller");
-          backup_poller_engaged = 1;
-        }
-        if (!maybe_do_queue_work(em)) {
-          struct timeval tv = {1, 0};
-          maybe_do_polling_work(em, tv);
-        }
-      } else {
-        if (backup_poller_engaged) {
-          gpr_log(GPR_DEBUG, "Backup poller disengaged");
-          backup_poller_engaged = 0;
-        }
-        gpr_mu_unlock(&em->mu);
-        gpr_sleep_until(gpr_time_add(now, time_until_engage));
-        gpr_mu_lock(&em->mu);
-      }
-    } else {
-      if (backup_poller_engaged) {
-        gpr_log(GPR_DEBUG, "Backup poller disengaged");
-        backup_poller_engaged = 0;
-      }
-      gpr_cv_wait(&em->cv, &em->mu, gpr_inf_future);
-    }
-  }
-  gpr_mu_unlock(&em->mu);
-
-  gpr_event_set(&em->backup_poller_done, (void *)1);
-}
-
-grpc_em_error grpc_em_init(grpc_em *em) {
-  gpr_thd_id backup_poller_id;
-
-  if (evthread_use_threads() != 0) {
-    gpr_log(GPR_ERROR, "Failed to initialize libevent thread support!");
-    return GRPC_EM_ERROR;
-  }
-
-  gpr_mu_init(&em->mu);
-  gpr_cv_init(&em->cv);
-  em->q = NULL;
-  em->num_pollers = 0;
-  em->num_fds = 0;
-  em->last_poll_completed = gpr_now();
-  em->shutdown_backup_poller = 0;
-  em->fds_to_free = NULL;
-
-  gpr_event_init(&em->backup_poller_done);
-
-  em->event_base = NULL;
-  em->timeout_ev = NULL;
-
-  em->event_base = event_base_new();
-  if (!em->event_base) {
-    gpr_log(GPR_ERROR, "Failed to create the event base");
-    return GRPC_EM_ERROR;
-  }
-
-  if (evthread_make_base_notifiable(em->event_base) != 0) {
-    gpr_log(GPR_ERROR, "Couldn't make event base notifiable cross threads!");
-    return GRPC_EM_ERROR;
-  }
-
-  em->timeout_ev = evtimer_new(em->event_base, timer_callback, em->event_base);
-
-  gpr_thd_new(&backup_poller_id, backup_poller_thread, em, NULL);
-
-  return GRPC_EM_OK;
-}
-
-grpc_em_error grpc_em_destroy(grpc_em *em) {
-  gpr_timespec fd_shutdown_deadline =
-      gpr_time_add(gpr_now(), gpr_time_from_micros(10 * 1000 * 1000));
-
-  /* broadcast shutdown */
-  gpr_mu_lock(&em->mu);
-  while (em->num_fds) {
-    gpr_log(GPR_INFO,
-            "waiting for %d fds to be destroyed before closing event manager",
-            em->num_fds);
-    if (gpr_cv_wait(&em->cv, &em->mu, fd_shutdown_deadline)) {
-      gpr_log(GPR_ERROR,
-              "not all fds destroyed before shutdown deadline: memory leaks "
-              "are likely");
-      break;
-    } else if (em->num_fds == 0) {
-      gpr_log(GPR_INFO, "all fds closed");
-    }
-  }
-
-  em->shutdown_backup_poller = 1;
-  gpr_cv_broadcast(&em->cv);
-  gpr_mu_unlock(&em->mu);
-
-  gpr_event_wait(&em->backup_poller_done, gpr_inf_future);
-
-  /* drain pending work */
-  gpr_mu_lock(&em->mu);
-  while (maybe_do_queue_work(em))
-    ;
-  gpr_mu_unlock(&em->mu);
-
-  free_fd_list(em->fds_to_free);
-
-  /* complete shutdown */
-  gpr_mu_destroy(&em->mu);
-  gpr_cv_destroy(&em->cv);
-
-  if (em->timeout_ev != NULL) {
-    event_free(em->timeout_ev);
-  }
-
-  if (em->event_base != NULL) {
-    event_base_free(em->event_base);
-    em->event_base = NULL;
-  }
-
-  return GRPC_EM_OK;
-}
-
-static void add_task(grpc_em *em, grpc_em_activation_data *adata) {
-  gpr_mu_lock(&em->mu);
-  if (em->q) {
-    adata->next = em->q;
-    adata->prev = adata->next->prev;
-    adata->next->prev = adata->prev->next = adata;
-  } else {
-    em->q = adata;
-    adata->next = adata->prev = adata;
-  }
-  gpr_cv_broadcast(&em->cv);
-  gpr_mu_unlock(&em->mu);
-}
-
-/* ===============grpc_em_alarm implementation==================== */
-
-/* The following function frees up the alarm's libevent structure and
-   should always be invoked just before calling the alarm's callback */
-static void alarm_ev_destroy(grpc_em_alarm *alarm) {
-  grpc_em_activation_data *adata = &alarm->task.activation[GRPC_EM_TA_ONLY];
-  if (adata->ev != NULL) {
-    /* TODO(klempner): Is this safe to do when we're cancelling? */
-    event_free(adata->ev);
-    adata->ev = NULL;
-  }
-}
-/* Proxy callback triggered by alarm->ev to call alarm->cb */
-static void libevent_alarm_cb(int fd, short what, void *arg /*=alarm*/) {
-  grpc_em_alarm *alarm = arg;
-  grpc_em_activation_data *adata = &alarm->task.activation[GRPC_EM_TA_ONLY];
-  int trigger_old;
-
-  /* First check if this alarm has been canceled, atomically */
-  trigger_old =
-      gpr_atm_full_fetch_add(&alarm->triggered, ALARM_TRIGGER_INCREMENT);
-  if (trigger_old == ALARM_TRIGGER_INIT) {
-    /* Before invoking user callback, destroy the libevent structure */
-    alarm_ev_destroy(alarm);
-    adata->status = GRPC_CALLBACK_SUCCESS;
-    add_task(alarm->task.em, adata);
-  }
-}
-
-grpc_em_error grpc_em_alarm_init(grpc_em_alarm *alarm, grpc_em *em,
-                                 grpc_em_cb_func alarm_cb, void *alarm_cb_arg) {
-  grpc_em_activation_data *adata = &alarm->task.activation[GRPC_EM_TA_ONLY];
-  alarm->task.type = GRPC_EM_TASK_ALARM;
-  alarm->task.em = em;
-  gpr_atm_rel_store(&alarm->triggered, ALARM_TRIGGER_INIT);
-  adata->cb = alarm_cb;
-  adata->arg = alarm_cb_arg;
-  adata->prev = NULL;
-  adata->next = NULL;
-  adata->ev = NULL;
-  return GRPC_EM_OK;
-}
-
-grpc_em_error grpc_em_alarm_add(grpc_em_alarm *alarm, gpr_timespec deadline) {
-  grpc_em_activation_data *adata = &alarm->task.activation[GRPC_EM_TA_ONLY];
-  gpr_timespec delay_timespec = gpr_time_sub(deadline, gpr_now());
-  struct timeval delay = gpr_timeval_from_timespec(delay_timespec);
-  if (adata->ev) {
-    event_free(adata->ev);
-    gpr_log(GPR_INFO, "Adding an alarm that already has an event.");
-    adata->ev = NULL;
-  }
-  adata->ev = evtimer_new(alarm->task.em->event_base, libevent_alarm_cb, alarm);
-  /* Set the trigger field to untriggered. Do this as the last store since
-     it is a release of previous stores. */
-  gpr_atm_rel_store(&alarm->triggered, ALARM_TRIGGER_INIT);
-
-  if (adata->ev != NULL && evtimer_add(adata->ev, &delay) == 0) {
-    return GRPC_EM_OK;
-  } else {
-    return GRPC_EM_ERROR;
-  }
-}
-
-grpc_em_error grpc_em_alarm_cancel(grpc_em_alarm *alarm) {
-  grpc_em_activation_data *adata = &alarm->task.activation[GRPC_EM_TA_ONLY];
-  int trigger_old;
-
-  /* First check if this alarm has been triggered, atomically */
-  trigger_old =
-      gpr_atm_full_fetch_add(&alarm->triggered, ALARM_TRIGGER_INCREMENT);
-  if (trigger_old == ALARM_TRIGGER_INIT) {
-    /* We need to make sure that we only invoke the callback if it hasn't
-       already been invoked */
-    /* First remove this event from libevent. This returns success even if the
-       event has gone active or invoked its callback. */
-    if (evtimer_del(adata->ev) != 0) {
-      /* The delete was unsuccessful for some reason. */
-      gpr_log(GPR_ERROR, "Attempt to delete alarm event was unsuccessful");
-      return GRPC_EM_ERROR;
-    }
-    /* Free up the event structure before invoking callback */
-    alarm_ev_destroy(alarm);
-    adata->status = GRPC_CALLBACK_CANCELLED;
-    add_task(alarm->task.em, adata);
-  }
-  return GRPC_EM_OK;
-}
-
-/* ==================== grpc_em_fd implementation =================== */
-
-/* Proxy callback to call a gRPC read/write callback */
-static void em_fd_cb(int fd, short what, void *arg /*=em_fd_impl*/) {
-  grpc_em_fd_impl *em_fd = arg;
-  grpc_em_cb_status status = GRPC_CALLBACK_SUCCESS;
-  int run_read_cb = 0;
-  int run_write_cb = 0;
-  grpc_em_activation_data *rdata, *wdata;
-
-  gpr_mu_lock(&em_fd->mu);
-  if (em_fd->shutdown_started) {
-    status = GRPC_CALLBACK_CANCELLED;
-  } else if (status == GRPC_CALLBACK_SUCCESS && (what & EV_TIMEOUT)) {
-    status = GRPC_CALLBACK_TIMED_OUT;
-    /* TODO(klempner): This is broken if we are monitoring both read and write
-       events on the same fd -- generating a spurious event is okay, but
-       generating a spurious timeout is not. */
-    what |= (EV_READ | EV_WRITE);
-  }
-
-  if (what & EV_READ) {
-    switch (em_fd->read_state) {
-      case GRPC_EM_FD_WAITING:
-        run_read_cb = 1;
-        em_fd->read_state = GRPC_EM_FD_IDLE;
-        break;
-      case GRPC_EM_FD_IDLE:
-      case GRPC_EM_FD_CACHED:
-        em_fd->read_state = GRPC_EM_FD_CACHED;
-    }
-  }
-  if (what & EV_WRITE) {
-    switch (em_fd->write_state) {
-      case GRPC_EM_FD_WAITING:
-        run_write_cb = 1;
-        em_fd->write_state = GRPC_EM_FD_IDLE;
-        break;
-      case GRPC_EM_FD_IDLE:
-      case GRPC_EM_FD_CACHED:
-        em_fd->write_state = GRPC_EM_FD_CACHED;
-    }
-  }
-
-  if (run_read_cb) {
-    rdata = &(em_fd->task.activation[GRPC_EM_TA_READ]);
-    rdata->status = status;
-    add_task(em_fd->task.em, rdata);
-  } else if (run_write_cb) {
-    wdata = &(em_fd->task.activation[GRPC_EM_TA_WRITE]);
-    wdata->status = status;
-    add_task(em_fd->task.em, wdata);
-  }
-  gpr_mu_unlock(&em_fd->mu);
-}
-
-static void em_fd_shutdown_cb(int fd, short what, void *arg /*=em_fd*/) {
-  /* TODO(klempner): This could just run directly in the calling thread, except
-     that libevent's handling of event_active() on an event which is already in
-     flight on a different thread is racy and easily triggers TSAN.
-   */
-  grpc_em_fd_impl *impl = arg;
-  gpr_mu_lock(&impl->mu);
-  impl->shutdown_started = 1;
-  if (impl->read_state == GRPC_EM_FD_WAITING) {
-    event_active(impl->task.activation[GRPC_EM_TA_READ].ev, EV_READ, 1);
-  }
-  if (impl->write_state == GRPC_EM_FD_WAITING) {
-    event_active(impl->task.activation[GRPC_EM_TA_WRITE].ev, EV_WRITE, 1);
-  }
-  gpr_mu_unlock(&impl->mu);
-}
-
-grpc_em_error grpc_em_fd_init(grpc_em_fd *em_fd, grpc_em *em, int fd) {
-  int flags;
-  grpc_em_activation_data *rdata, *wdata;
-  grpc_em_fd_impl *impl = gpr_malloc(sizeof(grpc_em_fd_impl));
-
-  gpr_mu_lock(&em->mu);
-  em->num_fds++;
-
-  gpr_mu_unlock(&em->mu);
-
-  em_fd->impl = impl;
-
-  impl->shutdown_ev = NULL;
-  gpr_mu_init(&impl->mu);
-
-  flags = fcntl(fd, F_GETFL, 0);
-  if ((flags & O_NONBLOCK) == 0) {
-    gpr_log(GPR_ERROR, "File descriptor %d is blocking", fd);
-    return GRPC_EM_INVALID_ARGUMENTS;
-  }
-
-  impl->task.type = GRPC_EM_TASK_FD;
-  impl->task.em = em;
-  impl->fd = fd;
-
-  rdata = &(impl->task.activation[GRPC_EM_TA_READ]);
-  rdata->ev = NULL;
-  rdata->cb = NULL;
-  rdata->arg = NULL;
-  rdata->status = GRPC_CALLBACK_SUCCESS;
-  rdata->prev = NULL;
-  rdata->next = NULL;
-
-  wdata = &(impl->task.activation[GRPC_EM_TA_WRITE]);
-  wdata->ev = NULL;
-  wdata->cb = NULL;
-  wdata->arg = NULL;
-  wdata->status = GRPC_CALLBACK_SUCCESS;
-  wdata->prev = NULL;
-  wdata->next = NULL;
-
-  impl->read_state = GRPC_EM_FD_IDLE;
-  impl->write_state = GRPC_EM_FD_IDLE;
-
-  impl->shutdown_started = 0;
-  impl->next = NULL;
-
-  /* TODO(chenw): detect platforms where only level trigger is supported,
-     and set the event to non-persist. */
-  rdata->ev = event_new(em->event_base, impl->fd, EV_ET | EV_PERSIST | EV_READ,
-                        em_fd_cb, impl);
-  if (!rdata->ev) {
-    gpr_log(GPR_ERROR, "Failed to create read event");
-    return GRPC_EM_ERROR;
-  }
-
-  wdata->ev = event_new(em->event_base, impl->fd, EV_ET | EV_PERSIST | EV_WRITE,
-                        em_fd_cb, impl);
-  if (!wdata->ev) {
-    gpr_log(GPR_ERROR, "Failed to create write event");
-    return GRPC_EM_ERROR;
-  }
-
-  impl->shutdown_ev =
-      event_new(em->event_base, -1, EV_READ, em_fd_shutdown_cb, impl);
-
-  if (!impl->shutdown_ev) {
-    gpr_log(GPR_ERROR, "Failed to create shutdown event");
-    return GRPC_EM_ERROR;
-  }
-
-  return GRPC_EM_OK;
-}
-
-static void grpc_em_fd_impl_destroy(grpc_em_fd_impl *impl) {
-  grpc_em_task_activity_type type;
-  grpc_em_activation_data *adata;
-
-  for (type = GRPC_EM_TA_READ; type < GRPC_EM_TA_COUNT; type++) {
-    adata = &(impl->task.activation[type]);
-    GPR_ASSERT(adata->next == NULL);
-    if (adata->ev != NULL) {
-      event_free(adata->ev);
-      adata->ev = NULL;
-    }
-  }
-
-  if (impl->shutdown_ev != NULL) {
-    event_free(impl->shutdown_ev);
-    impl->shutdown_ev = NULL;
-  }
-  gpr_mu_destroy(&impl->mu);
-  close(impl->fd);
-}
-
-void grpc_em_fd_destroy(grpc_em_fd *em_fd) {
-  grpc_em_fd_impl *impl = em_fd->impl;
-  grpc_em *em = impl->task.em;
-
-  gpr_mu_lock(&em->mu);
-
-  if (em->num_pollers == 0) {
-    /* it is safe to simply free it */
-    grpc_em_fd_impl_destroy(impl);
-    gpr_free(impl);
-  } else {
-    /* Put the impl on the list to be destroyed by the poller. */
-    impl->next = em->fds_to_free;
-    em->fds_to_free = impl;
-    /* Kick the poller so it closes the fd promptly.
-     * TODO(klempner): maybe this should be a different event.
-     */
-    event_active(em_fd->impl->shutdown_ev, EV_READ, 1);
-  }
-
-  em->num_fds--;
-  gpr_cv_broadcast(&em->cv);
-  gpr_mu_unlock(&em->mu);
-}
-
-int grpc_em_fd_get(struct grpc_em_fd *em_fd) { return em_fd->impl->fd; }
-
-/* Returns the event manager associated with *em_fd. */
-grpc_em *grpc_em_fd_get_em(grpc_em_fd *em_fd) { return em_fd->impl->task.em; }
-
-/* TODO(chenw): should we enforce the contract that notify_on_read cannot be
-   called when the previously registered callback has not been called yet. */
-grpc_em_error grpc_em_fd_notify_on_read(grpc_em_fd *em_fd,
-                                        grpc_em_cb_func read_cb,
-                                        void *read_cb_arg,
-                                        gpr_timespec deadline) {
-  grpc_em_fd_impl *impl = em_fd->impl;
-  int force_event = 0;
-  grpc_em_activation_data *rdata;
-  grpc_em_error result = GRPC_EM_OK;
-  gpr_timespec delay_timespec = gpr_time_sub(deadline, gpr_now());
-  struct timeval delay = gpr_timeval_from_timespec(delay_timespec);
-  struct timeval *delayp =
-      gpr_time_cmp(deadline, gpr_inf_future) ? &delay : NULL;
-
-  rdata = &impl->task.activation[GRPC_EM_TA_READ];
-
-  gpr_mu_lock(&impl->mu);
-  rdata->cb = read_cb;
-  rdata->arg = read_cb_arg;
-
-  force_event =
-      (impl->shutdown_started || impl->read_state == GRPC_EM_FD_CACHED);
-  impl->read_state = GRPC_EM_FD_WAITING;
-
-  if (force_event) {
-    event_active(rdata->ev, EV_READ, 1);
-  } else if (event_add(rdata->ev, delayp) == -1) {
-    result = GRPC_EM_ERROR;
-  }
-  gpr_mu_unlock(&impl->mu);
-  return result;
-}
-
-grpc_em_error grpc_em_fd_notify_on_write(grpc_em_fd *em_fd,
-                                         grpc_em_cb_func write_cb,
-                                         void *write_cb_arg,
-                                         gpr_timespec deadline) {
-  grpc_em_fd_impl *impl = em_fd->impl;
-  int force_event = 0;
-  grpc_em_activation_data *wdata;
-  grpc_em_error result = GRPC_EM_OK;
-  gpr_timespec delay_timespec = gpr_time_sub(deadline, gpr_now());
-  struct timeval delay = gpr_timeval_from_timespec(delay_timespec);
-  struct timeval *delayp =
-      gpr_time_cmp(deadline, gpr_inf_future) ? &delay : NULL;
-
-  wdata = &impl->task.activation[GRPC_EM_TA_WRITE];
-
-  gpr_mu_lock(&impl->mu);
-  wdata->cb = write_cb;
-  wdata->arg = write_cb_arg;
-
-  force_event =
-      (impl->shutdown_started || impl->write_state == GRPC_EM_FD_CACHED);
-  impl->write_state = GRPC_EM_FD_WAITING;
-
-  if (force_event) {
-    event_active(wdata->ev, EV_WRITE, 1);
-  } else if (event_add(wdata->ev, delayp) == -1) {
-    result = GRPC_EM_ERROR;
-  }
-  gpr_mu_unlock(&impl->mu);
-  return result;
-}
-
-void grpc_em_fd_shutdown(grpc_em_fd *em_fd) {
-  event_active(em_fd->impl->shutdown_ev, EV_READ, 1);
-}
-
-/*====================== Other callback functions ======================*/
-
-/* Sometimes we want a followup callback: something to be added from the
-   current callback for the EM to invoke once this callback is complete.
-   This is implemented by inserting an entry into an EM queue. */
-
-/* The following structure holds the field needed for adding the
-   followup callback. These are the argument for the followup callback,
-   the function to use for the followup callback, and the
-   activation data pointer used for the queues (to free in the CB) */
-struct followup_callback_arg {
-  grpc_em_cb_func func;
-  void *cb_arg;
-  grpc_em_activation_data adata;
-};
-
-static void followup_proxy_callback(void *cb_arg, grpc_em_cb_status status) {
-  struct followup_callback_arg *fcb_arg = cb_arg;
-  /* Invoke the function */
-  fcb_arg->func(fcb_arg->cb_arg, status);
-  gpr_free(fcb_arg);
-}
-
-grpc_em_error grpc_em_add_callback(grpc_em *em, grpc_em_cb_func cb,
-                                   void *cb_arg) {
-  grpc_em_activation_data *adptr;
-  struct followup_callback_arg *fcb_arg;
-
-  fcb_arg = gpr_malloc(sizeof(*fcb_arg));
-  if (fcb_arg == NULL) {
-    return GRPC_EM_ERROR;
-  }
-  /* Set up the activation data and followup callback argument structures */
-  adptr = &fcb_arg->adata;
-  adptr->ev = NULL;
-  adptr->cb = followup_proxy_callback;
-  adptr->arg = fcb_arg;
-  adptr->status = GRPC_CALLBACK_SUCCESS;
-  adptr->prev = NULL;
-  adptr->next = NULL;
-
-  fcb_arg->func = cb;
-  fcb_arg->cb_arg = cb_arg;
-
-  /* Insert an activation data for the specified em */
-  add_task(em, adptr);
-  return GRPC_EM_OK;
-}
diff --git a/src/core/eventmanager/em.h b/src/core/eventmanager/em.h
deleted file mode 100644
index f190bc8..0000000
--- a/src/core/eventmanager/em.h
+++ /dev/null
@@ -1,344 +0,0 @@
-/*
- *
- * Copyright 2014, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifndef __GRPC_INTERNAL_EVENTMANAGER_EM_H__
-#define __GRPC_INTERNAL_EVENTMANAGER_EM_H__
-/* grpc_em is an event manager wrapping event loop with multithread support.
-   It executes a callback function when a specific event occurs on a file
-   descriptor or after a timeout has passed.
-   All methods are threadsafe and can be called from any thread.
-
-   To use the event manager, a grpc_em instance needs to be initialized to
-   maintains the internal states. The grpc_em instance can be used to
-   initialize file descriptor instance of grpc_em_fd, or alarm instance of
-   grpc_em_alarm. The former is used to register a callback with a IO event.
-   The later is used to schedule an alarm.
-
-   Instantiating any of these data structures requires including em_internal.h
-   A typical usage example is shown in the end of that header file.  */
-
-#include <grpc/support/atm.h>
-#include <grpc/support/sync.h>
-#include <grpc/support/thd.h>
-#include <grpc/support/time.h>
-
-/* =============== Enums used in GRPC event manager API ==================== */
-
-/* Result of a grpc_em operation */
-typedef enum grpc_em_error {
-  GRPC_EM_OK = 0,           /* everything went ok */
-  GRPC_EM_ERROR,            /* internal errors not caused by the caller */
-  GRPC_EM_INVALID_ARGUMENTS /* invalid arguments from the caller */
-} grpc_em_error;
-
-/* Status passed to callbacks for grpc_em_fd_notify_on_read and
-   grpc_em_fd_notify_on_write.  */
-typedef enum grpc_em_cb_status {
-  GRPC_CALLBACK_SUCCESS = 0,
-  GRPC_CALLBACK_TIMED_OUT,
-  GRPC_CALLBACK_CANCELLED,
-  GRPC_CALLBACK_DO_NOT_USE
-} grpc_em_cb_status;
-
-/* ======= Useful forward struct typedefs for GRPC event manager API ======= */
-
-struct grpc_em;
-struct grpc_em_alarm;
-struct grpc_fd;
-
-typedef struct grpc_em grpc_em;
-typedef struct grpc_em_alarm grpc_em_alarm;
-typedef struct grpc_em_fd grpc_em_fd;
-
-/* gRPC Callback definition */
-typedef void (*grpc_em_cb_func)(void *arg, grpc_em_cb_status status);
-
-/* ============================ grpc_em =============================== */
-/* Initialize *em and start polling, return GRPC_EM_OK on success, return
-   GRPC_EM_ERROR on failure. Upon failure, caller should call grpc_em_destroy()
-   to clean partially initialized *em.
-
-   Requires:  *em uninitialized.  */
-grpc_em_error grpc_em_init(grpc_em *em);
-
-/* Stop polling and cause *em no longer to be initialized.
-   Return GRPC_EM_OK if event polling is cleanly stopped.
-   Otherwise, return GRPC_EM_ERROR if polling is shutdown with errors.
-   Requires: *em initialized; no other concurrent operation on *em.  */
-grpc_em_error grpc_em_destroy(grpc_em *em);
-
-/* do some work; assumes em->mu locked; may unlock and relock em->mu */
-int grpc_em_work(grpc_em *em, gpr_timespec deadline);
-
-/* =========================== grpc_em_am ============================== */
-/* Initialize *alarm. When expired or canceled, alarm_cb will be called with
-   *alarm_cb_arg and status to indicate if it expired (SUCCESS) or was
-   canceled (CANCELLED). alarm_cb is guaranteed to be called exactly once,
-   and application code should check the status to determine how it was
-   invoked. The application callback is also responsible for maintaining
-   information about when to free up any user-level state.  */
-grpc_em_error grpc_em_alarm_init(grpc_em_alarm *alarm, grpc_em *em,
-                                 grpc_em_cb_func alarm_cb, void *alarm_cb_arg);
-
-/* Note that there is no alarm destroy function. This is because the
-   alarm is a one-time occurrence with a guarantee that the callback will
-   be called exactly once, either at expiration or cancellation. Thus, all
-   the internal alarm event management state is destroyed just before
-   that callback is invoked. If the user has additional state associated with
-   the alarm, the user is responsible for determining when it is safe to
-   destroy that state. */
-
-/* Schedule *alarm to expire at deadline. If *alarm is
-   re-added before expiration, the *delay is simply reset to the new value.
-   Return GRPC_EM_OK on success, or GRPC_EM_ERROR on failure.
-   Upon failure, caller should abort further operations on *alarm */
-grpc_em_error grpc_em_alarm_add(grpc_em_alarm *alarm, gpr_timespec deadline);
-
-/* Cancel an *alarm.
-   There are three cases:
-   1. We normally cancel the alarm
-   2. The alarm has already run
-   3. We can't cancel the alarm because it is "in flight".
-
-   In all of these cases, the cancellation is still considered successful.
-   They are essentially distinguished in that the alarm_cb will be run
-   exactly once from either the cancellation (with status CANCELLED)
-   or from the activation (with status SUCCESS)
-
-   Requires:  cancel() must happen after add() on a given alarm */
-grpc_em_error grpc_em_alarm_cancel(grpc_em_alarm *alarm);
-
-/* ========================== grpc_em_fd ============================= */
-
-/* Initialize *em_fd, return GRPM_EM_OK on success, GRPC_EM_ERROR on internal
-   errors, or GRPC_EM_INVALID_ARGUMENTS if fd is a blocking file descriptor.
-   Upon failure, caller should call grpc_em_fd_destroy() to clean partially
-   initialized *em_fd.
-   fd is a non-blocking file descriptor.
-
-   This takes ownership of closing fd.
-
-   Requires:  *em_fd uninitialized. fd is a non-blocking file descriptor.  */
-grpc_em_error grpc_em_fd_init(grpc_em_fd *em_fd, grpc_em *em, int fd);
-
-/* Cause *em_fd no longer to be initialized and closes the underlying fd.
-   Requires: *em_fd initialized; no outstanding notify_on_read or
-   notify_on_write.  */
-void grpc_em_fd_destroy(grpc_em_fd *em_fd);
-
-/* Returns the file descriptor associated with *em_fd. */
-int grpc_em_fd_get(grpc_em_fd *em_fd);
-
-/* Returns the event manager associated with *em_fd. */
-grpc_em *grpc_em_fd_get_em(grpc_em_fd *em_fd);
-
-/* Register read interest, causing read_cb to be called once when em_fd becomes
-   readable, on deadline specified by deadline, or on shutdown triggered by
-   grpc_em_fd_shutdown.
-   Return GRPC_EM_OK on success, or GRPC_EM_ERROR on failure.
-   Upon Failure, caller should abort further operations on *em_fd except
-   grpc_em_fd_shutdown().
-   read_cb will be called with read_cb_arg when *em_fd becomes readable.
-   read_cb is Called with status of GRPC_CALLBACK_SUCCESS if readable,
-   GRPC_CALLBACK_TIMED_OUT if the call timed out,
-   and CANCELLED if the call was cancelled.
-
-   Requires:This method must not be called before the read_cb for any previous
-   call runs. Edge triggered events are used whenever they are supported by the
-   underlying platform. This means that users must drain em_fd in read_cb before
-   calling notify_on_read again. Users are also expected to handle spurious
-   events, i.e read_cb is called while nothing can be readable from em_fd  */
-grpc_em_error grpc_em_fd_notify_on_read(grpc_em_fd *em_fd,
-                                        grpc_em_cb_func read_cb,
-                                        void *read_cb_arg,
-                                        gpr_timespec deadline);
-
-/* Exactly the same semantics as above, except based on writable events.  */
-grpc_em_error grpc_em_fd_notify_on_write(grpc_em_fd *fd,
-                                         grpc_em_cb_func write_cb,
-                                         void *write_cb_arg,
-                                         gpr_timespec deadline);
-
-/* Cause any current and all future read/write callbacks to error out with
-   GRPC_CALLBACK_CANCELLED. */
-void grpc_em_fd_shutdown(grpc_em_fd *em_fd);
-
-/* ================== Other functions =================== */
-
-/* This function is called from within a callback or from anywhere else
-   and causes the invocation of a callback at some point in the future */
-grpc_em_error grpc_em_add_callback(grpc_em *em, grpc_em_cb_func cb,
-                                   void *cb_arg);
-
-/* ========== Declarations related to queue management (non-API) =========== */
-
-/* Forward declarations */
-struct grpc_em_activation_data;
-struct grpc_em_fd_impl;
-
-/* ================== Actual structure definitions ========================= */
-/* gRPC event manager handle.
-   The handle is used to initialize both grpc_em_alarm and grpc_em_fd. */
-struct em_thread_arg;
-
-struct grpc_em {
-  struct event_base *event_base;
-
-  gpr_mu mu;
-  gpr_cv cv;
-  struct grpc_em_activation_data *q;
-  int num_pollers;
-  int num_fds;
-  gpr_timespec last_poll_completed;
-
-  int shutdown_backup_poller;
-  gpr_event backup_poller_done;
-
-  struct grpc_em_fd_impl *fds_to_free;
-
-  struct event *timeout_ev; /* activated to break out of the event loop early */
-};
-
-/* gRPC event manager task "base class". This is pretend-inheritance in C89.
-   This should be the first member of any actual grpc_em task type.
-
-   Memory warning: expanding this will increase memory usage in any derived
-   class, so be careful.
-
-   For generality, this base can be on multiple task queues and can have
-   multiple event callbacks registered. Not all "derived classes" will use
-   this feature. */
-
-typedef enum grpc_em_task_type {
-  GRPC_EM_TASK_ALARM,
-  GRPC_EM_TASK_FD,
-  GRPC_EM_TASK_DO_NOT_USE
-} grpc_em_task_type;
-
-/* Different activity types to shape the callback and queueing arrays */
-typedef enum grpc_em_task_activity_type {
-  GRPC_EM_TA_READ, /* use this also for single-type events */
-  GRPC_EM_TA_WRITE,
-  GRPC_EM_TA_COUNT
-} grpc_em_task_activity_type;
-
-/* Include the following #define for convenience for tasks like alarms that
-   only have a single type */
-#define GRPC_EM_TA_ONLY GRPC_EM_TA_READ
-
-typedef struct grpc_em_activation_data {
-  struct event *ev;   /* event activated on this callback type */
-  grpc_em_cb_func cb; /* function pointer for callback */
-  void *arg;          /* argument passed to cb */
-
-  /* Hold the status associated with the callback when queued */
-  grpc_em_cb_status status;
-  /* Now set up to link activations into scheduler queues */
-  struct grpc_em_activation_data *prev;
-  struct grpc_em_activation_data *next;
-} grpc_em_activation_data;
-
-typedef struct grpc_em_task {
-  grpc_em_task_type type;
-  grpc_em *em;
-
-  /* Now have an array of activation data elements: one for each activity
-     type that could get activated */
-  grpc_em_activation_data activation[GRPC_EM_TA_COUNT];
-} grpc_em_task;
-
-/* gRPC alarm handle.
-   The handle is used to add an alarm which expires after specified timeout. */
-struct grpc_em_alarm {
-  grpc_em_task task; /* Include the base class */
-
-  gpr_atm triggered; /* To be used atomically if alarm triggered */
-};
-
-/* =================== Event caching ===================
-   In order to not miss or double-return edges in the context of edge triggering
-   and multithreading, we need a per-fd caching layer in the eventmanager itself
-   to cache relevant events.
-
-   There are two types of events we care about: calls to notify_on_[read|write]
-   and readable/writable events for the socket from eventfd. There are separate
-   event caches for read and write.
-
-   There are three states:
-   0. "waiting" -- There's been a call to notify_on_[read|write] which has not
-   had a corresponding event. In other words, we're waiting for an event so we
-   can run the callback.
-   1. "idle" -- We are neither waiting nor have a cached event.
-   2. "cached" -- There has been a read/write event without a waiting callback,
-   so we want to run the event next time the application calls
-   notify_on_[read|write].
-
-   The high level state diagram:
-
-   +--------------------------------------------------------------------+
-   | WAITING                  | IDLE                | CACHED            |
-   |                          |                     |                   |
-   |                     1. --*->              2. --+->           3.  --+\
-   |                          |                     |                <--+/
-   |                          |                     |                   |
-  x+-- 6.                5. <-+--              4. <-*--                 |
-   |                          |                     |                   |
-   +--------------------------------------------------------------------+
-
-   Transitions right occur on read|write events. Transitions left occur on
-   notify_on_[read|write] events.
-   State transitions:
-   1. Read|Write event while waiting -> run the callback and transition to idle.
-   2. Read|Write event while idle -> transition to cached.
-   3. Read|Write event with one already cached -> still cached.
-   4. notify_on_[read|write] with event cached: run callback and transition to
-      idle.
-   5. notify_on_[read|write] when idle: Store callback and transition to
-      waiting.
-   6. notify_on_[read|write] when waiting: invalid. */
-
-typedef enum grpc_em_fd_state {
-  GRPC_EM_FD_WAITING = 0,
-  GRPC_EM_FD_IDLE = 1,
-  GRPC_EM_FD_CACHED = 2
-} grpc_em_fd_state;
-
-struct grpc_em_fd_impl;
-
-/* gRPC file descriptor handle.
-   The handle is used to register read/write callbacks to a file descriptor */
-struct grpc_em_fd {
-  struct grpc_em_fd_impl *impl;
-};
-
-#endif  /* __GRPC_INTERNAL_EVENTMANAGER_EM_H__ */
diff --git a/src/core/httpcli/httpcli.c b/src/core/httpcli/httpcli.c
index 6c0a688..84a97a4 100644
--- a/src/core/httpcli/httpcli.c
+++ b/src/core/httpcli/httpcli.c
@@ -36,8 +36,8 @@
 #include <string.h>
 
 #include "src/core/endpoint/endpoint.h"
-#include "src/core/endpoint/resolve_address.h"
-#include "src/core/endpoint/tcp_client.h"
+#include "src/core/iomgr/resolve_address.h"
+#include "src/core/iomgr/tcp_client.h"
 #include "src/core/httpcli/format_request.h"
 #include "src/core/httpcli/httpcli_security_context.h"
 #include "src/core/httpcli/parser.h"
@@ -54,7 +54,6 @@
   grpc_resolved_addresses *addresses;
   size_t next_address;
   grpc_endpoint *ep;
-  grpc_em *em;
   char *host;
   gpr_timespec deadline;
   int have_read_byte;
@@ -200,9 +199,8 @@
     return;
   }
   addr = &req->addresses->addrs[req->next_address++];
-  grpc_tcp_client_connect(on_connected, req, req->em,
-                          (struct sockaddr *)&addr->addr, addr->len,
-                          req->deadline);
+  grpc_tcp_client_connect(on_connected, req, (struct sockaddr *)&addr->addr,
+                          addr->len, req->deadline);
 }
 
 static void on_resolved(void *arg, grpc_resolved_addresses *addresses) {
@@ -217,7 +215,7 @@
 }
 
 void grpc_httpcli_get(const grpc_httpcli_request *request,
-                      gpr_timespec deadline, grpc_em *em,
+                      gpr_timespec deadline,
                       grpc_httpcli_response_cb on_response, void *user_data) {
   internal_request *req = gpr_malloc(sizeof(internal_request));
   memset(req, 0, sizeof(*req));
@@ -225,7 +223,6 @@
   grpc_httpcli_parser_init(&req->parser);
   req->on_response = on_response;
   req->user_data = user_data;
-  req->em = em;
   req->deadline = deadline;
   req->use_ssl = request->use_ssl;
   if (req->use_ssl) {
@@ -238,7 +235,7 @@
 
 void grpc_httpcli_post(const grpc_httpcli_request *request,
                        const char *body_bytes, size_t body_size,
-                       gpr_timespec deadline, grpc_em *em,
+                       gpr_timespec deadline,
                        grpc_httpcli_response_cb on_response, void *user_data) {
   internal_request *req = gpr_malloc(sizeof(internal_request));
   memset(req, 0, sizeof(*req));
@@ -247,7 +244,6 @@
   grpc_httpcli_parser_init(&req->parser);
   req->on_response = on_response;
   req->user_data = user_data;
-  req->em = em;
   req->deadline = deadline;
   req->use_ssl = request->use_ssl;
   if (req->use_ssl) {
diff --git a/src/core/httpcli/httpcli.h b/src/core/httpcli/httpcli.h
index aef0edf..56eebe9 100644
--- a/src/core/httpcli/httpcli.h
+++ b/src/core/httpcli/httpcli.h
@@ -36,7 +36,6 @@
 
 #include <stddef.h>
 
-#include "src/core/eventmanager/em.h"
 #include <grpc/support/time.h>
 
 /* User agent this library reports */
@@ -90,7 +89,7 @@
    'on_response' is a callback to report results to (and 'user_data' is a user
      supplied pointer to pass to said call) */
 void grpc_httpcli_get(const grpc_httpcli_request *request,
-                      gpr_timespec deadline, grpc_em *em,
+                      gpr_timespec deadline,
                       grpc_httpcli_response_cb on_response, void *user_data);
 
 /* Asynchronously perform a HTTP POST.
@@ -98,7 +97,7 @@
    Does not support ?var1=val1&var2=val2 in the path. */
 void grpc_httpcli_post(const grpc_httpcli_request *request,
                        const char *body_bytes, size_t body_size,
-                       gpr_timespec deadline, grpc_em *em,
+                       gpr_timespec deadline,
                        grpc_httpcli_response_cb on_response, void *user_data);
 
 #endif  /* __GRPC_INTERNAL_HTTPCLI_HTTPCLI_H__ */
diff --git a/src/core/iomgr/alarm.h b/src/core/iomgr/alarm.h
new file mode 100644
index 0000000..5bd00d9
--- /dev/null
+++ b/src/core/iomgr/alarm.h
@@ -0,0 +1,85 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __GRPC_INTERNAL_IOMGR_ALARM_H__
+#define __GRPC_INTERNAL_IOMGR_ALARM_H__
+
+#include "src/core/iomgr/iomgr.h"
+#include <grpc/support/port_platform.h>
+#include <grpc/support/time.h>
+
+typedef struct grpc_alarm grpc_alarm;
+
+/* One of the following headers should provide struct grpc_alarm */
+#ifdef GPR_LIBEVENT
+#include "src/core/iomgr/iomgr_libevent.h"
+#endif
+
+/* Initialize *alarm. When expired or canceled, alarm_cb will be called with
+   *alarm_cb_arg and status to indicate if it expired (SUCCESS) or was
+   canceled (CANCELLED). alarm_cb is guaranteed to be called exactly once,
+   and application code should check the status to determine how it was
+   invoked. The application callback is also responsible for maintaining
+   information about when to free up any user-level state.  */
+void grpc_alarm_init(grpc_alarm *alarm, grpc_iomgr_cb_func alarm_cb,
+                     void *alarm_cb_arg);
+
+/* Note that there is no alarm destroy function. This is because the
+   alarm is a one-time occurrence with a guarantee that the callback will
+   be called exactly once, either at expiration or cancellation. Thus, all
+   the internal alarm event management state is destroyed just before
+   that callback is invoked. If the user has additional state associated with
+   the alarm, the user is responsible for determining when it is safe to
+   destroy that state. */
+
+/* Schedule *alarm to expire at deadline. If *alarm is
+   re-added before expiration, its deadline is simply reset to the new value.
+   Returns non-zero on success, zero on failure.
+   Upon failure, caller should abort further operations on *alarm */
+int grpc_alarm_add(grpc_alarm *alarm, gpr_timespec deadline);
+
+/* Cancel an *alarm.
+   There are three cases:
+   1. We normally cancel the alarm
+   2. The alarm has already run
+   3. We can't cancel the alarm because it is "in flight".
+
+   In all of these cases, the cancellation is still considered successful.
+   They are essentially distinguished in that the alarm_cb will be run
+   exactly once from either the cancellation (with status CANCELLED)
+   or from the activation (with status SUCCESS)
+
+   Requires:  cancel() must happen after add() on a given alarm */
+int grpc_alarm_cancel(grpc_alarm *alarm);
+
+#endif /* __GRPC_INTERNAL_IOMGR_ALARM_H__ */
diff --git a/src/core/eventmanager/em_win32.c b/src/core/iomgr/endpoint_pair.h
similarity index 80%
copy from src/core/eventmanager/em_win32.c
copy to src/core/iomgr/endpoint_pair.h
index 4d5c3b5..4a97ebf 100644
--- a/src/core/eventmanager/em_win32.c
+++ b/src/core/iomgr/endpoint_pair.h
@@ -31,8 +31,16 @@
  *
  */
 
-/* Windows event manager support code. */
-#include <event2/thread.h>
+#ifndef __GRPC_INTERNAL_IOMGR_ENDPOINT_PAIR_H_
+#define __GRPC_INTERNAL_IOMGR_ENDPOINT_PAIR_H_
 
-/* Notify LibEvent that Windows thread is used. */
-int evthread_use_threads() { return evthread_use_windows_threads(); }
+#include "src/core/endpoint/endpoint.h"
+
+typedef struct {
+  grpc_endpoint *client;
+  grpc_endpoint *server;
+} grpc_endpoint_pair;
+
+grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(size_t read_slice_size);
+
+#endif /* __GRPC_INTERNAL_IOMGR_ENDPOINT_PAIR_H_ */
diff --git a/src/core/endpoint/socket_utils_linux.c b/src/core/iomgr/endpoint_pair_posix.c
similarity index 67%
copy from src/core/endpoint/socket_utils_linux.c
copy to src/core/iomgr/endpoint_pair_posix.c
index 479675e..f08d134 100644
--- a/src/core/endpoint/socket_utils_linux.c
+++ b/src/core/iomgr/endpoint_pair_posix.c
@@ -31,22 +31,31 @@
  *
  */
 
-#define _GNU_SOURCE
-#include <grpc/support/port_platform.h>
+#include "src/core/iomgr/endpoint_pair.h"
 
-#ifdef GPR_LINUX
-
-#include "src/core/endpoint/socket_utils.h"
-
+#include <errno.h>
+#include <fcntl.h>
+#include <string.h>
 #include <sys/types.h>
 #include <sys/socket.h>
 
-int grpc_accept4(int sockfd, struct sockaddr *addr, socklen_t *addrlen,
-                 int nonblock, int cloexec) {
-  int flags = 0;
-  flags |= nonblock ? SOCK_NONBLOCK : 0;
-  flags |= cloexec ? SOCK_CLOEXEC : 0;
-  return accept4(sockfd, addr, addrlen, flags);
+#include "src/core/iomgr/tcp_posix.h"
+#include <grpc/support/log.h>
+
+static void create_sockets(int sv[2]) {
+  int flags;
+  GPR_ASSERT(socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0);
+  flags = fcntl(sv[0], F_GETFL, 0);
+  GPR_ASSERT(fcntl(sv[0], F_SETFL, flags | O_NONBLOCK) == 0);
+  flags = fcntl(sv[1], F_GETFL, 0);
+  GPR_ASSERT(fcntl(sv[1], F_SETFL, flags | O_NONBLOCK) == 0);
 }
 
-#endif
+grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(size_t read_slice_size) {
+  int sv[2];
+  grpc_endpoint_pair p;
+  create_sockets(sv);
+  p.client = grpc_tcp_create(grpc_fd_create(sv[1]), read_slice_size);
+  p.server = grpc_tcp_create(grpc_fd_create(sv[0]), read_slice_size);
+  return p;
+}
diff --git a/src/core/surface/surface_em.c b/src/core/iomgr/iomgr.h
similarity index 66%
copy from src/core/surface/surface_em.c
copy to src/core/iomgr/iomgr.h
index e1785d1..cf39f94 100644
--- a/src/core/surface/surface_em.c
+++ b/src/core/iomgr/iomgr.h
@@ -31,25 +31,26 @@
  *
  */
 
-#include "src/core/surface/surface_em.h"
-#include <grpc/support/log.h>
+#ifndef __GRPC_INTERNAL_IOMGR_IOMGR_H__
+#define __GRPC_INTERNAL_IOMGR_IOMGR_H__
 
-static int initialized = 0;
-static grpc_em em;
+/* Status passed to I/O manager callbacks, e.g. those registered via
+   grpc_fd_notify_on_read and grpc_fd_notify_on_write.  */
+typedef enum grpc_em_cb_status {
+  GRPC_CALLBACK_SUCCESS = 0,
+  GRPC_CALLBACK_TIMED_OUT,
+  GRPC_CALLBACK_CANCELLED,
+  GRPC_CALLBACK_DO_NOT_USE
+} grpc_iomgr_cb_status;
 
-grpc_em *grpc_surface_em() {
-  GPR_ASSERT(initialized && "call grpc_init()");
-  return &em;
-}
+/* gRPC Callback definition */
+typedef void (*grpc_iomgr_cb_func)(void *arg, grpc_iomgr_cb_status status);
 
-void grpc_surface_em_init() {
-  GPR_ASSERT(!initialized);
-  initialized = 1;
-  grpc_em_init(&em);
-}
+void grpc_iomgr_init();
+void grpc_iomgr_shutdown();
 
-void grpc_surface_em_shutdown() {
-  GPR_ASSERT(initialized);
-  grpc_em_destroy(&em);
-  initialized = 0;
-}
+/* This function is called from within a callback or from anywhere else
+   and causes the invocation of a callback at some point in the future */
+void grpc_iomgr_add_callback(grpc_iomgr_cb_func cb, void *cb_arg);
+
+#endif /* __GRPC_INTERNAL_IOMGR_IOMGR_H__ */
diff --git a/src/core/surface/surface_em.c b/src/core/iomgr/iomgr_completion_queue_interface.h
similarity index 78%
rename from src/core/surface/surface_em.c
rename to src/core/iomgr/iomgr_completion_queue_interface.h
index e1785d1..3c4efe7 100644
--- a/src/core/surface/surface_em.c
+++ b/src/core/iomgr/iomgr_completion_queue_interface.h
@@ -31,25 +31,15 @@
  *
  */
 
-#include "src/core/surface/surface_em.h"
-#include <grpc/support/log.h>
+#ifndef __GRPC_INTERNAL_IOMGR_IOMGR_COMPLETION_QUEUE_INTERFACE_H_
+#define __GRPC_INTERNAL_IOMGR_IOMGR_COMPLETION_QUEUE_INTERFACE_H_
 
-static int initialized = 0;
-static grpc_em em;
+/* Internals of iomgr that are exposed only to be used for completion queue
+   implementation */
 
-grpc_em *grpc_surface_em() {
-  GPR_ASSERT(initialized && "call grpc_init()");
-  return &em;
-}
+extern gpr_mu grpc_iomgr_mu;
+extern gpr_cv grpc_iomgr_cv;
 
-void grpc_surface_em_init() {
-  GPR_ASSERT(!initialized);
-  initialized = 1;
-  grpc_em_init(&em);
-}
+int grpc_iomgr_work(gpr_timespec deadline);
 
-void grpc_surface_em_shutdown() {
-  GPR_ASSERT(initialized);
-  grpc_em_destroy(&em);
-  initialized = 0;
-}
+#endif /* __GRPC_INTERNAL_IOMGR_IOMGR_COMPLETION_QUEUE_INTERFACE_H_ */
diff --git a/src/core/iomgr/iomgr_libevent.c b/src/core/iomgr/iomgr_libevent.c
new file mode 100644
index 0000000..1af03dc
--- /dev/null
+++ b/src/core/iomgr/iomgr_libevent.c
@@ -0,0 +1,676 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/iomgr/iomgr_libevent.h"
+
+#include <unistd.h>
+#include <fcntl.h>
+
+#include "src/core/iomgr/alarm.h"
+#include <grpc/support/atm.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/sync.h>
+#include <grpc/support/thd.h>
+#include <grpc/support/time.h>
+#include <event2/event.h>
+#include <event2/thread.h>
+
+#define ALARM_TRIGGER_INIT ((gpr_atm)0)
+#define ALARM_TRIGGER_INCREMENT ((gpr_atm)1)
+#define DONE_SHUTDOWN ((void *)1)
+
+#define POLLER_ID_INVALID ((gpr_atm)-1)
+
+/* Global data */
+struct event_base *g_event_base;
+gpr_mu grpc_iomgr_mu;
+gpr_cv grpc_iomgr_cv;
+static grpc_libevent_activation_data *g_activation_queue;
+static int g_num_pollers;
+static int g_num_fds;
+static gpr_timespec g_last_poll_completed;
+static int g_shutdown_backup_poller;
+static gpr_event g_backup_poller_done;
+/* activated to break out of the event loop early */
+static struct event *g_timeout_ev;
+static grpc_fd *g_fds_to_free;
+
+int evthread_use_threads(void);
+static void grpc_fd_impl_destroy(grpc_fd *impl);
+
+/* If anything is in the work queue, process one item and return 1.
+   Return 0 if there were no work items to complete.
+   Requires grpc_iomgr_mu locked, may unlock and relock during the call. */
+static int maybe_do_queue_work() {
+  grpc_libevent_activation_data *work = g_activation_queue;
+
+  if (work == NULL) return 0;
+
+  if (work->next == work) {
+    g_activation_queue = NULL;
+  } else {
+    g_activation_queue = work->next;
+    g_activation_queue->prev = work->prev;
+    g_activation_queue->next->prev = g_activation_queue->prev->next =
+        g_activation_queue;
+  }
+  work->next = work->prev = NULL;
+  gpr_mu_unlock(&grpc_iomgr_mu);
+
+  work->cb(work->arg, work->status);
+
+  gpr_mu_lock(&grpc_iomgr_mu);
+  return 1;
+}
+
+/* Break out of the event loop on timeout */
+static void timer_callback(int fd, short events, void *context) {
+  event_base_loopbreak((struct event_base *)context);
+}
+
+static void free_fd_list(grpc_fd *impl) {
+  while (impl != NULL) {
+    grpc_fd *current = impl;
+    impl = impl->next;
+    grpc_fd_impl_destroy(current);
+    gpr_free(current);
+  }
+}
+
+static void maybe_free_fds() {
+  if (g_fds_to_free) {
+    free_fd_list(g_fds_to_free);
+    g_fds_to_free = NULL;
+  }
+}
+
+/* Spend some time doing polling and libevent maintenance work if no other
+   thread is. This includes both polling for events and destroying/closing file
+   descriptor objects.
+   Returns 1 if polling was performed, 0 otherwise.
+   Requires grpc_iomgr_mu locked, may unlock and relock during the call. */
+static int maybe_do_polling_work(struct timeval delay) {
+  int status;
+
+  if (g_num_pollers) return 0;
+
+  g_num_pollers = 1;
+
+  maybe_free_fds();
+
+  gpr_mu_unlock(&grpc_iomgr_mu);
+
+  event_add(g_timeout_ev, &delay);
+  status = event_base_loop(g_event_base, EVLOOP_ONCE);
+  if (status < 0) {
+    gpr_log(GPR_ERROR, "event polling loop stops with error status %d", status);
+  }
+  event_del(g_timeout_ev);
+
+  gpr_mu_lock(&grpc_iomgr_mu);
+  maybe_free_fds();
+
+  g_num_pollers = 0;
+  gpr_cv_broadcast(&grpc_iomgr_cv);
+  return 1;
+}
+
+int grpc_iomgr_work(gpr_timespec deadline) {
+  gpr_timespec delay_timespec = gpr_time_sub(deadline, gpr_now());
+  /* poll for no longer than one second */
+  gpr_timespec max_delay = {1, 0};
+  struct timeval delay;
+
+  if (gpr_time_cmp(delay_timespec, gpr_time_0) <= 0) {
+    return 0;
+  }
+
+  if (gpr_time_cmp(delay_timespec, max_delay) > 0) {
+    delay_timespec = max_delay;
+  }
+
+  delay = gpr_timeval_from_timespec(delay_timespec);
+
+  if (maybe_do_queue_work() || maybe_do_polling_work(delay)) {
+    g_last_poll_completed = gpr_now();
+    return 1;
+  }
+
+  return 0;
+}
+
+static void backup_poller_thread(void *p) {
+  int backup_poller_engaged = 0;
+  /* allow no pollers for 100 milliseconds, then engage backup polling */
+  gpr_timespec allow_no_pollers = gpr_time_from_micros(100 * 1000);
+
+  gpr_mu_lock(&grpc_iomgr_mu);
+  while (!g_shutdown_backup_poller) {
+    if (g_num_pollers == 0) {
+      gpr_timespec now = gpr_now();
+      gpr_timespec time_until_engage = gpr_time_sub(
+          allow_no_pollers, gpr_time_sub(now, g_last_poll_completed));
+      if (gpr_time_cmp(time_until_engage, gpr_time_0) <= 0) {
+        if (!backup_poller_engaged) {
+          gpr_log(GPR_DEBUG, "No pollers for a while - engaging backup poller");
+          backup_poller_engaged = 1;
+        }
+        if (!maybe_do_queue_work()) {
+          struct timeval tv = {1, 0};
+          maybe_do_polling_work(tv);
+        }
+      } else {
+        if (backup_poller_engaged) {
+          gpr_log(GPR_DEBUG, "Backup poller disengaged");
+          backup_poller_engaged = 0;
+        }
+        gpr_mu_unlock(&grpc_iomgr_mu);
+        gpr_sleep_until(gpr_time_add(now, time_until_engage));
+        gpr_mu_lock(&grpc_iomgr_mu);
+      }
+    } else {
+      if (backup_poller_engaged) {
+        gpr_log(GPR_DEBUG, "Backup poller disengaged");
+        backup_poller_engaged = 0;
+      }
+      gpr_cv_wait(&grpc_iomgr_cv, &grpc_iomgr_mu, gpr_inf_future);
+    }
+  }
+  gpr_mu_unlock(&grpc_iomgr_mu);
+
+  gpr_event_set(&g_backup_poller_done, (void *)1);
+}
+
+void grpc_iomgr_init() {
+  gpr_thd_id backup_poller_id;
+
+  if (evthread_use_threads() != 0) {
+    gpr_log(GPR_ERROR, "Failed to initialize libevent thread support!");
+    abort();
+  }
+
+  gpr_mu_init(&grpc_iomgr_mu);
+  gpr_cv_init(&grpc_iomgr_cv);
+  g_activation_queue = NULL;
+  g_num_pollers = 0;
+  g_num_fds = 0;
+  g_last_poll_completed = gpr_now();
+  g_shutdown_backup_poller = 0;
+  g_fds_to_free = NULL;
+
+  gpr_event_init(&g_backup_poller_done);
+
+  g_event_base = NULL;
+  g_timeout_ev = NULL;
+
+  g_event_base = event_base_new();
+  if (!g_event_base) {
+    gpr_log(GPR_ERROR, "Failed to create the event base");
+    abort();
+  }
+
+  if (evthread_make_base_notifiable(g_event_base) != 0) {
+    gpr_log(GPR_ERROR, "Couldn't make event base notifiable cross threads!");
+    abort();
+  }
+
+  g_timeout_ev = evtimer_new(g_event_base, timer_callback, g_event_base);
+
+  gpr_thd_new(&backup_poller_id, backup_poller_thread, NULL, NULL);
+}
+
+void grpc_iomgr_shutdown() {
+  gpr_timespec fd_shutdown_deadline =
+      gpr_time_add(gpr_now(), gpr_time_from_seconds(10));
+
+  /* broadcast shutdown */
+  gpr_mu_lock(&grpc_iomgr_mu);
+  while (g_num_fds) {
+    gpr_log(GPR_INFO,
+            "waiting for %d fds to be destroyed before closing event manager",
+            g_num_fds);
+    if (gpr_cv_wait(&grpc_iomgr_cv, &grpc_iomgr_mu, fd_shutdown_deadline)) {
+      gpr_log(GPR_ERROR,
+              "not all fds destroyed before shutdown deadline: memory leaks "
+              "are likely");
+      break;
+    } else if (g_num_fds == 0) {
+      gpr_log(GPR_INFO, "all fds closed");
+    }
+  }
+
+  g_shutdown_backup_poller = 1;
+  gpr_cv_broadcast(&grpc_iomgr_cv);
+  gpr_mu_unlock(&grpc_iomgr_mu);
+
+  gpr_event_wait(&g_backup_poller_done, gpr_inf_future);
+
+  /* drain pending work */
+  gpr_mu_lock(&grpc_iomgr_mu);
+  while (maybe_do_queue_work())
+    ;
+  gpr_mu_unlock(&grpc_iomgr_mu);
+
+  free_fd_list(g_fds_to_free);
+
+  /* complete shutdown */
+  gpr_mu_destroy(&grpc_iomgr_mu);
+  gpr_cv_destroy(&grpc_iomgr_cv);
+
+  if (g_timeout_ev != NULL) {
+    event_free(g_timeout_ev);
+  }
+
+  if (g_event_base != NULL) {
+    event_base_free(g_event_base);
+    g_event_base = NULL;
+  }
+}
+
+static void add_task(grpc_libevent_activation_data *adata) {
+  gpr_mu_lock(&grpc_iomgr_mu);
+  if (g_activation_queue) {
+    adata->next = g_activation_queue;
+    adata->prev = adata->next->prev;
+    adata->next->prev = adata->prev->next = adata;
+  } else {
+    g_activation_queue = adata;
+    adata->next = adata->prev = adata;
+  }
+  gpr_cv_broadcast(&grpc_iomgr_cv);
+  gpr_mu_unlock(&grpc_iomgr_mu);
+}
+
+/* ===============grpc_alarm implementation==================== */
+
+/* The following function frees up the alarm's libevent structure and
+   should always be invoked just before calling the alarm's callback */
+static void alarm_ev_destroy(grpc_alarm *alarm) {
+  grpc_libevent_activation_data *adata =
+      &alarm->task.activation[GRPC_EM_TA_ONLY];
+  if (adata->ev != NULL) {
+    /* TODO(klempner): Is this safe to do when we're cancelling? */
+    event_free(adata->ev);
+    adata->ev = NULL;
+  }
+}
+/* Proxy callback triggered by alarm->ev to call alarm->cb */
+static void libevent_alarm_cb(int fd, short what, void *arg /*=alarm*/) {
+  grpc_alarm *alarm = arg;
+  grpc_libevent_activation_data *adata =
+      &alarm->task.activation[GRPC_EM_TA_ONLY];
+  int trigger_old;
+
+  /* First check if this alarm has been canceled, atomically */
+  trigger_old =
+      gpr_atm_full_fetch_add(&alarm->triggered, ALARM_TRIGGER_INCREMENT);
+  if (trigger_old == ALARM_TRIGGER_INIT) {
+    /* Before invoking user callback, destroy the libevent structure */
+    alarm_ev_destroy(alarm);
+    adata->status = GRPC_CALLBACK_SUCCESS;
+    add_task(adata);
+  }
+}
+
+void grpc_alarm_init(grpc_alarm *alarm, grpc_iomgr_cb_func alarm_cb,
+                     void *alarm_cb_arg) {
+  grpc_libevent_activation_data *adata =
+      &alarm->task.activation[GRPC_EM_TA_ONLY];
+  alarm->task.type = GRPC_EM_TASK_ALARM;
+  gpr_atm_rel_store(&alarm->triggered, ALARM_TRIGGER_INIT);
+  adata->cb = alarm_cb;
+  adata->arg = alarm_cb_arg;
+  adata->prev = NULL;
+  adata->next = NULL;
+  adata->ev = NULL;
+}
+
+int grpc_alarm_add(grpc_alarm *alarm, gpr_timespec deadline) {
+  grpc_libevent_activation_data *adata =
+      &alarm->task.activation[GRPC_EM_TA_ONLY];
+  gpr_timespec delay_timespec = gpr_time_sub(deadline, gpr_now());
+  struct timeval delay = gpr_timeval_from_timespec(delay_timespec);
+  if (adata->ev) {
+    event_free(adata->ev);
+    gpr_log(GPR_INFO, "Adding an alarm that already has an event.");
+    adata->ev = NULL;
+  }
+  adata->ev = evtimer_new(g_event_base, libevent_alarm_cb, alarm);
+  /* Set the trigger field to untriggered. Do this as the last store since
+     it is a release of previous stores. */
+  gpr_atm_rel_store(&alarm->triggered, ALARM_TRIGGER_INIT);
+
+  return adata->ev != NULL && evtimer_add(adata->ev, &delay) == 0;
+}
+
+int grpc_alarm_cancel(grpc_alarm *alarm) {
+  grpc_libevent_activation_data *adata =
+      &alarm->task.activation[GRPC_EM_TA_ONLY];
+  int trigger_old;
+
+  /* First check if this alarm has been triggered, atomically */
+  trigger_old =
+      gpr_atm_full_fetch_add(&alarm->triggered, ALARM_TRIGGER_INCREMENT);
+  if (trigger_old == ALARM_TRIGGER_INIT) {
+    /* We need to make sure that we only invoke the callback if it hasn't
+       already been invoked */
+    /* First remove this event from libevent. This returns success even if the
+       event has gone active or invoked its callback. */
+    if (evtimer_del(adata->ev) != 0) {
+      /* The delete was unsuccessful for some reason. */
+      gpr_log(GPR_ERROR, "Attempt to delete alarm event was unsuccessful");
+      return 0;
+    }
+    /* Free up the event structure before invoking callback */
+    alarm_ev_destroy(alarm);
+    adata->status = GRPC_CALLBACK_CANCELLED;
+    add_task(adata);
+  }
+  return 1;
+}
+
+static void grpc_fd_impl_destroy(grpc_fd *impl) {
+  grpc_em_task_activity_type type;
+  grpc_libevent_activation_data *adata;
+
+  for (type = GRPC_EM_TA_READ; type < GRPC_EM_TA_COUNT; type++) {
+    adata = &(impl->task.activation[type]);
+    GPR_ASSERT(adata->next == NULL);
+    if (adata->ev != NULL) {
+      event_free(adata->ev);
+      adata->ev = NULL;
+    }
+  }
+
+  if (impl->shutdown_ev != NULL) {
+    event_free(impl->shutdown_ev);
+    impl->shutdown_ev = NULL;
+  }
+  gpr_mu_destroy(&impl->mu);
+  close(impl->fd);
+}
+
+/* Proxy callback to call a gRPC read/write callback */
+static void em_fd_cb(int fd, short what, void *arg /*=em_fd*/) {
+  grpc_fd *em_fd = arg;
+  grpc_iomgr_cb_status status = GRPC_CALLBACK_SUCCESS;
+  int run_read_cb = 0;
+  int run_write_cb = 0;
+  grpc_libevent_activation_data *rdata, *wdata;
+
+  gpr_mu_lock(&em_fd->mu);
+  if (em_fd->shutdown_started) {
+    status = GRPC_CALLBACK_CANCELLED;
+  } else if (status == GRPC_CALLBACK_SUCCESS && (what & EV_TIMEOUT)) {
+    status = GRPC_CALLBACK_TIMED_OUT;
+    /* TODO(klempner): This is broken if we are monitoring both read and write
+       events on the same fd -- generating a spurious event is okay, but
+       generating a spurious timeout is not. */
+    what |= (EV_READ | EV_WRITE);
+  }
+
+  if (what & EV_READ) {
+    switch (em_fd->read_state) {
+      case GRPC_FD_WAITING:
+        run_read_cb = 1;
+        em_fd->read_state = GRPC_FD_IDLE;
+        break;
+      case GRPC_FD_IDLE:
+      case GRPC_FD_CACHED:
+        em_fd->read_state = GRPC_FD_CACHED;
+    }
+  }
+  if (what & EV_WRITE) {
+    switch (em_fd->write_state) {
+      case GRPC_FD_WAITING:
+        run_write_cb = 1;
+        em_fd->write_state = GRPC_FD_IDLE;
+        break;
+      case GRPC_FD_IDLE:
+      case GRPC_FD_CACHED:
+        em_fd->write_state = GRPC_FD_CACHED;
+    }
+  }
+
+  if (run_read_cb) {
+    rdata = &(em_fd->task.activation[GRPC_EM_TA_READ]);
+    rdata->status = status;
+    add_task(rdata);
+  } else if (run_write_cb) {
+    wdata = &(em_fd->task.activation[GRPC_EM_TA_WRITE]);
+    wdata->status = status;
+    add_task(wdata);
+  }
+  gpr_mu_unlock(&em_fd->mu);
+}
+
+static void em_fd_shutdown_cb(int fd, short what, void *arg /*=em_fd*/) {
+  /* TODO(klempner): This could just run directly in the calling thread, except
+     that libevent's handling of event_active() on an event which is already in
+     flight on a different thread is racy and easily triggers TSAN.
+   */
+  grpc_fd *impl = arg;
+  gpr_mu_lock(&impl->mu);
+  impl->shutdown_started = 1;
+  if (impl->read_state == GRPC_FD_WAITING) {
+    event_active(impl->task.activation[GRPC_EM_TA_READ].ev, EV_READ, 1);
+  }
+  if (impl->write_state == GRPC_FD_WAITING) {
+    event_active(impl->task.activation[GRPC_EM_TA_WRITE].ev, EV_WRITE, 1);
+  }
+  gpr_mu_unlock(&impl->mu);
+}
+
+grpc_fd *grpc_fd_create(int fd) {
+  int flags;
+  grpc_libevent_activation_data *rdata, *wdata;
+  grpc_fd *impl = gpr_malloc(sizeof(grpc_fd));
+
+  gpr_mu_lock(&grpc_iomgr_mu);
+  g_num_fds++;
+  gpr_mu_unlock(&grpc_iomgr_mu);
+
+  impl->shutdown_ev = NULL;
+  gpr_mu_init(&impl->mu);
+
+  flags = fcntl(fd, F_GETFL, 0);
+  GPR_ASSERT((flags & O_NONBLOCK) != 0);
+
+  impl->task.type = GRPC_EM_TASK_FD;
+  impl->fd = fd;
+
+  rdata = &(impl->task.activation[GRPC_EM_TA_READ]);
+  rdata->ev = NULL;
+  rdata->cb = NULL;
+  rdata->arg = NULL;
+  rdata->status = GRPC_CALLBACK_SUCCESS;
+  rdata->prev = NULL;
+  rdata->next = NULL;
+
+  wdata = &(impl->task.activation[GRPC_EM_TA_WRITE]);
+  wdata->ev = NULL;
+  wdata->cb = NULL;
+  wdata->arg = NULL;
+  wdata->status = GRPC_CALLBACK_SUCCESS;
+  wdata->prev = NULL;
+  wdata->next = NULL;
+
+  impl->read_state = GRPC_FD_IDLE;
+  impl->write_state = GRPC_FD_IDLE;
+
+  impl->shutdown_started = 0;
+  impl->next = NULL;
+
+  /* TODO(chenw): detect platforms where only level trigger is supported,
+     and set the event to non-persist. */
+  rdata->ev = event_new(g_event_base, impl->fd, EV_ET | EV_PERSIST | EV_READ,
+                        em_fd_cb, impl);
+  GPR_ASSERT(rdata->ev);
+
+  wdata->ev = event_new(g_event_base, impl->fd, EV_ET | EV_PERSIST | EV_WRITE,
+                        em_fd_cb, impl);
+  GPR_ASSERT(wdata->ev);
+
+  impl->shutdown_ev =
+      event_new(g_event_base, -1, EV_READ, em_fd_shutdown_cb, impl);
+  GPR_ASSERT(impl->shutdown_ev);
+
+  return impl;
+}
+
+void grpc_fd_destroy(grpc_fd *impl) {
+  gpr_mu_lock(&grpc_iomgr_mu);
+
+  if (g_num_pollers == 0) {
+    /* it is safe to simply free it */
+    grpc_fd_impl_destroy(impl);
+    gpr_free(impl);
+  } else {
+    /* Put the impl on the list to be destroyed by the poller. */
+    impl->next = g_fds_to_free;
+    g_fds_to_free = impl;
+    /* TODO(ctiller): kick the poller so it destroys this fd promptly
+       (currently we may wait up to a second) */
+  }
+
+  g_num_fds--;
+  gpr_cv_broadcast(&grpc_iomgr_cv);
+  gpr_mu_unlock(&grpc_iomgr_mu);
+}
+
+int grpc_fd_get(struct grpc_fd *em_fd) { return em_fd->fd; }
+
+/* TODO(chenw): should we enforce the contract that notify_on_read cannot be
+   called when the previously registered callback has not been called yet? */
+int grpc_fd_notify_on_read(grpc_fd *impl, grpc_iomgr_cb_func read_cb,
+                           void *read_cb_arg, gpr_timespec deadline) {
+  int force_event = 0;
+  grpc_libevent_activation_data *rdata;
+  gpr_timespec delay_timespec = gpr_time_sub(deadline, gpr_now());
+  struct timeval delay = gpr_timeval_from_timespec(delay_timespec);
+  struct timeval *delayp =
+      gpr_time_cmp(deadline, gpr_inf_future) ? &delay : NULL;
+
+  rdata = &impl->task.activation[GRPC_EM_TA_READ];
+
+  gpr_mu_lock(&impl->mu);
+  rdata->cb = read_cb;
+  rdata->arg = read_cb_arg;
+
+  force_event = (impl->shutdown_started || impl->read_state == GRPC_FD_CACHED);
+  impl->read_state = GRPC_FD_WAITING;
+
+  if (force_event) {
+    event_active(rdata->ev, EV_READ, 1);
+  } else if (event_add(rdata->ev, delayp) == -1) {
+    gpr_mu_unlock(&impl->mu);
+    return 0;
+  }
+  gpr_mu_unlock(&impl->mu);
+  return 1;
+}
+
+int grpc_fd_notify_on_write(grpc_fd *impl, grpc_iomgr_cb_func write_cb,
+                            void *write_cb_arg, gpr_timespec deadline) {
+  int force_event = 0;
+  grpc_libevent_activation_data *wdata;
+  gpr_timespec delay_timespec = gpr_time_sub(deadline, gpr_now());
+  struct timeval delay = gpr_timeval_from_timespec(delay_timespec);
+  struct timeval *delayp =
+      gpr_time_cmp(deadline, gpr_inf_future) ? &delay : NULL;
+
+  wdata = &impl->task.activation[GRPC_EM_TA_WRITE];
+
+  gpr_mu_lock(&impl->mu);
+  wdata->cb = write_cb;
+  wdata->arg = write_cb_arg;
+
+  force_event = (impl->shutdown_started || impl->write_state == GRPC_FD_CACHED);
+  impl->write_state = GRPC_FD_WAITING;
+
+  if (force_event) {
+    event_active(wdata->ev, EV_WRITE, 1);
+  } else if (event_add(wdata->ev, delayp) == -1) {
+    gpr_mu_unlock(&impl->mu);
+    return 0;
+  }
+  gpr_mu_unlock(&impl->mu);
+  return 1;
+}
+
+void grpc_fd_shutdown(grpc_fd *em_fd) {
+  event_active(em_fd->shutdown_ev, EV_READ, 1);
+}
+
+/* Sometimes we want a followup callback: something to be added from the
+   current callback for the EM to invoke once this callback is complete.
+   This is implemented by inserting an entry into an EM queue. */
+
+/* The following structure holds the fields needed for adding the
+   followup callback. These are the argument for the followup callback,
+   the function to use for the followup callback, and the
+   activation data pointer used for the queues (to free in the CB) */
+struct followup_callback_arg {
+  grpc_iomgr_cb_func func;
+  void *cb_arg;
+  grpc_libevent_activation_data adata;
+};
+
+static void followup_proxy_callback(void *cb_arg, grpc_iomgr_cb_status status) {
+  struct followup_callback_arg *fcb_arg = cb_arg;
+  /* Invoke the function */
+  fcb_arg->func(fcb_arg->cb_arg, status);
+  gpr_free(fcb_arg);
+}
+
+void grpc_iomgr_add_callback(grpc_iomgr_cb_func cb, void *cb_arg) {
+  grpc_libevent_activation_data *adptr;
+  struct followup_callback_arg *fcb_arg;
+
+  fcb_arg = gpr_malloc(sizeof(*fcb_arg));
+  /* Set up the activation data and followup callback argument structures */
+  adptr = &fcb_arg->adata;
+  adptr->ev = NULL;
+  adptr->cb = followup_proxy_callback;
+  adptr->arg = fcb_arg;
+  adptr->status = GRPC_CALLBACK_SUCCESS;
+  adptr->prev = NULL;
+  adptr->next = NULL;
+
+  fcb_arg->func = cb;
+  fcb_arg->cb_arg = cb_arg;
+
+  /* Insert an activation data for the specified em */
+  add_task(adptr);
+}
diff --git a/src/core/iomgr/iomgr_libevent.h b/src/core/iomgr/iomgr_libevent.h
new file mode 100644
index 0000000..77e7b59
--- /dev/null
+++ b/src/core/iomgr/iomgr_libevent.h
@@ -0,0 +1,207 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __GRPC_INTERNAL_IOMGR_IOMGR_LIBEVENT_H__
+#define __GRPC_INTERNAL_IOMGR_IOMGR_LIBEVENT_H__
+
+#include "src/core/iomgr/iomgr.h"
+#include <grpc/support/sync.h>
+#include <grpc/support/time.h>
+
+typedef struct grpc_fd grpc_fd;
+
+/* gRPC event manager task "base class". This is pretend-inheritance in C89.
+   This should be the first member of any actual grpc_em task type.
+
+   Memory warning: expanding this will increase memory usage in any derived
+   class, so be careful.
+
+   For generality, this base can be on multiple task queues and can have
+   multiple event callbacks registered. Not all "derived classes" will use
+   this feature. */
+
+typedef enum grpc_libevent_task_type {
+  GRPC_EM_TASK_ALARM,
+  GRPC_EM_TASK_FD,
+  GRPC_EM_TASK_DO_NOT_USE
+} grpc_libevent_task_type;
+
+/* Different activity types to shape the callback and queueing arrays */
+typedef enum grpc_em_task_activity_type {
+  GRPC_EM_TA_READ, /* use this also for single-type events */
+  GRPC_EM_TA_WRITE,
+  GRPC_EM_TA_COUNT
+} grpc_em_task_activity_type;
+
+/* Include the following #define for convenience for tasks like alarms that
+   only have a single type */
+#define GRPC_EM_TA_ONLY GRPC_EM_TA_READ
+
+typedef struct grpc_libevent_activation_data {
+  struct event *ev;      /* event activated on this callback type */
+  grpc_iomgr_cb_func cb; /* function pointer for callback */
+  void *arg;             /* argument passed to cb */
+
+  /* Hold the status associated with the callback when queued */
+  grpc_iomgr_cb_status status;
+  /* Now set up to link activations into scheduler queues */
+  struct grpc_libevent_activation_data *prev;
+  struct grpc_libevent_activation_data *next;
+} grpc_libevent_activation_data;
+
+typedef struct grpc_libevent_task {
+  grpc_libevent_task_type type;
+
+  /* Now have an array of activation data elements: one for each activity
+     type that could get activated */
+  grpc_libevent_activation_data activation[GRPC_EM_TA_COUNT];
+} grpc_libevent_task;
+
+/* Initialize *em_fd.
+   Requires fd is a non-blocking file descriptor.
+
+   This takes ownership of closing fd.
+
+   Requires:  *em_fd uninitialized. fd is a non-blocking file descriptor.  */
+grpc_fd *grpc_fd_create(int fd);
+
+/* Cause *em_fd no longer to be initialized and closes the underlying fd.
+   Requires: *em_fd initialized; no outstanding notify_on_read or
+   notify_on_write.  */
+void grpc_fd_destroy(grpc_fd *em_fd);
+
+/* Returns the file descriptor associated with *em_fd. */
+int grpc_fd_get(grpc_fd *em_fd);
+
+/* Register read interest, causing read_cb to be called once when em_fd becomes
+   readable, on deadline specified by deadline, or on shutdown triggered by
+   grpc_fd_shutdown.
+   read_cb will be called with read_cb_arg when *em_fd becomes readable.
+   read_cb is Called with status of GRPC_CALLBACK_SUCCESS if readable,
+   GRPC_CALLBACK_TIMED_OUT if the call timed out,
+   and CANCELLED if the call was cancelled.
+
+   Requires: This method must not be called before the read_cb for any previous
+   call runs. Edge triggered events are used whenever they are supported by the
+   underlying platform. This means that users must drain em_fd in read_cb before
+   calling notify_on_read again. Users are also expected to handle spurious
+   events, i.e. read_cb is called while nothing can be readable from em_fd. */
+int grpc_fd_notify_on_read(grpc_fd *em_fd, grpc_iomgr_cb_func read_cb,
+                           void *read_cb_arg, gpr_timespec deadline);
+
+/* Exactly the same semantics as above, except based on writable events.  */
+int grpc_fd_notify_on_write(grpc_fd *fd, grpc_iomgr_cb_func write_cb,
+                            void *write_cb_arg, gpr_timespec deadline);
+
+/* Cause any current and all future read/write callbacks to error out with
+   GRPC_CALLBACK_CANCELLED. */
+void grpc_fd_shutdown(grpc_fd *em_fd);
+
+/* =================== Event caching ===================
+   In order to not miss or double-return edges in the context of edge triggering
+   and multithreading, we need a per-fd caching layer in the eventmanager itself
+   to cache relevant events.
+
+   There are two types of events we care about: calls to notify_on_[read|write]
+   and readable/writable events for the socket from eventfd. There are separate
+   event caches for read and write.
+
+   There are three states:
+   0. "waiting" -- There's been a call to notify_on_[read|write] which has not
+   had a corresponding event. In other words, we're waiting for an event so we
+   can run the callback.
+   1. "idle" -- We are neither waiting nor have a cached event.
+   2. "cached" -- There has been a read/write event without a waiting callback,
+   so we want to run the event next time the application calls
+   notify_on_[read|write].
+
+   The high level state diagram:
+
+   +--------------------------------------------------------------------+
+   | WAITING                  | IDLE                | CACHED            |
+   |                          |                     |                   |
+   |                     1. --*->              2. --+->           3.  --+\
+   |                          |                     |                <--+/
+   |                          |                     |                   |
+  x+-- 6.                5. <-+--              4. <-*--                 |
+   |                          |                     |                   |
+   +--------------------------------------------------------------------+
+
+   Transitions right occur on read|write events. Transitions left occur on
+   notify_on_[read|write] events.
+   State transitions:
+   1. Read|Write event while waiting -> run the callback and transition to idle.
+   2. Read|Write event while idle -> transition to cached.
+   3. Read|Write event with one already cached -> still cached.
+   4. notify_on_[read|write] with event cached: run callback and transition to
+      idle.
+   5. notify_on_[read|write] when idle: Store callback and transition to
+      waiting.
+   6. notify_on_[read|write] when waiting: invalid. */
+
+typedef enum grpc_fd_state {
+  GRPC_FD_WAITING = 0,
+  GRPC_FD_IDLE = 1,
+  GRPC_FD_CACHED = 2
+} grpc_fd_state;
+
+/* gRPC file descriptor handle.
+   The handle is used to register read/write callbacks to a file descriptor */
+struct grpc_fd {
+  grpc_libevent_task task; /* Base class, callbacks, queues, etc */
+  int fd;                  /* File descriptor */
+
+  /* Note that the shutdown event is only needed as a workaround for libevent
+     not properly handling event_active on an in flight event. */
+  struct event *shutdown_ev; /* activated to trigger shutdown */
+
+  /* protect shutdown_started|read_state|write_state and ensure barriers
+     between notify_on_[read|write] and read|write callbacks */
+  gpr_mu mu;
+  int shutdown_started; /* 0 -> shutdown not started, 1 -> started */
+  grpc_fd_state read_state;
+  grpc_fd_state write_state;
+
+  /* descriptor delete list. These are destroyed during polling. */
+  struct grpc_fd *next;
+};
+
+/* gRPC alarm handle.
+   The handle is used to add an alarm which expires after specified timeout. */
+struct grpc_alarm {
+  grpc_libevent_task task; /* Include the base class */
+
+  gpr_atm triggered; /* To be used atomically if alarm triggered */
+};
+
+#endif /* __GRPC_INTERNAL_IOMGR_IOMGR_LIBEVENT_H__ */
diff --git a/src/core/eventmanager/em_posix.c b/src/core/iomgr/iomgr_libevent_use_threads.c
similarity index 100%
rename from src/core/eventmanager/em_posix.c
rename to src/core/iomgr/iomgr_libevent_use_threads.c
diff --git a/src/core/endpoint/resolve_address.h b/src/core/iomgr/resolve_address.h
similarity index 93%
rename from src/core/endpoint/resolve_address.h
rename to src/core/iomgr/resolve_address.h
index cc32c47..37ec0f0 100644
--- a/src/core/endpoint/resolve_address.h
+++ b/src/core/iomgr/resolve_address.h
@@ -31,8 +31,8 @@
  *
  */
 
-#ifndef __GRPC_INTERNAL_ENDPOINT_RESOLVE_ADDRESS_H__
-#define __GRPC_INTERNAL_ENDPOINT_RESOLVE_ADDRESS_H__
+#ifndef __GRPC_INTERNAL_IOMGR_RESOLVE_ADDRESS_H__
+#define __GRPC_INTERNAL_IOMGR_RESOLVE_ADDRESS_H__
 
 #include <sys/socket.h>
 
@@ -64,4 +64,4 @@
 grpc_resolved_addresses *grpc_blocking_resolve_address(
     const char *addr, const char *default_port);
 
-#endif  /* __GRPC_INTERNAL_ENDPOINT_RESOLVE_ADDRESS_H__ */
+#endif /* __GRPC_INTERNAL_IOMGR_RESOLVE_ADDRESS_H__ */
diff --git a/src/core/endpoint/resolve_address.c b/src/core/iomgr/resolve_address_posix.c
similarity index 97%
rename from src/core/endpoint/resolve_address.c
rename to src/core/iomgr/resolve_address_posix.c
index 1993b9b..d3ea378 100644
--- a/src/core/endpoint/resolve_address.c
+++ b/src/core/iomgr/resolve_address_posix.c
@@ -33,7 +33,7 @@
 
 #define _POSIX_SOURCE
 
-#include "src/core/endpoint/resolve_address.h"
+#include "src/core/iomgr/resolve_address.h"
 
 #include <sys/types.h>
 #include <sys/socket.h>
@@ -41,10 +41,11 @@
 #include <unistd.h>
 #include <string.h>
 
-#include "src/core/endpoint/socket_utils.h"
+#include "src/core/iomgr/sockaddr_utils.h"
+#include "src/core/iomgr/socket_utils_posix.h"
 #include <grpc/support/alloc.h>
-#include <grpc/support/string.h>
 #include <grpc/support/log.h>
+#include <grpc/support/string.h>
 #include <grpc/support/thd.h>
 #include <grpc/support/time.h>
 
diff --git a/src/core/eventmanager/em_win32.c b/src/core/iomgr/sockaddr.h
similarity index 82%
copy from src/core/eventmanager/em_win32.c
copy to src/core/iomgr/sockaddr.h
index 4d5c3b5..b980b30 100644
--- a/src/core/eventmanager/em_win32.c
+++ b/src/core/iomgr/sockaddr.h
@@ -31,8 +31,17 @@
  *
  */
 
-/* Windows event manager support code. */
-#include <event2/thread.h>
+#ifndef __GRPC_INTERNAL_IOMGR_SOCKADDR_H_
+#define __GRPC_INTERNAL_IOMGR_SOCKADDR_H_
 
-/* Notify LibEvent that Windows thread is used. */
-int evthread_use_threads() { return evthread_use_windows_threads(); }
+#include <grpc/support/port_platform.h>
+
+#ifdef GPR_WIN32
+#include "src/core/iomgr/sockaddr_win32.h"
+#endif
+
+#ifdef GPR_POSIX_SOCKETADDR
+#include "src/core/iomgr/sockaddr_posix.h"
+#endif
+
+#endif /* __GRPC_INTERNAL_IOMGR_SOCKADDR_H_ */
diff --git a/src/core/eventmanager/em_win32.c b/src/core/iomgr/sockaddr_posix.h
similarity index 88%
copy from src/core/eventmanager/em_win32.c
copy to src/core/iomgr/sockaddr_posix.h
index 4d5c3b5..79ef3ca 100644
--- a/src/core/eventmanager/em_win32.c
+++ b/src/core/iomgr/sockaddr_posix.h
@@ -31,8 +31,10 @@
  *
  */
 
-/* Windows event manager support code. */
-#include <event2/thread.h>
+#ifndef __GRPC_INTERNAL_IOMGR_SOCKADDR_POSIX_H_
+#define __GRPC_INTERNAL_IOMGR_SOCKADDR_POSIX_H_
 
-/* Notify LibEvent that Windows thread is used. */
-int evthread_use_threads() { return evthread_use_windows_threads(); }
+#include <sys/socket.h>
+#include <netinet/in.h>
+
+#endif /* __GRPC_INTERNAL_IOMGR_SOCKADDR_POSIX_H_ */
diff --git a/src/core/endpoint/socket_utils.c b/src/core/iomgr/sockaddr_utils.c
similarity index 64%
rename from src/core/endpoint/socket_utils.c
rename to src/core/iomgr/sockaddr_utils.c
index ef160d7..f709d35 100644
--- a/src/core/endpoint/socket_utils.c
+++ b/src/core/iomgr/sockaddr_utils.c
@@ -31,127 +31,17 @@
  *
  */
 
-#include "src/core/endpoint/socket_utils.h"
+#include "src/core/iomgr/sockaddr_utils.h"
 
 #include <arpa/inet.h>
-#include <limits.h>
-#include <fcntl.h>
-#include <netinet/in.h>
-#include <netinet/tcp.h>
-#include <stdio.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <unistd.h>
-#include <string.h>
 #include <errno.h>
+#include <string.h>
 
 #include <grpc/support/host_port.h>
 #include <grpc/support/string.h>
 #include <grpc/support/log.h>
 #include <grpc/support/port_platform.h>
 
-/* set a socket to non blocking mode */
-int grpc_set_socket_nonblocking(int fd, int non_blocking) {
-  int oldflags = fcntl(fd, F_GETFL, 0);
-  if (oldflags < 0) {
-    return 0;
-  }
-
-  if (non_blocking) {
-    oldflags |= O_NONBLOCK;
-  } else {
-    oldflags &= ~O_NONBLOCK;
-  }
-
-  if (fcntl(fd, F_SETFL, oldflags) != 0) {
-    return 0;
-  }
-
-  return 1;
-}
-
-/* set a socket to close on exec */
-int grpc_set_socket_cloexec(int fd, int close_on_exec) {
-  int oldflags = fcntl(fd, F_GETFD, 0);
-  if (oldflags < 0) {
-    return 0;
-  }
-
-  if (close_on_exec) {
-    oldflags |= FD_CLOEXEC;
-  } else {
-    oldflags &= ~FD_CLOEXEC;
-  }
-
-  if (fcntl(fd, F_SETFD, oldflags) != 0) {
-    return 0;
-  }
-
-  return 1;
-}
-
-/* set a socket to reuse old addresses */
-int grpc_set_socket_reuse_addr(int fd, int reuse) {
-  int val = (reuse != 0);
-  int newval;
-  socklen_t intlen = sizeof(newval);
-  return 0 == setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val)) &&
-         0 == getsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &newval, &intlen) &&
-         newval == val;
-}
-
-/* disable nagle */
-int grpc_set_socket_low_latency(int fd, int low_latency) {
-  int val = (low_latency != 0);
-  int newval;
-  socklen_t intlen = sizeof(newval);
-  return 0 == setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &val, sizeof(val)) &&
-         0 == getsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &newval, &intlen) &&
-         newval == val;
-}
-
-/* This should be 0 in production, but it may be enabled for testing or
-   debugging purposes, to simulate an environment where IPv6 sockets can't
-   also speak IPv4. */
-int grpc_forbid_dualstack_sockets_for_testing = 0;
-
-static int set_socket_dualstack(int fd) {
-  if (!grpc_forbid_dualstack_sockets_for_testing) {
-    const int off = 0;
-    return 0 == setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &off, sizeof(off));
-  } else {
-    /* Force an IPv6-only socket, for testing purposes. */
-    const int on = 1;
-    setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));
-    return 0;
-  }
-}
-
-int grpc_create_dualstack_socket(const struct sockaddr *addr, int type,
-                                 int protocol, grpc_dualstack_mode *dsmode) {
-  int family = addr->sa_family;
-  if (family == AF_INET6) {
-    int fd = socket(family, type, protocol);
-    /* Check if we've got a valid dualstack socket. */
-    if (fd >= 0 && set_socket_dualstack(fd)) {
-      *dsmode = GRPC_DSMODE_DUALSTACK;
-      return fd;
-    }
-    /* If this isn't an IPv4 address, then return whatever we've got. */
-    if (!grpc_sockaddr_is_v4mapped(addr, NULL)) {
-      *dsmode = GRPC_DSMODE_IPV6;
-      return fd;
-    }
-    /* Fall back to AF_INET. */
-    if (fd >= 0) {
-      close(fd);
-    }
-    family = AF_INET;
-  }
-  *dsmode = family == AF_INET ? GRPC_DSMODE_IPV4 : GRPC_DSMODE_NONE;
-  return socket(family, type, protocol);
-}
-
 static const gpr_uint8 kV4MappedPrefix[] = {0, 0, 0, 0, 0,    0,
                                             0, 0, 0, 0, 0xff, 0xff};
 
diff --git a/src/core/iomgr/sockaddr_utils.h b/src/core/iomgr/sockaddr_utils.h
new file mode 100644
index 0000000..753d0c8
--- /dev/null
+++ b/src/core/iomgr/sockaddr_utils.h
@@ -0,0 +1,75 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __GRPC_INTERNAL_IOMGR_SOCKADDR_UTILS_H__
+#define __GRPC_INTERNAL_IOMGR_SOCKADDR_UTILS_H__
+
+#include "src/core/iomgr/sockaddr.h"
+
+/* Returns true if addr is an IPv4-mapped IPv6 address within the
+   ::ffff:0.0.0.0/96 range, or false otherwise.
+
+   If addr4_out is non-NULL, the inner IPv4 address will be copied here when
+   returning true. */
+int grpc_sockaddr_is_v4mapped(const struct sockaddr *addr,
+                              struct sockaddr_in *addr4_out);
+
+/* If addr is an AF_INET address, writes the corresponding ::ffff:0.0.0.0/96
+   address to addr6_out and returns true.  Otherwise returns false. */
+int grpc_sockaddr_to_v4mapped(const struct sockaddr *addr,
+                              struct sockaddr_in6 *addr6_out);
+
+/* If addr is ::, 0.0.0.0, or ::ffff:0.0.0.0, writes the port number to
+   *port_out (if not NULL) and returns true, otherwise returns false. */
+int grpc_sockaddr_is_wildcard(const struct sockaddr *addr, int *port_out);
+
+/* Writes 0.0.0.0:port and [::]:port to separate sockaddrs. */
+void grpc_sockaddr_make_wildcards(int port, struct sockaddr_in *wild4_out,
+                                  struct sockaddr_in6 *wild6_out);
+
+/* Converts a sockaddr into a newly-allocated human-readable string.
+
+   Currently, only the AF_INET and AF_INET6 families are recognized.
+   If the normalize flag is enabled, ::ffff:0.0.0.0/96 IPv6 addresses are
+   displayed as plain IPv4.
+
+   Usage is similar to gpr_asprintf: returns the number of bytes written
+   (excluding the final '\0'), and *out points to a string which must later be
+   destroyed using gpr_free().
+
+   In the unlikely event of an error, returns -1 and sets *out to NULL.
+   The existing value of errno is always preserved. */
+int grpc_sockaddr_to_string(char **out, const struct sockaddr *addr,
+                            int normalize);
+
+#endif /* __GRPC_INTERNAL_IOMGR_SOCKADDR_UTILS_H__ */
diff --git a/src/core/eventmanager/em_win32.c b/src/core/iomgr/sockaddr_win32.h
similarity index 88%
rename from src/core/eventmanager/em_win32.c
rename to src/core/iomgr/sockaddr_win32.h
index 4d5c3b5..751ac3d 100644
--- a/src/core/eventmanager/em_win32.c
+++ b/src/core/iomgr/sockaddr_win32.h
@@ -31,8 +31,7 @@
  *
  */
 
-/* Windows event manager support code. */
-#include <event2/thread.h>
+#ifndef __GRPC_INTERNAL_IOMGR_SOCKADDR_WIN32_H_
+#define __GRPC_INTERNAL_IOMGR_SOCKADDR_WIN32_H_
 
-/* Notify LibEvent that Windows thread is used. */
-int evthread_use_threads() { return evthread_use_windows_threads(); }
+#endif /* __GRPC_INTERNAL_IOMGR_SOCKADDR_WIN32_H_ */
diff --git a/src/core/iomgr/socket_utils_common_posix.c b/src/core/iomgr/socket_utils_common_posix.c
new file mode 100644
index 0000000..0767d6f
--- /dev/null
+++ b/src/core/iomgr/socket_utils_common_posix.c
@@ -0,0 +1,154 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/iomgr/socket_utils_posix.h"
+
+#include <arpa/inet.h>
+#include <limits.h>
+#include <fcntl.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+
+#include "src/core/iomgr/sockaddr_utils.h"
+#include <grpc/support/host_port.h>
+#include <grpc/support/string.h>
+#include <grpc/support/log.h>
+#include <grpc/support/port_platform.h>
+
+/* set a socket to non-blocking mode */
+int grpc_set_socket_nonblocking(int fd, int non_blocking) {
+  int oldflags = fcntl(fd, F_GETFL, 0);
+  if (oldflags < 0) {
+    return 0;
+  }
+
+  if (non_blocking) {
+    oldflags |= O_NONBLOCK;
+  } else {
+    oldflags &= ~O_NONBLOCK;
+  }
+
+  if (fcntl(fd, F_SETFL, oldflags) != 0) {
+    return 0;
+  }
+
+  return 1;
+}
+
+/* set a socket to close on exec */
+int grpc_set_socket_cloexec(int fd, int close_on_exec) {
+  int oldflags = fcntl(fd, F_GETFD, 0);
+  if (oldflags < 0) {
+    return 0;
+  }
+
+  if (close_on_exec) {
+    oldflags |= FD_CLOEXEC;
+  } else {
+    oldflags &= ~FD_CLOEXEC;
+  }
+
+  if (fcntl(fd, F_SETFD, oldflags) != 0) {
+    return 0;
+  }
+
+  return 1;
+}
+
+/* set a socket to reuse old addresses */
+int grpc_set_socket_reuse_addr(int fd, int reuse) {
+  int val = (reuse != 0);
+  int newval;
+  socklen_t intlen = sizeof(newval);
+  return 0 == setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val)) &&
+         0 == getsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &newval, &intlen) &&
+         newval == val;
+}
+
+/* disable nagle */
+int grpc_set_socket_low_latency(int fd, int low_latency) {
+  int val = (low_latency != 0);
+  int newval;
+  socklen_t intlen = sizeof(newval);
+  return 0 == setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &val, sizeof(val)) &&
+         0 == getsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &newval, &intlen) &&
+         newval == val;
+}
+
+/* This should be 0 in production, but it may be enabled for testing or
+   debugging purposes, to simulate an environment where IPv6 sockets can't
+   also speak IPv4. */
+int grpc_forbid_dualstack_sockets_for_testing = 0;
+
+static int set_socket_dualstack(int fd) {
+  if (!grpc_forbid_dualstack_sockets_for_testing) {
+    const int off = 0;
+    return 0 == setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &off, sizeof(off));
+  } else {
+    /* Force an IPv6-only socket, for testing purposes. */
+    const int on = 1;
+    setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));
+    return 0;
+  }
+}
+
+int grpc_create_dualstack_socket(const struct sockaddr *addr, int type,
+                                 int protocol, grpc_dualstack_mode *dsmode) {
+  int family = addr->sa_family;
+  if (family == AF_INET6) {
+    int fd = socket(family, type, protocol);
+    /* Check if we've got a valid dualstack socket. */
+    if (fd >= 0 && set_socket_dualstack(fd)) {
+      *dsmode = GRPC_DSMODE_DUALSTACK;
+      return fd;
+    }
+    /* If this isn't an IPv4 address, then return whatever we've got. */
+    if (!grpc_sockaddr_is_v4mapped(addr, NULL)) {
+      *dsmode = GRPC_DSMODE_IPV6;
+      return fd;
+    }
+    /* Fall back to AF_INET. */
+    if (fd >= 0) {
+      close(fd);
+    }
+    family = AF_INET;
+  }
+  *dsmode = family == AF_INET ? GRPC_DSMODE_IPV4 : GRPC_DSMODE_NONE;
+  return socket(family, type, protocol);
+}
diff --git a/src/core/endpoint/socket_utils_linux.c b/src/core/iomgr/socket_utils_linux.c
similarity index 97%
rename from src/core/endpoint/socket_utils_linux.c
rename to src/core/iomgr/socket_utils_linux.c
index 479675e..f971cb3 100644
--- a/src/core/endpoint/socket_utils_linux.c
+++ b/src/core/iomgr/socket_utils_linux.c
@@ -36,7 +36,7 @@
 
 #ifdef GPR_LINUX
 
-#include "src/core/endpoint/socket_utils.h"
+#include "src/core/iomgr/socket_utils_posix.h"
 
 #include <sys/types.h>
 #include <sys/socket.h>
diff --git a/src/core/endpoint/socket_utils_posix.c b/src/core/iomgr/socket_utils_posix.c
similarity index 100%
rename from src/core/endpoint/socket_utils_posix.c
rename to src/core/iomgr/socket_utils_posix.c
diff --git a/src/core/endpoint/socket_utils.h b/src/core/iomgr/socket_utils_posix.h
similarity index 66%
rename from src/core/endpoint/socket_utils.h
rename to src/core/iomgr/socket_utils_posix.h
index 23fa192..5c31e5e 100644
--- a/src/core/endpoint/socket_utils.h
+++ b/src/core/iomgr/socket_utils_posix.h
@@ -31,16 +31,12 @@
  *
  */
 
-#ifndef __GRPC_INTERNAL_ENDPOINT_SOCKET_UTILS_H__
-#define __GRPC_INTERNAL_ENDPOINT_SOCKET_UTILS_H__
+#ifndef __GRPC_INTERNAL_IOMGR_SOCKET_UTILS_POSIX_H__
+#define __GRPC_INTERNAL_IOMGR_SOCKET_UTILS_POSIX_H__
 
 #include <unistd.h>
 #include <sys/socket.h>
 
-struct sockaddr;
-struct sockaddr_in;
-struct sockaddr_in6;
-
 /* a wrapper for accept or accept4 */
 int grpc_accept4(int sockfd, struct sockaddr *addr, socklen_t *addrlen,
                  int nonblock, int cloexec);
@@ -99,40 +95,4 @@
 int grpc_create_dualstack_socket(const struct sockaddr *addr, int type,
                                  int protocol, grpc_dualstack_mode *dsmode);
 
-/* Returns true if addr is an IPv4-mapped IPv6 address within the
-   ::ffff:0.0.0.0/96 range, or false otherwise.
-
-   If addr4_out is non-NULL, the inner IPv4 address will be copied here when
-   returning true. */
-int grpc_sockaddr_is_v4mapped(const struct sockaddr *addr,
-                              struct sockaddr_in *addr4_out);
-
-/* If addr is an AF_INET address, writes the corresponding ::ffff:0.0.0.0/96
-   address to addr6_out and returns true.  Otherwise returns false. */
-int grpc_sockaddr_to_v4mapped(const struct sockaddr *addr,
-                              struct sockaddr_in6 *addr6_out);
-
-/* If addr is ::, 0.0.0.0, or ::ffff:0.0.0.0, writes the port number to
-   *port_out (if not NULL) and returns true, otherwise returns false. */
-int grpc_sockaddr_is_wildcard(const struct sockaddr *addr, int *port_out);
-
-/* Writes 0.0.0.0:port and [::]:port to separate sockaddrs. */
-void grpc_sockaddr_make_wildcards(int port, struct sockaddr_in *wild4_out,
-                                  struct sockaddr_in6 *wild6_out);
-
-/* Converts a sockaddr into a newly-allocated human-readable string.
-
-   Currently, only the AF_INET and AF_INET6 families are recognized.
-   If the normalize flag is enabled, ::ffff:0.0.0.0/96 IPv6 addresses are
-   displayed as plain IPv4.
-
-   Usage is similar to gpr_asprintf: returns the number of bytes written
-   (excluding the final '\0'), and *out points to a string which must later be
-   destroyed using gpr_free().
-
-   In the unlikely event of an error, returns -1 and sets *out to NULL.
-   The existing value of errno is always preserved. */
-int grpc_sockaddr_to_string(char **out, const struct sockaddr *addr,
-                            int normalize);
-
-#endif  /* __GRPC_INTERNAL_ENDPOINT_SOCKET_UTILS_H__ */
+#endif /* __GRPC_INTERNAL_IOMGR_SOCKET_UTILS_POSIX_H__ */
diff --git a/src/core/endpoint/tcp_client.h b/src/core/iomgr/tcp_client.h
similarity index 81%
rename from src/core/endpoint/tcp_client.h
rename to src/core/iomgr/tcp_client.h
index 69b1b62..a4632d8 100644
--- a/src/core/endpoint/tcp_client.h
+++ b/src/core/iomgr/tcp_client.h
@@ -31,21 +31,18 @@
  *
  */
 
-#ifndef __GRPC_INTERNAL_ENDPOINT_TCP_CLIENT_H__
-#define __GRPC_INTERNAL_ENDPOINT_TCP_CLIENT_H__
+#ifndef __GRPC_INTERNAL_IOMGR_TCP_CLIENT_H__
+#define __GRPC_INTERNAL_IOMGR_TCP_CLIENT_H__
 
-#include "src/core/endpoint/tcp.h"
+#include "src/core/endpoint/endpoint.h"
+#include "src/core/iomgr/sockaddr.h"
 #include <grpc/support/time.h>
 
-#include <sys/types.h>
-#include <sys/socket.h>
-
 /* Asynchronously connect to an address (specified as (addr, len)), and call
    cb with arg and the completed connection when done (or call cb with arg and
    NULL on failure) */
 void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *tcp),
-                             void *arg, grpc_em *em,
-                             const struct sockaddr *addr, int addr_len,
-                             gpr_timespec deadline);
+                             void *arg, const struct sockaddr *addr,
+                             int addr_len, gpr_timespec deadline);
 
-#endif  /* __GRPC_INTERNAL_ENDPOINT_TCP_CLIENT_H__ */
+#endif /* __GRPC_INTERNAL_IOMGR_TCP_CLIENT_H__ */
diff --git a/src/core/endpoint/tcp_client.c b/src/core/iomgr/tcp_client_posix.c
similarity index 85%
rename from src/core/endpoint/tcp_client.c
rename to src/core/iomgr/tcp_client_posix.c
index c6f470b..8d2d7ab 100644
--- a/src/core/endpoint/tcp_client.c
+++ b/src/core/iomgr/tcp_client_posix.c
@@ -31,14 +31,17 @@
  *
  */
 
-#include "src/core/endpoint/tcp_client.h"
+#include "src/core/iomgr/tcp_client.h"
 
 #include <errno.h>
 #include <netinet/in.h>
 #include <string.h>
 #include <unistd.h>
 
-#include "src/core/endpoint/socket_utils.h"
+#include "src/core/iomgr/iomgr_libevent.h"
+#include "src/core/iomgr/sockaddr_utils.h"
+#include "src/core/iomgr/socket_utils_posix.h"
+#include "src/core/iomgr/tcp_posix.h"
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
 #include <grpc/support/time.h>
@@ -46,7 +49,7 @@
 typedef struct {
   void (*cb)(void *arg, grpc_endpoint *tcp);
   void *cb_arg;
-  grpc_em_fd *fd;
+  grpc_fd *fd;
   gpr_timespec deadline;
 } async_connect;
 
@@ -71,12 +74,12 @@
   return 0;
 }
 
-static void on_writable(void *acp, grpc_em_cb_status status) {
+static void on_writable(void *acp, grpc_iomgr_cb_status status) {
   async_connect *ac = acp;
   int so_error = 0;
   socklen_t so_error_size;
   int err;
-  int fd = grpc_em_fd_get(ac->fd);
+  int fd = grpc_fd_get(ac->fd);
 
   if (status == GRPC_CALLBACK_SUCCESS) {
     do {
@@ -103,7 +106,7 @@
            opened too many network connections.  The "easy" fix:
            don't do that! */
         gpr_log(GPR_ERROR, "kernel out of buffers");
-        grpc_em_fd_notify_on_write(ac->fd, on_writable, ac, ac->deadline);
+        grpc_fd_notify_on_write(ac->fd, on_writable, ac, ac->deadline);
         return;
       } else {
         goto error;
@@ -120,20 +123,18 @@
 
 error:
   ac->cb(ac->cb_arg, NULL);
-  grpc_em_fd_destroy(ac->fd);
-  gpr_free(ac->fd);
+  grpc_fd_destroy(ac->fd);
   gpr_free(ac);
   return;
 
 great_success:
-  ac->cb(ac->cb_arg, grpc_tcp_create_emfd(ac->fd));
+  ac->cb(ac->cb_arg, grpc_tcp_create(ac->fd, GRPC_TCP_DEFAULT_READ_SLICE_SIZE));
   gpr_free(ac);
 }
 
 void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *ep),
-                             void *arg, grpc_em *em,
-                             const struct sockaddr *addr, int addr_len,
-                             gpr_timespec deadline) {
+                             void *arg, const struct sockaddr *addr,
+                             int addr_len, gpr_timespec deadline) {
   int fd;
   grpc_dualstack_mode dsmode;
   int err;
@@ -167,7 +168,8 @@
   } while (err < 0 && errno == EINTR);
 
   if (err >= 0) {
-    cb(arg, grpc_tcp_create(fd, em));
+    cb(arg,
+       grpc_tcp_create(grpc_fd_create(fd), GRPC_TCP_DEFAULT_READ_SLICE_SIZE));
     return;
   }
 
@@ -182,7 +184,6 @@
   ac->cb = cb;
   ac->cb_arg = arg;
   ac->deadline = deadline;
-  ac->fd = gpr_malloc(sizeof(grpc_em_fd));
-  grpc_em_fd_init(ac->fd, em, fd);
-  grpc_em_fd_notify_on_write(ac->fd, on_writable, ac, deadline);
+  ac->fd = grpc_fd_create(fd);
+  grpc_fd_notify_on_write(ac->fd, on_writable, ac, deadline);
 }
diff --git a/src/core/endpoint/tcp.c b/src/core/iomgr/tcp_posix.c
similarity index 91%
rename from src/core/endpoint/tcp.c
rename to src/core/iomgr/tcp_posix.c
index 482344d..8f63f75 100644
--- a/src/core/endpoint/tcp.c
+++ b/src/core/iomgr/tcp_posix.c
@@ -31,7 +31,7 @@
  *
  */
 
-#include "src/core/endpoint/tcp.h"
+#include "src/core/iomgr/tcp_posix.h"
 
 #include <errno.h>
 #include <stdlib.h>
@@ -40,7 +40,6 @@
 #include <sys/socket.h>
 #include <unistd.h>
 
-#include "src/core/eventmanager/em.h"
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
 #include <grpc/support/slice.h>
@@ -249,8 +248,7 @@
 
 typedef struct {
   grpc_endpoint base;
-  grpc_em *em;
-  grpc_em_fd *em_fd;
+  grpc_fd *em_fd;
   int fd;
   size_t slice_size;
   gpr_refcount refcount;
@@ -266,25 +264,19 @@
 } grpc_tcp;
 
 static void grpc_tcp_handle_read(void *arg /* grpc_tcp */,
-                                 grpc_em_cb_status status);
+                                 grpc_iomgr_cb_status status);
 static void grpc_tcp_handle_write(void *arg /* grpc_tcp */,
-                                  grpc_em_cb_status status);
-
-#define DEFAULT_SLICE_SIZE 8192
-grpc_endpoint *grpc_tcp_create(int fd, grpc_em *em) {
-  return grpc_tcp_create_dbg(fd, em, DEFAULT_SLICE_SIZE);
-}
+                                  grpc_iomgr_cb_status status);
 
 static void grpc_tcp_shutdown(grpc_endpoint *ep) {
   grpc_tcp *tcp = (grpc_tcp *)ep;
-  grpc_em_fd_shutdown(tcp->em_fd);
+  grpc_fd_shutdown(tcp->em_fd);
 }
 
 static void grpc_tcp_unref(grpc_tcp *tcp) {
   int refcount_zero = gpr_unref(&tcp->refcount);
   if (refcount_zero) {
-    grpc_em_fd_destroy(tcp->em_fd);
-    gpr_free(tcp->em_fd);
+    grpc_fd_destroy(tcp->em_fd);
     gpr_free(tcp);
   }
 }
@@ -317,7 +309,7 @@
 #define INLINE_SLICE_BUFFER_SIZE 8
 #define MAX_READ_IOVEC 4
 static void grpc_tcp_handle_read(void *arg /* grpc_tcp */,
-                                 grpc_em_cb_status status) {
+                                 grpc_iomgr_cb_status status) {
   grpc_tcp *tcp = (grpc_tcp *)arg;
   int iov_size = 1;
   gpr_slice static_read_slices[INLINE_SLICE_BUFFER_SIZE];
@@ -385,8 +377,8 @@
         } else {
           /* Spurious read event, consume it here */
           slice_state_destroy(&read_state);
-          grpc_em_fd_notify_on_read(tcp->em_fd, grpc_tcp_handle_read, tcp,
-                                    tcp->read_deadline);
+          grpc_fd_notify_on_read(tcp->em_fd, grpc_tcp_handle_read, tcp,
+                                 tcp->read_deadline);
         }
       } else {
         /* TODO(klempner): Log interesting errors */
@@ -422,7 +414,7 @@
   tcp->read_user_data = user_data;
   tcp->read_deadline = deadline;
   gpr_ref(&tcp->refcount);
-  grpc_em_fd_notify_on_read(tcp->em_fd, grpc_tcp_handle_read, tcp, deadline);
+  grpc_fd_notify_on_read(tcp->em_fd, grpc_tcp_handle_read, tcp, deadline);
 }
 
 #define MAX_WRITE_IOVEC 16
@@ -469,7 +461,7 @@
 }
 
 static void grpc_tcp_handle_write(void *arg /* grpc_tcp */,
-                                  grpc_em_cb_status status) {
+                                  grpc_iomgr_cb_status status) {
   grpc_tcp *tcp = (grpc_tcp *)arg;
   grpc_endpoint_write_status write_status;
   grpc_endpoint_cb_status cb_status;
@@ -494,8 +486,8 @@
 
   write_status = grpc_tcp_flush(tcp);
   if (write_status == GRPC_ENDPOINT_WRITE_PENDING) {
-    grpc_em_fd_notify_on_write(tcp->em_fd, grpc_tcp_handle_write, tcp,
-                               tcp->write_deadline);
+    grpc_fd_notify_on_write(tcp->em_fd, grpc_tcp_handle_write, tcp,
+                            tcp->write_deadline);
   } else {
     slice_state_destroy(&tcp->write_state);
     if (write_status == GRPC_ENDPOINT_WRITE_DONE) {
@@ -539,8 +531,8 @@
     tcp->write_cb = cb;
     tcp->write_user_data = user_data;
     tcp->write_deadline = deadline;
-    grpc_em_fd_notify_on_write(tcp->em_fd, grpc_tcp_handle_write, tcp,
-                               tcp->write_deadline);
+    grpc_fd_notify_on_write(tcp->em_fd, grpc_tcp_handle_write, tcp,
+                            tcp->write_deadline);
   }
 
   return status;
@@ -550,12 +542,10 @@
                                             grpc_tcp_write, grpc_tcp_shutdown,
                                             grpc_tcp_destroy};
 
-static grpc_endpoint *grpc_tcp_create_generic(grpc_em_fd *em_fd,
-                                              size_t slice_size) {
+grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size) {
   grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
   tcp->base.vtable = &vtable;
-  tcp->fd = grpc_em_fd_get(em_fd);
-  tcp->em = grpc_em_fd_get_em(em_fd);
+  tcp->fd = grpc_fd_get(em_fd);
   tcp->read_cb = NULL;
   tcp->write_cb = NULL;
   tcp->read_user_data = NULL;
@@ -569,13 +559,3 @@
   tcp->em_fd = em_fd;
   return &tcp->base;
 }
-
-grpc_endpoint *grpc_tcp_create_dbg(int fd, grpc_em *em, size_t slice_size) {
-  grpc_em_fd *em_fd = gpr_malloc(sizeof(grpc_em_fd));
-  grpc_em_fd_init(em_fd, em, fd);
-  return grpc_tcp_create_generic(em_fd, slice_size);
-}
-
-grpc_endpoint *grpc_tcp_create_emfd(grpc_em_fd *em_fd) {
-  return grpc_tcp_create_generic(em_fd, DEFAULT_SLICE_SIZE);
-}
diff --git a/src/core/endpoint/tcp.h b/src/core/iomgr/tcp_posix.h
similarity index 75%
rename from src/core/endpoint/tcp.h
rename to src/core/iomgr/tcp_posix.h
index f6a2a19..8a3c528 100644
--- a/src/core/endpoint/tcp.h
+++ b/src/core/iomgr/tcp_posix.h
@@ -31,8 +31,8 @@
  *
  */
 
-#ifndef __GRPC_INTERNAL_ENDPOINT_TCP_H__
-#define __GRPC_INTERNAL_ENDPOINT_TCP_H__
+#ifndef __GRPC_INTERNAL_IOMGR_TCP_POSIX_H__
+#define __GRPC_INTERNAL_IOMGR_TCP_POSIX_H__
 /*
    Low level TCP "bottom half" implementation, for use by transports built on
    top of a TCP connection.
@@ -45,15 +45,12 @@
 */
 
 #include "src/core/endpoint/endpoint.h"
-#include "src/core/eventmanager/em.h"
+#include "src/core/iomgr/iomgr_libevent.h"
 
-/* Create a tcp from an already connected file descriptor. */
-grpc_endpoint *grpc_tcp_create(int fd, grpc_em *em);
-/* Special version for debugging slice changes */
-grpc_endpoint *grpc_tcp_create_dbg(int fd, grpc_em *em, size_t slice_size);
+#define GRPC_TCP_DEFAULT_READ_SLICE_SIZE 8192
 
-/* Special version for handing off ownership of an existing already created
-   eventmanager fd. Must not have any outstanding callbacks. */
-grpc_endpoint *grpc_tcp_create_emfd(grpc_em_fd *em_fd);
+/* Create a tcp endpoint given a file descriptor and a read slice size.
+   Takes ownership of fd. */
+grpc_endpoint *grpc_tcp_create(grpc_fd *fd, size_t read_slice_size);
 
-#endif  /* __GRPC_INTERNAL_ENDPOINT_TCP_H__ */
+#endif /* __GRPC_INTERNAL_IOMGR_TCP_POSIX_H__ */
diff --git a/src/core/endpoint/tcp_server.h b/src/core/iomgr/tcp_server.h
similarity index 91%
rename from src/core/endpoint/tcp_server.h
rename to src/core/iomgr/tcp_server.h
index d81cdd0..bd6b46f 100644
--- a/src/core/endpoint/tcp_server.h
+++ b/src/core/iomgr/tcp_server.h
@@ -31,14 +31,13 @@
  *
  */
 
-#ifndef __GRPC_INTERNAL_ENDPOINT_TCP_SERVER_H__
-#define __GRPC_INTERNAL_ENDPOINT_TCP_SERVER_H__
+#ifndef __GRPC_INTERNAL_IOMGR_TCP_SERVER_H__
+#define __GRPC_INTERNAL_IOMGR_TCP_SERVER_H__
 
 #include <sys/types.h>
 #include <sys/socket.h>
 
-#include "src/core/endpoint/tcp.h"
-#include "src/core/eventmanager/em.h"
+#include "src/core/endpoint/endpoint.h"
 
 /* Forward decl of grpc_tcp_server */
 typedef struct grpc_tcp_server grpc_tcp_server;
@@ -47,7 +46,7 @@
 typedef void (*grpc_tcp_server_cb)(void *arg, grpc_endpoint *ep);
 
 /* Create a server, initially not bound to any ports */
-grpc_tcp_server *grpc_tcp_server_create(grpc_em *em);
+grpc_tcp_server *grpc_tcp_server_create();
 
 /* Start listening to bound ports */
 void grpc_tcp_server_start(grpc_tcp_server *server, grpc_tcp_server_cb cb,
@@ -73,4 +72,4 @@
 
 void grpc_tcp_server_destroy(grpc_tcp_server *server);
 
-#endif  /* __GRPC_INTERNAL_ENDPOINT_TCP_SERVER_H__ */
+#endif /* __GRPC_INTERNAL_IOMGR_TCP_SERVER_H__ */
diff --git a/src/core/endpoint/tcp_server.c b/src/core/iomgr/tcp_server_posix.c
similarity index 89%
rename from src/core/endpoint/tcp_server.c
rename to src/core/iomgr/tcp_server_posix.c
index efd3ded..22bbd45 100644
--- a/src/core/endpoint/tcp_server.c
+++ b/src/core/iomgr/tcp_server_posix.c
@@ -32,7 +32,7 @@
  */
 
 #define _GNU_SOURCE
-#include "src/core/endpoint/tcp_server.h"
+#include "src/core/iomgr/tcp_server.h"
 
 #include <limits.h>
 #include <fcntl.h>
@@ -45,7 +45,10 @@
 #include <string.h>
 #include <errno.h>
 
-#include "src/core/endpoint/socket_utils.h"
+#include "src/core/iomgr/iomgr_libevent.h"
+#include "src/core/iomgr/sockaddr_utils.h"
+#include "src/core/iomgr/socket_utils_posix.h"
+#include "src/core/iomgr/tcp_posix.h"
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
 #include <grpc/support/sync.h>
@@ -60,13 +63,12 @@
 /* one listening port */
 typedef struct {
   int fd;
-  grpc_em_fd *emfd;
+  grpc_fd *emfd;
   grpc_tcp_server *server;
 } server_port;
 
 /* the overall server */
 struct grpc_tcp_server {
-  grpc_em *em;
   grpc_tcp_server_cb cb;
   void *cb_arg;
 
@@ -82,12 +84,11 @@
   size_t port_capacity;
 };
 
-grpc_tcp_server *grpc_tcp_server_create(grpc_em *em) {
+grpc_tcp_server *grpc_tcp_server_create() {
   grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server));
   gpr_mu_init(&s->mu);
   gpr_cv_init(&s->cv);
   s->active_ports = 0;
-  s->em = em;
   s->cb = NULL;
   s->cb_arg = NULL;
   s->ports = gpr_malloc(sizeof(server_port) * INIT_PORT_CAP);
@@ -101,7 +102,7 @@
   gpr_mu_lock(&s->mu);
   /* shutdown all fd's */
   for (i = 0; i < s->nports; i++) {
-    grpc_em_fd_shutdown(s->ports[i].emfd);
+    grpc_fd_shutdown(s->ports[i].emfd);
   }
   /* wait while that happens */
   while (s->active_ports) {
@@ -112,8 +113,7 @@
   /* delete ALL the things */
   for (i = 0; i < s->nports; i++) {
     server_port *sp = &s->ports[i];
-    grpc_em_fd_destroy(sp->emfd);
-    gpr_free(sp->emfd);
+    grpc_fd_destroy(sp->emfd);
   }
   gpr_free(s->ports);
   gpr_free(s);
@@ -189,7 +189,7 @@
 }
 
 /* event manager callback when reads are ready */
-static void on_read(void *arg, grpc_em_cb_status status) {
+static void on_read(void *arg, grpc_iomgr_cb_status status) {
   server_port *sp = arg;
 
   if (status != GRPC_CALLBACK_SUCCESS) {
@@ -208,11 +208,7 @@
         case EINTR:
           continue;
         case EAGAIN:
-          if (GRPC_EM_OK != grpc_em_fd_notify_on_read(sp->emfd, on_read, sp,
-                                                      gpr_inf_future)) {
-            gpr_log(GPR_ERROR, "Failed to register read request with em");
-            goto error;
-          }
+          grpc_fd_notify_on_read(sp->emfd, on_read, sp, gpr_inf_future);
           return;
         default:
           gpr_log(GPR_ERROR, "Failed accept4: %s", strerror(errno));
@@ -220,7 +216,9 @@
       }
     }
 
-    sp->server->cb(sp->server->cb_arg, grpc_tcp_create(fd, sp->server->em));
+    sp->server->cb(
+        sp->server->cb_arg,
+        grpc_tcp_create(grpc_fd_create(fd), GRPC_TCP_DEFAULT_READ_SLICE_SIZE));
   }
 
   abort();
@@ -249,13 +247,11 @@
     s->ports = gpr_realloc(s->ports, sizeof(server_port *) * s->port_capacity);
   }
   sp = &s->ports[s->nports++];
-  sp->emfd = gpr_malloc(sizeof(grpc_em_fd));
+  sp->emfd = grpc_fd_create(fd);
   sp->fd = fd;
   sp->server = s;
   /* initialize the em desc */
-  if (GRPC_EM_OK != grpc_em_fd_init(sp->emfd, s->em, fd)) {
-    grpc_em_fd_destroy(sp->emfd);
-    gpr_free(sp->emfd);
+  if (sp->emfd == NULL) {
     s->nports--;
     gpr_mu_unlock(&s->mu);
     return 0;
@@ -326,8 +322,8 @@
   s->cb = cb;
   s->cb_arg = cb_arg;
   for (i = 0; i < s->nports; i++) {
-    grpc_em_fd_notify_on_read(s->ports[i].emfd, on_read, &s->ports[i],
-                              gpr_inf_future);
+    grpc_fd_notify_on_read(s->ports[i].emfd, on_read, &s->ports[i],
+                           gpr_inf_future);
     s->active_ports++;
   }
   gpr_mu_unlock(&s->mu);
diff --git a/src/core/security/credentials.c b/src/core/security/credentials.c
index 7ff48f9..bfc2e33 100644
--- a/src/core/security/credentials.c
+++ b/src/core/security/credentials.c
@@ -34,7 +34,7 @@
 #include "src/core/security/credentials.h"
 
 #include "src/core/httpcli/httpcli.h"
-#include "src/core/surface/surface_em.h"
+#include "src/core/iomgr/iomgr.h"
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
 #include <grpc/support/string.h>
@@ -379,7 +379,7 @@
     request.hdr_count = 1;
     request.hdrs = &header;
     grpc_httpcli_get(
-        &request, gpr_time_add(gpr_now(), refresh_threshold), grpc_surface_em(),
+        &request, gpr_time_add(gpr_now(), refresh_threshold),
         on_compute_engine_token_response,
         grpc_credentials_metadata_request_create(creds, cb, user_data));
   } else {
@@ -433,7 +433,8 @@
   return 1;
 }
 
-void on_simulated_token_fetch_done(void *user_data, grpc_em_cb_status status) {
+void on_simulated_token_fetch_done(void *user_data,
+                                   grpc_iomgr_cb_status status) {
   grpc_credentials_metadata_request *r =
       (grpc_credentials_metadata_request *)user_data;
   grpc_fake_oauth2_credentials *c = (grpc_fake_oauth2_credentials *)r->creds;
@@ -448,10 +449,9 @@
   grpc_fake_oauth2_credentials *c = (grpc_fake_oauth2_credentials *)creds;
 
   if (c->is_async) {
-    GPR_ASSERT(grpc_em_add_callback(grpc_surface_em(),
-                                    on_simulated_token_fetch_done,
-                                    grpc_credentials_metadata_request_create(
-                                        creds, cb, user_data)) == GRPC_EM_OK);
+    grpc_iomgr_add_callback(
+        on_simulated_token_fetch_done,
+        grpc_credentials_metadata_request_create(creds, cb, user_data));
   } else {
     cb(user_data, &c->access_token_md, 1, GRPC_CREDENTIALS_OK);
   }
diff --git a/src/core/security/server_secure_chttp2.c b/src/core/security/server_secure_chttp2.c
index 335d502..28b56dd 100644
--- a/src/core/security/server_secure_chttp2.c
+++ b/src/core/security/server_secure_chttp2.c
@@ -35,12 +35,11 @@
 
 #include "src/core/channel/http_filter.h"
 #include "src/core/channel/http_server_filter.h"
-#include "src/core/endpoint/resolve_address.h"
-#include "src/core/endpoint/tcp_server.h"
+#include "src/core/iomgr/resolve_address.h"
+#include "src/core/iomgr/tcp_server.h"
 #include "src/core/security/security_context.h"
 #include "src/core/security/secure_transport_setup.h"
 #include "src/core/surface/server.h"
-#include "src/core/surface/surface_em.h"
 #include "src/core/transport/chttp2_transport.h"
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
@@ -101,7 +100,7 @@
     goto error;
   }
 
-  tcp = grpc_tcp_server_create(grpc_surface_em());
+  tcp = grpc_tcp_server_create();
   if (!tcp) {
     goto error;
   }
diff --git a/src/core/surface/call.c b/src/core/surface/call.c
index a731c7c..1cffe3d 100644
--- a/src/core/surface/call.c
+++ b/src/core/surface/call.c
@@ -34,12 +34,12 @@
 #include "src/core/surface/call.h"
 #include "src/core/channel/channel_stack.h"
 #include "src/core/channel/metadata_buffer.h"
+#include "src/core/iomgr/alarm.h"
+#include "src/core/surface/channel.h"
+#include "src/core/surface/completion_queue.h"
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
 #include <grpc/support/string.h>
-#include "src/core/surface/channel.h"
-#include "src/core/surface/completion_queue.h"
-#include "src/core/surface/surface_em.h"
 
 #include <stdio.h>
 #include <stdlib.h>
@@ -184,7 +184,7 @@
   void *finished_tag;
   pending_read_queue prq;
 
-  grpc_em_alarm alarm;
+  grpc_alarm alarm;
 
   /* The current outstanding send message/context/invoke/end tag (only valid if
      have_write == 1) */
@@ -258,7 +258,7 @@
 void grpc_call_destroy(grpc_call *c) {
   gpr_mu_lock(&c->read_mu);
   if (c->have_alarm) {
-    grpc_em_alarm_cancel(&c->alarm);
+    grpc_alarm_cancel(&c->alarm);
     c->have_alarm = 0;
   }
   gpr_mu_unlock(&c->read_mu);
@@ -813,7 +813,7 @@
   }
   if (is_full_close) {
     if (call->have_alarm) {
-      grpc_em_alarm_cancel(&call->alarm);
+      grpc_alarm_cancel(&call->alarm);
       call->have_alarm = 0;
     }
     call->received_finish = 1;
@@ -852,7 +852,7 @@
   return &call->incoming_metadata;
 }
 
-static void call_alarm(void *arg, grpc_em_cb_status status) {
+static void call_alarm(void *arg, grpc_iomgr_cb_status status) {
   grpc_call *call = arg;
   if (status == GRPC_CALLBACK_SUCCESS) {
     grpc_call_cancel(call);
@@ -868,6 +868,6 @@
   }
   grpc_call_internal_ref(call);
   call->have_alarm = 1;
-  grpc_em_alarm_init(&call->alarm, grpc_surface_em(), call_alarm, call);
-  grpc_em_alarm_add(&call->alarm, deadline);
+  grpc_alarm_init(&call->alarm, call_alarm, call);
+  grpc_alarm_add(&call->alarm, deadline);
 }
diff --git a/src/core/surface/channel_create.c b/src/core/surface/channel_create.c
index ec1c847..7d30b64 100644
--- a/src/core/surface/channel_create.c
+++ b/src/core/surface/channel_create.c
@@ -43,12 +43,11 @@
 #include "src/core/channel/connected_channel.h"
 #include "src/core/channel/http_client_filter.h"
 #include "src/core/channel/http_filter.h"
-#include "src/core/endpoint/resolve_address.h"
-#include "src/core/endpoint/tcp.h"
-#include "src/core/endpoint/tcp_client.h"
+#include "src/core/endpoint/endpoint.h"
+#include "src/core/iomgr/resolve_address.h"
+#include "src/core/iomgr/tcp_client.h"
 #include "src/core/surface/channel.h"
 #include "src/core/surface/client.h"
-#include "src/core/surface/surface_em.h"
 #include "src/core/transport/chttp2_transport.h"
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
@@ -74,7 +73,6 @@
   const char *target;
   grpc_transport_setup_callback setup_callback;
   void *setup_user_data;
-  grpc_em *em;
 };
 
 static int maybe_try_next_resolved(request *r);
@@ -123,8 +121,8 @@
   if (!r->resolved) return 0;
   if (r->resolved_index == r->resolved->naddrs) return 0;
   addr = &r->resolved->addrs[r->resolved_index++];
-  grpc_tcp_client_connect(on_connect, r, r->setup->em,
-                          (struct sockaddr *)&addr->addr, addr->len,
+  grpc_tcp_client_connect(on_connect, r, (struct sockaddr *)&addr->addr,
+                          addr->len,
                           grpc_client_setup_request_deadline(r->cs_request));
   return 1;
 }
@@ -201,13 +199,12 @@
   channel = grpc_channel_create_from_filters(filters, n, args, mdctx, 1);
 
   s->target = gpr_strdup(target);
-  s->em = grpc_surface_em();
   s->setup_callback = complete_setup;
   s->setup_user_data = grpc_channel_get_channel_stack(channel);
 
   grpc_client_setup_create_and_attach(grpc_channel_get_channel_stack(channel),
                                       args, mdctx, initiate_setup, done_setup,
-                                      s, s->em);
+                                      s);
 
   return channel;
 }
diff --git a/src/core/surface/completion_queue.c b/src/core/surface/completion_queue.c
index 2002476..1f3074f 100644
--- a/src/core/surface/completion_queue.c
+++ b/src/core/surface/completion_queue.c
@@ -36,10 +36,9 @@
 #include <stdio.h>
 #include <string.h>
 
-#include "src/core/eventmanager/em.h"
+#include "src/core/iomgr/iomgr_completion_queue_interface.h"
 #include "src/core/surface/call.h"
 #include "src/core/surface/event_string.h"
-#include "src/core/surface/surface_em.h"
 #include "src/core/surface/surface_trace.h"
 #include <grpc/support/alloc.h>
 #include <grpc/support/atm.h>
@@ -62,7 +61,6 @@
 
 /* Completion queue structure */
 struct grpc_completion_queue {
-  grpc_em *em;
   int allow_polling;
 
   /* When refs drops to zero, we are in shutdown mode, and will be destroyable
@@ -89,7 +87,6 @@
   memset(cc, 0, sizeof(*cc));
   /* Initial ref is dropped by grpc_completion_queue_shutdown */
   gpr_ref_init(&cc->refs, 1);
-  cc->em = grpc_surface_em();
   cc->allow_polling = 1;
   return cc;
 }
@@ -100,7 +97,7 @@
 
 /* Create and append an event to the queue. Returns the event so that its data
    members can be filled in.
-   Requires cc->em->mu locked. */
+   Requires grpc_iomgr_mu locked. */
 static event *add_locked(grpc_completion_queue *cc, grpc_completion_type type,
                          void *tag, grpc_call *call,
                          grpc_event_finish_func on_finish, void *user_data) {
@@ -126,7 +123,7 @@
     ev->bucket_prev = cc->buckets[bucket]->bucket_prev;
     ev->bucket_next->bucket_prev = ev->bucket_prev->bucket_next = ev;
   }
-  gpr_cv_broadcast(&cc->em->cv);
+  gpr_cv_broadcast(&grpc_iomgr_cv);
   return ev;
 }
 
@@ -149,7 +146,7 @@
   if (gpr_unref(&cc->refs)) {
     GPR_ASSERT(!cc->shutdown);
     cc->shutdown = 1;
-    gpr_cv_broadcast(&cc->em->cv);
+    gpr_cv_broadcast(&grpc_iomgr_cv);
   }
 }
 
@@ -157,11 +154,11 @@
                       grpc_event_finish_func on_finish, void *user_data,
                       grpc_byte_buffer *read) {
   event *ev;
-  gpr_mu_lock(&cc->em->mu);
+  gpr_mu_lock(&grpc_iomgr_mu);
   ev = add_locked(cc, GRPC_READ, tag, call, on_finish, user_data);
   ev->base.data.read = read;
   end_op_locked(cc, GRPC_READ);
-  gpr_mu_unlock(&cc->em->mu);
+  gpr_mu_unlock(&grpc_iomgr_mu);
 }
 
 void grpc_cq_end_invoke_accepted(grpc_completion_queue *cc, void *tag,
@@ -169,11 +166,11 @@
                                  grpc_event_finish_func on_finish,
                                  void *user_data, grpc_op_error error) {
   event *ev;
-  gpr_mu_lock(&cc->em->mu);
+  gpr_mu_lock(&grpc_iomgr_mu);
   ev = add_locked(cc, GRPC_INVOKE_ACCEPTED, tag, call, on_finish, user_data);
   ev->base.data.invoke_accepted = error;
   end_op_locked(cc, GRPC_INVOKE_ACCEPTED);
-  gpr_mu_unlock(&cc->em->mu);
+  gpr_mu_unlock(&grpc_iomgr_mu);
 }
 
 void grpc_cq_end_write_accepted(grpc_completion_queue *cc, void *tag,
@@ -181,11 +178,11 @@
                                 grpc_event_finish_func on_finish,
                                 void *user_data, grpc_op_error error) {
   event *ev;
-  gpr_mu_lock(&cc->em->mu);
+  gpr_mu_lock(&grpc_iomgr_mu);
   ev = add_locked(cc, GRPC_WRITE_ACCEPTED, tag, call, on_finish, user_data);
   ev->base.data.write_accepted = error;
   end_op_locked(cc, GRPC_WRITE_ACCEPTED);
-  gpr_mu_unlock(&cc->em->mu);
+  gpr_mu_unlock(&grpc_iomgr_mu);
 }
 
 void grpc_cq_end_finish_accepted(grpc_completion_queue *cc, void *tag,
@@ -193,11 +190,11 @@
                                  grpc_event_finish_func on_finish,
                                  void *user_data, grpc_op_error error) {
   event *ev;
-  gpr_mu_lock(&cc->em->mu);
+  gpr_mu_lock(&grpc_iomgr_mu);
   ev = add_locked(cc, GRPC_FINISH_ACCEPTED, tag, call, on_finish, user_data);
   ev->base.data.finish_accepted = error;
   end_op_locked(cc, GRPC_FINISH_ACCEPTED);
-  gpr_mu_unlock(&cc->em->mu);
+  gpr_mu_unlock(&grpc_iomgr_mu);
 }
 
 void grpc_cq_end_client_metadata_read(grpc_completion_queue *cc, void *tag,
@@ -206,24 +203,24 @@
                                       void *user_data, size_t count,
                                       grpc_metadata *elements) {
   event *ev;
-  gpr_mu_lock(&cc->em->mu);
+  gpr_mu_lock(&grpc_iomgr_mu);
   ev = add_locked(cc, GRPC_CLIENT_METADATA_READ, tag, call, on_finish,
                   user_data);
   ev->base.data.client_metadata_read.count = count;
   ev->base.data.client_metadata_read.elements = elements;
   end_op_locked(cc, GRPC_CLIENT_METADATA_READ);
-  gpr_mu_unlock(&cc->em->mu);
+  gpr_mu_unlock(&grpc_iomgr_mu);
 }
 
 void grpc_cq_end_finished(grpc_completion_queue *cc, void *tag, grpc_call *call,
                           grpc_event_finish_func on_finish, void *user_data,
                           grpc_status status) {
   event *ev;
-  gpr_mu_lock(&cc->em->mu);
+  gpr_mu_lock(&grpc_iomgr_mu);
   ev = add_locked(cc, GRPC_FINISHED, tag, call, on_finish, user_data);
   ev->base.data.finished = status;
   end_op_locked(cc, GRPC_FINISHED);
-  gpr_mu_unlock(&cc->em->mu);
+  gpr_mu_unlock(&grpc_iomgr_mu);
 }
 
 void grpc_cq_end_new_rpc(grpc_completion_queue *cc, void *tag, grpc_call *call,
@@ -232,7 +229,7 @@
                          gpr_timespec deadline, size_t metadata_count,
                          grpc_metadata *metadata_elements) {
   event *ev;
-  gpr_mu_lock(&cc->em->mu);
+  gpr_mu_lock(&grpc_iomgr_mu);
   ev = add_locked(cc, GRPC_SERVER_RPC_NEW, tag, call, on_finish, user_data);
   ev->base.data.server_rpc_new.method = method;
   ev->base.data.server_rpc_new.host = host;
@@ -240,7 +237,7 @@
   ev->base.data.server_rpc_new.metadata_count = metadata_count;
   ev->base.data.server_rpc_new.metadata_elements = metadata_elements;
   end_op_locked(cc, GRPC_SERVER_RPC_NEW);
-  gpr_mu_unlock(&cc->em->mu);
+  gpr_mu_unlock(&grpc_iomgr_mu);
 }
 
 /* Create a GRPC_QUEUE_SHUTDOWN event without queuing it anywhere */
@@ -257,7 +254,7 @@
                                        gpr_timespec deadline) {
   event *ev = NULL;
 
-  gpr_mu_lock(&cc->em->mu);
+  gpr_mu_lock(&grpc_iomgr_mu);
   for (;;) {
     if (cc->queue != NULL) {
       gpr_uintptr bucket;
@@ -283,15 +280,15 @@
       ev = create_shutdown_event();
       break;
     }
-    if (cc->allow_polling && grpc_em_work(cc->em, deadline)) {
+    if (cc->allow_polling && grpc_iomgr_work(deadline)) {
       continue;
     }
-    if (gpr_cv_wait(&cc->em->cv, &cc->em->mu, deadline)) {
-      gpr_mu_unlock(&cc->em->mu);
+    if (gpr_cv_wait(&grpc_iomgr_cv, &grpc_iomgr_mu, deadline)) {
+      gpr_mu_unlock(&grpc_iomgr_mu);
       return NULL;
     }
   }
-  gpr_mu_unlock(&cc->em->mu);
+  gpr_mu_unlock(&grpc_iomgr_mu);
   GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ev->base);
   return &ev->base;
 }
@@ -329,7 +326,7 @@
                                         gpr_timespec deadline) {
   event *ev = NULL;
 
-  gpr_mu_lock(&cc->em->mu);
+  gpr_mu_lock(&grpc_iomgr_mu);
   for (;;) {
     if ((ev = pluck_event(cc, tag))) {
       break;
@@ -338,15 +335,15 @@
       ev = create_shutdown_event();
       break;
     }
-    if (cc->allow_polling && grpc_em_work(cc->em, deadline)) {
+    if (cc->allow_polling && grpc_iomgr_work(deadline)) {
       continue;
     }
-    if (gpr_cv_wait(&cc->em->cv, &cc->em->mu, deadline)) {
-      gpr_mu_unlock(&cc->em->mu);
+    if (gpr_cv_wait(&grpc_iomgr_cv, &grpc_iomgr_mu, deadline)) {
+      gpr_mu_unlock(&grpc_iomgr_mu);
       return NULL;
     }
   }
-  gpr_mu_unlock(&cc->em->mu);
+  gpr_mu_unlock(&grpc_iomgr_mu);
   GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ev->base);
   return &ev->base;
 }
@@ -355,11 +352,11 @@
    to zero here, then enter shutdown mode and wake up any waiters */
 void grpc_completion_queue_shutdown(grpc_completion_queue *cc) {
   if (gpr_unref(&cc->refs)) {
-    gpr_mu_lock(&cc->em->mu);
+    gpr_mu_lock(&grpc_iomgr_mu);
     GPR_ASSERT(!cc->shutdown);
     cc->shutdown = 1;
-    gpr_cv_broadcast(&cc->em->cv);
-    gpr_mu_unlock(&cc->em->mu);
+    gpr_cv_broadcast(&grpc_iomgr_cv);
+    gpr_mu_unlock(&grpc_iomgr_mu);
   }
 }
 
diff --git a/src/core/surface/init.c b/src/core/surface/init.c
index 92c0ac8..832ec08 100644
--- a/src/core/surface/init.c
+++ b/src/core/surface/init.c
@@ -33,14 +33,14 @@
 
 #include <grpc/grpc.h>
 #include "src/core/statistics/census_interface.h"
-#include "src/core/surface/surface_em.h"
+#include "src/core/iomgr/iomgr.h"
 
 void grpc_init() {
-  grpc_surface_em_init();
+  grpc_iomgr_init();
   census_init();
 }
 
 void grpc_shutdown() {
-  grpc_surface_em_shutdown();
+  grpc_iomgr_shutdown();
   census_shutdown();
 }
diff --git a/src/core/surface/secure_channel_create.c b/src/core/surface/secure_channel_create.c
index f330b83..3d57279 100644
--- a/src/core/surface/secure_channel_create.c
+++ b/src/core/surface/secure_channel_create.c
@@ -43,15 +43,13 @@
 #include "src/core/channel/connected_channel.h"
 #include "src/core/channel/http_client_filter.h"
 #include "src/core/channel/http_filter.h"
-#include "src/core/endpoint/resolve_address.h"
-#include "src/core/endpoint/tcp.h"
-#include "src/core/endpoint/tcp_client.h"
+#include "src/core/iomgr/resolve_address.h"
+#include "src/core/iomgr/tcp_client.h"
 #include "src/core/security/auth.h"
 #include "src/core/security/security_context.h"
 #include "src/core/security/secure_transport_setup.h"
 #include "src/core/surface/channel.h"
 #include "src/core/surface/client.h"
-#include "src/core/surface/surface_em.h"
 #include "src/core/transport/chttp2_transport.h"
 #include <grpc/grpc_security.h>
 #include <grpc/support/alloc.h>
@@ -78,7 +76,6 @@
   const char *target;
   grpc_transport_setup_callback setup_callback;
   void *setup_user_data;
-  grpc_em *em;
 };
 
 static int maybe_try_next_resolved(request *r);
@@ -139,8 +136,8 @@
   if (!r->resolved) return 0;
   if (r->resolved_index == r->resolved->naddrs) return 0;
   addr = &r->resolved->addrs[r->resolved_index++];
-  grpc_tcp_client_connect(on_connect, r, r->setup->em,
-                          (struct sockaddr *)&addr->addr, addr->len,
+  grpc_tcp_client_connect(on_connect, r, (struct sockaddr *)&addr->addr,
+                          addr->len,
                           grpc_client_setup_request_deadline(r->cs_request));
   return 1;
 }
@@ -230,7 +227,6 @@
   grpc_channel_args_destroy(args_copy);
 
   s->target = gpr_strdup(target);
-  s->em = grpc_surface_em();
   s->setup_callback = complete_setup;
   s->setup_user_data = grpc_channel_get_channel_stack(channel);
   s->security_context =
@@ -238,6 +234,6 @@
           &context->base);
   grpc_client_setup_create_and_attach(grpc_channel_get_channel_stack(channel),
                                       args, mdctx, initiate_setup, done_setup,
-                                      s, s->em);
+                                      s);
   return channel;
 }
diff --git a/src/core/surface/server.c b/src/core/surface/server.c
index d8d5a7a..2c85906 100644
--- a/src/core/surface/server.c
+++ b/src/core/surface/server.c
@@ -39,10 +39,10 @@
 #include "src/core/channel/census_filter.h"
 #include "src/core/channel/channel_args.h"
 #include "src/core/channel/connected_channel.h"
+#include "src/core/iomgr/iomgr.h"
 #include "src/core/surface/call.h"
 #include "src/core/surface/channel.h"
 #include "src/core/surface/completion_queue.h"
-#include "src/core/surface/surface_em.h"
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
 #include <grpc/support/string.h>
@@ -73,7 +73,6 @@
   const grpc_channel_filter **channel_filters;
   grpc_channel_args *channel_args;
   grpc_completion_queue *cq;
-  grpc_em *em;
 
   gpr_mu mu;
 
@@ -193,7 +192,7 @@
   chand->next = chand->prev = chand;
 }
 
-static void finish_destroy_channel(void *cd, grpc_em_cb_status status) {
+static void finish_destroy_channel(void *cd, grpc_iomgr_cb_status status) {
   channel_data *chand = cd;
   grpc_server *server = chand->server;
   /*gpr_log(GPR_INFO, "destroy channel %p", chand->channel);*/
@@ -206,7 +205,7 @@
   GPR_ASSERT(chand->server != NULL);
   orphan_channel(chand);
   server_ref(chand->server);
-  grpc_em_add_callback(chand->server->em, finish_destroy_channel, chand);
+  grpc_iomgr_add_callback(finish_destroy_channel, chand);
 }
 
 static void queue_new_rpc(grpc_server *server, call_data *calld, void *tag) {
@@ -254,7 +253,7 @@
   gpr_mu_unlock(&server->mu);
 }
 
-static void kill_zombie(void *elem, grpc_em_cb_status status) {
+static void kill_zombie(void *elem, grpc_iomgr_cb_status status) {
   grpc_call_destroy(grpc_call_from_top_element(elem));
 }
 
@@ -275,7 +274,7 @@
     /* fallthrough intended */
     case NOT_STARTED:
       calld->state = ZOMBIED;
-      grpc_em_add_callback(chand->server->em, kill_zombie, elem);
+      grpc_iomgr_add_callback(kill_zombie, elem);
       break;
     case ZOMBIED:
       break;
@@ -341,7 +340,7 @@
   }
 }
 
-static void finish_shutdown_channel(void *cd, grpc_em_cb_status status) {
+static void finish_shutdown_channel(void *cd, grpc_iomgr_cb_status status) {
   channel_data *chand = cd;
   grpc_channel_op op;
   op.type = GRPC_CHANNEL_DISCONNECT;
@@ -354,7 +353,7 @@
 
 static void shutdown_channel(channel_data *chand) {
   grpc_channel_internal_ref(chand->channel);
-  grpc_em_add_callback(chand->server->em, finish_shutdown_channel, chand);
+  grpc_iomgr_add_callback(finish_shutdown_channel, chand);
 }
 
 static void init_call_elem(grpc_call_element *elem,
@@ -442,7 +441,6 @@
   gpr_mu_init(&server->mu);
 
   server->cq = cq;
-  server->em = grpc_surface_em();
   /* decremented by grpc_server_destroy */
   gpr_ref_init(&server->internal_refcount, 1);
   server->root_channel_data.next = server->root_channel_data.prev =
diff --git a/src/core/surface/server_chttp2.c b/src/core/surface/server_chttp2.c
index db8924e..a5fdd03 100644
--- a/src/core/surface/server_chttp2.c
+++ b/src/core/surface/server_chttp2.c
@@ -35,10 +35,9 @@
 
 #include "src/core/channel/http_filter.h"
 #include "src/core/channel/http_server_filter.h"
-#include "src/core/endpoint/resolve_address.h"
-#include "src/core/endpoint/tcp_server.h"
+#include "src/core/iomgr/resolve_address.h"
+#include "src/core/iomgr/tcp_server.h"
 #include "src/core/surface/server.h"
-#include "src/core/surface/surface_em.h"
 #include "src/core/transport/chttp2_transport.h"
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
@@ -83,7 +82,7 @@
     goto error;
   }
 
-  tcp = grpc_tcp_server_create(grpc_surface_em());
+  tcp = grpc_tcp_server_create();
   if (!tcp) {
     goto error;
   }
diff --git a/src/core/surface/surface_em.h b/src/core/surface/surface_em.h
deleted file mode 100644
index 165f42f..0000000
--- a/src/core/surface/surface_em.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- *
- * Copyright 2014, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifndef __GRPC_INTERNAL_SURFACE_SURFACE_EM_H__
-#define __GRPC_INTERNAL_SURFACE_SURFACE_EM_H__
-
-#include "src/core/eventmanager/em.h"
-
-/* Returns a global singleton event manager for
-   the surface apis, and is passed down to channels and
-   transports as needed. */
-grpc_em *grpc_surface_em();
-
-void grpc_surface_em_init();
-void grpc_surface_em_shutdown();
-
-#endif  /* __GRPC_INTERNAL_SURFACE_SURFACE_EM_H__ */
diff --git a/src/core/transport/chttp2_transport.h b/src/core/transport/chttp2_transport.h
index 37eb84e..24f2606 100644
--- a/src/core/transport/chttp2_transport.h
+++ b/src/core/transport/chttp2_transport.h
@@ -34,7 +34,7 @@
 #ifndef __GRPC_INTERNAL_TRANSPORT_CHTTP2_TRANSPORT_H__
 #define __GRPC_INTERNAL_TRANSPORT_CHTTP2_TRANSPORT_H__
 
-#include "src/core/endpoint/tcp.h"
+#include "src/core/endpoint/endpoint.h"
 #include "src/core/transport/transport.h"
 
 void grpc_create_chttp2_transport(grpc_transport_setup_callback setup,
diff --git a/test/core/end2end/dualstack_socket_test.c b/test/core/end2end/dualstack_socket_test.c
index 4813672..e127c61 100644
--- a/test/core/end2end/dualstack_socket_test.c
+++ b/test/core/end2end/dualstack_socket_test.c
@@ -31,7 +31,7 @@
  *
  */
 
-#include "src/core/endpoint/socket_utils.h"
+#include "src/core/iomgr/socket_utils_posix.h"
 #include <grpc/grpc.h>
 #include <grpc/support/alloc.h>
 #include <grpc/support/host_port.h>
@@ -165,16 +165,10 @@
   grpc_completion_queue_shutdown(server_cq);
   drain_cq(server_cq);
   grpc_completion_queue_destroy(server_cq);
-  /* TODO(klempner): We need to give the EM time to actually close the listening
-     socket, or later tests will fail to bind to this port. We should fix this
-     by adding an API to EM to get notified when this happens and having it
-     prevent listener teardown. */
-  gpr_sleep_until(gpr_time_add(gpr_now(), gpr_time_from_millis(250)));
 }
 
 int main(int argc, char **argv) {
   int i;
-  int port = grpc_pick_unused_port_or_die();
 
   grpc_test_init(argc, argv);
   grpc_init();
@@ -184,20 +178,21 @@
     grpc_forbid_dualstack_sockets_for_testing = i;
 
     /* :: and 0.0.0.0 are handled identically. */
-    test_connect("::", "127.0.0.1", port, 1);
-    test_connect("::", "::1", port, 1);
-    test_connect("::", "::ffff:127.0.0.1", port, 1);
-    test_connect("::", "localhost", port, 1);
-    test_connect("0.0.0.0", "127.0.0.1", port, 1);
-    test_connect("0.0.0.0", "::1", port, 1);
-    test_connect("0.0.0.0", "::ffff:127.0.0.1", port, 1);
-    test_connect("0.0.0.0", "localhost", port, 1);
+    test_connect("::", "127.0.0.1", grpc_pick_unused_port_or_die(), 1);
+    test_connect("::", "::1", grpc_pick_unused_port_or_die(), 1);
+    test_connect("::", "::ffff:127.0.0.1", grpc_pick_unused_port_or_die(), 1);
+    test_connect("::", "localhost", grpc_pick_unused_port_or_die(), 1);
+    test_connect("0.0.0.0", "127.0.0.1", grpc_pick_unused_port_or_die(), 1);
+    test_connect("0.0.0.0", "::1", grpc_pick_unused_port_or_die(), 1);
+    test_connect("0.0.0.0", "::ffff:127.0.0.1", grpc_pick_unused_port_or_die(),
+                 1);
+    test_connect("0.0.0.0", "localhost", grpc_pick_unused_port_or_die(), 1);
 
     /* These only work when the families agree. */
-    test_connect("::1", "::1", port, 1);
-    test_connect("::1", "127.0.0.1", port, 0);
-    test_connect("127.0.0.1", "127.0.0.1", port, 1);
-    test_connect("127.0.0.1", "::1", port, 0);
+    test_connect("::1", "::1", grpc_pick_unused_port_or_die(), 1);
+    test_connect("::1", "127.0.0.1", grpc_pick_unused_port_or_die(), 0);
+    test_connect("127.0.0.1", "127.0.0.1", grpc_pick_unused_port_or_die(), 1);
+    test_connect("127.0.0.1", "::1", grpc_pick_unused_port_or_die(), 0);
 
   }
 
diff --git a/test/core/end2end/fixtures/chttp2_fake_security.c b/test/core/end2end/fixtures/chttp2_fake_security.c
index aaca563..ff249ce 100644
--- a/test/core/end2end/fixtures/chttp2_fake_security.c
+++ b/test/core/end2end/fixtures/chttp2_fake_security.c
@@ -37,7 +37,6 @@
 #include <string.h>
 
 #include "src/core/channel/channel_args.h"
-#include "src/core/eventmanager/em.h"
 #include "src/core/security/credentials.h"
 #include "src/core/security/security_context.h"
 #include <grpc/support/alloc.h>
@@ -47,8 +46,6 @@
 #include "test/core/util/port.h"
 #include "test/core/end2end/data/ssl_test_data.h"
 
-static grpc_em em;
-
 typedef struct fullstack_secure_fixture_data {
   char *localaddr;
 } fullstack_secure_fixture_data;
@@ -124,13 +121,11 @@
   grpc_test_init(argc, argv);
 
   grpc_init();
-  grpc_em_init(&em);
 
   for (i = 0; i < sizeof(configs) / sizeof(*configs); i++) {
     grpc_end2end_tests(configs[i]);
   }
 
-  GPR_ASSERT(grpc_em_destroy(&em) == GRPC_EM_OK);
   grpc_shutdown();
 
   return 0;
diff --git a/test/core/end2end/fixtures/chttp2_fullstack.c b/test/core/end2end/fixtures/chttp2_fullstack.c
index da75d61..169032f 100644
--- a/test/core/end2end/fixtures/chttp2_fullstack.c
+++ b/test/core/end2end/fixtures/chttp2_fullstack.c
@@ -46,11 +46,9 @@
 #include "src/core/channel/connected_channel.h"
 #include "src/core/channel/http_filter.h"
 #include "src/core/channel/http_server_filter.h"
-#include "src/core/eventmanager/em.h"
 #include "src/core/surface/channel.h"
 #include "src/core/surface/client.h"
 #include "src/core/surface/server.h"
-#include "src/core/surface/surface_em.h"
 #include "src/core/transport/chttp2_transport.h"
 #include <grpc/support/alloc.h>
 #include <grpc/support/host_port.h>
diff --git a/test/core/end2end/fixtures/chttp2_simple_ssl_fullstack.c b/test/core/end2end/fixtures/chttp2_simple_ssl_fullstack.c
index 57c9141..7b0adb2 100644
--- a/test/core/end2end/fixtures/chttp2_simple_ssl_fullstack.c
+++ b/test/core/end2end/fixtures/chttp2_simple_ssl_fullstack.c
@@ -37,7 +37,6 @@
 #include <string.h>
 
 #include "src/core/channel/channel_args.h"
-#include "src/core/eventmanager/em.h"
 #include "src/core/security/credentials.h"
 #include "src/core/security/security_context.h"
 #include <grpc/support/alloc.h>
@@ -47,8 +46,6 @@
 #include "test/core/util/port.h"
 #include "test/core/end2end/data/ssl_test_data.h"
 
-static grpc_em em;
-
 typedef struct fullstack_secure_fixture_data {
   char *localaddr;
 } fullstack_secure_fixture_data;
@@ -131,13 +128,11 @@
   grpc_test_init(argc, argv);
 
   grpc_init();
-  grpc_em_init(&em);
 
   for (i = 0; i < sizeof(configs) / sizeof(*configs); i++) {
     grpc_end2end_tests(configs[i]);
   }
 
-  GPR_ASSERT(grpc_em_destroy(&em) == GRPC_EM_OK);
   grpc_shutdown();
 
   return 0;
diff --git a/test/core/end2end/fixtures/chttp2_simple_ssl_with_oauth2_fullstack.c b/test/core/end2end/fixtures/chttp2_simple_ssl_with_oauth2_fullstack.c
index 8d55853..04a8795 100644
--- a/test/core/end2end/fixtures/chttp2_simple_ssl_with_oauth2_fullstack.c
+++ b/test/core/end2end/fixtures/chttp2_simple_ssl_with_oauth2_fullstack.c
@@ -37,7 +37,7 @@
 #include <string.h>
 
 #include "src/core/channel/channel_args.h"
-#include "src/core/eventmanager/em.h"
+#include "src/core/iomgr/iomgr.h"
 #include "src/core/security/credentials.h"
 #include "src/core/security/security_context.h"
 #include <grpc/support/alloc.h>
@@ -47,8 +47,6 @@
 #include "test/core/util/port.h"
 #include "test/core/end2end/data/ssl_test_data.h"
 
-static grpc_em em;
-
 typedef struct fullstack_secure_fixture_data {
   char *localaddr;
 } fullstack_secure_fixture_data;
@@ -138,13 +136,11 @@
   grpc_test_init(argc, argv);
 
   grpc_init();
-  grpc_em_init(&em);
 
   for (i = 0; i < sizeof(configs) / sizeof(*configs); i++) {
     grpc_end2end_tests(configs[i]);
   }
 
-  GPR_ASSERT(grpc_em_destroy(&em) == GRPC_EM_OK);
   grpc_shutdown();
 
   return 0;
diff --git a/test/core/end2end/fixtures/chttp2_socket_pair.c b/test/core/end2end/fixtures/chttp2_socket_pair.c
index 593ff78..7ec17e3 100644
--- a/test/core/end2end/fixtures/chttp2_socket_pair.c
+++ b/test/core/end2end/fixtures/chttp2_socket_pair.c
@@ -32,25 +32,15 @@
  */
 
 #include "test/core/end2end/end2end_tests.h"
-
-#include <errno.h>
-#include <fcntl.h>
-#include <string.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <stdio.h>
-
 #include "src/core/channel/client_channel.h"
 #include "src/core/channel/connected_channel.h"
 #include "src/core/channel/http_filter.h"
 #include "src/core/channel/http_server_filter.h"
-#include "src/core/eventmanager/em.h"
+#include "src/core/iomgr/endpoint_pair.h"
+#include "src/core/iomgr/iomgr.h"
 #include "src/core/surface/channel.h"
 #include "src/core/surface/client.h"
 #include "src/core/surface/server.h"
-#include "src/core/surface/surface_em.h"
 #include "src/core/transport/chttp2_transport.h"
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
@@ -60,15 +50,6 @@
 #include "test/core/util/port.h"
 #include "test/core/util/test_config.h"
 
-static void create_sockets(int sv[2]) {
-  int flags;
-  GPR_ASSERT(socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0);
-  flags = fcntl(sv[0], F_GETFL, 0);
-  GPR_ASSERT(fcntl(sv[0], F_SETFL, flags | O_NONBLOCK) == 0);
-  flags = fcntl(sv[1], F_GETFL, 0);
-  GPR_ASSERT(fcntl(sv[1], F_SETFL, flags | O_NONBLOCK) == 0);
-}
-
 /* chttp2 transport that is immediately available (used for testing
    connected_channel without a client_channel */
 
@@ -102,11 +83,9 @@
       grpc_channel_get_channel_stack(channel), transport);
 }
 
-typedef struct socketpair_fixture_data { int sv[2]; } socketpair_fixture_data;
-
 static grpc_end2end_test_fixture chttp2_create_fixture_socketpair(
     grpc_channel_args *client_args, grpc_channel_args *server_args) {
-  socketpair_fixture_data *sfd = gpr_malloc(sizeof(socketpair_fixture_data));
+  grpc_endpoint_pair *sfd = gpr_malloc(sizeof(grpc_endpoint_pair));
 
   grpc_end2end_test_fixture f;
   f.fixture_data = sfd;
@@ -115,31 +94,27 @@
   f.server = grpc_server_create_from_filters(f.server_cq, NULL, 0, server_args);
   f.client = NULL;
 
-  create_sockets(sfd->sv);
+  *sfd = grpc_iomgr_create_endpoint_pair(65536);
 
   return f;
 }
 
 static void chttp2_init_client_socketpair(grpc_end2end_test_fixture *f,
                                           grpc_channel_args *client_args) {
-  socketpair_fixture_data *sfd = f->fixture_data;
-  grpc_endpoint *cli_tcp;
+  grpc_endpoint_pair *sfd = f->fixture_data;
   sp_client_setup cs;
   cs.client_args = client_args;
   cs.f = f;
-  cli_tcp = grpc_tcp_create_dbg(sfd->sv[0], grpc_surface_em(), 65536);
   grpc_create_chttp2_transport(client_setup_transport, &cs, client_args,
-                               cli_tcp, NULL, 0, grpc_mdctx_create(), 1);
+                               sfd->client, NULL, 0, grpc_mdctx_create(), 1);
   GPR_ASSERT(f->client);
 }
 
 static void chttp2_init_server_socketpair(grpc_end2end_test_fixture *f,
                                           grpc_channel_args *server_args) {
-  socketpair_fixture_data *sfd = f->fixture_data;
-  grpc_endpoint *svr_tcp;
-  svr_tcp = grpc_tcp_create_dbg(sfd->sv[1], grpc_surface_em(), 65536);
-  grpc_create_chttp2_transport(server_setup_transport, f, server_args, svr_tcp,
-                               NULL, 0, grpc_mdctx_create(), 0);
+  grpc_endpoint_pair *sfd = f->fixture_data;
+  grpc_create_chttp2_transport(server_setup_transport, f, server_args,
+                               sfd->server, NULL, 0, grpc_mdctx_create(), 0);
 }
 
 static void chttp2_tear_down_socketpair(grpc_end2end_test_fixture *f) {
diff --git a/test/core/end2end/fixtures/chttp2_socket_pair_one_byte_at_a_time.c b/test/core/end2end/fixtures/chttp2_socket_pair_one_byte_at_a_time.c
index 9287364..3e18de9 100644
--- a/test/core/end2end/fixtures/chttp2_socket_pair_one_byte_at_a_time.c
+++ b/test/core/end2end/fixtures/chttp2_socket_pair_one_byte_at_a_time.c
@@ -32,25 +32,15 @@
  */
 
 #include "test/core/end2end/end2end_tests.h"
-
-#include <errno.h>
-#include <fcntl.h>
-#include <string.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <stdio.h>
-
 #include "src/core/channel/client_channel.h"
 #include "src/core/channel/connected_channel.h"
 #include "src/core/channel/http_filter.h"
 #include "src/core/channel/http_server_filter.h"
-#include "src/core/eventmanager/em.h"
+#include "src/core/iomgr/endpoint_pair.h"
+#include "src/core/iomgr/iomgr.h"
 #include "src/core/surface/channel.h"
 #include "src/core/surface/client.h"
 #include "src/core/surface/server.h"
-#include "src/core/surface/surface_em.h"
 #include "src/core/transport/chttp2_transport.h"
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
@@ -60,15 +50,6 @@
 #include "test/core/util/port.h"
 #include "test/core/util/test_config.h"
 
-static void create_sockets(int sv[2]) {
-  int flags;
-  GPR_ASSERT(socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0);
-  flags = fcntl(sv[0], F_GETFL, 0);
-  GPR_ASSERT(fcntl(sv[0], F_SETFL, flags | O_NONBLOCK) == 0);
-  flags = fcntl(sv[1], F_GETFL, 0);
-  GPR_ASSERT(fcntl(sv[1], F_SETFL, flags | O_NONBLOCK) == 0);
-}
-
 /* chttp2 transport that is immediately available (used for testing
    connected_channel without a client_channel */
 
@@ -102,11 +83,9 @@
       grpc_channel_get_channel_stack(channel), transport);
 }
 
-typedef struct socketpair_fixture_data { int sv[2]; } socketpair_fixture_data;
-
 static grpc_end2end_test_fixture chttp2_create_fixture_socketpair(
     grpc_channel_args *client_args, grpc_channel_args *server_args) {
-  socketpair_fixture_data *sfd = gpr_malloc(sizeof(socketpair_fixture_data));
+  grpc_endpoint_pair *sfd = gpr_malloc(sizeof(grpc_endpoint_pair));
 
   grpc_end2end_test_fixture f;
   f.fixture_data = sfd;
@@ -115,31 +94,27 @@
   f.server = grpc_server_create_from_filters(f.server_cq, NULL, 0, server_args);
   f.client = NULL;
 
-  create_sockets(sfd->sv);
+  *sfd = grpc_iomgr_create_endpoint_pair(1);
 
   return f;
 }
 
 static void chttp2_init_client_socketpair(grpc_end2end_test_fixture *f,
                                           grpc_channel_args *client_args) {
-  socketpair_fixture_data *sfd = f->fixture_data;
-  grpc_endpoint *cli_tcp;
+  grpc_endpoint_pair *sfd = f->fixture_data;
   sp_client_setup cs;
   cs.client_args = client_args;
   cs.f = f;
-  cli_tcp = grpc_tcp_create_dbg(sfd->sv[0], grpc_surface_em(), 1);
   grpc_create_chttp2_transport(client_setup_transport, &cs, client_args,
-                               cli_tcp, NULL, 0, grpc_mdctx_create(), 1);
+                               sfd->client, NULL, 0, grpc_mdctx_create(), 1);
   GPR_ASSERT(f->client);
 }
 
 static void chttp2_init_server_socketpair(grpc_end2end_test_fixture *f,
                                           grpc_channel_args *server_args) {
-  socketpair_fixture_data *sfd = f->fixture_data;
-  grpc_endpoint *svr_tcp;
-  svr_tcp = grpc_tcp_create_dbg(sfd->sv[1], grpc_surface_em(), 1);
-  grpc_create_chttp2_transport(server_setup_transport, f, server_args, svr_tcp,
-                               NULL, 0, grpc_mdctx_create(), 0);
+  grpc_endpoint_pair *sfd = f->fixture_data;
+  grpc_create_chttp2_transport(server_setup_transport, f, server_args,
+                               sfd->server, NULL, 0, grpc_mdctx_create(), 0);
 }
 
 static void chttp2_tear_down_socketpair(grpc_end2end_test_fixture *f) {
diff --git a/test/core/endpoint/secure_endpoint_test.c b/test/core/endpoint/secure_endpoint_test.c
index 4fd5dee..18a33b5 100644
--- a/test/core/endpoint/secure_endpoint_test.c
+++ b/test/core/endpoint/secure_endpoint_test.c
@@ -39,41 +39,25 @@
 #include <unistd.h>
 
 #include "src/core/endpoint/secure_endpoint.h"
-#include "src/core/endpoint/tcp.h"
-#include "src/core/eventmanager/em.h"
-#include "src/core/tsi/fake_transport_security.h"
+#include "src/core/iomgr/endpoint_pair.h"
+#include "src/core/iomgr/iomgr.h"
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
 #include "test/core/util/test_config.h"
-
-grpc_em g_em;
-
-static void create_sockets(int sv[2]) {
-  int flags;
-  GPR_ASSERT(socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0);
-  flags = fcntl(sv[0], F_GETFL, 0);
-  GPR_ASSERT(fcntl(sv[0], F_SETFL, flags | O_NONBLOCK) == 0);
-  flags = fcntl(sv[1], F_GETFL, 0);
-  GPR_ASSERT(fcntl(sv[1], F_SETFL, flags | O_NONBLOCK) == 0);
-}
+#include "src/core/tsi/fake_transport_security.h"
 
 static grpc_endpoint_test_fixture secure_endpoint_create_fixture_tcp_socketpair(
     size_t slice_size, gpr_slice *leftover_slices, size_t leftover_nslices) {
-  int sv[2];
   tsi_frame_protector *fake_read_protector = tsi_create_fake_protector(NULL);
   tsi_frame_protector *fake_write_protector = tsi_create_fake_protector(NULL);
   grpc_endpoint_test_fixture f;
-  grpc_endpoint *tcp_read;
-  grpc_endpoint *tcp_write;
+  grpc_endpoint_pair tcp;
 
-  create_sockets(sv);
-  grpc_em_init(&g_em);
-  tcp_read = grpc_tcp_create_dbg(sv[0], &g_em, slice_size);
-  tcp_write = grpc_tcp_create(sv[1], &g_em);
+  tcp = grpc_iomgr_create_endpoint_pair(slice_size);
 
   if (leftover_nslices == 0) {
     f.client_ep =
-        grpc_secure_endpoint_create(fake_read_protector, tcp_read, NULL, 0);
+        grpc_secure_endpoint_create(fake_read_protector, tcp.client, NULL, 0);
   } else {
     int i;
     tsi_result result;
@@ -115,14 +99,14 @@
     } while (still_pending_size > 0);
     encrypted_leftover = gpr_slice_from_copied_buffer(
         (const char *)encrypted_buffer, total_buffer_size - buffer_size);
-    f.client_ep = grpc_secure_endpoint_create(fake_read_protector, tcp_read,
+    f.client_ep = grpc_secure_endpoint_create(fake_read_protector, tcp.client,
                                               &encrypted_leftover, 1);
     gpr_slice_unref(encrypted_leftover);
     gpr_free(encrypted_buffer);
   }
 
   f.server_ep =
-      grpc_secure_endpoint_create(fake_write_protector, tcp_write, NULL, 0);
+      grpc_secure_endpoint_create(fake_write_protector, tcp.server, NULL, 0);
   return f;
 }
 
@@ -141,7 +125,7 @@
   return f;
 }
 
-static void clean_up() { grpc_em_destroy(&g_em); }
+static void clean_up() {}
 
 static grpc_endpoint_test_config configs[] = {
     {"secure_ep/tcp_socketpair",
@@ -213,9 +197,11 @@
 int main(int argc, char **argv) {
   grpc_test_init(argc, argv);
 
+  grpc_iomgr_init();
   grpc_endpoint_tests(configs[0]);
   test_leftover(configs[1], 1);
   test_destroy_ep_early(configs[1], 1);
+  grpc_iomgr_shutdown();
 
   return 0;
 }
diff --git a/test/core/eventmanager/em_pipe_test.c b/test/core/eventmanager/em_pipe_test.c
deleted file mode 100644
index f2414c4..0000000
--- a/test/core/eventmanager/em_pipe_test.c
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- *
- * Copyright 2014, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-/* Test grpc_em_fd with pipe. The test creates a pipe with non-blocking mode,
-   sends a stream of bytes through the pipe, and verifies that all bytes are
-   received. */
-#include "src/core/eventmanager/em.h"
-
-#include <errno.h>
-#include <fcntl.h>
-#include <pthread.h>
-#include <string.h>
-#include <stdio.h>
-#include <unistd.h>
-
-#include <grpc/support/log.h>
-#include "test/core/util/test_config.h"
-
-/* Operation for fcntl() to set pipe buffer size. */
-#ifndef F_SETPIPE_SZ
-#define F_SETPIPE_SZ (1024 + 7)
-#endif
-
-#define TOTAL_WRITE 3 /* total number of times that the write buffer is full. \
-                         */
-#define BUF_SIZE 1024
-char read_buf[BUF_SIZE];
-char write_buf[BUF_SIZE];
-
-typedef struct {
-  int fd[2];
-  grpc_em em;
-  grpc_em_fd read_em_fd;
-  grpc_em_fd write_em_fd;
-  int num_write; /* number of times that the write buffer is full*/
-  ssize_t bytes_written_total; /* total number of bytes written to the pipe */
-  ssize_t bytes_read_total;    /* total number of bytes read from the pipe */
-  pthread_mutex_t mu;          /* protect cv and done */
-  pthread_cond_t cv;           /* signaled when read finished */
-  int done;                    /* set to 1 when read finished */
-} async_pipe;
-
-void write_shutdown_cb(void *arg, /*async_pipe*/
-                       enum grpc_em_cb_status status) {
-  async_pipe *ap = arg;
-  grpc_em_fd_destroy(&ap->write_em_fd);
-}
-
-void write_cb(void *arg, /*async_pipe*/ enum grpc_em_cb_status status) {
-  async_pipe *ap = arg;
-  ssize_t bytes_written = 0;
-
-  if (status == GRPC_CALLBACK_CANCELLED) {
-    write_shutdown_cb(arg, GRPC_CALLBACK_SUCCESS);
-    return;
-  }
-
-  do {
-    bytes_written = write(ap->fd[1], write_buf, BUF_SIZE);
-    if (bytes_written > 0) ap->bytes_written_total += bytes_written;
-  } while (bytes_written > 0);
-
-  if (errno == EAGAIN) {
-    if (ap->num_write < TOTAL_WRITE) {
-      ap->num_write++;
-      grpc_em_fd_notify_on_write(&ap->write_em_fd, write_cb, ap,
-                                 gpr_inf_future);
-    } else {
-      /* Note that this could just shut down directly; doing a trip through the
-         shutdown path serves only a demonstration of the API. */
-      grpc_em_fd_shutdown(&ap->write_em_fd);
-      grpc_em_fd_notify_on_write(&ap->write_em_fd, write_cb, ap,
-                                 gpr_inf_future);
-    }
-  } else {
-    GPR_ASSERT(0 && strcat("unknown errno: ", strerror(errno)));
-  }
-}
-
-void read_shutdown_cb(void *arg, /*async_pipe*/ enum grpc_em_cb_status status) {
-  async_pipe *ap = arg;
-  grpc_em_fd_destroy(&ap->read_em_fd);
-  pthread_mutex_lock(&ap->mu);
-  if (ap->done == 0) {
-    ap->done = 1;
-    pthread_cond_signal(&ap->cv);
-  }
-  pthread_mutex_unlock(&ap->mu);
-}
-
-void read_cb(void *arg, /*async_pipe*/ enum grpc_em_cb_status status) {
-  async_pipe *ap = arg;
-  ssize_t bytes_read = 0;
-
-  if (status == GRPC_CALLBACK_CANCELLED) {
-    read_shutdown_cb(arg, GRPC_CALLBACK_SUCCESS);
-    return;
-  }
-
-  do {
-    bytes_read = read(ap->fd[0], read_buf, BUF_SIZE);
-    if (bytes_read > 0) ap->bytes_read_total += bytes_read;
-  } while (bytes_read > 0);
-
-  if (bytes_read == 0) {
-    /* Note that this could just shut down directly; doing a trip through the
-       shutdown path serves only a demonstration of the API. */
-    grpc_em_fd_shutdown(&ap->read_em_fd);
-    grpc_em_fd_notify_on_read(&ap->read_em_fd, read_cb, ap, gpr_inf_future);
-  } else if (bytes_read == -1) {
-    if (errno == EAGAIN) {
-      grpc_em_fd_notify_on_read(&ap->read_em_fd, read_cb, ap, gpr_inf_future);
-    } else {
-      GPR_ASSERT(0 && strcat("unknown errno: ", strerror(errno)));
-    }
-  }
-}
-
-void dummy_cb(void *arg, /*async_pipe*/ enum grpc_em_cb_status status) {}
-
-void async_pipe_init(async_pipe *ap) {
-  int i;
-
-  ap->num_write = 0;
-  ap->bytes_written_total = 0;
-  ap->bytes_read_total = 0;
-
-  pthread_mutex_init(&ap->mu, NULL);
-  pthread_cond_init(&ap->cv, NULL);
-  ap->done = 0;
-
-  GPR_ASSERT(0 == pipe(ap->fd));
-  for (i = 0; i < 2; i++) {
-    int flags = fcntl(ap->fd[i], F_GETFL, 0);
-    GPR_ASSERT(fcntl(ap->fd[i], F_SETFL, flags | O_NONBLOCK) == 0);
-    GPR_ASSERT(fcntl(ap->fd[i], F_SETPIPE_SZ, 4096) == 4096);
-  }
-
-  grpc_em_init(&ap->em);
-  grpc_em_fd_init(&ap->read_em_fd, &ap->em, ap->fd[0]);
-  grpc_em_fd_init(&ap->write_em_fd, &ap->em, ap->fd[1]);
-}
-
-static void async_pipe_start(async_pipe *ap) {
-  grpc_em_fd_notify_on_read(&ap->read_em_fd, read_cb, ap, gpr_inf_future);
-  grpc_em_fd_notify_on_write(&ap->write_em_fd, write_cb, ap, gpr_inf_future);
-}
-
-static void async_pipe_wait_destroy(async_pipe *ap) {
-  pthread_mutex_lock(&ap->mu);
-  while (!ap->done) pthread_cond_wait(&ap->cv, &ap->mu);
-  pthread_mutex_unlock(&ap->mu);
-  pthread_mutex_destroy(&ap->mu);
-  pthread_cond_destroy(&ap->cv);
-
-  grpc_em_destroy(&ap->em);
-}
-
-int main(int argc, char **argv) {
-  async_pipe ap;
-  grpc_test_init(argc, argv);
-  async_pipe_init(&ap);
-  async_pipe_start(&ap);
-  async_pipe_wait_destroy(&ap);
-  GPR_ASSERT(ap.bytes_read_total == ap.bytes_written_total);
-  gpr_log(GPR_INFO, "read total bytes %d", ap.bytes_read_total);
-  return 0;
-}
diff --git a/test/core/httpcli/httpcli_test.c b/test/core/httpcli/httpcli_test.c
index 5c0d87c..c901e59 100644
--- a/test/core/httpcli/httpcli_test.c
+++ b/test/core/httpcli/httpcli_test.c
@@ -35,11 +35,12 @@
 
 #include <string.h>
 
+#include "src/core/iomgr/iomgr.h"
 #include <grpc/support/log.h>
+#include <grpc/support/sync.h>
 #include "test/core/util/test_config.h"
 
 static gpr_event g_done;
-static grpc_em g_em;
 
 static gpr_timespec n_seconds_time(int seconds) {
   return gpr_time_add(gpr_now(), gpr_time_from_micros(seconds * 1000000));
@@ -55,7 +56,7 @@
 static void test_get(int use_ssl) {
   grpc_httpcli_request req;
 
-  gpr_log(GPR_INFO, "running %s with use_ssl=%d.", __FUNCTION__, (int)use_ssl);
+  gpr_log(GPR_INFO, "running %s with use_ssl=%d.", __FUNCTION__, use_ssl);
 
   gpr_event_init(&g_done);
   memset(&req, 0, sizeof(req));
@@ -63,7 +64,7 @@
   req.path = "/";
   req.use_ssl = use_ssl;
 
-  grpc_httpcli_get(&req, n_seconds_time(15), &g_em, on_finish, (void *)42);
+  grpc_httpcli_get(&req, n_seconds_time(15), on_finish, (void *)42);
   GPR_ASSERT(gpr_event_wait(&g_done, n_seconds_time(20)));
 }
 
@@ -79,7 +80,7 @@
   req.path = "/1eamwr21";
   req.use_ssl = use_ssl;
 
-  grpc_httpcli_post(&req, NULL, 0, n_seconds_time(15), &g_em, on_finish,
+  grpc_httpcli_post(&req, NULL, 0, n_seconds_time(15), on_finish,
                     (void *)42);
   GPR_ASSERT(gpr_event_wait(&g_done, n_seconds_time(20)));
 }
@@ -87,7 +88,7 @@
 
 int main(int argc, char **argv) {
   grpc_test_init(argc, argv);
-  grpc_em_init(&g_em);
+  grpc_iomgr_init();
 
   test_get(0);
   test_get(1);
@@ -95,7 +96,7 @@
   /* test_post(0); */
   /* test_post(1); */
 
-  grpc_em_destroy(&g_em);
+  grpc_iomgr_shutdown();
 
   return 0;
 }
diff --git a/test/core/iomgr/alarm_test.c b/test/core/iomgr/alarm_test.c
new file mode 100644
index 0000000..0dcd214
--- /dev/null
+++ b/test/core/iomgr/alarm_test.c
@@ -0,0 +1,219 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/* Test gRPC event manager with a simple TCP upload server and client. */
+#include "src/core/iomgr/alarm.h"
+
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <netinet/in.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <unistd.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/sync.h>
+#include <grpc/support/time.h>
+#include "test/core/util/test_config.h"
+
+/* Dummy gRPC callback */
+void no_op_cb(void *arg, grpc_iomgr_cb_status status) {}
+
+typedef struct {
+  gpr_cv cv;
+  gpr_mu mu;
+  int counter;
+  int done_success_ctr;
+  int done_cancel_ctr;
+  int done;
+  gpr_event fcb_arg;
+  grpc_iomgr_cb_status status;
+} alarm_arg;
+
+static void followup_cb(void *arg, grpc_iomgr_cb_status status) {
+  gpr_event_set((gpr_event *)arg, arg);
+}
+
+/* Called when an alarm expires. */
+static void alarm_cb(void *arg /* alarm_arg */, grpc_iomgr_cb_status status) {
+  alarm_arg *a = arg;
+  gpr_mu_lock(&a->mu);
+  if (status == GRPC_CALLBACK_SUCCESS) {
+    a->counter++;
+    a->done_success_ctr++;
+  } else if (status == GRPC_CALLBACK_CANCELLED) {
+    a->done_cancel_ctr++;
+  } else {
+    GPR_ASSERT(0);
+  }
+  a->done = 1;
+  a->status = status;
+  gpr_cv_signal(&a->cv);
+  gpr_mu_unlock(&a->mu);
+  grpc_iomgr_add_callback(followup_cb, &a->fcb_arg);
+}
+
+/* Test grpc_alarm add and cancel. */
+static void test_grpc_alarm() {
+  grpc_alarm alarm;
+  grpc_alarm alarm_to_cancel;
+  gpr_timespec tv0 = {0, 1};
+  /* Timeout on the alarm cond. var, so make big enough to absorb time
+     deviations. Otherwise, operations after wait will not be properly ordered
+   */
+  gpr_timespec tv1 = gpr_time_from_micros(200000);
+  gpr_timespec tv2 = {0, 1};
+  gpr_timespec alarm_deadline;
+  gpr_timespec followup_deadline;
+
+  alarm_arg arg;
+  alarm_arg arg2;
+  void *fdone;
+
+  grpc_iomgr_init();
+
+  arg.counter = 0;
+  arg.status = GRPC_CALLBACK_DO_NOT_USE;
+  arg.done_success_ctr = 0;
+  arg.done_cancel_ctr = 0;
+  arg.done = 0;
+  gpr_mu_init(&arg.mu);
+  gpr_cv_init(&arg.cv);
+  gpr_event_init(&arg.fcb_arg);
+
+  grpc_alarm_init(&alarm, alarm_cb, &arg);
+  grpc_alarm_add(&alarm, gpr_time_add(tv0, gpr_now()));
+
+  alarm_deadline = gpr_time_add(gpr_now(), tv1);
+  gpr_mu_lock(&arg.mu);
+  while (arg.done == 0) {
+    gpr_cv_wait(&arg.cv, &arg.mu, alarm_deadline);
+  }
+  gpr_mu_unlock(&arg.mu);
+
+  followup_deadline = gpr_time_add(gpr_now(), tv1);
+  fdone = gpr_event_wait(&arg.fcb_arg, followup_deadline);
+
+  if (arg.counter != 1) {
+    gpr_log(GPR_ERROR, "Alarm callback not called");
+    GPR_ASSERT(0);
+  } else if (arg.done_success_ctr != 1) {
+    gpr_log(GPR_ERROR, "Alarm done callback not called with success");
+    GPR_ASSERT(0);
+  } else if (arg.done_cancel_ctr != 0) {
+    gpr_log(GPR_ERROR, "Alarm done callback called with cancel");
+    GPR_ASSERT(0);
+  } else if (arg.status == GRPC_CALLBACK_DO_NOT_USE) {
+    gpr_log(GPR_ERROR, "Alarm callback without status");
+    GPR_ASSERT(0);
+  } else {
+    gpr_log(GPR_INFO, "Alarm callback called successfully");
+  }
+
+  if (fdone != (void *)&arg.fcb_arg) {
+    gpr_log(GPR_ERROR, "Followup callback #1 not invoked properly %p %p", fdone,
+            &arg.fcb_arg);
+    GPR_ASSERT(0);
+  }
+  gpr_cv_destroy(&arg.cv);
+  gpr_mu_destroy(&arg.mu);
+
+  arg2.counter = 0;
+  arg2.status = GRPC_CALLBACK_DO_NOT_USE;
+  arg2.done_success_ctr = 0;
+  arg2.done_cancel_ctr = 0;
+  arg2.done = 0;
+  gpr_mu_init(&arg2.mu);
+  gpr_cv_init(&arg2.cv);
+  gpr_event_init(&arg2.fcb_arg);
+
+  grpc_alarm_init(&alarm_to_cancel, alarm_cb, &arg2);
+  grpc_alarm_add(&alarm_to_cancel, gpr_time_add(tv2, gpr_now()));
+  grpc_alarm_cancel(&alarm_to_cancel);
+
+  alarm_deadline = gpr_time_add(gpr_now(), tv1);
+  gpr_mu_lock(&arg2.mu);
+  while (arg2.done == 0) {
+    gpr_cv_wait(&arg2.cv, &arg2.mu, alarm_deadline);
+  }
+  gpr_mu_unlock(&arg2.mu);
+
+  followup_deadline = gpr_time_add(gpr_now(), tv1);
+  fdone = gpr_event_wait(&arg2.fcb_arg, followup_deadline);
+
+  if (arg2.counter != arg2.done_success_ctr) {
+    gpr_log(GPR_ERROR, "Alarm callback called but didn't lead to done success");
+    GPR_ASSERT(0);
+  } else if (arg2.done_success_ctr && arg2.done_cancel_ctr) {
+    gpr_log(GPR_ERROR, "Alarm done callback called with success and cancel");
+    GPR_ASSERT(0);
+  } else if (arg2.done_cancel_ctr + arg2.done_success_ctr != 1) {
+    gpr_log(GPR_ERROR, "Alarm done callback called incorrect number of times");
+    GPR_ASSERT(0);
+  } else if (arg2.status == GRPC_CALLBACK_DO_NOT_USE) {
+    gpr_log(GPR_ERROR, "Alarm callback without status");
+    GPR_ASSERT(0);
+  } else if (arg2.done_success_ctr) {
+    gpr_log(GPR_INFO, "Alarm callback executed before cancel");
+    gpr_log(GPR_INFO, "Current value of triggered is %d\n",
+            (int)alarm_to_cancel.triggered);
+  } else if (arg2.done_cancel_ctr) {
+    gpr_log(GPR_INFO, "Alarm callback canceled");
+    gpr_log(GPR_INFO, "Current value of triggered is %d\n",
+            (int)alarm_to_cancel.triggered);
+  } else {
+    gpr_log(GPR_ERROR, "Alarm cancel test should not be here");
+    GPR_ASSERT(0);
+  }
+
+  if (fdone != (void *)&arg2.fcb_arg) {
+    gpr_log(GPR_ERROR, "Followup callback #2 not invoked properly %p %p", fdone,
+            &arg2.fcb_arg);
+    GPR_ASSERT(0);
+  }
+  gpr_cv_destroy(&arg2.cv);
+  gpr_mu_destroy(&arg2.mu);
+
+  grpc_iomgr_shutdown();
+}
+
+int main(int argc, char **argv) {
+  grpc_test_init(argc, argv);
+  test_grpc_alarm();
+  return 0;
+}
diff --git a/test/core/eventmanager/em_test.c b/test/core/iomgr/fd_posix_test.c
similarity index 63%
rename from test/core/eventmanager/em_test.c
rename to test/core/iomgr/fd_posix_test.c
index 274edc3..4d4461e 100644
--- a/test/core/eventmanager/em_test.c
+++ b/test/core/iomgr/fd_posix_test.c
@@ -32,7 +32,7 @@
  */
 
 /* Test gRPC event manager with a simple TCP upload server and client. */
-#include "src/core/eventmanager/em.h"
+#include "src/core/iomgr/iomgr_libevent.h"
 
 #include <ctype.h>
 #include <errno.h>
@@ -92,8 +92,7 @@
 
 /* An upload server. */
 typedef struct {
-  grpc_em em;               /* event manger used by the sever */
-  grpc_em_fd em_fd;         /* listening fd */
+  grpc_fd *em_fd;           /* listening fd */
   ssize_t read_bytes_total; /* total number of received bytes */
   gpr_mu mu;                /* protect done and done_cv */
   gpr_cv done_cv;           /* signaled when a server finishes serving */
@@ -101,7 +100,6 @@
 } server;
 
 static void server_init(server *sv) {
-  GPR_ASSERT(grpc_em_init(&sv->em) == GRPC_EM_OK);
   sv->read_bytes_total = 0;
   gpr_mu_init(&sv->mu);
   gpr_cv_init(&sv->done_cv);
@@ -112,7 +110,7 @@
    Created when a new upload request arrives in the server. */
 typedef struct {
   server *sv;              /* not owned by a single session */
-  grpc_em_fd em_fd;        /* fd to read upload bytes */
+  grpc_fd *em_fd;          /* fd to read upload bytes */
   char read_buf[BUF_SIZE]; /* buffer to store upload bytes */
 } session;
 
@@ -122,17 +120,17 @@
                                 enum grpc_em_cb_status status) {
   session *se = arg;
   server *sv = se->sv;
-  grpc_em_fd_destroy(&se->em_fd);
+  grpc_fd_destroy(se->em_fd);
   gpr_free(se);
   /* Start to shutdown listen fd. */
-  grpc_em_fd_shutdown(&sv->em_fd);
+  grpc_fd_shutdown(sv->em_fd);
 }
 
 /* Called when data become readable in a session. */
 static void session_read_cb(void *arg, /*session*/
                             enum grpc_em_cb_status status) {
   session *se = arg;
-  int fd = grpc_em_fd_get(&se->em_fd);
+  int fd = grpc_fd_get(se->em_fd);
 
   ssize_t read_once = 0;
   ssize_t read_total = 0;
@@ -153,8 +151,8 @@
      It is possible to read nothing due to spurious edge event or data has
      been drained, In such a case, read() returns -1 and set errno to EAGAIN. */
   if (read_once == 0) {
-    grpc_em_fd_shutdown(&se->em_fd);
-    grpc_em_fd_notify_on_read(&se->em_fd, session_read_cb, se, gpr_inf_future);
+    grpc_fd_shutdown(se->em_fd);
+    grpc_fd_notify_on_read(se->em_fd, session_read_cb, se, gpr_inf_future);
   } else if (read_once == -1) {
     if (errno == EAGAIN) {
       /* An edge triggered event is cached in the kernel until next poll.
@@ -165,8 +163,8 @@
          TODO(chenw): in multi-threaded version, callback and polling can be
          run in different threads. polling may catch a persist read edge event
          before notify_on_read is called.  */
-      GPR_ASSERT(grpc_em_fd_notify_on_read(&se->em_fd, session_read_cb, se,
-                                           gpr_inf_future) == GRPC_EM_OK);
+      GPR_ASSERT(grpc_fd_notify_on_read(se->em_fd, session_read_cb, se,
+                                        gpr_inf_future));
     } else {
       gpr_log(GPR_ERROR, "Unhandled read error %s", strerror(errno));
       GPR_ASSERT(0);
@@ -180,7 +178,7 @@
                                enum grpc_em_cb_status status) {
   server *sv = arg;
 
-  grpc_em_fd_destroy(&sv->em_fd);
+  grpc_fd_destroy(sv->em_fd);
 
   gpr_mu_lock(&sv->mu);
   sv->done = 1;
@@ -197,26 +195,26 @@
   session *se;
   struct sockaddr_storage ss;
   socklen_t slen = sizeof(ss);
-  struct grpc_em_fd *listen_em_fd = &sv->em_fd;
+  struct grpc_fd *listen_em_fd = sv->em_fd;
 
   if (status == GRPC_CALLBACK_CANCELLED) {
     listen_shutdown_cb(arg, GRPC_CALLBACK_SUCCESS);
     return;
   }
 
-  fd = accept(grpc_em_fd_get(listen_em_fd), (struct sockaddr *)&ss, &slen);
+  fd = accept(grpc_fd_get(listen_em_fd), (struct sockaddr *)&ss, &slen);
   GPR_ASSERT(fd >= 0);
   GPR_ASSERT(fd < FD_SETSIZE);
   flags = fcntl(fd, F_GETFL, 0);
   fcntl(fd, F_SETFL, flags | O_NONBLOCK);
   se = gpr_malloc(sizeof(*se));
   se->sv = sv;
-  GPR_ASSERT(grpc_em_fd_init(&se->em_fd, &sv->em, fd) == GRPC_EM_OK);
-  GPR_ASSERT(grpc_em_fd_notify_on_read(&se->em_fd, session_read_cb, se,
-                                       gpr_inf_future) == GRPC_EM_OK);
+  se->em_fd = grpc_fd_create(fd);
+  GPR_ASSERT(
+      grpc_fd_notify_on_read(se->em_fd, session_read_cb, se, gpr_inf_future));
 
-  GPR_ASSERT(grpc_em_fd_notify_on_read(listen_em_fd, listen_cb, sv,
-                                       gpr_inf_future) == GRPC_EM_OK);
+  GPR_ASSERT(
+      grpc_fd_notify_on_read(listen_em_fd, listen_cb, sv, gpr_inf_future));
 }
 
 /* Max number of connections pending to be accepted by listen(). */
@@ -235,14 +233,13 @@
   create_test_socket(port, &fd, &sin);
   addr_len = sizeof(sin);
   GPR_ASSERT(bind(fd, (struct sockaddr *)&sin, addr_len) == 0);
-  GPR_ASSERT(getsockname(fd, (struct sockaddr *)&sin, &addr_len) == GRPC_EM_OK);
+  GPR_ASSERT(getsockname(fd, (struct sockaddr *)&sin, &addr_len) == 0);
   port = ntohs(sin.sin_port);
   GPR_ASSERT(listen(fd, MAX_NUM_FD) == 0);
 
-  GPR_ASSERT(grpc_em_fd_init(&sv->em_fd, &sv->em, fd) == GRPC_EM_OK);
+  sv->em_fd = grpc_fd_create(fd);
   /* Register to be interested in reading from listen_fd. */
-  GPR_ASSERT(grpc_em_fd_notify_on_read(&sv->em_fd, listen_cb, sv,
-                                       gpr_inf_future) == GRPC_EM_OK);
+  GPR_ASSERT(grpc_fd_notify_on_read(sv->em_fd, listen_cb, sv, gpr_inf_future));
 
   return port;
 }
@@ -255,8 +252,6 @@
 
   gpr_mu_destroy(&sv->mu);
   gpr_cv_destroy(&sv->done_cv);
-
-  GPR_ASSERT(grpc_em_destroy(&sv->em) == GRPC_EM_OK);
 }
 
 /* ===An upload client to test notify_on_write=== */
@@ -268,8 +263,7 @@
 
 /* An upload client. */
 typedef struct {
-  grpc_em em;
-  grpc_em_fd em_fd;
+  grpc_fd *em_fd;
   char write_buf[CLIENT_WRITE_BUF_SIZE];
   ssize_t write_bytes_total;
   /* Number of times that the client fills up the write buffer and calls
@@ -282,7 +276,6 @@
 } client;
 
 static void client_init(client *cl) {
-  GPR_ASSERT(grpc_em_init(&cl->em) == GRPC_EM_OK);
   memset(cl->write_buf, 0, sizeof(cl->write_buf));
   cl->write_bytes_total = 0;
   cl->client_write_cnt = 0;
@@ -295,7 +288,7 @@
 static void client_session_shutdown_cb(void *arg /*client*/,
                                        enum grpc_em_cb_status status) {
   client *cl = arg;
-  grpc_em_fd_destroy(&cl->em_fd);
+  grpc_fd_destroy(cl->em_fd);
   gpr_mu_lock(&cl->mu);
   cl->done = 1;
   gpr_cv_signal(&cl->done_cv);
@@ -306,7 +299,7 @@
 static void client_session_write(void *arg, /*client*/
                                  enum grpc_em_cb_status status) {
   client *cl = arg;
-  int fd = grpc_em_fd_get(&cl->em_fd);
+  int fd = grpc_fd_get(cl->em_fd);
   ssize_t write_once = 0;
 
   if (status == GRPC_CALLBACK_CANCELLED) {
@@ -322,14 +315,14 @@
   if (errno == EAGAIN) {
     gpr_mu_lock(&cl->mu);
     if (cl->client_write_cnt < CLIENT_TOTAL_WRITE_CNT) {
-      GPR_ASSERT(grpc_em_fd_notify_on_write(&cl->em_fd, client_session_write,
-                                            cl, gpr_inf_future) == GRPC_EM_OK);
+      GPR_ASSERT(grpc_fd_notify_on_write(cl->em_fd, client_session_write, cl,
+                                         gpr_inf_future));
       cl->client_write_cnt++;
     } else {
       close(fd);
-      grpc_em_fd_shutdown(&cl->em_fd);
-      grpc_em_fd_notify_on_write(&cl->em_fd, client_session_write, cl,
-                                 gpr_inf_future);
+      grpc_fd_shutdown(cl->em_fd);
+      grpc_fd_notify_on_write(cl->em_fd, client_session_write, cl,
+                              gpr_inf_future);
     }
     gpr_mu_unlock(&cl->mu);
   } else {
@@ -349,7 +342,7 @@
     GPR_ASSERT(0);
   }
 
-  GPR_ASSERT(grpc_em_fd_init(&cl->em_fd, &cl->em, fd) == GRPC_EM_OK);
+  cl->em_fd = grpc_fd_create(fd);
 
   client_session_write(cl, GRPC_CALLBACK_SUCCESS);
 }
@@ -362,14 +355,12 @@
 
   gpr_mu_destroy(&cl->mu);
   gpr_cv_destroy(&cl->done_cv);
-
-  GPR_ASSERT(grpc_em_destroy(&cl->em) == GRPC_EM_OK);
 }
 
-/* Test grpc_em_fd. Start an upload server and client, upload a stream of
+/* Test grpc_fd. Start an upload server and client, upload a stream of
    bytes from the client to the server, and verify that the total number of
    sent bytes is equal to the total number of received bytes. */
-static void test_grpc_em_fd() {
+static void test_grpc_fd() {
   server sv;
   client cl;
   int port;
@@ -425,9 +416,8 @@
    Note that we have two different but almost identical callbacks above -- the
    point is to have two different function pointers and two different data
    pointers and make sure that changing both really works. */
-static void test_grpc_em_fd_change() {
-  grpc_em em;
-  grpc_em_fd em_fd;
+static void test_grpc_fd_change() {
+  grpc_fd *em_fd;
   fd_change_data a, b;
   int flags;
   int sv[2];
@@ -443,11 +433,10 @@
   flags = fcntl(sv[1], F_GETFL, 0);
   GPR_ASSERT(fcntl(sv[1], F_SETFL, flags | O_NONBLOCK) == 0);
 
-  grpc_em_init(&em);
-  grpc_em_fd_init(&em_fd, &em, sv[0]);
+  em_fd = grpc_fd_create(sv[0]);
 
   /* Register the first callback, then make its FD readable */
-  grpc_em_fd_notify_on_read(&em_fd, first_read_callback, &a, gpr_inf_future);
+  grpc_fd_notify_on_read(em_fd, first_read_callback, &a, gpr_inf_future);
   data = 0;
   result = write(sv[1], &data, 1);
   GPR_ASSERT(result == 1);
@@ -466,7 +455,7 @@
 
   /* Now register a second callback with distinct change data, and do the same
      thing again. */
-  grpc_em_fd_notify_on_read(&em_fd, second_read_callback, &b, gpr_inf_future);
+  grpc_fd_notify_on_read(em_fd, second_read_callback, &b, gpr_inf_future);
   data = 0;
   result = write(sv[1], &data, 1);
   GPR_ASSERT(result == 1);
@@ -479,8 +468,7 @@
   GPR_ASSERT(b.cb_that_ran == second_read_callback);
   gpr_mu_unlock(&b.mu);
 
-  grpc_em_fd_destroy(&em_fd);
-  grpc_em_destroy(&em);
+  grpc_fd_destroy(em_fd);
   destroy_change_data(&a);
   destroy_change_data(&b);
   close(sv[0]);
@@ -495,9 +483,8 @@
   }
 }
 
-void test_grpc_em_fd_notify_timeout() {
-  grpc_em em;
-  grpc_em_fd em_fd;
+void test_grpc_fd_notify_timeout() {
+  grpc_fd *em_fd;
   gpr_event ev;
   int flags;
   int sv[2];
@@ -512,206 +499,26 @@
   flags = fcntl(sv[1], F_GETFL, 0);
   GPR_ASSERT(fcntl(sv[1], F_SETFL, flags | O_NONBLOCK) == 0);
 
-  grpc_em_init(&em);
-  grpc_em_fd_init(&em_fd, &em, sv[0]);
+  em_fd = grpc_fd_create(sv[0]);
 
   timeout = gpr_time_from_micros(1000000);
   deadline = gpr_time_add(gpr_now(), timeout);
 
-  grpc_em_fd_notify_on_read(&em_fd, timeout_callback, &ev, deadline);
+  grpc_fd_notify_on_read(em_fd, timeout_callback, &ev, deadline);
 
   GPR_ASSERT(gpr_event_wait(&ev, gpr_time_add(deadline, timeout)));
 
   GPR_ASSERT(gpr_event_get(&ev) == (void *)1);
-  grpc_em_fd_destroy(&em_fd);
-  grpc_em_destroy(&em);
+  grpc_fd_destroy(em_fd);
   close(sv[1]);
 }
 
-typedef struct {
-  grpc_em *em;
-  gpr_cv cv;
-  gpr_mu mu;
-  int counter;
-  int done_success_ctr;
-  int done_cancel_ctr;
-  int done;
-  gpr_event fcb_arg;
-  grpc_em_cb_status status;
-} alarm_arg;
-
-static void followup_cb(void *arg, grpc_em_cb_status status) {
-  gpr_event_set((gpr_event *)arg, arg);
-}
-
-/* Called when an alarm expires. */
-static void alarm_cb(void *arg /* alarm_arg */, grpc_em_cb_status status) {
-  alarm_arg *a = arg;
-  gpr_mu_lock(&a->mu);
-  if (status == GRPC_CALLBACK_SUCCESS) {
-    a->counter++;
-    a->done_success_ctr++;
-  } else if (status == GRPC_CALLBACK_CANCELLED) {
-    a->done_cancel_ctr++;
-  } else {
-    GPR_ASSERT(0);
-  }
-  a->done = 1;
-  a->status = status;
-  gpr_cv_signal(&a->cv);
-  gpr_mu_unlock(&a->mu);
-  grpc_em_add_callback(a->em, followup_cb, &a->fcb_arg);
-}
-
-/* Test grpc_em_alarm add and cancel. */
-static void test_grpc_em_alarm() {
-  struct grpc_em em;
-  struct grpc_em_alarm alarm;
-  struct grpc_em_alarm alarm_to_cancel;
-  gpr_timespec tv0 = {0, 1};
-  /* Timeout on the alarm cond. var, so make big enough to absorb time
-     deviations. Otherwise, operations after wait will not be properly ordered
-   */
-  gpr_timespec tv1 = gpr_time_from_micros(200000);
-  gpr_timespec tv2 = {0, 1};
-  gpr_timespec alarm_deadline;
-  gpr_timespec followup_deadline;
-
-  alarm_arg arg;
-  alarm_arg arg2;
-  void *fdone;
-
-  GPR_ASSERT(grpc_em_init(&em) == GRPC_EM_OK);
-
-  arg.em = &em;
-  arg.counter = 0;
-  arg.status = GRPC_CALLBACK_DO_NOT_USE;
-  arg.done_success_ctr = 0;
-  arg.done_cancel_ctr = 0;
-  arg.done = 0;
-  gpr_mu_init(&arg.mu);
-  gpr_cv_init(&arg.cv);
-  gpr_event_init(&arg.fcb_arg);
-
-  GPR_ASSERT(grpc_em_alarm_init(&alarm, &em, alarm_cb, &arg) == GRPC_EM_OK);
-  GPR_ASSERT(grpc_em_alarm_add(&alarm, gpr_time_add(tv0, gpr_now())) ==
-             GRPC_EM_OK);
-
-  alarm_deadline = gpr_time_add(gpr_now(), tv1);
-  gpr_mu_lock(&arg.mu);
-  while (arg.done == 0) {
-    gpr_cv_wait(&arg.cv, &arg.mu, alarm_deadline);
-  }
-  gpr_mu_unlock(&arg.mu);
-
-  followup_deadline = gpr_time_add(gpr_now(), tv1);
-  fdone = gpr_event_wait(&arg.fcb_arg, followup_deadline);
-
-  if (arg.counter != 1) {
-    gpr_log(GPR_ERROR, "Alarm callback not called");
-    GPR_ASSERT(0);
-  } else if (arg.done_success_ctr != 1) {
-    gpr_log(GPR_ERROR, "Alarm done callback not called with success");
-    GPR_ASSERT(0);
-  } else if (arg.done_cancel_ctr != 0) {
-    gpr_log(GPR_ERROR, "Alarm done callback called with cancel");
-    GPR_ASSERT(0);
-  } else if (arg.status == GRPC_CALLBACK_DO_NOT_USE) {
-    gpr_log(GPR_ERROR, "Alarm callback without status");
-    GPR_ASSERT(0);
-  } else {
-    gpr_log(GPR_INFO, "Alarm callback called successfully");
-  }
-
-  if (fdone != (void *)&arg.fcb_arg) {
-    gpr_log(GPR_ERROR, "Followup callback #1 not invoked properly %p %p", fdone,
-            &arg.fcb_arg);
-    GPR_ASSERT(0);
-  }
-  gpr_cv_destroy(&arg.cv);
-  gpr_mu_destroy(&arg.mu);
-
-  arg2.em = &em;
-  arg2.counter = 0;
-  arg2.status = GRPC_CALLBACK_DO_NOT_USE;
-  arg2.done_success_ctr = 0;
-  arg2.done_cancel_ctr = 0;
-  arg2.done = 0;
-  gpr_mu_init(&arg2.mu);
-  gpr_cv_init(&arg2.cv);
-  gpr_event_init(&arg2.fcb_arg);
-
-  GPR_ASSERT(grpc_em_alarm_init(&alarm_to_cancel, &em, alarm_cb, &arg2) ==
-             GRPC_EM_OK);
-  GPR_ASSERT(grpc_em_alarm_add(&alarm_to_cancel,
-                               gpr_time_add(tv2, gpr_now())) == GRPC_EM_OK);
-  switch (grpc_em_alarm_cancel(&alarm_to_cancel)) {
-    case GRPC_EM_OK:
-      gpr_log(GPR_INFO, "Alarm cancel succeeded");
-      break;
-    case GRPC_EM_ERROR:
-      gpr_log(GPR_ERROR, "Alarm cancel failed");
-      GPR_ASSERT(0);
-      break;
-    case GRPC_EM_INVALID_ARGUMENTS:
-      gpr_log(GPR_ERROR, "Alarm cancel failed with bad response code");
-      gpr_log(GPR_ERROR, "Current value of triggered is %d\n",
-              (int)alarm_to_cancel.triggered);
-      GPR_ASSERT(0);
-      break;
-  }
-
-  alarm_deadline = gpr_time_add(gpr_now(), tv1);
-  gpr_mu_lock(&arg2.mu);
-  while (arg2.done == 0) {
-    gpr_cv_wait(&arg2.cv, &arg2.mu, alarm_deadline);
-  }
-  gpr_mu_unlock(&arg2.mu);
-
-  followup_deadline = gpr_time_add(gpr_now(), tv1);
-  fdone = gpr_event_wait(&arg2.fcb_arg, followup_deadline);
-
-  if (arg2.counter != arg2.done_success_ctr) {
-    gpr_log(GPR_ERROR, "Alarm callback called but didn't lead to done success");
-    GPR_ASSERT(0);
-  } else if (arg2.done_success_ctr && arg2.done_cancel_ctr) {
-    gpr_log(GPR_ERROR, "Alarm done callback called with success and cancel");
-    GPR_ASSERT(0);
-  } else if (arg2.done_cancel_ctr + arg2.done_success_ctr != 1) {
-    gpr_log(GPR_ERROR, "Alarm done callback called incorrect number of times");
-    GPR_ASSERT(0);
-  } else if (arg2.status == GRPC_CALLBACK_DO_NOT_USE) {
-    gpr_log(GPR_ERROR, "Alarm callback without status");
-    GPR_ASSERT(0);
-  } else if (arg2.done_success_ctr) {
-    gpr_log(GPR_INFO, "Alarm callback executed before cancel");
-    gpr_log(GPR_INFO, "Current value of triggered is %d\n",
-            (int)alarm_to_cancel.triggered);
-  } else if (arg2.done_cancel_ctr) {
-    gpr_log(GPR_INFO, "Alarm callback canceled");
-    gpr_log(GPR_INFO, "Current value of triggered is %d\n",
-            (int)alarm_to_cancel.triggered);
-  } else {
-    gpr_log(GPR_ERROR, "Alarm cancel test should not be here");
-    GPR_ASSERT(0);
-  }
-
-  if (fdone != (void *)&arg2.fcb_arg) {
-    gpr_log(GPR_ERROR, "Followup callback #2 not invoked properly %p %p", fdone,
-            &arg2.fcb_arg);
-    GPR_ASSERT(0);
-  }
-  gpr_cv_destroy(&arg2.cv);
-  gpr_mu_destroy(&arg2.mu);
-
-  GPR_ASSERT(grpc_em_destroy(&em) == GRPC_EM_OK);
-}
-
 int main(int argc, char **argv) {
   grpc_test_init(argc, argv);
-  test_grpc_em_alarm();
-  test_grpc_em_fd();
-  test_grpc_em_fd_change();
-  test_grpc_em_fd_notify_timeout();
+  grpc_iomgr_init();
+  test_grpc_fd();
+  test_grpc_fd_change();
+  test_grpc_fd_notify_timeout();
+  grpc_iomgr_shutdown();
   return 0;
 }
diff --git a/test/core/endpoint/resolve_address_test.c b/test/core/iomgr/resolve_address_test.c
similarity index 98%
rename from test/core/endpoint/resolve_address_test.c
rename to test/core/iomgr/resolve_address_test.c
index 1e208d3..99e3119 100644
--- a/test/core/endpoint/resolve_address_test.c
+++ b/test/core/iomgr/resolve_address_test.c
@@ -31,7 +31,7 @@
  *
  */
 
-#include "src/core/endpoint/resolve_address.h"
+#include "src/core/iomgr/resolve_address.h"
 #include <grpc/support/log.h>
 #include <grpc/support/sync.h>
 #include <grpc/support/time.h>
diff --git a/test/core/endpoint/socket_utils_test.c b/test/core/iomgr/sockaddr_utils_test.c
similarity index 98%
rename from test/core/endpoint/socket_utils_test.c
rename to test/core/iomgr/sockaddr_utils_test.c
index ef6ac32..8cd9fb6 100644
--- a/test/core/endpoint/socket_utils_test.c
+++ b/test/core/iomgr/sockaddr_utils_test.c
@@ -31,7 +31,7 @@
  *
  */
 
-#include "src/core/endpoint/socket_utils.h"
+#include "src/core/iomgr/sockaddr_utils.h"
 
 #include <errno.h>
 #include <netinet/in.h>
diff --git a/test/core/iomgr/tcp_client_posix_test.c b/test/core/iomgr/tcp_client_posix_test.c
new file mode 100644
index 0000000..cb1cd0b
--- /dev/null
+++ b/test/core/iomgr/tcp_client_posix_test.c
@@ -0,0 +1,176 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/iomgr/tcp_client.h"
+
+#include <errno.h>
+#include <netinet/in.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+#include "src/core/iomgr/iomgr.h"
+#include <grpc/support/log.h>
+#include <grpc/support/time.h>
+
+static gpr_timespec test_deadline() {
+  return gpr_time_add(gpr_now(), gpr_time_from_micros(1000000));
+}
+
+static void must_succeed(void *arg, grpc_endpoint *tcp) {
+  GPR_ASSERT(tcp);
+  grpc_endpoint_shutdown(tcp);
+  grpc_endpoint_destroy(tcp);
+  gpr_event_set(arg, (void *)1);
+}
+
+static void must_fail(void *arg, grpc_endpoint *tcp) {
+  GPR_ASSERT(!tcp);
+  gpr_event_set(arg, (void *)1);
+}
+
+void test_succeeds() {
+  struct sockaddr_in addr;
+  socklen_t addr_len = sizeof(addr);
+  int svr_fd;
+  int r;
+  gpr_event ev;
+
+  gpr_event_init(&ev);
+
+  memset(&addr, 0, sizeof(addr));
+  addr.sin_family = AF_INET;
+
+  /* create a dummy server */
+  svr_fd = socket(AF_INET, SOCK_STREAM, 0);
+  GPR_ASSERT(svr_fd >= 0);
+  GPR_ASSERT(0 == bind(svr_fd, (struct sockaddr *)&addr, addr_len));
+  GPR_ASSERT(0 == listen(svr_fd, 1));
+
+  /* connect to it */
+  GPR_ASSERT(getsockname(svr_fd, (struct sockaddr *)&addr, &addr_len) == 0);
+  grpc_tcp_client_connect(must_succeed, &ev, (struct sockaddr *)&addr, addr_len,
+                          gpr_inf_future);
+
+  /* await the connection */
+  do {
+    addr_len = sizeof(addr);
+    r = accept(svr_fd, (struct sockaddr *)&addr, &addr_len);
+  } while (r == -1 && errno == EINTR);
+  GPR_ASSERT(r >= 0);
+  close(r);
+
+  /* wait for the connection callback to finish */
+  GPR_ASSERT(gpr_event_wait(&ev, test_deadline()));
+}
+
+void test_fails() {
+  struct sockaddr_in addr;
+  socklen_t addr_len = sizeof(addr);
+  gpr_event ev;
+
+  gpr_event_init(&ev);
+
+  memset(&addr, 0, sizeof(addr));
+  addr.sin_family = AF_INET;
+
+  /* connect to a broken address */
+  grpc_tcp_client_connect(must_fail, &ev, (struct sockaddr *)&addr, addr_len,
+                          gpr_inf_future);
+
+  /* wait for the connection callback to finish */
+  GPR_ASSERT(gpr_event_wait(&ev, test_deadline()));
+}
+
+void test_times_out() {
+  struct sockaddr_in addr;
+  socklen_t addr_len = sizeof(addr);
+  int svr_fd;
+#define NUM_CLIENT_CONNECTS 10
+  int client_fd[NUM_CLIENT_CONNECTS];
+  int i;
+  int r;
+  gpr_event ev;
+  gpr_timespec connect_deadline;
+
+  gpr_event_init(&ev);
+
+  memset(&addr, 0, sizeof(addr));
+  addr.sin_family = AF_INET;
+
+  /* create a dummy server */
+  svr_fd = socket(AF_INET, SOCK_STREAM, 0);
+  GPR_ASSERT(svr_fd >= 0);
+  GPR_ASSERT(0 == bind(svr_fd, (struct sockaddr *)&addr, addr_len));
+  GPR_ASSERT(0 == listen(svr_fd, 1));
+  /* Get its address */
+  GPR_ASSERT(getsockname(svr_fd, (struct sockaddr *)&addr, &addr_len) == 0);
+
+  /* tie up the listen buffer, which is somewhat arbitrarily sized. */
+  for (i = 0; i < NUM_CLIENT_CONNECTS; ++i) {
+    client_fd[i] = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);
+    do {
+      r = connect(client_fd[i], (struct sockaddr *)&addr, addr_len);
+    } while (r == -1 && errno == EINTR);
+    GPR_ASSERT(r < 0);
+    GPR_ASSERT(errno == EWOULDBLOCK || errno == EINPROGRESS);
+  }
+
+  /* connect to dummy server address */
+
+  connect_deadline = gpr_time_add(gpr_now(), gpr_time_from_micros(1000000));
+
+  grpc_tcp_client_connect(must_fail, &ev, (struct sockaddr *)&addr, addr_len,
+                          connect_deadline);
+  /* Make sure the event doesn't trigger early */
+  GPR_ASSERT(!gpr_event_wait(
+                 &ev, gpr_time_add(gpr_now(), gpr_time_from_micros(500000))));
+  /* Now wait until it should have triggered */
+  sleep(1);
+
+  /* wait for the connection callback to finish */
+  GPR_ASSERT(gpr_event_wait(&ev, test_deadline()));
+  close(svr_fd);
+  for (i = 0; i < NUM_CLIENT_CONNECTS; ++i) {
+    close(client_fd[i]);
+  }
+}
+
+int main(void) {
+  grpc_iomgr_init();
+  test_succeeds();
+  test_fails();
+  test_times_out();
+  grpc_iomgr_shutdown();
+  return 0;
+}
diff --git a/test/core/endpoint/tcp_test.c b/test/core/iomgr/tcp_posix_test.c
similarity index 94%
rename from test/core/endpoint/tcp_test.c
rename to test/core/iomgr/tcp_posix_test.c
index c703f92..52856b6 100644
--- a/test/core/endpoint/tcp_test.c
+++ b/test/core/iomgr/tcp_posix_test.c
@@ -31,7 +31,7 @@
  *
  */
 
-#include "src/core/endpoint/tcp.h"
+#include "src/core/iomgr/tcp_posix.h"
 
 #include <errno.h>
 #include <fcntl.h>
@@ -41,7 +41,6 @@
 #include <sys/socket.h>
 #include <unistd.h>
 
-#include "src/core/eventmanager/em.h"
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
 #include <grpc/support/time.h>
@@ -65,8 +64,6 @@
 
  */
 
-grpc_em g_em;
-
 static void create_sockets(int sv[2]) {
   int flags;
   GPR_ASSERT(socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0);
@@ -165,7 +162,6 @@
 /* Write to a socket, then read from it using the grpc_tcp API. */
 static void read_test(ssize_t num_bytes, ssize_t slice_size) {
   int sv[2];
-  grpc_em em;
   grpc_endpoint *ep;
   struct read_socket_state state;
   ssize_t written_bytes;
@@ -176,9 +172,8 @@
           slice_size);
 
   create_sockets(sv);
-  grpc_em_init(&em);
 
-  ep = grpc_tcp_create_dbg(sv[1], &em, slice_size);
+  ep = grpc_tcp_create(grpc_fd_create(sv[1]), slice_size);
   written_bytes = fill_socket_partial(sv[0], num_bytes);
   gpr_log(GPR_INFO, "Wrote %d bytes", written_bytes);
 
@@ -202,7 +197,6 @@
 
   grpc_endpoint_destroy(ep);
 
-  grpc_em_destroy(&em);
   gpr_mu_destroy(&state.mu);
   gpr_cv_destroy(&state.cv);
 }
@@ -211,7 +205,6 @@
    API. */
 static void large_read_test(ssize_t slice_size) {
   int sv[2];
-  grpc_em em;
   grpc_endpoint *ep;
   struct read_socket_state state;
   ssize_t written_bytes;
@@ -221,9 +214,8 @@
   gpr_log(GPR_INFO, "Start large read test, slice size %d", slice_size);
 
   create_sockets(sv);
-  grpc_em_init(&em);
 
-  ep = grpc_tcp_create_dbg(sv[1], &em, slice_size);
+  ep = grpc_tcp_create(grpc_fd_create(sv[1]), slice_size);
   written_bytes = fill_socket(sv[0]);
   gpr_log(GPR_INFO, "Wrote %d bytes", written_bytes);
 
@@ -247,7 +239,6 @@
 
   grpc_endpoint_destroy(ep);
 
-  grpc_em_destroy(&em);
   gpr_mu_destroy(&state.mu);
   gpr_cv_destroy(&state.cv);
 }
@@ -349,7 +340,6 @@
    socket in parallel with the read. */
 static void write_test(ssize_t num_bytes, ssize_t slice_size) {
   int sv[2];
-  grpc_em em;
   grpc_endpoint *ep;
   struct write_socket_state state;
   ssize_t read_bytes;
@@ -363,9 +353,8 @@
           slice_size);
 
   create_sockets(sv);
-  grpc_em_init(&em);
 
-  ep = grpc_tcp_create(sv[1], &em);
+  ep = grpc_tcp_create(grpc_fd_create(sv[1]), GRPC_TCP_DEFAULT_READ_SLICE_SIZE);
 
   gpr_mu_init(&state.mu);
   gpr_cv_init(&state.cv);
@@ -392,7 +381,6 @@
   }
 
   grpc_endpoint_destroy(ep);
-  grpc_em_destroy(&em);
   gpr_mu_destroy(&state.mu);
   gpr_cv_destroy(&state.cv);
   gpr_free(slices);
@@ -410,7 +398,6 @@
    socket in parallel with the read. */
 static void write_error_test(ssize_t num_bytes, ssize_t slice_size) {
   int sv[2];
-  grpc_em em;
   grpc_endpoint *ep;
   struct write_socket_state state;
   size_t num_blocks;
@@ -423,9 +410,8 @@
           num_bytes, slice_size);
 
   create_sockets(sv);
-  grpc_em_init(&em);
 
-  ep = grpc_tcp_create(sv[1], &em);
+  ep = grpc_tcp_create(grpc_fd_create(sv[1]), GRPC_TCP_DEFAULT_READ_SLICE_SIZE);
   close(sv[0]);
 
   gpr_mu_init(&state.mu);
@@ -456,7 +442,6 @@
   }
 
   grpc_endpoint_destroy(ep);
-  grpc_em_destroy(&em);
   gpr_mu_destroy(&state.mu);
   gpr_cv_destroy(&state.cv);
   free(slices);
@@ -487,7 +472,7 @@
   }
 }
 
-static void clean_up() { grpc_em_destroy(&g_em); }
+static void clean_up() {}
 
 static grpc_endpoint_test_fixture create_fixture_tcp_socketpair(
     size_t slice_size) {
@@ -495,9 +480,8 @@
   grpc_endpoint_test_fixture f;
 
   create_sockets(sv);
-  grpc_em_init(&g_em);
-  f.client_ep = grpc_tcp_create_dbg(sv[0], &g_em, slice_size);
-  f.server_ep = grpc_tcp_create(sv[1], &g_em);
+  f.client_ep = grpc_tcp_create(grpc_fd_create(sv[0]), slice_size);
+  f.server_ep = grpc_tcp_create(grpc_fd_create(sv[1]), slice_size);
 
   return f;
 }
@@ -508,10 +492,12 @@
 
 int main(int argc, char **argv) {
   grpc_test_init(argc, argv);
+  grpc_iomgr_init();
   /* disable SIGPIPE */
   signal(SIGPIPE, SIG_IGN);
   run_tests();
   grpc_endpoint_tests(configs[0]);
+  grpc_iomgr_shutdown();
 
   return 0;
 }
diff --git a/test/core/endpoint/tcp_server_test.c b/test/core/iomgr/tcp_server_posix_test.c
similarity index 90%
rename from test/core/endpoint/tcp_server_test.c
rename to test/core/iomgr/tcp_server_posix_test.c
index 6208915..cb77a88 100644
--- a/test/core/endpoint/tcp_server_test.c
+++ b/test/core/iomgr/tcp_server_posix_test.c
@@ -31,8 +31,8 @@
  *
  */
 
-#include "src/core/endpoint/tcp_server.h"
-#include "src/core/eventmanager/em.h"
+#include "src/core/iomgr/tcp_server.h"
+#include "src/core/iomgr/iomgr.h"
 #include <grpc/support/log.h>
 #include <grpc/support/sync.h>
 #include <grpc/support/time.h>
@@ -44,8 +44,6 @@
 
 #define LOG_TEST() gpr_log(GPR_INFO, "%s", __FUNCTION__)
 
-static grpc_em em;
-
 static gpr_mu mu;
 static gpr_cv cv;
 static int nconnects = 0;
@@ -61,12 +59,12 @@
 }
 
 static void test_no_op() {
-  grpc_tcp_server *s = grpc_tcp_server_create(&em);
+  grpc_tcp_server *s = grpc_tcp_server_create();
   grpc_tcp_server_destroy(s);
 }
 
 static void test_no_op_with_start() {
-  grpc_tcp_server *s = grpc_tcp_server_create(&em);
+  grpc_tcp_server *s = grpc_tcp_server_create();
   LOG_TEST();
   grpc_tcp_server_start(s, on_connect, NULL);
   grpc_tcp_server_destroy(s);
@@ -74,7 +72,7 @@
 
 static void test_no_op_with_port() {
   struct sockaddr_in addr;
-  grpc_tcp_server *s = grpc_tcp_server_create(&em);
+  grpc_tcp_server *s = grpc_tcp_server_create();
   LOG_TEST();
 
   memset(&addr, 0, sizeof(addr));
@@ -87,7 +85,7 @@
 
 static void test_no_op_with_port_and_start() {
   struct sockaddr_in addr;
-  grpc_tcp_server *s = grpc_tcp_server_create(&em);
+  grpc_tcp_server *s = grpc_tcp_server_create();
   LOG_TEST();
 
   memset(&addr, 0, sizeof(addr));
@@ -104,7 +102,7 @@
   struct sockaddr_storage addr;
   socklen_t addr_len = sizeof(addr);
   int svrfd, clifd;
-  grpc_tcp_server *s = grpc_tcp_server_create(&em);
+  grpc_tcp_server *s = grpc_tcp_server_create();
   int nconnects_before;
   gpr_timespec deadline;
   int i;
@@ -151,7 +149,7 @@
 
 int main(int argc, char **argv) {
   grpc_test_init(argc, argv);
-  grpc_em_init(&em);
+  grpc_iomgr_init();
   gpr_mu_init(&mu);
   gpr_cv_init(&cv);
 
@@ -162,7 +160,7 @@
   test_connect(1);
   test_connect(10);
 
-  grpc_em_destroy(&em);
+  grpc_iomgr_shutdown();
   gpr_mu_destroy(&mu);
   gpr_cv_destroy(&cv);
   return 0;
diff --git a/test/core/network_benchmarks/low_level_ping_pong.c b/test/core/network_benchmarks/low_level_ping_pong.c
index 93c66a9..543fb27 100644
--- a/test/core/network_benchmarks/low_level_ping_pong.c
+++ b/test/core/network_benchmarks/low_level_ping_pong.c
@@ -49,7 +49,7 @@
 #endif
 #include <sys/socket.h>
 
-#include "src/core/endpoint/socket_utils.h"
+#include "src/core/iomgr/socket_utils_posix.h"
 #include <grpc/support/cmdline.h>
 #include <grpc/support/histogram.h>
 #include <grpc/support/log.h>
diff --git a/test/core/surface/completion_queue_test.c b/test/core/surface/completion_queue_test.c
index 6df159f..77decda 100644
--- a/test/core/surface/completion_queue_test.c
+++ b/test/core/surface/completion_queue_test.c
@@ -33,12 +33,12 @@
 
 #include "src/core/surface/completion_queue.h"
 
+#include "src/core/iomgr/iomgr.h"
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
 #include <grpc/support/thd.h>
 #include <grpc/support/time.h>
 #include <grpc/support/useful.h>
-#include "src/core/surface/surface_em.h"
 #include "test/core/util/test_config.h"
 
 #define LOG_TEST() gpr_log(GPR_INFO, "%s", __FUNCTION__)
@@ -417,7 +417,7 @@
 
 int main(int argc, char **argv) {
   grpc_test_init(argc, argv);
-  grpc_surface_em_init();
+  grpc_iomgr_init();
   test_no_op();
   test_wait_empty();
   test_cq_end_read();
@@ -430,6 +430,6 @@
   test_threading(1, 10);
   test_threading(10, 1);
   test_threading(10, 10);
-  grpc_surface_em_shutdown();
+  grpc_iomgr_shutdown();
   return 0;
 }
diff --git a/test/core/transport/chttp2_transport_end2end_test.c b/test/core/transport/chttp2_transport_end2end_test.c
index 4a16789..30d2a17 100644
--- a/test/core/transport/chttp2_transport_end2end_test.c
+++ b/test/core/transport/chttp2_transport_end2end_test.c
@@ -42,38 +42,23 @@
 #include <unistd.h>
 
 #include "test/core/util/test_config.h"
-#include "src/core/eventmanager/em.h"
+#include "src/core/iomgr/iomgr.h"
+#include "src/core/iomgr/endpoint_pair.h"
 #include "src/core/transport/chttp2_transport.h"
 #include <grpc/support/log.h>
 
-static grpc_em em;
-
-static void create_sockets(int sv[2]) {
-  int flags;
-  GPR_ASSERT(socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0);
-  flags = fcntl(sv[0], F_GETFL, 0);
-  GPR_ASSERT(fcntl(sv[0], F_SETFL, flags | O_NONBLOCK) == 0);
-  flags = fcntl(sv[1], F_GETFL, 0);
-  GPR_ASSERT(fcntl(sv[1], F_SETFL, flags | O_NONBLOCK) == 0);
-}
-
 /* Wrapper to create an http2 transport pair */
 static int create_http2_transport_for_test(
     grpc_transport_setup_callback client_setup_transport,
     void *client_setup_arg,
     grpc_transport_setup_callback server_setup_transport,
     void *server_setup_arg, size_t slice_size, grpc_mdctx *mdctx) {
-  int sv[2];
-  grpc_endpoint *svr_ep, *cli_ep;
-
-  create_sockets(sv);
-  svr_ep = grpc_tcp_create_dbg(sv[1], &em, slice_size);
-  cli_ep = grpc_tcp_create_dbg(sv[0], &em, slice_size);
+  grpc_endpoint_pair p = grpc_iomgr_create_endpoint_pair(1);
 
   grpc_create_chttp2_transport(client_setup_transport, client_setup_arg, NULL,
-                               cli_ep, NULL, 0, mdctx, 1);
+                               p.client, NULL, 0, mdctx, 1);
   grpc_create_chttp2_transport(server_setup_transport, server_setup_arg, NULL,
-                               svr_ep, NULL, 0, mdctx, 0);
+                               p.server, NULL, 0, mdctx, 0);
 
   return 0;
 }
@@ -126,13 +111,13 @@
   signal(SIGPIPE, SIG_IGN);
 
   grpc_test_init(argc, argv);
-  grpc_em_init(&em);
+  grpc_iomgr_init();
 
   for (i = 0; i < sizeof(fixture_configs) / sizeof(*fixture_configs); i++) {
     grpc_transport_end2end_tests(&fixture_configs[i]);
   }
 
-  grpc_em_destroy(&em);
+  grpc_iomgr_shutdown();
 
   gpr_log(GPR_INFO, "exiting");
   return 0;
diff --git a/vsprojects/vs2013/grpc.vcxproj b/vsprojects/vs2013/grpc.vcxproj
index 993dff3..0ecb823 100644
--- a/vsprojects/vs2013/grpc.vcxproj
+++ b/vsprojects/vs2013/grpc.vcxproj
@@ -96,17 +96,24 @@
     <ClInclude Include="..\..\src\core\compression\algorithm.h" />
     <ClInclude Include="..\..\src\core\compression\message_compress.h" />
     <ClInclude Include="..\..\src\core\endpoint\endpoint.h" />
-    <ClInclude Include="..\..\src\core\endpoint\resolve_address.h" />
-    <ClInclude Include="..\..\src\core\endpoint\secure_endpoint.h" />
-    <ClInclude Include="..\..\src\core\endpoint\socket_utils.h" />
-    <ClInclude Include="..\..\src\core\endpoint\tcp_client.h" />
-    <ClInclude Include="..\..\src\core\endpoint\tcp.h" />
-    <ClInclude Include="..\..\src\core\endpoint\tcp_server.h" />
-    <ClInclude Include="..\..\src\core\eventmanager\em.h" />
     <ClInclude Include="..\..\src\core\httpcli\format_request.h" />
     <ClInclude Include="..\..\src\core\httpcli\httpcli.h" />
     <ClInclude Include="..\..\src\core\httpcli\httpcli_security_context.h" />
     <ClInclude Include="..\..\src\core\httpcli\parser.h" />
+    <ClInclude Include="..\..\src\core\iomgr\alarm.h" />
+    <ClInclude Include="..\..\src\core\iomgr\endpoint_pair.h" />
+    <ClInclude Include="..\..\src\core\iomgr\iomgr_completion_queue_interface.h" />
+    <ClInclude Include="..\..\src\core\iomgr\iomgr.h" />
+    <ClInclude Include="..\..\src\core\iomgr\iomgr_libevent.h" />
+    <ClInclude Include="..\..\src\core\iomgr\resolve_address.h" />
+    <ClInclude Include="..\..\src\core\iomgr\sockaddr.h" />
+    <ClInclude Include="..\..\src\core\iomgr\sockaddr_posix.h" />
+    <ClInclude Include="..\..\src\core\iomgr\sockaddr_utils.h" />
+    <ClInclude Include="..\..\src\core\iomgr\sockaddr_win32.h" />
+    <ClInclude Include="..\..\src\core\iomgr\socket_utils_posix.h" />
+    <ClInclude Include="..\..\src\core\iomgr\tcp_client.h" />
+    <ClInclude Include="..\..\src\core\iomgr\tcp_posix.h" />
+    <ClInclude Include="..\..\src\core\iomgr\tcp_server.h" />
     <ClInclude Include="..\..\src\core\security\auth.h" />
     <ClInclude Include="..\..\src\core\security\credentials.h" />
     <ClInclude Include="..\..\src\core\security\google_root_certs.h" />
@@ -120,9 +127,9 @@
     <ClInclude Include="..\..\src\core\surface\call.h" />
     <ClInclude Include="..\..\src\core\surface\channel.h" />
     <ClInclude Include="..\..\src\core\surface\client.h" />
-    <ClInclude Include="..\..\src\core\surface\lame_client.h" />
     <ClInclude Include="..\..\src\core\surface\completion_queue.h" />
     <ClInclude Include="..\..\src\core\surface\event_string.h" />
+    <ClInclude Include="..\..\src\core\surface\lame_client.h" />
     <ClInclude Include="..\..\src\core\surface\server.h" />
     <ClInclude Include="..\..\src\core\surface\surface_em.h" />
     <ClInclude Include="..\..\src\core\surface\surface_trace.h" />
@@ -136,8 +143,8 @@
     <ClInclude Include="..\..\src\core\transport\chttp2\frame_window_update.h" />
     <ClInclude Include="..\..\src\core\transport\chttp2\hpack_parser.h" />
     <ClInclude Include="..\..\src\core\transport\chttp2\hpack_table.h" />
-    <ClInclude Include="..\..\src\core\transport\chttp2\huffsyms.h" />
     <ClInclude Include="..\..\src\core\transport\chttp2\http2_errors.h" />
+    <ClInclude Include="..\..\src\core\transport\chttp2\huffsyms.h" />
     <ClInclude Include="..\..\src\core\transport\chttp2\status_conversion.h" />
     <ClInclude Include="..\..\src\core\transport\chttp2\stream_encoder.h" />
     <ClInclude Include="..\..\src\core\transport\chttp2\stream_map.h" />
@@ -185,23 +192,61 @@
     </ClCompile>
     <ClCompile Include="..\..\src\core\endpoint\endpoint.c">
     </ClCompile>
-    <ClCompile Include="..\..\src\core\endpoint\resolve_address.c">
+    <ClCompile Include="..\..\src\core\endpoint\secure_endpoint.c">
     </ClCompile>
-    <ClCompile Include="..\..\src\core\endpoint\socket_utils.c">
+    <ClCompile Include="..\..\src\core\httpcli\format_request.c">
     </ClCompile>
-    <ClCompile Include="..\..\src\core\endpoint\socket_utils_linux.c">
+    <ClCompile Include="..\..\src\core\httpcli\httpcli.c">
     </ClCompile>
-    <ClCompile Include="..\..\src\core\endpoint\socket_utils_posix.c">
+    <ClCompile Include="..\..\src\core\httpcli\httpcli_security_context.c">
     </ClCompile>
-    <ClCompile Include="..\..\src\core\endpoint\tcp.c">
+    <ClCompile Include="..\..\src\core\httpcli\parser.c">
     </ClCompile>
-    <ClCompile Include="..\..\src\core\endpoint\tcp_client.c">
+    <ClCompile Include="..\..\src\core\iomgr\endpoint_pair_posix.c">
     </ClCompile>
-    <ClCompile Include="..\..\src\core\endpoint\tcp_server.c">
+    <ClCompile Include="..\..\src\core\iomgr\iomgr_libevent.c">
     </ClCompile>
-    <ClCompile Include="..\..\src\core\eventmanager\em.c">
+    <ClCompile Include="..\..\src\core\iomgr\iomgr_libevent_use_threads.c">
     </ClCompile>
-    <ClCompile Include="..\..\src\core\eventmanager\em_posix.c">
+    <ClCompile Include="..\..\src\core\iomgr\resolve_address_posix.c">
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\iomgr\sockaddr_utils.c">
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\iomgr\socket_utils_common_posix.c">
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\iomgr\socket_utils_linux.c">
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\iomgr\socket_utils_posix.c">
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\iomgr\tcp_client_posix.c">
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\iomgr\tcp_posix.c">
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\iomgr\tcp_server_posix.c">
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\security\auth.c">
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\security\credentials.c">
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\security\google_root_certs.c">
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\security\secure_transport_setup.c">
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\security\security_context.c">
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\security\server_secure_chttp2.c">
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\statistics\census_init.c">
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\statistics\census_rpc_stats.c">
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\statistics\census_tracing.c">
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\statistics\hash_table.c">
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\statistics\log.c">
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\statistics\window_stats.c">
     </ClCompile>
     <ClCompile Include="..\..\src\core\surface\byte_buffer.c">
     </ClCompile>
@@ -215,14 +260,18 @@
     </ClCompile>
     <ClCompile Include="..\..\src\core\surface\client.c">
     </ClCompile>
-    <ClCompile Include="..\..\src\core\surface\lame_client.c">
-    </ClCompile>
     <ClCompile Include="..\..\src\core\surface\completion_queue.c">
     </ClCompile>
     <ClCompile Include="..\..\src\core\surface\event_string.c">
     </ClCompile>
     <ClCompile Include="..\..\src\core\surface\init.c">
     </ClCompile>
+    <ClCompile Include="..\..\src\core\surface\lame_client.c">
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\surface\secure_channel_create.c">
+    </ClCompile>
+    <ClCompile Include="..\..\src\core\surface\secure_server_create.c">
+    </ClCompile>
     <ClCompile Include="..\..\src\core\surface\server.c">
     </ClCompile>
     <ClCompile Include="..\..\src\core\surface\server_chttp2.c">
@@ -261,60 +310,22 @@
     </ClCompile>
     <ClCompile Include="..\..\src\core\transport\chttp2\timeout_encoding.c">
     </ClCompile>
-    <ClCompile Include="..\..\src\core\transport\chttp2\varint.c">
-    </ClCompile>
     <ClCompile Include="..\..\src\core\transport\chttp2_transport.c">
     </ClCompile>
+    <ClCompile Include="..\..\src\core\transport\chttp2\varint.c">
+    </ClCompile>
     <ClCompile Include="..\..\src\core\transport\metadata.c">
     </ClCompile>
     <ClCompile Include="..\..\src\core\transport\stream_op.c">
     </ClCompile>
     <ClCompile Include="..\..\src\core\transport\transport.c">
     </ClCompile>
-    <ClCompile Include="..\..\src\core\statistics\census_init.c">
-    </ClCompile>
-    <ClCompile Include="..\..\src\core\statistics\census_rpc_stats.c">
-    </ClCompile>
-    <ClCompile Include="..\..\src\core\statistics\census_tracing.c">
-    </ClCompile>
-    <ClCompile Include="..\..\src\core\statistics\log.c">
-    </ClCompile>
-    <ClCompile Include="..\..\src\core\statistics\window_stats.c">
-    </ClCompile>
-    <ClCompile Include="..\..\src\core\statistics\hash_table.c">
-    </ClCompile>
-    <ClCompile Include="..\..\src\core\httpcli\format_request.c">
-    </ClCompile>
-    <ClCompile Include="..\..\src\core\httpcli\httpcli.c">
-    </ClCompile>
-    <ClCompile Include="..\..\src\core\httpcli\httpcli_security_context.c">
-    </ClCompile>
-    <ClCompile Include="..\..\src\core\httpcli\parser.c">
-    </ClCompile>
-    <ClCompile Include="..\..\src\core\security\auth.c">
-    </ClCompile>
-    <ClCompile Include="..\..\src\core\security\credentials.c">
-    </ClCompile>
-    <ClCompile Include="..\..\src\core\security\google_root_certs.c">
-    </ClCompile>
-    <ClCompile Include="..\..\src\core\security\secure_transport_setup.c">
-    </ClCompile>
-    <ClCompile Include="..\..\src\core\security\security_context.c">
-    </ClCompile>
-    <ClCompile Include="..\..\src\core\security\server_secure_chttp2.c">
-    </ClCompile>
-    <ClCompile Include="..\..\src\core\surface\secure_channel_create.c">
-    </ClCompile>
-    <ClCompile Include="..\..\src\core\surface\secure_server_create.c">
-    </ClCompile>
-    <ClCompile Include="..\..\src\core\endpoint\secure_endpoint.c">
-    </ClCompile>
-    <ClCompile Include="..\..\src\core\tsi\transport_security.c">
-    </ClCompile>
     <ClCompile Include="..\..\src\core\tsi\fake_transport_security.c">
     </ClCompile>
     <ClCompile Include="..\..\src\core\tsi\ssl_transport_security.c">
     </ClCompile>
+    <ClCompile Include="..\..\src\core\tsi\transport_security.c">
+    </ClCompile>
     <ClCompile Include="..\..\third_party\cJSON\cJSON.c">
     </ClCompile>
   </ItemGroup>