Merge pull request #20742 from lgeiger:keras-init

PiperOrigin-RevId: 204915130
diff --git a/configure.py b/configure.py
index 8930c3a..eaff83d 100644
--- a/configure.py
+++ b/configure.py
@@ -35,8 +35,8 @@
 
 _DEFAULT_CUDA_VERSION = '9.0'
 _DEFAULT_CUDNN_VERSION = '7'
-_DEFAULT_NCCL_VERSION = '1.3'
-_DEFAULT_CUDA_COMPUTE_CAPABILITIES = '3.5,5.2'
+_DEFAULT_NCCL_VERSION = '2.2'
+_DEFAULT_CUDA_COMPUTE_CAPABILITIES = '3.5,7.0'
 _DEFAULT_CUDA_PATH = '/usr/local/cuda'
 _DEFAULT_CUDA_PATH_LINUX = '/opt/cuda'
 _DEFAULT_CUDA_PATH_WIN = ('C:/Program Files/NVIDIA GPU Computing '
@@ -1097,8 +1097,10 @@
     raise ValueError('Currently NCCL is only supported on Linux platforms.')
 
   ask_nccl_version = (
-      'Please specify the NCCL version you want to use. '
-      '[Leave empty to default to NCCL %s]: ') % _DEFAULT_NCCL_VERSION
+      'Please specify the NCCL version you want to use. If NCCL %s is not '
+      'installed, you can use version 1.3, which can be fetched '
+      'automatically, but it may have worse performance with multiple GPUs. '
+      '[Default is %s]: ') % (_DEFAULT_NCCL_VERSION, _DEFAULT_NCCL_VERSION)
 
   for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
     tf_nccl_version = get_from_env_or_user_or_default(
@@ -1136,9 +1138,7 @@
 
     nccl_lib_path = os.path.join(nccl_install_path, nccl_lib_path)
     nccl_hdr_path = os.path.join(nccl_install_path, 'include/nccl.h')
-    nccl_license_path = os.path.join(nccl_install_path, 'NCCL-SLA.txt')
-    if os.path.exists(nccl_lib_path) and os.path.exists(
-        nccl_hdr_path) and os.path.exists(nccl_license_path):
+    if os.path.exists(nccl_lib_path) and os.path.exists(nccl_hdr_path):
       # Set NCCL_INSTALL_PATH
       environ_cp['NCCL_INSTALL_PATH'] = nccl_install_path
       write_action_env_to_bazelrc('NCCL_INSTALL_PATH', nccl_install_path)
@@ -1449,11 +1449,23 @@
     # TODO(ibiryukov): Investigate using clang as a cpu or cuda compiler on
     # Windows.
     environ_cp['TF_DOWNLOAD_CLANG'] = '0'
+    environ_cp['TF_ENABLE_XLA'] = '0'
+    environ_cp['TF_NEED_GDR'] = '0'
+    environ_cp['TF_NEED_VERBS'] = '0'
+    environ_cp['TF_NEED_MPI'] = '0'
+    environ_cp['TF_SET_ANDROID_WORKSPACE'] = '0'
 
   if is_macos():
     environ_cp['TF_NEED_JEMALLOC'] = '0'
     environ_cp['TF_NEED_TENSORRT'] = '0'
 
+  # The numpy package on ppc64le uses OpenBLAS, which has multi-threading
+  # issues that lead to incorrect answers. Set OMP_NUM_THREADS=1 at
+  # runtime so that TensorFlow test cases that compare numpy results
+  # against TensorFlow results succeed.
+  if is_ppc64le():
+    write_action_env_to_bazelrc("OMP_NUM_THREADS", 1)
+
   set_build_var(environ_cp, 'TF_NEED_JEMALLOC', 'jemalloc as malloc',
                 'with_jemalloc', True)
   set_build_var(environ_cp, 'TF_NEED_GCP', 'Google Cloud Platform',
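As a point of reference for the ppc64le change above, a minimal sketch of the kind of line this helper is assumed to append to .tf_configure.bazelrc — the helper's internals are not part of this diff, so the details here are illustrative only:

```python
# Hypothetical sketch; the real write_action_env_to_bazelrc in configure.py
# may differ in quoting and in which bazelrc file it targets.
def write_action_env_to_bazelrc(var_name, var_value):
    # Appends, e.g.: build --action_env OMP_NUM_THREADS="1"
    with open('.tf_configure.bazelrc', 'a') as f:
        f.write('build --action_env %s="%s"\n' % (var_name, var_value))
```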
diff --git a/tensorflow/BUILD b/tensorflow/BUILD
index 51eea94..518c2b0 100644
--- a/tensorflow/BUILD
+++ b/tensorflow/BUILD
@@ -20,7 +20,7 @@
     "tf_additional_binary_deps",
 )
 load(
-    "//tensorflow/tools/api/generator:api_gen.bzl",
+    "//tensorflow/python/tools/api/generator:api_gen.bzl",
     "gen_api_init_files",  # @unused
 )
 
diff --git a/tensorflow/c/eager/c_api.cc b/tensorflow/c/eager/c_api.cc
index 82ca2be..6c51053 100644
--- a/tensorflow/c/eager/c_api.cc
+++ b/tensorflow/c/eager/c_api.cc
@@ -664,17 +664,17 @@
 
 const tensorflow::Tensor* TFE_TensorHandleUnderlyingTensorInHostMemory(
     TFE_TensorHandle* h, TF_Status* status) {
-  tensorflow::Device* d = nullptr;
-  tensorflow::Device* op_device = nullptr;
-  const tensorflow::Tensor* t = nullptr;
-  status->status = h->handle->TensorAndDevice(&t, &d, &op_device);
-  if (!status->status.ok()) return nullptr;
-  if (d != nullptr) {
+  if (!h->handle->OnHostCPU()) {
     status->status = tensorflow::errors::FailedPrecondition(
         "TFE_TensorHandle is placed in device (not host) memory. Cannot return "
         "a tensorflow::Tensor");
     return nullptr;
   }
+  tensorflow::Device* d = nullptr;
+  tensorflow::Device* op_device = nullptr;
+  const tensorflow::Tensor* t = nullptr;
+  status->status = h->handle->TensorAndDevice(&t, &d, &op_device);
+  if (!status->status.ok()) return nullptr;
   return t;
 }
 
diff --git a/tensorflow/c/python_api.cc b/tensorflow/c/python_api.cc
index e18fdf6..8486b58 100644
--- a/tensorflow/c/python_api.cc
+++ b/tensorflow/c/python_api.cc
@@ -155,7 +155,7 @@
     tensorflow::shape_inference::ShapeHandle shape;
     status->status =
         ic->MakeShapeFromShapeProto(shape_and_type_proto.shape(), &shape);
-    if (status->status.ok()) return;
+    if (!status->status.ok()) return;
     shapes_and_types.emplace_back(shape, shape_and_type_proto.dtype());
   }
   ic->set_output_handle_shapes_and_types(output.index, shapes_and_types);
diff --git a/tensorflow/cc/saved_model/BUILD b/tensorflow/cc/saved_model/BUILD
index 06a3be1..730b1b6 100644
--- a/tensorflow/cc/saved_model/BUILD
+++ b/tensorflow/cc/saved_model/BUILD
@@ -34,6 +34,35 @@
 )
 
 cc_library(
+    name = "reader",
+    srcs = ["reader.cc"],
+    hdrs = ["reader.h"],
+    deps = [
+        ":constants",
+        "//tensorflow/core:lib",
+        "//tensorflow/core:protos_all_cc",
+    ],
+)
+
+tf_cc_test(
+    name = "reader_test",
+    srcs = ["reader_test.cc"],
+    data = [
+        ":saved_model_half_plus_two",
+    ],
+    linkstatic = 1,
+    deps = [
+        ":constants",
+        ":reader",
+        ":tag_constants",
+        "//tensorflow/core:lib",
+        "//tensorflow/core:test",
+        "//tensorflow/core:test_main",
+        "//tensorflow/core:testlib",
+    ],
+)
+
+cc_library(
     name = "loader",
     hdrs = ["loader.h"],
     deps = [
@@ -54,6 +83,7 @@
     hdrs = ["loader.h"],
     deps = [
         ":constants",
+        ":reader",
     ] + if_not_mobile([
         "//tensorflow/core:core_cpu",
         "//tensorflow/core:framework",
diff --git a/tensorflow/cc/saved_model/loader.cc b/tensorflow/cc/saved_model/loader.cc
index faa1e37..07807ed 100644
--- a/tensorflow/cc/saved_model/loader.cc
+++ b/tensorflow/cc/saved_model/loader.cc
@@ -18,8 +18,10 @@
 #include <unordered_set>
 
 #include "tensorflow/cc/saved_model/constants.h"
+#include "tensorflow/cc/saved_model/reader.h"
 #include "tensorflow/core/lib/io/path.h"
 #include "tensorflow/core/lib/monitoring/counter.h"
+#include "tensorflow/core/lib/strings/str_util.h"
 #include "tensorflow/core/lib/strings/strcat.h"
 #include "tensorflow/core/platform/env.h"
 #include "tensorflow/core/platform/protobuf_internal.h"
@@ -43,56 +45,6 @@
 constexpr char kLoadAttemptFail[] = "fail";
 constexpr char kLoadAttemptSuccess[] = "success";
 
-Status ReadSavedModel(const string& export_dir, SavedModel* saved_model_proto) {
-  const string saved_model_pb_path =
-      io::JoinPath(export_dir, kSavedModelFilenamePb);
-  if (Env::Default()->FileExists(saved_model_pb_path).ok()) {
-    return ReadBinaryProto(Env::Default(), saved_model_pb_path,
-                           saved_model_proto);
-  }
-  const string saved_model_pbtxt_path =
-      io::JoinPath(export_dir, kSavedModelFilenamePbTxt);
-  if (Env::Default()->FileExists(saved_model_pbtxt_path).ok()) {
-    return ReadTextProto(Env::Default(), saved_model_pbtxt_path,
-                         saved_model_proto);
-  }
-  return Status(error::Code::NOT_FOUND,
-                "Could not find SavedModel .pb or .pbtxt at supplied export "
-                "directory path: " +
-                    export_dir);
-}
-
-string GetTagsAsString(const std::unordered_set<string>& tags) {
-  string tags_as_string = "{ ";
-  for (const string& tag : tags) {
-    tags_as_string = strings::StrCat(tags_as_string, tag, " ");
-  }
-  tags_as_string = strings::StrCat(tags_as_string, "}");
-  return tags_as_string;
-}
-
-Status FindMetaGraphDefToLoad(const SavedModel& saved_model_proto,
-                              const std::unordered_set<string>& tags,
-                              MetaGraphDef* meta_graph_def_to_load) {
-  for (const MetaGraphDef& meta_graph_def : saved_model_proto.meta_graphs()) {
-    // Get tags from the meta_graph_def.
-    std::unordered_set<string> graph_tags;
-    for (const string& tag : meta_graph_def.meta_info_def().tags()) {
-      graph_tags.insert(tag);
-    }
-    // Match with the set of tags provided.
-    if (graph_tags == tags) {
-      *meta_graph_def_to_load = meta_graph_def;
-      return Status::OK();
-    }
-  }
-  return Status(error::Code::NOT_FOUND,
-                "Could not find meta graph def matching supplied tags: " +
-                    GetTagsAsString(tags) +
-                    ". To inspect available tag-sets in the SavedModel, please "
-                    "use the SavedModel CLI: `saved_model_cli`");
-}
-
 Status LoadMetaGraphIntoSession(const MetaGraphDef& meta_graph_def,
                                 const SessionOptions& session_options,
                                 std::unique_ptr<Session>* session) {
@@ -235,18 +187,8 @@
                               const string& export_dir,
                               const std::unordered_set<string>& tags,
                               SavedModelBundle* const bundle) {
-  if (!MaybeSavedModelDirectory(export_dir)) {
-    return Status(error::Code::NOT_FOUND,
-                  "SavedModel not found in export directory: " + export_dir);
-  }
-  LOG(INFO) << "Loading SavedModel with tags: " << GetTagsAsString(tags)
-            << "; from: " << export_dir;
-
-  SavedModel saved_model_proto;
-  TF_RETURN_IF_ERROR(ReadSavedModel(export_dir, &saved_model_proto));
-
-  TF_RETURN_IF_ERROR(
-      FindMetaGraphDefToLoad(saved_model_proto, tags, &bundle->meta_graph_def));
+  TF_RETURN_IF_ERROR(ReadMetaGraphDefFromSavedModel(export_dir, tags,
+                                                    &bundle->meta_graph_def));
 
   TF_RETURN_IF_ERROR(LoadMetaGraphIntoSession(
       bundle->meta_graph_def, session_options, &bundle->session));
@@ -288,8 +230,8 @@
     return end_microseconds - start_microseconds;
   }();
   auto log_and_count = [&](const string& status_str) {
-    LOG(INFO) << "SavedModel load for tags " << GetTagsAsString(tags)
-              << "; Status: " << status_str << ". Took "
+    LOG(INFO) << "SavedModel load for tags { " << str_util::Join(tags, " ")
+              << " }; Status: " << status_str << ". Took "
               << load_latency_microsecs << " microseconds.";
     load_attempt_count->GetCell(export_dir, status_str)->IncrementBy(1);
   };
diff --git a/tensorflow/cc/saved_model/reader.cc b/tensorflow/cc/saved_model/reader.cc
new file mode 100644
index 0000000..2146c8a
--- /dev/null
+++ b/tensorflow/cc/saved_model/reader.cc
@@ -0,0 +1,88 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/cc/saved_model/reader.h"
+
+#include <unordered_set>
+
+#include "tensorflow/cc/saved_model/constants.h"
+#include "tensorflow/core/lib/io/path.h"
+#include "tensorflow/core/lib/strings/str_util.h"
+#include "tensorflow/core/lib/strings/strcat.h"
+#include "tensorflow/core/platform/env.h"
+#include "tensorflow/core/protobuf/saved_model.pb.h"
+
+namespace tensorflow {
+namespace {
+
+Status ReadSavedModel(const string& export_dir, SavedModel* saved_model_proto) {
+  LOG(INFO) << "Reading SavedModel from: " << export_dir;
+
+  const string saved_model_pb_path =
+      io::JoinPath(export_dir, kSavedModelFilenamePb);
+  if (Env::Default()->FileExists(saved_model_pb_path).ok()) {
+    return ReadBinaryProto(Env::Default(), saved_model_pb_path,
+                           saved_model_proto);
+  }
+  const string saved_model_pbtxt_path =
+      io::JoinPath(export_dir, kSavedModelFilenamePbTxt);
+  if (Env::Default()->FileExists(saved_model_pbtxt_path).ok()) {
+    return ReadTextProto(Env::Default(), saved_model_pbtxt_path,
+                         saved_model_proto);
+  }
+  return Status(error::Code::NOT_FOUND,
+                "Could not find SavedModel .pb or .pbtxt at supplied export "
+                "directory path: " +
+                    export_dir);
+}
+
+Status FindMetaGraphDef(const SavedModel& saved_model_proto,
+                        const std::unordered_set<string>& tags,
+                        MetaGraphDef* meta_graph_def) {
+  LOG(INFO) << "Reading meta graph with tags { " << str_util::Join(tags, " ")
+            << " }";
+  for (const MetaGraphDef& graph_def : saved_model_proto.meta_graphs()) {
+    // Get tags from the graph_def.
+    std::unordered_set<string> graph_tags;
+    for (const string& tag : graph_def.meta_info_def().tags()) {
+      graph_tags.insert(tag);
+    }
+    // Match with the set of tags provided.
+    if (graph_tags == tags) {
+      *meta_graph_def = graph_def;
+      return Status::OK();
+    }
+  }
+  return Status(
+      error::Code::NOT_FOUND,
+      strings::StrCat(
+          "Could not find meta graph def matching supplied tags: { ",
+          str_util::Join(tags, " "),
+          " }. To inspect available tag-sets in the SavedModel, please "
+          "use the SavedModel CLI: `saved_model_cli`"));
+}
+
+}  // namespace
+
+Status ReadMetaGraphDefFromSavedModel(const string& export_dir,
+                                      const std::unordered_set<string>& tags,
+                                      MetaGraphDef* const meta_graph_def) {
+  SavedModel saved_model_proto;
+  TF_RETURN_IF_ERROR(ReadSavedModel(export_dir, &saved_model_proto));
+  TF_RETURN_IF_ERROR(FindMetaGraphDef(saved_model_proto, tags, meta_graph_def));
+  return Status::OK();
+}
+
+}  // namespace tensorflow
diff --git a/tensorflow/cc/saved_model/reader.h b/tensorflow/cc/saved_model/reader.h
new file mode 100644
index 0000000..5815108
--- /dev/null
+++ b/tensorflow/cc/saved_model/reader.h
@@ -0,0 +1,39 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+/// Functions to read the SavedModel proto, or parts of it.
+
+#ifndef TENSORFLOW_CC_SAVED_MODEL_READER_H_
+#define TENSORFLOW_CC_SAVED_MODEL_READER_H_
+
+#include <string>
+#include <unordered_set>
+
+#include "tensorflow/core/lib/core/status.h"
+#include "tensorflow/core/protobuf/meta_graph.pb.h"
+
+namespace tensorflow {
+
+// Reads the SavedModel proto from saved_model.pb(txt) in the given directory,
+// finds the MetaGraphDef that matches the given set of tags and writes it to
+// the `meta_graph_def` parameter. Returns a failure status when the SavedModel
+// file does not exist or no MetaGraphDef matches the tags.
+Status ReadMetaGraphDefFromSavedModel(const string& export_dir,
+                                      const std::unordered_set<string>& tags,
+                                      MetaGraphDef* const meta_graph_def);
+
+}  // namespace tensorflow
+
+#endif  // TENSORFLOW_CC_SAVED_MODEL_READER_H_
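The new C++ entry point has a rough Python-side analogue in the TF 1.x loader API; a usage sketch (the export path below is hypothetical):

```python
import tensorflow as tf

export_dir = '/tmp/half_plus_two/00000123'  # hypothetical export path
with tf.Session(graph=tf.Graph()) as sess:
  # Loads the MetaGraphDef whose tag set matches exactly, mirroring the
  # tag-matching behavior of ReadMetaGraphDefFromSavedModel.
  meta_graph_def = tf.saved_model.loader.load(
      sess, [tf.saved_model.tag_constants.SERVING], export_dir)
  print(meta_graph_def.meta_info_def.tags)
```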
diff --git a/tensorflow/cc/saved_model/reader_test.cc b/tensorflow/cc/saved_model/reader_test.cc
new file mode 100644
index 0000000..620e9c2
--- /dev/null
+++ b/tensorflow/cc/saved_model/reader_test.cc
@@ -0,0 +1,108 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/cc/saved_model/reader.h"
+
+#include "tensorflow/cc/saved_model/constants.h"
+#include "tensorflow/cc/saved_model/tag_constants.h"
+#include "tensorflow/core/lib/core/status.h"
+#include "tensorflow/core/lib/core/status_test_util.h"
+#include "tensorflow/core/lib/io/path.h"
+#include "tensorflow/core/lib/strings/str_util.h"
+#include "tensorflow/core/platform/test.h"
+
+namespace tensorflow {
+namespace {
+
+constexpr char kTestDataPbTxt[] =
+    "cc/saved_model/testdata/half_plus_two_pbtxt/00000123";
+constexpr char kTestDataSharded[] =
+    "cc/saved_model/testdata/half_plus_two/00000123";
+
+class ReaderTest : public ::testing::Test {
+ protected:
+  ReaderTest() {}
+
+  void CheckMetaGraphDef(const MetaGraphDef& meta_graph_def) {
+    const auto& tags = meta_graph_def.meta_info_def().tags();
+    EXPECT_TRUE(std::find(tags.begin(), tags.end(), kSavedModelTagServe) !=
+                tags.end());
+    EXPECT_NE(meta_graph_def.meta_info_def().tensorflow_version(), "");
+    EXPECT_EQ(
+        meta_graph_def.signature_def().at("serving_default").method_name(),
+        "tensorflow/serving/predict");
+  }
+};
+
+TEST_F(ReaderTest, TagMatch) {
+  MetaGraphDef meta_graph_def;
+
+  const string export_dir =
+      io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataSharded);
+  TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(export_dir, {kSavedModelTagServe},
+                                              &meta_graph_def));
+  CheckMetaGraphDef(meta_graph_def);
+}
+
+TEST_F(ReaderTest, NoTagMatch) {
+  MetaGraphDef meta_graph_def;
+
+  const string export_dir =
+      io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataSharded);
+  Status st = ReadMetaGraphDefFromSavedModel(export_dir, {"missing-tag"},
+                                             &meta_graph_def);
+  EXPECT_FALSE(st.ok());
+  EXPECT_TRUE(str_util::StrContains(
+      st.error_message(),
+      "Could not find meta graph def matching supplied tags: { missing-tag }"))
+      << st.error_message();
+}
+
+TEST_F(ReaderTest, NoTagMatchMultiple) {
+  MetaGraphDef meta_graph_def;
+
+  const string export_dir =
+      io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataSharded);
+  Status st = ReadMetaGraphDefFromSavedModel(
+      export_dir, {kSavedModelTagServe, "missing-tag"}, &meta_graph_def);
+  EXPECT_FALSE(st.ok());
+  EXPECT_TRUE(str_util::StrContains(
+      st.error_message(),
+      "Could not find meta graph def matching supplied tags: "))
+      << st.error_message();
+}
+
+TEST_F(ReaderTest, PbtxtFormat) {
+  MetaGraphDef meta_graph_def;
+
+  const string export_dir =
+      io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPbTxt);
+  TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(export_dir, {kSavedModelTagServe},
+                                              &meta_graph_def));
+  CheckMetaGraphDef(meta_graph_def);
+}
+
+TEST_F(ReaderTest, InvalidExportPath) {
+  MetaGraphDef meta_graph_def;
+
+  const string export_dir =
+      io::JoinPath(testing::TensorFlowSrcRoot(), "missing-path");
+  Status st = ReadMetaGraphDefFromSavedModel(export_dir, {kSavedModelTagServe},
+                                             &meta_graph_def);
+  EXPECT_FALSE(st.ok());
+}
+
+}  // namespace
+}  // namespace tensorflow
diff --git a/tensorflow/compiler/jit/xla_device_ops.h b/tensorflow/compiler/jit/xla_device_ops.h
index a605335..134dcc1 100644
--- a/tensorflow/compiler/jit/xla_device_ops.h
+++ b/tensorflow/compiler/jit/xla_device_ops.h
@@ -90,6 +90,9 @@
   REGISTER_KERNEL_BUILDER(                                                     \
       Name("ReadVariableOp").Device(DEVICE).HostMemory("resource"),            \
       ReadVariableOp);                                                         \
+  REGISTER_KERNEL_BUILDER(                                                     \
+      Name("DestroyResourceOp").Device(DEVICE).HostMemory("resource"),         \
+      DestroyResourceOp);                                                      \
   REGISTER_KERNEL_BUILDER(Name("Shape")                                        \
                               .Device(DEVICE)                                  \
                               .HostMemory("output")                            \
diff --git a/tensorflow/compiler/tests/BUILD b/tensorflow/compiler/tests/BUILD
index e8e19f0..080bed5 100644
--- a/tensorflow/compiler/tests/BUILD
+++ b/tensorflow/compiler/tests/BUILD
@@ -418,7 +418,7 @@
 
 tf_xla_py_test(
     name = "eager_test",
-    size = "small",
+    size = "large",
     srcs = ["eager_test.py"],
     disabled_backends = [
         # TODO(b/78199195) Support XLA CPU devices in eager runtime
diff --git a/tensorflow/compiler/tests/conv2d_test.py b/tensorflow/compiler/tests/conv2d_test.py
index 98d41ba..f9db103 100644
--- a/tensorflow/compiler/tests/conv2d_test.py
+++ b/tensorflow/compiler/tests/conv2d_test.py
@@ -33,12 +33,9 @@
 from tensorflow.python.ops import nn_ops
 from tensorflow.python.platform import googletest
 
-
 DATA_FORMATS = (
     ("_data_format_NHWC", "NHWC"),
     ("_data_format_NCHW", "NCHW"),
-    ("_data_format_HWNC", "HWNC"),
-    ("_data_format_HWCN", "HWCN"),
 )
 
 
diff --git a/tensorflow/compiler/tests/eager_test.py b/tensorflow/compiler/tests/eager_test.py
index 3524666..6ead15d 100644
--- a/tensorflow/compiler/tests/eager_test.py
+++ b/tensorflow/compiler/tests/eager_test.py
@@ -403,7 +403,7 @@
   def testSliceInDefun(self):
     with self.test_scope():
 
-      @function.defun(compiled=True)
+      @function.defun
       def f(x, y):
         return x[0::2, y:, ...]
 
@@ -418,6 +418,22 @@
       self.assertAllEqual(np.ones([1, 2, 4]), z.numpy())
       self.assertAllEqual((2, 3, 4), dz.shape.as_list())
 
+  def testNestedDefun(self):
+    self.skipTest('Nested defuns do not work on TPU at the moment')
+    with self.test_scope():
+
+      @function.defun
+      def times_two(x):
+        return 2 * x
+
+      @function.defun
+      def two_x_plus_1(x):
+        return times_two(x) + 1
+
+      x = constant_op.constant([2, 3, 4])
+      y = two_x_plus_1(x)
+      self.assertAllEqual([5, 7, 9], y.numpy())
+
 
 class ExcessivePaddingTest(xla_test.XLATestCase):
   """Test that eager execution works with TPU flattened tensors.
@@ -470,6 +486,36 @@
       self.assertAllEqual(100 * [[36.0]], reduced)
 
 
+def multiple_tpus():
+  devices = context.context().devices()
+  return len([d for d in devices if 'device:TPU:' in d]) > 1
+
+
+class MultiDeviceTest(xla_test.XLATestCase):
+  """Test running TPU computation on more than one core."""
+
+  def testBasic(self):
+    if not multiple_tpus():
+      self.skipTest('MultiDeviceTest requires multiple TPU devices.')
+
+    # Compute 10 on TPU core 0
+    with ops.device('device:TPU:0'):
+      two = constant_op.constant(2)
+      five = constant_op.constant(5)
+      ten = two * five
+      self.assertAllEqual(10, ten)
+
+    # Compute 6 on TPU core 1
+    with ops.device('device:TPU:1'):
+      two = constant_op.constant(2)
+      three = constant_op.constant(3)
+      six = two * three
+      self.assertAllEqual(6, six)
+
+    # Copy 10 and 6 to CPU and sum them
+    self.assertAllEqual(16, ten + six)
+
+
 if __name__ == '__main__':
   ops.enable_eager_execution(
       config=config_pb2.ConfigProto(log_device_placement=True))
diff --git a/tensorflow/compiler/tests/unary_ops_test.py b/tensorflow/compiler/tests/unary_ops_test.py
index 6a7011a..5f25ff9 100644
--- a/tensorflow/compiler/tests/unary_ops_test.py
+++ b/tensorflow/compiler/tests/unary_ops_test.py
@@ -382,6 +382,62 @@
           expected=np.array(
               [[True, False, True], [False, True, True]], dtype=np.bool))
 
+      self._assertOpOutputMatchesExpected(
+          math_ops.lgamma,
+          np.array(
+              [[1, 2, 3], [4, 5, 6], [1 / 2, 3 / 2, 5 / 2],
+               [-3 / 2, -7 / 2, -11 / 2]],
+              dtype=dtype),
+          expected=np.array(
+              [
+                  [0, 0, np.log(2.0)],
+                  [np.log(6.0), np.log(24.0),
+                   np.log(120)],
+                  [
+                      np.log(np.pi) / 2,
+                      np.log(np.pi) / 2 - np.log(2),
+                      np.log(np.pi) / 2 - np.log(4) + np.log(3)
+                  ],
+                  [
+                      np.log(np.pi) / 2 - np.log(3) + np.log(4),
+                      np.log(np.pi) / 2 - np.log(105) + np.log(16),
+                      np.log(np.pi) / 2 - np.log(10395) + np.log(64),
+                  ],
+              ],
+              dtype=dtype))
+
+      self._assertOpOutputMatchesExpected(
+          math_ops.digamma,
+          np.array(
+              [[1.0, 0.5, 1 / 3.0], [0.25, 1 / 6.0, 0.125], [2.0, 3.0, 4.0],
+               [6.0, 8.0, 9.0]],
+              dtype=dtype),
+          expected=np.array(
+              [
+                  [
+                      -np.euler_gamma, -2 * np.log(2) - np.euler_gamma,
+                      -np.pi / 2 / np.sqrt(3) - 3 * np.log(3) / 2 -
+                      np.euler_gamma
+                  ],
+                  [
+                      -np.pi / 2 - 3 * np.log(2) - np.euler_gamma,
+                      -np.pi * np.sqrt(3) / 2 - 2 * np.log(2) -
+                      3 * np.log(3) / 2 - np.euler_gamma,
+                      -np.pi / 2 - 4 * np.log(2) -
+                      (np.pi + np.log(2 + np.sqrt(2)) - np.log(2 - np.sqrt(2)))
+                      / np.sqrt(2) - np.euler_gamma
+                  ],
+                  [
+                      1 - np.euler_gamma, 1.5 - np.euler_gamma,
+                      11 / 6.0 - np.euler_gamma
+                  ],
+                  [
+                      137 / 60.0 - np.euler_gamma, 363 / 140.0 - np.euler_gamma,
+                      761 / 280.0 - np.euler_gamma
+                  ],
+              ],
+              dtype=dtype))
+
       def quantize_and_dequantize_v2(x):
         return array_ops.quantize_and_dequantize_v2(
             x, -127, 127, signed_input=True, num_bits=8)
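The expected arrays in the new lgamma/digamma cases come from closed-form identities such as lgamma(1/2) = log(pi)/2 and digamma(1) = -euler_gamma. A quick sanity check of a few entries (a sketch that assumes SciPy is available for digamma):

```python
import math
import numpy as np
from scipy.special import digamma  # assumed dependency for this check

assert math.isclose(math.lgamma(0.5), math.log(math.pi) / 2)
assert math.isclose(math.lgamma(1.5), math.log(math.pi) / 2 - math.log(2))
# Gamma(-3/2) = 4*sqrt(pi)/3, matching the expected entry for -3/2 above.
assert math.isclose(math.lgamma(-1.5),
                    math.log(math.pi) / 2 - math.log(3) + math.log(4))
assert math.isclose(float(digamma(1.0)), -np.euler_gamma)
assert math.isclose(float(digamma(0.5)), -2 * math.log(2) - np.euler_gamma)
```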
diff --git a/tensorflow/compiler/tf2xla/graph_compiler.cc b/tensorflow/compiler/tf2xla/graph_compiler.cc
index 4900af6..e1cea03 100644
--- a/tensorflow/compiler/tf2xla/graph_compiler.cc
+++ b/tensorflow/compiler/tf2xla/graph_compiler.cc
@@ -161,9 +161,8 @@
     outputs.resize(n->num_outputs());
     for (int o = 0; o < n->num_outputs(); ++o) {
       outputs[o] = op_context.release_output(o);
-      if (*op_context.is_output_dead() || outputs[o].tensor == nullptr) {
+      if (outputs[o].tensor == nullptr) {
         return errors::Internal("Missing xla_context ", o, "-th output from ",
-                                (*op_context.is_output_dead() ? "(dead)" : ""),
                                 SummarizeNode(*n));
       }
     }
diff --git a/tensorflow/compiler/tf2xla/kernels/BUILD b/tensorflow/compiler/tf2xla/kernels/BUILD
index 5a335aa..d88a34d 100644
--- a/tensorflow/compiler/tf2xla/kernels/BUILD
+++ b/tensorflow/compiler/tf2xla/kernels/BUILD
@@ -127,6 +127,7 @@
         "//tensorflow/compiler/xla/client/lib:constants",
         "//tensorflow/compiler/xla/client/lib:math",
         "//tensorflow/compiler/xla/client/lib:numeric",
+        "//tensorflow/compiler/xla/client/lib:prng",
         "//tensorflow/compiler/xla/client/xla_client:xla_builder",
         "//tensorflow/core:framework",
         "//tensorflow/core:image_ops_op_lib",
diff --git a/tensorflow/compiler/tf2xla/kernels/stateless_random_ops.cc b/tensorflow/compiler/tf2xla/kernels/stateless_random_ops.cc
index a6f5769..cc4b13d 100644
--- a/tensorflow/compiler/tf2xla/kernels/stateless_random_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/stateless_random_ops.cc
@@ -23,6 +23,7 @@
 #include "tensorflow/compiler/xla/client/lib/constants.h"
 #include "tensorflow/compiler/xla/client/lib/math.h"
 #include "tensorflow/compiler/xla/client/lib/numeric.h"
+#include "tensorflow/compiler/xla/client/lib/prng.h"
 #include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
 #include "tensorflow/core/framework/op_kernel.h"
 #include "tensorflow/core/framework/tensor.h"
@@ -33,134 +34,6 @@
 namespace tensorflow {
 namespace {
 
-// Rotates a 32-bit integer 'v' left by 'distance' bits.
-xla::XlaOp RotateLeftS32(xla::XlaBuilder* builder, const xla::XlaOp& v,
-                         int distance) {
-  return xla::Or(
-      xla::ShiftLeft(v, xla::ConstantR0<int>(builder, distance)),
-      xla::ShiftRightLogical(v, xla::ConstantR0<int>(builder, 32 - distance)));
-}
-
-using ThreeFry2x32State = std::array<xla::XlaOp, 2>;
-
-// Implements the ThreeFry counter-based PRNG algorithm.
-// Salmon et al. SC 2011. Parallel random numbers: as easy as 1, 2, 3.
-// http://www.thesalmons.org/john/random123/papers/random123sc11.pdf
-ThreeFry2x32State ThreeFry2x32(xla::XlaBuilder* builder,
-                               ThreeFry2x32State input, ThreeFry2x32State key) {
-  // Rotation distances specified by the Threefry2x32 algorithm.
-  constexpr std::array<int, 8> rotations = {13, 15, 26, 6, 17, 29, 16, 24};
-  ThreeFry2x32State x;
-
-  std::array<xla::XlaOp, 3> ks;
-  // 0x1BD11BDA is a parity constant specified by the ThreeFry2x32 algorithm.
-  ks[2] = xla::ConstantR0<int32>(builder, 0x1BD11BDA);
-  for (int i = 0; i < 2; ++i) {
-    ks[i] = key[i];
-    x[i] = input[i];
-    ks[2] = xla::Xor(ks[2], key[i]);
-  }
-
-  x[0] = xla::Add(x[0], ks[0]);
-  x[1] = xla::Add(x[1], ks[1]);
-
-  // Performs a single round of the Threefry2x32 algorithm, with a rotation
-  // amount 'rotation'.
-  auto round = [builder](ThreeFry2x32State v, int rotation) {
-    v[0] = xla::Add(v[0], v[1]);
-    v[1] = RotateLeftS32(builder, v[1], rotation);
-    v[1] = xla::Xor(v[0], v[1]);
-    return v;
-  };
-
-  // There are no known statistical flaws with 13 rounds of Threefry2x32.
-  // We are conservative and use 20 rounds.
-  x = round(x, rotations[0]);
-  x = round(x, rotations[1]);
-  x = round(x, rotations[2]);
-  x = round(x, rotations[3]);
-  x[0] = xla::Add(x[0], ks[1]);
-  x[1] = xla::Add(xla::Add(x[1], ks[2]), xla::ConstantR0<int32>(builder, 1));
-
-  x = round(x, rotations[4]);
-  x = round(x, rotations[5]);
-  x = round(x, rotations[6]);
-  x = round(x, rotations[7]);
-  x[0] = xla::Add(x[0], ks[2]);
-  x[1] = xla::Add(xla::Add(x[1], ks[0]), xla::ConstantR0<int32>(builder, 2));
-
-  x = round(x, rotations[0]);
-  x = round(x, rotations[1]);
-  x = round(x, rotations[2]);
-  x = round(x, rotations[3]);
-  x[0] = xla::Add(x[0], ks[0]);
-  x[1] = xla::Add(xla::Add(x[1], ks[1]), xla::ConstantR0<int32>(builder, 3));
-
-  x = round(x, rotations[4]);
-  x = round(x, rotations[5]);
-  x = round(x, rotations[6]);
-  x = round(x, rotations[7]);
-  x[0] = xla::Add(x[0], ks[1]);
-  x[1] = xla::Add(xla::Add(x[1], ks[2]), xla::ConstantR0<int32>(builder, 4));
-
-  x = round(x, rotations[0]);
-  x = round(x, rotations[1]);
-  x = round(x, rotations[2]);
-  x = round(x, rotations[3]);
-  x[0] = xla::Add(x[0], ks[2]);
-  x[1] = xla::Add(xla::Add(x[1], ks[0]), xla::ConstantR0<int32>(builder, 5));
-
-  return x;
-}
-
-// Returns a tensor of 'shape' random values uniformly distributed in the range
-// [minval, maxval)
-xla::XlaOp RandomUniform(xla::XlaBuilder* builder, const xla::XlaOp& seed,
-                         const TensorShape& shape, double minval,
-                         double maxval) {
-  // Split the seed into two 32-bit scalars to form a key.
-  auto seed0 = xla::Reshape(xla::Slice(seed, {0}, {1}, {1}), {});
-  auto seed1 = xla::Reshape(xla::Slice(seed, {1}, {2}, {1}), {});
-  ThreeFry2x32State key = {seed0, seed1};
-  const int64 size = shape.num_elements();
-
-  const int64 half_size = MathUtil::CeilOfRatio<int64>(size, 2);
-  const bool size_is_odd = (half_size * 2 != size);
-
-  // Fill the generator inputs with unique counter values.
-  ThreeFry2x32State inputs;
-  inputs[0] = xla::Iota(builder, xla::S32, half_size);
-  inputs[1] = xla::Add(inputs[0], xla::ConstantR0<int32>(builder, half_size));
-  ThreeFry2x32State outputs = ThreeFry2x32(builder, inputs, key);
-
-  if (size_is_odd) {
-    outputs[1] = xla::Slice(outputs[1], {0}, {half_size - 1}, {1});
-  }
-
-  auto bits =
-      xla::Reshape(xla::ConcatInDim(builder, outputs, 0), shape.dim_sizes());
-
-  // Form 22 random mantissa bits, with a leading 1 bit. The leading 1 bit
-  // forces the random bits into the mantissa.
-  constexpr int kFloatBits = 32;
-  constexpr int kMantissaBits = 23;
-  bits = xla::Or(
-      xla::ShiftRightLogical(
-          bits, xla::ConstantR0<int32>(builder, kFloatBits - kMantissaBits)),
-      xla::ConstantR0<int32>(builder, bit_cast<int32>(1.0f)));
-  auto floats = xla::BitcastConvertType(bits, xla::F32);
-
-  // We have a floating point number in the range [1.0, 2.0).
-  // Subtract 1.0f to shift to the range [0.0, 1.0)
-  floats = xla::Sub(floats, xla::ConstantR0<float>(builder, 1.0f));
-  // Multiply and add to shift to the range [minval, maxval).
-  floats = xla::Mul(floats, xla::ConstantR0<float>(builder, maxval - minval));
-  floats = xla::Add(floats, xla::ConstantR0<float>(builder, minval));
-  return floats;
-}
-
-}  // namespace
-
 class StatelessRandomUniformOp : public XlaOpKernel {
  public:
   explicit StatelessRandomUniformOp(OpKernelConstruction* ctx)
@@ -177,7 +50,17 @@
                 errors::InvalidArgument("seed must have shape [2], not ",
                                         seed_shape.DebugString()));
     xla::XlaOp seed = ctx->Input(1);
-    ctx->SetOutput(0, RandomUniform(builder, seed, shape, 0.0, 1.0));
+
+    xla::Shape xla_shape;
+    OP_REQUIRES_OK(ctx, TensorShapeToXLAShape(DT_FLOAT, shape, &xla_shape));
+
+    auto seed0 = xla::Reshape(xla::Slice(seed, {0}, {1}, {1}), {});
+    auto seed1 = xla::Reshape(xla::Slice(seed, {1}, {2}, {1}), {});
+
+    auto uniform = xla::StatelessRngUniform(
+        {seed0, seed1}, xla_shape, xla::ConstantR0<float>(builder, 0.0),
+        xla::ConstantR0<float>(builder, 1.0));
+    ctx->SetOutput(0, uniform);
   }
 
  private:
@@ -206,8 +89,16 @@
                                         seed_shape.DebugString()));
     xla::XlaOp seed = ctx->Input(1);
     xla::XlaBuilder* builder = ctx->builder();
-    auto uniform =
-        RandomUniform(builder, seed, shape, std::nextafter(-1.0f, 0.0f), 1.0);
+    xla::Shape xla_shape;
+    OP_REQUIRES_OK(ctx, TensorShapeToXLAShape(DT_FLOAT, shape, &xla_shape));
+
+    auto seed0 = xla::Reshape(xla::Slice(seed, {0}, {1}, {1}), {});
+    auto seed1 = xla::Reshape(xla::Slice(seed, {1}, {2}, {1}), {});
+
+    auto uniform = xla::StatelessRngUniform(
+        {seed0, seed1}, xla_shape,
+        xla::ConstantR0<float>(builder, std::nextafter(-1.0f, 0.0f)),
+        xla::ConstantR0<float>(builder, 1.0));
     // Convert uniform distribution to normal distribution by computing
     // sqrt(2) * erfinv(x)
     auto normal =
@@ -240,10 +131,18 @@
                 errors::InvalidArgument("seed must have shape [2], not ",
                                         seed_shape.DebugString()));
     xla::XlaOp seed = ctx->Input(1);
-    xla::XlaBuilder* b = ctx->builder();
+    xla::XlaBuilder* builder = ctx->builder();
 
-    auto uniform =
-        RandomUniform(b, seed, shape, std::numeric_limits<float>::min(), 1.0);
+    auto seed0 = xla::Reshape(xla::Slice(seed, {0}, {1}, {1}), {});
+    auto seed1 = xla::Reshape(xla::Slice(seed, {1}, {2}, {1}), {});
+
+    xla::Shape xla_shape;
+    OP_REQUIRES_OK(ctx, TensorShapeToXLAShape(DT_FLOAT, shape, &xla_shape));
+    auto uniform = xla::StatelessRngUniform(
+        {seed0, seed1}, xla_shape,
+        xla::ConstantR0<float>(builder, std::numeric_limits<float>::min()),
+        xla::ConstantR0<float>(builder, 1.0));
+
     ctx->SetOutput(0, TruncatedNormal(uniform));
   }
 
@@ -257,4 +156,5 @@
                     .TypeConstraint("Tseed", DT_INT32),
                 StatelessTruncatedNormalOp);
 
+}  // namespace
 }  // namespace tensorflow
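The deleted ThreeFry2x32 and RandomUniform helpers now live behind xla::StatelessRngUniform in //tensorflow/compiler/xla/client/lib:prng. For readers of this diff, a self-contained sketch of the same algorithm (20 rounds, key injection every 4 rounds, the 0x1BD11BDA parity constant, and the mantissa trick), written in Python purely for illustration:

```python
import struct

MASK32 = 0xFFFFFFFF
ROTATIONS = (13, 15, 26, 6, 17, 29, 16, 24)

def threefry2x32(counter, key):
    """One ThreeFry-2x32 block: 20 rounds, mirroring the deleted XLA helper."""
    ks = [key[0], key[1], 0x1BD11BDA ^ key[0] ^ key[1]]
    x0 = (counter[0] + ks[0]) & MASK32
    x1 = (counter[1] + ks[1]) & MASK32
    for block in range(1, 6):
        rots = ROTATIONS[:4] if block % 2 == 1 else ROTATIONS[4:]
        for r in rots:
            x0 = (x0 + x1) & MASK32
            x1 = ((x1 << r) | (x1 >> (32 - r))) & MASK32  # rotate left by r
            x1 ^= x0
        # Key injection after every group of four rounds.
        x0 = (x0 + ks[block % 3]) & MASK32
        x1 = (x1 + ks[(block + 1) % 3] + block) & MASK32
    return x0, x1

def bits_to_unit_float(bits):
    """Keep 23 random mantissa bits, OR in the exponent bits of 1.0f to get
    a float in [1.0, 2.0), then subtract 1.0 to land in [0.0, 1.0)."""
    mantissa = (bits & MASK32) >> 9  # 32 bits - 23 mantissa bits
    one_bits = struct.unpack('<I', struct.pack('<f', 1.0))[0]
    return struct.unpack('<f', struct.pack('<I', mantissa | one_bits))[0] - 1.0
```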
diff --git a/tensorflow/compiler/tf2xla/kernels/unary_ops.cc b/tensorflow/compiler/tf2xla/kernels/unary_ops.cc
index 116a020..4bb31f4 100644
--- a/tensorflow/compiler/tf2xla/kernels/unary_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/unary_ops.cc
@@ -189,5 +189,51 @@
 };
 REGISTER_XLA_OP(Name("Erfc"), ErfcOp);
 
+class LgammaOp : public XlaOpKernel {
+ public:
+  explicit LgammaOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
+  // Calculate lgamma using the Lanczos approximation
+  // (https://en.wikipedia.org/wiki/Lanczos_approximation).
+  void Compile(XlaOpKernelContext* ctx) override {
+    xla::XlaOp input = ctx->Input(0);
+    xla::PrimitiveType input_type = ctx->input_xla_type(0);
+
+    if (input_type == xla::F16 || input_type == xla::BF16) {
+      // The approximation works better with at least 32 bits of accuracy.
+      xla::XlaOp input_f32 = xla::ConvertElementType(input, xla::F32);
+      xla::XlaOp result_f32 = xla::Lgamma(input_f32);
+      xla::XlaOp result_x16 = xla::ConvertElementType(result_f32, input_type);
+      ctx->SetOutput(0, result_x16);
+    } else {
+      xla::XlaOp result = xla::Lgamma(input);
+      ctx->SetOutput(0, result);
+    }
+  }
+};
+REGISTER_XLA_OP(Name("Lgamma"), LgammaOp);
+
+class DigammaOp : public XlaOpKernel {
+ public:
+  explicit DigammaOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
+  // Calculate digamma using the Lanczos approximation
+  // (https://en.wikipedia.org/wiki/Lanczos_approximation).
+  void Compile(XlaOpKernelContext* ctx) override {
+    xla::XlaOp input = ctx->Input(0);
+    xla::PrimitiveType input_type = ctx->input_xla_type(0);
+
+    if (input_type == xla::F16 || input_type == xla::BF16) {
+      // The approximation works better with at least 32 bits of accuracy.
+      xla::XlaOp input_f32 = xla::ConvertElementType(input, xla::F32);
+      xla::XlaOp result_f32 = xla::Digamma(input_f32);
+      xla::XlaOp result_x16 = xla::ConvertElementType(result_f32, input_type);
+      ctx->SetOutput(0, result_x16);
+    } else {
+      xla::XlaOp result = xla::Digamma(input);
+      ctx->SetOutput(0, result);
+    }
+  }
+};
+REGISTER_XLA_OP(Name("Digamma"), DigammaOp);
+
 }  // namespace
 }  // namespace tensorflow
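For reference, both kernels defer to xla::Lgamma / xla::Digamma, which rest on a Lanczos form. In one common statement (the exact g and coefficients are chosen in xla/client/lib/math.cc, and argument-shift conventions vary between references):

```latex
\Gamma(z+1) = \sqrt{2\pi}\,\Big(z+g+\tfrac{1}{2}\Big)^{z+\frac{1}{2}}
              e^{-\left(z+g+\frac{1}{2}\right)}
              \left[c_0 + \sum_{k=1}^{N-1} \frac{c_k}{z+k}\right]
```

lgamma is the log of this product, and digamma follows by differentiating the log term by term.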
diff --git a/tensorflow/compiler/tf2xla/lib/BUILD b/tensorflow/compiler/tf2xla/lib/BUILD
index becc8b8..30039e2 100644
--- a/tensorflow/compiler/tf2xla/lib/BUILD
+++ b/tensorflow/compiler/tf2xla/lib/BUILD
@@ -120,6 +120,7 @@
         "//tensorflow/compiler/xla:statusor",
         "//tensorflow/compiler/xla:util",
         "//tensorflow/compiler/xla/client/lib:constants",
+        "//tensorflow/compiler/xla/client/lib:numeric",
         "//tensorflow/compiler/xla/client/xla_client:xla_builder",
         "//tensorflow/compiler/xla/client/xla_client:xla_computation",
         "//tensorflow/core:lib",
diff --git a/tensorflow/compiler/tf2xla/lib/triangular_solve.cc b/tensorflow/compiler/tf2xla/lib/triangular_solve.cc
index ce0f28d..e405f8d 100644
--- a/tensorflow/compiler/tf2xla/lib/triangular_solve.cc
+++ b/tensorflow/compiler/tf2xla/lib/triangular_solve.cc
@@ -21,6 +21,7 @@
 #include "tensorflow/compiler/tf2xla/lib/batch_dot.h"
 #include "tensorflow/compiler/tf2xla/lib/util.h"
 #include "tensorflow/compiler/xla/client/lib/constants.h"
+#include "tensorflow/compiler/xla/client/lib/numeric.h"
 #include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
 #include "tensorflow/compiler/xla/literal.h"
 #include "tensorflow/compiler/xla/shape_util.h"
@@ -150,40 +151,38 @@
         int64 k = std::min(block_size, n - i);
 
         // output[..., :, i:i+k] = triangular_solve(
-        //     a[..., i:i+k, i:i+k], b[..., :, i:i+k], ..., block_size=1)
+        //     a[..., i:i+k, i:i+k],
+        //     b[..., :, i:i+k] - np.matmul(output[..., :, :i],
+        //                                  a[..., :i, i:i+k]),
+        //     ..., block_size=1)
         auto a_slice = SliceInMinorDims(a, {i, i}, {i + k, i + k});
         auto b_slice = SliceInMinorDims(b, {0, i}, {m, i + k});
+
+        // Note that we multiply by the full output, since this is faster
+        // than slicing, and output[..., :, i:] is still 0.
+        xla::XlaOp a_prev;
+        if (lower) {
+          a_prev = SliceInMinorDims(a, {i, 0}, {i + k, n});
+        } else {
+          a_prev = SliceInMinorDims(a, {0, i}, {n, i + k});
+        }
+        auto prev_contribution = BatchDot(output, a_prev,
+                                          /*transpose_x=*/false,
+                                          /*transpose_y=*/transpose_a,
+                                          /*conjugate_x=*/false,
+                                          /*conjugate_y=*/conjugate_a);
+        auto to_solve = b_slice - prev_contribution;
+
         xla::XlaOp update;
         if (k > 1) {
           TF_ASSIGN_OR_RETURN(xla::XlaComputation * solve,
                               get_base_triangular_solve(k));
-          update = xla::Call(builder, *solve, {a_slice, b_slice});
+          update = xla::Call(builder, *solve, {a_slice, to_solve});
         } else {
           auto a_slice_conj = MaybeConjugate(a_slice, conjugate_a);
-          update = b_slice / a_slice_conj;
+          update = to_solve / a_slice_conj;
         }
         output = UpdateSliceInMinorDims(output, update, {0, i});
-
-        // if i + k < a.shape[-1]:
-        //   a_slice_2 = a[..., i+k:, i:i+k] if lower else a[..., i:i+k, i+k:]
-        //   a_slice_2 = T(a_slice_2) if transpose_a else a_slice_2
-        //   b[..., :, i+k:] -= np.matmul(output[..., :, i:i+k], a_slice_2)
-        if (i + k < n) {
-          xla::XlaOp a_slice_2;
-          if (lower) {
-            a_slice_2 = SliceInMinorDims(a, {i + k, i}, {n, i + k});
-          } else {
-            a_slice_2 = SliceInMinorDims(a, {i, i + k}, {i + k, n});
-          }
-
-          auto b_update = BatchDot(update, a_slice_2,
-                                   /*transpose_x=*/false,
-                                   /*transpose_y=*/transpose_a,
-                                   /*conjugate_x=*/false,
-                                   /*conjugate_y=*/conjugate_a);
-          auto b_slice_2 = SliceInMinorDims(b, {0, i + k}, {m, n});
-          b = UpdateSliceInMinorDims(b, b_slice_2 - b_update, {0, i + k});
-        }
       }
 
     } else if (left_side && lower != transpose_a) {
@@ -192,40 +191,36 @@
         int64 k = std::min(block_size, m - i);
 
         // output[..., i:i+k, :] = triangular_solve(
-        //     a[..., i:i+k, i:i+k], b[..., i:i+k, :], ..., block_size=1)
+        //     a[..., i:i+k, i:i+k],
+        //     b[..., i:i+k, :] - np.matmul(a[..., i:i+k, :i],
+        //                                  output[..., :i, :]),
+        //     ..., block_size=1)
         auto a_slice = SliceInMinorDims(a, {i, i}, {i + k, i + k});
         auto b_slice = SliceInMinorDims(b, {i, 0}, {i + k, n});
+
+        xla::XlaOp a_prev;
+        if (lower) {
+          a_prev = SliceInMinorDims(a, {i, 0}, {i + k, m});
+        } else {
+          a_prev = SliceInMinorDims(a, {0, i}, {m, i + k});
+        }
+        auto prev_contribution = BatchDot(a_prev, output,
+                                          /*transpose_x=*/transpose_a,
+                                          /*transpose_y=*/false,
+                                          /*conjugate_x=*/conjugate_a,
+                                          /*conjugate_y=*/false);
+        auto to_solve = b_slice - prev_contribution;
+
         xla::XlaOp update;
         if (k > 1) {
           TF_ASSIGN_OR_RETURN(xla::XlaComputation * solve,
                               get_base_triangular_solve(k));
-          update = xla::Call(builder, *solve, {a_slice, b_slice});
+          update = xla::Call(builder, *solve, {a_slice, to_solve});
         } else {
           auto a_slice_conj = MaybeConjugate(a_slice, conjugate_a);
-          update = b_slice / a_slice_conj;
+          update = to_solve / a_slice_conj;
         }
         output = UpdateSliceInMinorDims(output, update, {i, 0});
-
-        // if i + k < a.shape[-1]:
-        //   a_slice_2 = a[..., i+k:, i:i+k] if lower else a[..., i:i+k, i+k:]
-        //   a_slice_2 = T(a_slice_2) if transpose_a else a_slice_2
-        //   b[..., i+k:, :] -= np.matmul(a_slice_2, output[..., i:i+k, :])
-        if (i + k < m) {
-          xla::XlaOp a_slice_2;
-          if (lower) {
-            a_slice_2 = SliceInMinorDims(a, {i + k, i}, {m, i + k});
-          } else {
-            a_slice_2 = SliceInMinorDims(a, {i, i + k}, {i + k, m});
-          }
-
-          auto b_update = BatchDot(a_slice_2, update,
-                                   /*transpose_x=*/transpose_a,
-                                   /*transpose_y=*/false,
-                                   /*conjugate_x=*/conjugate_a,
-                                   /*conjugate_y=*/false);
-          auto b_slice_2 = SliceInMinorDims(b, {i + k, 0}, {m, n});
-          b = UpdateSliceInMinorDims(b, b_slice_2 - b_update, {i + k, 0});
-        }
       }
     } else if (!left_side && lower != transpose_a) {
       // for i in reversed(range(0, a.shape[-1], block_size)):
@@ -234,41 +229,37 @@
       for (int64 i = last_blk_ix; i >= 0; i -= block_size) {
         int64 k = std::min(block_size, n - i);
 
-        // output[..., :, i:i+k] triangular_solve(
-        //     a[..., i:i+k, i:i+k], b[..., :, i:i+k], ..., block_size=1)
+        // output[..., :, i:i+k] = triangular_solve(
+        //     a[..., i:i+k, i:i+k],
+        //     b[..., :, i:i+k] - np.matmul(output[..., :, :i],
+        //                                  a[..., :i, i:i+k]),
+        //     ..., block_size=1)
         auto a_slice = SliceInMinorDims(a, {i, i}, {i + k, i + k});
         auto b_slice = SliceInMinorDims(b, {0, i}, {m, i + k});
+
+        xla::XlaOp a_prev;
+        if (lower) {
+          a_prev = SliceInMinorDims(a, {0, i}, {n, i + k});
+        } else {
+          a_prev = SliceInMinorDims(a, {i, 0}, {i + k, n});
+        }
+        auto prev_contribution = BatchDot(output, a_prev,
+                                          /*transpose_x=*/false,
+                                          /*transpose_y=*/transpose_a,
+                                          /*conjugate_x=*/false,
+                                          /*conjugate_y=*/conjugate_a);
+        auto to_solve = b_slice - prev_contribution;
+
         xla::XlaOp update;
         if (k > 1) {
           TF_ASSIGN_OR_RETURN(xla::XlaComputation * solve,
                               get_base_triangular_solve(k));
-          update = xla::Call(builder, *solve, {a_slice, b_slice});
+          update = xla::Call(builder, *solve, {a_slice, to_solve});
         } else {
           auto a_slice_conj = MaybeConjugate(a_slice, conjugate_a);
-          update = b_slice / a_slice_conj;
+          update = to_solve / a_slice_conj;
         }
         output = UpdateSliceInMinorDims(output, update, {0, i});
-
-        // if i - k >= 0:
-        //   a_slice_2 = a[..., i:i+k, :i] if lower else a[..., :i, i:i+k]
-        //   a_slice_2 = T(a_slice_2) if transpose_a else a_slice_2
-        //   b[..., :, :i] -= np.matmul(out[..., :, i:i+k], a_slice_2)
-        if (i - k >= 0) {
-          xla::XlaOp a_slice_2;
-          if (lower) {
-            a_slice_2 = SliceInMinorDims(a, {i, 0}, {i + k, i});
-          } else {
-            a_slice_2 = SliceInMinorDims(a, {0, i}, {i, i + k});
-          }
-
-          auto b_update = BatchDot(update, a_slice_2,
-                                   /*transpose_x=*/false,
-                                   /*transpose_y=*/transpose_a,
-                                   /*conjugate_x=*/false,
-                                   /*conjugate_y=*/conjugate_a);
-          auto b_slice_2 = SliceInMinorDims(b, {0, 0}, {m, i});
-          b = UpdateSliceInMinorDims(b, b_slice_2 - b_update, {0, 0});
-        }
       }
     } else {  // left_side && lower == transpose_a
       // for i in reversed(range(0, a.shape[-1], block_size)):
@@ -277,41 +268,37 @@
       for (int64 i = last_blk_ix; i >= 0; i -= block_size) {
         int64 k = std::min(block_size, m - i);
 
-        // output[..., i:i+k, :] triangular_solve(
-        //     a[..., i:i+k, i:i+k], b[..., i:i+k, :], ..., block_size=1)
+        // output[..., i:i+k, :] = triangular_solve(
+        //     a[..., i:i+k, i:i+k],
+        //     b[..., i:i+k, :] - np.matmul(a[..., i:i+k, :i],
+        //                                  output[..., :i, :]),
+        //     ..., block_size=1)
         auto a_slice = SliceInMinorDims(a, {i, i}, {i + k, i + k});
         auto b_slice = SliceInMinorDims(b, {i, 0}, {i + k, n});
+
+        xla::XlaOp a_prev;
+        if (lower) {
+          a_prev = SliceInMinorDims(a, {0, i}, {m, i + k});
+        } else {
+          a_prev = SliceInMinorDims(a, {i, 0}, {i + k, m});
+        }
+        auto prev_contribution = BatchDot(a_prev, output,
+                                          /*transpose_x=*/transpose_a,
+                                          /*transpose_y=*/false,
+                                          /*conjugate_x=*/conjugate_a,
+                                          /*conjugate_y=*/false);
+        auto to_solve = b_slice - prev_contribution;
+
         xla::XlaOp update;
         if (k > 1) {
           TF_ASSIGN_OR_RETURN(xla::XlaComputation * solve,
                               get_base_triangular_solve(k));
-          update = xla::Call(builder, *solve, {a_slice, b_slice});
+          update = xla::Call(builder, *solve, {a_slice, to_solve});
         } else {
           auto a_slice_conj = MaybeConjugate(a_slice, conjugate_a);
-          update = b_slice / a_slice_conj;
+          update = to_solve / a_slice_conj;
         }
         output = UpdateSliceInMinorDims(output, update, {i, 0});
-
-        // if i - k >= 0:
-        //   a_slice_2 = a[..., i:i+k, :i] if lower else a[..., :i, i:i+k]
-        //   a_slice_2 = T(a_slice_2) if transpose_a else a_slice_2
-        //   b[..., :i, :] -= np.matmul(a_slice_2, out[..., i:i+k, :])
-        if (i - k >= 0) {
-          xla::XlaOp a_slice_2;
-          if (lower) {
-            a_slice_2 = SliceInMinorDims(a, {i, 0}, {i + k, i});
-          } else {
-            a_slice_2 = SliceInMinorDims(a, {0, i}, {i, i + k});
-          }
-
-          auto b_update = BatchDot(a_slice_2, update,
-                                   /*transpose_x=*/transpose_a,
-                                   /*transpose_y=*/false,
-                                   /*conjugate_x=*/conjugate_a,
-                                   /*conjugate_y=*/false);
-          auto b_slice_2 = SliceInMinorDims(b, {0, 0}, {i, n});
-          b = UpdateSliceInMinorDims(b, b_slice_2 - b_update, {0, 0});
-        }
       }
     }
 
@@ -330,9 +317,24 @@
     const int64 ndims = xla::ShapeUtil::Rank(a_shape);
 
     std::vector<int64> batch_dimensions;
+    int64 num_batches = 1;
     for (int i = 0; i < ndims - 2; ++i) {
       int64 a_size = a_shape.dimensions(i);
       batch_dimensions.push_back(a_size);
+      num_batches = num_batches * a_size;
+    }
+
+    // Rescale the input to be unit triangular
+    auto diag = Diagonal(a);
+    xla::XlaOp scaled_a;
+    std::vector<int64> broadcast_dimensions(ndims - 1);
+    std::iota(broadcast_dimensions.begin(), broadcast_dimensions.end(), 0);
+    if (transpose_a) {
+      scaled_a = Div(a, diag, broadcast_dimensions);
+    } else {
+      // Broadcast over the rows
+      broadcast_dimensions[ndims - 2] = ndims - 1;
+      scaled_a = Div(a, diag, broadcast_dimensions);
     }
 
     // The main computation is performed in a While loop.
@@ -346,7 +348,7 @@
     xla::XlaOp output = xla::ZerosLike(b);
     {
       auto i = transpose_a ? m - 1 : 0;
-      auto a_slice = SliceInMinorDims(a, {i, i}, {i + 1, i + 1});
+      auto a_slice = SliceInMinorDims(scaled_a, {i, i}, {i + 1, i + 1});
       auto b_slice = SliceInMinorDims(b, {i, 0}, {i + 1, n});
       auto a_slice_conj = MaybeConjugate(a_slice, conjugate_a);
       auto update = b_slice / a_slice_conj;
@@ -369,7 +371,7 @@
         b_shape};
     xla::Shape tuple_shape = xla::ShapeUtil::MakeTupleShape(tuple_shapes);
     auto init_i = xla::ConstantR0<int32>(builder, transpose_a ? m - 2 : 1);
-    auto init = xla::Tuple(builder, {init_i, output, a, b});
+    auto init = xla::Tuple(builder, {init_i, output, scaled_a, b});
 
     // Construct the loop condition function,
     // def cond_fun(loop_carry):
@@ -445,11 +447,8 @@
           DynamicSliceInMinorDims(body_b, {i, zero}, {1, n});
       auto result_row = result_row_slice - b_update;
 
-      // body_out[..., i:i+1, :] = result_row / a[..., i:i+1, i:i+1]
-      auto a_elt = DynamicSliceInMinorDims(body_a, {i, i}, {1, 1});
-      auto a_elt_conj = MaybeConjugate(a_elt, conjugate_a);
-      auto div_result = xla::Div(result_row, a_elt_conj);
-      body_out = DynamicUpdateSliceInMinorDims(body_out, div_result, {i, zero});
+      // body_out[..., i:i+1, :] = result_row
+      body_out = DynamicUpdateSliceInMinorDims(body_out, result_row, {i, zero});
 
       // if transpose_a:
       //   return (i - 1, body_out, a, b)
@@ -464,7 +463,11 @@
     // Construct the While loop and return the result,
     // return while_loop(cond_fun, body_fun, init)[1]
     auto triangular_solve_left_looking_while = xla::While(cond, body, init);
-    return xla::GetTupleElement(triangular_solve_left_looking_while, 1);
+    output = xla::GetTupleElement(triangular_solve_left_looking_while, 1);
+    auto scaling = MaybeConjugate(diag, conjugate_a);
+    // Broadcast over the columns
+    broadcast_dimensions[ndims - 2] = ndims - 2;
+    return Div(output, scaling, broadcast_dimensions);
   });
 }
 
@@ -479,9 +482,24 @@
     const int64 ndims = xla::ShapeUtil::Rank(a_shape);
 
     std::vector<int64> batch_dimensions;
+    int64 num_batches = 1;
     for (int i = 0; i < ndims - 2; ++i) {
       int64 a_size = a_shape.dimensions(i);
       batch_dimensions.push_back(a_size);
+      num_batches = num_batches * a_size;
+    }
+
+    // Rescale the input to be unit triangular
+    auto diag = Diagonal(a);
+    xla::XlaOp scaled_a;
+    std::vector<int64> broadcast_dimensions(ndims - 1);
+    std::iota(broadcast_dimensions.begin(), broadcast_dimensions.end(), 0);
+    if (transpose_a) {
+      // Broadcast over the rows
+      broadcast_dimensions[ndims - 2] = ndims - 1;
+      scaled_a = Div(a, diag, broadcast_dimensions);
+    } else {
+      scaled_a = Div(a, diag, broadcast_dimensions);
     }
 
     // The main computation is performed in a While loop.
@@ -503,7 +521,7 @@
         b_shape};
     xla::Shape tuple_shape = xla::ShapeUtil::MakeTupleShape(tuple_shapes);
     auto init_i = xla::ConstantR0<int32>(builder, transpose_a ? 0 : n - 1);
-    auto init = xla::Tuple(builder, {init_i, output, a, b});
+    auto init = xla::Tuple(builder, {init_i, output, scaled_a, b});
 
     // Construct the loop condition function,
     // def cond_fun(loop_carry):
@@ -568,11 +586,8 @@
                                               /*conjugate_x=*/false,
                                               /*conjugate_y=*/conjugate_a);
 
-      // body_out[..., :, i:i+1] = b_update / a[..., i:i+1, i:i+1]
-      auto a_ii = DynamicSliceInMinorDims(body_a, {i, i}, {1, 1});
-      auto a_ii_conj = MaybeConjugate(a_ii, conjugate_a);
-      body_out = DynamicUpdateSliceInMinorDims(body_out, b_update / a_ii_conj,
-                                               {zero, i});
+      // body_out[..., :, i:i+1] = b_update
+      body_out = DynamicUpdateSliceInMinorDims(body_out, b_update, {zero, i});
 
       // if transpose_a:
       //   return (i + 1, body_out, a, b)
@@ -587,7 +602,11 @@
     // Construct the While loop and return the result,
     // return while_loop(cond_fun, body_fun, init)[1]
     auto triangular_solve_left_looking_while = xla::While(cond, body, init);
-    return xla::GetTupleElement(triangular_solve_left_looking_while, 1);
+    output = xla::GetTupleElement(triangular_solve_left_looking_while, 1);
+    auto scaling = MaybeConjugate(diag, conjugate_a);
+    // Broadcast over the rows
+    broadcast_dimensions[ndims - 2] = ndims - 1;
+    return Div(output, scaling, broadcast_dimensions);
   });
 }
 
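The refactoring above hoists the divisions out of the While loop: the matrix is rescaled once to be unit triangular, the loop body then performs only multiply-and-subtract updates, and a single elementwise division by the (possibly conjugated) diagonal is applied to the loop output. A minimal sketch of why this is valid, using plain C++ arrays rather than the XLA builder API (here with column scaling for a non-transposed lower-triangular solve; the code above picks rows or columns depending on transpose_a):

#include <cstdio>

int main() {
  constexpr int n = 3;
  double a[n][n] = {{2, 0, 0}, {1, 4, 0}, {3, 5, 8}};
  double b[n] = {2, 9, 37};  // a * {1, 2, 3}

  // Rescale to unit triangular: divide column j by a[j][j].
  double diag[n], scaled[n][n];
  for (int j = 0; j < n; ++j) diag[j] = a[j][j];
  for (int i = 0; i < n; ++i)
    for (int j = 0; j < n; ++j) scaled[i][j] = a[i][j] / diag[j];

  // Division-free forward substitution against the unit-triangular matrix.
  double x[n];
  for (int i = 0; i < n; ++i) {
    double acc = b[i];
    for (int j = 0; j < i; ++j) acc -= scaled[i][j] * x[j];
    x[i] = acc;  // No divide here: the diagonal is 1.
  }

  // Undo the rescaling once, outside the loop.
  for (int i = 0; i < n; ++i) x[i] /= diag[i];

  for (int i = 0; i < n; ++i) std::printf("x[%d] = %g\n", i, x[i]);  // 1 2 3
  return 0;
}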
diff --git a/tensorflow/compiler/tf2xla/lib/triangular_solve.h b/tensorflow/compiler/tf2xla/lib/triangular_solve.h
index 80c2bc4..7eb9238 100644
--- a/tensorflow/compiler/tf2xla/lib/triangular_solve.h
+++ b/tensorflow/compiler/tf2xla/lib/triangular_solve.h
@@ -59,7 +59,7 @@
 // blocking is used.
 xla::XlaOp TriangularSolve(xla::XlaOp a, xla::XlaOp b, bool left_side,
                            bool lower, bool transpose_a, bool conjugate_a,
-                           int64 block_size = 256);
+                           int64 block_size = 128);
 
 xla::XlaOp TriangularSolveLeftLooking(xla::XlaOp a, xla::XlaOp b,
                                       bool transpose_a, bool conjugate_a);
diff --git a/tensorflow/compiler/tf2xla/xla_op_registry.h b/tensorflow/compiler/tf2xla/xla_op_registry.h
index 2d4593e..fc14834 100644
--- a/tensorflow/compiler/tf2xla/xla_op_registry.h
+++ b/tensorflow/compiler/tf2xla/xla_op_registry.h
@@ -279,7 +279,7 @@
 
 #define REGISTER_XLA_OP_UNIQ(CTR, BUILDER, OP)                                 \
   static ::tensorflow::XlaOpRegistrar xla_op_registrar__body__##CTR##__object( \
-      XlaOpRegistrationBuilder::BUILDER.Build(                                 \
+      ::tensorflow::XlaOpRegistrationBuilder::BUILDER.Build(                   \
           [](::tensorflow::OpKernelConstruction* context)                      \
               -> ::tensorflow::OpKernel* { return new OP(context); }));
 
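The switch to the fully qualified ::tensorflow::XlaOpRegistrationBuilder matters because macros expand at their point of use: an unqualified name is looked up in whatever namespace REGISTER_XLA_OP_UNIQ happens to be invoked from. A toy reproduction of the failure mode, with hypothetical names:

#include <cstdio>

namespace tf {
struct Builder {
  static void Build() { std::printf("registered\n"); }
};
}  // namespace tf

#define REGISTER_BAD() Builder::Build()         // breaks outside namespace tf
#define REGISTER_GOOD() ::tf::Builder::Build()  // works from any namespace

namespace client {
void Register() {
  // REGISTER_BAD();  // error: 'Builder' was not declared in this scope
  REGISTER_GOOD();
}
}  // namespace client

int main() { client::Register(); }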
diff --git a/tensorflow/compiler/xla/client/lib/BUILD b/tensorflow/compiler/xla/client/lib/BUILD
index 6933e9a..77ba474 100644
--- a/tensorflow/compiler/xla/client/lib/BUILD
+++ b/tensorflow/compiler/xla/client/lib/BUILD
@@ -97,9 +97,12 @@
     srcs = ["numeric.cc"],
     hdrs = ["numeric.h"],
     deps = [
+        ":arithmetic",
+        ":constants",
         "//tensorflow/compiler/xla:types",
         "//tensorflow/compiler/xla:xla_data_proto",
         "//tensorflow/compiler/xla/client/xla_client:xla_builder",
+        "//tensorflow/core:lib",
     ],
 )
 
@@ -119,6 +122,21 @@
 )
 
 cc_library(
+    name = "prng",
+    srcs = ["prng.cc"],
+    hdrs = ["prng.h"],
+    deps = [
+        ":constants",
+        ":math",
+        ":numeric",
+        "//tensorflow/compiler/xla:util",
+        "//tensorflow/compiler/xla:xla_data_proto",
+        "//tensorflow/compiler/xla/client/xla_client:xla_builder",
+        "//tensorflow/core:lib",
+    ],
+)
+
+cc_library(
     name = "testing",
     srcs = ["testing.cc"],
     hdrs = ["testing.h"],
diff --git a/tensorflow/compiler/xla/client/lib/math.cc b/tensorflow/compiler/xla/client/lib/math.cc
index 5587559..2a7ac1d 100644
--- a/tensorflow/compiler/xla/client/lib/math.cc
+++ b/tensorflow/compiler/xla/client/lib/math.cc
@@ -149,4 +149,124 @@
   });
 }
 
+namespace {
+// Coefficients for the Lanczos approximation of the gamma function. The
+// coefficients are uniquely determined by the choice of g and n (kLanczosGamma
+// and kLanczosCoefficients.size() + 1). The coefficients below correspond to
+// [7, 9]. The pairs [5, 7], [7, 9], [9, 10], and [607/128.0, 15] were
+// evaluated, and [7, 9] seemed to be the least sensitive to the quality of
+// the log function. ([5, 7] is, however, the only choice for which
+// -1.5e-5 <= lgamma(2) <= 1.5e-5 holds under a particularly inaccurate log
+// function.)
+static constexpr double kLanczosGamma = 7;  // aka g
+static constexpr double kBaseLanczosCoeff = 0.99999999999980993227684700473478;
+static constexpr std::array<double, 8> kLanczosCoefficients = {
+    676.520368121885098567009190444019, -1259.13921672240287047156078755283,
+    771.3234287776530788486528258894,   -176.61502916214059906584551354,
+    12.507343278686904814458936853,     -0.13857109526572011689554707,
+    9.984369578019570859563e-6,         1.50563273514931155834e-7};
+}  // namespace
+
+// Compute the Lgamma function using Lanczos' approximation from "A Precision
+// Approximation of the Gamma Function". SIAM Journal on Numerical Analysis
+// series B. Vol. 1:
+// lgamma(z + 1) = (log(2) + log(pi)) / 2 + (z + 1/2) * log(t(z)) - t(z) +
+//                 log(A(z))
+// t(z) = z + kLanczosGamma + 1/2
+// A(z) = kBaseLanczosCoeff + sum(k = 1..n-1, kLanczosCoefficients[k - 1] / (z + k))
+xla::XlaOp Lgamma(xla::XlaOp input) {
+  xla::XlaOp one_half = xla::ScalarLike(input, 0.5);
+  xla::XlaOp one = xla::ScalarLike(input, 1);
+
+  xla::XlaOp pi = xla::ScalarLike(input, M_PI);
+  xla::XlaOp log_pi = xla::ScalarLike(input, std::log(M_PI));
+  xla::XlaOp log_sqrt_two_pi =
+      xla::ScalarLike(input, (std::log(2) + std::log(M_PI)) / 2);
+
+  xla::XlaOp lanczos_gamma_plus_one_half =
+      xla::ScalarLike(input, kLanczosGamma + 0.5);
+  xla::XlaOp log_lanczos_gamma_plus_one_half =
+      xla::ScalarLike(input, std::log(kLanczosGamma + 0.5));
+
+  xla::XlaOp base_lanczos_coeff = xla::ScalarLike(input, kBaseLanczosCoeff);
+
+  // If the input is less than 0.5 use Gauss's reflection formula:
+  // gamma(x) = pi / sin(pi * x) * gamma(1 - x)
+  xla::XlaOp need_to_reflect = xla::Lt(xla::Real(input), one_half);
+  xla::XlaOp z = xla::Select(need_to_reflect, -input, input - one);
+
+  xla::XlaOp x = base_lanczos_coeff;
+  for (int i = 0; i < kLanczosCoefficients.size(); ++i) {
+    xla::XlaOp lanczos_coefficient =
+        xla::ScalarLike(input, kLanczosCoefficients[i]);
+    xla::XlaOp index = xla::ScalarLike(input, i);
+    x = x + lanczos_coefficient / (z + index + one);
+  }
+
+  // To improve accuracy on platforms with less-precise log implementations,
+  // compute log(lanczos_gamma_plus_one_half) at compile time and use log1p on
+  // the device.
+  // log(t) = log(kLanczosGamma + 0.5 + z)
+  //        = log(kLanczosGamma + 0.5) + log1p(z / (kLanczosGamma + 0.5))
+  xla::XlaOp t = lanczos_gamma_plus_one_half + z;
+  xla::XlaOp log_t = log_lanczos_gamma_plus_one_half +
+                     xla::Log1p(z / lanczos_gamma_plus_one_half);
+
+  xla::XlaOp log_y = log_sqrt_two_pi + (z + one_half) * log_t - t + xla::Log(x);
+
+  xla::XlaOp reflection = log_pi - xla::Log(xla::Sin(pi * input)) - log_y;
+  xla::XlaOp result = xla::Select(need_to_reflect, reflection, log_y);
+  return result;
+}
+
+// Compute the Digamma function using Lanczos' approximation from "A Precision
+// Approximation of the Gamma Function". SIAM Journal on Numerical Analysis
+// series B. Vol. 1:
+// digamma(z + 1) = log(t(z)) + A'(z) / A(z) - kLanczosGamma / t(z)
+// t(z) = z + kLanczosGamma + 1/2
+// A(z) = kBaseLanczosCoeff + sum(k = 1..n-1, kLanczosCoefficients[k - 1] / (z + k))
+// A'(z) = -sum(k = 1..n-1, kLanczosCoefficients[k - 1] / (z + k)^2)
+xla::XlaOp Digamma(xla::XlaOp input) {
+  xla::XlaOp zero = xla::ScalarLike(input, 0);
+  xla::XlaOp one_half = xla::ScalarLike(input, 0.5);
+  xla::XlaOp one = xla::ScalarLike(input, 1);
+
+  xla::XlaOp pi = xla::ScalarLike(input, M_PI);
+
+  xla::XlaOp lanczos_gamma = xla::ScalarLike(input, kLanczosGamma);
+  xla::XlaOp lanczos_gamma_plus_one_half =
+      xla::ScalarLike(input, kLanczosGamma + 0.5);
+  xla::XlaOp log_lanczos_gamma_plus_one_half =
+      xla::ScalarLike(input, std::log(kLanczosGamma + 0.5));
+
+  xla::XlaOp base_lanczos_coeff = xla::ScalarLike(input, kBaseLanczosCoeff);
+
+  // If the input is less than 0.5 use Gauss's reflection formula:
+  // digamma(x) = digamma(1 - x) - pi * cot(pi * x)
+  xla::XlaOp need_to_reflect = xla::Lt(xla::Real(input), one_half);
+  xla::XlaOp z = xla::Select(need_to_reflect, -input, input - one);
+
+  xla::XlaOp num = zero;
+  xla::XlaOp denom = base_lanczos_coeff;
+  for (int i = 0; i < kLanczosCoefficients.size(); ++i) {
+    xla::XlaOp lanczos_coefficient =
+        xla::ScalarLike(input, kLanczosCoefficients[i]);
+    xla::XlaOp index = xla::ScalarLike(input, i);
+    num = num - lanczos_coefficient / ((z + index + one) * (z + index + one));
+    denom = denom + lanczos_coefficient / (z + index + one);
+  }
+
+  // To improve accuracy on platforms with less-precise log implementations,
+  // compute log(lanczos_gamma_plus_one_half) at compile time and use log1p on
+  // the device.
+  // log(t) = log(kLanczosGamma + 0.5 + z)
+  //        = log(kLanczosGamma + 0.5) + log1p(z / (kLanczosGamma + 0.5))
+  xla::XlaOp t = lanczos_gamma_plus_one_half + z;
+  xla::XlaOp log_t = log_lanczos_gamma_plus_one_half +
+                     xla::Log1p(z / lanczos_gamma_plus_one_half);
+
+  xla::XlaOp y = log_t + num / denom - lanczos_gamma / t;
+  xla::XlaOp reflection = y - pi * xla::Cos(pi * input) / xla::Sin(pi * input);
+  xla::XlaOp result = xla::Select(need_to_reflect, reflection, y);
+  return result;
+}
+
 }  // namespace xla
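As a sanity check of the construction above, the same Lanczos evaluation can be written over scalar doubles and compared against the C library. This is a standalone sketch reusing the constants from math.cc, not XLA code:

#include <array>
#include <cmath>
#include <cstdio>

double LanczosLgamma(double x) {
  constexpr double kG = 7;
  constexpr double kBase = 0.99999999999980993227684700473478;
  constexpr std::array<double, 8> kCoeffs = {
      676.520368121885098567009190444019, -1259.13921672240287047156078755283,
      771.3234287776530788486528258894,   -176.61502916214059906584551354,
      12.507343278686904814458936853,     -0.13857109526572011689554707,
      9.984369578019570859563e-6,         1.50563273514931155834e-7};

  const bool reflect = x < 0.5;  // Gauss's reflection formula below 0.5.
  const double z = reflect ? -x : x - 1;

  double a = kBase;
  for (int k = 1; k <= 8; ++k) a += kCoeffs[k - 1] / (z + k);

  const double t = z + kG + 0.5;
  const double log_y =
      std::log(std::sqrt(2 * M_PI)) + (z + 0.5) * std::log(t) - t + std::log(a);
  // Valid for arguments where sin(pi * x) > 0, as in the Lgamma test above.
  return reflect ? std::log(M_PI) - std::log(std::sin(M_PI * x)) - log_y
                 : log_y;
}

int main() {
  for (double x : {0.5, 1.5, 3.0, 10.0, -1.5}) {
    std::printf("x=%5.2f  lanczos=%.12f  std::lgamma=%.12f\n", x,
                LanczosLgamma(x), std::lgamma(x));
  }
  return 0;
}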
diff --git a/tensorflow/compiler/xla/client/lib/math.h b/tensorflow/compiler/xla/client/lib/math.h
index e7c8b50..e4c79b5 100644
--- a/tensorflow/compiler/xla/client/lib/math.h
+++ b/tensorflow/compiler/xla/client/lib/math.h
@@ -46,6 +46,12 @@
 // Computes an approximation of the inverse of the error function.
 XlaOp ErfInv(XlaOp x);
 
+// Computes an approximation of the lgamma function.
+XlaOp Lgamma(XlaOp input);
+
+// Computes an approximation of the digamma function.
+XlaOp Digamma(XlaOp input);
+
 }  // namespace xla
 
 #endif  // TENSORFLOW_COMPILER_XLA_CLIENT_LIB_MATH_H_
diff --git a/tensorflow/compiler/xla/client/lib/math_test.cc b/tensorflow/compiler/xla/client/lib/math_test.cc
index 068cd2e..1df287d 100644
--- a/tensorflow/compiler/xla/client/lib/math_test.cc
+++ b/tensorflow/compiler/xla/client/lib/math_test.cc
@@ -82,5 +82,59 @@
   std::vector<float> expected = {4, 1, 32, 0.4, 0.4472, 111.1080};
   ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
 }
+
+XLA_TEST_F(MathTest, Lgamma) {
+  XlaBuilder builder(TestName());
+  auto x = ConstantR1<float>(&builder, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.5, 1.5,
+                                        2.5, -1.5, -3.5, -5.5});
+  Lgamma(x);
+
+  std::vector<float> expected = {
+      0,
+      0,
+      static_cast<float>(std::log(2)),
+      static_cast<float>(std::log(6)),
+      static_cast<float>(std::log(24)),
+      static_cast<float>(std::log(120)),
+      static_cast<float>(std::log(M_PI) / 2),
+      static_cast<float>(std::log(M_PI) / 2 - std::log(2)),
+      static_cast<float>(std::log(M_PI) / 2 - std::log(4) + std::log(3)),
+      static_cast<float>(std::log(M_PI) / 2 - std::log(3) + std::log(4)),
+      static_cast<float>(std::log(M_PI) / 2 - std::log(105) + std::log(16)),
+      static_cast<float>(std::log(M_PI) / 2 - std::log(10395) + std::log(64))};
+  error_spec_ = ErrorSpec{0.001};
+  ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
+}
+
+XLA_TEST_F(MathTest, Digamma) {
+  XlaBuilder builder(TestName());
+  auto x = ConstantR1<float>(&builder, {1.0, 0.5, 1 / 3.0, 0.25, 1 / 6.0, 0.125,
+                                        2.0, 3.0, 4.0, 6.0, 8.0, 9.0});
+  Digamma(x);
+
+  constexpr double euler_mascheroni =
+      0.57721566490153286060651209008240243104215933593992;
+  std::vector<float> expected = {
+      static_cast<float>(-euler_mascheroni),
+      static_cast<float>(-2 * std::log(2) - euler_mascheroni),
+      static_cast<float>(-M_PI / 2 / std::sqrt(3) - 3 * std::log(3) / 2 -
+                         euler_mascheroni),
+      static_cast<float>(-M_PI / 2 - 3 * std::log(2) - euler_mascheroni),
+      static_cast<float>(-M_PI * std::sqrt(3) / 2 - 2 * std::log(2) -
+                         3 * std::log(3) / 2 - euler_mascheroni),
+      static_cast<float>(
+          -M_PI / 2 - 4 * std::log(2) -
+          (M_PI + std::log(2 + std::sqrt(2)) - std::log(2 - std::sqrt(2))) /
+              std::sqrt(2) -
+          euler_mascheroni),
+      static_cast<float>(1 - euler_mascheroni),
+      static_cast<float>(1.5 - euler_mascheroni),
+      static_cast<float>(11 / 6.0 - euler_mascheroni),
+      static_cast<float>(137 / 60.0 - euler_mascheroni),
+      static_cast<float>(363 / 140.0 - euler_mascheroni),
+      static_cast<float>(761 / 280.0 - euler_mascheroni)};
+  ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
+}
+
 }  // namespace
 }  // namespace xla
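For reference, the expected Digamma values come from digamma(1) = -gamma (the Euler-Mascheroni constant) together with the recurrence digamma(x + 1) = digamma(x) + 1/x, so for a positive integer n

  digamma(n) = -gamma + sum(k = 1..n-1, 1/k)

e.g. digamma(4) = -gamma + 1 + 1/2 + 1/3 = 11/6 - gamma, matching the expected value in the test.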
diff --git a/tensorflow/compiler/xla/client/lib/numeric.cc b/tensorflow/compiler/xla/client/lib/numeric.cc
index fd4e8fc..cdbeb18 100644
--- a/tensorflow/compiler/xla/client/lib/numeric.cc
+++ b/tensorflow/compiler/xla/client/lib/numeric.cc
@@ -13,11 +13,14 @@
 limitations under the License.
 ==============================================================================*/
 
-#include "tensorflow/compiler/xla/client/lib/numeric.h"
-
 #include <numeric>
 #include <vector>
 
+#include "tensorflow/compiler/xla/client/lib/arithmetic.h"
+#include "tensorflow/compiler/xla/client/lib/constants.h"
+#include "tensorflow/compiler/xla/client/lib/numeric.h"
+#include "tensorflow/core/lib/gtl/array_slice.h"
+
 namespace xla {
 
 namespace {
@@ -28,7 +31,7 @@
   for (int64 i = 0; i < size; ++i) {
     values[i] = static_cast<T>(i);
   }
-  return xla::ConstantR1<T>(builder, values);
+  return ConstantR1<T>(builder, values);
 }
 
 }  // namespace
@@ -76,4 +79,26 @@
   return ConvertElementType(indicator, type);
 }
 
+XlaOp Diagonal(XlaOp x) {
+  XlaBuilder* builder = x.builder();
+  return builder->ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
+    TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(x));
+    const int64 n_dims = ShapeUtil::Rank(shape);
+    TF_RET_CHECK(n_dims >= 2);
+    const int64 n = shape.dimensions(n_dims - 1);
+    const int64 m = shape.dimensions(n_dims - 2);
+    tensorflow::gtl::ArraySlice<int64> major_dims(
+        AsInt64Slice(shape.dimensions()), /*pos=*/0, /*len=*/n_dims - 2);
+    auto a = Iota(builder, U32, n);
+    auto b = Iota(builder, U32, m);
+    auto indicator = Eq(a, Broadcast(b, {n}), /*broadcast_dimensions=*/{0});
+    auto mask = Broadcast(indicator, major_dims);
+    XlaComputation add =
+        CreateScalarAddComputation(shape.element_type(), builder);
+    auto diag = Reduce(Select(mask, x, Zeros(builder, shape)), ScalarLike(x, 0),
+                       add, {n_dims - 1});
+    return diag;
+  });
+}
+
 }  // namespace xla
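Diagonal avoids any gather: it builds a boolean row == column indicator from two Iotas, selects the input where the indicator holds (zero elsewhere), and sums away the last dimension. The same mask-and-reduce trick in plain C++, as a sketch rather than the XLA API:

#include <cstdio>

int main() {
  constexpr int m = 3, n = 4;
  double x[m][n] = {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}};

  double diag[m] = {};
  for (int i = 0; i < m; ++i)
    for (int j = 0; j < n; ++j)
      diag[i] += (i == j) ? x[i][j] : 0.0;  // Select(mask, x, 0), then Reduce.

  for (int i = 0; i < m; ++i) std::printf("%g ", diag[i]);  // 1 6 11
  std::printf("\n");
  return 0;
}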
diff --git a/tensorflow/compiler/xla/client/lib/numeric.h b/tensorflow/compiler/xla/client/lib/numeric.h
index 7970700..3ec0846 100644
--- a/tensorflow/compiler/xla/client/lib/numeric.h
+++ b/tensorflow/compiler/xla/client/lib/numeric.h
@@ -29,6 +29,9 @@
 // else.
 XlaOp IdentityMatrix(XlaBuilder* builder, PrimitiveType type, int64 m, int64 n);
 
+// Returns the diagonal of the last two dimensions of 'x', preserving any
+// leading batch dimensions.
+XlaOp Diagonal(XlaOp x);
+
 }  // namespace xla
 
 #endif  // TENSORFLOW_COMPILER_XLA_CLIENT_LIB_NUMERIC_H_
diff --git a/tensorflow/compiler/xla/client/lib/prng.cc b/tensorflow/compiler/xla/client/lib/prng.cc
new file mode 100644
index 0000000..299a6ac
--- /dev/null
+++ b/tensorflow/compiler/xla/client/lib/prng.cc
@@ -0,0 +1,150 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <cmath>
+
+#include "tensorflow/compiler/xla/client/lib/constants.h"
+#include "tensorflow/compiler/xla/client/lib/math.h"
+#include "tensorflow/compiler/xla/client/lib/numeric.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/util.h"
+#include "tensorflow/core/lib/core/casts.h"
+
+namespace xla {
+namespace {
+
+// Rotates a 32-bit integer 'v' left by 'distance' bits.
+XlaOp RotateLeftS32(XlaOp v, int distance) {
+  return (v << ConstantR0<int32>(v.builder(), distance)) |
+         ShiftRightLogical(v, ConstantR0<int32>(v.builder(), 32 - distance));
+}
+
+using ThreeFry2x32State = std::array<XlaOp, 2>;
+
+// Implements the ThreeFry counter-based PRNG algorithm.
+// Salmon et al. SC 2011. Parallel random numbers: as easy as 1, 2, 3.
+// http://www.thesalmons.org/john/random123/papers/random123sc11.pdf
+ThreeFry2x32State ThreeFry2x32(ThreeFry2x32State input, ThreeFry2x32State key) {
+  XlaBuilder* builder = input[0].builder();
+  // Rotation distances specified by the Threefry2x32 algorithm.
+  constexpr std::array<int, 8> rotations = {13, 15, 26, 6, 17, 29, 16, 24};
+  ThreeFry2x32State x;
+
+  std::array<XlaOp, 3> ks;
+  // 0x1BD11BDA is a parity constant specified by the ThreeFry2x32 algorithm.
+  ks[2] = ConstantR0<int32>(builder, 0x1BD11BDA);
+  for (int i = 0; i < 2; ++i) {
+    ks[i] = key[i];
+    x[i] = input[i];
+    ks[2] = ks[2] ^ key[i];
+  }
+
+  x[0] = x[0] + ks[0];
+  x[1] = x[1] + ks[1];
+
+  // Performs a single round of the Threefry2x32 algorithm, with a rotation
+  // amount 'rotation'.
+  auto round = [builder](ThreeFry2x32State v, int rotation) {
+    v[0] = v[0] + v[1];
+    v[1] = RotateLeftS32(v[1], rotation);
+    v[1] = v[0] ^ v[1];
+    return v;
+  };
+
+  // There are no known statistical flaws with 13 rounds of Threefry2x32.
+  // We are conservative and use 20 rounds.
+  x = round(x, rotations[0]);
+  x = round(x, rotations[1]);
+  x = round(x, rotations[2]);
+  x = round(x, rotations[3]);
+  x[0] = x[0] + ks[1];
+  x[1] = x[1] + ks[2] + ConstantR0<int32>(builder, 1);
+
+  x = round(x, rotations[4]);
+  x = round(x, rotations[5]);
+  x = round(x, rotations[6]);
+  x = round(x, rotations[7]);
+  x[0] = x[0] + ks[2];
+  x[1] = x[1] + ks[0] + ConstantR0<int32>(builder, 2);
+
+  x = round(x, rotations[0]);
+  x = round(x, rotations[1]);
+  x = round(x, rotations[2]);
+  x = round(x, rotations[3]);
+  x[0] = x[0] + ks[0];
+  x[1] = x[1] + ks[1] + ConstantR0<int32>(builder, 3);
+
+  x = round(x, rotations[4]);
+  x = round(x, rotations[5]);
+  x = round(x, rotations[6]);
+  x = round(x, rotations[7]);
+  x[0] = x[0] + ks[1];
+  x[1] = x[1] + ks[2] + ConstantR0<int32>(builder, 4);
+
+  x = round(x, rotations[0]);
+  x = round(x, rotations[1]);
+  x = round(x, rotations[2]);
+  x = round(x, rotations[3]);
+  x[0] = x[0] + ks[2];
+  x[1] = x[1] + ks[0] + ConstantR0<int32>(builder, 5);
+
+  return x;
+}
+
+}  // namespace
+
+XlaOp StatelessRngUniform(std::array<XlaOp, 2> seeds, const Shape& shape,
+                          XlaOp minval, XlaOp maxval) {
+  XlaBuilder* builder = seeds[0].builder();
+  if (shape.element_type() != F32) {
+    return builder->ReportError(Unimplemented(
+        "Types other than F32 are not implemented by StatelessRngUniform."));
+  }
+  ThreeFry2x32State key = seeds;
+  const int64 size = ShapeUtil::ElementsIn(shape);
+
+  const int64 half_size = CeilOfRatio<int64>(size, 2);
+  const bool size_is_odd = (half_size * 2 != size);
+
+  // Fill the generator inputs with unique counter values.
+  ThreeFry2x32State inputs;
+  inputs[0] = Iota(builder, S32, half_size);
+  inputs[1] = inputs[0] + ConstantR0<int32>(builder, half_size);
+  ThreeFry2x32State outputs = ThreeFry2x32(inputs, key);
+
+  if (size_is_odd) {
+    outputs[1] = Slice(outputs[1], {0}, {half_size - 1}, {1});
+  }
+
+  auto bits = Reshape(ConcatInDim(builder, outputs, 0),
+                      AsInt64Slice(shape.dimensions()));
+
+  // Keep the top 23 random bits as the float mantissa and OR in the bit
+  // pattern of 1.0f, which fixes the sign and exponent so the result lies in
+  // [1.0, 2.0).
+  constexpr int kFloatBits = 32;
+  constexpr int kMantissaBits = 23;
+  bits = ShiftRightLogical(
+             bits, ConstantR0<int32>(builder, kFloatBits - kMantissaBits)) |
+         ConstantR0<int32>(builder, tensorflow::bit_cast<int32>(1.0f));
+  auto floats = BitcastConvertType(bits, F32);
+
+  // We have a floating point number in the range [1.0, 2.0).
+  // Subtract 1.0f to shift to the range [0.0, 1.0)
+  floats = floats - ConstantR0<float>(builder, 1.0f);
+  // Multiply and add to shift to the range [minval, maxval).
+  return floats * (maxval - minval) + minval;
+}
+
+}  // namespace xla
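The closing bit manipulation in StatelessRngUniform is worth seeing in isolation: keep 23 random bits as a float mantissa, force the exponent to that of 1.0f so the value lands in [1.0, 2.0), then subtract 1.0f. A host-side sketch in plain C++ (not XLA):

#include <cstdint>
#include <cstdio>
#include <cstring>

float BitsToUnitFloat(uint32_t bits) {
  // 0x3F800000 is the bit pattern of 1.0f (sign 0, exponent 127).
  uint32_t pattern = (bits >> 9) | 0x3F800000u;
  float f;
  std::memcpy(&f, &pattern, sizeof(f));  // the BitcastConvertType step
  return f - 1.0f;                       // [1.0, 2.0) -> [0.0, 1.0)
}

int main() {
  for (uint32_t bits : {0u, 0x7FFFFFFFu, 0xFFFFFFFFu}) {
    std::printf("0x%08X -> %f\n", bits, BitsToUnitFloat(bits));
  }
  return 0;
}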
diff --git a/tensorflow/compiler/xla/client/lib/prng.h b/tensorflow/compiler/xla/client/lib/prng.h
new file mode 100644
index 0000000..ac86390
--- /dev/null
+++ b/tensorflow/compiler/xla/client/lib/prng.h
@@ -0,0 +1,34 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_CLIENT_LIB_PRNG_H_
+#define TENSORFLOW_COMPILER_XLA_CLIENT_LIB_PRNG_H_
+
+#include <array>
+
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/xla_data.pb.h"
+
+namespace xla {
+
+// Returns a tensor of shape 'shape' filled with random values uniformly
+// distributed in the range [minval, maxval). Requires two 32-bit integer
+// seeds. Currently only shapes with element type F32 are implemented.
+XlaOp StatelessRngUniform(std::array<XlaOp, 2> seeds, const Shape& shape,
+                          XlaOp minval, XlaOp maxval);
+
+}  // namespace xla
+
+#endif  // TENSORFLOW_COMPILER_XLA_CLIENT_LIB_PRNG_H_
diff --git a/tensorflow/compiler/xla/client/xla_client/xla_builder.cc b/tensorflow/compiler/xla/client/xla_client/xla_builder.cc
index aac7df4..3b4f9e1 100644
--- a/tensorflow/compiler/xla/client/xla_client/xla_builder.cc
+++ b/tensorflow/compiler/xla/client/xla_client/xla_builder.cc
@@ -1845,10 +1845,6 @@
     tensorflow::gtl::ArraySlice<int64> replica_group_ids,
     const tensorflow::gtl::optional<ChannelHandle>& channel_id) {
   return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
-    if (channel_id.has_value()) {
-      return Unimplemented("channel_id is not supported in AllReduce");
-    }
-
     HloInstructionProto instr;
     TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
     TF_ASSIGN_OR_RETURN(
@@ -1858,6 +1854,10 @@
       instr.add_replica_group_ids(replica_group_id);
     }
 
+    if (channel_id.has_value()) {
+      instr.set_all_reduce_id(channel_id->handle());
+    }
+
     AddCalledComputation(computation, &instr);
 
     return AddInstruction(std::move(instr), HloOpcode::kCrossReplicaSum,
diff --git a/tensorflow/compiler/xla/service/BUILD b/tensorflow/compiler/xla/service/BUILD
index 85c6c63..989bb75 100644
--- a/tensorflow/compiler/xla/service/BUILD
+++ b/tensorflow/compiler/xla/service/BUILD
@@ -182,6 +182,7 @@
     name = "shape_inference_test",
     srcs = ["shape_inference_test.cc"],
     deps = [
+        ":hlo",
         ":shape_inference",
         "//tensorflow/compiler/xla:shape_util",
         "//tensorflow/compiler/xla:test",
diff --git a/tensorflow/compiler/xla/service/algebraic_simplifier.cc b/tensorflow/compiler/xla/service/algebraic_simplifier.cc
index af7728d..2205a7e 100644
--- a/tensorflow/compiler/xla/service/algebraic_simplifier.cc
+++ b/tensorflow/compiler/xla/service/algebraic_simplifier.cc
@@ -1156,6 +1156,19 @@
     return Status::OK();
   }
 
+  // 0*A => 0. Only applies to integral types, for correct NaN-handling.
+  if (IsAll(lhs, 0) &&
+      primitive_util::IsIntegralType(multiply->shape().element_type()) &&
+      ReplaceInstructionIfSameShape(multiply, lhs)) {
+    return Status::OK();
+  }
+  // A*0 => 0
+  if (IsAll(rhs, 0) &&
+      primitive_util::IsIntegralType(multiply->shape().element_type()) &&
+      ReplaceInstructionIfSameShape(multiply, rhs)) {
+    return Status::OK();
+  }
+
   // exp(A) * exp(B) => exp(A+B)
   if (Match(multiply, m::Multiply(m::Exp(m::Op(&lhs)), m::Exp(m::Op(&rhs))))) {
     auto add = computation_->AddInstruction(HloInstruction::CreateBinary(
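The restriction to integral types in the new 0*A and A*0 rules is load-bearing: for floating-point inputs, multiplying by zero does not always produce zero, so folding to the constant would change observable results. A quick host-side check:

#include <cstdio>
#include <limits>

int main() {
  const double nan = std::numeric_limits<double>::quiet_NaN();
  const double inf = std::numeric_limits<double>::infinity();
  std::printf("0 * NaN  = %f\n", 0.0 * nan);   // nan, not 0
  std::printf("0 * inf  = %f\n", 0.0 * inf);   // nan, not 0
  std::printf("0 * -5.0 = %f\n", 0.0 * -5.0);  // -0.0, not +0.0
  return 0;
}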
diff --git a/tensorflow/compiler/xla/service/algebraic_simplifier_test.cc b/tensorflow/compiler/xla/service/algebraic_simplifier_test.cc
index 92bbcbd..3f0f2af 100644
--- a/tensorflow/compiler/xla/service/algebraic_simplifier_test.cc
+++ b/tensorflow/compiler/xla/service/algebraic_simplifier_test.cc
@@ -74,6 +74,26 @@
   EXPECT_EQ(root, param0);
 }
 
+// Test that A * 0 is simplified to 0
+TEST_F(AlgebraicSimplifierTest, MulZero) {
+  Shape r0s32 = ShapeUtil::MakeShape(S32, {});
+  HloComputation::Builder builder(TestName());
+  HloInstruction* param0 = builder.AddInstruction(
+      HloInstruction::CreateParameter(0, r0s32, "param0"));
+  HloInstruction* zero = builder.AddInstruction(
+      HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(0)));
+  builder.AddInstruction(
+      HloInstruction::CreateBinary(r0s32, HloOpcode::kMultiply, param0, zero));
+
+  auto computation = module().AddEntryComputation(builder.Build());
+  HloInstruction* root = computation->root_instruction();
+  EXPECT_EQ(root->opcode(), HloOpcode::kMultiply);
+  AlgebraicSimplifier simplifier(/*is_layout_sensitive=*/false,
+                                 non_bitcasting_callback());
+  ASSERT_TRUE(simplifier.Run(&module()).ValueOrDie());
+  EXPECT_EQ(computation->root_instruction(), zero);
+}
+
 // Test that Reduce(Reduce(A)) -> Reduce(A)
 TEST_F(AlgebraicSimplifierTest, TwoReducesToOne) {
   HloComputation::Builder builder(TestName());
diff --git a/tensorflow/compiler/xla/service/buffer_assignment.cc b/tensorflow/compiler/xla/service/buffer_assignment.cc
index afe4b2e..783e3f7 100644
--- a/tensorflow/compiler/xla/service/buffer_assignment.cc
+++ b/tensorflow/compiler/xla/service/buffer_assignment.cc
@@ -1444,8 +1444,23 @@
             });
       } else if (opcode == HloOpcode::kCall) {
         const HloInstruction* call_hlo = instruction;
-        const HloInstruction* root_hlo =
-            call_hlo->to_apply()->root_instruction();
+        const HloComputation* callee = call_hlo->to_apply();
+        const HloInstruction* root_hlo = callee->root_instruction();
+        for (int64 i = 0; i < call_hlo->operand_count(); i++) {
+          const HloInstruction* call_param = callee->parameter_instruction(i);
+          const HloInstruction* call_operand = call_hlo->operand(i);
+          ShapeUtil::ForEachSubshape(
+              call_operand->shape(),
+              [&](const Shape& /*subshape*/, const ShapeIndex& index) {
+                std::vector<const LogicalBuffer*> colocated_set;
+                AddBufferToColocatedSet(call_param, index, points_to_analysis,
+                                        &colocated_set);
+                AddBufferToColocatedSet(call_operand, index, points_to_analysis,
+                                        &colocated_set);
+                AddSetToColocatedBufferSets(colocated_set,
+                                            colocated_buffer_sets);
+              });
+        }
         ShapeUtil::ForEachSubshape(
             call_hlo->shape(),
             [this, call_hlo, root_hlo, &points_to_analysis,
diff --git a/tensorflow/compiler/xla/service/buffer_assignment_test.cc b/tensorflow/compiler/xla/service/buffer_assignment_test.cc
index 125ade2..bfd2092 100644
--- a/tensorflow/compiler/xla/service/buffer_assignment_test.cc
+++ b/tensorflow/compiler/xla/service/buffer_assignment_test.cc
@@ -1094,7 +1094,7 @@
 
   // Allocations for the call computation should not be thread-local.
   auto& call_param_alloc = GetTopLevelAllocation(*assignment, call_param);
-  EXPECT_FALSE(call_param_alloc.is_entry_computation_parameter());
+  EXPECT_TRUE(call_param_alloc.is_entry_computation_parameter());
   EXPECT_FALSE(call_param_alloc.maybe_live_out());
   EXPECT_FALSE(call_param_alloc.is_thread_local());
 
@@ -1253,16 +1253,18 @@
 
   auto assignment = RunBufferAssignment(module.get());
 
-  EXPECT_EQ(3, assignment->Allocations().size());
+  EXPECT_EQ(2, assignment->Allocations().size());
   // Buffers for call are colocated with the sub-computation.
   EXPECT_EQ(GetAllocation(*assignment, call, /*index=*/{}),
             GetAllocation(*assignment, sub_tuple, /*index=*/{}));
   EXPECT_EQ(GetAllocation(*assignment, call, /*index=*/{0}),
             GetAllocation(*assignment, sub_param, /*index=*/{}));
-  // The parameter isn't aliased with anything.
+
+  // The parameter isn't aliased with the result tuple, but it is aliased with
+  // the call operand.
   EXPECT_NE(GetTopLevelAllocation(*assignment, param),
             GetTopLevelAllocation(*assignment, sub_tuple));
-  EXPECT_NE(GetTopLevelAllocation(*assignment, param),
+  EXPECT_EQ(GetTopLevelAllocation(*assignment, param),
             GetTopLevelAllocation(*assignment, sub_param));
 }
 
@@ -1326,13 +1328,15 @@
             GetAllocation(*assignment, c_call, /*index=*/{0}));
   EXPECT_EQ(GetAllocation(*assignment, c_call, /*index=*/{0}),
             GetAllocation(*assignment, d_param, /*index=*/{0}));
-  // The parameters aren't aliased with anything.
+
   EXPECT_TRUE(BuffersDistinct({a_param}, {b_param}, *assignment));
   EXPECT_TRUE(BuffersDistinct({a_param}, {c_param}, *assignment));
   EXPECT_TRUE(BuffersDistinct({a_param}, {d_param}, *assignment));
-  EXPECT_TRUE(BuffersDistinct({b_param}, {c_param}, *assignment));
-  EXPECT_TRUE(BuffersDistinct({b_param}, {d_param}, *assignment));
-  EXPECT_TRUE(BuffersDistinct({c_param}, {d_param}, *assignment));
+
+  EXPECT_EQ(GetAllocation(*assignment, b_param, /*index=*/{0}),
+            GetAllocation(*assignment, c_param, /*index=*/{0}));
+  EXPECT_EQ(GetAllocation(*assignment, c_param, /*index=*/{0}),
+            GetAllocation(*assignment, d_param, /*index=*/{0}));
 }
 
 TEST_F(BufferAssignmentTest, BitcastAsOutput) {
@@ -2031,6 +2035,56 @@
   EXPECT_TRUE(BuffersDistinct({call1}, {call2}, *assignment));
 }
 
+TEST_F(BufferAssignmentTest, CallParamCoAllocation) {
+  const char* hlo_text = R"(
+HloModule CallParamCoAllocation
+
+Callee {
+  param0 = (f32[100],(f32[200],f32[300])) parameter(0)
+  param1 = s32[20] parameter(1)
+  ROOT constant = f32[] constant(1)
+}
+
+ENTRY Main {
+  entry_param0 = f32[100] parameter(0)
+  entry_param1 = s32[20]  parameter(1)
+  custom_call = (f32[200],f32[300]) custom-call(), custom_call_target="call-target"
+  call_op0 = (f32[100],(f32[200],f32[300])) tuple(entry_param0, custom_call)
+  ROOT call_result = f32[] call(call_op0, entry_param1), to_apply=Callee
+}
+)";
+
+  TF_ASSERT_OK_AND_ASSIGN(
+      std::unique_ptr<HloModule> module,
+      HloRunner::CreateModuleFromString(
+          hlo_text, legacy_flags::GetDebugOptionsFromFlags()));
+
+  auto buffers = RunBufferAssignment(module.get());
+
+  HloComputation* main = module->entry_computation();
+  HloComputation* callee = module->GetComputationWithName("Callee");
+  EXPECT_NE(callee, nullptr);
+
+  HloInstruction* param0 = callee->parameter_instruction(0);
+  HloInstruction* param1 = callee->parameter_instruction(1);
+
+  HloInstruction* entry_param0 = main->parameter_instruction(0);
+  HloInstruction* entry_param1 = main->parameter_instruction(1);
+  HloInstruction* custom_call = main->GetInstructionWithName("custom_call");
+
+  EXPECT_EQ(GetAllocation(*buffers, entry_param0, {}),
+            GetAllocation(*buffers, param0, {0}));
+  EXPECT_EQ(GetAllocation(*buffers, entry_param1, {}),
+            GetAllocation(*buffers, param1, {}));
+
+  EXPECT_EQ(GetAllocation(*buffers, custom_call, {}),
+            GetAllocation(*buffers, param0, {1}));
+  EXPECT_EQ(GetAllocation(*buffers, custom_call, {0}),
+            GetAllocation(*buffers, param0, {1, 0}));
+  EXPECT_EQ(GetAllocation(*buffers, custom_call, {1}),
+            GetAllocation(*buffers, param0, {1, 1}));
+}
+
 static bool IsPostOrderTraversal(
     const std::vector<const HloInstruction*>& sequence) {
   tensorflow::gtl::FlatSet<const HloInstruction*> seen_so_far;
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.h b/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.h
index 6dfc666..593575c 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.h
+++ b/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.h
@@ -39,13 +39,14 @@
 
   Status TransferLiteralToInfeed(se::StreamExecutor* executor,
                                  const LiteralSlice& literal) override;
-  Status TransferBufferToInfeed(se::StreamExecutor* executor, int64 size,
-                                const void* source) override;
   Status TransferLiteralFromOutfeed(se::StreamExecutor* executor,
                                     const Shape& literal_shape,
                                     Literal* literal) override;
 
  private:
+  Status TransferBufferToInfeed(se::StreamExecutor* executor, int64 size,
+                                const void* source);
+
   // Transfers infeed data to device. InfeedBuffer->Done() must be
   // called to clean up the memory allocated for InfeedBuffer.
   StatusOr<cpu::runtime::XfeedBuffer*> TransferBufferToInfeedInternal(
diff --git a/tensorflow/compiler/xla/service/elemental_ir_emitter.cc b/tensorflow/compiler/xla/service/elemental_ir_emitter.cc
index 004a80d..c516325 100644
--- a/tensorflow/compiler/xla/service/elemental_ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/elemental_ir_emitter.cc
@@ -468,6 +468,10 @@
     }
     case HloOpcode::kNegate:
       return ir_builder_->CreateFNeg(operand_value);
+    case HloOpcode::kReal:
+      return operand_value;
+    case HloOpcode::kImag:
+      return llvm::ConstantFP::get(operand_value->getType(), 0.0);
     default:
       return Unimplemented("unary floating-point op '%s'",
                            HloOpcodeString(op->opcode()).c_str());
diff --git a/tensorflow/compiler/xla/service/generic_transfer_manager.cc b/tensorflow/compiler/xla/service/generic_transfer_manager.cc
index 3373004..e314a46 100644
--- a/tensorflow/compiler/xla/service/generic_transfer_manager.cc
+++ b/tensorflow/compiler/xla/service/generic_transfer_manager.cc
@@ -158,11 +158,6 @@
   return Unimplemented("Generic transfer to Infeed");
 }
 
-Status GenericTransferManager::TransferBufferToInfeed(
-    se::StreamExecutor* executor, int64 size, const void* source) {
-  return Unimplemented("Generic transfer to Infeed");
-}
-
 Status GenericTransferManager::TransferLiteralFromOutfeed(
     se::StreamExecutor* executor, const Shape& literal_shape,
     Literal* literal) {
diff --git a/tensorflow/compiler/xla/service/generic_transfer_manager.h b/tensorflow/compiler/xla/service/generic_transfer_manager.h
index d216fe7..3cd002c 100644
--- a/tensorflow/compiler/xla/service/generic_transfer_manager.h
+++ b/tensorflow/compiler/xla/service/generic_transfer_manager.h
@@ -61,9 +61,6 @@
   int64 GetByteSizeRequirement(const Shape& shape) const override;
 
  protected:
-  Status TransferBufferToInfeed(se::StreamExecutor* executor, int64 size,
-                                const void* source) override;
-
   Status WriteSingleTupleIndexTable(
       se::Stream* stream,
       tensorflow::gtl::ArraySlice<se::DeviceMemoryBase> elements,
diff --git a/tensorflow/compiler/xla/service/gpu/gpu_compiler.cc b/tensorflow/compiler/xla/service/gpu/gpu_compiler.cc
index e1da8d9..5e5d893 100644
--- a/tensorflow/compiler/xla/service/gpu/gpu_compiler.cc
+++ b/tensorflow/compiler/xla/service/gpu/gpu_compiler.cc
@@ -354,16 +354,30 @@
     return;
   }
 
+  // We need ptxas >= 9.0 as a hard requirement, because we compile targeting
+  // PTX 6.0.  An older ptxas will just fail to compile any of our code.
+  //
   // ptxas 9.0 before 9.0.276 and ptxas 9.1 before 9.1.121 miscompile some
   // address calculations with large offsets (e.g. "load ptr + large_constant"),
   // b/70245379.
-  if ((vmaj == 9 && vmin == 0 && vdot < 276) ||
-      (vmaj == 9 && vmin == 1 && vdot < 121)) {
-    LOG(WARNING) << "*** WARNING *** You are using ptxas " << vmaj << "."
-                 << vmin << "." << vdot
-                 << ", which is in range [9.0.0, 9.0.276) + [9.1.0, 9.1.121). "
-                    "These versions are known to miscompile XLA code, leading "
-                    "to incorrect results or invalid-address errors.";
+  //
+  // ptxas 9.1.121 miscompiles some large multioutput fusions, again in a way
+  // that appears related to address calculations.  ptxas 9.2.88 appears to
+  // work, as far as we can tell.
+  if (vmaj < 9) {
+    LOG(ERROR)
+        << "You are using ptxas 8.x, but XLA requires ptxas 9.x (and strongly "
+           "prefers >= 9.2.88).  Compilation of XLA kernels below will likely "
+           "fail.\n\nYou do not need to update CUDA; cherry-picking the ptxas "
+           "binary is sufficient.";
+  } else if (vmaj == 9 && (vmin < 2 || (vmin == 2 && vdot < 88))) {
+    LOG(WARNING)
+        << "*** WARNING *** You are using ptxas " << vmaj << "." << vmin << "."
+        << vdot
+        << ", which older than 9.2.88. ptxas 9.x before 9.2.88 is known to "
+           "miscompile XLA code, leading to incorrect results or "
+           "invalid-address errors.\n\nYou do not need to update to CUDA "
+           "9.2.88; cherry-picking the ptxas binary is sufficient.";
   }
 }
 
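Dotted-version checks like the one above are easiest to state as a single lexicographic comparison; component-wise disjunctions such as (vmaj < 9 || vmin < 2 || vdot < 88) misfire on inputs like 9.3.10. A small sketch with a hypothetical OlderThan helper:

#include <cstdio>
#include <tuple>

bool OlderThan(int vmaj, int vmin, int vdot, int rmaj, int rmin, int rdot) {
  // std::tuple's operator< compares lexicographically.
  return std::make_tuple(vmaj, vmin, vdot) < std::make_tuple(rmaj, rmin, rdot);
}

int main() {
  std::printf("%d\n", OlderThan(9, 1, 121, 9, 2, 88));  // 1: warn
  std::printf("%d\n", OlderThan(9, 3, 10, 9, 2, 88));   // 0: fine
  return 0;
}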
@@ -391,6 +405,10 @@
     //  - 384.x before 384.108
     //  - 387.x before 387.40
     //  - 390.x before 390.10.
+    //
+    // TODO(jlebar): This list does not cover the address-calculation bug we've
+    // observed in ptxas 9.1.121.  Need to get a new safe range from nvidia
+    // corresponding to ptxas >= 9.2.88.
     auto vmaj = std::get<0>(version);
     auto vmin = std::get<1>(version);
     if ((vmaj == 384 && vmin < 108) ||  //
diff --git a/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.cc b/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.cc
index 6346653..6c23228 100644
--- a/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.cc
+++ b/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.cc
@@ -41,11 +41,9 @@
 // TODO(b/30467474) Once GPU infeed implementation settles, consider
 // folding back the cpu and gpu infeed implementations into a generic
 // one if possible.
-GpuTransferManager::GpuTransferManager()
-    : GenericTransferManager(
-          se::cuda::kCudaPlatformId,
-          /*pointer_size=*/llvm::DataLayout(gpu::GpuCompiler::kDataLayout)
-              .getPointerSize(0 /* default address space */)) {}
+GpuTransferManager::GpuTransferManager(se::Platform::Id id,
+                                       unsigned pointer_size)
+    : GenericTransferManager(id, pointer_size) {}
 
 Status GpuTransferManager::TransferLiteralToInfeed(
     se::StreamExecutor* executor, const LiteralSlice& literal) {
@@ -73,15 +71,6 @@
   return EnqueueBuffersToInfeed(executor, std::move(buffer_tree));
 }
 
-Status GpuTransferManager::TransferBufferToInfeed(se::StreamExecutor* executor,
-                                                  int64 size,
-                                                  const void* source) {
-  return InternalError(
-      "Attempted to transfer data to infeed on a GPU device using "
-      "TransferBufferToInfeed. This should be done using "
-      "TransferLiteralToInfeed instead.");
-}
-
 Status GpuTransferManager::EnqueueBuffersToInfeed(
     se::StreamExecutor* executor, ShapeTree<InfeedBuffer> buffers) {
   gpu::InfeedManager* infeed_manager = gpu::GetOrCreateInfeedManager();
@@ -188,13 +177,16 @@
 }  // namespace gpu
 }  // namespace xla
 
-static std::unique_ptr<xla::TransferManager> CreateGpuTransferManager() {
-  return xla::MakeUnique<xla::gpu::GpuTransferManager>();
+static std::unique_ptr<xla::TransferManager> CreateNVPTXTransferManager() {
+  return xla::MakeUnique<xla::gpu::GpuTransferManager>(
+      /*id=*/stream_executor::cuda::kCudaPlatformId,
+      /*pointer_size=*/llvm::DataLayout(xla::gpu::GpuCompiler::kDataLayout)
+          .getPointerSize(0 /* default address space */));
 }
 
 static bool InitModule() {
   xla::TransferManager::RegisterTransferManager(
-      stream_executor::cuda::kCudaPlatformId, &CreateGpuTransferManager);
+      stream_executor::cuda::kCudaPlatformId, &CreateNVPTXTransferManager);
   return true;
 }
 static bool module_initialized = InitModule();
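CreateNVPTXTransferManager is hooked up through the usual static-registration idiom: a namespace-scope bool forces the registering call to run during static initialization, so merely linking the object file installs the factory. A minimal standalone sketch of the idiom, with a hypothetical registry:

#include <cstdio>
#include <functional>
#include <map>
#include <string>

std::map<std::string, std::function<void()>>& Registry() {
  // Leaked on purpose, mirroring the common pattern that sidesteps
  // static-destruction-order issues.
  static auto* registry = new std::map<std::string, std::function<void()>>();
  return *registry;
}

static bool InitModule() {
  Registry()["nvptx"] = [] { std::printf("creating transfer manager\n"); };
  return true;
}
static bool module_initialized = InitModule();

int main() {
  Registry()["nvptx"]();  // the platform lookup would happen here
  return 0;
}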
diff --git a/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.h b/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.h
index 7a5fe69..dceeb9e 100644
--- a/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.h
+++ b/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.h
@@ -35,13 +35,11 @@
 // handles GPU-specific infeed.
 class GpuTransferManager : public GenericTransferManager {
  public:
-  GpuTransferManager();
+  GpuTransferManager(se::Platform::Id id, unsigned pointer_size);
   ~GpuTransferManager() override {}
 
   Status TransferLiteralToInfeed(se::StreamExecutor* executor,
                                  const LiteralSlice& literal) override;
-  Status TransferBufferToInfeed(se::StreamExecutor* executor, int64 size,
-                                const void* source) override;
   Status TransferLiteralFromOutfeed(se::StreamExecutor* executor,
                                     const Shape& literal_shape,
                                     Literal* literal) override;
diff --git a/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.cc b/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.cc
index d420863..6f2a7e1 100644
--- a/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.cc
+++ b/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.cc
@@ -145,7 +145,7 @@
 
   llvm::Value* typed_ir_value;
   if (llvm::isa<llvm::GlobalVariable>(ir_value)) {
-    typed_ir_value = llvm::ConstantExpr::getBitCast(
+    typed_ir_value = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
         llvm::cast<llvm::GlobalVariable>(ir_value), dest_type);
   } else {
     typed_ir_value =
diff --git a/tensorflow/compiler/xla/service/gpu/infeed_thunk.cc b/tensorflow/compiler/xla/service/gpu/infeed_thunk.cc
index 964efd3..fee6d2a 100644
--- a/tensorflow/compiler/xla/service/gpu/infeed_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/infeed_thunk.cc
@@ -30,45 +30,64 @@
 Status InfeedThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
                                     se::Stream* stream,
                                     HloExecutionProfiler* profiler) {
-  VLOG(2) << "Infeeding to GPU ";
+  VLOG(2) << "Infeeding to GPU: " << hlo_instruction()->ToString();
 
   auto op_profiler = profiler->MakeScopedInstructionProfiler(hlo_instruction());
-  // First copy the infeed data which is element 0 of the infeed instruction's
-  // two-tuple output (the other element is a token).
-  se::DeviceMemoryBase data_address =
-      buffer_allocations.GetDeviceAddress(infeed_slices_.element({0}));
-  InfeedManager* infeed_manager = GetOrCreateInfeedManager();
-  const Shape& data_shape =
-      ShapeUtil::GetTupleElementShape(hlo_instruction()->shape(), 0);
   ShapeTree<InfeedBuffer> infeed_buffers =
-      infeed_manager->BlockingGetNextDestination();
-  if (ShapeUtil::IsTuple(data_shape)) {
-    CHECK(!ShapeUtil::IsNestedTuple(data_shape));
-    // Transfer the tuple elements first.
-    std::vector<void*> tuple_element_addresses;
-    for (int i = 0; i < ShapeUtil::TupleElementCount(data_shape); ++i) {
-      const BufferAllocation::Slice& tuple_element_buffer =
-          infeed_slices_.element({0, i});
-      se::DeviceMemoryBase tuple_element_address =
-          buffer_allocations.GetDeviceAddress(tuple_element_buffer);
+      GetOrCreateInfeedManager()->BlockingGetNextDestination();
 
-      InfeedBuffer* buffer = infeed_buffers.mutable_element({i});
-      stream->ThenMemcpy(&tuple_element_address, *(buffer->device_memory()),
-                         buffer->length());
-      tuple_element_addresses.push_back(tuple_element_address.opaque());
-    }
-    // Transfer the tuple outer buffer.
-    auto host_size = tuple_element_addresses.size() * sizeof(void*);
-    stream->ThenMemcpy(&data_address, tuple_element_addresses.data(),
-                       host_size);
-  } else {
-    InfeedBuffer* buffer = infeed_buffers.mutable_element({});
-    stream->ThenMemcpy(&data_address, *(buffer->device_memory()),
-                       buffer->length());
+  {
+    // The infeed buffer has an extra outer tuple with a token. Adjust the index
+    // accordingly.
+    ShapeIndex index = {0};
+    std::function<void(std::vector<void*>*)> copy_tuple_contents =
+        [&](std::vector<void*>* tuple_element_addresses) {
+          const Shape& shape = ShapeUtil::GetSubshape(infeed_buffers.shape(),
+                                                      ShapeIndexView(index, 1));
+          // For the leaf buffers of the tuple copy the elements directly.
+          if (ShapeUtil::IsArray(shape)) {
+            const BufferAllocation::Slice& tuple_element_buffer =
+                infeed_slices_.element(index);
+            se::DeviceMemoryBase tuple_element_address =
+                buffer_allocations.GetDeviceAddress(tuple_element_buffer);
+
+            InfeedBuffer* buffer =
+                infeed_buffers.mutable_element(ShapeIndexView(index, 1));
+            stream->ThenMemcpy(&tuple_element_address,
+                               *(buffer->device_memory()), buffer->length());
+            tuple_element_addresses->push_back(tuple_element_address.opaque());
+            return;
+          }
+
+          const int64 tuple_element_count = ShapeUtil::TupleElementCount(shape);
+          index.push_back(0);
+          std::vector<void*> inner_tuple_element_addresses;
+          for (int64 i = 0; i < tuple_element_count; ++i) {
+            index.back() = i;
+            copy_tuple_contents(&inner_tuple_element_addresses);
+          }
+          index.pop_back();
+
+          // Create a buffer of pointers for non-leaf buffers.
+          CHECK_EQ(tuple_element_count, inner_tuple_element_addresses.size());
+          auto host_size = inner_tuple_element_addresses.size() * sizeof(void*);
+          se::DeviceMemoryBase tuple_address =
+              buffer_allocations.GetDeviceAddress(
+                  infeed_slices_.element(index));
+          stream->ThenMemcpy(&tuple_address,
+                             inner_tuple_element_addresses.data(), host_size);
+          tuple_element_addresses->push_back(tuple_address.opaque());
+        };
+
+    std::vector<void*> tuple_element_addresses;
+    copy_tuple_contents(&tuple_element_addresses);
+    CHECK_EQ(1, tuple_element_addresses.size());
   }
 
   // Construct top-level tuple of infeed containing the data and the token. Use
   // a nullptr for the token; it should never be dereferenced.
+  se::DeviceMemoryBase data_address =
+      buffer_allocations.GetDeviceAddress(infeed_slices_.element({0}));
   void* infeed_addresses[] = {data_address.opaque(), nullptr};
   se::DeviceMemoryBase top_level_address =
       buffer_allocations.GetDeviceAddress(infeed_slices_.element({}));
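The recursive copy_tuple_contents walk above generalizes the previous flat-tuple code (note the removed CHECK against nested tuples): array leaves are copied directly, and every interior tuple materializes a table of its children's device pointers, which in turn becomes one entry in its parent's table. A simplified host-side model with hypothetical types, ignoring streams and device memory:

#include <cstdio>
#include <vector>

struct Shape {
  std::vector<Shape> elements;  // empty => array leaf
};

void CopyTuple(const Shape& shape, int depth) {
  if (shape.elements.empty()) {
    std::printf("%*scopy leaf buffer\n", depth * 2, "");
    return;
  }
  for (const Shape& child : shape.elements) CopyTuple(child, depth + 1);
  std::printf("%*swrite %zu-entry pointer table\n", depth * 2, "",
              shape.elements.size());
}

int main() {
  // Mirrors a nested infeed shape like (f32[100], (f32[200], f32[300])).
  Shape shape{{Shape{}, Shape{{Shape{}, Shape{}}}}};
  CopyTuple(shape, 0);
  return 0;
}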
diff --git a/tensorflow/compiler/xla/service/gpu/ir_emission_utils.cc b/tensorflow/compiler/xla/service/gpu/ir_emission_utils.cc
index 388aa35..2799baa 100644
--- a/tensorflow/compiler/xla/service/gpu/ir_emission_utils.cc
+++ b/tensorflow/compiler/xla/service/gpu/ir_emission_utils.cc
@@ -242,15 +242,17 @@
        arguments_ptr});
 }
 
-llvm::Value* EmitShuffleDown(llvm::Value* value, llvm::Value* offset,
-                             llvm::IRBuilder<>* builder) {
+llvm::Value* EmitFullWarpShuffleDown(llvm::Value* value, llvm::Value* offset,
+                                     llvm::IRBuilder<>* builder) {
   int bit_width = value->getType()->getPrimitiveSizeInBits();
+  llvm::Value* all_warps_mask = builder->getInt32(-1);
 
   // Special case for efficiency
   if (value->getType()->isFloatTy() && bit_width == 32) {
     return llvm_ir::EmitCallToIntrinsic(
-        llvm::Intrinsic::nvvm_shfl_down_f32,
-        {value, offset, builder->getInt32(kWarpSize - 1)}, {}, builder);
+        llvm::Intrinsic::nvvm_shfl_sync_down_f32,
+        {all_warps_mask, value, offset, builder->getInt32(kWarpSize - 1)}, {},
+        builder);
   }
 
   // We must split values wider than 32 bits as the "shfl" instruction operates
@@ -264,10 +266,11 @@
   for (int i = 0; i < num_segments; ++i) {
     x = builder->CreateInsertElement(
         x,
-        llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::nvvm_shfl_down_i32,
-                                     {builder->CreateExtractElement(x, i),
-                                      offset, builder->getInt32(kWarpSize - 1)},
-                                     {}, builder),
+        llvm_ir::EmitCallToIntrinsic(
+            llvm::Intrinsic::nvvm_shfl_sync_down_i32,
+            {all_warps_mask, builder->CreateExtractElement(x, i), offset,
+             builder->getInt32(kWarpSize - 1)},
+            {}, builder),
         i);
   }
   return builder->CreateBitCast(
diff --git a/tensorflow/compiler/xla/service/gpu/ir_emission_utils.h b/tensorflow/compiler/xla/service/gpu/ir_emission_utils.h
index 59455f3..9bb4c42 100644
--- a/tensorflow/compiler/xla/service/gpu/ir_emission_utils.h
+++ b/tensorflow/compiler/xla/service/gpu/ir_emission_utils.h
@@ -125,13 +125,17 @@
                         llvm::IRBuilder<>* builder);
 
 // Emits code to shuffle data between threads of a warp. This has the same
-// semantics as the PTX "shfl.down" instruction [0] but works for values of any
-// size. The last operand of the emitted "shfl" is `kWarpSize - 1`.
+// semantics as the PTX "shfl.sync.down" instruction but also works for values
+// that aren't 32 bits in size. The last operand of the emitted "shfl" is
+// `kWarpSize - 1`.
 //
-// [0]
-// http://docs.nvidia.com/cuda/parallel-thread-execution/#data-movement-and-conversion-instructions-shfl
-llvm::Value* EmitShuffleDown(llvm::Value* value, llvm::Value* offset,
-                             llvm::IRBuilder<>* builder);
+// This function emits a "full-warp" shuffle, which all threads of a warp
+// participate in.  *Do not use this function from a divergent context:* You
+// can't correctly do so on both Volta and earlier GPUs.
+//
+// https://docs.nvidia.com/cuda/parallel-thread-execution/#data-movement-and-conversion-instructions-shfl-sync
+llvm::Value* EmitFullWarpShuffleDown(llvm::Value* value, llvm::Value* offset,
+                                     llvm::IRBuilder<>* builder);
 
 }  // namespace gpu
 }  // namespace xla
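EmitFullWarpShuffleDown exists to build the standard tree reduction across a warp, which is also why the callers below CHECK that the block size is a multiple of kWarpSize: every lane of every warp participates in the shuffle. A host-side model of the access pattern (plain C++, with an array standing in for the 32 lanes; real hardware performs all lane reads of a step simultaneously, hence the snapshot):

#include <algorithm>
#include <cstdio>

int main() {
  constexpr int kWarpSize = 32;
  float lane[kWarpSize];
  for (float& v : lane) v = 1.0f;  // every thread contributes 1

  for (int offset = kWarpSize / 2; offset > 0; offset /= 2) {
    float before[kWarpSize];
    std::copy(lane, lane + kWarpSize, before);
    for (int i = 0; i < kWarpSize; ++i)
      if (i + offset < kWarpSize) lane[i] += before[i + offset];  // shfl.down
  }
  std::printf("lane 0 holds the warp sum: %g\n", lane[0]);  // 32
  return 0;
}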
diff --git a/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc b/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc
index 673ba53..75bbbbe 100644
--- a/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc
+++ b/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc
@@ -918,10 +918,13 @@
             ir_builder_.CreateBitCast(partial_reduction_result_addresses[i],
                                       shuffle_ir_type->getPointerTo()),
             "partial_reduction_result");
+        CHECK_EQ(launch_dimensions.threads_per_block() % kWarpSize, 0)
+            << "Requires block size a multiple of the warp size, otherwise we "
+               "will read undefined elements.";
         ir_builder_.CreateStore(
-            EmitShuffleDown(partial_reduction_result,
-                            ir_builder_.getInt32(shuffle_distance),
-                            &ir_builder_),
+            EmitFullWarpShuffleDown(partial_reduction_result,
+                                    ir_builder_.getInt32(shuffle_distance),
+                                    &ir_builder_),
             ir_builder_.CreateBitCast(result_from_other_lane,
                                       shuffle_ir_type->getPointerTo()));
         TF_RETURN_IF_ERROR(EmitCallToNestedComputation(
@@ -1498,10 +1501,13 @@
             ir_builder_.CreateBitCast(partial_reduction_result_addresses[i],
                                       shuffle_ir_type->getPointerTo()),
             "partial_reduction_result");
+        CHECK_EQ(launch_dimensions.threads_per_block() % kWarpSize, 0)
+            << "Requires block size a multiple of the warp size, otherwise we "
+               "will read undefined elements.";
         ir_builder_.CreateStore(
-            EmitShuffleDown(partial_reduction_result,
-                            ir_builder_.getInt32(shuffle_distance),
-                            &ir_builder_),
+            EmitFullWarpShuffleDown(partial_reduction_result,
+                                    ir_builder_.getInt32(shuffle_distance),
+                                    &ir_builder_),
             ir_builder_.CreateBitCast(result_from_other_lane,
                                       shuffle_ir_type->getPointerTo()));
         TF_RETURN_IF_ERROR(EmitCallToNestedComputation(
diff --git a/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/BUILD b/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/BUILD
index 7de8f9e..da31c65 100644
--- a/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/BUILD
+++ b/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/BUILD
@@ -34,6 +34,7 @@
         "//tensorflow/compiler/xla/service/llvm_ir:llvm_util",
         "//tensorflow/core:lib",
         "//tensorflow/core:lib_internal",
+        "@llvm//:amdgpu_code_gen",
         "@llvm//:analysis",
         "@llvm//:bit_reader",
         "@llvm//:bit_writer",
diff --git a/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc b/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc
index a4e4e85..2b0d692 100644
--- a/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc
+++ b/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc
@@ -206,7 +206,7 @@
       codegen_opt_level = CodeGenOpt::None;
   }
   return WrapUnique(target->createTargetMachine(
-      triple.str(), llvm_ir::AsStringRef(cpu_name), "+ptx42", target_options,
+      triple.str(), llvm_ir::AsStringRef(cpu_name), "+ptx60", target_options,
       Optional<Reloc::Model>(RelocModel), Optional<CodeModel::Model>(CMModel),
       codegen_opt_level));
 }
diff --git a/tensorflow/compiler/xla/service/hlo_computation.cc b/tensorflow/compiler/xla/service/hlo_computation.cc
index 166a83f..441288d 100644
--- a/tensorflow/compiler/xla/service/hlo_computation.cc
+++ b/tensorflow/compiler/xla/service/hlo_computation.cc
@@ -898,4 +898,13 @@
   name_ = name_uniquer->GetUniqueName(name_);
 }
 
+HloInstruction* HloComputation::GetInstructionWithName(
+    tensorflow::StringPiece name) {
+  auto instructions_in_computation = instructions();
+  auto it = c_find_if(instructions_in_computation, [&](HloInstruction* instr) {
+    return instr->name() == name;
+  });
+  return it == instructions_in_computation.end() ? nullptr : *it;
+}
+
 }  // namespace xla
diff --git a/tensorflow/compiler/xla/service/hlo_computation.h b/tensorflow/compiler/xla/service/hlo_computation.h
index abc1da4..49ed659 100644
--- a/tensorflow/compiler/xla/service/hlo_computation.h
+++ b/tensorflow/compiler/xla/service/hlo_computation.h
@@ -365,6 +365,10 @@
     unique_id_ = id;
   }
 
+  // Returns the instruction in this computation that has name `name`.  Returns
+  // null if there is no such instruction.
+  HloInstruction* GetInstructionWithName(tensorflow::StringPiece name);
+
   int64 unique_id() const { return unique_id_; }
 
  private:
diff --git a/tensorflow/compiler/xla/service/hlo_domain_metadata.h b/tensorflow/compiler/xla/service/hlo_domain_metadata.h
index aa03081..f855f2a 100644
--- a/tensorflow/compiler/xla/service/hlo_domain_metadata.h
+++ b/tensorflow/compiler/xla/service/hlo_domain_metadata.h
@@ -71,12 +71,6 @@
 
   // Returns a string representation of the metadata.
   virtual string ToString() const = 0;
-
-  // Given a reachable set (the set of instructions which are reachable from
-  // each other via user/operand pathways, without crossing a kDomain
-  // instruction), makes sure that all of them have metadata attributes which
-  // are coherent with this metadata object.
-  virtual Status NormalizeInstructions(const Domain& domain) const = 0;
 };
 
 }  // namespace xla
diff --git a/tensorflow/compiler/xla/service/hlo_domain_remover.cc b/tensorflow/compiler/xla/service/hlo_domain_remover.cc
index e2e8200..67fad07 100644
--- a/tensorflow/compiler/xla/service/hlo_domain_remover.cc
+++ b/tensorflow/compiler/xla/service/hlo_domain_remover.cc
@@ -47,12 +47,12 @@
                       HloDomainVerifier::VerifyDomain(domain));
   if (ref_metadata != nullptr) {
     VLOG(4) << "Applying domain normalization: " << ref_metadata->ToString();
-    TF_RETURN_IF_ERROR(ref_metadata->NormalizeInstructions(domain));
+    TF_RETURN_IF_ERROR(remover_->normalizer_(domain, ref_metadata));
   } else {
     // No kDomain instruction was present within this domain, so call the
     // generic normalization functions and have them apply their heuristic.
     VLOG(2) << "Applying domain-less normalization";
-    TF_RETURN_IF_ERROR(remover_->normalizer_(domain));
+    TF_RETURN_IF_ERROR(remover_->normalizer_(domain, nullptr));
   }
   return Status::OK();
 }
diff --git a/tensorflow/compiler/xla/service/hlo_domain_remover.h b/tensorflow/compiler/xla/service/hlo_domain_remover.h
index 0c71dd3..c859e05 100644
--- a/tensorflow/compiler/xla/service/hlo_domain_remover.h
+++ b/tensorflow/compiler/xla/service/hlo_domain_remover.h
@@ -35,9 +35,10 @@
   // instructions in it with the same attributes (ie, sharding), a normalizer
   // function is tasked with applying attribute normalization on the
   // instructions within such domain.
   // within such domain.
-  HloDomainRemover(
-      tensorflow::StringPiece kind,
-      std::function<Status(const DomainMetadata::Domain&)> normalizer)
+  HloDomainRemover(tensorflow::StringPiece kind,
+                   std::function<Status(const DomainMetadata::Domain&,
+                                        const DomainMetadata* metadata)>
+                       normalizer)
       : kind_(kind.ToString()), normalizer_(std::move(normalizer)) {}
 
   tensorflow::StringPiece name() const override { return "domain_remover"; }
@@ -48,7 +49,9 @@
   class RunContext;
 
   string kind_;
-  std::function<Status(const DomainMetadata::Domain&)> normalizer_;
+  std::function<Status(const DomainMetadata::Domain&,
+                       const DomainMetadata* metadata)>
+      normalizer_;
 };
 
 }  // namespace xla
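
The normalizer callback gains a second parameter: the DomainMetadata that drives the normalization, or nullptr when the domain contained no kDomain instruction and the heuristic path applies. A self-contained sketch of the calling convention, with stand-in types in place of the XLA classes (hypothetical, for illustration):

    #include <functional>
    #include <iostream>

    struct Domain {};
    struct Metadata {};
    using Status = int;  // 0 plays the role of Status::OK().

    struct Remover {
      // Two-argument normalizer, as in the new HloDomainRemover.
      std::function<Status(const Domain&, const Metadata*)> normalizer;
    };

    int main() {
      Remover r{[](const Domain&, const Metadata* m) {
        std::cout << (m ? "metadata-driven" : "heuristic")
                  << " normalization\n";
        return 0;
      }};
      Domain d;
      Metadata m;
      r.normalizer(d, &m);       // kDomain metadata available
      r.normalizer(d, nullptr);  // domain-less: heuristic path
    }
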
diff --git a/tensorflow/compiler/xla/service/hlo_domain_test.cc b/tensorflow/compiler/xla/service/hlo_domain_test.cc
index 00b2c86..ffc18a0 100644
--- a/tensorflow/compiler/xla/service/hlo_domain_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_domain_test.cc
@@ -97,12 +97,6 @@
 
   string ToString() const override { return opname_; }
 
-  Status NormalizeInstructions(
-      const DomainMetadata::Domain& domain) const override {
-    // For the purposes of this test, nothing to do.
-    return Status::OK();
-  }
-
   static tensorflow::StringPiece KindName() { return "opname"; }
 
  private:
@@ -124,7 +118,8 @@
                                       std::move(user_side_metadata));
 }
 
-Status OpNameDomainNormalizer(const DomainMetadata::Domain& domain) {
+Status OpNameDomainNormalizer(const DomainMetadata::Domain& domain,
+                              const DomainMetadata* metadata) {
   // Nothing to do for the particular use this test makes of the OpName domains.
   return Status::OK();
 }
@@ -159,7 +154,7 @@
   EXPECT_FALSE(HasDomainEdge(module, "e", "d"));
 
   HloDomainRemover remover(ShardingMetadata::KindName(),
-                           NormalizeShardingDomain);
+                           ShardingMetadata::NormalizeShardingDomain);
   TF_ASSERT_OK_AND_ASSIGN(bool remover_changed, remover.Run(module));
   EXPECT_TRUE(remover_changed);
 
@@ -227,7 +222,7 @@
   EXPECT_FALSE(HasDomainEdge(module, "e", "d"));
 
   HloDomainRemover remover(ShardingMetadata::KindName(),
-                           NormalizeShardingDomain);
+                           ShardingMetadata::NormalizeShardingDomain);
   TF_ASSERT_OK_AND_ASSIGN(bool remover_changed, remover.Run(module));
   EXPECT_TRUE(remover_changed);
 
@@ -277,7 +272,7 @@
   LOG(INFO) << "Original module:\n" << module->ToString();
 
   HloDomainRemover remover(ShardingMetadata::KindName(),
-                           NormalizeShardingDomain);
+                           ShardingMetadata::NormalizeShardingDomain);
   TF_ASSERT_OK_AND_ASSIGN(bool remover_changed, remover.Run(module));
   EXPECT_FALSE(remover_changed);
 
@@ -324,7 +319,7 @@
   EXPECT_FALSE(HasDomainEdge(module, "e", "d"));
 
   HloDomainRemover sharding_remover(ShardingMetadata::KindName(),
-                                    NormalizeShardingDomain);
+                                    ShardingMetadata::NormalizeShardingDomain);
   TF_ASSERT_OK_AND_ASSIGN(bool sharding_remover_changed,
                           sharding_remover.Run(module));
   EXPECT_TRUE(sharding_remover_changed);
@@ -411,7 +406,7 @@
   }
 
   HloDomainRemover remover(ShardingMetadata::KindName(),
-                           NormalizeShardingDomain);
+                           ShardingMetadata::NormalizeShardingDomain);
   TF_ASSERT_OK_AND_ASSIGN(bool remover_changed, remover.Run(module));
   EXPECT_TRUE(remover_changed);
 
@@ -465,7 +460,7 @@
   TF_EXPECT_OK(module->entry_computation()->RemoveInstruction(tuple));
 
   HloDomainRemover remover(ShardingMetadata::KindName(),
-                           NormalizeShardingDomain);
+                           ShardingMetadata::NormalizeShardingDomain);
   TF_ASSERT_OK_AND_ASSIGN(bool remover_changed, remover.Run(module));
   EXPECT_TRUE(remover_changed);
 
diff --git a/tensorflow/compiler/xla/service/hlo_evaluator_typed_visitor.h b/tensorflow/compiler/xla/service/hlo_evaluator_typed_visitor.h
index 2ae5f8b..e1924a0 100644
--- a/tensorflow/compiler/xla/service/hlo_evaluator_typed_visitor.h
+++ b/tensorflow/compiler/xla/service/hlo_evaluator_typed_visitor.h
@@ -301,6 +301,14 @@
     return HandleFloor<ReturnT>(floor);
   }
 
+  Status HandleImag(HloInstruction* imag) override {
+    TF_ASSIGN_OR_RETURN(parent_->evaluated_[imag],
+                        ElementWiseUnaryOp(imag, [](ElementwiseT elem_operand) {
+                          return std::imag(elem_operand);
+                        }));
+    return Status::OK();
+  }
+
   Status HandleLog(HloInstruction* log) override {
     TF_ASSIGN_OR_RETURN(parent_->evaluated_[log],
                         ElementWiseUnaryOp(log, [](ElementwiseT elem_operand) {
@@ -604,6 +612,14 @@
     return Status::OK();
   }
 
+  Status HandleReal(HloInstruction* real) override {
+    TF_ASSIGN_OR_RETURN(parent_->evaluated_[real],
+                        ElementWiseUnaryOp(real, [](ElementwiseT elem_operand) {
+                          return std::real(elem_operand);
+                        }));
+    return Status::OK();
+  }
+
   template <
       typename NativeT,
       typename std::enable_if<!is_complex_t<NativeT>::value>::type* = nullptr>
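
HandleReal and HandleImag evaluate element-wise through std::real and std::imag. A short standalone illustration of the element-wise semantics, in plain C++ with no XLA types:

    #include <complex>
    #include <iostream>
    #include <vector>

    int main() {
      std::vector<std::complex<float>> xs = {{1.f, 2.f}, {3.f, -4.f}};
      for (const auto& x : xs) {
        // Per element: real part and imaginary part, as the evaluator computes.
        std::cout << std::real(x) << " " << std::imag(x) << "\n";  // 1 2 / 3 -4
      }
    }
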
diff --git a/tensorflow/compiler/xla/service/hlo_instruction.cc b/tensorflow/compiler/xla/service/hlo_instruction.cc
index 830ebfb..02139fa 100644
--- a/tensorflow/compiler/xla/service/hlo_instruction.cc
+++ b/tensorflow/compiler/xla/service/hlo_instruction.cc
@@ -386,6 +386,23 @@
                                        slice_sizes);
       break;
     }
+    case HloOpcode::kGather: {
+      TF_RET_CHECK(proto.operand_ids_size() == 2)
+          << "Gather instruction should have 2 operands but sees "
+          << proto.operand_ids_size();
+      TF_RET_CHECK(proto.has_gather_dimension_numbers())
+          << "Gather instruction should have GatherDimensionNumbers set.";
+      std::unique_ptr<GatherDimensionNumbers> gather_dimension_numbers =
+          MakeUnique<GatherDimensionNumbers>(proto.gather_dimension_numbers());
+      std::vector<int64> gather_window_bounds;
+      for (int64 bound : proto.gather_window_bounds()) {
+        gather_window_bounds.push_back(bound);
+      }
+      instruction =
+          CreateGather(proto.shape(), operands(0), operands(1),
+                       *gather_dimension_numbers, gather_window_bounds);
+      break;
+    }
     default: {
       instruction = WrapUnique(new HloInstruction(opcode, proto.shape()));
       for (const int64 operand_id : proto.operand_ids()) {
@@ -427,13 +444,6 @@
     instruction->set_sharding(sharding);
   }
 
-  if (proto.has_gather_dimension_numbers()) {
-    instruction->gather_dimension_numbers_ =
-        MakeUnique<GatherDimensionNumbers>(proto.gather_dimension_numbers());
-  }
-  for (int64 bound : proto.gather_window_bounds()) {
-    instruction->gather_window_bounds_.push_back(bound);
-  }
   return std::move(instruction);
 }
 
@@ -978,6 +988,8 @@
     case HloOpcode::kTrace:
     case HloOpcode::kHostCompute:
       return true;
+    case HloOpcode::kCrossReplicaSum:
+      return all_reduce_id().has_value();
     default:
       return false;
   }
@@ -1036,34 +1048,8 @@
     const Shape& shape, HloInstruction* operand, HloInstruction* gather_indices,
     const GatherDimensionNumbers& gather_dim_numbers,
     tensorflow::gtl::ArraySlice<int64> window_bounds) {
-  std::unique_ptr<HloInstruction> instruction =
-      WrapUnique(new HloInstruction(HloOpcode::kGather, shape));
-  instruction->AppendOperand(operand);
-  instruction->AppendOperand(gather_indices);
-  instruction->gather_dimension_numbers_ =
-      MakeUnique<GatherDimensionNumbers>(gather_dim_numbers);
-  c_copy(window_bounds, std::back_inserter(instruction->gather_window_bounds_));
-  return instruction;
-}
-
-/* static */ GatherDimensionNumbers HloInstruction::MakeGatherDimNumbers(
-    tensorflow::gtl::ArraySlice<int64> output_window_dims,
-    tensorflow::gtl::ArraySlice<int64> elided_window_dims,
-    tensorflow::gtl::ArraySlice<int64> gather_dims_to_operand_dims,
-    int64 index_vector_dim) {
-  GatherDimensionNumbers gather_dim_numbers;
-  for (int64 output_window_dim : output_window_dims) {
-    gather_dim_numbers.add_output_window_dims(output_window_dim);
-  }
-  for (int64 elided_window_dim : elided_window_dims) {
-    gather_dim_numbers.add_elided_window_dims(elided_window_dim);
-  }
-  for (int64 gather_dim_to_input_dim : gather_dims_to_operand_dims) {
-    gather_dim_numbers.add_gather_dims_to_operand_dims(gather_dim_to_input_dim);
-  }
-
-  gather_dim_numbers.set_index_vector_dim(index_vector_dim);
-  return gather_dim_numbers;
+  return MakeUnique<HloGatherInstruction>(shape, operand, gather_indices,
+                                          gather_dim_numbers, window_bounds);
 }
 
 /* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateDomain(
@@ -1127,6 +1113,7 @@
     case HloOpcode::kPad:
     case HloOpcode::kDynamicSlice:
     case HloOpcode::kSort:
+    case HloOpcode::kGather:
       clone = CloneWithNewOperandsImpl(shape, new_operands, context);
       break;
     // Unary ops.
@@ -1228,11 +1215,6 @@
                                 true_computation(), new_operands[2],
                                 false_computation());
       break;
-    case HloOpcode::kGather:
-      CHECK_EQ(new_operands.size(), 2);
-      clone = CreateGather(shape, new_operands[0], new_operands[1],
-                           *gather_dimension_numbers_, gather_window_bounds_);
-      break;
     case HloOpcode::kDomain:
       CHECK_EQ(new_operands.size(), 1);
       clone =
@@ -1539,11 +1521,6 @@
       return protobuf_util::ProtobufEquals(dot_dimension_numbers(),
                                            other.dot_dimension_numbers());
 
-    case HloOpcode::kGather:
-      return protobuf_util::ProtobufEquals(gather_dimension_numbers(),
-                                           other.gather_dimension_numbers()) &&
-             gather_window_bounds() == other.gather_window_bounds();
-
     // Remaining instructions with special values.
     case HloOpcode::kCall:
       return eq_computations(to_apply(), other.to_apply());
@@ -1590,6 +1567,7 @@
     case HloOpcode::kHostCompute:
     case HloOpcode::kPad:
     case HloOpcode::kDynamicSlice:
+    case HloOpcode::kGather:
       LOG(FATAL) << "Base class impl called for opcode with subclass: "
                  << opcode();
   }
@@ -1863,6 +1841,10 @@
   }
 }
 
+bool HloInstruction::IsCrossModuleAllReduce() const {
+  return opcode() == HloOpcode::kCrossReplicaSum && all_reduce_id();
+}
+
 string HloInstruction::ToStringWithCanonicalNameMap(
     const HloPrintOptions& options,
     CanonicalNameMap* canonical_name_map) const {
@@ -1955,11 +1937,6 @@
   if (dot_dimension_numbers_ != nullptr) {
     extra.push_back(DotDimensionNumbersToString());
   }
-  if (gather_dimension_numbers_ != nullptr) {
-    extra.push_back(GatherDimensionNumbersToString());
-    extra.push_back(
-        StrCat("window_bounds={", Join(gather_window_bounds(), ","), "}"));
-  }
 
   if (options.print_subcomputation_mode() ==
       HloPrintOptions::PrintSubcomputationMode::kNameOnly) {
@@ -2089,14 +2066,6 @@
   if (dot_dimension_numbers_ != nullptr) {
     *proto.mutable_dot_dimension_numbers() = *dot_dimension_numbers_;
   }
-  if (gather_dimension_numbers_ != nullptr) {
-    *proto.mutable_gather_dimension_numbers() = *gather_dimension_numbers_;
-  }
-  if (opcode() == HloOpcode::kGather) {
-    for (int64 bound : gather_window_bounds()) {
-      proto.add_gather_window_bounds(bound);
-    }
-  }
 
   if (has_sharding()) {
     *proto.mutable_sharding() = sharding().ToProto();
@@ -2857,26 +2826,6 @@
   return os << ToString(kind);
 }
 
-string HloInstruction::GatherDimensionNumbersToString() const {
-  CHECK_NE(gather_dimension_numbers_.get(), nullptr);
-  string output_window_dims =
-      StrCat("output_window_dims={",
-             Join(gather_dimension_numbers_->output_window_dims(), ","), "}");
-  string elided_window_dims =
-      StrCat("elided_window_dims={",
-             Join(gather_dimension_numbers_->elided_window_dims(), ","), "}");
-  string gather_dims_to_operand_dims = StrCat(
-      "gather_dims_to_operand_dims={",
-      Join(gather_dimension_numbers_->gather_dims_to_operand_dims(), ","), "}");
-  string index_vector_dim = StrCat(
-      "index_vector_dim=", gather_dimension_numbers_->index_vector_dim());
-
-  return Join<std::initializer_list<string>>(
-      {output_window_dims, elided_window_dims, gather_dims_to_operand_dims,
-       index_vector_dim},
-      ", ");
-}
-
 bool HloInstruction::CouldBeBitcast() const {
   switch (opcode_) {
     case HloOpcode::kTranspose:
@@ -3190,4 +3139,14 @@
 const std::vector<int64>& HloInstruction::dynamic_slice_sizes() const {
   return Cast<HloDynamicSliceInstruction>(this)->dynamic_slice_sizes();
 }
+
+const GatherDimensionNumbers& HloInstruction::gather_dimension_numbers() const {
+  return Cast<HloGatherInstruction>(this)->gather_dimension_numbers();
+}
+
+tensorflow::gtl::ArraySlice<int64> HloInstruction::gather_window_bounds()
+    const {
+  return Cast<HloGatherInstruction>(this)->gather_window_bounds();
+}
+
 }  // namespace xla
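
A kCrossReplicaSum now counts as having side effects, and as a cross-module all-reduce, exactly when its all_reduce_id is set. A compact sketch of that predicate with stand-in types (illustrative, not the real HloInstruction):

    #include <cstdint>
    #include <optional>

    enum class Opcode { kCrossReplicaSum, kAdd };

    struct Instr {
      Opcode opcode;
      std::optional<int64_t> all_reduce_id;

      // Mirrors HloInstruction::IsCrossModuleAllReduce.
      bool IsCrossModuleAllReduce() const {
        return opcode == Opcode::kCrossReplicaSum && all_reduce_id.has_value();
      }
    };
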
diff --git a/tensorflow/compiler/xla/service/hlo_instruction.h b/tensorflow/compiler/xla/service/hlo_instruction.h
index b392d65..180b2fb 100644
--- a/tensorflow/compiler/xla/service/hlo_instruction.h
+++ b/tensorflow/compiler/xla/service/hlo_instruction.h
@@ -700,13 +700,6 @@
   // when we plumb a primordial token from the entry computation.
   static std::unique_ptr<HloInstruction> CreateToken();
 
-  // Creates an instance of GatherDimensionNumbers.
-  static GatherDimensionNumbers MakeGatherDimNumbers(
-      tensorflow::gtl::ArraySlice<int64> output_window_dims,
-      tensorflow::gtl::ArraySlice<int64> elided_window_dims,
-      tensorflow::gtl::ArraySlice<int64> gather_dims_to_operand_dims,
-      int64 index_vector_dim);
-
   // Returns the opcode for this instruction.
   HloOpcode opcode() const { return opcode_; }
 
@@ -1081,19 +1074,6 @@
   // Returns the dump string of the dot dimension numbers.
   string DotDimensionNumbersToString() const;
 
-  const GatherDimensionNumbers& gather_dimension_numbers() const {
-    CHECK(gather_dimension_numbers_ != nullptr);
-    return *gather_dimension_numbers_;
-  }
-
-  tensorflow::gtl::ArraySlice<int64> gather_window_bounds() const {
-    CHECK_EQ(opcode(), HloOpcode::kGather);
-    return gather_window_bounds_;
-  }
-
-  // Returns the dump string of the gather dimension numbers.
-  string GatherDimensionNumbersToString() const;
-
   // Clones the HLO instruction. The clone will have the same opcode, shape, and
   // operands. After creation the clone has no uses. "this" (the instruction
   // cloned from) is not changed. Suffix is the string to append to the name of
@@ -1148,6 +1128,9 @@
   // Returns true if this instruction is elementwise on all its operands.
   bool IsElementwise() const;
 
+  // Returns true if this is a cross-module all-reduce instruction.
+  bool IsCrossModuleAllReduce() const;
+
   // Returns true if this elementwise instruction implicitly broadcasts operand
   // `operand_idx`.
   //
@@ -1460,6 +1443,12 @@
 
   // Delegates to HloDynamicSliceInstruction::dynamic_slice_sizes.
   const std::vector<int64>& dynamic_slice_sizes() const;
+
+  // Delegates to HloGatherInstruction::gather_dimension_numbers.
+  const GatherDimensionNumbers& gather_dimension_numbers() const;
+  // Delegates to HloGatherInstruction::gather_window_bounds.
+  tensorflow::gtl::ArraySlice<int64> gather_window_bounds() const;
+
   // Old methods kept for smooth subclassing transition END.
 
  protected:
@@ -1603,9 +1592,6 @@
   // Describes the dimension numbers used for a dot.
   std::unique_ptr<DotDimensionNumbers> dot_dimension_numbers_;
 
-  std::unique_ptr<GatherDimensionNumbers> gather_dimension_numbers_;
-  std::vector<int64> gather_window_bounds_;
-
   // Used to tag kCopy instructions that are eligible for copy elision.
   bool copy_elision_allowed_ = true;
 
diff --git a/tensorflow/compiler/xla/service/hlo_instruction_test.cc b/tensorflow/compiler/xla/service/hlo_instruction_test.cc
index 87c0489..b75a2bd 100644
--- a/tensorflow/compiler/xla/service/hlo_instruction_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_instruction_test.cc
@@ -24,6 +24,7 @@
 #include "tensorflow/compiler/xla/protobuf_util.h"
 #include "tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h"
 #include "tensorflow/compiler/xla/service/hlo_computation.h"
+#include "tensorflow/compiler/xla/service/hlo_instructions.h"
 #include "tensorflow/compiler/xla/service/hlo_parser.h"
 #include "tensorflow/compiler/xla/shape_util.h"
 #include "tensorflow/compiler/xla/test.h"
@@ -1369,7 +1370,7 @@
   HloInstruction* gather_instruction =
       builder.AddInstruction(HloInstruction::CreateGather(
           gather_result_shape, input, gather_indices,
-          HloInstruction::MakeGatherDimNumbers(
+          HloGatherInstruction::MakeGatherDimNumbers(
               /*output_window_dims=*/{4, 5, 6, 7, 8},
               /*elided_window_dims=*/{},
               /*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1405,7 +1406,7 @@
   HloInstruction* gather_instruction =
       builder.AddInstruction(HloInstruction::CreateGather(
           gather_result_shape, input, gather_indices,
-          HloInstruction::MakeGatherDimNumbers(
+          HloGatherInstruction::MakeGatherDimNumbers(
               /*output_window_dims=*/{4, 5, 6, 7, 8},
               /*elided_window_dims=*/{},
               /*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
diff --git a/tensorflow/compiler/xla/service/hlo_instructions.cc b/tensorflow/compiler/xla/service/hlo_instructions.cc
index 7ea42ca..702f808 100644
--- a/tensorflow/compiler/xla/service/hlo_instructions.cc
+++ b/tensorflow/compiler/xla/service/hlo_instructions.cc
@@ -291,8 +291,6 @@
       replica_group_ids_(replica_group_ids.begin(), replica_group_ids.end()),
       cross_replica_sum_barrier_(barrier.begin(), barrier.end()),
       all_reduce_id_(all_reduce_id) {
-  // TODO(b/79737069): Remove the CHECK when supported.
-  CHECK(!all_reduce_id_);
   for (auto operand : operands) {
     AppendOperand(operand);
   }
@@ -1914,4 +1912,93 @@
   return MakeUnique<HloDynamicSliceInstruction>(
       shape, new_operands[0], new_operands[1], dynamic_slice_sizes_);
 }
+
+HloGatherInstruction::HloGatherInstruction(
+    const Shape& shape, HloInstruction* operand, HloInstruction* gather_indices,
+    const GatherDimensionNumbers& gather_dim_numbers,
+    tensorflow::gtl::ArraySlice<int64> window_bounds)
+    : HloInstruction(HloOpcode::kGather, shape) {
+  AppendOperand(operand);
+  AppendOperand(gather_indices);
+  gather_dimension_numbers_ =
+      MakeUnique<GatherDimensionNumbers>(gather_dim_numbers);
+  c_copy(window_bounds, std::back_inserter(gather_window_bounds_));
+}
+
+string HloGatherInstruction::GatherDimensionNumbersToString() const {
+  CHECK(gather_dimension_numbers_ != nullptr);
+  string output_window_dims =
+      StrCat("output_window_dims={",
+             Join(gather_dimension_numbers_->output_window_dims(), ","), "}");
+  string elided_window_dims =
+      StrCat("elided_window_dims={",
+             Join(gather_dimension_numbers_->elided_window_dims(), ","), "}");
+  string gather_dims_to_operand_dims = StrCat(
+      "gather_dims_to_operand_dims={",
+      Join(gather_dimension_numbers_->gather_dims_to_operand_dims(), ","), "}");
+  string index_vector_dim = StrCat(
+      "index_vector_dim=", gather_dimension_numbers_->index_vector_dim());
+
+  return Join<std::initializer_list<string>>(
+      {output_window_dims, elided_window_dims, gather_dims_to_operand_dims,
+       index_vector_dim},
+      ", ");
+}
+
+/* static */ GatherDimensionNumbers HloGatherInstruction::MakeGatherDimNumbers(
+    tensorflow::gtl::ArraySlice<int64> output_window_dims,
+    tensorflow::gtl::ArraySlice<int64> elided_window_dims,
+    tensorflow::gtl::ArraySlice<int64> gather_dims_to_operand_dims,
+    int64 index_vector_dim) {
+  GatherDimensionNumbers gather_dim_numbers;
+  for (int64 output_window_dim : output_window_dims) {
+    gather_dim_numbers.add_output_window_dims(output_window_dim);
+  }
+  for (int64 elided_window_dim : elided_window_dims) {
+    gather_dim_numbers.add_elided_window_dims(elided_window_dim);
+  }
+  for (int64 gather_dim_to_input_dim : gather_dims_to_operand_dims) {
+    gather_dim_numbers.add_gather_dims_to_operand_dims(gather_dim_to_input_dim);
+  }
+
+  gather_dim_numbers.set_index_vector_dim(index_vector_dim);
+  return gather_dim_numbers;
+}
+
+HloInstructionProto HloGatherInstruction::ToProto() const {
+  HloInstructionProto proto = HloInstruction::ToProto();
+  *proto.mutable_gather_dimension_numbers() = gather_dimension_numbers();
+  for (int64 bound : gather_window_bounds()) {
+    proto.add_gather_window_bounds(bound);
+  }
+  return proto;
+}
+
+std::vector<string> HloGatherInstruction::ExtraAttributesToStringImpl(
+    const HloPrintOptions& options) const {
+  return {GatherDimensionNumbersToString(),
+          StrCat("window_bounds={", Join(gather_window_bounds(), ","), "}")};
+}
+
+bool HloGatherInstruction::IdenticalSlowPath(
+    const HloInstruction& other,
+    const std::function<bool(const HloComputation*, const HloComputation*)>&
+        eq_computations) const {
+  const auto& casted_other = static_cast<const HloGatherInstruction&>(other);
+  return protobuf_util::ProtobufEquals(
+             gather_dimension_numbers(),
+             casted_other.gather_dimension_numbers()) &&
+         gather_window_bounds() == casted_other.gather_window_bounds();
+}
+
+std::unique_ptr<HloInstruction> HloGatherInstruction::CloneWithNewOperandsImpl(
+    const Shape& shape,
+    tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+    HloCloneContext* context) const {
+  CHECK_EQ(new_operands.size(), 2);
+  return MakeUnique<HloGatherInstruction>(
+      shape, new_operands[0], new_operands[1], gather_dimension_numbers(),
+      gather_window_bounds());
+}
+
 }  // namespace xla
diff --git a/tensorflow/compiler/xla/service/hlo_instructions.h b/tensorflow/compiler/xla/service/hlo_instructions.h
index e922d94..65a93cd 100644
--- a/tensorflow/compiler/xla/service/hlo_instructions.h
+++ b/tensorflow/compiler/xla/service/hlo_instructions.h
@@ -1148,6 +1148,49 @@
   // ('start' is specified dynamically in the second operand of the operation).
   std::vector<int64> dynamic_slice_sizes_;
 };
+
+class HloGatherInstruction : public HloInstruction {
+ public:
+  explicit HloGatherInstruction(
+      const Shape& shape, HloInstruction* operand,
+      HloInstruction* gather_indices,
+      const GatherDimensionNumbers& gather_dim_numbers,
+      tensorflow::gtl::ArraySlice<int64> window_bounds);
+  const GatherDimensionNumbers& gather_dimension_numbers() const {
+    CHECK(gather_dimension_numbers_ != nullptr);
+    return *gather_dimension_numbers_;
+  }
+  tensorflow::gtl::ArraySlice<int64> gather_window_bounds() const {
+    return gather_window_bounds_;
+  }
+  // Returns the dump string of the gather dimension numbers.
+  string GatherDimensionNumbersToString() const;
+  // Returns a serialized representation of this instruction.
+  HloInstructionProto ToProto() const override;
+
+  // Creates an instance of GatherDimensionNumbers.
+  static GatherDimensionNumbers MakeGatherDimNumbers(
+      tensorflow::gtl::ArraySlice<int64> output_window_dims,
+      tensorflow::gtl::ArraySlice<int64> elided_window_dims,
+      tensorflow::gtl::ArraySlice<int64> gather_dims_to_operand_dims,
+      int64 index_vector_dim);
+
+ private:
+  std::vector<string> ExtraAttributesToStringImpl(
+      const HloPrintOptions& options) const override;
+  bool IdenticalSlowPath(
+      const HloInstruction& other,
+      const std::function<bool(const HloComputation*, const HloComputation*)>&
+          eq_computations) const override;
+  std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
+      const Shape& shape,
+      tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+      HloCloneContext* context) const override;
+
+  std::unique_ptr<GatherDimensionNumbers> gather_dimension_numbers_;
+  std::vector<int64> gather_window_bounds_;
+};
+
 }  // namespace xla
 
 #endif  // TENSORFLOW_COMPILER_XLA_SERVICE_HLO_INSTRUCTIONS_H_
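
Gather state (dimension numbers and window bounds) now lives on the HloGatherInstruction subclass, and MakeGatherDimNumbers moves with it. A rough standalone analogue of the builder, using plain vectors in place of the proto (hypothetical stand-in struct, not the real GatherDimensionNumbers):

    #include <cstdint>
    #include <vector>

    // Stand-in for the GatherDimensionNumbers proto.
    struct GatherDimNumbers {
      std::vector<int64_t> output_window_dims;
      std::vector<int64_t> elided_window_dims;
      std::vector<int64_t> gather_dims_to_operand_dims;
      int64_t index_vector_dim = 0;
    };

    // Copies each span into the matching repeated field, then sets the
    // scalar, as HloGatherInstruction::MakeGatherDimNumbers does.
    GatherDimNumbers MakeGatherDimNumbers(std::vector<int64_t> output_window,
                                          std::vector<int64_t> elided_window,
                                          std::vector<int64_t> to_operand,
                                          int64_t index_vector_dim) {
      GatherDimNumbers d;
      d.output_window_dims = std::move(output_window);
      d.elided_window_dims = std::move(elided_window);
      d.gather_dims_to_operand_dims = std::move(to_operand);
      d.index_vector_dim = index_vector_dim;
      return d;
    }
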
diff --git a/tensorflow/compiler/xla/service/hlo_module_group_metadata.cc b/tensorflow/compiler/xla/service/hlo_module_group_metadata.cc
index 6bcd7b0..3ffac2f 100644
--- a/tensorflow/compiler/xla/service/hlo_module_group_metadata.cc
+++ b/tensorflow/compiler/xla/service/hlo_module_group_metadata.cc
@@ -75,10 +75,23 @@
     if (tracked == nullptr) {
       return Status::OK();
     }
-    // Add the parent computation of this channel instruction and its peer
-    // computation (both must be while computations) as companions.
+
+    std::vector<HloComputation*> peers;
     if (IsChannelInstruction(hlo)) {
-      HloComputation* peer_computation = PeerComputation(hlo);
+      peers.push_back(PeerComputation(hlo));
+    } else if (hlo->IsCrossModuleAllReduce()) {
+      for (HloInstruction* instr : GetAllReduceGroup(*hlo->all_reduce_id())) {
+        if (instr == hlo) {
+          continue;
+        }
+        peers.push_back(instr->parent());
+      }
+    }
+
+    // Add the parent computation of this channel (or all-reduce) instruction
+    // and its peer computation(s) (all must be while computations) as
+    // companions.
+    for (HloComputation* peer_computation : peers) {
       const TrackedInstruction* peer_tracked =
           GetTrackedInstruction(peer_computation);
       TF_RET_CHECK(peer_tracked != nullptr)
@@ -175,7 +188,8 @@
 
 bool HloModuleGroupMetadata::InstructionCommunicates(
     HloInstruction* hlo) const {
-  return IsChannelInstruction(hlo) || IsCompanionInstruction(hlo);
+  return IsChannelInstruction(hlo) || IsCompanionInstruction(hlo) ||
+         hlo->IsCrossModuleAllReduce();
 }
 
 const HloModuleGroupMetadata::Channel& HloModuleGroupMetadata::GetChannel(
@@ -200,6 +214,13 @@
   }
 }
 
+const std::vector<HloInstruction*>& HloModuleGroupMetadata::GetAllReduceGroup(
+    int64 all_reduce_id) const {
+  auto it = all_reduce_map_.find(all_reduce_id);
+  CHECK(it != all_reduce_map_.end());
+  return it->second;
+}
+
 std::vector<HloModuleGroupMetadata::TrackedInstruction>
 HloModuleGroupMetadata::GetCompanionsPath(const HloInstruction* hlo) const {
   std::vector<TrackedInstruction> path;
@@ -278,10 +299,27 @@
       tracked_instructions_[hlo->to_apply()] =
           TrackedInstruction(hlo, ComputationKind::kCallFunction);
     }
+
+    // Group cross module all-reduce instructions by the all_reduce id.
+    if (hlo->IsCrossModuleAllReduce()) {
+      TF_RET_CHECK(channel_id_map_.find(*hlo->all_reduce_id()) ==
+                   channel_id_map_.end())
+          << "all_reduce_id " << *hlo->all_reduce_id()
+          << " is already used by a send/recv instruction";
+      all_reduce_map_[*hlo->all_reduce_id()].push_back(hlo);
+      max_channel_id_ = std::max(max_channel_id_, *hlo->all_reduce_id());
+      return Status::OK();
+    }
+
     if (!IsChannelInstruction(hlo)) {
       return Status::OK();
     }
 
+    TF_RET_CHECK(all_reduce_map_.find(hlo->channel_id()) ==
+                 all_reduce_map_.end())
+        << "channel id " << hlo->channel_id()
+        << " is already used by an all-reduce instruction";
+
     // Add a new channel if needed.
     if (channel_id_map_.find(hlo->channel_id()) == channel_id_map_.end()) {
       channels_.emplace_back();
@@ -324,6 +362,7 @@
     }
   }
   VLOG(2) << "Created " << channels_.size() << " channels";
+  VLOG(2) << "Created " << all_reduce_map_.size() << " all-reduce groups";
   return Status::OK();
 }
 
diff --git a/tensorflow/compiler/xla/service/hlo_module_group_metadata.h b/tensorflow/compiler/xla/service/hlo_module_group_metadata.h
index ffde3a3..9eea5c6 100644
--- a/tensorflow/compiler/xla/service/hlo_module_group_metadata.h
+++ b/tensorflow/compiler/xla/service/hlo_module_group_metadata.h
@@ -118,13 +118,17 @@
   // comment above on companion instructions.
   bool IsCompanionInstruction(HloInstruction* hlo) const;
 
-  // Returns true if the instruction is either a channel instruction or a
-  // companion instruction.
+  // Returns true if the instruction is either a channel instruction, a
+  // cross-module all-reduce instruction, or a companion instruction.
   bool InstructionCommunicates(HloInstruction* hlo) const;
 
   // Returns the Channel instance for the given channel id.
   const Channel& GetChannel(int64 channel_id) const;
 
+  // Returns the all-reduce instructions with the same all_reduce_id.
+  const std::vector<HloInstruction*>& GetAllReduceGroup(
+      int64 all_reduce_id) const;
+
   // Returns the computation that contains the peer channel instructions for
   // the given instruction.
   //
@@ -187,13 +191,14 @@
   // Returns all channels in the module group.
   const std::vector<Channel>& channels() const { return channels_; }
 
-  // Returns the maximum channel id used in the module group.
+  // Returns the maximum channel id or all_reduce_id used in the module group.
   int64 max_channel_id() const { return max_channel_id_; }
 
  private:
   Status Build();
 
-  // Record all channel instructions and While instructions.
+  // Record all channel instructions, cross-module AllReduce instructions, and
+  // While/Conditional/Call instructions.
   Status RecordInstructions();
 
   // Verifies the given HloModules are well-formed and follow the specification,
@@ -255,6 +260,9 @@
   // Map from channel ids to the index in channels_.
   tensorflow::gtl::FlatMap<int64, int64> channel_id_map_;
 
+  // Map from all-reduce ids to the all-reduce instructions.
+  tensorflow::gtl::FlatMap<int64, std::vector<HloInstruction*>> all_reduce_map_;
+
   // The maximum channel id used in the module group.
   int64 max_channel_id_ = -1;
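
all_reduce_map_ groups cross-module all-reduces by id, GetAllReduceGroup(id) returns every peer sharing that id, and Build() rejects an id already claimed by a send/recv channel. A small standalone sketch of the grouping, with stand-in types:

    #include <cassert>
    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    struct Instr { int64_t all_reduce_id; };

    int main() {
      // Analogue of all_reduce_map_: id -> peer all-reduce instructions.
      std::unordered_map<int64_t, std::vector<Instr*>> all_reduce_map;
      Instr a{7}, b{7}, c{9};
      for (Instr* i : {&a, &b, &c}) {
        all_reduce_map[i->all_reduce_id].push_back(i);
      }
      assert(all_reduce_map.at(7).size() == 2);  // a and b are peers
      assert(all_reduce_map.at(9).size() == 1);
    }
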
 
diff --git a/tensorflow/compiler/xla/service/hlo_module_group_util.cc b/tensorflow/compiler/xla/service/hlo_module_group_util.cc
index df1d562..9fd0ade 100644
--- a/tensorflow/compiler/xla/service/hlo_module_group_util.cc
+++ b/tensorflow/compiler/xla/service/hlo_module_group_util.cc
@@ -56,12 +56,17 @@
   };
 
   // If the given instruction is a companion instruction, we need to find the
-  // predecessors of all of its companion instructions.
+  // predecessors of all of its companion instructions. If the instruction is an
+  // all-reduce, we need to find the predecessors of all the peer all-reduce
+  // instructions.
   std::vector<HloInstruction*> instruction_group;
   if (metadata_.IsCompanionInstruction(instruction)) {
     for (HloInstruction* companion : metadata_.Companions(instruction)) {
       instruction_group.push_back(companion);
     }
+  } else if (instruction->IsCrossModuleAllReduce()) {
+    instruction_group =
+        metadata_.GetAllReduceGroup(*instruction->all_reduce_id());
   } else {
     instruction_group.push_back(instruction);
   }
@@ -112,12 +117,17 @@
   };
 
   // If the given instruction is a companion instruction, we need to find the
-  // successors of all of its companion instructions.
+  // successors of all of its companion instructions. If the instruction is an
+  // all-reduce, we need to find the successors of all its peer all-reduce
+  // instructions.
   std::vector<HloInstruction*> instruction_group;
   if (metadata_.IsCompanionInstruction(instruction)) {
     for (HloInstruction* companion : metadata_.Companions(instruction)) {
       instruction_group.push_back(companion);
     }
+  } else if (instruction->IsCrossModuleAllReduce()) {
+    instruction_group =
+        metadata_.GetAllReduceGroup(*instruction->all_reduce_id());
   } else {
     instruction_group.push_back(instruction);
   }
@@ -170,15 +180,17 @@
     HloInstruction* hlo = stack.top();
 
     // Find the instruction group of the currently visited instruction. The
-    // instruction group represents all companion instructions of the
-    // current instruction, and are considered to be a single entity for the
-    // purpose of the traversal (i.e., they must always be in the same visit
-    // state).
+    // instruction group represents all companion instructions of the current
+    // instruction, or all the all-reduce instructions that belong to the same
+    // group; either way, the group is treated as a single entity for the
+    // traversal (i.e., its members must always be in the same visit state).
     std::vector<HloInstruction*> instruction_group;
     if (metadata_.IsCompanionInstruction(hlo)) {
       for (HloInstruction* companion : metadata_.Companions(hlo)) {
         instruction_group.push_back(companion);
       }
+    } else if (hlo->IsCrossModuleAllReduce()) {
+      instruction_group = metadata_.GetAllReduceGroup(*hlo->all_reduce_id());
     } else {
       instruction_group.push_back(hlo);
     }
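
The predecessor, successor, and traversal helpers all pick the instruction group the same way: companions first, then cross-module all-reduce peers, otherwise the instruction alone. A condensed sketch of that selection (stand-in types, C++17):

    #include <cstdint>
    #include <map>
    #include <optional>
    #include <vector>

    struct Instr {
      std::optional<int64_t> all_reduce_id;
      bool IsCrossModuleAllReduce() const { return all_reduce_id.has_value(); }
    };

    struct Metadata {
      std::map<const Instr*, std::vector<Instr*>> companions;
      std::map<int64_t, std::vector<Instr*>> all_reduce_groups;
    };

    std::vector<Instr*> InstructionGroup(Instr* hlo, const Metadata& meta) {
      if (auto it = meta.companions.find(hlo); it != meta.companions.end()) {
        return it->second;  // all companion instructions
      }
      if (hlo->IsCrossModuleAllReduce()) {
        return meta.all_reduce_groups.at(*hlo->all_reduce_id);  // peer group
      }
      return {hlo};  // the instruction by itself
    }
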
diff --git a/tensorflow/compiler/xla/service/hlo_parser.cc b/tensorflow/compiler/xla/service/hlo_parser.cc
index f162d52..d387539 100644
--- a/tensorflow/compiler/xla/service/hlo_parser.cc
+++ b/tensorflow/compiler/xla/service/hlo_parser.cc
@@ -18,6 +18,7 @@
 #include "tensorflow/compiler/xla/literal.h"
 #include "tensorflow/compiler/xla/literal_util.h"
 #include "tensorflow/compiler/xla/service/hlo_domain_metadata.h"
+#include "tensorflow/compiler/xla/service/hlo_instructions.h"
 #include "tensorflow/compiler/xla/service/hlo_opcode.h"
 #include "tensorflow/compiler/xla/service/hlo_sharding_metadata.h"
 #include "tensorflow/compiler/xla/shape_util.h"
@@ -1192,11 +1193,12 @@
         return false;
       }
 
-      GatherDimensionNumbers dim_numbers = HloInstruction::MakeGatherDimNumbers(
-          /*output_window_dims=*/*output_window_dims,
-          /*elided_window_dims=*/*elided_window_dims,
-          /*gather_dims_to_operand_dims=*/*gather_dims_to_operand_dims,
-          /*index_vector_dim=*/*index_vector_dim);
+      GatherDimensionNumbers dim_numbers =
+          HloGatherInstruction::MakeGatherDimNumbers(
+              /*output_window_dims=*/*output_window_dims,
+              /*elided_window_dims=*/*elided_window_dims,
+              /*gather_dims_to_operand_dims=*/*gather_dims_to_operand_dims,
+              /*index_vector_dim=*/*index_vector_dim);
 
       instruction = builder->AddInstruction(HloInstruction::CreateGather(
           shape, /*operand=*/operands[0], /*gather_indices=*/operands[1],
diff --git a/tensorflow/compiler/xla/service/hlo_sharding_metadata.cc b/tensorflow/compiler/xla/service/hlo_sharding_metadata.cc
index 4f91d61..94f5a3b 100644
--- a/tensorflow/compiler/xla/service/hlo_sharding_metadata.cc
+++ b/tensorflow/compiler/xla/service/hlo_sharding_metadata.cc
@@ -245,21 +245,6 @@
 
 Status ApplyDomainSharding(const DomainMetadata::Domain& domain,
                            const HloSharding& sharding) {
-  // Here is the place to call external sharding normalizers, which are
-  // implemented in other modules (ie, spatial partitioning).
-  // The signature of the external normalizer function should be something
-  // like:
-  //
-  //   StatusOr<bool> Normalizer(const DomainMetadata::Domain&,
-  //                             const HloSharding& sharding);
-  //
-  // The function should return true if it has processed the domain
-  // normalization, false if domain was not one recognized by it, or an error.
-  // We will call the functions in order below, and fall back to local code if
-  // none of the external normalizers acted on the domain.
-  // External normalizers should not handle the cases that are already handled
-  // locally.
-
   // None of the external normalizers handled the domain sharding; try to see
   // whether this is a single sharding first.
   auto single_sharding = sharding.ExtractSingleSharding();
@@ -390,25 +375,36 @@
   return sharding_ != nullptr ? sharding_->ToString() : "{}";
 }
 
-Status ShardingMetadata::NormalizeInstructions(
-    const DomainMetadata::Domain& domain) const {
-  if (sharding_ != nullptr) {
-    VLOG(4) << "Normalizing sharding to " << sharding_->ToString() << ":";
-    TF_RETURN_IF_ERROR(ApplyDomainSharding(domain, *sharding_));
-    TF_RETURN_IF_ERROR(FixupPassThroughDomainLinks(domain, *sharding_));
+/*static*/ StatusOr<const ShardingMetadata*>
+ShardingMetadata::ToShardingMetadata(const DomainMetadata* metadata) {
+  if (metadata->Kind() != ShardingMetadata::KindName()) {
+    return Status(
+        tensorflow::error::INVALID_ARGUMENT,
+        "ShardingMetadata normalizer called with incorrect domain metadata");
   }
-  return Status::OK();
+  return static_cast<const ShardingMetadata*>(metadata);
 }
 
-Status NormalizeShardingDomain(const DomainMetadata::Domain& domain) {
-  TF_ASSIGN_OR_RETURN(std::unique_ptr<HloSharding> sharding,
-                      ExtractOriginalCommonSharding(domain.instructions));
-  if (sharding != nullptr) {
-    VLOG(4) << "Normalizing sharding-less domain to " << sharding->ToString()
-            << ":";
-    TF_RETURN_IF_ERROR(ApplyDomainSharding(domain, *sharding));
+Status ShardingMetadata::NormalizeShardingDomain(
+    const DomainMetadata::Domain& domain, const DomainMetadata* metadata) {
+  if (metadata != nullptr) {
+    TF_ASSIGN_OR_RETURN(const auto& sharding_metadata,
+                        ToShardingMetadata(metadata));
+    const HloSharding* sharding = sharding_metadata->sharding();
+    if (sharding != nullptr) {
+      VLOG(4) << "Normalizing sharding to " << sharding->ToString() << ":";
+      TF_RETURN_IF_ERROR(ApplyDomainSharding(domain, *sharding));
+      TF_RETURN_IF_ERROR(FixupPassThroughDomainLinks(domain, *sharding));
+    }
   } else {
-    VLOG(1) << "Unable to find common sharding";
+    TF_ASSIGN_OR_RETURN(std::unique_ptr<HloSharding> sharding,
+                        ExtractOriginalCommonSharding(domain.instructions));
+    if (sharding != nullptr) {
+      VLOG(4) << "Normalizing sharding-less domain to " << sharding->ToString();
+      TF_RETURN_IF_ERROR(ApplyDomainSharding(domain, *sharding));
+    } else {
+      VLOG(1) << "Unable to find common sharding";
+    }
   }
   return Status::OK();
 }
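
NormalizeShardingDomain is now a static on ShardingMetadata and branches on whether reference metadata is available: with metadata, its sharding is applied (plus the pass-through fixup); without, a common sharding is inferred from the domain's instructions. A standalone sketch of the control flow, with stand-in types rather than the XLA API:

    #include <iostream>
    #include <memory>
    #include <optional>

    struct Sharding { int id; };
    struct Domain { std::optional<Sharding> inferred_common; };
    struct Metadata { std::unique_ptr<Sharding> sharding; };

    void NormalizeShardingDomain(const Domain& domain,
                                 const Metadata* metadata) {
      if (metadata != nullptr) {
        if (metadata->sharding) {
          std::cout << "apply sharding " << metadata->sharding->id
                    << " and fix up pass-through links\n";
        }
      } else if (domain.inferred_common) {
        // Analogue of the ExtractOriginalCommonSharding fallback.
        std::cout << "apply inferred sharding "
                  << domain.inferred_common->id << "\n";
      } else {
        std::cout << "unable to find common sharding\n";
      }
    }
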
diff --git a/tensorflow/compiler/xla/service/hlo_sharding_metadata.h b/tensorflow/compiler/xla/service/hlo_sharding_metadata.h
index ec162c3..5e01fc0 100644
--- a/tensorflow/compiler/xla/service/hlo_sharding_metadata.h
+++ b/tensorflow/compiler/xla/service/hlo_sharding_metadata.h
@@ -38,23 +38,26 @@
 
   string ToString() const override;
 
-  Status NormalizeInstructions(
-      const DomainMetadata::Domain& domain) const override;
+  const HloSharding* sharding() const { return sharding_.get(); }
 
   static tensorflow::StringPiece KindName() { return "sharding"; }
 
+  static StatusOr<const ShardingMetadata*> ToShardingMetadata(
+      const DomainMetadata* metadata);
+
+  // Applies the given domain metadata to the given domain. If no metadata is
+  // given, applies sharding heuristics and normalizes the instructions whose
+  // sharding deviates from the one inferred to be the original. Policy-wise,
+  // HLO passes are allowed to create new unassigned instructions, but if they
+  // do create assigned ones, those have to conform to the surrounding ones.
+  static Status NormalizeShardingDomain(const DomainMetadata::Domain& domain,
+                                        const DomainMetadata* metadata);
+
  private:
   std::unique_ptr<HloSharding> sharding_;
 };
 
-// Within a set of instructions which had common sharding attributes before
-// entring the HLO passes pipeline, apply sharding heuristics and normalize the
-// instructions whose sharding deviates from the one which is inferred as to be
-// the original one.
-// Policy wise, HLO passes are allowed to create new unassigned instructions,
-// but if they do create assigned ones, they have to conform to the ones around.
-Status NormalizeShardingDomain(const DomainMetadata::Domain& domain);
-
 // Given an HLO graph edge between instruction and one of its operands, creates
 // a ShardingMetadata based kDomain instruction if the sharding between
 // instruction and operand changes. Returns nullptr if there is no need for a
diff --git a/tensorflow/compiler/xla/service/platform_util.cc b/tensorflow/compiler/xla/service/platform_util.cc
index 7c63c0a..39fe3c7 100644
--- a/tensorflow/compiler/xla/service/platform_util.cc
+++ b/tensorflow/compiler/xla/service/platform_util.cc
@@ -75,19 +75,6 @@
     auto* platform = platform_pair.second;
     auto compiler_status = Compiler::GetForPlatform(platform);
     if (compiler_status.ok()) {
-      if (platform->VisibleDeviceCount() > 0) {
-        LOG(INFO) << "platform " << platform->Name() << " present with "
-                  << platform->VisibleDeviceCount() << " visible devices";
-      } else {
-        LOG(WARNING) << "platform " << platform->Name() << " present but no "
-                     << "visible devices found";
-      }
-      // Note: currently we call zero device platforms "supported" on the basis
-      // that, if the platform support was linked in, it was probably intended
-      // to be used for execution, and this way we can flag an error.
-      //
-      // TODO(b/33730287) If we want an alternative version of this behavior we
-      // could add an --xla_fallback_to_host flag.
       platforms.push_back(platform);
     } else {
       LOG(INFO) << "platform " << platform->Name() << " present but no "
diff --git a/tensorflow/compiler/xla/service/shape_inference.cc b/tensorflow/compiler/xla/service/shape_inference.cc
index 70edf78..214146c 100644
--- a/tensorflow/compiler/xla/service/shape_inference.cc
+++ b/tensorflow/compiler/xla/service/shape_inference.cc
@@ -222,13 +222,16 @@
       return shape;
     case HloOpcode::kReal:
     case HloOpcode::kImag:
-      if (!ShapeUtil::ElementIsComplex(shape)) {
+      if (ShapeUtil::ElementIsComplex(shape)) {
+        return ShapeUtil::ComplexComponentShape(shape);
+      } else if (ShapeUtil::ElementIsFloating(shape)) {
+        return shape;
+      } else {
         return InvalidArgument(
-            "Expected element type in shape to be complex for real/imag "
-            "operation; got %s.",
+            "Expected element type in shape to be floating or complex for "
+            "real/imag operation; got %s.",
             PrimitiveType_Name(shape.element_type()).c_str());
       }
-      return ShapeUtil::ChangeElementType(shape, F32);
     case HloOpcode::kAbs:
       if (ShapeUtil::ElementIsComplex(shape)) {
         return ShapeUtil::ChangeElementType(
diff --git a/tensorflow/compiler/xla/service/shape_inference_test.cc b/tensorflow/compiler/xla/service/shape_inference_test.cc
index bafe14d..9b1ce14 100644
--- a/tensorflow/compiler/xla/service/shape_inference_test.cc
+++ b/tensorflow/compiler/xla/service/shape_inference_test.cc
@@ -17,6 +17,7 @@
 
 #include <string>
 
+#include "tensorflow/compiler/xla/service/hlo_instructions.h"
 #include "tensorflow/compiler/xla/shape_util.h"
 #include "tensorflow/compiler/xla/test.h"
 #include "tensorflow/compiler/xla/test_helpers.h"
@@ -1543,45 +1544,45 @@
 };
 
 TEST_F(GatherShapeInferenceTest, TensorFlowGather) {
-  TF_ASSERT_OK_AND_ASSIGN(
-      Shape gather_shape,
-      ShapeInference::InferGatherShape(matrix_64_48_, s64_vector_32_,
-                                       HloInstruction::MakeGatherDimNumbers(
-                                           /*output_window_dims=*/{0},
-                                           /*elided_window_dims=*/{1},
-                                           /*gather_dims_to_operand_dims=*/{1},
-                                           /*index_vector_dim=*/1),
-                                       /*window_bounds=*/{64, 1}));
+  TF_ASSERT_OK_AND_ASSIGN(Shape gather_shape,
+                          ShapeInference::InferGatherShape(
+                              matrix_64_48_, s64_vector_32_,
+                              HloGatherInstruction::MakeGatherDimNumbers(
+                                  /*output_window_dims=*/{0},
+                                  /*elided_window_dims=*/{1},
+                                  /*gather_dims_to_operand_dims=*/{1},
+                                  /*index_vector_dim=*/1),
+                              /*window_bounds=*/{64, 1}));
   EXPECT_TRUE(
       ShapeUtil::Equal(gather_shape, ShapeUtil::MakeShape(F32, {64, 32})))
       << ShapeUtil::HumanString(gather_shape);
 }
 
 TEST_F(GatherShapeInferenceTest, TensorFlowGatherV2) {
-  TF_ASSERT_OK_AND_ASSIGN(
-      Shape gather_shape,
-      ShapeInference::InferGatherShape(matrix_64_48_, s64_vector_32_,
-                                       HloInstruction::MakeGatherDimNumbers(
-                                           /*output_window_dims=*/{1},
-                                           /*elided_window_dims=*/{0},
-                                           /*gather_dims_to_operand_dims=*/{0},
-                                           /*index_vector_dim=*/1),
-                                       /*window_bounds=*/{1, 48}));
+  TF_ASSERT_OK_AND_ASSIGN(Shape gather_shape,
+                          ShapeInference::InferGatherShape(
+                              matrix_64_48_, s64_vector_32_,
+                              HloGatherInstruction::MakeGatherDimNumbers(
+                                  /*output_window_dims=*/{1},
+                                  /*elided_window_dims=*/{0},
+                                  /*gather_dims_to_operand_dims=*/{0},
+                                  /*index_vector_dim=*/1),
+                              /*window_bounds=*/{1, 48}));
   EXPECT_TRUE(
       ShapeUtil::Equal(gather_shape, ShapeUtil::MakeShape(F32, {32, 48})))
       << ShapeUtil::HumanString(gather_shape);
 }
 
 TEST_F(GatherShapeInferenceTest, TensorFlowGatherNd) {
-  TF_ASSERT_OK_AND_ASSIGN(
-      Shape gather_shape,
-      ShapeInference::InferGatherShape(matrix_64_48_, s64_4d_tensor_10_9_8_7_1_,
-                                       HloInstruction::MakeGatherDimNumbers(
-                                           /*output_window_dims=*/{4},
-                                           /*elided_window_dims=*/{0},
-                                           /*gather_dims_to_operand_dims=*/{0},
-                                           /*index_vector_dim=*/4),
-                                       /*window_bounds=*/{1, 48}));
+  TF_ASSERT_OK_AND_ASSIGN(Shape gather_shape,
+                          ShapeInference::InferGatherShape(
+                              matrix_64_48_, s64_4d_tensor_10_9_8_7_1_,
+                              HloGatherInstruction::MakeGatherDimNumbers(
+                                  /*output_window_dims=*/{4},
+                                  /*elided_window_dims=*/{0},
+                                  /*gather_dims_to_operand_dims=*/{0},
+                                  /*index_vector_dim=*/4),
+                              /*window_bounds=*/{1, 48}));
   EXPECT_TRUE(ShapeUtil::Equal(gather_shape,
                                ShapeUtil::MakeShape(F32, {10, 9, 8, 7, 48})))
       << ShapeUtil::HumanString(gather_shape);
@@ -1592,7 +1593,7 @@
       Shape gather_shape,
       ShapeInference::InferGatherShape(
           f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
-          HloInstruction::MakeGatherDimNumbers(
+          HloGatherInstruction::MakeGatherDimNumbers(
               /*output_window_dims=*/{4, 5, 6, 7, 8},
               /*elided_window_dims=*/{},
               /*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1609,7 +1610,7 @@
       Shape gather_shape,
       ShapeInference::InferGatherShape(
           f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_5_7_6_,
-          HloInstruction::MakeGatherDimNumbers(
+          HloGatherInstruction::MakeGatherDimNumbers(
               /*output_window_dims=*/{4, 5, 6, 7, 8},
               /*elided_window_dims=*/{},
               /*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1627,7 +1628,7 @@
       Shape gather_shape,
       ShapeInference::InferGatherShape(
           f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_5_10_9_7_6_,
-          HloInstruction::MakeGatherDimNumbers(
+          HloGatherInstruction::MakeGatherDimNumbers(
               /*output_window_dims=*/{4, 5, 6, 7, 8},
               /*elided_window_dims=*/{},
               /*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1646,7 +1647,7 @@
       Shape gather_shape,
       ShapeInference::InferGatherShape(
           f32_5d_tensor_50_49_48_47_46_, s64_vector_5_,
-          HloInstruction::MakeGatherDimNumbers(
+          HloGatherInstruction::MakeGatherDimNumbers(
               /*output_window_dims=*/{0, 1, 2, 3, 4},
               /*elided_window_dims=*/{},
               /*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1664,7 +1665,7 @@
   TF_ASSERT_OK_AND_ASSIGN(Shape gather_shape,
                           ShapeInference::InferGatherShape(
                               f32_5d_tensor_50_49_48_47_46_, s64_scalar_,
-                              HloInstruction::MakeGatherDimNumbers(
+                              HloGatherInstruction::MakeGatherDimNumbers(
                                   /*output_window_dims=*/{0, 1, 2, 3},
                                   /*elided_window_dims=*/{0},
                                   /*gather_dims_to_operand_dims=*/{0},
@@ -1679,10 +1680,11 @@
 TEST_F(GatherShapeInferenceTest, TupleShapedTensorInput) {
   StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
       tuple_shape_, s64_vector_32_,
-      HloInstruction::MakeGatherDimNumbers(/*output_window_dims=*/{0},
-                                           /*elided_window_dims=*/{1},
-                                           /*gather_dims_to_operand_dims=*/{1},
-                                           /*index_vector_dim=*/1),
+      HloGatherInstruction::MakeGatherDimNumbers(
+          /*output_window_dims=*/{0},
+          /*elided_window_dims=*/{1},
+          /*gather_dims_to_operand_dims=*/{1},
+          /*index_vector_dim=*/1),
       /*window_bounds=*/{64, 1});
   ASSERT_FALSE(statusor.ok());
   EXPECT_THAT(statusor.status().error_message(),
@@ -1693,10 +1695,11 @@
 TEST_F(GatherShapeInferenceTest, TupleShapedGatherIndicesInput) {
   StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
       s64_vector_32_, tuple_shape_,
-      HloInstruction::MakeGatherDimNumbers(/*output_window_dims=*/{0},
-                                           /*elided_window_dims=*/{1},
-                                           /*gather_dims_to_operand_dims=*/{1},
-                                           /*index_vector_dim=*/0),
+      HloGatherInstruction::MakeGatherDimNumbers(
+          /*output_window_dims=*/{0},
+          /*elided_window_dims=*/{1},
+          /*gather_dims_to_operand_dims=*/{1},
+          /*index_vector_dim=*/0),
       /*window_bounds=*/{64, 1});
   ASSERT_FALSE(statusor.ok());
   EXPECT_THAT(statusor.status().error_message(),
@@ -1707,10 +1710,11 @@
 TEST_F(GatherShapeInferenceTest, FloatingPointGatherIndicesInput) {
   StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
       s64_vector_32_, vector_32_,
-      HloInstruction::MakeGatherDimNumbers(/*output_window_dims=*/{0},
-                                           /*elided_window_dims=*/{1},
-                                           /*gather_dims_to_operand_dims=*/{1},
-                                           /*index_vector_dim=*/0),
+      HloGatherInstruction::MakeGatherDimNumbers(
+          /*output_window_dims=*/{0},
+          /*elided_window_dims=*/{1},
+          /*gather_dims_to_operand_dims=*/{1},
+          /*index_vector_dim=*/0),
       /*window_bounds=*/{64, 1});
   ASSERT_FALSE(statusor.ok());
   EXPECT_THAT(statusor.status().error_message(),
@@ -1722,7 +1726,7 @@
        InvalidGatherDimNumbers_NonAscendingWindowIndices) {
   StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
       f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
-      HloInstruction::MakeGatherDimNumbers(
+      HloGatherInstruction::MakeGatherDimNumbers(
           /*output_window_dims=*/{4, 5, 6, 8, 7},
           /*elided_window_dims=*/{},
           /*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1739,7 +1743,7 @@
        InvalidGatherDimNumbers_RepeatedWindowIndices) {
   StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
       f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
-      HloInstruction::MakeGatherDimNumbers(
+      HloGatherInstruction::MakeGatherDimNumbers(
           /*output_window_dims=*/{4, 5, 6, 7, 7},
           /*elided_window_dims=*/{},
           /*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1756,7 +1760,7 @@
        InvalidGatherDimNumbers_WindowIndexOutOfBounds) {
   StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
       f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
-      HloInstruction::MakeGatherDimNumbers(
+      HloGatherInstruction::MakeGatherDimNumbers(
           /*output_window_dims=*/{4, 5, 99, 100, 101},
           /*elided_window_dims=*/{},
           /*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1772,7 +1776,7 @@
        InvalidGatherDimNumbers_WindowIndexBarelyOutOfBounds) {
   StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
       f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
-      HloInstruction::MakeGatherDimNumbers(
+      HloGatherInstruction::MakeGatherDimNumbers(
           /*output_window_dims=*/{4, 5, 6, 7, 9},
           /*elided_window_dims=*/{},
           /*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1788,7 +1792,7 @@
        InvalidGatherDimNumbers_MismatchingElidedWindowDims) {
   StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
       f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
-      HloInstruction::MakeGatherDimNumbers(
+      HloGatherInstruction::MakeGatherDimNumbers(
           /*output_window_dims=*/{4, 5, 6, 7, 8},
           /*elided_window_dims=*/{4},
           /*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1806,7 +1810,7 @@
        InvalidGatherDimNumbers_OutOfBoundsWindowToInputMapping) {
   StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
       f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
-      HloInstruction::MakeGatherDimNumbers(
+      HloGatherInstruction::MakeGatherDimNumbers(
           /*output_window_dims=*/{4, 5, 6, 7, 8},
           /*elided_window_dims=*/{0, 1, 2, 3, 19},
           /*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1823,7 +1827,7 @@
        InvalidGatherDimNumbers_RepeatedWindowToInputMapping) {
   StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
       f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
-      HloInstruction::MakeGatherDimNumbers(
+      HloGatherInstruction::MakeGatherDimNumbers(
           /*output_window_dims=*/{4, 5, 6, 7, 8},
           /*elided_window_dims=*/{0, 1, 2, 3, 3},
           /*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1841,7 +1845,7 @@
        InvalidGatherDimNumbers_MismatchingGatherToInputMapping) {
   StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
       f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
-      HloInstruction::MakeGatherDimNumbers(
+      HloGatherInstruction::MakeGatherDimNumbers(
           /*output_window_dims=*/{4, 5, 6, 7, 8},
           /*elided_window_dims=*/{},
           /*gather_dims_to_operand_dims=*/{0, 1, 2, 3},
@@ -1860,7 +1864,7 @@
        InvalidGatherDimNumbers_OutOfBoundsGatherToInputMapping) {
   StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
       f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
-      HloInstruction::MakeGatherDimNumbers(
+      HloGatherInstruction::MakeGatherDimNumbers(
           /*output_window_dims=*/{4, 5, 6, 7, 8},
           /*elided_window_dims=*/{},
           /*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 7},
@@ -1878,7 +1882,7 @@
        InvalidGatherDimNumbers_RepeatedGatherToInputMapping) {
   StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
       f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
-      HloInstruction::MakeGatherDimNumbers(
+      HloGatherInstruction::MakeGatherDimNumbers(
           /*output_window_dims=*/{4, 5, 6, 7, 8},
           /*elided_window_dims=*/{},
           /*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 3},
@@ -1896,7 +1900,7 @@
        InvalidGatherDimNumbers_NonAscendingElidedWindowDims) {
   StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
       f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
-      HloInstruction::MakeGatherDimNumbers(
+      HloGatherInstruction::MakeGatherDimNumbers(
           /*output_window_dims=*/{4, 5, 6, 7, 8},
           /*elided_window_dims=*/{2, 1},
           /*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1911,7 +1915,7 @@
 TEST_F(GatherShapeInferenceTest, InvalidGatherDimNumbers_WindowBoundsTooLarge) {
   StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
       f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
-      HloInstruction::MakeGatherDimNumbers(
+      HloGatherInstruction::MakeGatherDimNumbers(
           /*output_window_dims=*/{4, 5, 6, 7},
           /*elided_window_dims=*/{2},
           /*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1928,7 +1932,7 @@
        InvalidGatherDimNumbers_MismatchingNumberOfWindowBounds) {
   StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
       f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
-      HloInstruction::MakeGatherDimNumbers(
+      HloGatherInstruction::MakeGatherDimNumbers(
           /*output_window_dims=*/{4, 5, 6, 7, 8},
           /*elided_window_dims=*/{},
           /*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1946,7 +1950,7 @@
        InvalidGatherDimNumbers_WindowBoundsNot1ForElidedDim) {
   StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
       f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
-      HloInstruction::MakeGatherDimNumbers(
+      HloGatherInstruction::MakeGatherDimNumbers(
           /*output_window_dims=*/{4, 5, 6, 7},
           /*elided_window_dims=*/{1},
           /*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1962,7 +1966,7 @@
 TEST_F(GatherShapeInferenceTest, OutOfBoundsGatherIndicesLeafDim) {
   StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
       f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_5_7_6_,
-      HloInstruction::MakeGatherDimNumbers(
+      HloGatherInstruction::MakeGatherDimNumbers(
           /*output_window_dims=*/{4, 5, 6, 7, 8},
           /*elided_window_dims=*/{},
           /*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
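
[Editorial note] The hunks above are a mechanical move of the MakeGatherDimNumbers factory from HloInstruction to HloGatherInstruction; the tests themselves enumerate the dimension-number invariants that InferGatherShape rejects (repeated or out-of-bounds window indices, non-ascending elided dims, mismatched or repeated gather-to-input mappings, and so on). A rough Python sketch of a subset of those checks, purely illustrative and not the XLA implementation:

def validate_gather_dim_numbers(output_window_dims, elided_window_dims,
                                gather_dims_to_operand_dims,
                                operand_rank, index_vector_size):
  # Window indices in the output must not repeat.
  if sorted(set(output_window_dims)) != sorted(output_window_dims):
    raise ValueError('output_window_dims must not repeat indices')
  # Elided dims must be unique, ascending, and valid operand dimensions.
  if list(elided_window_dims) != sorted(set(elided_window_dims)):
    raise ValueError('elided_window_dims must be ascending and unique')
  if any(not 0 <= d < operand_rank for d in elided_window_dims):
    raise ValueError('elided_window_dims out of operand bounds')
  # The gather->operand mapping must cover each index component exactly once.
  if len(gather_dims_to_operand_dims) != index_vector_size:
    raise ValueError('gather_dims_to_operand_dims must map every component')
  if len(set(gather_dims_to_operand_dims)) != len(gather_dims_to_operand_dims):
    raise ValueError('gather_dims_to_operand_dims must not repeat dims')
  if any(not 0 <= d < operand_rank for d in gather_dims_to_operand_dims):
    raise ValueError('gather_dims_to_operand_dims out of operand bounds')

# A well-formed set of dim numbers, matching the shapes used in these tests:
validate_gather_dim_numbers([4, 5, 6, 7, 8], [], [0, 1, 2, 3, 4],
                            operand_rank=5, index_vector_size=5)
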
diff --git a/tensorflow/compiler/xla/service/transfer_manager.h b/tensorflow/compiler/xla/service/transfer_manager.h
index 249bdcc..82c599e 100644
--- a/tensorflow/compiler/xla/service/transfer_manager.h
+++ b/tensorflow/compiler/xla/service/transfer_manager.h
@@ -167,16 +167,6 @@
       const se::Platform* platform);
 
  protected:
-  // Transfer a memory block of the given size from 'source' buffer to the
-  // Infeed interface of the device using the given executor.
-  //
-  // size is the size to transfer from source in bytes.
-  //
-  // source is the source data that must be in the target-dependent layout that
-  // the Infeed HLO used in the computation expects.
-  virtual Status TransferBufferToInfeed(se::StreamExecutor* executor,
-                                        int64 size, const void* source) = 0;
-
   // Transfer a memory block of the given size from the device source into the
   // 'destination' buffer.
   //
diff --git a/tensorflow/compiler/xla/shape_util.h b/tensorflow/compiler/xla/shape_util.h
index d576be7..17c1d7b 100644
--- a/tensorflow/compiler/xla/shape_util.h
+++ b/tensorflow/compiler/xla/shape_util.h
@@ -110,7 +110,7 @@
 class ShapeIndexView {
  public:
   ShapeIndexView(const ShapeIndex& shape_index, int64 offset = 0)
-      : indices_(shape_index.data() + offset, shape_index.size()) {
+      : indices_(shape_index.data() + offset, shape_index.size() - offset) {
     CHECK_LE(offset, shape_index.size());
   }
   ShapeIndexView(std::initializer_list<int64> indices) : indices_(indices) {}
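
[Editorial note] The one-line shape_util.h change above fixes a classic span bug: the view was constructed from data() + offset but kept the full size(), so any nonzero offset made it read offset elements past the end of the underlying ShapeIndex. The length must shrink by the same offset. A Python rendering of the fixed invariant (a list stands in for the C++ pointer/length span), matching the ShapeIndexViewTest added to shape_util_test.cc below:

class ShapeIndexView(object):
  # In the C++ version the bug read out of bounds because the span was
  # (data + offset, size) instead of (data + offset, size - offset).
  def __init__(self, shape_index, offset=0):
    assert offset <= len(shape_index)
    self._indices = list(shape_index)[offset:]  # length shrinks with offset

  def size(self):
    return len(self._indices)

view = ShapeIndexView([1, 2, 3, 4], offset=1)
assert view.size() == 3  # mirrors EXPECT_EQ(3, index_view.size()) below
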
diff --git a/tensorflow/compiler/xla/shape_util_test.cc b/tensorflow/compiler/xla/shape_util_test.cc
index 6cdb46d..ed2d16c 100644
--- a/tensorflow/compiler/xla/shape_util_test.cc
+++ b/tensorflow/compiler/xla/shape_util_test.cc
@@ -31,6 +31,15 @@
 
 using ::testing::ElementsAre;
 
+TEST(ShapeUtilTest, ShapeIndexViewTest) {
+  ShapeIndex index = {1, 2, 3, 4};
+  ShapeIndexView index_view(index, 1);
+  EXPECT_EQ(3, index_view.size());
+  EXPECT_EQ(ShapeIndexView({2, 3, 4}), index_view);
+  EXPECT_EQ(ShapeIndexView({3, 4}), index_view.ConsumeFront());
+  EXPECT_EQ(ShapeIndexView({2, 3}), index_view.ConsumeBack());
+}
+
 TEST(ShapeUtilTest, GetDimensionHelperCanNegativeIndex) {
   Shape matrix = ShapeUtil::MakeShape(F32, {2, 3});
   EXPECT_EQ(3, ShapeUtil::GetDimension(matrix, -1));
diff --git a/tensorflow/compiler/xla/tests/conditional_test.cc b/tensorflow/compiler/xla/tests/conditional_test.cc
index 35f1400..369663d 100644
--- a/tensorflow/compiler/xla/tests/conditional_test.cc
+++ b/tensorflow/compiler/xla/tests/conditional_test.cc
@@ -172,88 +172,95 @@
 // Test true and false computations that do not take any parameters.
 XLA_TEST_F(ConditionalOpTest, Parameters0) {
   XlaBuilder builder(TestName());
-  auto pred = ConstantR0<bool>(&builder, true);
+  XlaOp pred;
+  auto pred_arg = CreateR0Parameter<bool>(true, 0, "pred", &builder, &pred);
   auto operands = Tuple(&builder, {});
   auto true_computation = CreateR0ConstantComputation(56.0f);
   auto false_computation = CreateR0ConstantComputation(12.0f);
   Conditional(pred, operands, true_computation, operands, false_computation);
 
-  ComputeAndCompareR0<float>(&builder, 56.0f, {}, error_spec_);
+  ComputeAndCompareR0<float>(&builder, 56.0f, {pred_arg.get()}, error_spec_);
 }
 
 // Test true and false computations that take in 1 parameter.
 XLA_TEST_F(ConditionalOpTest, Parameters1) {
   XlaBuilder builder(TestName());
-  auto pred = ConstantR0<bool>(&builder, false);
+  XlaOp pred;
+  auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
   auto operand1 = ConstantR0<float>(&builder, 56.0f);
   auto operand2 = ConstantR0<float>(&builder, 12.0f);
   auto identity = CreateR0IdentityComputation();
   Conditional(pred, operand1, identity, operand2, identity);
 
-  ComputeAndCompareR0<float>(&builder, 12.0f, {}, error_spec_);
+  ComputeAndCompareR0<float>(&builder, 12.0f, {pred_arg.get()}, error_spec_);
 }
 
 // Test conditional with two different computations in the true and false cases
 // that take in different arguments.
 XLA_TEST_F(ConditionalOpTest, DiffComputationsDiffArgs) {
   XlaBuilder builder(TestName());
-  auto pred = ConstantR0<bool>(&builder, false);
+  XlaOp pred;
+  auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
   auto operand1 = ConstantR0<float>(&builder, 56.4f);
   auto operand2 = ConstantR0<float>(&builder, 12.6f);
   Conditional(pred, operand1, CreateR0CeilComputation(), operand2,
               CreateR0FloorComputation());
 
-  ComputeAndCompareR0<float>(&builder, 12.0f, {}, error_spec_);
+  ComputeAndCompareR0<float>(&builder, 12.0f, {pred_arg.get()}, error_spec_);
 }
 
 // Test conditional with two different computations in the true and false cases
 // that take in the same arguments.
 XLA_TEST_F(ConditionalOpTest, DiffComputationsSameArg) {
   XlaBuilder builder(TestName());
-  auto pred = ConstantR0<bool>(&builder, false);
+  XlaOp pred;
+  auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
   auto operand = ConstantR0<float>(&builder, 12.6f);
   Conditional(pred, operand, CreateR0CeilComputation(), operand,
               CreateR0FloorComputation());
 
-  ComputeAndCompareR0<float>(&builder, 12.0f, {}, error_spec_);
+  ComputeAndCompareR0<float>(&builder, 12.0f, {pred_arg.get()}, error_spec_);
 }
 
 // Test conditional with the same computation in the true and false cases but
 // take in different arguments.
 XLA_TEST_F(ConditionalOpTest, SameComputationDiffArgs) {
   XlaBuilder builder(TestName());
-  auto pred = ConstantR0<bool>(&builder, false);
+  XlaOp pred;
+  auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
   auto operand1 = ConstantR0<float>(&builder, 56.4f);
   auto operand2 = ConstantR0<float>(&builder, 12.6f);
   auto floor = CreateR0FloorComputation();
   Conditional(pred, operand1, floor, operand2, floor);
 
-  ComputeAndCompareR0<float>(&builder, 12.0f, {}, error_spec_);
+  ComputeAndCompareR0<float>(&builder, 12.0f, {pred_arg.get()}, error_spec_);
 }
 
 // Test conditional with the same computation in the true and false cases that
 // take in the same arguments.
 XLA_TEST_F(ConditionalOpTest, SameComputationSameArg) {
   XlaBuilder builder(TestName());
-  auto pred = ConstantR0<bool>(&builder, false);
+  XlaOp pred;
+  auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
   auto operand = ConstantR0<float>(&builder, 12.6f);
   auto floor = CreateR0FloorComputation();
   Conditional(pred, operand, floor, operand, floor);
 
-  ComputeAndCompareR0<float>(&builder, 12.0f, {}, error_spec_);
+  ComputeAndCompareR0<float>(&builder, 12.0f, {pred_arg.get()}, error_spec_);
 }
 
 // Test conditional with different instances of the same computation in the true
 // and false cases.
 XLA_TEST_F(ConditionalOpTest, SameComputationDiffInstances) {
   XlaBuilder builder(TestName());
-  auto pred = ConstantR0<bool>(&builder, false);
+  XlaOp pred;
+  auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
   auto operand1 = ConstantR0<float>(&builder, 56.4f);
   auto operand2 = ConstantR0<float>(&builder, 12.6f);
   Conditional(pred, operand1, CreateR0FloorComputation(), operand2,
               CreateR0FloorComputation());
 
-  ComputeAndCompareR0<float>(&builder, 12.0f, {}, error_spec_);
+  ComputeAndCompareR0<float>(&builder, 12.0f, {pred_arg.get()}, error_spec_);
 }
 
 // Test the case when a call invokes a computation that contains a conditional.
@@ -268,75 +275,83 @@
   auto inner_builder_result = inner_builder.Build();
 
   XlaBuilder builder(TestName());
-  auto pred = ConstantR0<bool>(&builder, false);
+  XlaOp pred;
+  auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
   auto operand1 = ConstantR0<float>(&builder, 56.4f);
   auto operand2 = ConstantR0<float>(&builder, 12.6f);
   Call(&builder, inner_builder_result.ConsumeValueOrDie(),
        {pred, operand1, operand2});
 
-  ComputeAndCompareR0<float>(&builder, 12.0f, {}, error_spec_);
+  ComputeAndCompareR0<float>(&builder, 12.0f, {pred_arg.get()}, error_spec_);
 }
 
 // Test true and false computations that take in 2 parameters and predicate is
 // true.
 XLA_TEST_F(ConditionalOpTest, Parameters2TrueBranch) {
   XlaBuilder builder(TestName());
-  auto pred = ConstantR0<bool>(&builder, true);
+  XlaOp pred;
+  auto pred_arg = CreateR0Parameter<bool>(true, 0, "pred", &builder, &pred);
   auto operand1 = ConstantR0<float>(&builder, 56.0f);
   auto operand2 = ConstantR0<float>(&builder, 12.0f);
   auto operands = Tuple(&builder, {operand1, operand2});
   Conditional(pred, operands, CreateR0TupleAddComputation(), operands,
               CreateR0TupleSubComputation());
 
-  ComputeAndCompareR0<float>(&builder, 68.0f, {}, error_spec_);
+  ComputeAndCompareR0<float>(&builder, 68.0f, {pred_arg.get()}, error_spec_);
 }
 
 // Test true and false computations that take in 2 parameters and predicate is
 // false.
 XLA_TEST_F(ConditionalOpTest, Parameters2FalseBranch) {
   XlaBuilder builder(TestName());
-  auto pred = ConstantR0<bool>(&builder, false);
+  XlaOp pred;
+  auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
   auto operand1 = ConstantR0<float>(&builder, 56.0f);
   auto operand2 = ConstantR0<float>(&builder, 12.0f);
   auto operands = Tuple(&builder, {operand1, operand2});
   Conditional(pred, operands, CreateR0TupleAddComputation(), operands,
               CreateR0TupleSubComputation());
 
-  ComputeAndCompareR0<float>(&builder, 44.0f, {}, error_spec_);
+  ComputeAndCompareR0<float>(&builder, 44.0f, {pred_arg.get()}, error_spec_);
 }
 
 // Test true and false computations that take in 2 array parameters and
 // predicate is true.
 XLA_TEST_F(ConditionalOpTest, Parameters2ArrayTrueBranch) {
   XlaBuilder builder(TestName());
-  auto pred = ConstantR0<bool>(&builder, true);
+  XlaOp pred;
+  auto pred_arg = CreateR0Parameter<bool>(true, 0, "pred", &builder, &pred);
   auto operand1 = ConstantR1<float>(&builder, {24.0f, 56.0f});
   auto operand2 = ConstantR1<float>(&builder, {10.0f, 11.0f});
   auto operands = Tuple(&builder, {operand1, operand2});
   Conditional(pred, operands, CreateR1TupleAddComputation(), operands,
               CreateR1TupleSubComputation());
 
-  ComputeAndCompareR1<float>(&builder, {34.0f, 67.0f}, {}, error_spec_);
+  ComputeAndCompareR1<float>(&builder, {34.0f, 67.0f}, {pred_arg.get()},
+                             error_spec_);
 }
 
 // Test true and false computations that take in 2 array parameters and
 // predicate is false.
 XLA_TEST_F(ConditionalOpTest, Parameters2ArrayFalseBranch) {
   XlaBuilder builder(TestName());
-  auto pred = ConstantR0<bool>(&builder, false);
+  XlaOp pred;
+  auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
   auto operand1 = ConstantR1<float>(&builder, {24.0f, 56.0f});
   auto operand2 = ConstantR1<float>(&builder, {10.0f, 11.0f});
   auto operands = Tuple(&builder, {operand1, operand2});
   Conditional(pred, operands, CreateR1TupleAddComputation(), operands,
               CreateR1TupleSubComputation());
 
-  ComputeAndCompareR1<float>(&builder, {14.0f, 45.0f}, {}, error_spec_);
+  ComputeAndCompareR1<float>(&builder, {14.0f, 45.0f}, {pred_arg.get()},
+                             error_spec_);
 }
 
 // Test true and false computations that return a tuple of scalars.
 XLA_TEST_F(ConditionalOpTest, ReturnTupleOfScalars) {
   XlaBuilder builder(TestName());
-  auto pred = ConstantR0<bool>(&builder, false);
+  XlaOp pred;
+  auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
   auto operands = Tuple(&builder, {ConstantR0<float>(&builder, 12.2f),
                                    ConstantR0<float>(&builder, 25.6f)});
   Conditional(pred, operands, CreateR0TupleCeilComputation(), operands,
@@ -346,13 +361,14 @@
       &builder,
       *LiteralUtil::MakeTuple({LiteralUtil::CreateR0<float>(12.0f).get(),
                                LiteralUtil::CreateR0<float>(25.0f).get()}),
-      {}, error_spec_);
+      {pred_arg.get()}, error_spec_);
 }
 
 // Test true and false computations that return a tuple of arrays.
 XLA_TEST_F(ConditionalOpTest, ReturnTupleOfArrays) {
   XlaBuilder builder(TestName());
-  auto pred = ConstantR0<bool>(&builder, true);
+  XlaOp pred;
+  auto pred_arg = CreateR0Parameter<bool>(true, 0, "pred", &builder, &pred);
   auto operands =
       Tuple(&builder, {ConstantR1<float>(&builder, {12.2f, 15.8f}),
                        ConstantR1<float>(&builder, {25.6f, 29.2f})});
@@ -364,7 +380,7 @@
       *LiteralUtil::MakeTuple(
           {LiteralUtil::CreateR1<float>({13.0f, 16.0f}).get(),
            LiteralUtil::CreateR1<float>({26.0f, 30.0f}).get()}),
-      {}, error_spec_);
+      {pred_arg.get()}, error_spec_);
 }
 
 // Test true and false computations that return a tuple of a predicate, a
@@ -393,7 +409,8 @@
   EXPECT_IS_OK(false_builder_result.status());
 
   XlaBuilder builder(TestName());
-  auto pred = ConstantR0<bool>(&builder, true);
+  XlaOp pred;
+  auto pred_arg = CreateR0Parameter<bool>(true, 0, "pred", &builder, &pred);
   auto operands = Tuple(&builder, {});
   Conditional(pred, operands, true_builder_result.ConsumeValueOrDie(), operands,
               false_builder_result.ConsumeValueOrDie());
@@ -404,7 +421,7 @@
           {LiteralUtil::CreateR0<bool>(true).get(),
            LiteralUtil::CreateR0<float>(12.2f).get(),
            LiteralUtil::CreateR1<float>({12.8f, 14.6f}).get()}),
-      {}, error_spec_);
+      {pred_arg.get()}, error_spec_);
 }
 
 // Test true and false computations that return a nested tuple.
@@ -438,7 +455,8 @@
   EXPECT_IS_OK(false_builder_result.status());
 
   XlaBuilder builder(TestName());
-  auto pred = ConstantR0<bool>(&builder, false);
+  XlaOp pred;
+  auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
   auto operands = Tuple(&builder, {});
   Conditional(pred, operands, true_builder_result.ConsumeValueOrDie(), operands,
               false_builder_result.ConsumeValueOrDie());
@@ -454,7 +472,7 @@
                {LiteralUtil::CreateR1<float>({62.1f, 67.4f}).get(),
                 LiteralUtil::CreateR0<float>(9.3f).get()})
                .get()}),
-      {}, error_spec_);
+      {pred_arg.get()}, error_spec_);
 }
 
 // Test conditional that takes in scalar operands in the form of external
@@ -515,8 +533,9 @@
   EXPECT_IS_OK(inner_builder_result.status());
 
   XlaBuilder builder(TestName());
-  auto pred1 = ConstantR0<bool>(&builder, true);
-  auto pred2 = ConstantR0<bool>(&builder, false);
+  XlaOp pred1, pred2;
+  auto pred1_arg = CreateR0Parameter<bool>(true, 0, "pred1", &builder, &pred1);
+  auto pred2_arg = CreateR0Parameter<bool>(false, 1, "pred2", &builder, &pred2);
   auto operand1 = ConstantR0<float>(&builder, 1.1f);
   auto operand2 = ConstantR0<float>(&builder, 12.2f);
   auto operand3 = ConstantR0<float>(&builder, 43.3f);
@@ -524,7 +543,8 @@
   Conditional(pred1, tuple_operand, inner_builder_result.ConsumeValueOrDie(),
               operand3, CreateR0IdentityComputation());
 
-  ComputeAndCompareR0<float>(&builder, 12.0f, {}, error_spec_);
+  ComputeAndCompareR0<float>(&builder, 12.0f,
+                             {pred1_arg.get(), pred2_arg.get()}, error_spec_);
 }
 
 XLA_TEST_F(ConditionalOpTest, ConditionalInNestedComputation) {
@@ -543,13 +563,14 @@
   EXPECT_IS_OK(inner_builder_result.status());
 
   XlaBuilder builder(TestName());
-  auto pred2 = ConstantR0<bool>(&builder, false);
+  XlaOp pred;
+  auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
   auto operand1 = ConstantR0<float>(&builder, 1.1f);
   auto operand2 = ConstantR0<float>(&builder, 12.2f);
-  auto tuple_operand = Tuple(&builder, {pred2, operand1, operand2});
+  auto tuple_operand = Tuple(&builder, {pred, operand1, operand2});
   Call(&builder, inner_builder_result.ConsumeValueOrDie(), {tuple_operand});
 
-  ComputeAndCompareR0<float>(&builder, 12.0f, {}, error_spec_);
+  ComputeAndCompareR0<float>(&builder, 12.0f, {pred_arg.get()}, error_spec_);
 }
 
 // Test a mismatch in the shape of the true operand and true computation.
@@ -604,8 +625,9 @@
 
   auto test_swap = [&](float a, float b) {
     XlaBuilder builder(TestName());
-    auto x = ConstantR0<float>(&builder, a);
-    auto y = ConstantR0<float>(&builder, b);
+    XlaOp x, y;
+    auto x_arg = CreateR0Parameter<float>(a, 0, "x", &builder, &x);
+    auto y_arg = CreateR0Parameter<float>(b, 1, "y", &builder, &y);
     auto tuple_operand = Tuple(&builder, {x, y});
     Call(&builder, main, {tuple_operand});
 
@@ -613,7 +635,7 @@
         &builder,
         *LiteralUtil::MakeTuple({LiteralUtil::CreateR0<float>(a).get(),
                                  LiteralUtil::CreateR0<float>(b).get()}),
-        {}, error_spec_);
+        {x_arg.get(), y_arg.get()}, error_spec_);
   };
 
   test_swap(3.11f, 9.4f);
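
[Editorial note] Every test in this file swaps a ConstantR0<bool> predicate (and, in the last hunk, the swapped floats as well) for a runtime parameter built with CreateR0Parameter, and threads the matching literal through ComputeAndCompare* as {pred_arg.get()}. Presumably this keeps the compiler from resolving the conditional at build time against a constant predicate, so the tests exercise the actual Conditional op. A toy Python sketch of the constant-vs-parameter distinction (not the XLA client API):

def build_conditional(pred, true_fn, false_fn):
  # Toy model: with a constant predicate the branch can be picked while
  # building; with a parameter the decision is deferred to execution time.
  if isinstance(pred, bool):  # "constant": resolvable at build time
    chosen = true_fn if pred else false_fn
    return lambda feeds: chosen()
  def run(feeds):             # "parameter": resolved per execution
    return true_fn() if feeds[pred] else false_fn()
  return run

cond = build_conditional('pred', lambda: 56.0, lambda: 12.0)
assert cond({'pred': True}) == 56.0
assert cond({'pred': False}) == 12.0
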
diff --git a/tensorflow/contrib/BUILD b/tensorflow/contrib/BUILD
index 60be9db..1322056 100644
--- a/tensorflow/contrib/BUILD
+++ b/tensorflow/contrib/BUILD
@@ -7,7 +7,6 @@
 
 load("//third_party/mpi:mpi.bzl", "if_mpi")
 load("@local_config_cuda//cuda:build_defs.bzl", "if_cuda")
-load("@local_config_tensorrt//:build_defs.bzl", "if_tensorrt")
 load("//tensorflow:tensorflow.bzl", "if_not_windows")
 load("//tensorflow:tensorflow.bzl", "if_not_windows_cuda")
 
@@ -103,6 +102,7 @@
         "//tensorflow/contrib/summary:summary",
         "//tensorflow/contrib/tensor_forest:init_py",
         "//tensorflow/contrib/tensorboard",
+        "//tensorflow/contrib/tensorrt:init_py",
         "//tensorflow/contrib/testing:testing_py",
         "//tensorflow/contrib/text:text_py",
         "//tensorflow/contrib/tfprof",
@@ -113,9 +113,7 @@
         "//tensorflow/contrib/util:util_py",
         "//tensorflow/python:util",
         "//tensorflow/python/estimator:estimator_py",
-    ] + if_mpi(["//tensorflow/contrib/mpi_collectives:mpi_collectives_py"]) + if_tensorrt([
-        "//tensorflow/contrib/tensorrt:init_py",
-    ]) + select({
+    ] + if_mpi(["//tensorflow/contrib/mpi_collectives:mpi_collectives_py"]) + select({
         "//tensorflow:with_kafka_support_windows_override": [],
         "//tensorflow:with_kafka_support": [
             "//tensorflow/contrib/kafka",
diff --git a/tensorflow/contrib/autograph/__init__.py b/tensorflow/contrib/autograph/__init__.py
index 4f8ef2d..7821c98 100644
--- a/tensorflow/contrib/autograph/__init__.py
+++ b/tensorflow/contrib/autograph/__init__.py
@@ -30,7 +30,6 @@
 from tensorflow.contrib.autograph.impl.api import RunMode
 from tensorflow.contrib.autograph.impl.api import to_code
 from tensorflow.contrib.autograph.core.errors import improved_errors
-from tensorflow.contrib.autograph.core.errors import rewrite_graph_construction_error
 from tensorflow.contrib.autograph.core.errors import GraphConstructionError
 from tensorflow.contrib.autograph.core.errors import TfRuntimeError
 from tensorflow.contrib.autograph.impl.api import to_graph
@@ -46,12 +45,14 @@
     'convert',
     'converted_call',
     'do_not_convert',
-    'improved_errors',
     'to_code',
     'to_graph',
     # Overloaded operators
     'operators',
-    'rewrite_graph_construction_error',
+    # Errors
+    'improved_errors',
+    'GraphConstructionError',
+    'TfRuntimeError',
     # Python language "extensions"
     'set_element_type',
     'set_loop_options',
diff --git a/tensorflow/contrib/autograph/converters/BUILD b/tensorflow/contrib/autograph/converters/BUILD
index b2e2e27..7cbba71 100644
--- a/tensorflow/contrib/autograph/converters/BUILD
+++ b/tensorflow/contrib/autograph/converters/BUILD
@@ -21,16 +21,18 @@
         "break_statements.py",
         "builtin_functions.py",
         "call_trees.py",
+        "conditional_expressions.py",
         "continue_statements.py",
         "control_flow.py",
         "decorators.py",
-        "ifexp.py",
-        "list_comprehension.py",
+        "directives.py",
+        "error_handlers.py",
+        "list_comprehensions.py",
         "lists.py",
         "logical_expressions.py",
         "name_scopes.py",
+        "return_statements.py",
         "side_effect_guards.py",
-        "single_return.py",
         "slices.py",
     ],
     srcs_version = "PY2AND3",
@@ -95,6 +97,17 @@
 )
 
 py_test(
+    name = "conditional_expressions_test",
+    srcs = ["conditional_expressions_test.py"],
+    srcs_version = "PY2AND3",
+    deps = [
+        ":converters",
+        "//tensorflow/contrib/autograph/core:test_lib",
+        "//tensorflow/python:client_testlib",
+    ],
+)
+
+py_test(
     name = "continue_statements_test",
     srcs = ["continue_statements_test.py"],
     srcs_version = "PY2AND3",
@@ -132,6 +145,18 @@
 )
 
 py_test(
+    name = "directives_test",
+    srcs = ["directives_test.py"],
+    srcs_version = "PY2AND3",
+    deps = [
+        ":converters",
+        "//tensorflow/contrib/autograph/core:test_lib",
+        "//tensorflow/contrib/autograph/lang",
+        "//tensorflow/python:client_testlib",
+    ],
+)
+
+py_test(
     name = "name_scopes_test",
     srcs = ["name_scopes_test.py"],
     deps = [
@@ -143,8 +168,8 @@
 )
 
 py_test(
-    name = "list_comprehension_test",
-    srcs = ["list_comprehension_test.py"],
+    name = "list_comprehensions_test",
+    srcs = ["list_comprehensions_test.py"],
     srcs_version = "PY2AND3",
     deps = [
         ":converters",
@@ -179,11 +204,6 @@
     name = "side_effect_guards_test",
     srcs = ["side_effect_guards_test.py"],
     srcs_version = "PY2AND3",
-    tags = [
-        # TODO(mdan): Fix.
-        "flaky",
-        "notap",
-    ],
     deps = [
         ":converters",
         "//tensorflow/contrib/autograph/core:test_lib",
@@ -192,8 +212,8 @@
 )
 
 py_test(
-    name = "single_return_test",
-    srcs = ["single_return_test.py"],
+    name = "return_statements_test",
+    srcs = ["return_statements_test.py"],
     srcs_version = "PY2AND3",
     deps = [
         ":converters",
@@ -204,8 +224,8 @@
 )
 
 py_test(
-    name = "ifexp_test",
-    srcs = ["ifexp_test.py"],
+    name = "error_handlers_test",
+    srcs = ["error_handlers_test.py"],
     srcs_version = "PY2AND3",
     deps = [
         ":converters",
diff --git a/tensorflow/contrib/autograph/converters/asserts.py b/tensorflow/contrib/autograph/converters/asserts.py
index e664a40..af2f20f 100644
--- a/tensorflow/contrib/autograph/converters/asserts.py
+++ b/tensorflow/contrib/autograph/converters/asserts.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-"""Converts Assert statements to their corresponding TF calls."""
+"""Converts assert statements to their corresponding TF calls."""
 
 from __future__ import absolute_import
 from __future__ import division
@@ -24,8 +24,8 @@
 from tensorflow.contrib.autograph.pyct import templates
 
 
-class AssertsTransformer(converter.Base):
-  """Transforms Print nodes to Call so they can be handled as functions."""
+class AssertTransformer(converter.Base):
+  """Transforms Assert nodes to Call so they can be handled as functions."""
 
   def visit_Assert(self, node):
     self.generic_visit(node)
@@ -46,4 +46,4 @@
 
 
 def transform(node, ctx):
-  return AssertsTransformer(ctx).visit(node)
+  return AssertTransformer(ctx).visit(node)
diff --git a/tensorflow/contrib/autograph/converters/asserts_test.py b/tensorflow/contrib/autograph/converters/asserts_test.py
index 2cd0e62..9c58ae3 100644
--- a/tensorflow/contrib/autograph/converters/asserts_test.py
+++ b/tensorflow/contrib/autograph/converters/asserts_test.py
@@ -32,8 +32,8 @@
     def test_fn(a):
       assert a > 0
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = asserts.transform(node, self.ctx)
+    node, ctx = self.prepare(test_fn, {})
+    node = asserts.transform(node, ctx)
 
     self.assertTrue(isinstance(node.body[0].body[0].value, gast.Call))
 
diff --git a/tensorflow/contrib/autograph/converters/break_statements.py b/tensorflow/contrib/autograph/converters/break_statements.py
index a990e35..2a60750 100644
--- a/tensorflow/contrib/autograph/converters/break_statements.py
+++ b/tensorflow/contrib/autograph/converters/break_statements.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-"""Canonicalizes break statements by de-sugaring into a control boolean."""
+"""Lowers break statements to conditionals."""
 
 from __future__ import absolute_import
 from __future__ import division
@@ -24,17 +24,22 @@
 from tensorflow.contrib.autograph.pyct.static_analysis.annos import NodeAnno
 
 
-# Tags for local state.
-BREAK_USED = 'break_used'
-CONTROL_VAR_NAME = 'control_var_name'
+class _Break(object):
+
+  def __init__(self):
+    self.used = False
+    self.control_var_name = None
+
+  def __repr__(self):
+    return 'used: %s, var: %s' % (self.used, self.control_var_name)
 
 
-class BreakStatementTransformer(converter.Base):
+class BreakTransformer(converter.Base):
   """Canonicalizes break statements into additional conditionals."""
 
   def visit_Break(self, node):
-    self.set_local(BREAK_USED, True)
-    var_name = self.get_local(CONTROL_VAR_NAME)
+    self.state[_Break].used = True
+    var_name = self.state[_Break].control_var_name
     # TODO(mdan): This will fail when expanded inside a top-level else block.
     template = """
       var_name = True
@@ -57,12 +62,12 @@
         block=block)
     return node
 
-  def _track_body(self, nodes, break_var):
-    self.enter_local_scope()
-    self.set_local(CONTROL_VAR_NAME, break_var)
+  def _process_body(self, nodes, break_var):
+    self.state[_Break].enter()
+    self.state[_Break].control_var_name = break_var
     nodes = self.visit_block(nodes)
-    break_used = self.get_local(BREAK_USED, False)
-    self.exit_local_scope()
+    break_used = self.state[_Break].used
+    self.state[_Break].exit()
     return nodes, break_used
 
   def visit_While(self, node):
@@ -70,7 +75,7 @@
     break_var = self.ctx.namer.new_symbol('break_', scope.referenced)
 
     node.test = self.visit(node.test)
-    node.body, break_used = self._track_body(node.body, break_var)
+    node.body, break_used = self._process_body(node.body, break_var)
     # A break in the else clause applies to the containing scope.
     node.orelse = self.visit_block(node.orelse)
 
@@ -101,7 +106,7 @@
 
     node.target = self.visit(node.target)
     node.iter = self.visit(node.iter)
-    node.body, break_used = self._track_body(node.body, break_var)
+    node.body, break_used = self._process_body(node.body, break_var)
     # A break in the else clause applies to the containing scope.
     node.orelse = self.visit_block(node.orelse)
 
@@ -138,4 +143,4 @@
 
 
 def transform(node, ctx):
-  return BreakStatementTransformer(ctx).visit(node)
+  return BreakTransformer(ctx).visit(node)
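
[Editorial note] The rewrite above drops the stringly-typed local-state tags (BREAK_USED, CONTROL_VAR_NAME) and the enter_local_scope/exit_local_scope pairs in favor of a typed state object: self.state[_Break] addresses a stack of _Break records, and enter()/exit() push and pop one record per loop being processed. A minimal sketch of how such a keyed state stack could work; the real mechanism lives in the converter base class and may differ:

class _StateStack(object):
  """One stack of state records; attribute access targets the top record."""

  def __init__(self, factory):
    object.__setattr__(self, '_factory', factory)
    object.__setattr__(self, '_levels', [factory()])

  def enter(self):
    self._levels.append(self._factory())

  def exit(self):
    self._levels.pop()

  def __getattr__(self, name):
    return getattr(object.__getattribute__(self, '_levels')[-1], name)

  def __setattr__(self, name, value):
    setattr(object.__getattribute__(self, '_levels')[-1], name, value)


class State(object):
  """Lazily creates one _StateStack per state type, as in self.state[_Break]."""

  def __init__(self):
    self._stacks = {}

  def __getitem__(self, state_type):
    if state_type not in self._stacks:
      self._stacks[state_type] = _StateStack(state_type)
    return self._stacks[state_type]


class _Break(object):  # same record shape as in the converter above
  def __init__(self):
    self.used = False
    self.control_var_name = None

state = State()
state[_Break].enter()
state[_Break].control_var_name = 'break__1'
state[_Break].used = True
assert state[_Break].used
state[_Break].exit()
assert not state[_Break].used  # back to the enclosing, untouched level
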
diff --git a/tensorflow/contrib/autograph/converters/break_statements_test.py b/tensorflow/contrib/autograph/converters/break_statements_test.py
index dcff1c5..c26ca29 100644
--- a/tensorflow/contrib/autograph/converters/break_statements_test.py
+++ b/tensorflow/contrib/autograph/converters/break_statements_test.py
@@ -25,7 +25,11 @@
 
 class BreakCanonicalizationTest(converter_testing.TestCase):
 
-  def test_basic_while(self):
+  def assertTransformedEquivalent(self, test_fn, *inputs):
+    with self.converted(test_fn, break_statements, {}) as result:
+      self.assertEqual(test_fn(*inputs), result.test_fn(*inputs))
+
+  def test_while_loop(self):
 
     def test_fn(x):
       v = []
@@ -36,15 +40,11 @@
         v.append(x)
       return v
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = break_statements.transform(node, self.ctx)
+    self.assertTransformedEquivalent(test_fn, 0)
+    self.assertTransformedEquivalent(test_fn, 1)
+    self.assertTransformedEquivalent(test_fn, 4)
 
-    with self.compiled(node) as result:
-      self.assertEqual([], result.test_fn(0))
-      self.assertEqual([], result.test_fn(1))
-      self.assertEqual([3], result.test_fn(4))
-
-  def test_basic_for(self):
+  def test_for_loop(self):
 
     def test_fn(a):
       v = []
@@ -55,18 +55,12 @@
         v.append(x)
       return v
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = break_statements.transform(node, self.ctx)
-
-    with self.compiled(node) as result:
+    with self.converted(test_fn, break_statements, {}) as result:
       # The break is incompletely canonicalized. The loop will not interrupt,
       # but the section following the break will be skipped.
-      self.assertEqual([], result.test_fn([]))
-      self.assertEqual([3, 3], result.test_fn([4, 4]))
-      self.assertEqual([3], result.test_fn([4, 5]))
       self.assertEqual([3], result.test_fn([5, 4]))
 
-  def test_deeply_nested(self):
+  def test_nested(self):
 
     def test_fn(x):
       v = []
@@ -83,13 +77,9 @@
         v.append(x)
       return v, u, w
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = break_statements.transform(node, self.ctx)
-
-    with self.compiled(node) as result:
-      self.assertEqual(([], [], []), result.test_fn(0))
-      self.assertEqual(([2, 1], [2], [0]), result.test_fn(3))
-      self.assertEqual(([10, 9, 8, 7], [10, 8], [6]), result.test_fn(11))
+    self.assertTransformedEquivalent(test_fn, 0)
+    self.assertTransformedEquivalent(test_fn, 3)
+    self.assertTransformedEquivalent(test_fn, 11)
 
   def test_nested_loops(self):
 
@@ -109,16 +99,12 @@
         v.append(x)
       return v, u
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = break_statements.transform(node, self.ctx)
+    self.assertTransformedEquivalent(test_fn, 0)
+    self.assertTransformedEquivalent(test_fn, 2)
+    self.assertTransformedEquivalent(test_fn, 3)
+    self.assertTransformedEquivalent(test_fn, 5)
 
-    with self.compiled(node) as result:
-      self.assertEqual(([], []), result.test_fn(0))
-      self.assertEqual(([1], []), result.test_fn(2))
-      self.assertEqual(([2, 1], [1]), result.test_fn(3))
-      self.assertEqual(([4, 3, 2, 1], [3, 1]), result.test_fn(5))
-
-  def test_loop_else(self):
+  def test_loop_orelse(self):
 
     def test_fn(x):
       v = []
@@ -134,13 +120,9 @@
         v.append(x)
       return v, u
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = break_statements.transform(node, self.ctx)
-
-    with self.compiled(node) as result:
-      self.assertEqual(([], []), result.test_fn(0))
-      self.assertEqual(([], [1]), result.test_fn(2))
-      self.assertEqual(([2], [1]), result.test_fn(3))
+    self.assertTransformedEquivalent(test_fn, 0)
+    self.assertTransformedEquivalent(test_fn, 2)
+    self.assertTransformedEquivalent(test_fn, 3)
 
 
 if __name__ == '__main__':
diff --git a/tensorflow/contrib/autograph/converters/builtin_functions_test.py b/tensorflow/contrib/autograph/converters/builtin_functions_test.py
index e9000e5..d5c3e2c 100644
--- a/tensorflow/contrib/autograph/converters/builtin_functions_test.py
+++ b/tensorflow/contrib/autograph/converters/builtin_functions_test.py
@@ -18,8 +18,6 @@
 from __future__ import division
 from __future__ import print_function
 
-import sys
-
 import six
 
 from tensorflow.contrib.autograph.converters import builtin_functions
@@ -36,55 +34,39 @@
     def test_fn(a):
       return len(a)
 
-    node = self.parse_and_analyze(test_fn, {'len': len})
-    node = builtin_functions.transform(node, self.ctx)
-
-    with self.compiled(node, array_ops.shape) as result:
+    with self.converted(test_fn, builtin_functions, {'len': len},
+                        array_ops.shape) as result:
       with self.test_session() as sess:
-        self.assertEqual(3,
-                         sess.run(
-                             result.test_fn(constant_op.constant([0, 0, 0]))))
-
-        self.assertEqual(3, result.test_fn([0, 0, 0]))
+        ops = result.test_fn(constant_op.constant([0, 0, 0]))
+        self.assertEqual(sess.run(ops), 3)
 
   def test_print(self):
 
+    if six.PY2:
+      return
+
     def test_fn(a):
-      print(a)
+      return print(a)
 
-    node = self.parse_and_analyze(test_fn, {'print': print})
-    node = builtin_functions.transform(node, self.ctx)
-
-    with self.compiled(node) as result:
+    with self.converted(test_fn, builtin_functions, {'print': print}) as result:
       with self.test_session() as sess:
-        try:
-          out_capturer = six.StringIO()
-          sys.stdout = out_capturer
-          result.test_fn(constant_op.constant('a'))
-          sess.run(sess.graph.get_operations())
-          self.assertEqual(out_capturer.getvalue(), 'a\n')
-        finally:
-          sys.stdout = sys.__stdout__
+        with self.assertPrints('a\n'):
+          sess.run(result.test_fn('a'))
 
-  def test_print_with_op_multiple_values(self):
+  def test_print_multiple_values(self):
+
+    if six.PY2:
+      return
 
     def test_fn(a, b, c):
-      print(a, b, c)
+      return print(a, b, c)
 
-    node = self.parse_and_analyze(test_fn, {'print': print})
-    node = builtin_functions.transform(node, self.ctx)
-
-    with self.compiled(node) as result:
+    with self.converted(test_fn, builtin_functions, {'print': print}) as result:
       with self.test_session() as sess:
-        try:
-          out_capturer = six.StringIO()
-          sys.stdout = out_capturer
-          result.test_fn(
-              constant_op.constant('a'), constant_op.constant(1), [2, 3])
-          sess.run(sess.graph.get_operations())
-          self.assertEqual(out_capturer.getvalue(), 'a 1 [2, 3]\n')
-        finally:
-          sys.stdout = sys.__stdout__
+        with self.assertPrints('a 1 [2, 3]\n'):
+          sess.run(
+              result.test_fn(
+                  constant_op.constant('a'), constant_op.constant(1), [2, 3]))
 
 
 if __name__ == '__main__':
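
[Editorial note] The builtin_functions tests replace the manual StringIO/sys.stdout capture-and-restore with an assertPrints context manager on the test case, and now skip under Python 2. A minimal sketch of what such a helper might look like; the real one belongs to converter_testing.TestCase and may differ:

import contextlib
import io


@contextlib.contextmanager
def assert_prints(expected):
  """Capture stdout for the duration of the block, then compare it."""
  captured = io.StringIO()
  with contextlib.redirect_stdout(captured):
    yield
  assert captured.getvalue() == expected, repr(captured.getvalue())

with assert_prints('a\n'):
  print('a')
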
diff --git a/tensorflow/contrib/autograph/converters/call_trees_test.py b/tensorflow/contrib/autograph/converters/call_trees_test.py
index 27d8281..8cdba65 100644
--- a/tensorflow/contrib/autograph/converters/call_trees_test.py
+++ b/tensorflow/contrib/autograph/converters/call_trees_test.py
@@ -36,37 +36,34 @@
     def test_fn_1(_):
       raise ValueError('This should not be called in the compiled version.')
 
-    def renamed_test_fn_1(a):
+    def other_test_fn_1(a):
       return a + 1
 
     def test_fn_2(a):
       return test_fn_1(a) + 1
 
-    node = self.parse_and_analyze(test_fn_2, {'test_fn_1': test_fn_1})
-    node = call_trees.transform(node, self.ctx)
+    ns = {'test_fn_1': test_fn_1}
+    node, ctx = self.prepare(test_fn_2, ns)
+    node = call_trees.transform(node, ctx)
 
-    with self.compiled(node) as result:
-      # Only test_fn_2 is transformed, so we'll insert renamed_test_fn_1
-      # manually.
-      result.renamed_test_fn_1 = renamed_test_fn_1
-      self.assertEquals(3, result.test_fn_2(1))
+    with self.compiled(node, ns) as result:
+      new_name, _ = ctx.namer.compiled_function_name(('test_fn_1',))
+      setattr(result, new_name, other_test_fn_1)
+      self.assertEquals(result.test_fn_2(1), 3)
 
   def test_dynamic_function(self):
 
     def test_fn_1():
-      raise ValueError('This should be masked by the mock.')
+      raise ValueError('This should be masked by the mock in self.compiled.')
 
     def test_fn_2(f):
       return f() + 3
 
-    node = self.parse_and_analyze(test_fn_2, {})
-    node = call_trees.transform(node, self.ctx)
-
-    with self.compiled(node) as result:
+    with self.converted(test_fn_2, call_trees, {}) as result:
       # 10 = 7 (from the mock) + 3 (from test_fn_2)
       self.assertEquals(10, result.test_fn_2(test_fn_1))
 
-  def test_simple_methods(self):
+  def test_basic_method(self):
 
     class TestClass(object):
 
@@ -76,49 +73,43 @@
       def test_fn_2(self, a):
         return self.test_fn_1(a) + 1
 
-    node = self.parse_and_analyze(
-        TestClass.test_fn_2, {'TestClass': TestClass},
+    ns = {'TestClass': TestClass}
+    node, ctx = self.prepare(
+        TestClass.test_fn_2,
+        ns,
         namer=converter_testing.FakeNoRenameNamer(),
         arg_types={'self': (TestClass.__name__, TestClass)})
-    node = call_trees.transform(node, self.ctx)
+    node = call_trees.transform(node, ctx)
 
-    with self.compiled(node) as result:
+    with self.compiled(node, ns) as result:
       tc = TestClass()
       self.assertEquals(3, result.test_fn_2(tc, 1))
 
-  def test_py_func_wrap_no_retval(self):
+  def test_py_func_no_retval(self):
 
     def test_fn(a):
       setattr(a, 'foo', 'bar')
 
-    node = self.parse_and_analyze(test_fn, {'setattr': setattr})
-    node = call_trees.transform(node, self.ctx)
-
-    with self.compiled(node) as result:
+    with self.converted(test_fn, call_trees, {'setattr': setattr}) as result:
       with self.test_session() as sess:
-        # The function has no return value, so we do some tricks to grab the
-        # generated py_func node and ensure its effect only happens at graph
-        # execution.
 
         class Dummy(object):
           pass
 
         a = Dummy()
         result.test_fn(a)
+        py_func_op, = sess.graph.get_operations()
         self.assertFalse(hasattr(a, 'foo'))
-        sess.run(sess.graph.get_operations()[0])
+        sess.run(py_func_op)
         self.assertEquals('bar', a.foo)
 
-  def test_py_func_wrap_known_function(self):
+  def test_py_func_known_function(self):
 
     def test_fn():
       return np.random.binomial(2, 0.5)
 
-    node = self.parse_and_analyze(test_fn, {'np': np})
-    node = call_trees.transform(node, self.ctx)
-
-    with self.compiled(node, dtypes.int64) as result:
-      result.np = np
+    with self.converted(test_fn, call_trees, {'np': np},
+                        dtypes.int64) as result:
       with self.test_session() as sess:
         self.assertTrue(isinstance(result.test_fn(), ops.Tensor))
         self.assertIn(sess.run(result.test_fn()), (0, 1, 2))
@@ -130,22 +121,17 @@
       a = math_ops.add(a, constant_op.constant(1))
       return a
 
-    node = self.parse_and_analyze(
-        test_fn, {
-            'math_ops': math_ops,
-            'constant_op': constant_op
-        },
+    ns = {'math_ops': math_ops, 'constant_op': constant_op}
+    node, ctx = self.prepare(
+        test_fn,
+        ns,
         arg_types=set(((math_ops.__name__,), (constant_op.__name__,))))
-    node = call_trees.transform(node, self.ctx)
+    node = call_trees.transform(node, ctx)
 
-    with self.compiled(node) as result:
-      result.math_ops = math_ops
-      result.constant_op = constant_op
+    with self.compiled(node, ns) as result:
       with self.test_session() as sess:
-        # Not renamed, because the converter doesn't rename the definition
-        # itself (the caller is responsible for that).
         result_tensor = result.test_fn(constant_op.constant(1))
-        self.assertEquals(3, sess.run(result_tensor))
+        self.assertEquals(sess.run(result_tensor), 3)
 
 
 if __name__ == '__main__':
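
[Editorial note] The call_trees tests show the refactored converter_testing API used throughout this change: self.converted(fn, converter_module, ns, *extra) parses, transforms, and compiles in one context manager, while self.prepare(fn, ns, ...) returns (node, ctx) for tests that apply the transform themselves or inspect the context (e.g. ctx.namer.compiled_function_name), followed by self.compiled(node, ns, ...). A sketch of the two shapes, assuming only the signatures visible in these hunks:

from tensorflow.contrib.autograph.converters import call_trees
from tensorflow.contrib.autograph.core import converter_testing


class ApiShapeTest(converter_testing.TestCase):

  def test_one_shot(self):
    def test_fn(x):
      return x + 1

    # converted(): parse, transform with one converter module, compile.
    with self.converted(test_fn, call_trees, {}) as result:
      self.assertEqual(result.test_fn(1), test_fn(1))

  def test_two_step(self):
    def test_fn(x):
      return x + 1

    ns = {}
    # prepare(): get the AST and conversion context, transform manually.
    node, ctx = self.prepare(test_fn, ns)
    node = call_trees.transform(node, ctx)
    with self.compiled(node, ns) as result:
      self.assertEqual(result.test_fn(1), test_fn(1))
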
diff --git a/tensorflow/contrib/autograph/converters/conditional_expressions.py b/tensorflow/contrib/autograph/converters/conditional_expressions.py
new file mode 100644
index 0000000..63f649d
--- /dev/null
+++ b/tensorflow/contrib/autograph/converters/conditional_expressions.py
@@ -0,0 +1,129 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Converts the ternary conditional operator."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.autograph.core import converter
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import templates
+from tensorflow.contrib.autograph.pyct.static_analysis.annos import NodeAnno
+
+
+class _FunctionDefs(object):
+
+  def __init__(self):
+    self.nodes = []
+
+
+class _Statement(object):
+
+  def __init__(self):
+    self.scope = None
+
+
+class ConditionalExpressionTransformer(converter.Base):
+  """Converts contitional expressions to functional form."""
+
+  def _postprocess_statement(self, node):
+    """Inserts any separate functions that node may use."""
+    replacements = []
+    for def_node in self.state[_FunctionDefs].nodes:
+      replacements.extend(def_node)
+    replacements.append(node)
+    node = replacements
+    # The corresponding enter is called by self.visit_block (see _process_block)
+    self.state[_FunctionDefs].exit()
+    return node, None
+
+  def _create_branch(self, expr, name_stem):
+    scope = self.state[_Statement].scope
+    name = self.ctx.namer.new_symbol(name_stem, scope.referenced)
+    template = """
+      def name():
+        return expr,
+    """
+    node = templates.replace(template, name=name, expr=expr)
+    self.state[_FunctionDefs].nodes.append(node)
+    return name
+
+  def visit_IfExp(self, node):
+    if anno.hasanno(node.test, anno.Basic.QN):
+      name_root = anno.getanno(node.test, anno.Basic.QN).ssf()
+    else:
+      name_root = 'ifexp'
+
+    true_fn_name = self._create_branch(node.body, '%s_true' % name_root)
+    false_fn_name = self._create_branch(node.orelse, '%s_false' % name_root)
+
+    return templates.replace_as_expression(
+        'ag__.utils.run_cond(test, true_fn_name, false_fn_name)',
+        test=node.test,
+        true_fn_name=true_fn_name,
+        false_fn_name=false_fn_name)
+
+  def _process_block(self, scope, block):
+    self.state[_Statement].enter()
+    self.state[_Statement].scope = scope
+    block = self.visit_block(
+        block,
+        before_visit=self.state[_FunctionDefs].enter,
+        after_visit=self._postprocess_statement)
+    self.state[_Statement].exit()
+    return block
+
+  def visit_FunctionDef(self, node):
+    node.args = self.generic_visit(node.args)
+    node.decorator_list = self.visit_block(node.decorator_list)
+    node.body = self._process_block(
+        anno.getanno(node, anno.Static.SCOPE), node.body)
+    return node
+
+  def visit_For(self, node):
+    node.target = self.visit(node.target)
+    node.body = self._process_block(
+        anno.getanno(node, NodeAnno.BODY_SCOPE), node.body)
+    node.orelse = self._process_block(
+        anno.getanno(node, NodeAnno.ORELSE_SCOPE), node.orelse)
+    return node
+
+  def visit_While(self, node):
+    node.test = self.visit(node.test)
+    node.body = self._process_block(
+        anno.getanno(node, NodeAnno.BODY_SCOPE), node.body)
+    node.orelse = self._process_block(
+        anno.getanno(node, NodeAnno.ORELSE_SCOPE), node.orelse)
+    return node
+
+  def visit_If(self, node):
+    node.test = self.visit(node.test)
+    node.body = self._process_block(
+        anno.getanno(node, NodeAnno.BODY_SCOPE), node.body)
+    node.orelse = self._process_block(
+        anno.getanno(node, NodeAnno.ORELSE_SCOPE), node.orelse)
+    return node
+
+  def visit_With(self, node):
+    node.items = self.visit_block(node.items)
+    node.body = self._process_block(
+        anno.getanno(node, NodeAnno.BODY_SCOPE), node.body)
+    return node
+
+
+def transform(node, ctx):
+  node = ConditionalExpressionTransformer(ctx).visit(node)
+  return node
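
[Editorial note] The new converter hoists each branch of a ternary into a nullary function defined just before the statement that used it (via _postprocess_statement), then replaces the expression with a functional cond. Roughly, x = 1 if c else 0 comes out with this shape; real names come from ctx.namer, and each thunk returns a one-element tuple per the template above:

# Stand-in for ag__.utils.run_cond so the sketch is runnable; assumption:
# the real helper dispatches to a graph cond when `test` is a tensor.
def run_cond(test, true_fn, false_fn):
  return true_fn() if test else false_fn()

c = True

def c_true():   # hoisted body of the ternary
  return 1,

def c_false():  # hoisted orelse of the ternary
  return 0,

x = run_cond(c, c_true, c_false)
assert x == (1,)
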
diff --git a/tensorflow/contrib/autograph/converters/conditional_expressions_test.py b/tensorflow/contrib/autograph/converters/conditional_expressions_test.py
new file mode 100644
index 0000000..95a3108
--- /dev/null
+++ b/tensorflow/contrib/autograph/converters/conditional_expressions_test.py
@@ -0,0 +1,53 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for conditional_expressions module."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.autograph.converters import conditional_expressions
+from tensorflow.contrib.autograph.core import converter_testing
+from tensorflow.python.platform import test
+
+
+class ConditionalExpressionsTest(converter_testing.TestCase):
+
+  def assertTransformedEquivalent(self, test_fn, *inputs):
+    ns = {}
+    with self.converted(test_fn, conditional_expressions, ns) as result:
+      self.assertEqual(test_fn(*inputs), result.test_fn(*inputs))
+
+  def test_basic(self):
+
+    def test_fn(x):
+      return 1 if x else 0
+
+    self.assertTransformedEquivalent(test_fn, 0)
+    self.assertTransformedEquivalent(test_fn, 3)
+
+  def test_nested_orelse(self):
+
+    def test_fn(x):
+      y = x * x if x > 0 else x if x else 1
+      return y
+
+    self.assertTransformedEquivalent(test_fn, -2)
+    self.assertTransformedEquivalent(test_fn, 0)
+    self.assertTransformedEquivalent(test_fn, 2)
+
+
+if __name__ == '__main__':
+  test.main()
diff --git a/tensorflow/contrib/autograph/converters/continue_statements_test.py b/tensorflow/contrib/autograph/converters/continue_statements_test.py
index 2ce1837..3a7c7d1 100644
--- a/tensorflow/contrib/autograph/converters/continue_statements_test.py
+++ b/tensorflow/contrib/autograph/converters/continue_statements_test.py
@@ -25,7 +25,11 @@
 
 class ContinueCanonicalizationTest(converter_testing.TestCase):
 
-  def test_basic_continue(self):
+  def assertTransformedEquivalent(self, test_fn, *inputs):
+    with self.converted(test_fn, continue_statements, {}) as result:
+      self.assertEqual(test_fn(*inputs), result.test_fn(*inputs))
+
+  def test_basic(self):
 
     def test_fn(x):
       v = []
@@ -36,17 +40,12 @@
         v.append(x)
       return v
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = continue_statements.transform(node, self.ctx)
+    self.assertTransformedEquivalent(test_fn, 0)
+    self.assertTransformedEquivalent(test_fn, 1)
+    self.assertTransformedEquivalent(test_fn, 3)
+    self.assertTransformedEquivalent(test_fn, 4)
 
-    with self.compiled(node) as result:
-      self.assertEqual(test_fn(0), result.test_fn(0))
-      self.assertEqual(test_fn(1), result.test_fn(1))
-      self.assertEqual(test_fn(2), result.test_fn(2))
-      self.assertEqual(test_fn(3), result.test_fn(3))
-      self.assertEqual(test_fn(4), result.test_fn(4))
-
-  def test_basic_continue_for_loop(self):
+  def test_for_loop(self):
 
     def test_fn(a):
       v = []
@@ -57,16 +56,12 @@
         v.append(x)
       return v
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = continue_statements.transform(node, self.ctx)
+    self.assertTransformedEquivalent(test_fn, [])
+    self.assertTransformedEquivalent(test_fn, [1])
+    self.assertTransformedEquivalent(test_fn, [2])
+    self.assertTransformedEquivalent(test_fn, [1, 2, 3])
 
-    with self.compiled(node) as result:
-      self.assertEqual(test_fn([]), result.test_fn([]))
-      self.assertEqual(test_fn([1]), result.test_fn([1]))
-      self.assertEqual(test_fn([2]), result.test_fn([2]))
-      self.assertEqual(test_fn([1, 2, 3]), result.test_fn([1, 2, 3]))
-
-  def test_continue_deeply_nested(self):
+  def test_nested(self):
 
     def test_fn(x):
       v = []
@@ -83,15 +78,10 @@
         v.append(x)
       return v, u, w
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = continue_statements.transform(node, self.ctx)
-
-    with self.compiled(node) as result:
-      self.assertEqual(test_fn(0), result.test_fn(0))
-      self.assertEqual(test_fn(1), result.test_fn(1))
-      self.assertEqual(test_fn(2), result.test_fn(2))
-      self.assertEqual(test_fn(3), result.test_fn(3))
-      self.assertEqual(test_fn(4), result.test_fn(4))
+    self.assertTransformedEquivalent(test_fn, 0)
+    self.assertTransformedEquivalent(test_fn, 1)
+    self.assertTransformedEquivalent(test_fn, 3)
+    self.assertTransformedEquivalent(test_fn, 4)
 
 
 if __name__ == '__main__':
diff --git a/tensorflow/contrib/autograph/converters/control_flow.py b/tensorflow/contrib/autograph/converters/control_flow.py
index f4a8710..a25232f 100644
--- a/tensorflow/contrib/autograph/converters/control_flow.py
+++ b/tensorflow/contrib/autograph/converters/control_flow.py
@@ -25,8 +25,7 @@
 from tensorflow.contrib.autograph.pyct import ast_util
 from tensorflow.contrib.autograph.pyct import parser
 from tensorflow.contrib.autograph.pyct import templates
-from tensorflow.contrib.autograph.pyct.static_analysis import cfg
-from tensorflow.contrib.autograph.pyct.static_analysis.annos import NodeAnno
+from tensorflow.contrib.autograph.pyct.static_analysis import annos
 
 
 class SymbolNamer(object):
@@ -47,6 +46,7 @@
 
 class ControlFlowTransformer(converter.Base):
   """Transforms control flow structures like loops an conditionals."""
+
   def _create_cond_branch(self, body_name, aliased_orig_names,
                           aliased_new_names, body, returns):
     if aliased_orig_names:
@@ -90,55 +90,51 @@
       return templates.replace(
           template, test=test, body_name=body_name, orelse_name=orelse_name)
 
+  def _fmt_symbol_list(self, symbol_set):
+    if not symbol_set:
+      return 'no variables'
+    return ', '.join(map(str, symbol_set))
+
   def visit_If(self, node):
-    self.generic_visit(node)
+    node = self.generic_visit(node)
 
-    body_scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
-    orelse_scope = anno.getanno(node, NodeAnno.ORELSE_SCOPE)
-    body_defs = body_scope.created | body_scope.modified
-    orelse_defs = orelse_scope.created | orelse_scope.modified
-    live = anno.getanno(node, 'live_out')
+    body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
+    orelse_scope = anno.getanno(node, annos.NodeAnno.ORELSE_SCOPE)
+    defined_in = anno.getanno(node, anno.Static.DEFINED_VARS_IN)
+    live_out = anno.getanno(node, anno.Static.LIVE_VARS_OUT)
 
-    # We'll need to check if we're closing over variables that are defined
-    # elsewhere in the function
-    # NOTE: we can only detect syntactic closure in the scope
-    # of the code passed in. If the AutoGraph'd function itself closes
-    # over other variables, this analysis won't take that into account.
-    defined = anno.getanno(node, 'defined_in')
+    modified_in_cond = body_scope.modified | orelse_scope.modified
+    returned_from_cond = set()
+    for s in modified_in_cond:
+      if s in live_out:
+        returned_from_cond.add(s)
+      elif s.is_composite():
+        # Special treatment for compound objects: if any of their owner entities
+        # are live, then they are outputs as well.
+        if any(owner in live_out for owner in s.owner_set):
+          returned_from_cond.add(s)
 
-    # We only need to return variables that are
-    # - modified by one or both branches
-    # - live (or has a live parent) at the end of the conditional
-    modified = []
-    for def_ in body_defs | orelse_defs:
-      def_with_parents = set((def_,)) | def_.support_set
-      if live & def_with_parents:
-        modified.append(def_)
+    need_alias_in_body = body_scope.modified & defined_in
+    need_alias_in_orelse = orelse_scope.modified & defined_in
 
-    # We need to check if live created variables are balanced
-    # in both branches
-    created = live & (body_scope.created | orelse_scope.created)
+    created_in_body = body_scope.modified & returned_from_cond - defined_in
+    created_in_orelse = orelse_scope.modified & returned_from_cond - defined_in
 
-    # The if statement is illegal if there are variables that are created,
-    # that are also live, but both branches don't create them.
-    if created:
-      if created != (body_scope.created & live):
-        raise ValueError(
-            'The main branch does not create all live symbols that the else '
-            'branch does.')
-      if created != (orelse_scope.created & live):
-        raise ValueError(
-            'The else branch does not create all live symbols that the main '
-            'branch does.')
+    if created_in_body != created_in_orelse:
+      raise ValueError(
+          'if statement may not initialize all variables: the true branch'
+          ' creates %s, while the false branch creates %s. Make sure all'
+          ' these variables are initialized either in both'
+          ' branches or before the if statement.' %
+          (self._fmt_symbol_list(created_in_body),
+           self._fmt_symbol_list(created_in_orelse)))
 
-    # Alias the closure variables inside the conditional functions
-    # to avoid errors caused by the local variables created in the branch
-    # functions.
+    # Alias the closure variables inside the conditional functions, to allow
+    # the functions access to the respective variables.
     # We will alias variables independently for body and orelse scope,
     # because different branches might write different variables.
-    aliased_body_orig_names = tuple(body_scope.modified - body_scope.created)
-    aliased_orelse_orig_names = tuple(orelse_scope.modified -
-                                      orelse_scope.created)
+    aliased_body_orig_names = tuple(need_alias_in_body)
+    aliased_orelse_orig_names = tuple(need_alias_in_orelse)
     aliased_body_new_names = tuple(
         self.ctx.namer.new_symbol(s.ssf(), body_scope.referenced)
         for s in aliased_body_orig_names)
@@ -153,58 +149,47 @@
     node_body = ast_util.rename_symbols(node.body, alias_body_map)
     node_orelse = ast_util.rename_symbols(node.orelse, alias_orelse_map)
 
-    if not modified:
+    returned_from_cond = tuple(returned_from_cond)
+    if returned_from_cond:
+      if len(returned_from_cond) == 1:
+        # TODO(mdan): Move this quirk into the operator implementation.
+        cond_results = returned_from_cond[0]
+      else:
+        cond_results = gast.Tuple([s.ast() for s in returned_from_cond], None)
+
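+      # Branch functions return each output symbol under the name it has
+      # inside that branch: the aliased name if the branch aliased it, the
+      # original name otherwise.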
+      returned_from_body = tuple(
+          alias_body_map[s] if s in need_alias_in_body else s
+          for s in returned_from_cond)
+      returned_from_orelse = tuple(
+          alias_orelse_map[s] if s in need_alias_in_orelse else s
+          for s in returned_from_cond)
+
+    else:
       # When the cond would return no value, we leave the cond called without
       # results. That in turn should trigger the side effect guards. The
       # branch functions will return a dummy value that ensures cond
       # actually has some return value as well.
-      results = None
-    elif len(modified) == 1:
-      results = modified[0]
-    else:
-      results = gast.Tuple([s.ast() for s in modified], None)
+      cond_results = None
+      # TODO(mdan): This doesn't belong here; it's specific to the operator.
+      returned_from_body = templates.replace_as_expression('1')
+      returned_from_orelse = templates.replace_as_expression('1')
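+      # For example, in `if n > 0: b = 4` where `b` is unused afterwards, the
+      # staged cond returns only this dummy value and is pruned at execution.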
 
     body_name = self.ctx.namer.new_symbol('if_true', body_scope.referenced)
     orelse_name = self.ctx.namer.new_symbol('if_false', orelse_scope.referenced)
-    if modified:
-
-      def build_returns(aliased_names, alias_map, scope):
-        """Builds list of return variables for a branch of a conditional."""
-        returns = []
-        for s in modified:
-          if s in aliased_names:
-            returns.append(alias_map[s])
-          else:
-            if s not in scope.created | defined:
-              raise ValueError(
-                  'Attempting to return variable "%s" from the true branch of '
-                  'a conditional, but it was not closed over, or created in '
-                  'this branch.' % str(s))
-            else:
-              returns.append(s)
-        return tuple(returns)
-
-      body_returns = build_returns(aliased_body_orig_names, alias_body_map,
-                                   body_scope)
-      orelse_returns = build_returns(aliased_orelse_orig_names,
-                                     alias_orelse_map, orelse_scope)
-
-    else:
-      body_returns = orelse_returns = templates.replace('tf.ones(())')[0].value
 
     body_def = self._create_cond_branch(
         body_name,
-        aliased_orig_names=tuple(aliased_body_orig_names),
-        aliased_new_names=tuple(aliased_body_new_names),
+        aliased_orig_names=aliased_body_orig_names,
+        aliased_new_names=aliased_body_new_names,
         body=node_body,
-        returns=body_returns)
+        returns=returned_from_body)
     orelse_def = self._create_cond_branch(
         orelse_name,
-        aliased_orig_names=tuple(aliased_orelse_orig_names),
-        aliased_new_names=tuple(aliased_orelse_new_names),
+        aliased_orig_names=aliased_orelse_orig_names,
+        aliased_new_names=aliased_orelse_new_names,
         body=node_orelse,
-        returns=orelse_returns)
-    cond_expr = self._create_cond_expr(results, node.test, body_name,
+        returns=returned_from_orelse)
+    cond_expr = self._create_cond_expr(cond_results, node.test, body_name,
                                        orelse_name)
 
     return body_def + orelse_def + cond_expr
@@ -212,11 +197,11 @@
   def visit_While(self, node):
     self.generic_visit(node)
 
-    body_scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
+    body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
     body_closure = body_scope.modified - body_scope.created
     all_referenced = body_scope.referenced
 
-    cond_scope = anno.getanno(node, NodeAnno.COND_SCOPE)
+    cond_scope = anno.getanno(node, annos.NodeAnno.COND_SCOPE)
     cond_closure = set()
     for s in cond_scope.referenced:
       for root in s.support_set:
@@ -277,7 +262,7 @@
   def visit_For(self, node):
     self.generic_visit(node)
 
-    body_scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
+    body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
     body_closure = body_scope.modified - body_scope.created
     all_referenced = body_scope.referenced
 
@@ -331,7 +316,5 @@
 
 
 def transform(node, ctx):
-  cfg.run_analyses(node, cfg.Liveness(ctx.info))
-  cfg.run_analyses(node, cfg.Defined(ctx.info))
   node = ControlFlowTransformer(ctx).visit(node)
   return node
diff --git a/tensorflow/contrib/autograph/converters/control_flow_test.py b/tensorflow/contrib/autograph/converters/control_flow_test.py
index 735eb92..6670b8a 100644
--- a/tensorflow/contrib/autograph/converters/control_flow_test.py
+++ b/tensorflow/contrib/autograph/converters/control_flow_test.py
@@ -20,16 +20,22 @@
 
 from tensorflow.contrib.autograph.converters import control_flow
 from tensorflow.contrib.autograph.core import converter_testing
+from tensorflow.contrib.autograph.pyct import transformer
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
-from tensorflow.python.ops import array_ops
-from tensorflow.python.ops import control_flow_ops
 from tensorflow.python.platform import test
 
 
 class ControlFlowTest(converter_testing.TestCase):
 
-  def test_simple_while(self):
+  def assertTransformedResult(self, test_fn, inputs, expected):
+    if not isinstance(inputs, tuple):
+      inputs = (inputs,)
+    with self.converted(test_fn, control_flow, {}) as result:
+      with self.test_session() as sess:
+        self.assertEqual(sess.run(result.test_fn(*inputs)), expected)
+
+  def test_while_basic(self):
 
     def test_fn(n):
       i = 0
@@ -39,29 +45,18 @@
         i += 1
       return s, i, n
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = control_flow.transform(node, self.ctx)
+    self.assertTransformedResult(test_fn, constant_op.constant(5), (10, 5, 5))
 
-    with self.compiled(node) as result:
-      with self.test_session() as sess:
-        self.assertEqual((10, 5, 5),
-                         sess.run(result.test_fn(constant_op.constant(5))))
-
-  def test_while_single_var(self):
+  def test_while_single_output(self):
 
     def test_fn(n):
       while n > 0:
         n -= 1
       return n
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = control_flow.transform(node, self.ctx)
+    self.assertTransformedResult(test_fn, constant_op.constant(5), 0)
 
-    with self.compiled(node) as result:
-      with self.test_session() as sess:
-        self.assertEqual(0, sess.run(result.test_fn(constant_op.constant(5))))
-
-  def test_simple_if(self):
+  def test_if_basic(self):
 
     def test_fn(n):
       a = 0
@@ -72,114 +67,85 @@
         b = 2 * n
       return a, b
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = control_flow.transform(node, self.ctx)
+    self.assertTransformedResult(test_fn, constant_op.constant(1), (-1, 0))
+    self.assertTransformedResult(test_fn, constant_op.constant(-1), (0, -2))
 
-    with self.compiled(node) as result:
+  def test_if_complex_outputs(self):
+
+    class TestClass(object):
+
+      def __init__(self, a, b):
+        self.a = a
+        self.b = b
+
+    def test_fn(n, obj):
+      obj.a = 0
+      obj.b = 0
+      if n > 0:
+        obj.a = -n
+      else:
+        obj.b = 2 * n
+      return obj
+
+    with self.converted(test_fn, control_flow, {}) as result:
       with self.test_session() as sess:
-        self.assertEqual((-1, 0),
-                         sess.run(result.test_fn(constant_op.constant(1))))
-        self.assertEqual((0, -2),
-                         sess.run(result.test_fn(constant_op.constant(-1))))
+        res_obj = result.test_fn(constant_op.constant(1), TestClass(0, 0))
+        self.assertEqual(sess.run((res_obj.a, res_obj.b)), (-1, 0))
+        res_obj = result.test_fn(constant_op.constant(-1), TestClass(0, 0))
+        self.assertEqual(sess.run((res_obj.a, res_obj.b)), (0, -2))
 
-  def test_if_single_var(self):
+  def test_if_single_output(self):
 
     def test_fn(n):
       if n > 0:
         n = -n
       return n
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = control_flow.transform(node, self.ctx)
+    self.assertTransformedResult(test_fn, constant_op.constant(1), -1)
 
-    with self.compiled(node) as result:
-      with self.test_session() as sess:
-        self.assertEqual(-1, sess.run(result.test_fn(constant_op.constant(1))))
-
-  def test_imbalanced_aliasing(self):
+  def test_if_semi(self):
 
     def test_fn(n):
       if n > 0:
         n = 3
       return n
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = control_flow.transform(node, self.ctx)
+    self.assertTransformedResult(test_fn, constant_op.constant(2), 3)
+    self.assertTransformedResult(test_fn, constant_op.constant(-3), -3)
 
-    with self.compiled(node, control_flow_ops.cond) as result:
-      with self.test_session() as sess:
-        self.assertEqual(3, sess.run(result.test_fn(constant_op.constant(2))))
-        self.assertEqual(-3, sess.run(result.test_fn(constant_op.constant(-3))))
-
-  def test_ignore_unread_variable(self):
+  def test_if_local_var(self):
 
     def test_fn(n):
-      b = 3  # pylint: disable=unused-variable
       if n > 0:
         b = 4
+        n = b + 1
       return n
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = control_flow.transform(node, self.ctx)
+    self.assertTransformedResult(test_fn, constant_op.constant(1), 5)
+    self.assertTransformedResult(test_fn, constant_op.constant(-1), -1)
 
-    with self.compiled(node, control_flow_ops.cond, array_ops.ones) as result:
-      with self.test_session() as sess:
-        self.assertEqual(3, sess.run(result.test_fn(constant_op.constant(3))))
-        self.assertEqual(-3, sess.run(result.test_fn(constant_op.constant(-3))))
+  def test_if_no_outputs(self):
 
-  def test_handle_temp_variable(self):
+    def test_fn(n):
+      if n > 0:
+        b = 4  # pylint:disable=unused-variable
+      return n
 
-    def test_fn_using_temp(x, y, w):
-      if x < y:
-        z = x + y
-      else:
-        w = 2
-        tmp = w
-        z = x - tmp
-      return z, w
+    # Without side effect guards, the if statement will stage a cond,
+    # but that will be pruned at execution.
+    self.assertTransformedResult(test_fn, constant_op.constant(1), 1)
+    self.assertTransformedResult(test_fn, constant_op.constant(-1), -1)
 
-    node = self.parse_and_analyze(test_fn_using_temp, {})
-    node = control_flow.transform(node, self.ctx)
+  def test_if_imbalanced_outputs(self):
 
-    with self.compiled(node, control_flow_ops.cond, array_ops.ones) as result:
-      with self.test_session() as sess:
-        z, w = sess.run(
-            result.test_fn_using_temp(
-                constant_op.constant(-3), constant_op.constant(3),
-                constant_op.constant(3)))
-        self.assertEqual(0, z)
-        self.assertEqual(3, w)
-        z, w = sess.run(
-            result.test_fn_using_temp(
-                constant_op.constant(3), constant_op.constant(-3),
-                constant_op.constant(3)))
-        self.assertEqual(1, z)
-        self.assertEqual(2, w)
+    def test_fn(n):
+      if n > 0:
+        b = 4
+      return b
 
-    def test_fn_ignoring_temp(x, y, w):
-      if x < y:
-        z = x + y
-      else:
-        w = 2
-        tmp = w
-        z = x - tmp
-      return z
-
-    node = self.parse_and_analyze(test_fn_ignoring_temp, {})
-    node = control_flow.transform(node, self.ctx)
-
-    with self.compiled(node, control_flow_ops.cond, array_ops.ones) as result:
-      with self.test_session() as sess:
-        z = sess.run(
-            result.test_fn_ignoring_temp(
-                constant_op.constant(-3), constant_op.constant(3),
-                constant_op.constant(3)))
-        self.assertEqual(0, z)
-        z = sess.run(
-            result.test_fn_ignoring_temp(
-                constant_op.constant(3), constant_op.constant(-3),
-                constant_op.constant(3)))
-        self.assertEqual(1, z)
+    node, ctx = self.prepare(test_fn, {})
+    with self.assertRaises(transformer.AutographParseError):
+      control_flow.transform(node, ctx)
 
   def test_simple_for(self):
 
@@ -191,22 +157,11 @@
         s2 += e * e
       return s1, s2
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = control_flow.transform(node, self.ctx)
+    self.assertTransformedResult(test_fn, constant_op.constant([1, 3]), (4, 10))
+    empty_vector = constant_op.constant([], shape=(0,), dtype=dtypes.int32)
+    self.assertTransformedResult(test_fn, empty_vector, (0, 0))
 
-    with self.compiled(node) as result:
-      with self.test_session() as sess:
-        l = [1, 2, 3]
-        self.assertEqual(
-            test_fn(l), sess.run(result.test_fn(constant_op.constant(l))))
-        l = []
-        self.assertEqual(
-            test_fn(l),
-            sess.run(
-                result.test_fn(
-                    constant_op.constant(l, shape=(0,), dtype=dtypes.int32))))
-
-  def test_for_single_var(self):
+  def test_for_single_output(self):
 
     def test_fn(l):
       s = 0
@@ -214,22 +169,11 @@
         s += e
       return s
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = control_flow.transform(node, self.ctx)
+    self.assertTransformedResult(test_fn, constant_op.constant([1, 3]), 4)
+    empty_vector = constant_op.constant([], shape=(0,), dtype=dtypes.int32)
+    self.assertTransformedResult(test_fn, empty_vector, 0)
 
-    with self.compiled(node) as result:
-      with self.test_session() as sess:
-        l = [1, 2, 3]
-        self.assertEqual(
-            test_fn(l), sess.run(result.test_fn(constant_op.constant(l))))
-        l = []
-        self.assertEqual(
-            test_fn(l),
-            sess.run(
-                result.test_fn(
-                    constant_op.constant(l, shape=(0,), dtype=dtypes.int32))))
-
-  def test_for_with_iterated_expression(self):
+  def test_for_iterated_expression(self):
 
     eval_count = [0]
 
@@ -243,14 +187,13 @@
         s += e
       return s
 
-    node = self.parse_and_analyze(test_fn, {'count_evals': count_evals})
-    node = control_flow.transform(node, self.ctx)
+    ns = {'count_evals': count_evals}
+    node, ctx = self.prepare(test_fn, ns)
+    node = control_flow.transform(node, ctx)
 
-    with self.compiled(node) as result:
-      result.count_evals = count_evals
-      self.assertEqual(test_fn(5), result.test_fn(5))
-      # count_evals ran twice, once for test_fn and another for result.test_fn
-      self.assertEqual(eval_count[0], 2)
+    with self.compiled(node, ns) as result:
+      self.assertEqual(result.test_fn(5), 10)
+      self.assertEqual(eval_count[0], 1)
 
 
 if __name__ == '__main__':
diff --git a/tensorflow/contrib/autograph/converters/decorators_test.py b/tensorflow/contrib/autograph/converters/decorators_test.py
index d41c7fd..095abc5 100644
--- a/tensorflow/contrib/autograph/converters/decorators_test.py
+++ b/tensorflow/contrib/autograph/converters/decorators_test.py
@@ -61,13 +61,13 @@
         'simple_decorator': simple_decorator,
         'converter_testing': converter_testing,
     }
-    node = self.parse_and_analyze(
+    node, ctx = self.prepare(
         f,
         namespace,
         recursive=False,
         autograph_decorators=autograph_decorators)
-    node = decorators.transform(node, self.ctx)
-    import_line = '\n'.join(self.ctx.program.additional_imports)
+    node = decorators.transform(node, ctx)
+    import_line = '\n'.join(ctx.program.additional_imports)
     result, _ = compiler.ast_to_object(node, source_prefix=import_line)
     return getattr(result, f.__name__)
 
@@ -76,11 +76,8 @@
     def test_fn(a):
       return a
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = decorators.transform(node, self.ctx)
-    result, _ = compiler.ast_to_object(node)
-
-    self.assertEqual(1, result.test_fn(1))
+    with self.converted(test_fn, decorators, {}) as result:
+      self.assertEqual(1, result.test_fn(1))
 
   def test_function(self):
 
@@ -124,7 +121,7 @@
         return b + 11
       return inner_fn(a)
 
-    # Expected to fail because simple_decorator cannot be imported.
+    # Expected to fail because simple_decorator could not be imported.
     with self.assertRaises(transformer.AutographParseError):
       test_fn(1)
 
diff --git a/tensorflow/contrib/autograph/converters/directives.py b/tensorflow/contrib/autograph/converters/directives.py
new file mode 100644
index 0000000..ccdf79d
--- /dev/null
+++ b/tensorflow/contrib/autograph/converters/directives.py
@@ -0,0 +1,108 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Handles directives.
+
+This converter removes the directive functions from the code and moves the
+information they specify into AST annotations. It is a specialized form of
+static analysis, one that is specific to AutoGraph.
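+
+For example, the directive call in:
+
+  l = []
+  directives.set_element_type(l, dtype, shape)
+
+is removed from the generated code, and the element type information is
+attached as an annotation to the definitions of `l` instead.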
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import gast
+
+from tensorflow.contrib.autograph.core import converter
+from tensorflow.contrib.autograph.lang import directives
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.python.util import tf_inspect
+
+ENCLOSING_LOOP = 'enclosing_loop'
+
+
+def _map_args(call_node, function):
+  """Maps AST call nodes to the actual function's arguments.
+
+  Args:
+    call_node: ast.Call
+    function: Callable[..., Any], the actual function matching call_node
+  Returns:
+    Dict[Text, ast.AST], mapping each of the function's argument names to
+    the respective AST node.
+  """
+  args = call_node.args
+  kwds = {kwd.arg: kwd.value for kwd in call_node.keywords}
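+  # getcallargs also binds default values; e.g. for a call node representing
+  # `set_loop_options(parallel_iterations=10, back_prop=a)`, the result maps
+  # 'parallel_iterations' and 'back_prop' to their AST nodes and the remaining
+  # options to their default values.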
+  return tf_inspect.getcallargs(function, *args, **kwds)
+
+
+class DirectivesTransformer(converter.Base):
+  """Parses compiler directives and converts them into AST annotations."""
+
+  def _process_symbol_directive(self, call_node, directive):
+    if len(call_node.args) < 1:
+      raise ValueError('"%s" requires a positional first argument'
+                       ' as the target' % directive.__name__)
+    target = call_node.args[0]
+    defs = anno.getanno(target, anno.Static.ORIG_DEFINITIONS)
+    for def_ in defs:
+      def_.directives[directive] = _map_args(call_node, directive)
+    return call_node
+
+  def _process_statement_directive(self, call_node, directive):
+    if self.local_scope_level < 1:
+      raise ValueError(
+          '"%s" must be used inside a statement' % directive.__name__)
+    target = self.get_local(ENCLOSING_LOOP)
+    node_anno = anno.getanno(target, converter.AgAnno.DIRECTIVES, {})
+    node_anno[directive] = _map_args(call_node, directive)
+    anno.setanno(target, converter.AgAnno.DIRECTIVES, node_anno)
+    return call_node
+
+  def visit_Expr(self, node):
+    if isinstance(node.value, gast.Call):
+      call_node = node.value
+      if anno.hasanno(call_node.func, 'live_val'):
+        live_val = anno.getanno(call_node.func, 'live_val')
+
+        if live_val is directives.set_element_type:
+          call_node = self._process_symbol_directive(call_node, live_val)
+        elif live_val is directives.set_loop_options:
+          call_node = self._process_statement_directive(call_node, live_val)
+        else:
+          return self.generic_visit(node)
+
+        return None  # Directive calls are not output in the generated code.
+    return self.generic_visit(node)
+
+  # TODO(mdan): This will be insufficient for other control flow.
+  # If we ever have a directive that affects things other than loops, we'll
+  # need support for parallel scopes, or have to use multiple converters.
+  def _track_and_visit_loop(self, node):
+    self.enter_local_scope()
+    self.set_local(ENCLOSING_LOOP, node)
+    node = self.generic_visit(node)
+    self.exit_local_scope()
+    return node
+
+  def visit_While(self, node):
+    return self._track_and_visit_loop(node)
+
+  def visit_For(self, node):
+    return self._track_and_visit_loop(node)
+
+
+def transform(node, ctx):
+  return DirectivesTransformer(ctx).visit(node)
diff --git a/tensorflow/contrib/autograph/converters/directives_test.py b/tensorflow/contrib/autograph/converters/directives_test.py
new file mode 100644
index 0000000..5f798a5
--- /dev/null
+++ b/tensorflow/contrib/autograph/converters/directives_test.py
@@ -0,0 +1,78 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for directives module."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.autograph.converters import directives as directives_converter
+from tensorflow.contrib.autograph.core import converter_testing
+from tensorflow.contrib.autograph.core.converter import AgAnno
+from tensorflow.contrib.autograph.lang import directives
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.python.platform import test
+
+
+class DirectivesTest(converter_testing.TestCase):
+
+  def test_local_target(self):
+
+    def test_fn():
+      l = []
+      string_var = 0
+      directives.set_element_type(l, 'a', string_var)
+
+    node, ctx = self.prepare(test_fn, {'directives': directives})
+    node = directives_converter.transform(node, ctx)
+
+    def_, = anno.getanno(node.body[0].body[0].targets[0],
+                         anno.Static.DEFINITIONS)
+    d = def_.directives[directives.set_element_type]
+    self.assertEqual(d['dtype'].s, 'a')
+    self.assertEqual(d['shape'].id, 'string_var')
+
+  def test_argument_target(self):
+
+    def test_fn(a):
+      directives.set_element_type(a, 1, shape=2)
+
+    node, ctx = self.prepare(test_fn, {'directives': directives})
+    node = directives_converter.transform(node, ctx)
+
+    def_, = anno.getanno(node.body[0].args.args[0], anno.Static.DEFINITIONS)
+    d = def_.directives[directives.set_element_type]
+    self.assertEqual(d['dtype'].n, 1)
+    self.assertEqual(d['shape'].n, 2)
+
+  def test_loop_target(self):
+
+    def test_fn():
+      a = True
+      while True:
+        directives.set_loop_options(parallel_iterations=10, back_prop=a)
+
+    node, ctx = self.prepare(test_fn, {'directives': directives})
+    node = directives_converter.transform(node, ctx)
+
+    d = anno.getanno(node.body[0].body[1], AgAnno.DIRECTIVES)
+    d = d[directives.set_loop_options]
+    self.assertEqual(d['parallel_iterations'].n, 10)
+    self.assertEqual(d['back_prop'].id, 'a')
+    self.assertEqual(d['swap_memory'], directives.UNSPECIFIED)
+
+
+if __name__ == '__main__':
+  test.main()
diff --git a/tensorflow/contrib/autograph/converters/error_handlers.py b/tensorflow/contrib/autograph/converters/error_handlers.py
new file mode 100644
index 0000000..3f23662
--- /dev/null
+++ b/tensorflow/contrib/autograph/converters/error_handlers.py
@@ -0,0 +1,52 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Wraps function bodies with a try/except to rewrite error tracebacks.
+
+Only adds try/except wrappers to functions that have the anno.Basic.ORIGIN
+annotation because these are the functions originally written by the user.
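+
+For example, a function body like:
+
+  raise ValueError('an error')
+
+is rewritten as:
+
+  try:
+    raise ValueError('an error')
+  except:
+    ag__.rewrite_graph_construction_error(ag_source_map__)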
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.autograph.core import converter
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import templates
+
+
+class ErrorRewritingTransformer(converter.Base):
+  """Possibly wraps the body of a function in a try/except.
+
+  Only wraps functions that were originally defined by the user, detected by
+  checking for the anno.Basic.ORIGIN annotation.
+  """
+
+  def visit_FunctionDef(self, node):
+    node = self.generic_visit(node)
+
+    if anno.hasanno(node, anno.Basic.ORIGIN):
+      template = """
+        try:
+          body
+        except:
+          ag__.rewrite_graph_construction_error(ag_source_map__)
+      """
+      node.body = templates.replace(template, body=node.body)
+    return node
+
+
+def transform(node, ctx):
+  return ErrorRewritingTransformer(ctx).visit(node)
diff --git a/tensorflow/contrib/autograph/converters/error_handlers_test.py b/tensorflow/contrib/autograph/converters/error_handlers_test.py
new file mode 100644
index 0000000..878526c
--- /dev/null
+++ b/tensorflow/contrib/autograph/converters/error_handlers_test.py
@@ -0,0 +1,55 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for error_handlers module."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.autograph.converters import error_handlers
+from tensorflow.contrib.autograph.core import converter_testing
+from tensorflow.contrib.autograph.core import errors
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import origin_info
+from tensorflow.python.platform import test
+
+
+class ErrorHandlersTest(converter_testing.TestCase):
+
+  def test_basic(self):
+
+    def test_fn():
+      raise ValueError()
+
+    node, ctx = self.prepare(test_fn, {})
+    anno.setanno(node.body[0], anno.Basic.ORIGIN,
+                 origin_info.OriginInfo('test_path', None, None, None, None))
+    node = error_handlers.transform(node, ctx)
+    with self.compiled(node, {}) as result:
+      with self.assertRaises(errors.GraphConstructionError):
+        result.test_fn()
+
+  def test_no_origin_annotation(self):
+
+    def test_fn():
+      raise ValueError()
+
+    with self.converted(test_fn, error_handlers, {}) as result:
+      with self.assertRaises(ValueError):
+        result.test_fn()
+
+
+if __name__ == '__main__':
+  test.main()
diff --git a/tensorflow/contrib/autograph/converters/ifexp.py b/tensorflow/contrib/autograph/converters/ifexp.py
deleted file mode 100644
index e996138..0000000
--- a/tensorflow/contrib/autograph/converters/ifexp.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Canonicalizes the ternary conditional operator."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from tensorflow.contrib.autograph.core import converter
-from tensorflow.contrib.autograph.pyct import templates
-
-
-class IfExp(converter.Base):
-  """Canonicalizes all IfExp nodes into plain conditionals."""
-
-  def visit_IfExp(self, node):
-    template = """
-        ag__.utils.run_cond(test, lambda: (body,), lambda: (orelse,))
-    """
-    desugared_ifexp = templates.replace_as_expression(
-        template, test=node.test, body=node.body, orelse=node.orelse)
-    return desugared_ifexp
-
-
-def transform(node, ctx):
-  """Desugar IfExp nodes into plain conditionals.
-
-  Args:
-     node: ast.AST, the node to transform
-     ctx: converter.EntityContext
-
-  Returns:
-     new_node: an AST with no IfExp nodes, only conditionals.
-  """
-
-  node = IfExp(ctx).visit(node)
-  return node
diff --git a/tensorflow/contrib/autograph/converters/ifexp_test.py b/tensorflow/contrib/autograph/converters/ifexp_test.py
deleted file mode 100644
index cdd5a2f..0000000
--- a/tensorflow/contrib/autograph/converters/ifexp_test.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Tests for ifexp module."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from tensorflow.contrib.autograph import utils
-from tensorflow.contrib.autograph.converters import ifexp
-from tensorflow.contrib.autograph.core import converter_testing
-from tensorflow.python.platform import test
-
-
-class IfExpTest(converter_testing.TestCase):
-
-  def compiled_fn(self, test_fn, *args):
-    node = self.parse_and_analyze(test_fn, {})
-    node = ifexp.transform(node, self.ctx)
-    module = self.compiled(node, *args)
-    return module
-
-  def test_simple(self):
-
-    def test_fn(x):
-      return 1 if x else 0
-
-    with self.compiled_fn(test_fn) as result:
-      result.autograph_util = utils
-      for x in [0, 1]:
-        self.assertEqual(test_fn(x), result.test_fn(x))
-
-  def test_fn(self):
-
-    def f(x):
-      return 3 * x
-
-    def test_fn(x):
-      y = f(x * x if x > 0 else x)
-      return y
-
-    with self.compiled_fn(test_fn) as result:
-      result.autograph_util = utils
-      result.f = f
-      for x in [-2, 2]:
-        self.assertEqual(test_fn(x), result.test_fn(x))
-
-  def test_exp(self):
-
-    def test_fn(x):
-      return x * x if x > 0 else x
-
-    with self.compiled_fn(test_fn) as result:
-      result.autograph_util = utils
-      for x in [-2, 2]:
-        self.assertEqual(test_fn(x), result.test_fn(x))
-
-  def test_nested(self):
-
-    def test_fn(x):
-      return x * x if x > 0 else x if x else 1
-
-    with self.compiled_fn(test_fn) as result:
-      result.autograph_util = utils
-      for x in [-2, 0, 2]:
-        self.assertEqual(test_fn(x), result.test_fn(x))
-
-  def test_in_cond(self):
-
-    def test_fn(x):
-      if x > 0:
-        return x * x if x < 5 else x * x * x
-      return -x
-
-    with self.compiled_fn(test_fn) as result:
-      result.autograph_util = utils
-      for x in [-2, 2, 5]:
-        self.assertEqual(test_fn(x), result.test_fn(x))
-
-  def test_assign_in_cond(self):
-
-    def test_fn(x):
-      if x > 0:
-        x = -x if x < 5 else x
-      return x
-
-    with self.compiled_fn(test_fn) as result:
-      result.autograph_util = utils
-      for x in [-2, 2, 5]:
-        self.assertEqual(test_fn(x), result.test_fn(x))
-
-
-if __name__ == '__main__':
-  test.main()
diff --git a/tensorflow/contrib/autograph/converters/list_comprehension.py b/tensorflow/contrib/autograph/converters/list_comprehension.py
deleted file mode 100644
index c4a13ee..0000000
--- a/tensorflow/contrib/autograph/converters/list_comprehension.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Canonicalizing list comprehensions into for and if statements.
-
-e.g.
-result = [x * x for x in xs]
-
-becomes
-
-result = []
-for x in xs:
-  elt = x * x
-  result.append(elt)
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import gast
-
-from tensorflow.contrib.autograph.core import converter
-from tensorflow.contrib.autograph.pyct import parser
-from tensorflow.contrib.autograph.pyct import templates
-
-
-class ListCompCanonicalizationTransformer(converter.Base):
-  """NodeTransformer to canonicalize list comprehensions."""
-
-  def make_update_list_node(self, list_, elt):
-    return templates.replace('list_.append(elt)', list_=list_, elt=elt)[0]
-
-  def instantiate_list_node(self):
-    return parser.parse_str('[]').body[0].value
-
-  def visit_Assign(self, node):
-    if not isinstance(node.value, gast.ListComp):
-      return node
-    if len(node.targets) > 1:
-      raise ValueError('Only support single assignment.')
-    return self.canonicalize_listcomp(node.targets[0], node.value)
-
-  def canonicalize_listcomp(self, result_node, list_comp_node):
-
-    make_list = templates.replace(
-        'list_ = create_list',
-        list_=result_node,
-        create_list=self.instantiate_list_node())
-    loop_body = self.make_update_list_node(result_node, list_comp_node.elt)
-
-    for gen in reversed(list_comp_node.generators):
-      for gen_if in reversed(gen.ifs):
-        loop_body = templates.replace(
-            'if test: loop_body', test=gen_if, loop_body=loop_body)
-      loop_body = templates.replace(
-          'for target in iter_: loop_body',
-          iter_=gen.iter,
-          target=gen.target,
-          loop_body=loop_body)
-
-    return make_list + loop_body
-
-
-def transform(node, ctx):
-  return ListCompCanonicalizationTransformer(ctx).visit(node)
diff --git a/tensorflow/contrib/autograph/converters/list_comprehension_test.py b/tensorflow/contrib/autograph/converters/list_comprehension_test.py
deleted file mode 100644
index 2bbee93..0000000
--- a/tensorflow/contrib/autograph/converters/list_comprehension_test.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Tests for list_comprehension module."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from tensorflow.contrib.autograph.converters import list_comprehension
-from tensorflow.contrib.autograph.core import converter_testing
-from tensorflow.python.platform import test
-
-
-class ListCompTest(converter_testing.TestCase):
-
-  def test_basic(self):
-
-    def test_fn(l):
-      s = [e * e for e in l]
-      return s
-
-    node = self.parse_and_analyze(test_fn, {})
-    node = list_comprehension.transform(node, self.ctx)
-
-    with self.compiled(node) as result:
-      l = [1, 2, 3]
-      self.assertEqual(test_fn(l), result.test_fn(l))
-      l = []
-      self.assertEqual(test_fn(l), result.test_fn(l))
-
-  def test_multiple_generators(self):
-
-    def test_fn(l):
-      s = [e * e for sublist in l for e in sublist]
-      return s
-
-    node = self.parse_and_analyze(test_fn, {})
-    node = list_comprehension.transform(node, self.ctx)
-
-    with self.compiled(node) as result:
-      l = [[1], [2], [3]]
-      self.assertEqual(test_fn(l), result.test_fn(l))
-      l = []
-      self.assertEqual(test_fn(l), result.test_fn(l))
-
-  def test_conds(self):
-
-    def test_fn(l):
-      s = [e * e for e in l if e > 1]
-      return s
-
-    node = self.parse_and_analyze(test_fn, {})
-    node = list_comprehension.transform(node, self.ctx)
-
-    with self.compiled(node) as result:
-      l = [1, 2, 3]
-      self.assertEqual(test_fn(l), result.test_fn(l))
-      l = []
-      self.assertEqual(test_fn(l), result.test_fn(l))
-
-
-if __name__ == '__main__':
-  test.main()
diff --git a/tensorflow/contrib/autograph/converters/list_comprehensions.py b/tensorflow/contrib/autograph/converters/list_comprehensions.py
new file mode 100644
index 0000000..ecf4628
--- /dev/null
+++ b/tensorflow/contrib/autograph/converters/list_comprehensions.py
@@ -0,0 +1,82 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Lowers list comprehensions into for and if statements.
+
+Example:
+
+  result = [x * x for x in xs]
+
+becomes
+
+  result = []
+  for x in xs:
+    elt = x * x
+    result.append(elt)
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import gast
+
+from tensorflow.contrib.autograph.core import converter
+from tensorflow.contrib.autograph.pyct import templates
+
+
+# TODO(mdan): This should convert directly to operator calls.
+
+
+class ListCompTransformer(converter.Base):
+  """Lowers list comprehensions into standard control flow."""
+
+  def visit_Assign(self, node):
+    if not isinstance(node.value, gast.ListComp):
+      return self.generic_visit(node)
+    if len(node.targets) > 1:
+      raise NotImplementedError('multiple assignments')
+
+    target, = node.targets
+    list_comp_node = node.value
+
+    template = """
+      target = []
+    """
+    initialization = templates.replace(template, target=target)
+
+    template = """
+      target.append(elt)
+    """
+    body = templates.replace(template, target=target, elt=list_comp_node.elt)
+
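+    # Process the generators innermost-first so that, e.g., in
+    #   [e * e for sublist in l for e in sublist]
+    # the `for e in sublist` loop ends up nested inside `for sublist in l`.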
+    for gen in reversed(list_comp_node.generators):
+      for gen_if in reversed(gen.ifs):
+        template = """
+          if test:
+            body
+        """
+        body = templates.replace(template, test=gen_if, body=body)
+      template = """
+        for target in iter_:
+          body
+      """
+      body = templates.replace(
+          template, iter_=gen.iter, target=gen.target, body=body)
+
+    return initialization + body
+
+
+def transform(node, ctx):
+  return ListCompTransformer(ctx).visit(node)
diff --git a/tensorflow/contrib/autograph/converters/list_comprehensions_test.py b/tensorflow/contrib/autograph/converters/list_comprehensions_test.py
new file mode 100644
index 0000000..59b5ce9
--- /dev/null
+++ b/tensorflow/contrib/autograph/converters/list_comprehensions_test.py
@@ -0,0 +1,61 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for list_comprehensions module."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.autograph.converters import list_comprehensions
+from tensorflow.contrib.autograph.core import converter_testing
+from tensorflow.python.platform import test
+
+
+class ListCompTest(converter_testing.TestCase):
+
+  def assertTransformedEquivalent(self, test_fn, *inputs):
+    with self.converted(test_fn, list_comprehensions, {}) as result:
+      self.assertEqual(test_fn(*inputs), result.test_fn(*inputs))
+
+  def test_basic(self):
+
+    def test_fn(l):
+      s = [e * e for e in l]
+      return s
+
+    self.assertTransformedEquivalent(test_fn, [])
+    self.assertTransformedEquivalent(test_fn, [1, 2, 3])
+
+  def test_multiple_generators(self):
+
+    def test_fn(l):
+      s = [e * e for sublist in l for e in sublist]
+      return s
+
+    self.assertTransformedEquivalent(test_fn, [])
+    self.assertTransformedEquivalent(test_fn, [[1], [2], [3]])
+
+  def test_cond(self):
+
+    def test_fn(l):
+      s = [e * e for e in l if e > 1]
+      return s
+
+    self.assertTransformedEquivalent(test_fn, [])
+    self.assertTransformedEquivalent(test_fn, [1, 2, 3])
+
+
+if __name__ == '__main__':
+  test.main()
diff --git a/tensorflow/contrib/autograph/converters/lists.py b/tensorflow/contrib/autograph/converters/lists.py
index d77a044..a02fc82 100644
--- a/tensorflow/contrib/autograph/converters/lists.py
+++ b/tensorflow/contrib/autograph/converters/lists.py
@@ -33,6 +33,7 @@
 import gast
 
 from tensorflow.contrib.autograph.core import converter
+from tensorflow.contrib.autograph.lang import directives
 from tensorflow.contrib.autograph.pyct import anno
 from tensorflow.contrib.autograph.pyct import parser
 from tensorflow.contrib.autograph.pyct import templates
@@ -88,12 +89,12 @@
     scope = anno.getanno(node, NodeAnno.ARGS_SCOPE)
     target_node = node.func.value
 
-    # Attempt to use a related name if can get one. Otherwise use something
+    # Attempt to use a related name if one exists. Otherwise use something
     # generic.
     if anno.hasanno(target_node, anno.Basic.QN):
       target_name = anno.getanno(target_node, anno.Basic.QN).ssf()
     else:
-      target_name = 'list'
+      target_name = 'list_'
     pop_var_name = self.ctx.namer.new_symbol(target_name, scope.referenced)
 
     pop_uses = self.get_local(POP_USES, [])
@@ -104,9 +105,10 @@
 
   def _replace_stack_call(self, node):
     assert len(node.args) == 1
-    dtype = anno.getanno(
+    dtype = self.get_definition_directive(
         node.args[0],
-        'element_type',
+        directives.set_element_type,
+        'dtype',
         default=templates.replace_as_expression('None'))
     template = """
       ag__.list_stack(
@@ -134,7 +136,10 @@
         node = self._replace_append_call(node)
       elif func_name == 'pop' and (len(node.args) <= 1):
         node = self._replace_pop_call(node)
-      elif func_name == 'stack' and (len(node.args) == 1):
+      elif (func_name == 'stack' and (len(node.args) == 1) and
+            (not node.keywords or node.keywords[0].arg == 'strict')):
+        # This avoids false positives with keyword args.
+        # TODO(mdan): handle kwargs properly.
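+        # For example, `tf.stack(l, axis=0)` is not converted here.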
         node = self._replace_stack_call(node)
 
     return node
@@ -146,15 +151,22 @@
       pop_element = original_call_node.args[0]
     else:
       pop_element = parser.parse_expression('None')
+
     # The call will be something like "target.pop()", and the dtype is hooked to
     # target, hence the func.value.
-    dtype = anno.getanno(
+    # TODO(mdan): This won't work for lists of lists, because it's unclear
+    # how to annotate a list as a "list of lists with a certain element type"
+    # when it is used in operations like `l.pop().pop()`.
+    dtype = self.get_definition_directive(
         original_call_node.func.value,
-        'element_type',
+        directives.set_element_type,
+        'dtype',
         default=templates.replace_as_expression('None'))
-    shape = anno.getanno(
+    shape = self.get_definition_directive(
         original_call_node.func.value,
-        'element_shape',
+        directives.set_element_type,
+        'shape',
         default=templates.replace_as_expression('None'))
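+    # For example, after `directives.set_element_type(l, tf.int32, ())`,
+    # dtype resolves to `tf.int32` and shape to `()`.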
 
     template = """
diff --git a/tensorflow/contrib/autograph/converters/lists_test.py b/tensorflow/contrib/autograph/converters/lists_test.py
index ea04097..447a88b 100644
--- a/tensorflow/contrib/autograph/converters/lists_test.py
+++ b/tensorflow/contrib/autograph/converters/lists_test.py
@@ -18,9 +18,11 @@
 from __future__ import division
 from __future__ import print_function
 
-from tensorflow.contrib.autograph import utils
 from tensorflow.contrib.autograph.converters import lists
 from tensorflow.contrib.autograph.core import converter_testing
+from tensorflow.contrib.autograph.lang import directives
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import parser
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import ops
 from tensorflow.python.ops import array_ops
@@ -28,6 +30,9 @@
 from tensorflow.python.platform import test
 
 
+tf = None  # Will be replaced by a mock.
+
+
 class ListTest(converter_testing.TestCase):
 
   def test_empty_list(self):
@@ -35,10 +40,7 @@
     def test_fn():
       return []
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = lists.transform(node, self.ctx)
-
-    with self.compiled(node) as result:
+    with self.converted(test_fn, lists, {}) as result:
       tl = result.test_fn()
       # Empty tensor lists cannot be evaluated or stacked.
       self.assertTrue(isinstance(tl, ops.Tensor))
@@ -49,10 +51,7 @@
     def test_fn():
       return [1, 2, 3]
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = lists.transform(node, self.ctx)
-
-    with self.compiled(node) as result:
+    with self.converted(test_fn, lists, {}) as result:
       with self.test_session() as sess:
         tl = result.test_fn()
         r = list_ops.tensor_list_stack(tl, dtypes.int32)
@@ -66,10 +65,7 @@
       l.append(3)
       return l
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = lists.transform(node, self.ctx)
-
-    with self.compiled(node) as result:
+    with self.converted(test_fn, lists, {}) as result:
       with self.test_session() as sess:
         tl = result.test_fn()
         r = list_ops.tensor_list_stack(tl, dtypes.int32)
@@ -79,23 +75,19 @@
 
     def test_fn():
       l = [1, 2, 3]
-      utils.set_element_type(l, dtypes.int32, ())
       s = l.pop()
       return s, l
 
-    node = self.parse_and_analyze(
-        test_fn,
-        {
-            'utils': utils,
-            'dtypes': dtypes
-        },
-        include_type_analysis=True,
-    )
-    node = lists.transform(node, self.ctx)
+    node, ctx = self.prepare(test_fn, {})
+    def_, = anno.getanno(node.body[0].body[0].targets[0],
+                         anno.Static.ORIG_DEFINITIONS)
+    def_.directives[directives.set_element_type] = {
+        'dtype': parser.parse_expression('tf.int32'),
+        'shape': parser.parse_expression('()'),
+    }
+    node = lists.transform(node, ctx)
 
-    with self.compiled(node) as result:
-      result.utils = utils
-      result.dtypes = dtypes
+    with self.compiled(node, {}, dtypes.int32) as result:
       with self.test_session() as sess:
         ts, tl = result.test_fn()
         r = list_ops.tensor_list_stack(tl, dtypes.int32)
@@ -108,10 +100,7 @@
       s = l.pop().pop()
       return s
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = lists.transform(node, self.ctx)
-
-    with self.compiled(node) as result:
+    with self.converted(test_fn, lists, {}) as result:
       test_input = [1, 2, [1, 2, 3]]
      # TODO(mdan): Pass a list of lists of tensors when we fully support that.
      # For now, we pass a regular Python list of lists just to verify that
@@ -120,29 +109,24 @@
 
   def test_list_stack(self):
 
-    tf = None  # Will be replaced with a mock.
-
     def test_fn():
       l = [1, 2, 3]
-      utils.set_element_type(l, dtypes.int32)
       return tf.stack(l)
 
-    node = self.parse_and_analyze(
-        test_fn,
-        {
-            'utils': utils,
-            'dtypes': dtypes
-        },
-        include_type_analysis=True,
-    )
-    node = lists.transform(node, self.ctx)
+    node, ctx = self.prepare(test_fn, {})
+    def_, = anno.getanno(node.body[0].body[0].targets[0],
+                         anno.Static.ORIG_DEFINITIONS)
+    def_.directives[directives.set_element_type] = {
+        'dtype': parser.parse_expression('tf.int32')
+    }
+    node = lists.transform(node, ctx)
 
-    with self.compiled(node, array_ops.stack, dtypes.int32) as result:
-      result.utils = utils
-      result.dtypes = dtypes
+    with self.compiled(node, {}, array_ops.stack, dtypes.int32) as result:
       with self.test_session() as sess:
         self.assertAllEqual(sess.run(result.test_fn()), [1, 2, 3])
 
+  # TODO(mdan): Add a test with tf.stack with axis kwarg.
+
 
 if __name__ == '__main__':
   test.main()
diff --git a/tensorflow/contrib/autograph/converters/logical_expressions_test.py b/tensorflow/contrib/autograph/converters/logical_expressions_test.py
index 4818602..ca07de5 100644
--- a/tensorflow/contrib/autograph/converters/logical_expressions_test.py
+++ b/tensorflow/contrib/autograph/converters/logical_expressions_test.py
@@ -31,10 +31,8 @@
     def test_fn(a, b):
       return a == b
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = logical_expressions.transform(node, self.ctx)
-
-    with self.compiled(node, math_ops.equal) as result:
+    with self.converted(test_fn, logical_expressions, {},
+                        math_ops.equal) as result:
       with self.test_session() as sess:
         self.assertTrue(sess.run(result.test_fn(1, 1)))
         self.assertFalse(sess.run(result.test_fn(1, 2)))
@@ -44,11 +42,8 @@
     def test_fn(a, b, c):
       return (a or b) and (a or b or c)
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = logical_expressions.transform(node, self.ctx)
-
-    with self.compiled(node, math_ops.logical_or,
-                       math_ops.logical_and) as result:
+    with self.converted(test_fn, logical_expressions, {}, math_ops.logical_or,
+                        math_ops.logical_and) as result:
       with self.test_session() as sess:
         self.assertTrue(sess.run(result.test_fn(True, False, True)))
 
diff --git a/tensorflow/contrib/autograph/converters/name_scopes_test.py b/tensorflow/contrib/autograph/converters/name_scopes_test.py
index 444d0bc..a329b0d 100644
--- a/tensorflow/contrib/autograph/converters/name_scopes_test.py
+++ b/tensorflow/contrib/autograph/converters/name_scopes_test.py
@@ -31,17 +31,13 @@
 
     def test_fn(l):
       """This should stay here."""
-      a = 5
+      a = 1
       l += a
       return l
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = name_scopes.transform(node, self.ctx)
-
-    with self.compiled(node, ops.name_scope) as result:
+    with self.converted(test_fn, name_scopes, {}, ops.name_scope) as result:
       result_op = result.test_fn(constant_op.constant(1))
       self.assertIn('test_fn/', result_op.op.name)
-
       self.assertEqual('This should stay here.', result.test_fn.__doc__)
 
   def test_long_docstring(self):
@@ -54,13 +50,12 @@
       Returns:
         l
       """
-      return l
+      return l + 1
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = name_scopes.transform(node, self.ctx)
-
-    with self.compiled(node, ops.name_scope) as result:
-      self.assertIn('Multi-line', result.test_fn.__doc__)
+    with self.converted(test_fn, name_scopes, {}, ops.name_scope) as result:
+      result_op = result.test_fn(constant_op.constant(1))
+      self.assertIn('test_fn/', result_op.op.name)
+      self.assertIn('Multi-line docstring.', result.test_fn.__doc__)
       self.assertIn('Returns:', result.test_fn.__doc__)
 
   def test_nested_functions(self):
@@ -68,21 +63,16 @@
     def test_fn(l):
 
       def inner_fn(i):
-        return i ** 2
+        return i + 1
 
-      l += 4
-      return inner_fn(l)
+      l += 1
+      return l, inner_fn(l)
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = name_scopes.transform(node, self.ctx)
-
-    with self.compiled(node, ops.name_scope) as result:
-      result_op = result.test_fn(constant_op.constant(1))
-      first_result_input_name = result_op.op.inputs[0].name
-      second_result_input_name = result_op.op.inputs[1].name
-      self.assertIn('test_fn/', first_result_input_name)
-      self.assertNotIn('inner_fn', first_result_input_name)
-      self.assertIn('test_fn/inner_fn/', second_result_input_name)
+    with self.converted(test_fn, name_scopes, {}, ops.name_scope) as result:
+      first, second = result.test_fn(constant_op.constant(1))
+      self.assertIn('test_fn/', first.op.name)
+      self.assertNotIn('inner_fn', first.op.name)
+      self.assertIn('test_fn/inner_fn/', second.op.name)
 
   def test_method(self):
 
@@ -91,48 +81,20 @@
       def test_fn(self, l):
 
         def inner_fn(i):
-          return i ** 2
+          return i + 1
 
-        l += 4
-        return inner_fn(l)
+        l += 1
+        return l, inner_fn(l)
 
-    # Note that 'TestClass' was needed in the namespace here.
-    node = self.parse_and_analyze(
-        TestClass, {'TestClass': TestClass}, owner_type=TestClass)
-    node = name_scopes.transform(node, self.ctx)
+    ns = {'TestClass': TestClass}
+    node, ctx = self.prepare(TestClass, ns, owner_type=TestClass)
+    node = name_scopes.transform(node, ctx)
 
-    with self.compiled(node, ops.name_scope) as result:
-      result_op = result.TestClass().test_fn(constant_op.constant(1))
-      first_result_input_name = result_op.op.inputs[0].name
-      second_result_input_name = result_op.op.inputs[1].name
-      self.assertIn('TestClass/test_fn/', first_result_input_name)
-      self.assertNotIn('inner_fn', first_result_input_name)
-      self.assertIn('TestClass/test_fn/inner_fn/', second_result_input_name)
-
-  def test_operator(self):
-
-    class TestClass(object):
-
-      def __call__(self, l):
-
-        def inner_fn(i):
-          return i ** 2
-
-        l += 4
-        return inner_fn(l)
-
-    # Note that 'TestClass' was needed in the namespace here.
-    node = self.parse_and_analyze(
-        TestClass.__call__, {'TestClass': TestClass}, owner_type=TestClass)
-    node = name_scopes.transform(node, self.ctx)
-
-    with self.compiled(node, ops.name_scope) as result:
-      result_op = result.__call__(TestClass(), constant_op.constant(1))
-      first_result_input_name = result_op.op.inputs[0].name
-      second_result_input_name = result_op.op.inputs[1].name
-      self.assertIn('call__/', first_result_input_name)
-      self.assertNotIn('inner_fn', first_result_input_name)
-      self.assertIn('call__/inner_fn/', second_result_input_name)
+    with self.compiled(node, {}, ops.name_scope) as result:
+      first, second = result.TestClass().test_fn(constant_op.constant(1))
+      self.assertIn('TestClass/test_fn/', first.op.name)
+      self.assertNotIn('inner_fn', first.op.name)
+      self.assertIn('TestClass/test_fn/inner_fn/', second.op.name)
 
 
 if __name__ == '__main__':
diff --git a/tensorflow/contrib/autograph/converters/single_return.py b/tensorflow/contrib/autograph/converters/return_statements.py
similarity index 100%
rename from tensorflow/contrib/autograph/converters/single_return.py
rename to tensorflow/contrib/autograph/converters/return_statements.py
diff --git a/tensorflow/contrib/autograph/converters/return_statements_test.py b/tensorflow/contrib/autograph/converters/return_statements_test.py
new file mode 100644
index 0000000..3c7c8c8
--- /dev/null
+++ b/tensorflow/contrib/autograph/converters/return_statements_test.py
@@ -0,0 +1,167 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for return_statements module."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.autograph.converters import return_statements
+from tensorflow.contrib.autograph.core import converter_testing
+from tensorflow.python.framework import ops
+from tensorflow.python.platform import test
+
+
+class SingleReturnTest(converter_testing.TestCase):
+
+  def assertTransformedEquivalent(self, test_fn, *inputs):
+    ns = {'ops': ops}
+    with self.converted(test_fn, return_statements, ns) as result:
+      self.assertEqual(test_fn(*inputs), result.test_fn(*inputs))
+
+  def test_straightline(self):
+
+    def test_fn(x):
+      return x * x
+
+    self.assertTransformedEquivalent(test_fn, 2)
+
+  def test_conditional(self):
+
+    def test_fn(x):
+      if x > 0:
+        return x
+      else:
+        return x * x
+
+    self.assertTransformedEquivalent(test_fn, 2)
+    self.assertTransformedEquivalent(test_fn, -2)
+
+  def test_missing_orelse(self):
+
+    def test_fn(x):
+      if x > 0:
+        return x
+
+    node, ctx = self.prepare(test_fn, {})
+    with self.assertRaises(ValueError):
+      return_statements.transform(node, ctx)
+
+  def test_missing_orelse_recoverable(self):
+
+    def test_fn(x):
+      if x > 0:
+        return x
+      return x * x
+
+    self.assertTransformedEquivalent(test_fn, 2)
+    self.assertTransformedEquivalent(test_fn, -2)
+
+  def test_missing_branch_return_recoverable(self):
+
+    def test_fn(x):
+      if x < 0:
+        x *= x
+      else:
+        return x
+      return x
+
+    self.assertTransformedEquivalent(test_fn, 2)
+    self.assertTransformedEquivalent(test_fn, -2)
+
+  def test_conditional_nested(self):
+
+    def test_fn(x):
+      if x > 0:
+        if x < 5:
+          return x
+        else:
+          return x * x
+      else:
+        return x * x * x
+
+    self.assertTransformedEquivalent(test_fn, 2)
+    self.assertTransformedEquivalent(test_fn, -2)
+    self.assertTransformedEquivalent(test_fn, 5)
+
+  def test_context_manager(self):
+
+    def test_fn(x):
+      with ops.name_scope(''):
+        return x * x
+
+    self.assertTransformedEquivalent(test_fn, 2)
+    self.assertTransformedEquivalent(test_fn, -2)
+
+  def test_context_manager_in_conditional(self):
+
+    def test_fn(x):
+      if x > 0:
+        with ops.name_scope(''):
+          return x * x
+      else:
+        return x
+
+    self.assertTransformedEquivalent(test_fn, 2)
+    self.assertTransformedEquivalent(test_fn, -2)
+
+  def test_conditional_in_context_manager(self):
+
+    def test_fn(x):
+      with ops.name_scope(''):
+        if x > 0:
+          return x * x
+        else:
+          return x
+
+    self.assertTransformedEquivalent(test_fn, 2)
+    self.assertTransformedEquivalent(test_fn, -2)
+
+  def test_no_return(self):
+
+    def test_fn(x):
+      x *= x
+
+    self.assertTransformedEquivalent(test_fn, 2)
+
+  def test_nested_functions(self):
+
+    def test_fn(x):
+
+      def inner_fn(y):
+        if y > 0:
+          return y * y
+        else:
+          return y
+
+      return inner_fn(x)
+
+    self.assertTransformedEquivalent(test_fn, 2)
+    self.assertTransformedEquivalent(test_fn, -2)
+
+  def test_loop(self):
+
+    def test_fn(x):
+      for _ in range(10):
+        return x
+      return x
+
+    node, ctx = self.prepare(test_fn, {})
+    with self.assertRaises(ValueError):
+      return_statements.transform(node, ctx)
+
+
+if __name__ == '__main__':
+  test.main()
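
A minimal sketch of what these tests check, in plain Python: the converter normalizes functions with multiple returns into an equivalent single-return form, which is what makes assertTransformedEquivalent meaningful. The names `before`, `after` and the `retval` temporary below are illustrative only; the actual rewrite happens on the AST.

def before(x):
  if x > 0:
    return x
  else:
    return x * x


# Roughly the single-return shape the transformation produces:
def after(x):
  if x > 0:
    retval = x  # hypothetical temporary introduced by the rewrite
  else:
    retval = x * x
  return retval


assert before(2) == after(2) and before(-2) == after(-2)
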
diff --git a/tensorflow/contrib/autograph/converters/side_effect_guards_test.py b/tensorflow/contrib/autograph/converters/side_effect_guards_test.py
index a7ad8ef..de18743 100644
--- a/tensorflow/contrib/autograph/converters/side_effect_guards_test.py
+++ b/tensorflow/contrib/autograph/converters/side_effect_guards_test.py
@@ -25,140 +25,138 @@
 from tensorflow.python.framework import ops
 from tensorflow.python.ops import control_flow_ops
 from tensorflow.python.ops import state_ops
-from tensorflow.python.ops import variables
+from tensorflow.python.ops import variable_scope
 from tensorflow.python.platform import test
 
 
+tf = None  # Will be replaced by a mock.
+
+
 class SideEffectGuardsTest(converter_testing.TestCase):
 
   def test_side_effect_on_return_only_variable(self):
 
-    tf = None
-
     def test_fn(a):
       tf.assign(a, a + 1)
       return a
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = side_effect_guards.transform(node, self.ctx)
+    node, ctx = self.prepare(test_fn, {})
+    node = side_effect_guards.transform(node, ctx)
 
-    with self.compiled(node, state_ops.assign) as result:
-      self.assertEqual(len(node.body[0].body), 1)
+    self.assertEqual(len(node.body[0].body), 1)
+
+    with self.compiled(node, {}, state_ops.assign) as result:
       with self.test_session() as sess:
-        v = variables.Variable(2)
+        v = variable_scope.get_variable('test', initializer=2)
         sess.run(v.initializer)
-        # NOTE: We don't expect the assignment to execute in this case, because
-        # variables cannot be reliably guarded.
-        self.assertEqual(2, sess.run(result.test_fn(v)))
+        sess.run(result.test_fn(v))
+        # TODO(mdan): Add support for this use case.
+        # Right now the variable `a` is not conditioned on the `assign` because
+        # there's no way to add control dependencies to a variable object.
+        self.assertEqual(2, sess.run(v))
 
   def test_side_effect_on_used_variable(self):
 
-    tf = None
-
     def test_fn(a):
       tf.assign(a, a + 1)
       return a + 1
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = side_effect_guards.transform(node, self.ctx)
+    node, ctx = self.prepare(test_fn, {})
+    node = side_effect_guards.transform(node, ctx)
 
-    with self.compiled(node, state_ops.assign) as result:
-      self.assertEqual(len(node.body[0].body), 1)
+    self.assertEqual(len(node.body[0].body), 1)
+
+    with self.compiled(node, {}, state_ops.assign) as result:
       with self.test_session() as sess:
-        v = variables.Variable(2)
+        v = variable_scope.get_variable('test', initializer=2)
         sess.run(v.initializer)
-        # NOTE: Unlike test_side_effect_on_return_only_variable, the variable
-        # was used in the local scope and so we could catch the assign's side
-        # effect.
-        self.assertEqual(4, sess.run(result.test_fn(v)))
+        sess.run(result.test_fn(v))
+        # TODO(mdan): Ensure the result of test_fn(v) is also deterministic.
+        # Right now it's 3 or 4 based on whether the read is synchronized.
+        self.assertEqual(3, sess.run(v))
 
   def test_side_effect_on_tensor(self):
 
-    tf = None
-
     def test_fn(a):
       tf.Assert(a > 0, ['expected in throw'])
       return a
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = side_effect_guards.transform(node, self.ctx)
+    node, ctx = self.prepare(test_fn, {})
+    node = side_effect_guards.transform(node, ctx)
 
-    with self.compiled(node, control_flow_ops.Assert) as result:
-      self.assertEqual(len(node.body[0].body), 1)
+    self.assertEqual(len(node.body[0].body), 1)
+
+    with self.compiled(node, {}, control_flow_ops.Assert) as result:
       with self.test_session() as sess:
-        # NOTE: In this case we can also capture the side effect because the
-        # argument is a tensor ans we can wrap it inside an identity.
         with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                      'expected in throw'):
           sess.run(result.test_fn(constant_op.constant(-1)))
 
   def test_multiline_block(self):
 
-    tf = None
-
     def test_fn(a):
-      tf.assign(a, a + 1)
+      tf.assign_add(a, 1)
       b = a + 1
-      tf.assign(a, b + 1)
-      c = b + 1
-      d = c + 1
-      return d
+      tf.assign_add(a, 1)
+      b += 1
+      return b
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = side_effect_guards.transform(node, self.ctx)
+    node, ctx = self.prepare(test_fn, {})
+    node = side_effect_guards.transform(node, ctx)
 
-    with self.compiled(node, state_ops.assign) as result:
-      self.assertEqual(len(node.body[0].body), 1)
+    self.assertEqual(len(node.body[0].body), 1)
+
+    with self.compiled(node, {}, state_ops.assign_add) as result:
       with self.test_session() as sess:
-        v = variables.Variable(2)
+        v = variable_scope.get_variable('test', initializer=2)
         sess.run(v.initializer)
-        self.assertEqual(6, sess.run(result.test_fn(v)))
+        sess.run(result.test_fn(v))
+        # TODO(mdan): Ensure the result of test_fn(v) is also deterministic.
+        self.assertEqual(4, sess.run(v))
 
   def test_multiline_nested_block(self):
 
-    tf = None
-
     def test_fn(a):
       with tf.name_scope('foo'):
         tf.assign(a, a + 1)
         b = a + 1
-        c = b + 1
-        d = c + 1
-      return d
+      return b
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = side_effect_guards.transform(node, self.ctx)
+    node, ctx = self.prepare(test_fn, {})
+    node = side_effect_guards.transform(node, ctx)
 
-    with self.compiled(node, state_ops.assign, ops.name_scope) as result:
-      self.assertEqual(len(node.body[0].body[0].body), 1)
+    self.assertEqual(len(node.body[0].body[0].body), 1)
+
+    with self.compiled(node, {}, state_ops.assign, ops.name_scope) as result:
       with self.test_session() as sess:
-        v = variables.Variable(2)
+        v = variable_scope.get_variable('test', initializer=2)
         sess.run(v.initializer)
-        self.assertEqual(6, sess.run(result.test_fn(v)))
+        sess.run(result.test_fn(v))
+        # TODO(mdan): Ensure the result of test_fn(v) is also deterministic.
+        self.assertEqual(3, sess.run(v))
 
   def test_multiline_block_unsafe(self):
 
-    tf = None
-
     def test_fn(a):
       tf.assign(a, a + 1)
       b = a + 1
-      tf.assign(a, a + 1)
+      tf.assign_add(a, 1)
       c = b + 1
-      d = c + 1
-      return d
+      return c
 
-    node = self.parse_and_analyze(test_fn, {})
-    node = side_effect_guards.transform(node, self.ctx)
+    node, ctx = self.prepare(test_fn, {})
+    node = side_effect_guards.transform(node, ctx)
 
-    with self.compiled(node, state_ops.assign) as result:
-      self.assertEqual(len(node.body[0].body), 1)
+    self.assertEqual(len(node.body[0].body), 1)
+
+    with self.compiled(node, {}, state_ops.assign,
+                       state_ops.assign_add) as result:
       with self.test_session() as sess:
-        v = variables.Variable(2)
+        v = variable_scope.get_variable('test', initializer=2)
         sess.run(v.initializer)
-        # NOTE: This intentionally highlights the flakiness. The test should be
-        # tightened down once that is solved.
-        self.assertTrue(sess.run(result.test_fn(v)) in (6, 7))
+        sess.run(result.test_fn(v))
+        # TODO(mdan): Ensure the result of test_fn(v) is also deterministic.
+        self.assertEqual(4, sess.run(v))
 
 
 if __name__ == '__main__':
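
A minimal sketch of the guard these expected values rely on, assuming TF 1.x graph mode: the converter orders stateful ops before subsequent reads via control dependencies. The function name `guarded_test_fn` is illustrative; the generated code differs in detail.

import tensorflow as tf


def guarded_test_fn(a):
  # The assignment is forced to run before anything later in the block.
  assign_op = tf.assign(a, a + 1)
  with tf.control_dependencies([assign_op]):
    # Creating the read inside the dependency scope orders it after the
    # assignment; tf.identity materializes that ordering on the result.
    return tf.identity(a + 1)
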
diff --git a/tensorflow/contrib/autograph/converters/single_return_test.py b/tensorflow/contrib/autograph/converters/single_return_test.py
deleted file mode 100644
index 1f0de43..0000000
--- a/tensorflow/contrib/autograph/converters/single_return_test.py
+++ /dev/null
@@ -1,189 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Tests for single_return module."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from tensorflow.contrib.autograph.converters import single_return
-from tensorflow.contrib.autograph.core import converter_testing
-from tensorflow.python.framework.ops import name_scope
-from tensorflow.python.platform import test
-
-
-class SingleReturnTest(converter_testing.TestCase):
-
-  def compiled_fn(self, test_fn, *args):
-    node = self.parse_and_analyze(test_fn, {})
-    node = single_return.transform(node, self.ctx)
-    module = self.compiled(node, *args)
-    return module
-
-  def test_noop(self):
-    # Noop
-    def test_fn(x):
-      return x
-
-    with self.compiled_fn(test_fn) as result:
-      self.assertEqual(test_fn(2.0), result.test_fn(2.0))
-
-  def test_return_expression(self):
-    # ANF
-    def test_fn(x):
-      return x * x
-
-    with self.compiled_fn(test_fn) as result:
-      x = 2
-      self.assertEqual(test_fn(x), result.test_fn(x))
-
-  def test_merge(self):
-    # Simple merge
-    def test_fn(x):
-      if x > 0:
-        return x
-      else:
-        return x * x
-
-    with self.compiled_fn(test_fn) as result:
-      for x in [-2, 2]:
-        self.assertEqual(test_fn(x), result.test_fn(x))
-
-  def test_orphan_branch(self):
-
-    def test_fn(x):
-      if x > 0:
-        return x
-
-    with self.assertRaises(ValueError):
-      self.compiled_fn(test_fn)
-
-  def test_lift_body_into_false_branch(self):
-
-    def test_fn(x):
-      if x > 0:
-        return x
-      return x * x
-
-    with self.compiled_fn(test_fn) as result:
-      for x in [-2, 2]:
-        self.assertEqual(test_fn(x), result.test_fn(x))
-
-  def test_lift_body_into_true_branch(self):
-
-    def test_fn(x):
-      if x < 0:
-        x *= x
-      else:
-        # TODO(alexbw): linter bug here that requires us suppress this warning.
-        return x  # pylint: disable=undefined-loop-variable
-      return x
-
-    with self.compiled_fn(test_fn) as result:
-      for x in [-2, 2]:
-        self.assertEqual(test_fn(x), result.test_fn(x))
-
-  def test_nested_if(self):
-
-    def test_fn(x):
-      if x > 0:
-        if x < 5:
-          return x
-        else:
-          return x * x
-      else:
-        return x * x * x
-
-    with self.compiled_fn(test_fn) as result:
-      for x in [-2, 2, 5]:
-        self.assertEqual(test_fn(x), result.test_fn(x))
-
-  def test_context_manager(self):
-
-    def test_fn(x):
-
-      with name_scope(''):
-        return x * x
-
-    with self.compiled_fn(test_fn) as result:
-      result.name_scope = name_scope
-      for x in [-2, 2]:
-        self.assertEqual(test_fn(x), result.test_fn(x))
-
-  def test_context_manager_in_conditional(self):
-
-    def test_fn(x):
-      if x > 0:
-        with name_scope(''):
-          return x * x
-      else:
-        return x
-
-    with self.compiled_fn(test_fn, name_scope) as result:
-      result.name_scope = name_scope
-      for x in [-2, 2]:
-        self.assertEqual(test_fn(x), result.test_fn(x))
-
-  def text_conditional_in_context_manager(self):
-
-    def test_fn(x):
-      with name_scope(''):
-        if x > 0:
-          return x * x
-        else:
-          return x
-
-    with self.compiled_fn(test_fn) as result:
-      result.name_scope = name_scope
-      for x in [-2, 2]:
-        self.assertEqual(test_fn(x), result.test_fn(x))
-
-  def test_no_return(self):
-
-    def test_fn(x):
-      x *= x
-
-    with self.compiled_fn(test_fn) as result:
-      self.assertEqual(test_fn(2), result.test_fn(2))
-
-  def test_nested_functiondefs(self):
-
-    def test_fn(x):
-
-      def inner_fn(y):
-        if y > 0:
-          return y * y
-        else:
-          return y
-
-      return inner_fn(x)
-
-    with self.compiled_fn(test_fn) as result:
-      for x in [-2, 2]:
-        self.assertEqual(test_fn(x), result.test_fn(x))
-
-  def test_loop(self):
-
-    def test_fn(x):
-      for _ in range(10):
-        return x
-      return x
-
-    with self.assertRaises(ValueError):
-      self.compiled_fn(test_fn)
-
-
-if __name__ == '__main__':
-  test.main()
diff --git a/tensorflow/contrib/autograph/converters/slices.py b/tensorflow/contrib/autograph/converters/slices.py
index 3f5fc57..9cfa066 100644
--- a/tensorflow/contrib/autograph/converters/slices.py
+++ b/tensorflow/contrib/autograph/converters/slices.py
@@ -21,7 +21,7 @@
 import gast
 
 from tensorflow.contrib.autograph.core import converter
-from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.lang import directives
 from tensorflow.contrib.autograph.pyct import templates
 
 
@@ -56,17 +56,17 @@
   def visit_Subscript(self, node):
     node = self.generic_visit(node)
     if not isinstance(node.slice, gast.Index):
-      # TODO(mdan): It might make more sense to wave them through.
-      raise NotImplementedError('non-index slice')
+      return node
 
     if not isinstance(node.ctx, gast.Load):
       # Index writes are handled at a higher level, one at which the rvalue is
       # also available.
       return node
 
-    dtype = anno.getanno(
+    dtype = self.get_definition_directive(
         node.value,
-        'element_type',
+        directives.set_element_type,
+        'dtype',
         default=templates.replace_as_expression('None'))
 
     template = """
diff --git a/tensorflow/contrib/autograph/converters/slices_test.py b/tensorflow/contrib/autograph/converters/slices_test.py
index df9a4c8..3c0f81e 100644
--- a/tensorflow/contrib/autograph/converters/slices_test.py
+++ b/tensorflow/contrib/autograph/converters/slices_test.py
@@ -18,9 +18,12 @@
 from __future__ import division
 from __future__ import print_function
 
-from tensorflow.contrib.autograph import utils
 from tensorflow.contrib.autograph.converters import slices
 from tensorflow.contrib.autograph.core import converter_testing
+from tensorflow.contrib.autograph.lang import directives
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import parser
+from tensorflow.contrib.autograph.pyct import transformer
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
 from tensorflow.python.ops import list_ops
@@ -32,28 +35,42 @@
   def test_index_access(self):
 
     def test_fn(l):
-      utils.set_element_type(l, dtypes.int32)
       return l[1]
 
-    node = self.parse_and_analyze(
-        test_fn,
-        {
-            'utils': utils,
-            'dtypes': dtypes
-        },
-        include_type_analysis=True,
-    )
-    node = slices.transform(node, self.ctx)
+    node, ctx = self.prepare(test_fn, {})
+    def_, = anno.getanno(node.body[0].args.args[0], anno.Static.DEFINITIONS)
+    def_.directives[directives.set_element_type] = {
+        'dtype': parser.parse_expression('tf.int32')
+    }
+    node = slices.transform(node, ctx)
 
-    with self.compiled(node, dtypes.int32) as result:
-      result.utils = utils
-      result.dtypes = dtypes
+    with self.compiled(node, {}, dtypes.int32) as result:
       with self.test_session() as sess:
         tl = list_ops.tensor_list_from_tensor(
             [1, 2], element_shape=constant_op.constant([], dtype=dtypes.int32))
         y = result.test_fn(tl)
         self.assertEqual(2, sess.run(y))
 
+  def test_index_access_multiple_definitions(self):
+
+    def test_fn(l):
+      if l:
+        l = []
+      return l[1]
+
+    node, ctx = self.prepare(test_fn, {})
+    def_, = anno.getanno(node.body[0].args.args[0], anno.Static.DEFINITIONS)
+    def_.directives[directives.set_element_type] = {
+        'dtype': parser.parse_expression('tf.int32')
+    }
+    def_, = anno.getanno(node.body[0].body[0].body[0].targets[0],
+                         anno.Static.DEFINITIONS)
+    def_.directives[directives.set_element_type] = {
+        'dtype': parser.parse_expression('tf.float32')
+    }
+    with self.assertRaises(transformer.AutographParseError):
+      slices.transform(node, ctx)
+
 
 if __name__ == '__main__':
   test.main()
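
A minimal sketch of the directive plumbing these tests exercise, using only names that appear in this patch; the helper `annotate_param_dtype` is hypothetical and assumes `fn_node` has already been through the reaching-definitions analysis (e.g. via `prepare`).

from tensorflow.contrib.autograph.lang import directives
from tensorflow.contrib.autograph.pyct import anno
from tensorflow.contrib.autograph.pyct import parser


def annotate_param_dtype(fn_node, dtype_src='tf.int32'):
  # Attach a set_element_type directive to the definition of the first
  # parameter, exactly as test_index_access does by hand above.
  def_, = anno.getanno(fn_node.body[0].args.args[0],
                       anno.Static.DEFINITIONS)
  def_.directives[directives.set_element_type] = {
      'dtype': parser.parse_expression(dtype_src)
  }
  return fn_node
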
diff --git a/tensorflow/contrib/autograph/core/annos.py b/tensorflow/contrib/autograph/core/annos.py
deleted file mode 100644
index b8937ce..0000000
--- a/tensorflow/contrib/autograph/core/annos.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Annotations specific to AutoGraph."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from enum import Enum
-
-
-class NoValue(Enum):
-
-  def __repr__(self):
-    return self.name
-
-
-class NodeAnno(NoValue):
-  """Additional annotations used by AutoGraph converters.
-
-  These are in addition to the basic annotations declared in pyct/anno.py and
-  pyct/static_analysis/annos.py.
-  """
-
-  # The directives collection - see directives.py
-  DIRECTIVES = (
-      'Dict depicting static directive calls. See the directives converter.')
diff --git a/tensorflow/contrib/autograph/core/converter.py b/tensorflow/contrib/autograph/core/converter.py
index 54e6aa0..a93e4a8 100644
--- a/tensorflow/contrib/autograph/core/converter.py
+++ b/tensorflow/contrib/autograph/core/converter.py
@@ -64,15 +64,29 @@
 from __future__ import print_function
 
 import collections
+from enum import Enum
+
 
 from tensorflow.contrib.autograph.core import config
 from tensorflow.contrib.autograph.core import naming
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import ast_util
+from tensorflow.contrib.autograph.pyct import cfg
+from tensorflow.contrib.autograph.pyct import compiler
+from tensorflow.contrib.autograph.pyct import qual_names
 from tensorflow.contrib.autograph.pyct import transformer
+from tensorflow.contrib.autograph.pyct.static_analysis import activity
+from tensorflow.contrib.autograph.pyct.static_analysis import live_values
+from tensorflow.contrib.autograph.pyct.static_analysis import liveness
+from tensorflow.contrib.autograph.pyct.static_analysis import reaching_definitions
+from tensorflow.contrib.autograph.pyct.static_analysis import type_info
 
 # TODO(mdan): These contexts can be refactored into first class objects.
 # For example, we could define Program and Entity abstractions that hold on
 # to the actual entity and have conversion methods.
 
+# TODO(mdan): Add a test specific to this converter.
+
 
 class ProgramContext(object):
   """ProgramContext keeps track of converting function hierarchies.
@@ -197,6 +211,46 @@
     self._used = False
     self._ast_depth = 0
 
+  def get_definition_directive(self, node, directive, arg, default):
+    """Returns the unique directive for a symbol, or a default if none exist.
+
+    See lang/directives.py for details on directives.
+
+    Args:
+      node: ast.AST
+      directive: Callable[..., Any]
+      arg: str
+      default: Any
+
+    Returns:
+      The directive argument value, or the given default if no directive was
+          found for the symbol.
+
+    Raises:
+      ValueError: if conflicting annotations have been found
+    """
+    defs = anno.getanno(node, anno.Static.ORIG_DEFINITIONS, ())
+    if not defs:
+      return default
+
+    # TODO(mdan): Simplify this.
+    arg_values = []
+    for def_ in defs:
+      if (directive not in def_.directives or
+          arg not in def_.directives[directive]):
+        continue
+      arg_value = def_.directives[directive][arg]
+      for prev_value in arg_values:
+        if not ast_util.matches(arg_value, prev_value):
+          qn = anno.getanno(node, anno.Basic.QN)
+          raise ValueError('%s has ambiguous annotations for %s(%s): %s, %s' %
+                           (qn, directive.__name__, arg,
+                            compiler.ast_to_source(arg_value).strip(),
+                            compiler.ast_to_source(prev_value).strip()))
+      arg_values.append(arg_value)
+
+    if not arg_values:
+      return default
+
+    arg_value, = arg_values
+    return arg_value
+
   def visit(self, node):
     if not self._ast_depth:
       if self._used:
@@ -208,3 +262,69 @@
       return super(Base, self).visit(node)
     finally:
       self._ast_depth -= 1
+
+
+class AnnotatedDef(reaching_definitions.Definition):
+
+  def __init__(self):
+    super(AnnotatedDef, self).__init__()
+    self.directives = {}
+
+
+class AgAnno(Enum):
+  """Annotation labels specific to AutoGraph. See anno.py."""
+
+  DIRECTIVES = 'User directives associated with the annotated statement.'
+
+  def __repr__(self):
+    return self.name
+
+
+def standard_analysis(node, context, is_initial=False):
+  """Performs a complete static analysis of the given code.
+
+  Args:
+    node: ast.AST
+    context: converter.EntityContext
+    is_initial: bool, whether this is the initial analysis done on the input
+        source code
+
+  Returns:
+    ast.AST, same as node, with the static analysis annotations added
+  """
+  # TODO(mdan): Clear static analysis here.
+  # TODO(mdan): Consider not running all analyses every time.
+  # TODO(mdan): Don't return a node because it's modified by reference.
+  graphs = cfg.build(node)
+  node = qual_names.resolve(node)
+  node = activity.resolve(node, context.info, None)
+  node = reaching_definitions.resolve(node, context.info, graphs, AnnotatedDef)
+  node = liveness.resolve(node, context.info, graphs)
+  node = live_values.resolve(node, context.info, config.PYTHON_LITERALS)
+  node = type_info.resolve(node, context.info)
+  # This second call allows resolving first-order class attributes.
+  node = live_values.resolve(node, context.info, config.PYTHON_LITERALS)
+  if is_initial:
+    anno.dup(
+        node,
+        {
+            anno.Static.DEFINITIONS: anno.Static.ORIG_DEFINITIONS,
+        },
+    )
+  return node
+
+
+def apply_(node, context, converter_module):
+  """Applies a converter to an AST.
+
+  Args:
+    node: ast.AST
+    context: converter.EntityContext
+    converter_module: converter.Base
+
+  Returns:
+    ast.AST, the result of applying converter to node
+  """
+  node = standard_analysis(node, context)
+  node = converter_module.transform(node, context)
+  return node
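
A minimal sketch of how apply_ is meant to be driven, assuming an EntityContext built the way converter_testing.prepare builds one below; `convert_entity` is a hypothetical wrapper, not part of the patch.

from tensorflow.contrib.autograph.converters import return_statements
from tensorflow.contrib.autograph.core import converter
from tensorflow.contrib.autograph.pyct import parser


def convert_entity(fn, ctx):
  # Parse the function, then let apply_ run the full analysis pipeline
  # (CFG, activity, reaching definitions, liveness, live values, type
  # info) before handing the annotated tree to the converter module.
  node, _ = parser.parse_entity(fn)
  return converter.apply_(node, ctx, return_statements)
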
diff --git a/tensorflow/contrib/autograph/core/converter_testing.py b/tensorflow/contrib/autograph/core/converter_testing.py
index 0e46aac..2025e32 100644
--- a/tensorflow/contrib/autograph/core/converter_testing.py
+++ b/tensorflow/contrib/autograph/core/converter_testing.py
@@ -20,19 +20,19 @@
 
 import contextlib
 import imp
+import sys
+
+import six
 
 from tensorflow.contrib.autograph import operators
 from tensorflow.contrib.autograph import utils
 from tensorflow.contrib.autograph.core import config
 from tensorflow.contrib.autograph.core import converter
+from tensorflow.contrib.autograph.core import errors
 from tensorflow.contrib.autograph.pyct import compiler
 from tensorflow.contrib.autograph.pyct import parser
 from tensorflow.contrib.autograph.pyct import pretty_printer
-from tensorflow.contrib.autograph.pyct import qual_names
 from tensorflow.contrib.autograph.pyct import transformer
-from tensorflow.contrib.autograph.pyct.static_analysis import activity
-from tensorflow.contrib.autograph.pyct.static_analysis import live_values
-from tensorflow.contrib.autograph.pyct.static_analysis import type_info
 from tensorflow.python.platform import test
 
 
@@ -74,7 +74,17 @@
   """Base class for unit tests in this module. Contains relevant utilities."""
 
   @contextlib.contextmanager
-  def compiled(self, node, *symbols):
+  def assertPrints(self, expected_result):
+    try:
+      out_capturer = six.StringIO()
+      sys.stdout = out_capturer
+      yield
+      self.assertEqual(out_capturer.getvalue(), expected_result)
+    finally:
+      sys.stdout = sys.__stdout__
+
+  @contextlib.contextmanager
+  def compiled(self, node, namespace, *symbols):
     source = None
 
     self.dynamic_calls = []
@@ -89,7 +99,11 @@
       fake_ag = self.make_fake_mod('fake_ag', converted_call)
       fake_ag.__dict__.update(operators.__dict__)
       fake_ag.__dict__['utils'] = utils
+      fake_ag.__dict__['rewrite_graph_construction_error'] = (
+          errors.rewrite_graph_construction_error)
       result.__dict__['ag__'] = fake_ag
+      for k, v in namespace.items():
+        result.__dict__[k] = v
       yield result
     except Exception:  # pylint:disable=broad-except
       if source is None:
@@ -98,6 +112,13 @@
         print('Offending compiled code:\n%s' % source)
       raise
 
+  @contextlib.contextmanager
+  def converted(self, entity, converter_module, namespace, *tf_symbols):
+    node, ctx = self.prepare(entity, namespace)
+    node = converter_module.transform(node, ctx)
+    with self.compiled(node, namespace, *tf_symbols) as result:
+      yield result
+
   def make_fake_mod(self, name, *symbols):
     fake_mod = imp.new_module(name)
     for s in symbols:
@@ -114,17 +135,15 @@
     for k, v in ns.items():
       setattr(module, k, v)
 
-  def parse_and_analyze(self,
-                        test_fn,
-                        namespace,
-                        namer=None,
-                        arg_types=None,
-                        include_type_analysis=True,
-                        owner_type=None,
-                        recursive=True,
-                        autograph_decorators=()):
+  def prepare(self,
+              test_fn,
+              namespace,
+              namer=None,
+              arg_types=None,
+              owner_type=None,
+              recursive=True,
+              autograph_decorators=()):
     node, source = parser.parse_entity(test_fn)
-
     if namer is None:
       namer = FakeNamer()
     program_ctx = converter.ProgramContext(
@@ -141,12 +160,5 @@
         arg_types=arg_types,
         owner_type=owner_type)
     ctx = converter.EntityContext(namer, entity_info, program_ctx)
-
-    node = qual_names.resolve(node)
-    node = activity.resolve(node, entity_info)
-    node = live_values.resolve(node, entity_info, {})
-    if include_type_analysis:
-      node = type_info.resolve(node, entity_info)
-      node = live_values.resolve(node, entity_info, {})
-    self.ctx = ctx
-    return node
+    node = converter.standard_analysis(node, ctx, is_initial=True)
+    return node, ctx
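
Taken together, the refactor leaves tests two equivalent paths; a short sketch mirroring the updated tests earlier in this patch (the class and test bodies are illustrative):

from tensorflow.contrib.autograph.converters import logical_expressions
from tensorflow.contrib.autograph.core import converter_testing
from tensorflow.python.ops import math_ops


class ExampleTest(converter_testing.TestCase):

  def test_one_step(self):

    def test_fn(a, b):
      return a == b

    # prepare + transform + compile in one context manager.
    with self.converted(test_fn, logical_expressions, {},
                        math_ops.equal) as result:
      with self.test_session() as sess:
        self.assertTrue(sess.run(result.test_fn(1, 1)))

  def test_two_step(self):

    def test_fn(a, b):
      return a == b

    # Split form, for tests that must inspect or annotate the AST between
    # analysis and compilation.
    node, ctx = self.prepare(test_fn, {})
    node = logical_expressions.transform(node, ctx)
    with self.compiled(node, {}, math_ops.equal) as result:
      with self.test_session() as sess:
        self.assertFalse(sess.run(result.test_fn(1, 2)))
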
diff --git a/tensorflow/contrib/autograph/examples/notebooks/ag_vs_eager_collatz_speed_test.ipynb b/tensorflow/contrib/autograph/examples/notebooks/ag_vs_eager_collatz_speed_test.ipynb
new file mode 100644
index 0000000..c10a574
--- /dev/null
+++ b/tensorflow/contrib/autograph/examples/notebooks/ag_vs_eager_collatz_speed_test.ipynb
@@ -0,0 +1,299 @@
+{
+  "cells": [
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "aQkTGc-d8I1k"
+      },
+      "source": [
+        "This notebook runs a basic speed test for a simple algorithm that implements the process described in Collatz Conjecture.\n",
+        "\n",
+        "https://en.wikipedia.org/wiki/Collatz_conjecture"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "x5ChBlH09jk_"
+      },
+      "source": [
+        "### Imports"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 0,
+      "metadata": {
+        "colab": {
+          "autoexec": {
+            "startup": false,
+            "wait_interval": 0
+          }
+        },
+        "colab_type": "code",
+        "id": "X-QAUpWdPxUh"
+      },
+      "outputs": [],
+      "source": [
+        "!pip install -U -q tf-nightly"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 0,
+      "metadata": {
+        "colab": {
+          "autoexec": {
+            "startup": false,
+            "wait_interval": 0
+          }
+        },
+        "colab_type": "code",
+        "id": "wiKQu3w05eCa"
+      },
+      "outputs": [],
+      "source": [
+        "import numpy as np\n",
+        "from matplotlib import pyplot as plt\n",
+        "import tensorflow as tf\n",
+        "from tensorflow.contrib import autograph as ag\n",
+        "from tensorflow.python.eager import context"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "_cRFTcwT9mnn"
+      },
+      "source": [
+        "### Plotting helpers"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 0,
+      "metadata": {
+        "colab": {
+          "autoexec": {
+            "startup": false,
+            "wait_interval": 0
+          }
+        },
+        "colab_type": "code",
+        "id": "ww7rc0GQ9pMu"
+      },
+      "outputs": [],
+      "source": [
+        "def plot_results(counts, times, title):\n",
+        "  plt.plot(counts, np.array(times) * 1000., 'o')\n",
+        "  plt.ylabel('Time (milliseconds)')\n",
+        "  plt.xlabel('Collatz counter')\n",
+        "  plt.title(title)\n",
+        "  plt.ylim(0, 30)\n"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "ESZGw9s9-Y5_"
+      },
+      "source": [
+        "### Collatz function definition"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 0,
+      "metadata": {
+        "colab": {
+          "autoexec": {
+            "startup": false,
+            "wait_interval": 0
+          }
+        },
+        "colab_type": "code",
+        "id": "qeunWm9m-dT7"
+      },
+      "outputs": [],
+      "source": [
+        "def collatz(a):\n",
+        "  count = 0\n",
+        "  while a \u003e 1.1:\n",
+        "    if a % 2 \u003c 0.1:\n",
+        "      a //= 2\n",
+        "    else:\n",
+        "      a = 3 * a + 1\n",
+        "    count += 1\n",
+        "  return count\n"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "nnFmPDvScsDo"
+      },
+      "source": [
+        "# AutoGraph"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 5,
+      "metadata": {
+        "colab": {
+          "autoexec": {
+            "startup": false,
+            "wait_interval": 0
+          },
+          "height": 301
+        },
+        "colab_type": "code",
+        "executionInfo": {
+          "elapsed": 9153,
+          "status": "ok",
+          "timestamp": 1531757473651,
+          "user": {
+            "displayName": "",
+            "photoUrl": "",
+            "userId": ""
+          },
+          "user_tz": 240
+        },
+        "id": "6fU4vlxYcsDe",
+        "outputId": "11b50f28-aced-4506-a743-4b749e9645c3"
+      },
+      "outputs": [
+        {
+          "data": {
+            "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYkAAAEcCAYAAAAydkhNAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3XtcVGXCB/DfGRBUQA0ZURQvyIspm1reQkxNSPICgoqW\npWZu1vbmjZJV3Jc+axappVLu7guV25rU5g3wlq3iBd1wXHSN3hXy9ZaCgoOIIKAzMOf9g5dZkTkz\nB5i7v+9fzJlzzjzPHD2/Oc/znOcIoiiKICIiMkBh6wIQEZH9YkgQEZEkhgQREUliSBARkSSGBBER\nSWJIEBGRJIYEkQNYsWIFkpOTbV0MegQxJMipzJ49G8OHD4dWq5W9zeOPP45r164163O2bt2KqKgo\nDB48GKNGjcKcOXOwf//+5haXyO4xJMhpFBUV4fTp0xAEAYcPH5a9nSAIzfqc9957D1999RVWrFiB\nU6dO4fjx41iyZAmOHz8uuQ3vWSVHxZAgp5GRkYHBgwdj6tSpSE9P1y+fPXs2duzYoX+dnp6OWbNm\nAQBefvlliKKIqKgoPPXUU/juu+8AANu2bcP48eMxYsQIvPnmm7h58yYA4PLly/jmm2+wYcMGhISE\nwM3NDYIg4KmnnkJSUlKjz9ywYQNefPFFDB48GIWFhdi1axcmTpyIp556Cs899xy+/fZb/fqnTp3C\nmDFjkJKSgqeffhphYWHYs2dPo/rduXMHr7/+Op566inMnDmz2Vc/RC3BkCCnkZmZiaioKEyePBkn\nTpxAWVmZ5LoNVw9bt24FAOzevRtnzpzBhAkTkJOTg/Xr1+OTTz7BiRMn4Ofnh7i4OACASqVCt27d\nMGDAAJPl2bNnD1avXo0zZ86gW7du6Ny5M1JTU3HmzBkkJSUhKSkJ+fn5+vVLS0tRXl6O48eP48MP\nP0RiYiKuXLmif3/fvn1YuHAhcnNz4e/vj40bN7bkayJqFoYEOYXc3Fxcv34dEyZMQHBwMHr27Nnk\nl7hce/fuxfTp0/H444+jTZs2iIuLw9mzZ3H9+nXcvn0bSqWy0fpjxozBsGHDMHDgQNy4cUO/PCYm\nBn379oVCoYCrqyvGjBmDHj16AACGDh2K0NBQ5Obm6tcXBAFLlixBmzZtMGzYMIwZM0Z/ZQMA48eP\nx69+9SsoFApERkY2ChgiS2FIkFPIzMzEqFGj0LFjRwDApEmTkJGR0aJ93bx5E35+fvrX7du3R6dO\nnVBSUoJOnTrpm54aHDt2DCdPnoRWq23U99C1a9cm682cORMjRozAsGHDkJ2djdu3b+vf79ChA9zd\n3fWv/fz8Gn2Wj4+P/u927dqhurq6RfUjag5XWxeAqLXu37+P7777DjqdDqNGjQIAaDQaVFZWoqCg\nAO3bt8e9e/f066vVaqP769KlC65fv65/XV1djfLycvj6+qJTp05YvXo1/vWvfyE4OLjRdg93Tj/Y\nIa7RaLB48WKsW7cOYWFhUCgU+M///M9G21RUVODevXto27YtAODGjRsICgpq5rdBZF68kiCHd/Dg\nQbi4uOC7775DZmYmMjMz8d1332Ho0KHIzMxE//798be//Q337t3DL7/8gp07dzba3sfHp1En8OTJ\nk7Fr1y4UFBRAo9Fg/fr1GDRoEPz8/NCnTx/MnDkTcXFx+OGHH3D//n3odDqcOXPG6CgprVYLrVaL\nxx57DAqFAseOHcPf//73RuuIoohPPvkEWq0Wubm5OHr0KCZMmGDeL4uomXglQQ4vIyMD06ZNg6+v\nb6Pls2bNwvvvv4+9e/fip59+QmhoKPr164fIyEjk5OTo11u4cCHi4+Oh0WiwatUqPP/881i8eDEW\nLlyIiooKPPnkk1i/fr1+/cTERGzduhVJSUm4du0avLy80Lt3b2zcuFHfTPVwYHh4eGDlypVYvHgx\ntFotnn32WYSFhTVaR6lUomPHjnjmmWfQvn17rFq1Cr179zbzt0XUPIIlHzqk0Wjw0ksvQavVoq6u\nDhEREXjrrbdQWFiIuLg43LlzB8HBwVi7di1cXZlX9Og6deoU4uPjcfToUVsXhagRizY3ubm5YcuW\nLcjIyEBGRgays7Px448/4qOPPsK8efPw/fffw8vLq9EYdiIish8W75No164dgPqritraWgiCAJVK\nhYiICAD1wwQPHjxo6WIQEVELWDwkdDodoqOjERoaitDQUPj7+6NDhw5QKOo/umvXrk2GFBI9aoYP\nH86mJrJLFg8JhUKhb2rKy8vDxYsXm6zT3LlziIjIOqw2BNbT0xPDhg3Djz/+iIqKCuh0OgBAcXEx\nunTpYnJ7TpBGRGR9Fh1SVFZWhjZt2sDLywv37t1DTk4OFixYgBEjRuDAgQOYOHEi0tPTmwwFNEQQ\nBKjVlZYsrk0plV6sn4Ny5roBrJ+jUyq9WrW9RUNCrVZj+fLl0Ol00Ol0mDhxIsaMGYOAgADExcUh\nOTkZ/fv3x/Tp0y1ZDCIiaiGL3idhbs6e9qyfY3LmugGsn6Nr7ZUEp+UgIiJJDAkiIpLEkCAiIkkM\nCSIiksSQICIiSQwJIiKSxJAgIiJJDAkiIpLEkCAiIkkMCSIiksSQICIiSQwJIiKSxJAgIiJJDAki\nIpLEkCAiIkkMCSIiksSQICIiSQwJIiKSxJAgIiJJDAkiIpLEkCAiIkkMCSIiksSQICIiSQwJIiKS\nxJAgIiJJDAkiIpLEkCAiIkkMCSIikuRqyZ0XFxcjPj4epaWlcHFxwYwZMzB79mxs2rQJ27ZtQ+fO\nnQEAS5cuxejRoy1ZFCIiagGLhoSLiwtWrFiB/v37o6qqClOnTsXIkSMBAPPmzcO8efMs+fFERNRK\nFg0JpVIJpVIJAPDw8EDfvn1x8+ZNAIAoipb8aCIiMgOr9UkUFhaioKAAAwcOBACkpaVhypQpWLly\nJSorK61VDCIiagarhERVVRUWLVqEhIQEeHh4YNasWTh06BAyMzPh4+ODpKQkaxSDiIiaSRAt3O5T\nW1uL119/HaNHj8bcuXObvF9UVIQ33ngDe/bssWQxiIioBSzaJwEACQkJCAwMbBQQarVa31dx8OBB\nBAUFydqXWu28zVJKpRfr56CcuW4A6+folEqvVm1v0ZA4ffo09uzZg6CgIERHR0MQBCxduhR79+5F\nfn4+FAoFunfvjlWrVlmyGERE1EIWDYkhQ4YgPz+/yXLeE0FE5Bh4xzUREUliSBARkSSGBBERSWJI\nEBGRJIYEERFJYkgQEZEkhgQREUliSBARkSSGBBERSWJIEBGRJIYEERFJYkgQEZEkhgQREUliSBAR\nkSSGBBERSWJIEBGRJIYEERFJYkgQEZEk2Y8vvXfvHtRqNdzd3dGlSxdLlomIiOyE0ZDQ6XTIyMjA\n9u3bUVBQAE9PT2g0Gri6uiI8PByvvPIK+vTpY62yEhGRlRkNiRdffBGDBw/GihUrEBwcDBcXFwDA\nrVu3cPz4cSQmJuKFF1
7ApEmTrFJYIiKyLkEURVHqzbKyMnh7exvdgZx1zEWtrrTK59iCUunF+jko\nZ64bwPo5OqXSq1XbG+24NnTyv3XrFs6ePWt0HSIicg6yRjfNmjULlZWVqKioQHR0NFauXIk1a9ZY\numxERGRjskKiuroaXl5eOHLkCCIjI7Fnzx6cOHHC0mUjIiIbkxUSGo0GAKBSqTBy5EgoFAp9JzYR\nETkvWSExfPhwREREIDc3F8OHD0dFRQUUCt6HR0Tk7GTdTPfuu++ioKAA/v7+cHNzw927d7F69WpL\nl42IiGzMaEhcuHBB/3ebNm1QXFysf+3m5ma5UhERkV0wGhILFiyAIAgQRRE3btyAp6cnAODu3bvo\n1q0bDh8+bHTnxcXFiI+PR2lpKVxcXBAbG4s5c+bgzp07WLp0KYqKitCjRw9s3LgRXl6tG8tLRETm\nZzQkGkJg9erVGDJkCCZMmAAAOHDgAM6dO2dy5y4uLlixYgX69++PqqoqTJ06FaGhodi1axdCQkLw\n2muvITU1FSkpKXjnnXfMUB0iIjInWb3PeXl5+oAAgOeffx4nT540uZ1SqUT//v0BAB4eHujbty9K\nSkqQlZWFmJgYAEBMTAwOHTrUkrITEZGFyQqJmpoa5Obm6l/n5uaipqamWR9UWFiIgoICDBo0CLdu\n3YKPjw+A+iC5fft2s/ZFRETWIXt0U1xcHNq1awcAuH//Pj7++GPZH1JVVYVFixYhISEBHh4eEASh\nRYVt7Rwk9o71c1zOXDeA9XuUyQqJoUOH4tChQ7h8+TJEUURAQIDs0U21tbVYtGgRpkyZgvDwcABA\n586dUVpaCh8fH6jVatnzPzn7JFysn2Ny5roBrJ+js+gEfw+qq6uDm5sbXF1dcfXq1UbDY41JSEhA\nYGAg5s6dq182btw47Nq1CwCQnp6OsLCwZhabiIisQdaVRFpaGj766CN06tRJ31QkCAKysrKMbnf6\n9Gns2bMHQUFBiI6OhiAIWLp0KV577TUsWbIEO3fuhJ+fH5KTk1tfEyIiMjtZIbF582bs3bsX3bt3\nb9bOhwwZgvz8fIPvffnll83aFxERWZ+s5ialUtnsgCAiIscn60pi5MiRWLt2LSZNmgR3d3f98sDA\nQIsVjIiIbE9WSGRkZACov9O6gZw+CSIicmyyQsLUHE1EROScZIUEUD8jrEqlAgA8/fTT6Nu3r8UK\nRURE9kFWx3VGRgZeeeUV5OfnIz8/H/PmzcPu3bstXTYiIrIx2UNg09PToVQqAQBqtRrz589HVFSU\nRQtHRES2JfuO64aAePhvIiJyXrJComfPnvjkk09QUlKCmzdvYtOmTfD397d02YiIyMZkhcTvf/97\nXL58GVFRUYiKisKlS5ewatUqS5eNiIhsTFafROfOnbFhwwZLl4WIiOyMrCuJ1NRUlJeX61/fvn0b\nn3/+ucUKRURE9kFWSOzbtw+dOnXSv37sscewd+9eixWKiIjsg6yQEEWxybK6ujqzF4aIiOyLrJDo\n3bs3/vznP0MUReh0OmzevBk9e/a0dNmIiMjGZIXEypUrceTIEQwcOBCDBw/GsWPHkJiYaOmyERGR\njcka3eTr64stW7aguroaANC+fXuLFoqIiOyD7D6J7du3449//CPat2+PwsJCnDlzxtJlIyIiG5MV\nEklJSTh58iQOHToEAPDw8MAHH3xg0YIREZHtyQoJlUqFjz76CG3btgVQPwT2/v37Fi0YERHZnqyQ\ncHd3hyAI+tc6nc5iBSIiIvshq+M6KCgIu3fvhiiKKCwsRGpqKoYMGWLpshERkY3JupJYvnw5Tp06\nBbVajdjYWNTV1WHZsmWWLhsREdmYrCsJT09PrF692tJlISIiOyPrSmL//v24e/cuACA5ORnz58/H\n//zP/1i0YEREZHuyQuJPf/oTPD09kZeXhxMnTiA6OppXFkREjwBZIeHqWt8q9fe//x2xsbGIjIzk\nEFgiokeArJAQBAG7d+/Gvn37EBISAgDQarUWLRgREdmerJD43e9+hwMHDiA2Nhb+/v64cuUKRowY\nYXK7hIQEjBw5EpGRkfplmzZtwujRoxETE4OYmBhkZ2e3vPRERGRRgmjoYRFmkpubCw8PD8THx2PP\nnj0A6kPCw8MD8+bNa/b+1OpKcxfRbiiVXqyfg3LmugGsn6NTKr1atb3RIbB/+ctfMHfuXKxdu9bg\n+/Hx8UZ3PnToUBQVFTVZbsFcIiIiMzIaEu7u7gDMPzV4WloaMjMz8atf/QrLly+Hl1frko6IiCzD\nos1NAFBUVIQ33nhD39xUVlaGxx57DIIgYMOGDVCr1ZxRlojIThm9kkhLSzO68UsvvdTsD/T29tb/\nPWPGDLzxxhuyt3X2dkPWzzE5c90A1s/RWbRPwhx3VT98oaJWq6FUKgEABw8eRFBQUKs/g4iILMNo\nSCQlJbVq52+//TZUKhXKy8sxduxYLFy4ECqVCvn5+VAoFOjevTtWrVrVqs8gIiLLMRoSx44dM7rx\nmDFjjL7/8ccfN1k2bdo0GcUiIiJ7YDQkPv/8c8n3BEEwGRJEROTYjIbEV199Za1yEBGRHTIaEteu\nXYO/vz8uXLhg8P3AwECLFIqIiOyD0ZBYvXo1UlJSsGDBgibvCYKArKwsixWMiIhsz2hIpKSkAAAO\nHz5slcIQEZF9kfX4UgCoqalBcXEx6urq9MvY3ERE5NxkhcSWLVuwYcMGdOzYEQpF/ezibG4iInJ+\nskLiL3/5Cw4cOABfX19Ll4eIiOyIrIcOde3alQFBRPQIknUlsXDhQqxcuRJjxozRTx8OmL7jmoiI\nHJuskDhy5AiOHDmCK1euNOqTYEgQETk3WSFx8OBBHD58GG3btrV0eYiIyI7I6pPw9/eHq6vs0bJE\nROQkZJ35e/Xqhblz5yI8PBxubm765S156BARETkOWSGh1WrRs2dPnD9/3tLlISIiOyIrJFr78CEi\nInJMRvskTD2+VKPR4OLFi2YtEBER2Q+TE/zV1NRg8uTJGDRoEHx8fHD//n1cvnwZx48fx7Fjx7B8\n+XL07dvXWuUlIiIrMhoSn376KfLy8vDtt9/iD3/4A4qLi9GuXTsEBQUhPDwcaWlp8PT0tFZZiYjI\nykz2SQwcOBADBw60RlmIiMjOyLpPgoiIHk0MCSIiksSQICIiSQwJIiKSJCskbt26hXfeeUc/DUdB\nQQG++eYbixaMiIhsT1ZI/O53v8OQIUNQUVEBAAgICMDXX39t0YIREZHtyQqJkpISvPjii3BxcQEA\nuLm56Z8rQUREzkvWmf7hacIrKiogiqJFCkRERPZDVkiMHz8eiYmJqKqqwq5du/Dqq69i2rRpJrdL\nSEjAyJEjERkZqV92584dvPrqq4iIiMD8+fNRWVnZ8tITEZFFyQqJX//61xg6dCiCg4Nx7NgxzJ49\nG3PnzjW53dSpU/HFF180WpaamoqQkBB8//33GDFiBFJSUlpWciIisjjZj5uLiop
CVFRUs3Y+dOhQ\nFBUVNVqWlZWFrVu3AgBiYmIwe/ZsvPPOO83aLxERWYeskLh16xa2bt2Kq1evora2Vr88OTm52R9Y\nVlYGHx8fAIBSqcTt27ebvQ8iIrIOWSHx5ptvYsCAAQgJCdGPcLIFpdLLZp9tDayf43LmugGs36NM\nVkjU1NTg3XffNcsHdu7cGaWlpfDx8YFarYa3t7fsbdVq5+3kViq9WD8H5cx1A1g/R9faAJTVcT1o\n0CD8/PPPLfqAh4fKjhs3Drt27QIApKenIywsrEX7JSIiy5N1JfHCCy/g5ZdfRteuXeHu7q5fvmPH\nDqPbvf3221CpVCgvL8fYsWOxcOFCLFiwAIsXL8bOnTvh5+fXon4NIiKyDlkhsWzZMrzxxhsYMGBA\ns/okPv74Y4PLv/zyS9n7ICIi25EVEu7u7pg/f76ly0JERHZGVp/EM888g+zsbEuXhYiI7IysK4lt\n27YhNTUVHh4ecHNzgyiKEAQBOTk5li4fERHZkKyQ2Llzp6XLQUREdkhWSHTv3t3S5SAiIjtkNCSW\nLVuGdevWYdq0aRAEocn7pobAEhGRYzMaEg0zvf72t7+1SmGIiMi+GA2Jr7/+Gh988AGGDx9urfIQ\nEZEdMToENj8/31rlICIiO8QHVRMRkSSjzU3nz59HSEhIk+W8T4KI6NFgNCR69+6N1NRUa5WFiIjs\njNGQcHNz4z0SRESPMKN9Em3atLFWOYiIyA4ZDYlt27ZZqxxERGSHOLqJiIgkMSSIiEgSQ4KIiCQx\nJIiISBJDgoiIJDEkiIhIEkOCiIgkMSSIiEgSQ4KIiCTJesY1EdGjTnWuBPtyruB6aTX8fNpjUkhv\njBjga+tiWRxDgojIBNW5EqTs/pf+daG6Sv/a2YOCIUFEVuHIv8T35VyRWP6Lw9ShpRgSRGRxjv5L\n/HpptcHlN25VWbkk1mezkBg3bhw8PT2hUCjg6uqKHTt22KooRGRhjv5L3M+nPQrVTQOhW2cPG5TG\numwWEoIg4KuvvkLHjh1tVQQishJH/yU+KaR3oyuhfy/v1ei1IzepSbFZSIiiCJ1OZ6uPJ3I69nyC\nsuYvcUt8Dw3b78v5BTduVaFbZw9MCunVaL+O3qQmxaZXEvPnz4cgCJg5cyZmzJhhq6IQOTx7P0HJ\n/SXeWpb8HkYM8DW6D0dvUpNis5D461//CqVSibKyMsybNw8BAQEYOnSorYpDZHXm/MVr7ycoOb/E\nzcGW34OjN6lJsVlIKJVKAIC3tzeee+45/PTTTyZDQqn0skbRbIb1c1zNrVv2PwsN/uLt0KEtRj/Z\no9F627P+F1dLKtHT1wuxYf/R6P0G129Jn6DM8b2bYx+Tx3hh8pjAZm0jt/4NWvo9mKN+Pbt64cqN\niibL/X29HPrfvk1CoqamBjqdDh4eHqiursaJEyfw1ltvmdxOra60QulsQ6n0Yv0cVEvq9s33BRLL\nf0b/HvWDOR5uOrlyowLrtp5GRcW9Jr+K/TpLt/m39nu31bFrTv0btOR7MFf9Iob5G2xSixjmb9N/\n+60NKJuERGlpKd566y0IgoC6ujpERkZi1KhRtigKkU3IaZpoTtOJsTZ/e+7QNqYlTUfW6vswxFpN\natZmk5Dw9/dHZmamLT6ayCKaeyI2NdpHda7E4PuA4TZuqRMUALvu0DbGVJAa+85tdaI21bntiHjH\nNVErSfUvANInYlO//A2916Cjp5vB5YZOUIlfqAyuay8d2sYYC1JTo5jsvW6OhFOF0yNLda4EiV+o\n8Os1R5D4hQqqcyUt2s/2rP81uHxfzi+S24wY4IvXo4LRQ+kJF4WAHkpPvB4VjBEDfCWbWRqUVdyX\nXVZHHnEzKaS3xPJeRpuiyLx4JUGPJHOOp79aYrhT0tSJWOoXr9SJ/UFyrwQceToJY01Hn+05Z3Ab\nRwg/R8OQoEeSOcfT9/Q1PPSxpSdiqRP7gwrVd/HrNUdM9n/YsiPXHKSC1JHDz9GwuYkeSVK/1otK\n7za7CSo27D8MLm/piViqmeVhOlHUXwFJldNYs5YjM9YURebFKwl6JEn9EhVF6JfLbYIa/WQPVFTc\nM9uImobtth+5gLLK+7K2MXYF5IwdubYexfQoYUhQqznSOPyGshaVym+7bugMNVZHS5yI5QYE0LQt\n3pGOSUs5Y/jZI4YEtZjqXEmTX7v2NA7/4RNlv56PIet0YZP1BAF4zNNd8qRcVHrX6vcaSPWZtHFR\nQFvXdPbkB9vi7X2yP3IsDAlqEVNj+c05Dr8lv4oNnSilOoMf83JHWYX0r3ZXheETsyXvNZDqM6mV\nmF7/wbZ4e5/sjxwLQ4JaxNRYfnMNRZTzq9hQiJgq34OMBQQgfWK25HBLqT6T7j6e/3+fgHRbvCPf\nG0H2hyHhpCzdJm1qLL+5hiJuP3LB8PKjFzBigK9kiAiCWT4er0cFY1/OFbMPtzR1fIwNXTXVFs/h\noWRODAknZI02aVNj+Zs7FFHqpCnVT9Dw61/qikGqicgQby/D/RE9lJ7678uc9xrIOT6tGb3j6PdG\nkH1hSDgha7RJS52IvDu4I3ZsYLM+x9hJ09R2zW27DxvSAz9fLTc6CV6DhvfMPdxS7vFp6egdDg8l\nc2JIOCFrtEmb80Rk7KQp9SsfqD+xu0g0K8lpuzf0eVLrmnO4pbWOD0OBzIEh4YRMzZ7Zmr4KS/R1\nSJ00C9V3YaproU40vFxO2/2DrHlSZZ8BORKGhBOSagrq17NTq/oqpJqFUnf/C92VHvrASDt4Htln\ni6CtE+EiAO3atkH1/Vr4dTYcKsb6NyQyoIk2LgroRNEhmlbYZ0COhCHhhKSaglrTV6E6V4LN+wzP\nvCni34Fx4qcb+NflMv17dSJwt0YLQDqUpE6azaETRXwW/2yr9mEt7DMgR8KQcFKGmk9aOr2yqRvn\nHvRgQEgx1EELGO48lsvRmmrYZ0COgiHhhKT6DVraFt6cG9PkkHr8ptT9CA28vdwBwfDNb2yqIbIM\nThXuZBp+9Reqq5pMJd3S6ZXlPASnOaRCydQU2bHPBuKjN0OdcuprInvFKwknY6zfYdX84fq/m9MW\nLuchOA2C+3ibbHKSCqUH2+qLSu/CVaFAnU4Hv/8fzvrgjWYMBSLrYEg4GVNj8FtygpXqWDZ0Y9q/\nRzddh7ZOBxdBQLu2rqi5XysrlBgARPaFIWFhDz6/QCEIqNPVD+r09nJH7LPNuzNZDkuMwW/uaJyX\nngvCS88FNVqmVHpBrTb8LGgisl8MiYcYegbBz1dvm7x5zFBnMdB4xE6d+O9R/2WV9y0yx7+lxuDz\nFz7Ro4kh8QBTzyCQGucvdZOZt5e7yc809xz/HINPRObEkHiA3KGeD5/YpbaT8/hJS8zxz1/9RGQu\nHAL7ALlDPR8+sbdmiKij3QRGRI8WhsQD/Hzay1rv4RO71HZympt4ExgR2TObhUR2djaef/55RERE\nIDU1tUX7UJ0rQeIXKvx6zREkfqGC6l
xJq8pk6mauf6/X66HXhreLfTZQf+OXQgBcFP+e09Tby503\ngRGR3bNJn4ROp8N7772HL7/8El26dMH06dMRFhaGvn37Sm4zZdnuRrOIWuLpa4Y6ffv17GTwXgBT\n2z188xcRkSOySUjk5eWhV69e6N69OwBg0qRJyMrKMhoSOp3YKAgs9fS11jwNjGFARM7GJs1NJSUl\n6Natm/61r68vbt68KXv7fTm/WOXpXkREjzqbhIQoyn2UjGE3blVJdhZztBARkfnYpLmpa9euuH79\nuv51SUkJunTpInt7f18vxIb9B9ZtPd3kvRcj+kGp9DJLOa3NUcstlzPXz5nrBrB+jzJBbO3P+hao\nq6vD888/jy+//BJKpRKxsbFYv3690T4JIiKyPptcSbi4uOC//uu/8Oqrr0IURUyfPp0BQURkh2xy\nJUFERI6Bd1wTEZEkhgQREUliSBARkSS7DwlzzPFkb8aNG4eoqChER0dj+vTpAIA7d+7g1VdfRURE\nBObPn4/KSsd5iltCQgJGjhyJyMhI/TJj9Vm9ejXGjx+PKVOmID8/3xZFbhZD9du0aRNGjx6NmJgY\nxMTEIDs7W/9eSkoKxo8fjwkTJuDEiRO2KLJsxcXFmDNnDiZOnIjIyEhs2bIFgPMcv4fr99VXXwFw\nnuOn0WiMkXxQAAAKiUlEQVQQGxuL6OhoREZGYtOmTQCAwsJCzJgxAxEREYiLi0Ntba1+/aVLl2L8\n+PGYOXNmo1sRJIl2rK6uTgwPDxcLCwtFjUYjRkVFiRcuXLB1sVpt3LhxYnl5eaNla9euFVNTU0VR\nFMWUlBRx3bp1tihai/zjH/8Qz507J06ePFm/TKo+R48eFV977TVRFEXx7NmzYmxsrPUL3EyG6vfp\np5+KmzdvbrLuhQsXxClTpoharVa8du2aGB4eLup0OmsWt1lu3rwpnjt3ThRFUbx79644fvx48cKF\nC05z/KTq5yzHTxRFsbq6WhRFUaytrRVjY2PFs2fPiosXLxb3798viqIoJiYmit98840oiqKYlpYm\nvvvuu6IoiuK+ffvEJUuWmNy/XV9JPDjHU5s2bfRzPDk6URSh0+kaLcvKykJMTAwAICYmBocOHbJF\n0Vpk6NCh6NChQ6NlD9en4bhlZWUhOjoaADBo0CBUVlaitLTUugVuJkP1AwzPHJCVlYWJEyfC1dUV\nPXr0QK9evZCXl2eNYraIUqlE//79AQAeHh7o27cvSkpKnOb4GapfwxRAznD8AKBdu3YA6q8Samtr\nIQgCVCoVIiIiADQ+nzx4XCMiIpCTk2Ny/3YdEq2d48leCYKA+fPnY9q0adi+fTsA4NatW/Dx8QFQ\n/w/79u3btixiq5WVlTWqT1lZGQDg5s2b6Nq1q349X19flJS0bop3W0lLS8OUKVOwcuVKfXOMoX+z\njlK/wsJCFBQUYNCgQU3+PTrD8Wuo38CBAwE4z/HT6XSIjo5GaGgoQkND4e/vjw4dOkChqD+9d+3a\nVV+HB4+fi4sLOnTogPLycqP7t+uQMJT0zuCvf/0rdu3ahc8++wxpaWnIzc2FIAimN3QCho6pI9Z9\n1qxZOHToEDIzM+Hj44MPP/wQgOPWr6qqCosWLUJCQgI8PDwky+ws9XOm46dQKJCRkYHs7Gzk5eXh\n4sWLTdZpqMPD9RNF0WT97DokWjvHk71SKpUAAG9vb4SHhyMvLw+dO3fWX7ar1Wp4e3vbsoitJlUf\nX19fFBcX69crLi52yGPq7e2t/881Y8YMfZNE165dcePGDf16jlC/2tpaLFq0CFOmTEF4eDgA5zp+\nhurnTMevgaenJ4YNG4Yff/wRFRUV+ibtB+vw4PGrq6vD3bt30bFjR6P7teuQeOKJJ3D16lUUFRVB\no9Fg3759CAsLs3WxWqWmpgZVVfXTmVdXV+PEiRMICgrCuHHjsGvXLgBAenq6w9Xz4V8oUvUJCwtD\nRkYGAODs2bPo0KGDvlnDnj1cP7Varf/74MGDCAoKAlBf7/3790Oj0eDatWu4evWqvnnDXiUkJCAw\nMBBz587VL3Om42eofs5y/MrKyvRNZffu3UNOTg4CAwMxYsQIHDhwAEDj4zdu3Dikp6cDAA4cOICn\nn37a5GfY/bQc2dnZeP/99/VzPC1YsMDWRWqVa9eu4a233oIgCKirq0NkZCQWLFiA8vJyLFmyBDdu\n3ICfnx+Sk5MNdpbao7fffhsqlQrl5eXw8fHBwoULER4ejsWLFxusz6pVq3D8+HG0a9cOSUlJCA4O\ntnENjDNUP5VKhfz8fCgUCnTv3h2rVq3SnyxTUlKwY8cOuLq6YuXKlRg1apSNayDt9OnTePnllxEU\nFARBECAIApYuXYqBAwdK/nt0pOMnVb+9e/c6xfH7+eefsXz5cuh0Ouh0OkycOBG/+c1vcO3aNcTF\nxaGiogL9+/fHunXr0KZNG2g0Gixbtgz5+fno1KkT1q9fjx49ehj9DLsPCSIish27bm4iIiLbYkgQ\nEZEkhgQREUliSBARkSSGBBERSWJIEBGRJIYE2b3a2lokJycjIiICkZGRmDRpEtasWYO6ujqj261Y\nsQJpaWkA6qeGXrt2rcnPOnToEH766SezlNsSioqKsG3bNlsXgx4hDAmye8uXL8fFixeRkZGBPXv2\nYPfu3QgICIBGozH7Z2VlZdn1rJ+FhYX49ttvW7StqVAlMsTV1gUgMuaXX35BVlaW/g5foH72ytjY\nWAD1M2CuW7dO/3CYUaNGIT4+3uikZefPn8fvf/971NTUQKPRYMaMGZgzZw5OnDiBw4cPIycnBzt2\n7MArr7yCwsJCHDx4EIIgQKPR4NKlS/jHP/4BT0/PRvv85z//iXXr1qGqqgqCICA+Ph4jR45EXl4e\nPvjgA9TU1KBdu3ZYuXIlnnjiCZw6dQpr1qzBzp07AaDR61OnTuGDDz7AwIEDcfbsWSgUCqxfvx4B\nAQF47733UFRUhJiYGPTs2RPJycm4dOkSkpKSUF5eDq1Wizlz5mDq1KkAgMcffxzLli3D0aNHMWzY\nMCxatMjsx4icnFmeekFkIfv37xejo6Ml3//666/FefPmibW1taJWqxXnzp2rf8DK8uXLxa1bt4qi\nWP+QoDVr1oiiKIpVVVWiRqPR/z1x4kTx4sWLTbZ52LJly8QPP/ywyfLy8nIxNDRUPHv2rCiKoqjT\n6cSKigpRo9GIY8eOFXNyckRRFMUffvhBHDt2rKjVakWVSiVOmzZNv48HX6tUKjE4OFjMz88XRVEU\n//SnP4nvvPNOk/VEsf5BMzExMeKlS5dEUax/sE5ERIT+db9+/cTPP/9c8vsjMoVXEmTXRBOzxuTk\n5CAmJgYuLi4AgKlTp+LQoUN44YUXJLepqanBu+++i4KCAigUCqjVahQUFCAgIEBym40bN6Kmpga/\n/e1vm7x39uxZBAYGYtCgQQDqp2X28vLC+fPn4ebmpp9ELSQkBG5ubrh8+bLJevfp0wePP/44gPq
H\n+xw9etTgeleuXMGlS5cQFxen/660Wi0uXryIPn36AID+IUFELcGQILsWHByMK1euoLKyEl5eXk3e\nFw3Mh29qfvz169dDqVRi7dq1+gdAGevf2LlzJ06ePKl//rOhMshd3lBeFxeXRk8nvH//fqP13N3d\n9X+7uLjon1FsaH/e3t76mT0fJggC2rdvb/A9IjnYcU12rVevXhg3bhwSExP1U6zX1dVhy5YtqKmp\nwciRI5Geno7a2lpotVpkZGQgNDTU6D4rKyvRrVs3CIKA8+fPIzc3V/+eh4cH7t69q3/9ww8/4LPP\nPsMf//hHuLm5Gdzfk08+iQsXLuDHH38EUN9PUlFRgYCAAGi1Wpw6dQoAcPLkSdTW1qJ3797o0aMH\nCgsLUVlZCVEUsW/fPlnfh6enp35qaKD+iqNt27bIzMzUL7t06ZL+uzJ1JUZkCq8kyO6tWbMGn376\nKaZOnQo3NzeIoojRo0fDzc0NM2fOxNWrV/XP7X3mmWf0ndpSfvOb3yA+Ph67d+9Gz549MWzYMP17\nU6ZMwYoVK3DgwAG88sor2LlzJ2pqajB//nz9VUBaWlqjX+cdO3bEpk2bkJSUhOrqari4uCA+Ph4h\nISH45JNPsHr1an3H9aeffgpXV1f4+vpi3rx5iImJgb+/P5544glcuHDB5HfRr18/9OnTB5GRkQgI\nCEBycjL++7//G++//z42b96Muro6+Pj4YOPGjQDs/6lqZP84VTgREUlicxMREUliSBARkSSGBBER\nSWJIEBGRJIYEERFJYkgQEZEkhgQREUliSBARkaT/AzLfG+oMx+5pAAAAAElFTkSuQmCC\n",
+            "text/plain": [
+              "\u003cmatplotlib.figure.Figure at 0x7fc3b259add0\u003e"
+            ]
+          },
+          "metadata": {
+            "tags": []
+          },
+          "output_type": "display_data"
+        }
+      ],
+      "source": [
+        "counts = []\n",
+        "times = []\n",
+        "for n in np.logspace(0, 7, 50):\n",
+        "\n",
+        "  with tf.Graph().as_default():\n",
+        "    tf_collatz = ag.to_graph(collatz)\n",
+        "    count = tf_collatz(tf.constant(n, dtype=tf.float32))\n",
+        "    with tf.Session() as sess:\n",
+        "      count_value = sess.run(count)\n",
+        "\n",
+        "      res = %timeit -n10 -r1 -o -q sess.run(count)\n",
+        "      counts.append(count_value)\n",
+        "      times.append(res.best)\n",
+        "      \n",
+        "plot_results(counts, times, 'AutoGraph')"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "RRENYzLRF_f3"
+      },
+      "source": [
+        "# Eager"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 6,
+      "metadata": {
+        "colab": {
+          "autoexec": {
+            "startup": false,
+            "wait_interval": 0
+          },
+          "height": 301
+        },
+        "colab_type": "code",
+        "executionInfo": {
+          "elapsed": 5003,
+          "status": "ok",
+          "timestamp": 1531757478713,
+          "user": {
+            "displayName": "",
+            "photoUrl": "",
+            "userId": ""
+          },
+          "user_tz": 240
+        },
+        "id": "dhDf8LLdF_f-",
+        "outputId": "3de0a5a5-7a11-4b41-8ab0-e4e21ce8d59b"
+      },
+      "outputs": [
+        {
+          "data": {
+            "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYkAAAEcCAYAAAAydkhNAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3XtYVWW+B/Dv2hshBdSQHaighhwas7Qeb6GFDjIyI3LZ\nGphdJLLMzqSlKaPQsTPm5KhZkZ7moKOnManGK17wsUfIS87QNj2jnEnIg5cQEtyAyDWBvdf5g4d9\nBPbaLGCvfeP7+QvW2mvt38tGvq71vut9BVEURRAREZmhsncBRETkuBgSREQkiSFBRESSGBJERCSJ\nIUFERJIYEkREJIkhQUREktzsXQCRvYWHh6OiogJqtRqiKEIQBMyePRtvv/22vUsjsjuGBBGA9PR0\nPPHEE3Z5b4PBALVabZf3JuoMbzcRATA38cCNGzeQmJiISZMmITQ0FMuXL0dtba1p//fffw+tVotx\n48bhjTfewNKlS5GWlmbaf+LECcTFxWHChAmYN28efvjhB9O+8PBwbNu2DTExMXj88cdhNBqVbSBR\nNzEkiCSIoohFixbhb3/7G44ePYqysjJs3rwZANDU1ITFixdjzpw5OHv2LGbNmoXjx4+bjv3++++R\nmpqKd999F2fPnsXcuXPx2muvoampyfSao0ePYtu2bTh37hxUKv5TJMfE30wiAL/97W8xceJETJgw\nARMnTsSePXswbNgwhIaGws3NDffffz8SExPx3XffAQAuXLgAg8GA559/Hmq1Gr/61a8wZswY0/n2\n7NmDZ555Bo8++igEQUBcXBzc3d1x8eJF02vmz58PPz8/uLu727y9RHKxT4IIwCeffNKhT6KyshJr\n167FuXPnUF9fD4PBgIEDBwIA9Ho9/Pz82rx+8ODBpq9/+uknHDx4ELt27QLQclXS3NyMW7dumV7j\n7++vVHOIrIYhQQTzfRKbNm2CIAg4cuQI+vfvj+zsbKxduxYAoNFoUFZW1ub1N2/exLBhwwC0BMCi\nRYvw6quvKl88kYJ4u4lIQl1dHTw9PeHl5YWysjJs377dtO+xxx6DWq1GRkYGDAYDsrOzkZeXZ9qf\nkJCAL7/80rStvr4ep06dQn19vc3bQdQTvJIgAvDaa69BpVKZnpOYMmUK3nzzTaxYsQLjx4/H8OHD\nERsbi08//RQA0KdPH2zevBmpqanYtGkTwsLCEB4ebupfeOSRR/Duu+9izZo1KCoqgoeHB8aNG4cJ\nEyYAAARBsFdTibpEUHLRocbGRjz33HNoamqCwWBAZGQkXn/9dRQXF2PZsmW4c+cORo8ejQ0bNsDN\njXlFzi0hIQHz5s2DVqu1dylEVqPo7SZ3d3fs3LkTmZmZyMzMxOnTp3Hx4kW8//77SEpKwldffQVv\nb2/s3btXyTKIFPHdd9+hvLwcBoMBBw4cwOXLl/HUU0/Zuywiq1K8T6Jv374AWq4qmpubIQgCdDod\nIiMjAQBarbbN+HIiZ3Ht2jXExsZi/Pjx+PTTT/Hxxx/D19fX3mURWZXi93iMRiNmz56NoqIiPPfc\ncwgMDET//v1NDw/5+/u3GRZI5CwSEhKQkJBg7zKIFKX4lYRKpTLdasrLy8OVK1c6vIadeEREjslm\nQ2C9vLwwYcIEXLx4EdXV1aa5akpLS/HAAw90eryC/etERCRB0dtNlZWV6NOnD7y9vfHzzz8jNzcX\nCxcuxKRJk3Ds2DHMnDkTBw4cwPTp0zs9lyAI0OtrlCzXrjQab7bPSbly2wC2z9lpNN49Ol7RkNDr\n9Vi5ciWMRiOMRiNmzpyJqVOnIigoCMuWLUNaWhpGjRqFp59+WskyiIiomxR9TsLaXD3t2T7n5Mpt\nA9g+Z9fTKwlOy0FERJIYEkREJIkhQUREkhgSREQkiSFBRESSGBJERCSJIUFERJIYEkREJIkhQURE\nkhgSREQkiSFBRESSGBJERCSJIUFERJIYEkREJIkhQUREkhgSREQkiSFBRESSGBJERCSJIUFERJIY\nEkREJIkhQUREkhgSREQkiSFBRESSGBJERCSJIUFERJIYEkREJIkhQUREkhgSREQkyU3Jk5eWliI5\nORnl5eVQq9VISEjACy+8gC1btmD37t0YNGgQAGDp0qUICwtTshQiIuoGRUNCrVZj1apVGDVqFOrq\n6jB79mxMnjwZAJCUlISkpCQl356IiHpI0ZDQaDTQaDQAAE9PT4wcORK3bt0CAIiiqORbExGRFdis\nT6K4uBgFBQUYM2YMACAjIwOxsbFITU1FTU2NrcogIqIusElI1NXVYcmSJUhJSYGnpyeeffZZZGdn\n4+DBg/D19cW6detsUQYREXWRICp836e5uRmvvvoqwsLCkJiY2GF/SUkJFi1ahMOHDytZBhERdYOi\nfRIAkJKSguDg4DYBodfrTX0Vx48fR0hIiKxz6fWue1tKo/Fm+5yUK7cNYPucnUbj3aPjFQ2J8+fP\n4/DhwwgJCUFcXBwEQcDSpUtx5MgR5OfnQ6VSYejQoVizZo2SZRARUTcpGhLjxo1Dfn5+h+18JoKI\nyDnwiWsiIpLEkCAiIkkMCSIiksSQICIiSQwJIiKSxJAgIiJJDAkiIpLEkCAiIkkMCSIiksSQICIi\nSQwJIiKSxJAgIiJJDAkiIpLEkCAiIkkMCSIiksSQICIiSQwJIiKSxJAgIiJJspcv/fnnn6HX6+Hh\n4YEHHnhAyZqIiMhBWAwJo9GIzMxM7NmzBwUFBfDy8kJjYyPc3NwQERGBF198EQ8++KCtaiUiIhuz\nGBLz5s3DY489hlWrVmH06NFQq9UAgIqKCnzzzTdYvXo1nnnmGURFRdmkWCIisi1BFEVRamdlZSV8\nfHwsnkDOa6xFr6+xyfvYg0bjzfY5KVduG8D2OTuNxrtHx1vsuDb3x7+iogIXLlyw+BoiInINskY3\nPfvss6ipqUF1dTXi4uKQmpqK9evXK10bERHZmayQqK+vh7e3N06cOIHo6GgcPnwYZ86cUbo2IiKy\nM1kh0djYCADQ6XSYPHkyVCqVqRObiIhcl6yQmDhxIiIjI3Hu3DlMnDgR1dXVUKn4HB4RkauT9TDd\nO++8g4KCAgQGBsLd3R21tbVYu3at0rUREZGdWQyJwsJC09d9+vRBaWmp6Xt3d3flqiIiIodgMSQW\nLlwIQRAgiiJu3rwJLy8vAEBtbS0GDx6Mr7/+2uLJS0tLkZycjPLycqjVasTHx2P+/Pm4c+cOli5d\nipKSEgQEBOCjjz6Ct3fPxvISEZH1WQyJ1hBYu3Ytxo0bh9/85jcAgGPHjuHSpUudnlytVmPVqlUY\nNWoU6urqMHv2bEyZMgX79+9HaGgoXnnlFWzduhXp6elYvny5FZpDRETWJKv3OS8vzxQQAPDrX/8a\n3377bafHaTQajBo1CgDg6emJkSNHoqysDDk5OdBqtQAArVaL7Ozs7tROREQKkxUSDQ0NOHfunOn7\nc+fOoaGhoUtvVFxcjI
KCAowdOxYVFRXw9fUF0BIkt2/f7tK5iIjINmSPblq2bBn69u0LALh79y42\nbdok+03q6uqwZMkSpKSkwNPTE4IgdKvYns5B4ujYPuflym0D2D65Tv+jGHty/hdFZTUY5ueN+On/\ngrDHA6xybnuRFRLjx49HdnY2rl27BlEUERQUJHt0U3NzM5YsWYLY2FhEREQAAAYNGoTy8nL4+vpC\nr9fLnv/J1SfhYvuckyu3DWD75NJdKkP6oe9N31+/WY2Nu86juvpnTHrYr8fn7y5FJ/i7l8FggLu7\nO9zc3FBUVNRmeKwlKSkpCA4ORmJiomlbeHg49u/fDwA4cOAApk+f3sWyiYgcS1budYntP9q0DmuT\ndSWRkZGB999/HwMHDjTdKhIEATk5ORaPO3/+PA4fPoyQkBDExcVBEAQsXboUr7zyCt58803s27cP\nQ4YMQVpaWs9bQkRkRz+V15vdfrOizsaVWJeskNixYweOHDmCoUOHdunk48aNQ35+vtl9n376aZfO\nRUTkyIb49kOxvmMgDB7kaYdqrEfW7SaNRtPlgCAi6k2iQkdIbB9u20KsTNaVxOTJk7FhwwZERUXB\nw8PDtD04OFixwoiIHInuUhmycq/jp/J6DPHth6jQEW06pFu/zsr9ETcr6jB4kCeiQofbtdPaGmSF\nRGZmJoCWJ61byemTICJyBe1HLhXr60zftw8KZw+F9mSFRGdzNBERuTJLI5dcLRTakxUSQMuMsDqd\nDgDwxBNPYOTIkYoVRUTUXZ3dFuoOVx25JIesjuvMzEy8+OKLyM/PR35+PpKSknDo0CGlayMi6pLW\n20LF+joYRdF0W0h3qaxH5x3i28/sdmcfuSSH7CGwBw4cgEajAQDo9XosWLAAMTExihZHRNQVSt0W\nigod0aZP4v+3O/fIJTlk325qDYj2XxMROQqlbgu56sglOWSFxLBhw/Dxxx9j7ty5EAQBu3fvRmBg\noNK1ERF1iZIPtLniyCU5ZPVJ/P73v8e1a9cQExODmJgYXL16FWvWrFG6NiKiLnHVB9rsSdaVxKBB\ng/Dhhx8qXQsRUY/05ttCSpEVElu3bkVCQgIGDhwIALh9+zb27duHl19+WdHiiIi6qrfeFlKKrNtN\nWVlZpoAAgPvvvx9HjhxRrCgiInIMskJCFMUO2wwGg9WLISIixyIrJEaMGIH/+q//giiKMBqN2LFj\nB4YNG6Z0bUREZGeyQiI1NRUnTpzAmDFj8Nhjj+HUqVNYvXq10rUREZGdyeq49vPzw86dO1Ff3/Kg\nSr9+5h9RJyIi1yK7T2LPnj345JNP0K9fPxQXF+O///u/la6NiIjsTFZIrFu3Dt9++y2ys7MBAJ6e\nnnjvvfcULYyIiOxP1u0mnU6HzMxMaLVaAC1DYO/evatoYUREligxJTh1JCskPDw8IAiC6Xuj0ahY\nQUREnZG7Uhz1nKzbTSEhITh06BBEUURxcTH+/d//HePGjVO6NiIisyxNCU7WJSskVq5cibNnz0Kv\n1yM+Ph4GgwErVqxQujYiIrN680pxtibrdpOXlxfWrl2rdC1ERLIoOSU4tSXrSuLo0aOora0FAKSl\npWHBggX45z//qWhhRES6S2VYvV2Hl9efwOrtOtMypJwS3HZkhcSf/vQneHl5IS8vD2fOnEFcXByv\nLIhIUZbWq570sB9ejRmNAI0X1CoBARovvBozmp3WCpB1u8nNreVlf/vb3xAfH4/o6Gjs2LFD0cKI\nqHfrbL1qTgluG7KuJARBwKFDh5CVlYXQ0FAAQFNTk6KFEVHvxs5pxyArJN5++20cO3YM8fHxCAwM\nxPXr1zFp0qROj0tJScHkyZMRHR1t2rZlyxaEhYVBq9VCq9Xi9OnT3a+eiFzWEF/zc8Sxc9q2BNHc\nYhFWcu7cOXh6eiI5ORmHDx8G0BISnp6eSEpK6vL59Poaa5foMDQab7bPSbly2wD7ta/9A3OtrN33\n0Bs+v56w2Cfxl7/8BYmJidiwYYPZ/cnJyRZPPn78eJSUlHTYrmAuEZGD6uo0Glyv2jFYDAkPDw8A\n1p8aPCMjAwcPHsQjjzyClStXwtu7Z0lHRI6tu9NosHPa/hS93QQAJSUlWLRokel2U2VlJe6//34I\ngoAPP/wQer2eM8oSubjF75/A9ZvVHbaPGNwfm5f/0g4VkVwWryQyMjIsHvzcc891+Q19fHxMXyck\nJGDRokWyj3X1+4Zsn3Ny5bYB1mlfUan542+U1dj9Z9cbPr+esBgS1niquv2Fil6vh0ajAQAcP34c\nISEhPX4PInJsnEbDeVkMiXXr1vXo5G+99RZ0Oh2qqqowbdo0LF68GDqdDvn5+VCpVBg6dCjWrFnT\no/cgIscXFTrC7EglTqPh+CyGxKlTpywePHXqVIv7N23a1GHbnDlzZJRFRK6EI5Wcl8WQ+POf/yy5\nTxCETkOCiKgVRyo5J4sh8dlnn9mqDiIickAWQ+LGjRsIDAxEYWGh2f3BwcGKFEVERI7BYkisXbsW\n6enpWLhwYYd9giAgJydHscKIiMj+LIZEeno6AODrr7+2STFERORYZK0nAQANDQ0oLS2FwWAwbePt\nJiLH1NV5koikyAqJnTt34sMPP8SAAQOgUrXMLs7bTUSOqbvzJBGZIysk/vKXv+DYsWPw8+MvGJGj\n62xFN6KukLXokL+/PwOCyElIrehWrK/F6u066C6V2bgicmayriQWL16M1NRUTJ061TR9OND5E9dE\nJM3a/Qat5zNamNiZt56oq2SFxIkTJ3DixAlcv369TZ8EQ4Koe6zdbyC1ipsU3noiuWSFxPHjx/H1\n11/jvvvuU7oeol6hJ/0G5q5ApM4n5WZFxxlZicyRFRKBgYFwc5M9WpaIOiHVb9DZH2+pKxBB6Nr7\nc4pukkvWX/7hw4cjMTERERERcHd3N23vzqJDRNT5+gpS/RVSVwxuKhWaDMYO2328PVBZc7fDdk7R\nTXLJCommpiYMGzYMly9fVroeol7B0voKlvorpK5Amo0dAwIA4n/Z8sArp+im7pIVEj1dfIiI2rK0\nvsLq7Tqzx6Qf+h591AKMho77hvp6ISp0uGQYMBSouzpdvvSRRx6R3N/Y2IgbN25g5MiRVi+MyNVJ\nra8gdbUAAE0G88NbWwOBYUDW1ukEfw0NDZg1axbGjh0LX19f3L17F9euXcM333yDU6dOYeXKlQwJ\nIiuS6q+4Vx+1CkZR5O0jUpzFkNi8eTPy8vLw17/+Ff/xH/+B0tJS9O3bFyEhIYiIiEBGRga8vLxs\nVStRryDVX3EvoyhiW/IvbVQR9Wad9kmMGTMGY8aMsUUtRISW21B7ThSaHZXUikNYyVZkzd1ERLbV\nOipJCoewkq3wCTkiO+hs3qZJD/uhsOQOcs4Xdzh2+rgA9kGQzTAkiGxM7rxNz/0qBMFDB/AZB7Ir\nhgSRjXVl3iYOayV7k9UnUVFRgeXLl5um4SgoKMAXX3yhaGFErqq78zYR2YOskHj
77bcxbtw4VFdX\nAwCCgoLw+eefK1oYkasa4tvP7HaOWCJHJCskysrKMG/ePKjVagCAu7u7aV0JIuqaqNAREts5Yokc\nj6w+ifbThFdXV0O0sPoVUW/TlVXmLM3bRORoZIXEjBkzsHr1atTV1WH//v34/PPPMWfOnE6PS0lJ\nwcmTJzFo0CAcPnwYAHDnzh0sXboUJSUlCAgIwEcffQRvb++etYLIjk7/o7jLq8yxQ5qchax7Ri+/\n/DLGjx+P0aNH49SpU3jhhReQmJjY6XGzZ8/G9u3b22zbunUrQkND8dVXX2HSpElIT0/vXuVEDmJP\nzv+a3Z6V+6ONKyGyPtlDYGNiYhATE9Olk48fPx4lJSVttuXk5GDXrl0AAK1WixdeeAHLly/v0nmJ\nHElRWY3Z7RytRK5AVkhUVFRg165dKCoqQnNzs2l7Wlpal9+wsrISvr6+AACNRoPbt293+RxE1tCV\nfgRLrx3m543rN6s7HMPRSuQKZIXEv/7rv+Lhhx9GaGioaYSTPWg0rt13wfbZjlQ/wtbD32O4f3/E\nT/8XhD0eYPG1/fvfh7DHAxA//V+wcdf5Du8xL/Ihh2pzT7hKO6S4evt6QlZINDQ04J133rHKGw4a\nNAjl5eXw9fWFXq+Hj4+P7GP1evOX9a5Ao/Fm+2zoi68KzG4XReD6zWps3HUe2w/+E/G/DJZ8QvqL\nr37AqIABCHs8ANXVP3cYrTQqYIBDtbm7HO2zs7be0L6ekBUSY8eOxQ8//ICHHnqoy2/QfqhseHg4\n9u/fj4ULF+LAgQOYPn16l89J1FOWVn9rVVlzF+mHvocgmN9/b58DRyuRq5IVEs888wyef/55+Pv7\nw8PDw7R97969Fo976623oNPpUFVVhWnTpmHx4sVYuHAh3njjDezbtw9DhgzpVr8GUU/JWf2tlZtK\nhSaDscN29jlQbyArJFasWIFFixbh4Ycf7lKfxKZNm8xu//TTT2Wfg8iaWjugS8rljzxqNnYMCIBP\nSFPvICskPDw8sGDBAqVrIeoWuaOU2k/RLddQXy9EhQ7nE9LUK8kKiaeeegqnT59GWFiY0vUQdYnc\ntRkA6Sm6AzQtIbDnZCEqqzsuGdoaCAwF6o1khcTu3buxdetWeHp6wt3dHaIoQhAE5ObmKl0fkUWW\n1mZo3d96hSF1i+lmRZ0pBFquSnjFQNRKVkjs27dP6TqIukVqlFJJeW2HKwwp93ZA84qBqC1ZITF0\n6FCl6yDqFqlRSlIjksxhBzSRNIshsWLFCmzcuBFz5syBYGaweGdDYImUcG9H9UAvd7OvkRqRJAgt\nHdG8nUQkj8WQaJ3p9Xe/+51NiiEyp30oVNb8f+dy69c+3h64U9do+sOflXvd7BXGUF8vrFkw0UaV\nEzk/iyHx+eef47333sPEifxHRfbRfvTSvQFxr3739cH7v53SZpu54a68tUTUNRZDIj8/31Z1EJm1\n50ShrNe1n5abq78RWYfs9SSIlNT+gbiHht2PH4puS145tGduigyOVCLqOYshcfnyZYSGhnbYzuck\nyJrMPRAnd16lVryNRKQMiyExYsQIbN261Va1UC8l9UCcHH3UKrwUNYpXDEQKsRgS7u7ufEaCFCdn\n2m4pDAgiZaks7ezTp4+t6qBebIhvP9mv7aNWQSW0zLf0asxoBgSRwixeSezevdtWdVAvFhU6Qtbs\nrAwFItvj6Cayu9Y//FKzsPr090D8tGAGBJEdMCTI6syt7zBrquV1djkLK5FjEsT2i1A7MFdfrNzZ\n2mcuDADzTzq3zJnkKbkgkDNzxs+uK9g+56bRWP4PWmd4JUHdIrXYj4+3h9nXi6LlBYGIyDFZHN1E\nJEXq2QY5T0i3LghERI6PIUHd0pNnG9rPs0REjou3m0iW9v0P/e5zQ21DU7fOZW6eJSJyTAwJ6pS5\n/oee4DxLRM6DIUGd6sncSq1UAjDE14tDWomcDEOCOtWd/of2K8XNmhrs0sMMiVwVQ4I6NcS3n+xb\nTHw6msi1MCSoU3LmVgrQ8FYSkStiSFCnTHMrnSg0+xwEJ94jcl12C4nw8HB4eXlBpVLBzc0Ne/fu\ntVcpJAPnViLqnewWEoIg4LPPPsOAAQPsVUKvZG6+pa78kee60US9i91CQhRFGI1Ge719ryQ13xLA\nuZSIyDy7XkksWLAAgiBg7ty5SEhIsFcpLkfqakHqeYes3B8ZEkRklt1C4ssvv4RGo0FlZSWSkpIQ\nFBSE8ePH26sclyF1tVBYckfyeQfOpUREUhxiPYktW7bA09MTSUlJ9i7F6S1+/wSu36w2u893YF+U\nVzV02D5icH9sXv5LpUsjIidklyuJhoYGGI1GeHp6or6+HmfOnMHrr7/e6XGu/MSutRY+KSqVPodU\nH1DkhEDFf7auvLCLK7cNYPucnVMuOlReXo7XX38dgiDAYDAgOjoaTz75pD1KcTmWno6+U9uIV2NG\ncwgrEclml5AIDAzEwYMH7fHWLs/S09GDB3lyCCsRdQkXHXIxkx72w/RxAWb3cYpuIuoqTsvhgp77\nVQiChw7gbSUi6jGGhIvibSUisgbebiIiIkkMCSIiksSQICIiSeyTsKGezsBKRGRrDAkbyTh+GTnn\ni03fcwZWInIGDAmF6S6VSa7oBnAGViJybAwJBbWfkdUczsBKRI6MIWEl5vobpNZvuNfgQZ5Kl0ZE\n1G0MCSuQWsNBEDo/llNlEJEj4xBYK5C6YnBTWf7xTh8XwP4IInJovJKwAqkV35ol1m/w8fZA/C+D\nGRBE5PAYElYgtYbDUF8vRIUO50R7ROS0GBKdkPMAnNQaDq2BwFAgImfFkLBAqkMaaPsAXOvXvGIg\nIlfDkGjn3isHtUS/s7kH4HjFQESuiCFxj/ZXDkaD+dfxATgi6i04BPYech5+A/gAHBH1HgyJe0gN\nZW2PD8ARUW/B2033kBrK2ketglEU2SFNRL0OQ+IeUkNZX4oaxWAgol6JIXEPDmUlImqLIdEOh7IS\nEf0/dlwTEZEkp76S4JrRRETKctqQkDtlBhERdZ/dbjedPn0av/71rxEZGYmtW7d2+XipB9+ycn/s\nWWFERGRil5AwGo149913sX37dhw5cgRZWVm4cuVKl84h9eAbp8wgIrIeu4REXl4ehg8fjqFDh6JP\nnz6IiopCTk6OxWNiVxzC6u066C6VAWh58M0cTplBRGQ9dgmJsrIyDB482PS9n58fbt26ZfEYo1E0\n9TvoLpUhKnSE2ddxygwiIuuxS0iIotij41un6n41ZjQCNF5QqwQEaLzwasxodloTEVmRXUY3+fv7\n46effjJ9X1ZWhgceeED28Tcr6qDReGPWVG/MmhqsRIl2odF427sERbly+1y5bQDb15vZJSQeffRR\nFBUVoaSkBBqNBllZWfjggw8sHnN4U6yNqiMiolZ2CQm1Wo1/+7d/w0svvQRRFPH0009j5MiR9iiF\niIgsEMSedhAQEZHL4txNREQkiSFBRE
SSGBJERCTJ4UOip3M8OaLw8HDExMQgLi4OTz/9NADgzp07\neOmllxAZGYkFCxagpqbGzlXKl5KSgsmTJyM6Otq0zVJ71q5dixkzZiA2Nhb5+fn2KLlLzLVvy5Yt\nCAsLg1arhVarxenTp0370tPTMWPGDPzmN7/BmTNn7FGybKWlpZg/fz5mzpyJ6Oho7Ny5E4DrfH7t\n2/fZZ58BcJ3Pr7GxEfHx8YiLi0N0dDS2bNkCACguLkZCQgIiIyOxbNkyNDc3m16/dOlSzJgxA3Pn\nzm3zKIIk0YEZDAYxIiJCLC4uFhsbG8WYmBixsLDQ3mX1WHh4uFhVVdVm24YNG8StW7eKoiiK6enp\n4saNG+1RWrd899134qVLl8RZs2aZtkm15+TJk+Irr7wiiqIoXrhwQYyPj7d9wV1krn2bN28Wd+zY\n0eG1hYWFYmxsrNjU1CTeuHFDjIiIEI1Goy3L7ZJbt26Jly5dEkVRFGtra8UZM2aIhYWFLvP5SbXP\nVT4/URTF+vp6URRFsbm5WYyPjxcvXLggvvHGG+LRo0dFURTF1atXi1988YUoiqKYkZEhvvPOO6Io\nimJWVpb45ptvdnp+h76S6M4cT85AFEUYjcY223JycqDVagEAWq0W2dnZ9iitW8aPH4/+/fu32da+\nPa2fW04mDIcbAAAJLUlEQVRODuLi4gAAY8eORU1NDcrLy21bcBeZax9gfuaAnJwczJw5E25ubggI\nCMDw4cORl5dnizK7RaPRYNSoUQAAT09PjBw5EmVlZS7z+ZlrX+sUQK7w+QFA3759AbRcJTQ3N0MQ\nBOh0OkRGRgJo+/fk3s81MjISubm5nZ7foUOiO3M8OQNBELBgwQLMmTMHe/bsAQBUVFTA19cXQMsv\n9u3bt+1ZYo9VVla2aU9lZSUA4NatW/D39ze9zs/PD2VlZXapsacyMjIQGxuL1NRU0+0Yc7+zztK+\n4uJiFBQUYOzYsR1+H13h82tt35gxYwC4zudnNBoRFxeHKVOmYMqUKQgMDET//v2hUrX8eff39ze1\n4d7PT61Wo3///qiqqrJ4focOCXNJ7wq+/PJL7N+/H9u2bUNGRgbOnTsHQRDsXZZNmPtMnbHtzz77\nLLKzs3Hw4EH4+vrij3/8IwDnbV9dXR2WLFmClJQUeHp6StbsKu1zpc9PpVIhMzMTp0+fRl5entll\nF1rb0L59oih22j6HDomezvHkqDQaDQDAx8cHERERyMvLw6BBg0yX7Xq9Hj4+PvYsscek2uPn54fS\n0lLT60pLS53yM/Xx8TH940pISDDdkvD398fNmzdNr3OG9jU3N2PJkiWIjY1FREQEANf6/My1z5U+\nv1ZeXl6YMGECLl68iOrqatMt7XvbcO/nZzAYUFtbiwEDBlg8r0OHxL1zPDU2NiIrKwvTp0+3d1k9\n0tDQgLq6loWR6uvrcebMGYSEhCA8PBz79+8HABw4cMDp2tn+fyhS7Zk+fToyMzMBABcuXED//v1N\ntzUcWfv26fV609fHjx9HSEgIgJZ2Hz16FI2Njbhx4waKiopMtzccVUpKCoKDg5GYmGja5kqfn7n2\nucrnV1lZabpV9vPPPyM3NxfBwcGYNGkSjh07BqDt5xceHo4DBw4AAI4dO4Ynnnii0/dw+Gk5Tp8+\njT/84Q+mOZ4WLlxo75J65MaNG3j99dchCAIMBgOio6OxcOFCVFVV4c0338TNmzcxZMgQpKWlme0s\ndURvvfUWdDodqqqq4Ovri8WLFyMiIgJvvPGG2fasWbMG33zzDfr27Yt169Zh9OjRdm6BZebap9Pp\nkJ+fD5VKhaFDh2LNmjWmP5bp6enYu3cv3NzckJqaiieffNLOLZB2/vx5PP/88wgJCYEgCBAEAUuX\nLsWYMWMkfx+d6fOTat+RI0dc4vP74YcfsHLlShiNRhiNRsycOROvvfYabty4gWXLlqG6uhqjRo3C\nxo0b0adPHzQ2NmLFihXIz8/HwIED8cEHHyAgIMDiezh8SBARkf049O0mIiKyL4YEERFJYkgQEZEk\nhgQREUliSBARkSSGBBERSWJIkMNrbm5GWloaIiMjER0djaioKKxfvx4Gg8HicatWrUJGRgaAlqmh\nN2zY0Ol7ZWdn43/+53+sUrcSSkpKsHv3bnuXQb0IQ4Ic3sqVK3HlyhVkZmbi8OHDOHToEIKCgtDY\n2Gj198rJyXHoWT+Li4vx17/+tVvHdhaqROa42bsAIkt+/PFH5OTkmJ7wBVpmr4yPjwfQMgPmxo0b\nTYvDPPnkk0hOTrY4adnly5fx+9//Hg0NDWhsbERCQgLmz5+PM2fO4Ouvv0Zubi727t2LF198EcXF\nxTh+/DgEQUBjYyOuXr2K7777Dl5eXm3O+Y9//AMbN25EXV0dBEFAcnIyJk+ejLy8PLz33ntoaGhA\n3759kZqaikcffRRnz57F+vXrsW/fPgBo8/3Zs2fx3nvvYcyYMbhw4QJUKhU++OADBAUF4d1330VJ\nSQm0Wi2GDRuGtLQ0XL16FevWrUNVVRWampowf/58zJ49GwDwi1/8AitWrMDJkycxYcIELFmyxOqf\nEbk4q6x6QaSQo0ePinFxcZL7P//8czEpKUlsbm4Wm5qaxMTERNMCKytXrhR37dolimLLIkHr168X\nRVEU6+rqxMbGRtPXM2fOFK9cudLhmPZWrFgh/vGPf+ywvaqqSpwyZYp44cIFURRF0Wg0itXV1WJj\nY6M4bdo0MTc3VxRFUfz73/8uTps2TWxqahJ1Op04Z84c0znu/V6n04mjR48W8/PzRVEUxT/96U/i\n8uXLO7xOFFsWmtFqteLVq1dFUWxZWCcyMtL0/UMPPST++c9/lvz5EXWGVxLk0MROZo3Jzc2FVquF\nWq0GAMyePRvZ2dl45plnJI9paGjAO++8g4KCAqhUKuj1ehQUFCAoKEjymI8++ggNDQ343e9+12Hf\nhQsXEBwcjLFjxwJomZbZ29sbly9fhru7u2kStdDQULi7u+PatWudtvvBBx/EL37xCwAti/ucPHnS\n7OuuX7+Oq1evYtmyZaafVVNTE65cuYIHH3wQAEyLBBF1B0OCHNro0aNx/fp11NTUwNvbu8N+0cx8\n+J3Nj//BBx9Ao9Fgw4YNpgWgLPVv7Nu3D99++61p/WdzNcjd3lqvWq1uszrh3bt327zOw8PD9LVa\nrTatUWzufD4+PqaZPdsTBAH9+vUzu49IDnZck0MbPnw4wsPDsXr1atMU6waDATt37kRDQwMmT56M\nAwcOoLm5GU1NTcjMzMSUKVMsnrOmpgaDBw+GIAi4fPkyzp07Z9rn6emJ2tpa0/d///vfsW3bNnzy\nySdwd3c3e77HH38chYWFuHjxIoCWfpLq6moEBQWhqakJZ8+eBQB8++23aG5uxogRIxAQEIDi4mLU\n1NRAFEVkZWXJ+nl4eXmZpoYGWq447rvvPhw8eNC07erVq6afVWdXYkSd4ZUEObz169dj8+bNmD1
7\nNtzd3SGKIsLCwuDu7o65c+eiqKjItG7vU089ZerUlvLaa68hOTkZhw4dwrBhwzBhwgTTvtjYWKxa\ntQrHjh3Diy++iH379qGhoQELFiwwXQVkZGS0+d/5gAEDsGXLFqxbtw719fVQq9VITk5GaGgoPv74\nY6xdu9bUcb1582a4ubnBz88PSUlJ0Gq1CAwMxKOPPorCwsJOfxYPPfQQHnzwQURHRyMoKAhpaWn4\nz//8T/zhD3/Ajh07YDAY4Ovri48++giA46+qRo6PU4UTEZEk3m4iIiJJDAkiIpLEkCAiIkkMCSIi\nksSQICIiSQwJIiKSxJAgIiJJDAkiIpL0f3zF2/hGE4QYAAAAAElFTkSuQmCC\n",
+            "text/plain": [
+              "\u003cmatplotlib.figure.Figure at 0x7fc3af690a50\u003e"
+            ]
+          },
+          "metadata": {
+            "tags": []
+          },
+          "output_type": "display_data"
+        }
+      ],
+      "source": [
+        "with context.eager_mode():\n",
+        "\n",
+        "  counts = []\n",
+        "  times = []  \n",
+        "  for n in np.logspace(0, 7, 50):\n",
+        "\n",
+        "    n_tensor = tf.constant(n, dtype=tf.float32)\n",
+        "    count = collatz(n_tensor)\n",
+        "\n",
+        "    res = %timeit -n10 -r1 -o -q collatz(n_tensor)\n",
+        "    times.append(res.best)\n",
+        "    counts.append(count)\n",
+        "      \n",
+        "plot_results(counts, times, 'Eager')\n"
+      ]
+    }
+  ],
+  "metadata": {
+    "colab": {
+      "collapsed_sections": [
+        "x5ChBlH09jk_",
+        "_cRFTcwT9mnn"
+      ],
+      "default_view": {},
+      "last_runtime": {
+        "build_target": "",
+        "kind": "local"
+      },
+      "name": "Autograph vs. Eager Collatz speed test",
+      "provenance": [
+        {
+          "file_id": "0B8bm7KvwJklpMUQtbnVpYkdJUjRtOTRyWVVfSEhpRl9HYm5n",
+          "timestamp": 1531512047714
+        }
+      ],
+      "version": "0.3.2",
+      "views": {}
+    },
+    "kernelspec": {
+      "display_name": "Python 3",
+      "language": "python",
+      "name": "python3"
+    }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 0
+}
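The Collatz timing cell above leans on IPython's `%timeit -n10 -r1 -o -q` magic, which runs the statement 10 times in a single repeat and returns a `TimeitResult` whose `best` attribute is the fastest per-loop time. For reference, a rough non-notebook equivalent can be built on the standard-library `timeit` module; `best_per_loop` below is a hypothetical helper, and the usage comment assumes the `collatz` function and `n_tensor` from the cell above.

```python
import timeit

def best_per_loop(fn, number=10, repeat=1):
    """Rough stand-in for `%timeit -n10 -r1 -o -q fn()`.

    Each repeat calls `fn` `number` times; the best per-loop time is the
    smallest total divided by `number`, matching TimeitResult.best.
    """
    totals = timeit.Timer(fn).repeat(repeat=repeat, number=number)
    return min(totals) / number

# Hypothetical usage, mirroring the cell above:
#   times.append(best_per_loop(lambda: collatz(n_tensor)))
```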
diff --git a/tensorflow/contrib/autograph/examples/notebooks/ag_vs_eager_mnist_speed_test.ipynb b/tensorflow/contrib/autograph/examples/notebooks/ag_vs_eager_mnist_speed_test.ipynb
new file mode 100644
index 0000000..952ec09
--- /dev/null
+++ b/tensorflow/contrib/autograph/examples/notebooks/ag_vs_eager_mnist_speed_test.ipynb
@@ -0,0 +1,652 @@
+{
+  "cells": [
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "etTmZVFN8fYO"
+      },
+      "source": [
+        "This notebook runs a basic speed test for a short training loop of a neural network training on the MNIST dataset."
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "eqOvRhOz8SWs"
+      },
+      "source": [
+        "### Imports"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 0,
+      "metadata": {
+        "colab": {
+          "autoexec": {
+            "startup": false,
+            "wait_interval": 0
+          }
+        },
+        "colab_type": "code",
+        "id": "nHY0tntRizGb"
+      },
+      "outputs": [],
+      "source": [
+        "!pip install -U -q tf-nightly"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 0,
+      "metadata": {
+        "colab": {
+          "autoexec": {
+            "startup": false,
+            "wait_interval": 0
+          }
+        },
+        "colab_type": "code",
+        "id": "Pa2qpEmoVOGe"
+      },
+      "outputs": [],
+      "source": [
+        "import gzip\n",
+        "import os\n",
+        "import shutil\n",
+        "import time\n",
+        "\n",
+        "import numpy as np\n",
+        "import six\n",
+        "from six.moves import urllib\n",
+        "import tensorflow as tf\n",
+        "\n",
+        "from tensorflow.contrib import autograph as ag\n",
+        "from tensorflow.contrib.eager.python import tfe\n",
+        "from tensorflow.python.eager import context\n"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "PZWxEJFM9A7b"
+      },
+      "source": [
+        "### Testing boilerplate"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 0,
+      "metadata": {
+        "colab": {
+          "autoexec": {
+            "startup": false,
+            "wait_interval": 0
+          }
+        },
+        "colab_type": "code",
+        "id": "kfZk9EFZ5TeQ"
+      },
+      "outputs": [],
+      "source": [
+        "# Test-only parameters. Test checks successful completion not correctness. \n",
+        "burn_ins = 1\n",
+        "trials = 1\n",
+        "max_steps = 2\n"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "k0GKbZBJ9Gt9"
+      },
+      "source": [
+        "### Speed test configuration"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 0,
+      "metadata": {
+        "colab": {
+          "autoexec": {
+            "startup": false,
+            "wait_interval": 0
+          }
+        },
+        "colab_type": "code",
+        "id": "gWXV8WHn43iZ"
+      },
+      "outputs": [],
+      "source": [
+        "#@test {\"skip\": true} \n",
+        "burn_ins = 3\n",
+        "trials = 10\n",
+        "max_steps = 500\n"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "kZV_3pGy8033"
+      },
+      "source": [
+        "### Data source setup"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 0,
+      "metadata": {
+        "colab": {
+          "autoexec": {
+            "startup": false,
+            "wait_interval": 0
+          }
+        },
+        "colab_type": "code",
+        "id": "YfnHJbBOBKae"
+      },
+      "outputs": [],
+      "source": [
+        "def download(directory, filename):\n",
+        "  filepath = os.path.join(directory, filename)\n",
+        "  if tf.gfile.Exists(filepath):\n",
+        "    return filepath\n",
+        "  if not tf.gfile.Exists(directory):\n",
+        "    tf.gfile.MakeDirs(directory)\n",
+        "  url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz'\n",
+        "  zipped_filepath = filepath + '.gz'\n",
+        "  print('Downloading %s to %s' % (url, zipped_filepath))\n",
+        "  urllib.request.urlretrieve(url, zipped_filepath)\n",
+        "  with gzip.open(zipped_filepath, 'rb') as f_in, open(filepath, 'wb') as f_out:\n",
+        "    shutil.copyfileobj(f_in, f_out)\n",
+        "  os.remove(zipped_filepath)\n",
+        "  return filepath\n",
+        "\n",
+        "\n",
+        "def dataset(directory, images_file, labels_file):\n",
+        "  images_file = download(directory, images_file)\n",
+        "  labels_file = download(directory, labels_file)\n",
+        "\n",
+        "  def decode_image(image):\n",
+        "    # Normalize from [0, 255] to [0.0, 1.0]\n",
+        "    image = tf.decode_raw(image, tf.uint8)\n",
+        "    image = tf.cast(image, tf.float32)\n",
+        "    image = tf.reshape(image, [784])\n",
+        "    return image / 255.0\n",
+        "\n",
+        "  def decode_label(label):\n",
+        "    label = tf.decode_raw(label, tf.uint8)\n",
+        "    label = tf.reshape(label, [])\n",
+        "    return tf.to_int32(label)\n",
+        "\n",
+        "  images = tf.data.FixedLengthRecordDataset(\n",
+        "      images_file, 28 * 28, header_bytes=16).map(decode_image)\n",
+        "  labels = tf.data.FixedLengthRecordDataset(\n",
+        "      labels_file, 1, header_bytes=8).map(decode_label)\n",
+        "  return tf.data.Dataset.zip((images, labels))\n",
+        "\n",
+        "\n",
+        "def mnist_train(directory):\n",
+        "  return dataset(directory, 'train-images-idx3-ubyte',\n",
+        "                 'train-labels-idx1-ubyte')\n",
+        "\n",
+        "def mnist_test(directory):\n",
+        "  return dataset(directory, 't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte')\n",
+        "\n",
+        "def setup_mnist_data(is_training, hp, batch_size):\n",
+        "  if is_training:\n",
+        "    ds = mnist_train('/tmp/autograph_mnist_data')\n",
+        "    ds = ds.cache()\n",
+        "    ds = ds.shuffle(batch_size * 10)\n",
+        "  else:\n",
+        "    ds = mnist_test('/tmp/autograph_mnist_data')\n",
+        "    ds = ds.cache()\n",
+        "  ds = ds.repeat()\n",
+        "  ds = ds.batch(batch_size)\n",
+        "  return ds\n"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "qzkZyZcS9THu"
+      },
+      "source": [
+        "### Keras model definition"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 0,
+      "metadata": {
+        "colab": {
+          "autoexec": {
+            "startup": false,
+            "wait_interval": 0
+          }
+        },
+        "colab_type": "code",
+        "id": "x_MU13boiok2"
+      },
+      "outputs": [],
+      "source": [
+        "def mlp_model(input_shape):\n",
+        "  model = tf.keras.Sequential((\n",
+        "      tf.keras.layers.Dense(100, activation='relu', input_shape=input_shape),\n",
+        "      tf.keras.layers.Dense(100, activation='relu'),\n",
+        "      tf.keras.layers.Dense(10, activation='softmax')))\n",
+        "  model.build()\n",
+        "  return model\n"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "DXt4GoTxtvn2"
+      },
+      "source": [
+        "# AutoGraph"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 0,
+      "metadata": {
+        "colab": {
+          "autoexec": {
+            "startup": false,
+            "wait_interval": 0
+          }
+        },
+        "colab_type": "code",
+        "id": "W51sfbONiz_5"
+      },
+      "outputs": [],
+      "source": [
+        "def predict(m, x, y):\n",
+        "  y_p = m(x)\n",
+        "  losses = tf.keras.losses.categorical_crossentropy(y, y_p)\n",
+        "  l = tf.reduce_mean(losses)\n",
+        "  accuracies = tf.keras.metrics.categorical_accuracy(y, y_p)\n",
+        "  accuracy = tf.reduce_mean(accuracies)\n",
+        "  return l, accuracy\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 0,
+      "metadata": {
+        "colab": {
+          "autoexec": {
+            "startup": false,
+            "wait_interval": 0
+          }
+        },
+        "colab_type": "code",
+        "id": "CsAD0ajbi9iZ"
+      },
+      "outputs": [],
+      "source": [
+        "def fit(m, x, y, opt):\n",
+        "  l, accuracy = predict(m, x, y)\n",
+        "  opt.minimize(l)\n",
+        "  return l, accuracy\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 0,
+      "metadata": {
+        "colab": {
+          "autoexec": {
+            "startup": false,
+            "wait_interval": 0
+          }
+        },
+        "colab_type": "code",
+        "id": "RVw57HdTjPzi"
+      },
+      "outputs": [],
+      "source": [
+        "def get_next_batch(ds):\n",
+        "  itr = ds.make_one_shot_iterator()\n",
+        "  image, label = itr.get_next()\n",
+        "  x = tf.to_float(tf.reshape(image, (-1, 28 * 28)))\n",
+        "  y = tf.one_hot(tf.squeeze(label), 10)\n",
+        "  return x, y\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 0,
+      "metadata": {
+        "colab": {
+          "autoexec": {
+            "startup": false,
+            "wait_interval": 0
+          }
+        },
+        "colab_type": "code",
+        "id": "UUI0566FjZPx"
+      },
+      "outputs": [],
+      "source": [
+        "def train(train_ds, test_ds, hp):\n",
+        "  m = mlp_model((28 * 28,))\n",
+        "  opt = tf.train.MomentumOptimizer(hp.learning_rate, 0.9)\n",
+        "\n",
+        "  train_losses = []\n",
+        "  test_losses = []\n",
+        "  train_accuracies = []\n",
+        "  test_accuracies = []\n",
+        "  ag.set_element_type(train_losses, tf.float32)\n",
+        "  ag.set_element_type(test_losses, tf.float32)\n",
+        "  ag.set_element_type(train_accuracies, tf.float32)\n",
+        "  ag.set_element_type(test_accuracies, tf.float32)\n",
+        "\n",
+        "  i = tf.constant(0)\n",
+        "  while i \u003c hp.max_steps:\n",
+        "    train_x, train_y = get_next_batch(train_ds)\n",
+        "    test_x, test_y = get_next_batch(test_ds)\n",
+        "    step_train_loss, step_train_accuracy = fit(m, train_x, train_y, opt)\n",
+        "    step_test_loss, step_test_accuracy = predict(m, test_x, test_y)\n",
+        "\n",
+        "    train_losses.append(step_train_loss)\n",
+        "    test_losses.append(step_test_loss)\n",
+        "    train_accuracies.append(step_train_accuracy)\n",
+        "    test_accuracies.append(step_test_accuracy)\n",
+        "\n",
+        "    i += 1\n",
+        "  return (ag.stack(train_losses), ag.stack(test_losses),\n",
+        "          ag.stack(train_accuracies), ag.stack(test_accuracies))\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 0,
+      "metadata": {
+        "colab": {
+          "autoexec": {
+            "startup": false,
+            "wait_interval": 0
+          },
+          "height": 215
+        },
+        "colab_type": "code",
+        "executionInfo": {
+          "elapsed": 12156,
+          "status": "ok",
+          "timestamp": 1531752050611,
+          "user": {
+            "displayName": "",
+            "photoUrl": "",
+            "userId": ""
+          },
+          "user_tz": 240
+        },
+        "id": "K1m8TwOKjdNd",
+        "outputId": "bd5746f2-bf91-44aa-9eff-38eb11ced33f"
+      },
+      "outputs": [
+        {
+          "name": "stdout",
+          "output_type": "stream",
+          "text": [
+            "('Duration:', 0.6226680278778076)\n",
+            "('Duration:', 0.6082069873809814)\n",
+            "('Duration:', 0.6223258972167969)\n",
+            "('Duration:', 0.6176440715789795)\n",
+            "('Duration:', 0.6309840679168701)\n",
+            "('Duration:', 0.6180410385131836)\n",
+            "('Duration:', 0.6219630241394043)\n",
+            "('Duration:', 0.6183009147644043)\n",
+            "('Duration:', 0.6176400184631348)\n",
+            "('Duration:', 0.6476900577545166)\n",
+            "('Mean duration:', 0.62254641056060789, '+/-', 0.0099792188690656976)\n"
+          ]
+        }
+      ],
+      "source": [
+        "#@test {\"timeout\": 90}\n",
+        "with tf.Graph().as_default():\n",
+        "  hp = tf.contrib.training.HParams(\n",
+        "      learning_rate=0.05,\n",
+        "      max_steps=max_steps,\n",
+        "  )\n",
+        "  train_ds = setup_mnist_data(True, hp, 500)\n",
+        "  test_ds = setup_mnist_data(False, hp, 100)\n",
+        "  tf_train = ag.to_graph(train)\n",
+        "  losses = tf_train(train_ds, test_ds, hp)\n",
+        "\n",
+        "  with tf.Session() as sess:\n",
+        "    durations = []\n",
+        "    for t in range(burn_ins + trials):\n",
+        "      sess.run(tf.global_variables_initializer())\n",
+        "\n",
+        "      start = time.time()\n",
+        "      (train_losses, test_losses, train_accuracies,\n",
+        "       test_accuracies) = sess.run(losses)\n",
+        "\n",
+        "      if t \u003c burn_ins:\n",
+        "        continue\n",
+        "\n",
+        "      duration = time.time() - start\n",
+        "      durations.append(duration)\n",
+        "      print('Duration:', duration)\n",
+        "\n",
+        "    print('Mean duration:', np.mean(durations), '+/-', np.std(durations))\n"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "A06kdgtZtlce"
+      },
+      "source": [
+        "# Eager"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 0,
+      "metadata": {
+        "colab": {
+          "autoexec": {
+            "startup": false,
+            "wait_interval": 0
+          }
+        },
+        "colab_type": "code",
+        "id": "hBKOKGrWty4e"
+      },
+      "outputs": [],
+      "source": [
+        "def predict(m, x, y):\n",
+        "  y_p = m(x)\n",
+        "  losses = tf.keras.losses.categorical_crossentropy(tf.cast(y, tf.float32), y_p)\n",
+        "  l = tf.reduce_mean(losses)\n",
+        "  accuracies = tf.keras.metrics.categorical_accuracy(y, y_p)\n",
+        "  accuracy = tf.reduce_mean(accuracies)\n",
+        "  return l, accuracy\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 0,
+      "metadata": {
+        "colab": {
+          "autoexec": {
+            "startup": false,
+            "wait_interval": 0
+          }
+        },
+        "colab_type": "code",
+        "id": "HCgTZ0MTt6vt"
+      },
+      "outputs": [],
+      "source": [
+        "def train(ds, hp):\n",
+        "  m = mlp_model((28 * 28,))\n",
+        "  opt = tf.train.MomentumOptimizer(hp.learning_rate, 0.9)\n",
+        "\n",
+        "  train_losses = []\n",
+        "  test_losses = []\n",
+        "  train_accuracies = []\n",
+        "  test_accuracies = []\n",
+        "\n",
+        "  i = 0\n",
+        "  train_test_itr = tfe.Iterator(ds)\n",
+        "  for (train_x, train_y), (test_x, test_y) in train_test_itr:\n",
+        "    train_x = tf.to_float(tf.reshape(train_x, (-1, 28 * 28)))\n",
+        "    train_y = tf.one_hot(tf.squeeze(train_y), 10)\n",
+        "    test_x = tf.to_float(tf.reshape(test_x, (-1, 28 * 28)))\n",
+        "    test_y = tf.one_hot(tf.squeeze(test_y), 10)\n",
+        "\n",
+        "    if i \u003e hp.max_steps:\n",
+        "      break\n",
+        "\n",
+        "    with tf.GradientTape() as tape:\n",
+        "      step_train_loss, step_train_accuracy = predict(m, train_x, train_y)\n",
+        "    grad = tape.gradient(step_train_loss, m.variables)\n",
+        "    opt.apply_gradients(zip(grad, m.variables))\n",
+        "    step_test_loss, step_test_accuracy = predict(m, test_x, test_y)\n",
+        "\n",
+        "    train_losses.append(step_train_loss)\n",
+        "    test_losses.append(step_test_loss)\n",
+        "    train_accuracies.append(step_train_accuracy)\n",
+        "    test_accuracies.append(step_test_accuracy)\n",
+        "\n",
+        "    i += 1\n",
+        "  return train_losses, test_losses, train_accuracies, test_accuracies\n"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 0,
+      "metadata": {
+        "colab": {
+          "autoexec": {
+            "startup": false,
+            "wait_interval": 0
+          },
+          "height": 215
+        },
+        "colab_type": "code",
+        "executionInfo": {
+          "elapsed": 52499,
+          "status": "ok",
+          "timestamp": 1531752103279,
+          "user": {
+            "displayName": "",
+            "photoUrl": "",
+            "userId": ""
+          },
+          "user_tz": 240
+        },
+        "id": "plv_yrn_t8Dy",
+        "outputId": "55d5ab3d-252d-48ba-8fb4-20ec3c3e6d00"
+      },
+      "outputs": [
+        {
+          "name": "stdout",
+          "output_type": "stream",
+          "text": [
+            "('Duration:', 3.9973549842834473)\n",
+            "('Duration:', 4.018772125244141)\n",
+            "('Duration:', 3.9740989208221436)\n",
+            "('Duration:', 3.9922947883605957)\n",
+            "('Duration:', 3.9795801639556885)\n",
+            "('Duration:', 3.966722011566162)\n",
+            "('Duration:', 3.986541986465454)\n",
+            "('Duration:', 3.992305040359497)\n",
+            "('Duration:', 4.012261867523193)\n",
+            "('Duration:', 4.004716157913208)\n",
+            "('Mean duration:', 3.9924648046493529, '+/-', 0.015681688635624851)\n"
+          ]
+        }
+      ],
+      "source": [
+        "#@test {\"timeout\": 90}\n",
+        "with context.eager_mode():\n",
+        "  durations = []\n",
+        "  for t in range(burn_ins + trials):\n",
+        "    hp = tf.contrib.training.HParams(\n",
+        "        learning_rate=0.05,\n",
+        "        max_steps=max_steps,\n",
+        "    )\n",
+        "    train_ds = setup_mnist_data(True, hp, 500)\n",
+        "    test_ds = setup_mnist_data(False, hp, 100)\n",
+        "    ds = tf.data.Dataset.zip((train_ds, test_ds))\n",
+        "    start = time.time()\n",
+        "    (train_losses, test_losses, train_accuracies,\n",
+        "     test_accuracies) = train(ds, hp)\n",
+        "    \n",
+        "    train_losses[-1].numpy()\n",
+        "    test_losses[-1].numpy()\n",
+        "    train_accuracies[-1].numpy()\n",
+        "    test_accuracies[-1].numpy()\n",
+        "\n",
+        "    if t \u003c burn_ins:\n",
+        "      continue\n",
+        "\n",
+        "    duration = time.time() - start\n",
+        "    durations.append(duration)\n",
+        "    print('Duration:', duration)\n",
+        "\n",
+        "  print('Mean duration:', np.mean(durations), '+/-', np.std(durations))\n"
+      ]
+    }
+  ],
+  "metadata": {
+    "colab": {
+      "collapsed_sections": [
+        "eqOvRhOz8SWs",
+        "PZWxEJFM9A7b",
+        "kZV_3pGy8033"
+      ],
+      "default_view": {},
+      "name": "Autograph vs. Eager MNIST speed test",
+      "provenance": [
+        {
+          "file_id": "1tAQW5tHUgAc8M4-iwwJm6Xs6dV9nEqtD",
+          "timestamp": 1530297010607
+        },
+        {
+          "file_id": "18dCjshrmHiPTIe1CNsL8tnpdGkuXgpM9",
+          "timestamp": 1530289467317
+        },
+        {
+          "file_id": "1DcfimonWU11tmyivKBGVrbpAl3BIOaRG",
+          "timestamp": 1522272821237
+        },
+        {
+          "file_id": "1wCZUh73zTNs1jzzYjqoxMIdaBWCdKJ2K",
+          "timestamp": 1522238054357
+        },
+        {
+          "file_id": "1_HpC-RrmIv4lNaqeoslUeWaX8zH5IXaJ",
+          "timestamp": 1521743157199
+        },
+        {
+          "file_id": "1mjO2fQ2F9hxpAzw2mnrrUkcgfb7xSGW-",
+          "timestamp": 1520522344607
+        }
+      ],
+      "version": "0.3.2",
+      "views": {}
+    }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 0
+}
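Both halves of the notebook above share one measurement skeleton: the AutoGraph variant converts the Python training loop into graph ops once with `ag.to_graph(train)` and then times only the `Session.run` calls, while the eager variant times the Python loop directly; per the captured outputs, that works out to roughly 0.62 s versus 3.99 s per 500-step trial, about a 6x gap. The sketch below condenses the graph-side skeleton; `benchmark_graph` and `make_args` are hypothetical names, and it assumes a `train` function and data setup like the ones above (inputs are built via a callable so they land in the same graph).

```python
import time

import numpy as np
import tensorflow as tf
from tensorflow.contrib import autograph as ag


def benchmark_graph(train_fn, make_args, burn_ins=3, trials=10):
  """Times a training loop converted to graph ops, discarding burn-in runs."""
  with tf.Graph().as_default():
    # Build the inputs and convert the Python loop inside one graph context.
    outputs = ag.to_graph(train_fn)(*make_args())
    with tf.Session() as sess:
      durations = []
      for t in range(burn_ins + trials):
        sess.run(tf.global_variables_initializer())
        start = time.time()
        sess.run(outputs)  # the whole training loop executes in-graph
        if t >= burn_ins:
          durations.append(time.time() - start)
  return np.mean(durations), np.std(durations)

# Hypothetical usage, with `train`, `setup_mnist_data` and `hp` as above:
#   mean, std = benchmark_graph(train, lambda: (
#       setup_mnist_data(True, hp, 500), setup_mnist_data(False, hp, 100), hp))
```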
diff --git a/tensorflow/contrib/autograph/examples/notebooks/autograph_vs_eager_mnist_benchmark.ipynb b/tensorflow/contrib/autograph/examples/notebooks/autograph_vs_eager_mnist_benchmark.ipynb
deleted file mode 100644
index fff6739..0000000
--- a/tensorflow/contrib/autograph/examples/notebooks/autograph_vs_eager_mnist_benchmark.ipynb
+++ /dev/null
@@ -1,666 +0,0 @@
-{
-  "cells": [
-    {
-      "cell_type": "code",
-      "execution_count": 0,
-      "metadata": {
-        "colab": {
-          "autoexec": {
-            "startup": false,
-            "wait_interval": 0
-          }
-        },
-        "colab_type": "code",
-        "id": "Pa2qpEmoVOGe"
-      },
-      "outputs": [],
-      "source": [
-        "from __future__ import absolute_import\n",
-        "from __future__ import division\n",
-        "from __future__ import print_function\n",
-        "\n",
-        "import os\n",
-        "import time\n",
-        "\n",
-        "import tensorflow as tf\n",
-        "\n",
-        "import matplotlib.pyplot as plt\n",
-        "import numpy as np\n",
-        "import six\n",
-        "\n",
-        "from tensorflow.contrib import autograph\n",
-        "from tensorflow.contrib.eager.python import tfe\n",
-        "from tensorflow.python.eager import context\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": 0,
-      "metadata": {
-        "colab": {
-          "autoexec": {
-            "startup": false,
-            "wait_interval": 0
-          }
-        },
-        "colab_type": "code",
-        "id": "YfnHJbBOBKae"
-      },
-      "outputs": [],
-      "source": [
-        "import gzip\n",
-        "import shutil\n",
-        "\n",
-        "from six.moves import urllib\n",
-        "\n",
-        "\n",
-        "def download(directory, filename):\n",
-        "  filepath = os.path.join(directory, filename)\n",
-        "  if tf.gfile.Exists(filepath):\n",
-        "    return filepath\n",
-        "  if not tf.gfile.Exists(directory):\n",
-        "    tf.gfile.MakeDirs(directory)\n",
-        "  url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz'\n",
-        "  zipped_filepath = filepath + '.gz'\n",
-        "  print('Downloading %s to %s' % (url, zipped_filepath))\n",
-        "  urllib.request.urlretrieve(url, zipped_filepath)\n",
-        "  with gzip.open(zipped_filepath, 'rb') as f_in, open(filepath, 'wb') as f_out:\n",
-        "    shutil.copyfileobj(f_in, f_out)\n",
-        "  os.remove(zipped_filepath)\n",
-        "  return filepath\n",
-        "\n",
-        "\n",
-        "def dataset(directory, images_file, labels_file):\n",
-        "  images_file = download(directory, images_file)\n",
-        "  labels_file = download(directory, labels_file)\n",
-        "\n",
-        "  def decode_image(image):\n",
-        "    # Normalize from [0, 255] to [0.0, 1.0]\n",
-        "    image = tf.decode_raw(image, tf.uint8)\n",
-        "    image = tf.cast(image, tf.float32)\n",
-        "    image = tf.reshape(image, [784])\n",
-        "    return image / 255.0\n",
-        "\n",
-        "  def decode_label(label):\n",
-        "    label = tf.decode_raw(label, tf.uint8)\n",
-        "    label = tf.reshape(label, [])\n",
-        "    return tf.to_int32(label)\n",
-        "\n",
-        "  images = tf.data.FixedLengthRecordDataset(\n",
-        "      images_file, 28 * 28, header_bytes=16).map(decode_image)\n",
-        "  labels = tf.data.FixedLengthRecordDataset(\n",
-        "      labels_file, 1, header_bytes=8).map(decode_label)\n",
-        "  return tf.data.Dataset.zip((images, labels))\n",
-        "\n",
-        "\n",
-        "def mnist_train(directory):\n",
-        "  return dataset(directory, 'train-images-idx3-ubyte',\n",
-        "                 'train-labels-idx1-ubyte')\n",
-        "\n",
-        "def mnist_test(directory):\n",
-        "  return dataset(directory, 't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte')\n",
-        "\n",
-        "def setup_mnist_data(is_training, hp, batch_size):\n",
-        "  if is_training:\n",
-        "    ds = mnist_train('/tmp/autograph_mnist_data')\n",
-        "    ds = ds.cache()\n",
-        "    ds = ds.shuffle(batch_size * 10)\n",
-        "  else:\n",
-        "    ds = mnist_test('/tmp/autograph_mnist_data')\n",
-        "    ds = ds.cache()\n",
-        "  ds = ds.repeat()\n",
-        "  ds = ds.batch(batch_size)\n",
-        "  return ds\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": 0,
-      "metadata": {
-        "colab": {
-          "autoexec": {
-            "startup": false,
-            "wait_interval": 0
-          }
-        },
-        "colab_type": "code",
-        "id": "x_MU13boiok2"
-      },
-      "outputs": [],
-      "source": [
-        "def mlp_model(input_shape):\n",
-        "  model = tf.keras.Sequential((\n",
-        "      tf.keras.layers.Dense(100, activation='relu', input_shape=input_shape),\n",
-        "      tf.keras.layers.Dense(100, activation='relu'),\n",
-        "      tf.keras.layers.Dense(10, activation='softmax')))\n",
-        "  model.build()\n",
-        "  return model\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": 0,
-      "metadata": {
-        "colab": {
-          "autoexec": {
-            "startup": false,
-            "wait_interval": 0
-          }
-        },
-        "colab_type": "code",
-        "id": "kfZk9EFZ5TeQ"
-      },
-      "outputs": [],
-      "source": [
-        "# Test-only parameters. Test checks successful completion not correctness. \n",
-        "burn_ins = 1\n",
-        "trials = 1\n",
-        "max_steps = 2"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": 0,
-      "metadata": {
-        "colab": {
-          "autoexec": {
-            "startup": false,
-            "wait_interval": 0
-          }
-        },
-        "colab_type": "code",
-        "id": "gWXV8WHn43iZ"
-      },
-      "outputs": [],
-      "source": [
-        "#@test {\"skip\": true} \n",
-        "burn_ins = 3\n",
-        "trials = 10\n",
-        "max_steps = 500"
-      ]
-    },
-    {
-      "cell_type": "markdown",
-      "metadata": {
-        "colab_type": "text",
-        "id": "DXt4GoTxtvn2"
-      },
-      "source": [
-        "# Autograph"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": 0,
-      "metadata": {
-        "colab": {
-          "autoexec": {
-            "startup": false,
-            "wait_interval": 0
-          }
-        },
-        "colab_type": "code",
-        "id": "W51sfbONiz_5"
-      },
-      "outputs": [],
-      "source": [
-        "def predict(m, x, y):\n",
-        "  y_p = m(x)\n",
-        "  losses = tf.keras.losses.categorical_crossentropy(y, y_p)\n",
-        "  l = tf.reduce_mean(losses)\n",
-        "  accuracies = tf.keras.metrics.categorical_accuracy(y, y_p)\n",
-        "  accuracy = tf.reduce_mean(accuracies)\n",
-        "  return l, accuracy\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": 0,
-      "metadata": {
-        "colab": {
-          "autoexec": {
-            "startup": false,
-            "wait_interval": 0
-          }
-        },
-        "colab_type": "code",
-        "id": "CsAD0ajbi9iZ"
-      },
-      "outputs": [],
-      "source": [
-        "def fit(m, x, y, opt):\n",
-        "  l, accuracy = predict(m, x, y)\n",
-        "  opt.minimize(l)\n",
-        "  return l, accuracy\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": 0,
-      "metadata": {
-        "colab": {
-          "autoexec": {
-            "startup": false,
-            "wait_interval": 0
-          }
-        },
-        "colab_type": "code",
-        "id": "RVw57HdTjPzi"
-      },
-      "outputs": [],
-      "source": [
-        "def get_next_batch(ds):\n",
-        "  itr = ds.make_one_shot_iterator()\n",
-        "  image, label = itr.get_next()\n",
-        "  x = tf.to_float(tf.reshape(image, (-1, 28 * 28)))\n",
-        "  y = tf.one_hot(tf.squeeze(label), 10)\n",
-        "  return x, y\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": 0,
-      "metadata": {
-        "colab": {
-          "autoexec": {
-            "startup": false,
-            "wait_interval": 0
-          }
-        },
-        "colab_type": "code",
-        "id": "UUI0566FjZPx"
-      },
-      "outputs": [],
-      "source": [
-        "def train(train_ds, test_ds, hp):\n",
-        "  m = mlp_model((28 * 28,))\n",
-        "  opt = tf.train.MomentumOptimizer(hp.learning_rate, 0.9)\n",
-        "  train_losses = []\n",
-        "  train_losses = autograph.utils.set_element_type(train_losses, tf.float32)\n",
-        "  test_losses = []\n",
-        "  test_losses = autograph.utils.set_element_type(test_losses, tf.float32)\n",
-        "  train_accuracies = []\n",
-        "  train_accuracies = autograph.utils.set_element_type(train_accuracies,\n",
-        "                                                      tf.float32)\n",
-        "  test_accuracies = []\n",
-        "  test_accuracies = autograph.utils.set_element_type(test_accuracies,\n",
-        "                                                     tf.float32)\n",
-        "  i = tf.constant(0)\n",
-        "  while i \u003c hp.max_steps:\n",
-        "    train_x, train_y = get_next_batch(train_ds)\n",
-        "    test_x, test_y = get_next_batch(test_ds)\n",
-        "    step_train_loss, step_train_accuracy = fit(m, train_x, train_y, opt)\n",
-        "    step_test_loss, step_test_accuracy = predict(m, test_x, test_y)\n",
-        "\n",
-        "    train_losses.append(step_train_loss)\n",
-        "    test_losses.append(step_test_loss)\n",
-        "    train_accuracies.append(step_train_accuracy)\n",
-        "    test_accuracies.append(step_test_accuracy)\n",
-        "    i += 1\n",
-        "  return (autograph.stack(train_losses), autograph.stack(test_losses),  autograph.stack(train_accuracies),\n",
-        "          autograph.stack(test_accuracies))\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": 37,
-      "metadata": {
-        "colab": {
-          "autoexec": {
-            "startup": false,
-            "wait_interval": 0
-          },
-          "height": 789
-        },
-        "colab_type": "code",
-        "executionInfo": {
-          "elapsed": 11529,
-          "status": "ok",
-          "timestamp": 1531163743912,
-          "user": {
-            "displayName": "",
-            "photoUrl": "",
-            "userId": ""
-          },
-          "user_tz": 240
-        },
-        "id": "K1m8TwOKjdNd",
-        "outputId": "59db8f19-23a5-413a-e9d0-fb756b0e4757"
-      },
-      "outputs": [
-        {
-          "name": "stdout",
-          "output_type": "stream",
-          "text": [
-            "Duration: 0.592790126801\n",
-            "Duration: 0.594069957733\n",
-            "Duration: 0.591835975647\n",
-            "Duration: 0.592386007309\n",
-            "Duration: 0.595040082932\n",
-            "Duration: 0.594245910645\n",
-            "Duration: 0.624264001846\n",
-            "Duration: 0.6021900177\n",
-            "Duration: 0.592960119247\n",
-            "Duration: 0.599496841431\n",
-            "Mean duration: 0.597927904129 +/- 0.0093268291102\n"
-          ]
-        },
-        {
-          "data": {
-            "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYkAAAEcCAYAAAAydkhNAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzs3Xd8FGX+wPHPbMum90IKvQSQ3jtSbYCAqHee9TxPT0VF\njztRT+9UzvMOsdzPUxTO3gURsYsgTRBFmvROQkJ63T7z+2OS3Wx2EwIkBC7f9+vFi+zO7Mwzz84+\n33nKPKNomqYhhBBCBGFo7gQIIYQ4d0mQEEIIUScJEkIIIeokQUIIIUSdJEgIIYSokwQJIYQQdZIg\nIYQQok4SJISow6ZNm7j44oubOxknlZWVRWZmJqqqNndSxP8gCRLilI0ZM4YePXpQXFzs9/6UKVPI\nzMwkOzsbgD//+c9kZmaybds27zpHjhwhMzPT+/raa6/lgw8+8L5+4YUXGDt2LH379mX06NHMmjUL\ngMsuu4y+ffvSt29funXrRs+ePenTpw99+/ZlwYIFAWn897//zezZs8/oOPv3789nn312Sp958cUX\nmT9/Phs3bmTUqFFntP9qtfMoGEVRGmVfQtRmau4EiPNTeno6y5cv55prrgFgz549OBwOv8JKURRi\nYmJ4+umnWbhwod/7wSxZsoRly5bx6quvkp6eTkFBAStWrADgk08+8a537bXXcvnllzN9+vQzOgZN\n0xq9cF21ahX33XcfLpdLCm7xP0FqEuK0TJkyhSVLlnhfL1myhKlTpwasN3XqVHbv3s2mTZtOus3t\n27czfPhw0tPTAYiPj2fGjBlB161vNpnVq1fzwgsv8Omnn9KnTx8uv/xyQA8u8+fP51e/+hW9e/fm\n2LFjLF68mEsuuYS+ffsyfvx43n33Xe92atcGxowZw6JFi5g8eTIDBgxg1qxZOJ1O7/LS0lIOHz5M\nt27duOWWWzhx4oS3tpOXl4emaSxYsIDx48czePBg7rnnHkpLSwFwOp388Y9/ZNCgQQwYMIAZM2ZQ\nWFjI/Pnz+fHHH3n00Ufp27cvjz322Enz8cSJE9x2220MGjSIiRMn8v7773uXbd26lenTp9OvXz+G\nDx/OP/7xj3r3D1BeXs4DDzzA8OHDGTVqFE8//bQ3/48cOcK1115L//79GTJkiLfmJ/53SE1CnJZe\nvXqxdOlSDhw4QNu2bfn888956623mD9/vt96VquVW2+9laeeeoq33nrrpNt8/PHHSUpKYtCgQXTr\n1g2D4dSvY0aMGMGtt97KkSNHePLJJ/2WLVu2jJdeeol27dqhqirx8fEsWLCA9PR0Nm3axM0330zP\nnj3p2rUrEFjr+fzzz1m0aBEWi4Wrr76aJUuWcNVVVwGwZs0aBg8ejNVq5aWXXmL27NmsXLnS+9lX\nXnmFFStW8OabbxIbG8tjjz3GX//6V+bNm8eSJUsoLy9n9erVmM1mdu7cSUhICPfccw8//fQTU6ZM\n4YorrmjQ8c+aNYsuXbrw7LPPsn//fm688UYyMjIYPHgwc+fO5frrr2fy5MnYbDb27t0LUOf+AWbP\nnk1SUhLffPMNFRUV3HrrraSmpnLllVfyzDPPMHz4cF5//XWcTifbt28/5e9LnNukJiFO25QpU/jo\no49Yu3Yt7du3JykpKeh6V155JcePH2f16tX1bm/y5Mk89NBDrF27lmuvvZahQ4cG7W84E1OnTqVD\nhw4YDAZMJhOjRo3y1lz69+/PsGHD6q31XHfddSQkJBAVFcWFF17Izp07vctWrlxZbz/Ee++9x913\n301SUhJms5nbb7+dL774AlVVMZlMFBcXc/DgQRRFoVu3boSHh5/y8R0/fpzNmzdz3333YTabyczM\nZMaMGSxduhQAk8nEkSNHKCoqIjQ0lJ49e3rfD7b/goICVq9ezZw5cwgJCSEuLo7rr7+e5cuXez+X\nlZVFbm4uFouFvn37nnKaxblNahLitE2ePJnf/OY3HDt2jClTptS5nsVi4Q9/+APPPPMM8+bNq3eb\nl112GZdddhkej4evv/6ae++9l+7duzNs2LBGSXNKSorf61WrVvH8889z6NAhVFXFbrfTpUuXOj8f\nHx/v/Ts0NJS8vDxAb/5at24d999/f52fzc7O5o477vDWjjRNw2QykZ+fz5QpU8jJyWHWrFmUlZUx\nadIkZs2ahdFoPKXjy8vLIzo6mtDQUO97qamp7NixA4C5c+fyzDPPcPHFF5ORkcHtt9/O6NGjA/Y/\nefJk7rnnHrKysnC73QwfPtybZk3TaNWqFaDXMp5++mmuuOIKYmJiuOGGG864r0icWyRIiNOWmppK\nWloa3333HXPnzq133WnTpvHyyy/z1VdfNWjbRqORiRMnsmDBAvbu3dtoQaJm85HT6eSuu+7in//8\nJ2PHjsVgMHD77bfX299Rl23btpGenk5sbGzAfqq1atWKuXPn0qdPn6DbuP3227n99tvJzs7md7/7\nHe3bt2f69Omn1AGelJRESUkJlZWVhIWFAXrtorqW17p1a2+g/uKLL5g5cyYbN27EarUG7L9du3aM\nHDmSkJAQNmzYEDQd8fHxPProowD8+OOP3HjjjQwcOJCMjIwGp1mc26S5SZyRuXPn8uqrr2K1Wutd\nz2g0cscdd/DSSy/Vuc6SJUtYtWoVFRUVaJrGqlWr2L9/v7dJ5FQkJCSQlZVVb4HvcrlwuVzExsZi\nMBhYtWoVa9euPeV9gd7UNHLkSO/r+Ph4iouLKS8v97531VVX8dRTT3mHCBcWFvLNN98AsGHDBvbs\n2YOqqoSFhWEymby1iISEBI4ePVrv/quPMyUlhT59+vDUU0/hdDrZtWsXH3zwAZMnTwbg448/9nZI\nR0ZGoigKBoOhzv0nJiYybNgw5s6dS3l5OZqmcfToUX744QdA76PJzc0FICoqCoPBcFr9SOLc1aQ1\niTlz5rBy5Uri4+NZtmyZ9/3XX3+dN998E7PZzKhRo7jvvvuaMhmikdW8oqx9xVjfVe9ll13GggUL\nKCsrC7p+REQEL7zwAgcOHMDj8ZCamsojjzwS0M7dkCvriy66iI8//phBgwaRnp7O4sWLAz4XHh7O\nAw88wF133YXL5eLCCy9k7NixdW6zvv2uWrWKv/3tb97X7du359JLL2Xs2LFomsby5cu5/vrrAbjp\nppvIy8sjPj6eiy++mLFjx5Kfn8/DDz9Mbm4u4eHhXHLJJd6C/brrruNPf/oT77zzDpMnT+aBBx6o\nN23z5s3j4YcfZsSIEURHR3PXXXcxZMgQQB/59cQTT2C320lLS2P+/PlYLJZ69/+Pf/yDf/3rX1x6\n6aVUVlaSkZHBzTffDOg1qOoAkpCQwAMPPEBaWlq93404vyhN+WS6TZs2ER4ezuzZs71BYsOGDbz4\n4ossWLAAk8lEYWEhcXFxTZUEIZpcQUEBl19++Uk75oU4HzVpvbB///5ERUX5vff222/zu9/9DpNJ\nr8RIgBDnu7Kysno7rI
U4n531xsNDhw6xadMmrrzySq699lq/KRuEOB+1bduWSy65pLmTIUSTOOuj\nmzweD6Wlpbz33nts3bqVu+++29t5J4QQ4txy1msSKSkpTJgwAYCePXtiMBgoKio66eeasOtECCFE\nHZq8JlG7cB83bhzr169nwIABHDx4ELfb7R1bXh9FUcjLKzvpei1BYmKk5EUVyQsfyQsfyQufxMTI\nM/p8kwaJe++9lw0bNlBcXMzo0aO58847mT59Ovfffz+TJk3CbDZ7JxgTQghx7mnSIbCNTa4MdHKV\n5CN54SN54SN54XOmNQm5NVIIIUSdJEgIIYSokwQJIYQQdZIgIYQQok4SJM4zTo+LVza/T7GjpFG3\nuyn3Z37I2dyo22xqdredV356jzJn+clXPgUbjv/ITye2Nuo2m1qlq5JXfnqPSldlo253bfYGtubt\naNRtNrUyZzmvbH4fu9veqNv97tg6dhTsbtRtng8kSJxndhXu4dM9K1ifffJnRp+K/+54i1d+ebtR\nt9nUtufv5NO937Ix56dG3e5rO99l4fY3GnWbTe3nvO18uvdbfjyxpdG2qWoqb+36kBe3vdpo2zwb\nfszdwqd7VrDlFIJbeXk5S5Z8UOdyl8fFu3s+4vktCwOWzZ59NxUVDb9QWbRoAe+8c/6cXxIkzjO2\nqqujEmdpo23To3q8f2eX57CnaD+bT2zD5razLf+XRttPY7N5qvLC0Xh5YXc7vH8fLctmT9F+tuRt\np9JVyY6CXY22n8bmPS8aMS8qatRKjpQeY0/Rfrbl/0K5s4KdhXsabT+N7XTyoqyslCVL3g+6TFVV\nSp2+4bQHS46wt2g/2/N3UuIo47d/voPw8IgzS/Q5TJ5Md55xePRCrDELgzKX7yro8Y1PBSy/reeN\nXJDQtdH211iqC/STBUxN0xr8dLfSGtt64oenA5bf0/c2Osa0O4VUnh32Jjgvam7rH5ueDVh+/4C7\nSY9MbbT9NRbvb+QULqReeOHfZGdncdNN19C//yCGDBnGf//7EvHxCezbt4e//vtJDr69FVeJg9+5\nryVhSAbx/fRj3/nUOhYufJ1Qzcp9982kR4/ebN++hcTEZJ54Yh4Wi6XO/e7du5t//esJHA4HaWlp\n3H//w0RERPD++++wdOliTCYTbdu245FHHmfz5h959tl5Veeywv/930t+j6ltKhIkzpCmaVS6bYSb\nw87K/uwN+AFUuioJNYX6P6rT40QDQoyBJ+zJCpYiR3GD0na28sLudLN8/WGUVjag/vS//90vfLbu\nOM/eNZKIUDMADo8TBbAEzYv6b8CqeUVZH1VTsbvthDVSXmiaRoXd7T2G2nwFY93pq3BVBnw3drcD\ng2LAYgzc7skK2XJXxcmSDVTnhYMwc8MKtPdW7OOHXScatG4wFW4LDvcoVmwzs/HrdQAMyEziyjEd\nfevUyovbbruTAwf38+LLr2I2mNi8+Ud27vyF119/j+jYBP67bgWtL++KMdSM6vKw98VNRHdLxBRq\nBgUq3TZCjVaOHTvKX//6d/70pwe4/e57eP29pfz2NzP88sKtur2vH3vsEWbN+hO9evVm4cIX+e9/\nF3DnnbN4881X+eCDZZhMJm9T1jvvvMG99/6ZCy7oid1urzf4NCZpbjpDa7K/Z/bqR065KeLjNQf5\neV/+Ke/P4XECUFpHYXao9Ah/XP0Inx9a4ff+X7//J/ev+Rvf/nSM1Vuy/ZadLEgYGnCafL8jh/9b\ns5TZqx9hX/HBk65/Jj5YuZ/l6w+zeV8OUHfBvadoHyvdr2BMPszOw75JJB9c+zgPrw8+HczJCsaa\nP/D6fHrwK/64+hGOlB1r0Pons2brcWY+s7rOc8ZRVasqDfJdbj9YwIsrVzB79SOsy97ot+yPqx9m\nbpDaI5w8YFYHppNZvO8T/rj6YXIqcimtcFJS3rDPna7qSSTUOiaT2JSzmdmrH+HH3J+976mayvGK\nHP616d/e97p1605KSgqb9+ax+VAWeeuPsvv5jex96UdcpQ6cBbaqHYLNpTdxpbRKJTw2FVXTyCqP\n5ONv/fuI3t71IV8c/pZKVyUVFeVUVJTTq1dvAC666FJ+/lkfPNKxYyceeeQBvvzyMwwG/TG2PXr0\n4tlnn+KDD96hrKz0rD0mVmoSp2HBxzvYe6yEf/5hqLcw/jF3C93jMxv0+bJKJx+t0QvSRX8ec0r7\ndtRoYlE1lez8SkLMRr79KYvIcDPuhJ0AfHLwCy5u53sUZ/VoqDdW/ozmDGNEr1Q278nj25+z6DOo\n/hExFQ0YMbNg2S9Y+3+PYoCteTvqbJL5YuMRsvMruOHizAY3AdWWX2KvSpcNrHUHuR9z9R+oOW0/\n+SU27/uV7uoaSBnRIf5TFgQrZGtqSF4AfHZIn/5+V+FeWkemN+gz9fl84xEA3vxyN+1aRREd7n8V\nWV3DLA4S5J56dwuWjpsxxsFnB74lM6IncVFW3KobVVPJsxVQ6bIFXOmXniRgNjQvvj26BoADJYd5\n+TW9M7m+8/7KMR39rvpP1YKtr7Ilfwfx1jj+NvTPActXZ30PwMpj6+iXrBfQDrd+8XWsPNt7IWa1\nWvn+lxxe/mQnDsMhyg8W0emW/hhMBvYt+gnV7evLq3BV6udihYc5C77n4RsGgKKA6vFr7lx3XH82\neHFVAK5rVqR//vMZfv75J9asWcUrr7zMG2+8z29+cwNDh45g/fo1/P73N/L008/TunWb086nhpKa\nxClwq26e2byAH078SEGpHYfT472aCjGGeNf75sh3vLj1Vb8TQFU13B6VnIpc/rX5GZRQ31Xatz8d\n4+/vr+TR7+dxsOQIa7cdx+HST8D/W7KNj1Yf8K5bXRiomkqFq5K/vPYdf1n9L778ZQuLt6zh88O+\nGsTS/Z+xL6uEQ8d9P3Zr7+8wZeyiuNzBc4u3sf1AIUeK6q/RbMr9mbkb5/sNNV26/zNe/+U9lu7/\njEXb3gJAMah6Xph8efHujs94euN/ASgstfPuin2s3nqcrPwKjpZl8/iGpzhRmee3v6Nl2Ty2YR7Z\n5Tne93KLKtm63z+dmuL25ond7aDAVsjcjfM5UnqM9cc3sSZ7g54uk4tNRWsB/5rAnLWPsmz/537b\nDFbI1rT++A/8bd1T/HI01/veB3s+5u3di/lw7zLe3Ok/QsZa47xYduALXv/lvaDbPVhymMc3PEWB\nzVfj+WTdIf65dAWPbZiH26TnfUGpg4cXbQz4fHXBVu6swKN6yK04weMbniK7PAdj4hGMcXp6C50F\nzPnoTUBvgqz2x9UP89lB/+e6nKyG+d2xdTy56Tm/zv63d33Ih3uX8fbuxby3Z6nf+jWbOt/asZS3\ndy8Out09RfuYu3F+wP73Fu3n8Q1PUWiv/9EC3tq2sxRVVckqP151nuWz4sh37CvRL9AOlBxi5VH9\nvDBZTagO/Td376qHvKPElq87DIDqKccYasZgMmDPq6DymH/aVmWt48Utr+Fw6ef
XL4cKMcYfxxBV\nwKJt7/DRvk/91jcaDISHRxAVFcX8T57jw73L+OKLT+ndW3+ee25uDn369GPUlRPILTpBXkk+WVnH\naNeuPddccz2p7TKY99WzJ63tNQapSZyCnIoT7Cnah6U92PLTKat0en8gZoOvTXfxvk8Avc020hKB\nzeHmLws3UlLhoP3wneQ78jC3dePcOQiA17/cQ0i39RgqS3hr2yfsX5OJ060yuFsyP+7O48fdeVw6\npC1mk4Gf9h2HcH0/eeXFmNP2Ywgvw9JpM4rZ6ZfeLw9/y9KNIaCohA7wvW9udYgdBwu9r/fmnIB6\nmjezKo4DekHWM7G7d9t+DON8f6tGlq8/xMSBrfkuV1+v0mnn281ZvjxadYATSV9S5D7Bkn2f8vue\n13uXvbztNfLthXx5+Ftu6P4rAF77fDe7jhTx9J3Dvc0ImsFX4Jc6S1m6/3Oyyo/z6s53yanwFeIA\nOZaf0bRf+RWMAJ8fXsGkDhfhcquYjArlzvrb2bPK9bx4atkqXv7DlWiaxrfH1vitM639ZO/fRsXo\n21dV7eKarlfgdmuYTAYMVVeYL29/g2JHCV8c/oZfZ16h59F3B7D2/Rqlwo0lej/kdtKPtcL/GMBX\nw9TQKHOV89buD8muyOHdPUuwtPNv/jOm70ZVNW9hWu2Tg18wvvWFAJiMBspO0udwtFxvtnzmkzUc\nO2TmsVsGegNztRmdfHlRZvPtb22uXjhf3XlqQI3y+S2LcKluvj26hovbTCTErOfh05tfBGD98U1c\n2m48ALuPFHGi2MaInr4O9OoLKZfqZua/vyWmzyaKXAUs/Gkxx5z7/Pb1/t6lDE8dginUQljraHb/\n3wYiO8VT1qmQeHcYefl6HkR2TKR4j8bu5zcSkhBGeEaNxzIr+nmhOt1g0M/N3ccKMISXgtnDT/l6\nE1JbbaD3I9UjCufMeYTbH7oV1a1iNbbnuXlP4Ha7+dvfHqKiooKs8mzihqSyrWwnP727nq9WriUq\nwoojvpLWrbuy5siPXNppNJt2nWDBsh08evMgkmMbt09QgsQpcKo1flRmOw98thBTsn5SHMwug05Q\nXKO99VD+CY45NnAiK4SC0qorPbu+XDHbMbfZQZG9n/66qmbhcIIhooifStYRltMdU9pe3Fkd2X20\niHatoqh02akudnLKCqGqoNRUI8Eab4xJhzHGBnYCHsotxZSxG09eGnllpZjiT378Jc5SPKqHd/d8\nFLDM0vlH79/rtmdxfKf/8v15eazKWYkhIhq1PI6f9+UTYq3EEAZF9iLe3r2YSe0mEm4OI9+uB7Cs\nvEp2F+7jYMkR9pcWYmxVRG5RX28h6cHlS5ujzFerMwSPeB/t/oYDFYFDNw8cL+KJFW9zUYcRVIQ2\nrAlFMTsoLKvk2R9eC1h2z/JnMMbofwdrt88qLuDR5R8S7kjjst59GNsv3VvDyass4I1fPmRC+jhA\nQzFVfb9oGKLzUKwVaPZw5q/8kLEZo+nZQf/i7DX2U+Io9QaNEEMIwXy460t2lwQOb571/CqsbfZx\n54gplDkaNvZ/b24uqjOR//z8SsCy/+54y/v3m9/8ArT2W17hquTrI6vol9yLjMg0QC/cAbZnH+az\nH/7Db/tNZc+RUm+7h8vjYmveDoocJbz31TGchnKslssYkJkUkBc2tQJPeQWGEDiUXYkpITD9d727\nEEvCCdpc0d3v/ZFdb+azQ1/iyu6AMcRD+2t7BT3+rvcM1f8IM9P+sutRy9zsMn1B0jD/Y/3Pj29h\nSoKUC9sxrKsejDt27ESnW/rrad00jle2f8rvYy7i+edfBuD2FbMBOFh0lMjR7chQ+4ECof2/AuBY\nYREbsn7m5bVb0KLMvPnTl8wae3nQdJ4uCRKnoGYbrDltH6YkX6dkYUUFJ4pt/PmF9YRWXTA89+UK\nzK33YHEkAnowsLtcYAKD1YbBepR3dn0EhlQUo95UU+YuIaTbIQ6ocOLQLsxpZaglCRzMLtVHthh9\n7aB5FUUo1a89vivWmixtdwZ9/5eyzZhbHcQYdxzNHt6g4/9h3xGiLJGsrXW1CGCM8jUBFJTrV18H\nskuh6nlSH21ZiydxN0nx6VT8kkxZpQsUPcAeLc/maHk2HtXD5R18z4o+WnKCZ39eAICSEYrZamNP\n3lGKyvRCwK25vO2lr3+zBWua/v3UbO6q6evsL4O+P3/VYsytDvFVXgGt4qKCrlObYnHw+sZvyVMO\nBCwzxviaz9b+coztG7fSKsF3dffNgQ2YUw9gK6zgza/CGd6jlXfZnuL9wH427shHsfj6MjymckK6\n6PtS7WHsUyvZvszC87dfQojZSIXTd3dxsaOU/LJyUPxruDWtzAn+yGBH/C94Ig7z+MpFGMwulAZc\nlCoWB8bEoxy1B+ZFzZv7vOcqqve99cd/4KsjKylxljI67lKS43z9IsfdBzElw8vfL8dTmIK1p/7+\nCVs+Xx1ZqW+pVSgWq421u7oxIDOJVT9nkVdS5i3ZFIsDxVhV4/TUUdyl7CWwbgafHfsUU0o2isUB\nJleQNQIpZgemlEN+v4dqNcuL6lpczYsIU9IR8i17WHUskl9lTvf77M8F+gwAxqSuqGUx3vcLHYW8\ntns9lnagOqzsx065czwRlob9phvC+MgjjzzSaFtrYpWVwb7Ks+dAyWG25usdb4Zw/zZJm1bGVxty\nUEwuTAl6NVwJsaGYXHg0D1HGeLSoXNyR2WDw/UgqXXYcDg1jjN7e7tZcKFVVVqemH6/mslBuc7P8\nl42Ya5xoRwrz8eBGsThQ7WEYQho+DYGt3IwSVqIX1KoBxXLyvM0vUNlS9JNfM08wSkglmtuMS7Hh\njDwKQKm7BMXkIizESNu4ZE549BpO9bGCHkC//fkITqte81EMqrdgqb6iLiyAwjIbhpgTmBJ9zVfl\n7grsaiWqwYmzPAynoeFj5F12M4awcjTViN3lRDOePC80l4Vi6x4weOpdr9RVQk6ek30ncjDF630s\n+bZCPIoTNAXNGcIvBbsoVI56gyaAR1XB6PYWNqqmevNAqSqwNEcoTs3OcechNuf7RuqU2MvJrSxA\nMahY3LGUeAoanBeax4jBagO3BUxO7z7rz4sQTMlH/L7LYBSLHTx6Xlf3kezNO45mcIJqYOmqY3y+\ncxPG6Nrp1VDMToyR+lBsg6J4h996a1r2cBIS4J0f1uKO9J0XitGNIVyvpau2CO/fDaPvV3OEYbBW\nevvc6mM1hKElHORkYzKK7MWEmUM5VHrUOzKyurywGq2YFCM7Cnazq2hvwGcNFjuGCH0gSnGpSw9i\nVOWFAqnhKWSX5fHV7p8IDzWSEZ9yCsccSB46dAq+PrKKJfuWn/X9qg4ritlR74/Q6orHbm54YeAp\niccYXYBqCwODekoBpjmp9lCUEDuKUndeeMpivAVKQ1Sv7ymLwWCtQDE37KrxbNI0Agoe1R6GElJZ\nb4HkKY0NelVbF9UWjiG0Ak9REoaofG
8N95yiKX4BFcDgCkc119+Hcqp5obnMKGYX7rw0jAlZJy34\nm4OmKgHlQrgSQ4Wmn/9RxlhevmLuGe1DRjedgoYO+atLvJKBY08f7FtH0J0JeAqT/ZarDmvQzxlC\n7CgGDdVWdxXSHO7f9q1WBD6NynW8LVHGOAAUa9WxeMz1Bgj7z6Ow/zwKrUZzln3bUFxHO3tfR+YN\nDPbRevVI6EYXzzguib0ex56+eIoS/ZZrzuBNRgarDUXRSAlLqnPbhhD/7ynCEBOwjiu7Pao9tGqb\nVeurxnoDhG3zaD0vavwm7VuH48rq4H2tHAvebl0fd0GKngc7RhJybBCeEv8OIs0ZErSAMlj1AFHf\neWGw+heccdbA58m7jnX05rdiqToXFK3eAGH/eRT2LSP939s6HFe2b+iz89Cp36Xvzk/Fsacv9q3D\n6eIZj6fUP72aMyQgQADeAFH9nQYTHu3/G9GcgX1XriNd0NxmvfCtOhcUs7PeAGH/eSSGPaP93vvL\n4D8yMm2I97Vy/DTy4kS6Ny8ce/qilvs3haoOa9ALx+oA4cpqT6ZnwinvtzYJEqegooF3mNZU86Tt\nndaRSHcGwzp15IL4bqiV/gW5pyBwigPV7msUbu0aVOd+as+EqlZEB6wzoFM6mQn6j9gQot8rUF1t\nrYvmDNWX+CEEAAAgAElEQVT/ufUflKcwGc0WhafYV6ifONywdvwQzXe87aPbMHP8BC7t050h6T1R\nbf5z37jz0wI+H2uJ8/49o/OUOvdTu+ksxRqYr5rL4s2j6lFhgc0ctbisaM5QcJur0tgKzR7BbWP1\nTkjNY6TyRFx9W/AyeXzHO65rT4a36Y2zIozi7FjC8Q9qwfIiMdQXSK7ObHheVHcO16S5Qogx6kG3\nunmvZr9KMJozFM0Rhqbqpaf7RDqaPQK1TC/UFbcVQ3lyfZvwqvkbUUvjUIuT0OwRzBw/HrPH/zcS\nLC9q/kZcR+oujO2a/8WDWhl43mquENTKSL/C9+R5EUZFse+i5sKM4SSHJdI+uq2+H1s4tvyGnRf+\neRFPa2sHnrxxAjFqBqrD/2LAk19/eeHOaUduzplXf5o0SMyZM4ehQ4cyadKkgGULFy4kMzOT4uKG\nNws0txK7HiSc+3riPHBBwHJ3rm80g0kxMrHNGK7v6RtpkBgew5O3DuWGSzIZkJmEBf9eQa0yArVC\nP3E1txlrcRfu7nsLIYVdcB7O5OpBQ7ilx3WMib7K70q+pgszhhObeyFakE66CIsVax2dusE4dumj\nLtISw/l1l2l0CumD65g+DFOzReI82B37lhGgBu7LneO7ycegmXBldaBnuO/KKtri+4EO79kKzeWf\nLrU8mhBVL8Q1p4XBCcO4p9/vmdhmDFd1nkpmXCda20Zh3zGE0OIuQdPvym6HfcdgIq1Bri5VI3ER\nDe/cc+z0jSF2HuhBO1MvrulxGXfP6EnvVp0w5/TAsW243pZfy7jWo3wvPGZcWR3I0Hp73+qYlEy/\nLr6g2ybBv1allsZ5a5kxIdFc1HYsd/a+hQltLuSazCsY1aE3N3b7FTd1vhV3bkbQ9F/Udix/GjAz\n6LQsqEZaxTb8OciOX3wXK7+94BouTB+BO1uvTd13yQRMORdwRer1PH3b2IDPTmzju4lOc1kYlz6W\nXtG+mmiUJZKkmFCuv0j/Tnu39Q8KanECmks/BtVhxZXVAeeuAbiy2+Hc34P/u2kGkzMux75tGO78\nVgRzabvx3D/gbvp2DGyr1zzGOgeBBGPfMbjqLwXnvl70iBjo/b77JffCmNMdx85BaM7AVoKaNVDV\nYcV1tBOeE74y5OaL+vDAdf2Ij7bStXVsQO26Z2J3NLf+21PtYSQ5ejHAPEnPi329MCsh5BbZOFNN\nGiSmTZvGwoWBU+vm5OSwbt06UlPPvcnB6lNaNSTQU5jCr/sF/gA8RUmYNP1kGNyqP5M7XESneF9h\nGW2Jwlw1Nj40xMTYnv53lY7r0554RV/ffbwdtw28gi4pqTw44RpmjppCu1bR9Eq8gFsvGs2Q1L4B\n+7cYLUzpcAndEzuCGniiR4eFBi8k6qCWJtA5PZpHfzuIEe17cfewX9Exwfej9eRloDmCF7Ttw3wF\n99jWI7ip7xTGd/cNMYwO8QWJLq1juWxArbvV3RZaW/Uf0YiUkVzbcwrxobFM7nARI9P1YHPjsDG0\nj23NNf0D7941ahbcxzqhVcQQExaYxoGdU+nRNjHg/eAU1DLflbtakkTfiFGM7NqJnh0SUBQFa2lH\nvZYRZCBy/2RfQBiRPJJ0T18m9KiZF5H0aB/PA9f2487pPRjQwX/opOa2oBbrV/qXthvPpPYTiQ+N\nZUqHixmaqhew/VP6MDyzG+68wLu7oy1RXNZuAq0j0/1u7vPyGIkNb9jYejNW1HJfE1C/lJ5c0XlS\n1bFDZps4nvn1dYy+oAOh5sB9DWrVz/t3iqMPUztPJMnqazq8uF9nnrh1CKN66+dZp2T/gtyshGK2\n6e+5j3bGndUJzRmK+1gXopztCbEYubDtIDRbJJ4TgQEzMTSei9uOIz0ylVCTr+D22N3kbzwGqpGo\nBk6al2hNJC1Mz++iA2tw5SUwIX0CMSH6xY1BMeDJbcvR1Yvo0yrwvHDXSJ/7aGfcxzv41ajbJSRi\nrJp646qxnRjS2f/u6mtG98BToo/p7RcxkocvvoabxvfjotYTGNiqNw9e15/fXdatQcdSnyYNEv37\n9ycqKrBKN3fuXGbPnt2Uu24Sdre9KnIb6N0xAeeBC3Adb+tdrrlCvEMOnarenhlr9TUd1CwYAdKi\n/augQ7q05t5xk8iwdqBP0gW0T9XXj4uy0qO9fzt1sKvjtlGtMRtMXD6iHWN6tw1YnhQdGXQiN4BB\nyb4r5R4J3fDs0a8WQ0P8awmpCXqB2yY5kumj2gNgtRgZETfB7yqxf4cMbz+GBzeDu6eQFOYbpB5l\n8b9ybZ/oX2DfNLEXv+47ngviM5nQZQDBJMWE8vQ9o2mbFNjOnmpNp/r0DgtSWA3skkqYOXgfUBK+\nK7wL4rsywBTYnFM7Xyod+iibET1bcUWnyd6bvQCiatSaYqPM/OWGAXRKTA1Y3iEtmj6dEompdZ7E\nhUXgzm1N97jMeqd+iQg1c8flvQPe7xjTznvDWkiQIHHLZb2wBAsewAU19ucpTmRm35v58zV9uWRw\nG349rpN32dSR7bk6yFQal3e4xG9Yc3SN731YD72wbxXh++7jQ/2bSaNqTZvisBm5od/FhDvTCPf4\n1xTiIvVjsFTdfKcFuVDqUDMvatSqPTYXBRuzQDXSvU3w/q4orSpgadA9PpPf9fwNf/vtQB67eRCu\nnO/p3ymW1sn+zaa3XX4BUWEWRvdJ49J245na4TLvsh6tfenXtKo01xiOXvO8iQg1k5nu33wXbg7D\nfbwdnqIkojTftqaN7MDvJnUnIymCzDaBv41Tddbvk1ixYgWtWrWiS5fgTQTnMpvHjuYxcdnQNsR
G\nhnD7qEvILark43J9UrBhXdpy1LoHm60Mp0cPEgbFF4drF4yt4xOgxs2wVpOVWGsMfx76+5OmRfUE\nxvekqnbq0BATaXHRUKsp1WwwB5351J2bQaeu7diQq88rc2vPG3js500cKC7lRLF/dTU1Xj+JI8LM\nXDSoNf0zk/zu8PyialqQoZltWZZnxoXHe5ezuUaAql0QJkf6t8P3aJNCpCWC23rdVHcmVAl2TB0T\n0zhgVOjRPh6rMbBJM8RkCRowx2SMQHWGcCJ3PwC39bqRNVuP8x07SY4LI7ewsupY/PPfVhUkwqwm\nLswYDsDyg/oNT5E1xqxXXzyEmnxBPrrWeRFaK3g98KuhoBmJiTh5U2HbpFjwv6mYpDBfIRwsSMRH\nhpNlD8yLi9qOxaN62F6wC0014NzTj/bTWkMMdM7w/74mDW0bND3j24wG4KP9+rQU1hpX7waT3u6f\nFOEryOLC/M8Lk1KroHeb6ZPRgT4Zd1FYaueTdYcoLHOwdX8B7VNr9cMFCRLJfnnhO2+Of70fZ5GN\nrG9e5aeSNjAklBNrjlC8IxfNozFw6BC6TEjj+2NZZL2/i0r1GBvUL7n++pspLMynoqyIH5bPZ+/a\nGJ555j/e7V7QLp7UhHAsJiOXtBvPV199zu4X9PuMLhiXAfGgqRrH132GpaKcUKuJskojiUMyWLZk\nid904Rf/fprfsViMFq4Y2Jf3V0YzaNiZzw9Wl7MaJOx2Oy+88AKLFi3yvncejcDFoerjvKunCejd\nSb8yzv2lH9sLdnLjhT35pdDK81sW+rVDD08dxPaCXQFBIikyBqshDLtaidUYQmxIYGdzXRKjwqFW\n2RcT4vvhBmtW0DQ1aHNT/05paJr/SJZRvVI5kF1KXJR/gZWaqBd4kaFmjAZDwBQAvRN7cLj0KKHm\nEG7ocSUvbXuNETVGeQxI7sPBksN+hSRArDWWMFMolW4bEebwU5pu3BLkhrGksASeu2sgRqPiDX41\nGRVj0OASbg7HYFGgxqweQy9IoazSyYCuScz+z3oAbE7/+wc6pkWz+2gxrZN833G3uC4U2IswKAau\n73Y1r/7yDgNTfM2EvRIvIK8y3y94AqSEJWMxWnB6nMSERBMdFtrgyRCDHVPNGlywPqkQoyXoeRFu\nDvNODKmgMLBr3SPKTqZDdDtcVQHy6i7TeGf3Ynom6E1ukeEWPMUJ+n00VjOL933C5hPbAPzOS01V\nCOm1iofW1Zi7KgrUCI3YeA87Qow8tE7Pp5ThGk63G1utAVo1A2bN30ir8R2wn6jgv68uZEPuJj78\n5kMchZV0/v0ANE3jxNLjxOxPpPRoAdaYMP47T7+TvLKygrCwcN59922ee+7FoC0n1fLz83nhhX8z\n8s5LiYyIZNfrW+macgE/VmynbXQoz73yKgZF4R9rn8YaFsabz/pPF+4xaygoaGjeAQgXDWrNhX3T\nsFqarig/q0HiyJEjZGVlMWXKFDRNIzc3l+nTp/P+++8TH3/yeSESExveudbYVE3FqTrQPGHExYb5\npeXeUTd7/05K6s/ozP5+n52ZeEOd231txrzTSs8V4zP5oNaDtNITEr3piqoMbI4Kj7R4O4Nr6tI6\nkXCr7weTmBjJ1LGdiYsNo0fHBOKjfdsaGGml87pDDOuTHvT7mDPmD96/xycOYXy3IX7L/zj6ljqP\n6ZXpwaesPplWyYFV6tZJyaSn6UEzrDwwiERFhxBvCPxBJ8fGUOH01Z6qj/G6SfpAhTk3DODtL3cz\nYUg7IsJ8BetDNw/mx125XNgvw1ugPzL+bu/ySxNHcWmPGh3YwANjbq/jiCJ544pn6lhWv7TkwFE0\nHVulk5igH4fxROBFWWxsGDFB+pZaxcXjKKq6i91i4qFfDQlYp6H+fpGveXla4nim9fY1x4VHWnHu\n0X8z6VfHEFZhwWioDopGEsPi0DQoKncQGm6qsaxqDYOC2WQIeM9kMmOrNWo9M60NidF6XriPBd40\nmZYcR3hZCGX7CynfX8ie/2xE0yDaGEFFUQmhyRGc+Oogr722gFGjRtG/v55ugwHi48OJiQn8TZjN\nRmJjw8jOPsDQoUN4YsajAHxQ8QH79+/nrdue54rPruClBc8yatQo/jnlQRRF4Xfv7+Lvf3+YcePG\nMW7cOMLCwnj3qucbkNuNq8mDRM2aQufOnVm7dq339ZgxY1iyZAnR0Q27gm7Om+mqH4mIx4Tb4W7W\ntCQmRlJQEDivTqIh2Zsui0sv2N0FKWSkWjjuOEKoO5KcysB5nBw2D1FVtZBu8V282+jeOgbVGXis\nf/61fjXc3Dc3gp4X+fmBeRGlxnrTF1o1jHJoqwHsKThKvjMHg91KaVngyA9bhYd4q3612S+pV8Ax\ndkyJ5KHr+mOrcGCr8B9336NNbNC0nC2JiZEUFQQek8UZ7j2OKEUPqGMyRrA9fycnbPm4yg0Ulgam\n21WpkWLR+076J/Vusu+7ZhlRUWbjorQJXJR2ZuP7ExMjOZ5bxF0r5/i9b7BZyat6/kiCUf+eL2k7\njq926M2kFSUuCstKQdNIG92RqD567emuPr+nwFbIG7ve56a/3Ul8bgT/+Mc/GThwMDfccDOqqlFQ\nUI7LFdjE5XJ5KCqqpKSkEpvN6c3HsjI7lZVOHA6FhQvfZMOG9fz3v6+yZMnH3H//X3j88Xne6cKf\ne+7fvPHG+6f1DIkzvbhu0iBx7733smHDBoqLixk9ejR33nkn06f75iRRFOW8aW6yVwUJzW0ixNLw\nIXJnw+UdLqFnQjeSw33NAe2i22DfPhStMoJx3buSlqGQHpnK4bKjAZ83Goy0i27DnwfcRXI9N6md\nD67oNJnu8V38bhrLjOvEnwbMJD0iFafHRYG9kMSweOwnAm8iNBmMdI3rzJ/6z6RVxJlNZ9AcajZL\n/brLdDrHdiTC7Ksl9Erozp8GzCQjIo1L2o2jyF5CdEik9/w2G8zeZiGTwUSPuM7M7n8naRHBh5M2\ndppNxsYbS2My+Iq367peRYeYdn79UANS+tAqIpmMiDT6RffkD69sw2qyYnc7iOwYT/7KI4R3j8do\nMWIrrqBLdHt+1/4auqR0JrRXKKGhVj77TJ+BISwsnIqKCqKi6r7g7dbtAp599ilKS0sID4/g66+/\n4IorrqakpBiz2cyoUReSmprG3//+V8A3XXiPHr34+usvsNkqm+VZ2k0aJObNq78p5Ztvgk8ydi6q\nflANHrN39MS5IsoS6RcgqoW4Y7HjIToslIxIvRmiui8gMTQeDci3FXgL1GA3Wp1vokOi/Nqdq1U/\n+MdqCvEWeNWFZ5vIDPJtBVS4K70d6q2jmq4j8GyJsUaTGObfjKsoijcvQk2hhEbo50N1f1mH6Lbs\nLtqHhkakJRJFUWgTFfzei8aUnhiuT/rYROKsMSSE+jfFGRSDNy9S4lPo3asv119/NfFdWhE5OI5E\nezRbXtoEwHPxOfztkb/jPGHj1odvwlDVnHXfffcDMH
ny5dx330wSEhL9Oq7BFwTj4xP4/e9v5847\n9YEpQ4YMZ/jwkezbt5e5c/+KpqkoisKtt97pN104aFx11TXNEiBAZoFtsOrmJs1j8nZcnyvqmvX0\n4RsHsHV/gd8wuD6JPbi6y1R6JlyAqnnYUbCLXgndg37+fBT0PoA6DEkdgEtz0z+pNw6Pg91F++kc\ne/pPRDvXBBvJVJcxGSMwGUwMbtWPMmc5B0uOkBF59u5j+ttv655NoDHU9Rup6S9/0fsKXKqb1Vnr\nGTZqEEW/LeJYWTb9U/oAkJqaxsCBgwM+O336VUyfflXQ7T777Avev8eNm8i4cRP9lnfs2IlFi94I\n+Fz1dOHNTYJEA9mqaxJuE9ZzrLnJbAj+NSbHhjG+v/8oIUVR/EYbDU8LPOHPZ6dyR7lBMTA6fRgA\nEYQzNLRhUyecL04lYBoNRu/Q3VBTaNDa2PnsVPLCbDAxJmMEACnhyaSEN2x6kf9VMndTA9WsSZxr\nzU1K0McNtUwmRa57qp1KTeJ/neTF6ZMg0UB27+gmMyHmcyvbGjqGviWQvPAxGyVgVpMgcfrOrdLu\nHFZUdVOR5gpp0htXTsWENvrso9Wdby3ZqKpmo8TQIM+nbGEGt9LH7tcc1dRS9U3qiclgqnM6GnFy\n8tChBlqw9VW25O/A9tOFvHj3BMym5mtySkyM9OaFqql+U3+0NJIXPpIXPpIXPmd6n0TLzblTdLQ0\nB8VjRvFYGnUs95lqySd/bZIXPpIXPpIXZ0ZyrwFKKuwU2AtxV4bTNiVa2r2FEC2GBIkGyC4tQDFo\naI5QxvQ9/284E0KIhpIg0QA2pz4RWKg5hCEXnH9TNQghxOmSINEAdrc+XUByTDgGaWoSQrQgEiQa\nwO7yTXgmhBAtiQSJBnC4q56sFuThNkII8b9MgkQDVNck5A5WIURLI0GiAZxVfRIWCRJCiBZGgkQD\nODzVQUKam4QQLYsEiQZweqQmIYRomSRINIDT7QYgxGQ5yZpCCPG/RYJEA1TXJELM0twkhGhZmjRI\nzJkzh6FDhzJp0iTve08++SQXX3wxU6ZM4c4776S8vLwpk3DaDpcepdJVCYDLo9ckrNInIYRoYZo0\nSEybNo2FCxf6vTd8+HCWL1/O0qVLadOmDS+++GJTJuG0fPLjLzy56Tn+svopnl+yDZeqBwmL1CSE\nEC1MkwaJ/v37ExUV5ffe0KFDMRj03fbu3ZucnJymTMJpWbJ2NwA2Stm0O88bJKzSJyGEaGGatU/i\ngw8+YOTIkc2ZhAZxVt1xHWaRmoQQomVptjGd//nPfzCbzX79FSdzpk9Yaij/ViUNZ1WfREpi7FlL\nw8mcK+k4F0he+Ehe+EheNI5mCRJLlixh1apVvPbaa6f0ubP1+FLFqPpemFzeaTls5U7ylOZ7hGq1\nmo9mbOkkL3wkL3wkL3zONFg2eZCo/Qjt7777jpdffpk33ngDi+XcbOM3GHxBQrHYcHpcmACzzAIr\nhGhhmrTUu/fee9mwYQPFxcWMHj2aO++8kxdffBGXy8VNN90EQK9evXjkkUeaMhmnTFVUb2eNEmKH\nqqAhU4ULIVqaJi315s2bF/De9OnTm3KXjULV3N4gYQipBKU6SEjHtRCiZZE7rmtRNQ0Vj/e1ElKJ\nUlWTMBuMzZUsIYRoFtJ+UovHo3qblwAUayWKUR/dJM1NQoiWRkq9Wlxuzdu8BGCMLvD+LUFCCNHS\nSHNTLW6P6m1eqs2gSHYJIVoWKfVqcXtUMOh9EjVH75qVc3O4rhBCNCUJErW4PKqvucntG800NfU3\nzZQiIYRoPhIkanG7fc1NmttXe+iakdBcSRJCiGYjQaIWt8fXca3VqElYTSHNlSQhhGg2EiRqcdUc\nAlsjSFgM0ichhGh5JEjU4qnZcV2juckiT6UTQrRAEiRqcXlUlCDNTTL8VQjREknJV4vbrXmbm6YP\n69rMqRFCiOYlQaIWd40+iQhzWDOnRgghmpcEiVqq75NQMMiIJiFEiydBohb9PgkPRsVIiFGChBCi\nZZMgUYvbo4LRjVmxYJbnRwghWjgJErW4PBqK0U2IIQRFae7UCCFE85IgUYvL7QGjG4tBmpqEEEKC\nRC02pxPFoGE1WkkJTwagT2KPZk6VEEI0jyZ9is6cOXNYuXIl8fHxLFu2DICSkhLuuecesrKySE9P\n5+mnnyYyMrIpk3FKyl2VYIBQs5UoSyT/HPFXGeUkhGixmrQmMW3aNBYuXOj33oIFCxgyZAhffPEF\ngwYN4sUXX2zKJJyyCqcNgDBzqPd/udtaCNFSNWnp179/f6Kiovze++abb5g6dSoAU6dO5euvv27K\nJJyySpcdgAhLaDOnRAghmt9Zv0QuLCwkIUF/NkNiYiJFRUVnOwn1srklSAghRLUm7ZNobImJTd93\n4dIcACTHxZ6V/Z2uczltZ5vkhY/khY/kReM460EiPj6e/Px8EhISyMvLIy4ursGfzcsra8KU6aqb\nm9x25azs73QkJkaes2k72yQvfCQvfCQvfM40WDZ5c5OmaX6vx4wZw+LFiwFYsmQJY8eObeoknBKn\nqtckQk3WZk6JEEI0vyYNEvfeey9XX301Bw8eZPTo0Xz44YfccsstrFu3jokTJ7J+/XpuueWWpkzC\nKatubgo1SpAQQogmbW6aN29e0PdfeeWVptztaXO5VTSDC4AwmSZcCCHkjuuabA43mKqChElGNwkh\nhASJGmxON0p1kDBLkBBCCAkSNdgcbjC6QFOwyrMkhBBCgkRNNrtekzArISgyT7gQQkiQqKnS4UEx\nurEoMrJJCCFAgoSfSrsLTC55bKkQQlSRIFFDudOOYlAJNUqntRBCgAQJP2WOCgDCTHKPhBBCgAQJ\nP2VOPUhEWCRICCEESJDwU+4sByA6RGaPFEIIkCDhp8Kj1yRiQ6ObOSVCCHFukCBRg60qSMSFRp1k\nTSGEaBkkSNTg0CoBiJUgIYQQgAQJPy7FBkifhBBCVJMgUYPHoD+VLtIc0cwpEUKIc0ODgsSnn35K\nebk+8ueZZ57ht7/9Ldu3b2/ShDUH1WgDjxmz0dzcSRFCiHNCg4LEf/7zHyIiIti6dStr1qzh8ssv\n57HHHmvqtJ1VTo8LzVKOySn9EUIIUa1BQcJk0h9gt3btWmbMmMGkSZNwOBxNmrCzLacyFxQwu2Oa\nOylCCHHOaFCQUBSFjz/+mOXLlzNkyBAAXC5XkybsbMsqzwEgxCNBQgghqjUoSDz44IN8/vnnzJgx\ng4yMDA4dOsSgQYPOaMevvPIKl112GZMmTeLee+/F6XSe0fbOVKGtCIAQTUY2CSFEtQYFib59+/L8\n889z/fXXA9C2bVseeuih095pbm4ur7/+OosXL2bZsmV4PB4+/fTT095eY3B53ACYDKZmTYcQQpxL\nGhQknnjiCcrKynC73fz617+md+/eLF269Ix2rKoqNpsNt9uN3W4nKSnpjLZ3plweDwBmo7FZ0yGE\nEOeSBgWJd
evWERkZyZo1a0hOTuaLL75g0aJFp73T5ORkbrzxRkaPHs3IkSOJjIxk6NChp729xiBB\nQgghAp1S28oPP/zA+PHjSU5OPqNnQJeWlvLNN9/w7bffEhkZycyZM1m2bBmTJk2q93OJiU3XX2C0\n6McTHmpt0v00lvMhjWeL5IWP5IWP5EXjaFCQiI+P58EHH2Tt2rXccsstuN1uPFVX3qdj3bp1ZGRk\nEBOjjyQaP348mzdvPmmQyMsrO+19nkx5hT6kV3VrTbqfxpCYGHnOp/FskbzwkbzwkbzwOdNg2aDm\npnnz5tGxY0fmz59PdHQ0OTk53Hjjjae909TUVLZs2YLD4UDTNL7//ns6dOhw2ttrDG5V77i2mKTj\nWgghqjWoRIyLi+M3v/kNBw8eZN++fbRt25Zp06ad9k579uzJxIkTufzyyzGZTHTr1o0rr7zytLfX\nGNyq9EkIIURtDQoS27ZtY+bMmVgsFjRNw+1289xzz9G9e/fT3vEdd9zBHXfccdqfb2zVQcJilJqE\nEEJUa1CJ+PjjjzN37lzv3dbff/89jz76KO+8806TJu5s8kiQEEKIAA3qk7DZbN4AATB48GBsNluT\nJao5eFQVkD4JIYSoqUFBIjQ0lO+//977euPGjYSGhjZZopqDt7lJgoQQQng1qEScM2cOd911FxaL\nBdAn93v22WebNGFnm0eT5iYhhKitQSViz549+fLLLzl48CCaptGuXTsmTJjAypUrmzh5Z4+3ucks\nQUIIIao1uEQ0m8107tzZ+1rTtCZJUHOprkmESHOTEEJ4nfYzrs9kWo5zkaqpaBqESE1CCCG86i0R\n9+3bV+cyt9vd6IlpTh7NA5qC2XTacVMIIf7n1BskbrnlljqXhYSENHpimpOqqaAZJEgIIUQN9QaJ\nFStWnK10NDsVVa9JGCVICCFENSkRq+g1CWluEkKImqRErKJR3dwkE/wJIUQ1CRJV9NFNUpMQQoia\npESsokmfhBBCBJASsUp1kDCZ/rfu/xBCiDMhQaKKhgoYMBokS4QQopqUiFU0RUOR7BBCCD9SKlbR\nUFE0aWoSQoiaJEhUU1SpSQghRC3NViqWlZUxc+ZMLr74Yi699FK2bNnSXEmpomFQJEgIIURNzTbl\n6eOPP86oUaN49tlncbvd2O325kqKfre1gtQkhBCilmYpFcvLy9m0aRPTp08HwGQyERER0RxJAcCj\n6eCf+HkAABL8SURBVA8cMkiQEEIIP81SKh47dozY2Fjuv/9+pk6dykMPPdSsNQmn2wUgzU1CCFGL\nojXDI+a2b9/OVVddxTvvvEOPHj14/PHHiYyMZObMmWc7KQDc/I9PKW27jAhXOot+80CzpEEIIc5F\nzdInkZKSQkpKCj169ABg4sSJvPzyyyf9XF5eWZOkJ7ewgtC24HY13T4aU2Ji5HmRzrNB8sJH8sJH\n8sInMTHyjD7fLO0rCQkJtGrVioMHDwLw/fff06FDh+ZIik7R+yQczv+t53YLIcSZarbRTQ8++CD3\n3XcfbrebjIwM/v73vzdXUlAUPTi43BIkhBCipmYLEpmZmXz44YfNtXt/VUEiKvR/65GsQghxpmQ4\nD2C16v/3bJ/YvAkRQohzjAQJwGPQh9/GhUU3c0qEEOLc0uKDhKZp3iARZWm+G/qEEOJc1OKDhKpp\nYHICEGk5s6FiQgjxv6bFBwm3R0MxOwCIkiAhhBB+WnyQ8HjUGkFCmpuEEKKmFh8kXB4NxSzNTUII\nEUyLDxJ6TcKJQTMRYrQ0d3KEEOKc0uKDhNujgtGNEXNzJ0UIIc45LT5IuDwaisGDQYKEEEIEaPFB\nwuNRweDB2HwzlAghxDmrxQcJt0cDgweTIjUJIYSorcUHCYfbhWLQMCpSkxBCiNpafJCwu/R7JEzS\nJyGEEAEkSLj1eyRMBqlJCCFEbS0+SDiqgoRZkXskhBCithYfJOwevbnJbJDmJiGEqK3FBwmnxwVI\nkBBCiGBafJDwNjdJkBBCiADNGiRUVWXq1KnceuutzZYGp6oHCZm3SQghAjVrkHjttdfo0KFDcybB\n29xkMUpNQgghamu2IJGTk8OqVauYMWNGcyUB8NUkLMaQZk2HEEKci5otSMydO5fZs2ejKEpzJQEA\npyo1CSGEqEuz3EG2cuVKEhIS6Nq1Kxs2bGjw5xITG/+hQIrRA25IiI5qku03lfMprU1N8sJH8sJH\n8qJxNEuQ+Omnn1ixYgWrVq3C4XBQUVHB7NmzefLJJ+v9XF5eWaOnpcJhB8DtUJtk+00hMTHyvElr\nU5O88JG88JG88DnTYNksQWLWrFnMmjULgI0bN7Jo0aKTBoim4lLdYACrWUY3CSFEbS3+PgmP6gbA\napIgIYQQtTX7rHYDBw5k4MCBzbZ/t+YBIFRqEkIIEaDF1yTc1TUJCRJCCBGgxQcJD3qQCLVIkBBC\niNpafJBQpblJCCHqJEECPUiEyM10QggRoMUHCY+mNzfJk+mEECJQiw8SGh5QDc0+PYgQQpyLWnyQ\nUBUPaC0+G4QQIqgWXzpqeFA0Y3MnQwghzkkSJBQVBQkSQggRTIsPEigqBgkSQggRVIsOEm6PCgZp\nbhJCiLq06CDhcqtgUDEoEiSEECKYFh0knC4PikHFKM1NQggRVIsOEjaX/nxroyI30gkhRDAtOkjY\nXfrzrY3S3CSEEEG16CBRWqk/utRskHmbhBAimPMmSLg8LrLLcwBQNRW724GqqWe0zRKbDYAQkwQJ\nIYQI5rxpjL9l8UNUqCVYK9NwhRTgMdoxYuKC+K5M7XQJiWHxp7zNUptek5AgIYQQwTVLTSInJ4fr\nrruOSy65hEmTJvHaa6+d9DMVagkA9rAsPEY7amUELruZLQXbeHLj8xTaik45HcW2cgDCLaGn/Fkh\nhGgJmqUmYTQauf/+++natSsVFRVMmzaNYcOG0aFDhzo/MzTqUoa37cH3ew6SEBnJ/7d390FR1f8e\nwN+7KynyoCIrGJKDOPhTygdMsOCiFwkMQXYn0IlxakbNMgt5SMKdUeeq6Uw4zNRtHDMrs7g5eUt/\nU/izudH4dMW1SLQGLdExWIpdEZAnZV32c//gsoayiLl4kH2//trztPs9n+Hw3u+ec76nuXEIrDc7\n8H3NEbQF/YatJ7cjL+qVe+pRNLY3AQD8ho28730iIhqMFAkJrVYLrVYLAPDy8kJoaCgsFkuvIZH1\nbDKuXGnGeH+/bvP/vSEIm//nv9Dm/xt2nP4Mhqdeg0bdebWSpa0OgGDMcK1jfXOrBaOGjcIjGg80\nW5uAR4DRwxkSREQ9UfzEtclkwvnz5zF16tS/tb121HBk/dsi2K+NRm17DTaWFuJi42WUmc9gk3Eb\n/uNkAX6trwQAXL5WhY3GbfjvC/8EALTYOn9uCvBmSBAR9UTRE9etra3IzMyEwWCAl5fX336fkLG+\nmD92IQ5W/wt1o2tR+NP2bsvfLd+JkCHTUWW+BowG/vePU8j4RxpuSC
sAQOvFkCAi6oliIWGz2ZCZ\nmYnU1FTEx8f3aRut1sfpsuUpkZhW8RgKSoogI6vQcU0LsQ4D7GoMefQSLtnPAD4e6Hr+nHXITdyw\nd4ZE6LggDBsy9H536YHqrRbuhrW4hbW4hbVwDZWIiBIfnJeXh1GjRmHt2rV93ubKlea7rtN24yaq\nLS34s74NlaZrmBg0Akfr/wWz6rfuK3YMATQ2qMUD/znvrXttvqK0Wp8+1cIdsBa3sBa3sBa33G9Y\nKtKTKCsrw9dff42wsDDodDqoVCpkZ2cjNjb2vt97+DAPTHpsFCY9NgpzpwcBAOzVk/Dlhc6QiBoT\nBaPFCGhsAABP9d//mYuIaLBTJCRmzpyJc+fOPbDPG+sV4HidEBLTGRL/z8+T5yOIiJx5aO64vh9h\nI0Mxd1w0IgMjoPX077Ys0MfPyVZEROQWIaFRa5AeluqYHj1sFK7e6LxDe8RQntwiInJG8fsklGCI\nzHG8fkTziIItISIa2NwyJP56uatCF3cRET0U3DIkACBxfBwAIHz0JIVbQkQ0cLnFOYmeJE9IwJxx\n0TwnQUTUC7ftSahVagYEEdFduG1IEBHR3TEkiIjIKYYEERE5xZAgIiKnGBJEROQUQ4KIiJxiSBAR\nkVMMCSIicoohQURETjEkiIjIKYYEERE5xZAgIiKnFAuJo0ePYv78+UhMTMTOnTuVagYREfVCkZCw\n2+3YtGkTPvzwQ3zzzTcoLi7GxYsXlWgKERH1QpGQOHv2LMaPH4+goCB4eHhgwYIFKCkpUaIpRETU\nC0VCwmw2Y+zYsY7pgIAAWCwWJZpCRES9UCQk+FxpIqKHgyKPLw0MDMQff/zhmDabzRgzZsxdt9Nq\n+SS5LqzFLazFLazFLayFayjSk3jiiSdQVVWFmpoaWK1WFBcXY968eUo0hYiIeqFIT0Kj0WDdunVY\nunQpRARpaWkIDQ1VoilERNQLlfAEAREROcE7romIyCmGBBEROcWQICIipwZ8SLjjGE8GgwFPP/00\nUlJSHPOuXbuGpUuXIjExEcuWLUNzc7Nj2ebNm5GQkIDU1FScO3dOiSb3i9raWrzwwgtISkpCSkoK\n9uzZA8A9a2G1WpGeng6dToeUlBS89957AACTyYRFixYhMTEROTk5sNlsjvWzs7ORkJCAxYsXd7vk\nfLCw2+3Q6/V45ZVXALhvLeLi4rBw4ULodDqkpaUBcPExIgNYR0eHxMfHi8lkEqvVKgsXLpTKykql\nm9XvfvjhB6moqJDk5GTHvLffflt27twpIiLvv/++FBQUiIjI4cOH5aWXXhIRkfLycklPT3/wDe4n\nFotFKioqRESkpaVFEhISpLKy0i1rISLS1tYmIiI2m03S09OlvLxcVq9eLQcPHhQRkfXr18vnn38u\nIiJFRUWyYcMGEREpLi6WrKwsRdrcnz7++GPJzc2Vl19+WUTEbWsRFxcnjY2N3ea58hgZ0D0Jdx3j\n6cknn4Svr2+3eSUlJdDr9QAAvV7vqENJSQl0Oh0AYNq0aWhubkZdXd2DbXA/0Wq1mDx5MgDAy8sL\noaGhMJvNblkLAPD09ATQ+c3YZrNBpVLBaDQiMTERQGctvvvuOwDd/14SExNRWlqqTKP7SW1tLY4c\nOYL09HTHvJMnT7plLUQEdru92zxXHiMDOiQ4xtMt9fX18Pf3B9D5z7O+vh4AYLFYEBgY6FgvICAA\nZrNZkTb2J5PJhPPnz2PatGm4evWqW9bCbrdDp9MhOjoa0dHRCA4Ohq+vL9TqzsM4MDDQsb9/rYVG\no4Gvry8aGxsVa7urbdmyBXl5eVCpVACAhoYGjBgxwi1roVKpsGzZMjz33HPYt28fALj0GFHkZrq+\nEt7CcVc91ajrwBksWltbkZmZCYPBAC8vL6f7N9hroVarceDAAbS0tGDVqlU9Dq/ftb+310JEBk0t\nDh8+DH9/f0yePBlGoxFA5/7dvs/uUAsA2Lt3ryMIli5dipCQEJceIwM6JP7uGE+D0ejRo1FXVwd/\nf39cuXIFfn5+ADq/CdTW1jrWq62tHVQ1stlsyMzMRGpqKuLj4wG4by26eHt7Y9asWThz5gyamppg\nt9uhVqu77W9XLQICAtDR0YGWlhaMGDFC4Za7xk8//YTvv/8eR44cQXt7O1pbW7FlyxY0Nze7XS2A\nzp4CAPj5+SE+Ph5nz5516TEyoH9ucucxnm5P/Li4OHz11VcAgP379zvqMG/ePBw4cAAAUF5eDl9f\nX0c3czAwGAyYOHEiXnzxRcc8d6xFfX294wqVGzduoLS0FBMnTkRUVBQOHToEoHst4uLisH//fgDA\noUOHMHv2bGUa3g9ycnJw+PBhlJSUoLCwEFFRUdi2bZtb1uL69etobW0FALS1teH48eMICwtz6TEy\n4IflOHr0KN566y3HGE8rVqxQukn9Ljc3F0ajEY2NjfD398frr7+O+Ph4rF69Gn/++SceffRRvPPO\nO46T2xs3bsSxY8fg6emJrVu3Ijw8XOE9cI2ysjIsWbIEYWFhUKlUUKlUyM7OxtSpU5GVleVWtfj1\n11+Rn58Pu90Ou92OpKQkrFy5EtXV1cjJyUFTUxMmT56MgoICeHh4wGq1Ys2aNTh37hxGjhyJwsJC\njBs3TundcLlTp07ho48+wo4dO9yyFtXV1XjttdegUqnQ0dGBlJQUrFixAo2NjS47RgZ8SBARkXIG\n9M9NRESkLIYEERE5xZAgIiKnGBJEROQUQ4KIiJxiSBARkVMMCXroLFq0CHq9HgsWLEB4eDj0ej30\nej0MBsM9v9fy5cv7NHT02rVrUV5e/neae08qKirw7bff9vvnEPUV75Ogh1ZNTQ3S0tJ6HdWza5iG\nh8W+fftQWlqKwsJCpZtCBGCAj91EdK9KS0tRUFCA6dOno6KiAqtWrUJ9fT2KioocD6HJz89HZGQk\nAGDOnDnYvXs3QkJCkJGRgRkzZuD06dOwWCxITk5GVlYWACAjIwOvvvoqYmJisGbNGnh7e+PixYsw\nm82IiIjA1q1bAXSOhZOXl4eGhgYEBwejo6MDcXFxWLx4cbd21tXVITc3Fw0NDQCAmJgYLF++HNu3\nb0dbWxv0ej2ioqKQn5+P06dPo7CwENevXwcAZGZmIjY2FlVVVcjIyEBycjLKyspgtVqxYcMGRERE\nPJBak5u4n4ddECnJZDLJ7Nmzu807ceKETJkyRX7++WfHvL8+kKWyslLmzp3rmI6NjZVLly6JiMjz\nzz8vubm5IiLS1NQkkZGRYjKZHMuOHTsmIiJvvPGGLFmyRG7evCnt7e0yf/58MRqNIiKycuVK+eCD\nD0REpLq6WmbMmCF79+69o+27du2S9evXO6abmppEROSLL76QnJycbm3X6XRy9epVERGpra2V2NhY\naWlpkd9//10mTZokxcXFjn2fO3eu2Gy2vheR6C7Yk6BBZ8KECXj88ccd05cvX8a7774Li8UCjUYD\ni8WCxsZGjBw58o5tn332W
QCAj48PQkJCUFVVhaCgoDvWe+aZZzBkSOfhM2XKFFRVVSEyMhJGoxGb\nN28GAIwbN87RY7nd9OnT8dlnn2Hbtm2YNWsWYmJielyvrKwMJpMJy5Ytcwz6qNFoUF1djeHDh8PT\n0xNJSUkAgKeeegoajQaXL19GaGhoX8tF1CuGBA06Xl5e3aazs7OxYcMGzJkzB3a7HVOnTkV7e3uP\n2w4dOtTxWq1Wo6Oj457W6+tzCmbOnIn9+/fjxIkT+PLLL7Fr1y58+umnd6wnIggPD8fu3bvvWFZV\nVXXHPLvdPqielUDKe3jO6BH1QPpw3UVLS4tj1M+9e/c6/cfvCpGRkY4hmmtqanDq1Kke1zOZTPD2\n9kZSUhLy8/Pxyy+/AOh8VsRfH1ofERGByspK/Pjjj455Z8+edby+fv06Dh48CKDz8Z0AMH78eNfu\nFLk19iToodaXb80GgwErVqzA2LFjERUVBR8fnx63v/29nC3rbb1169bhzTffRHFxMSZMmICIiIhu\nn9eltLQUe/bsgUajgYhg06ZNAIDo6Gh88skn0Ol0mD17NvLz87F9+3YUFBSgubkZN2/eRHBwMHbs\n2AEA8Pf3x4ULF5Ceng6r1YrCwkJoNJq71oSor3gJLJELtbe3w8PDA2q1GmazGenp6SgqKkJwcLDL\nP6vr6qbjx4+7/L2JurAnQeRCly5dwtq1ayEisNvtyM7O7peAIHpQ2JMgIiKneOKaiIicYkgQEZFT\nDAkiInKKIUFERE4xJIiIyCmGBBEROfV/smX5vm0Z6kkAAAAASUVORK5CYII=\n",
-            "text/plain": [
-              "\u003cmatplotlib.figure.Figure at 0x7f970d490590\u003e"
-            ]
-          },
-          "metadata": {
-            "tags": []
-          },
-          "output_type": "display_data"
-        },
-        {
-          "name": "stdout",
-          "output_type": "stream",
-          "text": [
-            "test_accuracy 0.1\n"
-          ]
-        },
-        {
-          "data": {
-            "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYwAAAEcCAYAAADUX4MJAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsnXl4FFW6/79V1Vv2BEhIAG/AuCAIsgjoCEFgFDSsio7I\n6Dg4l/GODgpu4wxcnWHEHYXBDQVllJ/LRUAhDKCgYd+XsCVhS0IWOnvSSXqtqt8f1V1d1V2d7iwN\nIbyf5+Ghu6q66tRJ1fmedznnMKIoiiAIgiCIILCXuwAEQRDElQEJBkEQBBESJBgEQRBESJBgEARB\nECFBgkEQBEGEBAkGQRAEERIkGARBEERIkGAQVx0HDhzAPffcc7mL0eEZOHAgioqKLncxiDaEBIOQ\nGT16NPr164eamhrV9kmTJqF3794oKSkBAPzlL39B7969cezYMfmYwsJC9O7dW/7+yCOPYNWqVfL3\njz76CGPGjMGgQYNw5513Ys6cOQCA8ePHY9CgQRg0aBD69OmD/v37Y+DAgRg0aBCWLl3qV8YlS5bg\nhRdeaNV93nrrrfjPf/7TrN98/PHHePfdd7Fv3z6MHDmyVdf34FtHHY3Dhw+jR48el7sYRBuiu9wF\nINoXPXr0QGZmJqZPnw4AyMvLg91uB8Mw8jEMwyA+Ph7vvfceli1bptquxZo1a7Bu3TqsWLECPXr0\nQGVlJbZu3QoAWL9+vXzcI488gsmTJ+P+++9v1T2IohiwLC0lKysLzz33HJxOZ5ufu73C8zw4jrvc\nxSDaEWRhEComTZqENWvWyN/XrFmDKVOm+B03ZcoU5Obm4sCBA0HPefz4cQwfPlzubXbu3BkPPPCA\n5rFNzVSzfft2fPTRR9iwYQMGDhyIyZMnA5CE5t1338W0adMwYMAAFBUVYfXq1bj33nsxaNAg3HXX\nXfjmm2/k8/haCaNHj8by5csxceJEDBkyBHPmzIHD4ZD319XVoaCgAH369MHMmTNRVlYmW0Hl5eUQ\nRRFLly7FXXfdhdtuuw2zZ89GXV0dAMDhcOD555/HsGHDMGTIEDzwwAOoqqrCu+++i4MHD2L+/PkY\nNGgQ/vnPf2re89NPP43hw4djyJAheOSRR3DmzBl5n91ux+uvv47Ro0djyJAhmD59ulzuAwcO4KGH\nHsKQIUMwatQorF27Vq4rpVWzZs0aPPzww/L33r17Y+XKlRg7dizGjh0LAHj11Vdx5513YvDgwbj/\n/vtVf3NBEPDRRx/hrrvukvebzWb5XBcuXJDr4Y033sCoUaMwfPhwvPLKK3JZq6ur8cQTT2DIkCEY\nNmwYfvvb3wZ8BojLCwkGoeKWW25BQ0MDzp07B0EQsHHjRkycONGvITeZTHjiiSewcOHCkM65du1a\nLFu2DMePH4cgCC0q24gRI/DEE0/g3nvvxeHDh+VGEADWrVuHf/7znzh06BBSUlLQuXNnLF26FIcO\nHcJrr72G1157DadOnZKP97USNm7ciOXLl2PLli3IyclRieaOHTtw2223wWQy4ZNPPkFSUhIOHz6M\nQ4cOITExEStWrMDWrVuxcuVKbN++HbGxsfj73/8OQGqQ6+vrsX37duzbtw9///vfYTQaMXv2bAwe\nPBjz5s3DoUOHMHfuXM17HjlyJH788Ufs2rULffr0wXPPPSfve/3113Hy5El888032LdvH55//nkw\nDIPS0lLMnDkTjz76KPbs2YO1a9eq3IW++NbF1q1bsWrVKmzYsAEA0L9/f/zwww/Yv38/JkyYgGee\neUZu7JcvX44NGzbg008/xcGDB7FgwQKYTCa/87711lsoKCjADz/8gM2bN8NsNuP9998HAHz22WdI\nTk7G3r17sWvXLsyePTtgWYnLCwkG4cekSZOwdu1a7Ny5E9deey2SkpI0j3vwwQdRWlqK7du3N3m+\niRMnYt68edi5cyceeeQR/OpXv9KMT7SGKVOmIC0tDSzLQqfTYeTIkbJFc+utt+KOO+5o0hp69NFH\n0aVLF8TGxmLUqFEqcfnll1+ajFt8++23eOaZZ5CUlAS9Xo8nn3wSmzZtgiAI0Ol0qKmpwfnz58Ew\nDPr06YOoqKiQ7+u+++5DRESEfN6cnBzU19dDFEWsXr0ac+fORWJiIhiGwYABA6DX67Fu3Trccccd\nuPfee8FxHOLi4poUDF/++Mc/IiYmBgaDAQAwYcIExMbGgmVZPPbYY3A4HDh//jwAYNWqVZg9ezZS\nU1MBADfeeCPi4uIAqK3FVatW4aWXXkJMTAwiIyMxc+ZM2R2p0+lQXl6OoqIicByHwYMHh1xW4tJC\nMQzCj4kTJ+K3v/0tioqKMGnSpIDHGQwG/OlPf8KiRYvwzjvvNHnO8ePHY/z48eB5Hj/99BOeffZZ\n9O3bF3fccUeblDk5OVn1PSsrCx988AHy8/MhCAJsNhtuvPHGgL/v3Lmz/DkiIgLl5eUApEZv165d\neOmllwL+tqSkBE899RRYlpV/o9PpUFFRgUmTJuHixYuYM2cOLBYLJkyYgDlz5oQUGxAEAQsXLsSm\nTZtQXV0NhmHAMAyqq6vhcDjgcDhwzTXX+P2utLRUc3uo+Nbl8uXLsWrVKrlOGhoaUF1dDQC4ePFi\n0GtVVVXBarWqYlOCIMiC8vjjj2PJkiWYMWMGGIbBAw88gJkzZ7a4/ET4IAuD8KNbt27o3r07tm3b\nhrvvvrvJY++77z5YLBb8+OOPIZ2b4ziMHTsWN954I06fPt0WxQWgdn84HA48/fTT+MMf/oDdu3dj\n//79SE9PbzI+Eohjx46hR48eSEhI8LuOh5SUFHzyySfYt28f9u3bh/379+PIkSNISkqCTqfDk08+\niczMTHz99df45ZdfZFdasOD5unXr8PPPP2PFihU4cOAAtm7dKt9DQkICjEYjCgsLNcujtR0AIiMj\nYbPZ5O8eEVCiLNeBAwfw6aefYvHixdi/fz/279+P6OhouRzJyckBr+UhISEBERERWL9+vVxHBw4c\nwMGDBwEAUVFRePHFF/HTTz/ho48+wueff449e/Y0eU7i8kCCQWiyYMECrFixQvZHB4LjODz11FP4\n5JNPAh6zZs0aZGVloaGhAaIoIisrC2fPnkX//v2bXa4uXbqguLi4ycbf6XTC6XQiISEBLMsiKysL\nO3fubPa1AMkdlZ6eLn/v3LkzampqUF9fL2/7zW9+g4ULF8ppx1VVVdiyZQsAYO/evcjLy4MgCIiM\njIROp5Otiy5dushBYS0aGhpgMBgQGxuLxsZGvPPOO3JjzjAM7rvvPrz++usoKyuDIAg4cuQInE4n\nJkyYgN27d2Pjxo3geR41NTXIyckBIAWiN2/eDJvNhoKCAnz33XdN3n9DQwN0Oh3i4+PhcDiwZMkS\nNDQ0yPsfeOABLFq0CAUFBQCA3Nxc1NbWqs7hsRoWLFiAqqoqAIDZbMaOHTvkOvaITmRkJDiOo+ys\ndkrYBWPbtm0YN24cxo
4dG9BvvWHDBmRkZGDChAmqoB5xaVH2LK+55hr07dtXc58v48ePR1JSkl/q\nrYfo6Gh89NFHcjbPO++8g1deeQWDBg0KeP1AjBs3DqIoYtiwYbjvvvs0fxcVFYW//e1vePrppzF0\n6FBs2LABY8aMCXjOpq6blZWlil9ce+21yMjIwJgxYzB06FCUl5fjd7/7HcaMGYMZM2Zg8ODBeOih\nh5CdnQ0AqKiowKxZszB48GCMHz8ew4YNw8SJEwFIcZONGzdi2LBhePXVV/2uPXnyZKSkpCA9PR3j\nx4/HwIEDVftffPFF3HDDDZg6dSqGDRuGd955B6IoIiUlBUuXLsXy5csxdOhQTJkyRRaMxx57DHq9\nHnfccQdeeuklTJgwocm6GDFiBEaMGIGxY8dizJgxiIiIULmsfv/73+Oee+6R733u3LmyBaM813PP\nPYfU1FQ8+OCDuPXWWzFjxgzk5+cDAPLz8/HYY49h4MCBmDZtGqZPn44hQ4YE/JsQlw8mnCvuCYKA\nsWPH4vPPP0dSUhKmTp2KhQsXIi0tTT6moKAAs2fPxr///W9ER0ejqqoKnTp1CleRCCJkKisrMXny\n5KBBfYK4WgirhZGdnY3U1FR0794der0eGRkZsqnu4dtvv8XDDz+M6OhoACCxINoNFoulyWA3QVxt\nhDVLymw2IyUlRf7etWtX1XQSAGSzdNq0aRBFEU8++SRGjBgRzmIRREj07NkTPXv2vNzFIIh2Q1gF\nIxRvF8/zKCwsxMqVK1FSUoLp06cjMzNTtjgIgiCI9kFYXVLJycly5gggWRy+g8C6du2KMWPGgGVZ\n9OjRA7169ZKtjkCEMexCEARBBCCsFka/fv1QWFiI4uJiJCYmIjMz028qiV//+tfIzMzE5MmTUVVV\nhYKCgqADgRiGQXm5JZxFv2JITIyhunBDdeGF6sIL1YWXxMSYVv0+rILBcRzmzZuHGTNmQBRFTJ06\nFWlpaVi8eDH69euHUaNGYcSIEdi5cycyMjLAcRxeeOEFeWoBgiAIov0Q1rTacEI9BgnqPXmhuvBC\ndeGF6sJLay0MGulNEARBhAQJBkEQBBESJBgEQRBESJBgEARBECFBgkEQBEGEBAkGQRAEERIkGARB\nEERIkGAQBEEQIUGCQRAEQYQECQZBEAQREiQYBEEQREiQYBAEQRAhQYJBEARBhAQJBoA6hwX/PvkN\nNpz/8XIXhSAIot0S1vUwrhROVuZi78WDAIDR16TDpDNe5hIRBEG0P8jCAMCLvPxZEIXLWBKCIIj2\nCwkGAEGxhhQJBkEQhDYkGFCLhAASDIIgCC1IMOAjGGRhEARBaEKCAUBUiAQvkGAQBEFocVUKRo29\nFg7eKX/nFYIhkkuKIAhCk6tOMGwuO/6281W8tv9deZuoCHrz5JIiCILQ5OoTDN4GAChrrJC3KQPd\nIgkGQRCEJledYDAat6y0KsjCIAiC0KZDC0aj04ptRbtU8QqtGIWoypIS/fYTBEEQHXxqkK9yv8Oh\nsmzUOiyYcO1YANpZUOqBe7zffoIgCKKDWxhFlhIAQLkiXqFlYQhkYRAEQQSlQwuGRwhYxnubWjEK\nGrhHEAQRnLALxrZt2zBu3DiMHTsWS5cu9du/Zs0a3H777ZgyZQqmTJmCVatWtdm1BUjWglIwtARB\nmSVFgkEQBKFNWGMYgiBg/vz5+Pzzz5GUlISpU6dizJgxSEtLUx2XkZGBuXPntvn1PeMrGDDeMmla\nGDT5IEEQRDDCamFkZ2cjNTUV3bt3h16vR0ZGBrZs2eJ3nBimuIHXJeUVDK2pzMklRRAEEZywCobZ\nbEZKSor8vWvXrigrK/M7bvPmzZg0aRKefvppXLx4sc2u73E1MQFcUk7B5beNpywpgiAITcIqGKFY\nDqNHj8bWrVvx/fff4/bbb8eLL77Y5tcPFMNw8A73cd5tDXbvmA2CIAjCS1hjGMnJySgpKZG/m81m\nJCUlqY6Ji4uTPz/44IN4++23Qzp3YmJM8IPcnqjICIN8fAVM8u7YeCO6RMVAf46Tt8XEGEM7dzvi\nSitvOKG68EJ14YXqom0Iq2D069cPhYWFKC4uRmJiIjIzM7Fw4ULVMeXl5UhMTAQAbNmyBdddd11I\n5y4vtwQ9hhck95Ld5pKPr6qul/dfLK+G2KhHo9Uhb6uubQjp3O2FxMSYK6q84YTqwgvVhReqCy+t\nFc6wCgbHcZg3bx5mzJgBURQxdepUpKWlYfHixejXrx9GjRqFL774Alu3boVOp0NcXBxee+21Nrt+\nsHEYDncMQzmYT6D1MAiCIDQJ+9Qg6enpSE9PV22bNWuW/HnOnDmYM2dOWK7tSZdlGO20WqcgWRZK\nEXEJFPQmCILQokOP9BY9A/cQKOgtBbhpxT2CIIjgdFjBqKi1yu4lRjUOQ2lhSIIhqBZQIguDIAhC\niw4rGC98uFsWBzbASG+tcRgusjAIgiA06bCCAQAKw0JGaxyGQC4pgiCIoHRowfDgmYQQ8LUmtEZ6\nk2AQBEFocXUIRgBB8GREKbdRWi1BEIQ2V51gKFfUcwpO/FSYhdM1Z+VtFPQmCILQpkMu0eobhxAD\nTF/uElxYf35zk78lCIIgJDqkhWF3qK0E5QJJvEYMQwnFMAiCILTpkIJh8xGMBpt3riilteHUEAyK\nYRAEQWhzVQjGmeIa+TOvimFoWRgUwyAIgtDiqhCMQEuwarukwrP6H0EQxJVOBxUMtRAEWoJV2yVF\nFgZBEIQWHVQw1I0+H3Achv/qehT0JgiC0KaDCoYLUIzuFgNYGC5Rw8IgwSAIgtCkQwqG3cEDjFIk\ntGMYTp7SagmCIEKlQwqGwyUAjLZIBLMwRBIMgiAITTrkSG8XLwCst+G3O1348PtjiE7LwxnLGXm7\ndlotCQZBEIQWHVIweF5UuaTAiDhwthCmmL2q47TSaimGQRAEoU2HdEm5BAEMq8iU0lgXQzqOBIMg\nCCJUOqZg8KLKJRVl4jSP0xyHQQP3CIIgNOmYguESAIWFkdQpAso0WwBgwcpreitRTlRIEARBeOmY\ngiGIYBQWBsOIqqwpAICokxdQUkIuKYIgCG06pmDwagtDSzAYgdMc6U2CQRAEoU0HFQwX2Ngq+ftF\n60UweofqGJHnNGMYNA6DIAhCmw4pGMW6Q9CnnJe/23g7jDftUx0j8tqBcAEU9CYIgtCiQwqGhSsO\negzv1B6CQhYGQRCENh1SMBhRH/QY0aUtGJQlRRAEoU2HFAwI2u4mJSIfQDDIwiAIgtAk7IKxbds2\njBs3DmPHjsXSpUsDHrdx40b07t0bJ06caPU1GSGEGU98BCMlojtEgYFIMQyCIAhNwioYgiBg/vz5\nWLZsGdavX4/MzEycPXvW77iGhgZ8+eWXGDBgQBtdOLhgKC2MCFs3PNzzdwAYimEQBEEEIKyCkZ2d\njdTUVHTv3h16vR4ZGRnYsmWL33GLFi3Cf//3f0OvDx57CIkA7iYPDBjVMRwM0LE
cIDKUJUUQBBGA\nsAqG2WxGSkqK/L1r164oKytTHXPq1ClcvHgRI0eObLPrBkqZ9aBnjBAVcQ6O4cCyDCAyECnoTRAE\noUlYpzcXg0zkJ4oiFixYgDfeeCPk33hITIwJfN4gv43QG1GvEBW9jkOXztEAGDBM0+duj1xp5Q0n\nVBdeqC68UF20DWEVjOTkZJSUlMjfzWYzkpKS5O8NDQ04c+YMHnnkEYiiiIqKCvzpT3/Chx9+iL59\n+zZ57vJyS8B9vMYcUUoMPhaGKDCorWkERAa8wDd57vZGYmLMFVXecEJ14YXqwgvVhZfWCmdYBaNf\nv34oLCxEcXExEhMTkZmZiYULF8r7o6OjsXv3bvn7I488gpdeegl9+vRp1XWDTVEeoTepUm85hiWX\nFEEQRBDCKhgcx2HevHmYMWMGRFHE1KlTkZaWhsWLF6Nfv34YNWqU6niGYUJ2STVFsLEUPWJScFpp\nhIgsWIaBKFJaLUEQRCDCvkRreno60tPTVdtmzZqleey///3vNrmmCAEMgFE9huPnoh2qfcmRSXjg\nhonY+mOm4geMZGGALAyCIIhAdMiR3h6X1K+6DfXbd0+vX8PA6QFVJhUDTnZJkYVBEAShRYcTDFEU\nZSuBZfxvz7NNFfT2WBgkGARBEAHpcILh4r0NPsswfvs5t2BE6k3ejQIjHSsyCJ6USxAEcXXSAQVD\nkFfXa8rCeO0Pw+Vtouh2SVEMgyAIIiAdTjB4wbscK6NxeywjuaJiIg3ejeSSIgiCCEqHEwwXL8Dj\nVmrKJaVEEL1Bb9+1vwmCIAiJDicYgsLCYBkWg5NuUe3XEozrr4kiC4MgCCIIHU4weEEEoxCMGTdP\nx8ged8j7PS4pADByklsqMtojEgxAMQyCIAhNOpxgCILXQmDcLikd6xUJZSA8UhcJAGhwNkrHgwEY\nWnWPIAhCi6CCYTabL0U52gxl0Jt1356eUax9oRCMLhGdAACe2Ug8QfIXtr8SdAJDgiCIq42ggnH/\n/ffjz3/+s2qSwPaMSjDcFgYXwMJ45KbfYHDSLZiUNs69RTre6rLB4qy/NAUmCIK4QggqGFu3bsWY\nMWPw3nvv4d5778XKlStRX99+G1PJJaUeh6FjvRaGUjA6RyRgxs3TkWCKB6BOw9Uaw0EQBHE1E7RV\nNBgMmDx5Mr755hv885//xCeffIL09HTMnz8flZWVl6KMzUI1DkNDMJTWhi8svGm4bTFrLkEQREci\npG50cXEx3nnnHTz77LO4/fbb8emnn6Jz5854/PHHw12+ZqNKq3ULgC5ADMMfr2BQ4JsgCEJN0OnN\nn3jiCeTl5eGhhx7C6tWrkZCQAAAYNGgQNmzYEPYCNhdeUA7ck8Qh2hAl72/K1cSChSfUzZNgEARB\nqAgqGJMmTcLdd98NjvN35axfvz4shWoNXguDkdNqY/TR8v6mBINR7BNEypIiCIJQEtQlFRcXh8bG\nRvl7XV1du86Y4kVp4B6jcC/FGryCwTGBYxgMuaQIgiACElQw3nzzTURHexvc6OhovPnmm2EtVGvg\nec/Eg97GP8bgXfi8qRiG0voglxRBEISaoIIhiqLs2gEAlmXB8+3XXeNxSSlTZCN03rUvmnRJKUSG\nJ5cUQRCEiqCCERUVhaNHj8rfjx49isjIyLAWqjXw7nEYysZfJXhNuKRYKGMYZGEQBEEoCRr0fv75\n5/Hkk0/iuuuuAwCcOXMGS5YsCXvBWoogui0MjanNAe0pzz2og94kGARBEEqCCsbAgQORmZmJI0eO\nQBRFDBw4EHFxcZeibC3CM3CP9TGeJl47Dudq85sMeivFhOaSIgiCUBNUMAApU2rkyJHhLkubIGi4\npABgbM/RQX+rtEoo6E0QBKEmaAwjJycHv/nNb3DLLbfgpptukv+1VyQLAwFdUk2iWG3vqy25KC5v\nv3NmEQRBXGqCCsYrr7yCZ555BqmpqcjKysLMmTMxe/bsS1G2FqGVJRUqjEIwiivqsXxDTlsWjSAI\n4oomaKvqcDhw++23QxRFJCUlYfbs2di+ffulKFuL8GRJNRXcDoTqJ4wIp4vcUgRBEB6CCgbLSofE\nxcUhJycH1dXVKC4uDnvBWopnidaWWBhKlxQY0T0vFUEQBAGEEPTOyMhAdXU1Zs6ciWnTpkEQBMya\nNetSlK1FCK2yMETVZ8+ocYIgCCKIYAiCgNtvvx0JCQlIT0/Hvn37YLfbVVOFBGPbtm1YsGABRFHE\n/fffj5kzZ6r2f/3111i5ciU4jkNUVBT+8Y9/IC0trWV3A/dstYzYsgWQfFxSZGEQBEF4abJVZVkW\nf/vb3+Tver2+WWIhCALmz5+PZcuWYf369cjMzMTZs2dVx0yYMAHr1q3D2rVr8fjjj+O1115r5i34\nXrMVQW8oXVICXAJZGARBEB6CtqppaWkoKipq0cmzs7ORmpqK7t27Q6/XIyMjA1u2bFEdExXlXaui\nsbFRjpm0FHngXossDJ8YBrmkCIIgZILGMKqqqjBx4kQMHjxYNYfUokWLgp7cbDYjJSVF/t61a1cc\nO3bM77iVK1fi888/h8vlwooVK0ItuyaC6Fk8qXXjMMCI4LucxvGKLri5S/sdd0IQBHGpCCnonZGR\n0aKTh7ou9vTp0zF9+nRkZmbigw8+wOuvvx70N4mJMZrbjUY94BCh1+kCHhMInY4BXNJnhnMBKXn4\nMDsH3/7mw2ad51LT3PvsyFBdeKG68EJ10TYEFYwpU6a0+OTJyckoKSmRv5vNZiQlJQU8/t5778XL\nL78c0rnLyy2a2y31dgAiRCHwMYHgeUWQW+cIeq32QGJiTLsu36WE6sIL1YUXqgsvrRXOoIIxa9Ys\nzWk2QnFJ9evXD4WFhSguLkZiYiIyMzOxcOFC1TEFBQVITU0FAPz888/o2bNniEXXRhBEgG1ZWq3I\niJ7lwMHonK0qB0EQREcjqGCMGjVK/my327Fp06aQ0145jsO8efMwY8YMiKKIqVOnIi0tDYsXL0a/\nfv0watQofPnll9i9ezf0ej1iY2PxxhtvtPxuALhEAQzT9EJJAcurmMmWBIMgCEJNs11S9913H/7n\nf/4n5Aukp6cjPT1dtU058E+ZttsWCO6xEy0RjP6mdGyuKwTD8SqXFEEQBBFCWq0vDMO0OM32UuCS\nBaP5LqkYXRwceYMBAAwJBkEQhIpmxTBEUURubi5uv/32sBespbTGwuBYBhCleyWXFEEQhJpmxTA4\njsOMGTMwYMCAsBaqNXim8+BaKBiiRzD0JBgEQRBKwppWezngW2Fh6DhWtjAIgiAINUFb1WnTpqG2\ntlb+XlNTg+nTp4e1UK3B5V5atSVTjBgNHCD6/47W9yYIgghBMBobGxEXFyd/j4+PR319+126VGiF\nS8pk4DQtDJdIgkEQBBG0VRUEAY2NjfL3hoYG8Hz7bUB5d+POsVyQI/0xGXTagiG4Wl0ugiCIK52g\nMYzx48djxowZmDZtGgDgq6++wsSJE8
NesJbiEqV0WBNnaPZvA1kYToEC4ARBEEEF449//COSkpKw\ndetWiKKIhx56CJMnT74UZWsRLlFq3I06Y7N/azJwUK+i5D4nWRgEQRDBBQOQMqWulGwpwT3dbEst\nDJFcUgRBEJoEjWH8+c9/Rk1Njfy9uroaTz/9dFgL1RpccFsYXPMtjEBZUk4SDIIgiOCCceHCBcTH\nx8vfExISUFhYGNZCtQaXKDXuhhZYGByrPQ7DwVMMgyAIIqhg8DyvyopyOp1wONrvPEs8pLIZWyAY\nADQFw+4kwSAIgggawxg+fDhmz56NRx99FACwYsUKv9ln2xOeGEZLLAwAmoJhdbZfgSQIgrhUBBWM\nOXPm4OOPP5aXTR01ahSGDRsW9oK1FF6OYbRUMPyNLhIMgiCIEFxSer0eTz31FN5//33cdddd+OGH\nH/DXv/71UpStRQiMZGG0JOgtwUAU1NXicJFLiiAIokkLw+VyYevWrfjuu+9w5MgRuFwuLFu2rF3P\nVutxSbXYwgAAgQNY7/reNhIMgiCIwBbGa6+9hjvvvBNff/01xo8fj6ysLMTFxbVrsQCUFkbLBOOv\nvx2MCL36txabDau3nUNtA7mmCIK4egloYXz11VcYOHAgZs6cidtuuw0A5IWU2jMi4wKDlge9r+sR\nh+hCI2yoDujAAAAgAElEQVS2BnnbgbwSVJ5lcMFswdMP3NJGJSUIgriyCCgYO3bswLp16/Dmm2+i\ntrYWkydPbteTDnoQWUkwWh7DUE6NzgAQYbHbAACVdfZWl48gCOJKJaBLKjY2FtOnT8fq1avx/vvv\no7a2FjabDdOnT8fXX399KcvYLES29TEMxl0tBkYSHY+b6wowsAiCIMJGSItG9O7dG3PnzsX27dsx\nffp0bNmyJdzlahGCKIJheUBkWjS9uQfWrQwGxgQA0jmhNS0hQRDE1UNIkw960Ov1uPfee3HvvfeG\nqzytgudFgBHAiC0XC8C7vKue1QMCAM49lxQpBkEQVzHNX5auHePiBYAVwKJ1gsG4lUHHSHoqWxjk\nkyII4iqmQwkGL7gtjFYKhsclpWP07g28e3urTksQBHFF0+EEg2EFsK10SXmC3hzLQRQBcLy8hyAI\n4mqlYwmGxyXFtI2FAYiAwIFhKUuKIAiiQwmGy+2SanUMg/FUiwjwOtnCIMEgCOJqJuyCsW3bNowb\nNw5jx47F0qVL/fZ//vnnyMjIwKRJk/D73/8epaWlLb6WZGHwbRb0FhkRosAp0mpJMQiCuHoJq2AI\ngoD58+dj2bJlWL9+PTIzM3H27FnVMX369MHq1avx/fff4+6778abb77Z4us5XTwYBuCaly3sh8ol\nxXNyWi1ZGARBXM2EVTCys7ORmpqK7t27Q6/XIyMjw2/Q39ChQ2E0SiOqBwwYALPZ3OLr2d1LqbY2\nhqF0SYkC586SEsm+IAjiqiasgmE2m5GSkiJ/79q1K8rKygIev2rVqlat5md3L3TEtTbo7XFJuWMY\nDAMpXZdMDIIgrmJa57sJgiiKIR/7/fff48SJE/jiiy9COj4xMcZvm6myAgBg1Bk194fKr3oNQk71\naQxM6Y8LxYekjRwPg0HXqvOGi/ZYpssF1YUXqgsvVBdtQ1gFIzk5GSUlJfJ3s9mMpKQkv+N27dqF\npUuX4ssvv4Rerw/p3OXlFr9tFVW1AABRYDT3h8rAuIH429Du4BzR+F48LG1kBDidrladNxwkJsa0\nuzJdLqguvFBdeKG68NJa4QyrS6pfv34oLCxEcXExHA4HMjMzMWbMGNUxJ0+exMsvv4wPP/wQCQkJ\nrbqeJ4aha/U4DBbdopOh43SAe7lWhlxSBEFc5YTVwuA4DvPmzcOMGTMgiiKmTp2KtLQ0LF68GP36\n9cOoUaPw1ltvwWq14umnn4YoiujWrRs++OCDFl3PyUvZTJ45oFqLjmMA0T3V+fWHwdti2+S8BEEQ\nVyJhFQwASE9P9wtkz5o1S/782Weftdm1HG4Lg2Pb5rY4jpUFg42yoNywF0DLg/IEQRBXMh1qpLfD\nbWHo20owWAai4K0igXG2yXkJgiCuRDqUYDgFdwyjDQXDY2FIUAyDIIirlw4lGK42jmFwHCMHvQFp\napAfC37Bq3sXwim42uQaBEEQVwodSjAcotslxbWNYLAMA4hKq4JBTtVplDRcRJ2d0vQIgri66FCC\n4XIHvfVsaGM5gsEwjHoxJhGwOOsBAA7B0SbXIAiCuFLoUIJhd0mCYdS1jWAAUM98KzKoc0iWhZ23\nt9k12gKn4EJ+XWGzRtd3VBy8EwV1Fy53MdoFdt6Bwrqiy12MdoHNZcMFS/HlLsYVTYcSjEaH1OuP\ndk9m2BawiioSIaLe0QAAcPDty8JYeWoV3jqwBMcqTl7uolx2lp9YiTcP/At51WeDH9zB+fDocrxx\nYDE1lADePfQRXt+/CGWN5Ze7KFcsHUowrC6pEY+JMLXZOZUz3wqMQ5qQEFLPrT1xwCxNYZJPPWtZ\nNIuokcTpmnMAgNKGls8C3VEoqpemKaqwVl3mkly5dCjBsDsll1SbCobCJcVzXjdUexMMz7QlHkEj\nQDVBaELPRcvpUIJhc1sYkQZDm51TKRgi6xWJ9hbDoNUA/SHx9EKxLSVUFy2lQwmGZ2qQthq4BwRe\nW6PdWRju/6lhIIimoXek5XQYwRBFUZ58sK3SaoHAq/fll1Uj70JNm12n1dBMun5Qw+CFrC0vVBct\np8MIht3JQwAPoO1Gekvn0haMPaeK8frKQ6i3to/5pbyrkNPLQPhDT4UXXhQudxGuWDqMYNRbnQAr\nPQhtNdIbCOySYlhJnBrajWA0HfQ+W1KLV784gGqLN/bCCzyWHPkUe0sP4oKlGG8fWNKhMkiaI55O\nwYVFh5fiUFk2ztcW4O0D76PGXhvG0l1axGY0knbegXcPfYhjFSeRV30W7xx8X04nvxQIogi7kw/b\n+V3NmNan0WnFOwc/wKmqPJyszMXCgx/A6rKGrWztnbBPb36psDl4MIz0UrRpDIMNsBgTJz3Q4Xyw\nm4XHJRWgjVy8KhuWRic27C7A9LtvAACUNJhxqioPp6rykBLVFaUNZvxw9j+YcfP0S1ToMNOMbvXZ\nmvPIqz6DvOoziDXEoM5hwab8n/GbGyeHr3yXkOY0kscqTuJMzXmcqTkPlmEhiAKyincho9ddYSyh\nl7e/Ooycwhosff5O6Li279M2Zx64/ebDOFebjyVHPpW37bt4GCN7/KrNy3Ul0GEsDLuDly2MtnRJ\ncYHO5bYwbI52IhhuAvWqBcF/u15DDHmxfd1Pa2iO60HZyfDUYUeqi+Y0klodLkG4dHWRUyjFBsP1\nbrmE0L0CWi7pjvRcNJcrVjB4gVdlKtkcPMBIf8i2Wg8D8HdJiU4pZZdxWxiOdmJhsB6XlChqZnBJ\n8R0RDCOZ2YBk+su/Z6RHwdPIWl02CFe4r9cpODVH5Dt4h3xvnrpQ/p09nwW5Lqwdti7s7roQRVG
u\nC2XSiP9zYb1kyQROV3jq3Mlr14XNZYcoihBFUXY76Tn/BBrls3O1JVZcsYLxjz1vYU7WXPm7zeEC\nWAEMmMBupBZQWmFTfRftEdIHzuW+bvsQDE/Y++eiHZiTNRfnawvkPQ3ORqD/f6C/NhvFTDae3/4y\nTlXlwSV6e52cu2EQRAE2lw3Pbftf/Ethhl+JbCrYitlZc1FcXypvq7XXYXbWXHyduwbrz23G89tf\nxrnafFWvkVXUhcVRj+e2vYxPjn1xycvflqw7twmzs+aqpsWosFZhTtZcrD2zAavPrMfz219GcX2p\n3PkAvB0RQRRQbavBc9texoqTX1+SMjtd4Xm3vjuzHrOz5qLa5s1yLG0w49lt87Dh/I/4Kvc7PLft\nZZQ1VgSwtgRcbCjD89tfxjd5a8NSxvbKFSsYFTYpOOtRe5uDB8MK6skC24CGRvVDK7oMEGwRYE31\nAMIbnGsOvlm1+y4ekj97gre6LqUoZrIBAEfKj4MXlI2kt1dd655gMa/6TDiLfMk4XHZM/lzWWAEA\n2FmyF//J/wkAcKIy16cuPL1qHuVW6fjsihOXqrhh5WRlnvzZM1XGlgvbsPXCdgBAXvVZVUdCfi4g\nyPNR7XdPQxNuHGGyMDycrc33fq45DwDYkP8TdpbsAwAU1F3QTBYQIOCc+7fbi3eHtYztjStWMDxY\n3Wa05JIS2jR+AQBjh/RUfTewRojWGDB6J6B3tBvB8A1dWF1ey0hpNovuxAAWjCoQyroVhxcFzTHj\nVrsLLl798giiiCWrj+GnA+17/iqboi60rE8GjE8j6XXvNbXKYqHZglc+24eKWv+smUZb+8ie80X5\nXJg4/0k6GTBwKcRTaXkyl3isTyCXlCiK+GDtcWzcW9iq8yvrItYQ47efYdR14UFyz12d456uSMHI\nyfemflocVjh4J/KteQArgGvD+AUATBl+neq7iTNBaJQeLjbCIgXbFdTYa+XJ7wRRwKGybFUansVR\nj1NVedCiuL4UedVncaTsWLN8o5W1NjTa1eVQvgzKxtAFyXfr+zJ4PgsiD9+XodHmxJPvbsPSH9S9\nbEujE4fyynEoT3v2zypbNU5U5gKQYk4HzUdhc3nTemvtloBWTKGlCKerzyK7vPU9e1VdaAR/pbrw\nbmfg9ds3NeXK+2uOodBcj9Xbzqm27zlxEU+9tx0HcsrkbRXWSvnv7hRcOGg+qvKj19hrcbpafR4P\nBXUXcLr6LI5XnGrqNkPCynufRa26YBnfjoR2XRRcbPkCYubGcvnv7uAdOGg+KgflRVEEY7CCja7W\nFIzztQXINufhUOkJfPtz6yxgm+od8RcG346E5/4FgW8z8SxtMOOM27qxuew4aD6qsnYrrJU4X6st\njGdqziOv+ixyqk63SVlC4YoUjL9+uFP+XN3YgG/z1uKQYyNYU2ObWxg6nx5plF4hGJEWPwvj9f2L\n8FH25yipv4jdJfux7PiX+PLU/8n7Fx78AEuOfIpCi/8aBQv2vYtFhz/GJ8e/aNbU3BfK6jUsDG/D\n4BkBr4QFq3oZPI1XlcXmZ4afKZZcWgdy1cJQ4x7TESiO8/c9b+GDo8tQbavBz0U7sPzESvzf6e/l\n/Qv2LcSiw0s1p5t+Y/9ivHf4Y3x8bEWr13NQ1YVWIwlWJZ4WmycpQPBz9YmiiHU7z+NMUS3sTqme\nDDr1M/LzYcl1s+Wgt9wv734DS458ikanFRvP/4TlJ1Zi3blN3v27Xsd7hz9Crc9KjqIo4s0D/8J7\nhz/Gh9mftXpqbmUjqVUXDMOqGk9PxpggqJ+Jv3++v8Vl+Meet7Do8FLwAo81ZzZg+YmV2FzwMwDA\nxQswDciCsc9eNDjUlptLcOHtg+9j6cllMN54END5z+dmd/BY+WOearxRXmE11mzzf58aVe+Iv0XI\nMqymq9IpuNrMvvjn3nfw7qEPAQBf567B8hMrsU3h5np59xt4++ASv6SLRmcj3j30IRYd/hj/OvLJ\nJZuq6IoUDGXPo87WgNOKxrUtB+0B0kMyb9hz8vdoYyREqyQYTKRFbixdvIB6qxMWh7QiX3Fthezz\n9UwxDQBlbp94sCVeq+yhTzui0/k/vqH1qr0vg+eBq6hthM2pfnnyA/QmaxuklzKQW85z3TqHBWer\npSC8Mhhf75QGgzU41Q2D78tR66jTPH+oWF02rNych882nNJMqfTtVTfYbe5y8H6B1+LyBqzZfh4L\nvjwoZ8gZ9OrXyKCXBETLB2912WTfuXKRJ08jbePVSRYOn/L61lVzaQzyXPi6Kj3PhUt0+fWqeaF1\nMQYbb8cZ97vhSUxQdj58BUP5TAPeTEUPdQ0OZO7Jx5aDRVj0f0fl7c8u2oZ1u8/7XT+QFe5h+9FS\nVV14EiPsgqPNJ/t08k7kVkuWwoYjx5F1RD01v83n3n3rwsE7UFzRgL98vBtF5fVtWjYlV6RgKLHY\nG1UpkW2ZUushOSpJ/hylj8DYW24ECx3YCIvcaHy49jhmLdouH/fxumMoqZL+cKFYPXaXuofg8R2H\ngsMp+LlUg70MDMOAV7wMckPFiKi1qh/G/FJJMDrFqn3eNfVSmW0OHrwgYM/Ji35xDkBaCfHwGck9\no5V14tuI+84EHGg+r1BpdNmw5VARtmeXajaSosioc+vdDREvCigqV4ulskHzCKVRry6fQSf97Rwa\nWT523i5fS6su/BsGdaNpaXTg2LlKv99poZUKHNzCYFS9aqf7byNZoL6uytDHdmhhddnkZ9Mz3kHp\n4m1wNt1IesZCAcCR0xV45l87sHmfJMLFFT4j0xn/uvB07gDtusg+VwGHhnVudzlk67qtsPI2ud7r\n6l1YsTEXh097rUnfe7f5vCN23oGVm3NRVm3FF5ty27RsSq5IwdD18FaIxdaA+kbvw6CVN92WROhM\neHDUDegW1RVMRD1+OVqEilorjpaehq6HNzbBsALqGqU/slag1bdhqKhXP+AsGDgFFz47ugqf/LRb\nsyH2YLW7/F4Ia5CGgQULp6KR9BzPRtXhZIXXJ7rmTCYuOiWrwGRQN3A19W4Lw8Fj874LWPrDScx8\n6xdsOH4Qmec2y8eVVNXJ5dOaasX3ZWj0szh4OHgnvsldi4sNZWgKrdiPxe6tW6tTYyyCg1fHMNwD\nQE9V5aHI6vUfr8r7AWdqvdas51K+o5E9FobTKeBkZS425W/1Xt9lky27UOrC9zn5dMMxvLvqID49\n8g0qrE0Lh1bA1mPVSfv9rS1RFDU7GEfKjyO/zlsX+tSTOFUh1cWBnDKV++1gbjl+3O+fCHGs4iR+\nKsySv1tdVjhd0rUEQRIjq8N77UZno+r3vnUBlkdlfR1e+XEZ1uyR4oYeq473HajKagmGtzOgaYWz\nAup8Ok8AsN98CHkV3vv7Kuc7Vd2UVjbgi025ftZpWXUj/r0pF1a7C4fKsvHLBa9r/bON2bB6LHtR\nqot/rT4i728M8o44eAecsELf8zhELnxLL1yRU4Pou3
nNy3qnDQLPyHdivASCAQBdIrqgqKEYjN6O\n1VnnYOyzV30gy0trjBu8vSdlY2b1cT1U1qt7snbeiUMlOThQuQ+uslSkHb0Gowf10CyTJ0NMtY23\nSWZuYR2+2HUSSPH/DR+h3UPcXu5t4H4qzAK6AigY5zdI0WNhNNpdqpl7M8u+UR13zlwll88TE1L2\nfn0byYp6tUm9YvNJpPevwbayXTDpjJiUdo9muStrrfIU90osTgukIA+Depv/y9RodyEmUtuttqdq\nm/z556Id7k/jVMf4irnn71xRa8P7R5ep9lldVtnC4FgOlbU2REcxiv0+DYNfQ2EHl1CPw1XH0MPc\nBeN6jtEsNwAsyzwORKu3VVqrpMCyjxvOQ1b2BQy6qZPm+Tac/1H+rOtaiBVnPsOGLVNRaJb+XqMH\ndQfDMHh/jZTGPGZwD7Cs994+yv5cdT6ry4Z6uwNggNOFdUB/oMZqUe1X3buPtcWwPD7ZsQXlhlw4\nXAyAnprllg72FwzlvGmaU6cwAsw12nNo5dgOyp93lOzFjpK9eHXofPyw4zx+OSKlK3dPjFK9s59t\nyEHuhRpwLINd3Jeq82UXmGG8yQWGAURB6oAwesXAZJ9793Vd2nk7GkwF0MUXwVbnfdkdTh42B4/Y\nqLZZI+iKtDCUNDqsqh68oY1jGL6Y9JJbJlLvXtWP5REVoSFSnEsSDHhdD0pXi+/LUNWgbiR3nSzC\nsi173OdyykG8eqsTe0+a1eJjd4Jh/XvWFxvLcPR0pWYjeaGsXrMH2hS+PnmlWX70bODe7skLZsBd\nPo7hsOt4KaoavHEJ37ow16on/attbMSmbClbqqC8StOKKDRb8Ng/NuOrrf7muAAejEnqrTbY/evi\nXGk1LBo9yVBxOH3E2uHJOPMvp2RhSI2Tpd6F5z/chR8Pn1HtBwBzdSP2nTLLaeMeGJYHEyk1qr4N\nqBKeF7A/76Lf9kaXVR6Xo2V55ptrYXWEHkD1iAXgH8uqD5JabHVZIbpnZ6ixSGWptgUWDD8Lg+NR\naZcsTkbX9LUYDcGoddTJFpfm1CmsgBP5FU2eV8mcJTtlsdDC8zyczPef4JPhXN53WHQ3y3rt9sLh\n5HHkXKny57DzDjj0UqdNUCz09q/Vx/DMv3bgfGmdKmuvpVz5guGywil6K6gt18LQwshJSm3SuRWb\n42EyaOT2cy44eG9PEpCCvx58X4Zaq1owTpdUgYmwyOdqtEsP9NIfTuDjH05g9wmpMcgvrcPq7drp\nhcX1pdLvNF4WvV47ttEUdQ0OrPwxT0468AS9g/7O1ii/sJW1Dny6/hS+yjou71fWhdXuwsqtJ9Un\n4FzgoqX6OVFYhiNn/F/i00VSI7gtW3sdb09dNmg0hgVldfjpYMtz+n1dD02N/le6pKrqpEYu6+R5\nxX4raurtePXfB/HR9yewJ9fnfjgXWLdgWJ2BRa6i1ia71nzxBJg1G0lGQKWlZTPT+sY0ausd+H8/\n5uGERgMJSHUhwlvGaosdtQrBsPE2rN1+Tvbl1/uJpwt2TmokGc7/XmwOlzdBJkBdlNRL75FTa34p\nRoCIlo+z8nWLJcRInc3Sykb/g5Xld78rSgtD+Y5k7i7AtuPq5zWvuAINkOq5rM6CI6eld+TEeWnb\nu98exQdrj6O1hN0ltW3bNixYsACiKOL+++/HzJkzVfsPHDiABQsWIDc3F++++y7uvvvuZp3/jOMw\nlIO723KmWi0i3BaG0T3oSZ9yDj/VHQcbpT5Ol3Ieojt4esFSjKd/fglDEofJ+60uG+oaHWAZBrwg\nYt3eMzBerzgB620YuE5l2C0sR3XW7TgbfxCmgQJOFMUhB1uxP6cM+l7aZf3i1LcwGDqBYRP99rkE\nV7MsDOMtv8B+dCS2HCxCgXgQxdwRONleAG4AIMLQex+E+ni4im70+62u+xk5o6VKLILp1hKcr0sD\nIjx14W0IDuSUQWDULy/D8oBJskh0XUqxrPAd3G8aj435W8AyLP46dDZ2NayDvpcDjF5bxIzXHwFf\nlwCro7f/TlaA3eVEqF0NY7/tsB8bIZWnRy726jeh9KfBEC6m4flpt6C002bohAS4iq/3++33ZzfI\nAcs6w3mYBhfAXuP9A1pdNhw7Vymvs5JbXA50UZaVBxshieeu0n3YZz6EKddlIPPcZhg4A/42dA6W\nHf8SUboEGK7L0Sz/h9mfoW/n3ugWlaxZF1WW0DOxjH13wX5Cmrn1q1NrcarhMLiuN4I398T6PeeQ\nzfyAnTu6YX6nh/1++395P4CH2wrvWohX9v8DaRE3y/uPnr+Ig6WRAIDlfxmN/Xm+4snDZagFA0CX\nXAAu8QKcF26EvsdpiC499uddj12N30N3DQuuk1mz/IsOf4wBXfrByET67WMYIaDQaGHovReOHOkd\n1/c6hu/rNsFUPAUjut8GO+9AbsT34JK6gS/7L//f9vI25vpu56HrWgi+3OvO2pNbhBuib0ZCjBFl\nNVY/gVy39wz0PS3u35/D0vx34Np+A0yD8yA6TKg/fgdMhtZ3psNqYQiCgPnz52PZsmVYv349MjMz\ncfasOh+6W7dueP311zFhwoRWX++G+DQMS7m11efRwp43CC7zNegaIWVMeSwNrpMZbJR/2qlvyp9L\n5LGr2Ju73ui04pnFO/Dy8n3IL63zewAYnROMydvTY1gRObb9gN4ORu9Efv15HCw7CrZTKXRd1Oap\nEoe+SvOhd/Au2TUi8sEfA9Zok3tBhY35ACOC6+R2eXAucLHV0Hc7j+t7xPn91rcuGFaAI8abXqvs\nPVXX2wHORzD0dvA6b12IjID/nP8J9c4G1DksyK0+g4uu89AlFoOLD+xC4GKrNd0tyoYhpLqIaPAG\n8eMqAUZEfmMeThfV4qKlBi5jFfTdz/pllQH+2S0MJ4CP8/YWyyx1qKhRWFw+bifWaAVj8J7DJbiw\n4fyPsqsppyoPOdWncbB8H9jowOnIJypzAvjtRdS6kzVEPnh2GhtVB88goOOVORAhgkuQGucD5/Kl\n/d1y8NyHO/x+6+uHd8GJHIuiF+zzHJRb1O8ZG1EPRqdIVuAE6LufAaNzgTVZsWLndhTWF0Kfkg/W\nGFgEj1Qcw84TGpYpKwKMdG+iEDyNlout9n6OL4MIUR54WmQpgUNXC0PPU2prwlN2nc/7z/HgOnvd\nWycKzXj2/Z1wuni4XALg44JjI+tUbmmG4+WOGhvRACbCgi5xEUHvIRhhFYzs7Gykpqaie/fu0Ov1\nyMjIwJYtW1THdOvWDTfccEObjJx8etAf0bezfw+3LRBqkuAs6CtnwHgEozkweu8fuapBevirLXZY\nGp1+DxEbVecXl1D+vhL5Aa/j55Zj/R9QycJwC4ZDepBEgYVgjfI7Vr6+u4zy/6ZGgOFVYhdhCvhz\nFSKnMLcVDcfFqkY/8WSj/RcyanB5zfpD5qN+++XruNQWp+8YEwAAI4DxNAzuuhDsJgj2Jm7GU0Z3\n3TKR9QAEf
LjOm9kSGer7qXj59+YUyVONdIo1wi74pBhH+4/PaVBkEy3d/qPffg++dRHIDVNRJ51P\ndEj3LzTEQHQ10Tt1p7d6FhWTLGNRTk8GoNlIaqGMRSifA0EQYfcRGK26UL4jus6BO1IRnM/fVsuS\nUFoYLkn8+drOclBaGxFRJp2cgexx/ymTPIb01U4q8Lu84l489feXj/fgTEmtxjvSdF2wkRYkxof4\ncjZBWAXDbDYjJcUbse/atSvKylofeLmceATD0ALBUFLeUA02tgKMwYq9F05IvVYFWo2kEq5T4Hrk\nGE41sEjrYSqutOBAgWTteRoGhhUAIXCvkjFYwRisgLs3xDAAl1AGNtrbs7JFNX9UdqW1CvNXb8Ci\n7/di/4WT4CLUPt5gdXG0iYkBRd4nFVjQqDfOG0iW60LnbLoujFZAb5N7hgwrgI2rQJldEfSMD9xg\nBTyvqRF7L5wAY7QisUcDGGPz6qKp50J0qZ/Z0gZ/N01kBMC4XV6eqfzBuZq0Njx14WnUGJ0LbEwV\n2Cjvc+exOpoDY2oAG1sB6G34n6XfoYFXW0zB6yLwNRle3XhKk4mqiYpSbHd5ljVwAU3UBWu0Ys70\n3rJ1VOuoQ2FdkSrtNqVn8wfWsRH1YGMrUG2rhYUtAWPwFc+m64KNq4CDa/l0Lh7C6vC/lHPF63h/\nH2RbMvvBW3DsXCW6dZauY9SYuK051KMKxt5SQOocAF2IvVHRqYco6Jo0sR28EzpGB6coPbRKU9mD\nrvNFeLZ6GkmgaTeEsY80i6eyh2W4Tt27v2DcieZSaCkC4otwEYBBI8QQCMFuAsMKquBgMCysf+aQ\nrou3kRed0t+V4XiITQiGqa80fYPSVWG88ZDqmMq4fSGXywMXUwOu9wEAQCEAXXTTx3sQbJFgdA4/\n14b6IHX/UDlbqwdnfL43JOhpJHUur3hoYOrn/zc33qSeOsTQq/lzgnFxVeDimr9ksGiNhj7Kpu1y\nc+OwsYBCM7Tcd474s3KPWnAYwEUC0DkhChwYaGdlGW/ZhrePbVNNKfPGgcWqY368+J9Qb0WGi69o\n0t0aiO7RKSiuvwhd54vIRyZ8U8KbS1gFIzk5GSUl3pfRbDYjKSmpiV+0DMFuwi2GSUhM9J9xsq0Y\nnRiD0cN6yt+TXP6+eiXOwhsh2KJgvEFqRETROwW57sKtiO3sRKUuR+WPBgDHuZthuDZwNoNgjQFn\nvtSov7cAACAASURBVAlOUxn013gH2Ik8B74mCbrOpRDAQ+ARsv2oFIymetUeGFaA0BArWRz6ptMZ\nAcBR0Bs6PhrstVJDqKyLJ2/9A1Zs2QNLVK5PaiQDR/5NMPQ86X9CNzpnHKyFqTAm1AIp3nRa0aUH\n6juDib8ITic0Z6VWVcNo0hkRTIoYVgRviQcXaVG7YLTOLQLOwt4Q7ZHyc+EeHgJRBBynByE6wQpH\npzxV3EfkWbiKr4f+vwKP4BUaYjGwy63o05fFN8d/kLcnmOJQYdaBi6tE5zgjqp2h9269FoYTsIXW\nIePrOoGNqZZdfAHPLbBwXrgRoj3C7x0xsHpYcm4GG1EPXfezqmwv0aWD62JP6HtImYEMGL9VJjl7\nAl4YOxmL1/2M+jjvuyTYIgBeDzaqDnanCF+vVJPlVVgYTYmnqhwNSeCjgntUInQmTOs/CZ0i4vH2\nzo+l67nrIkofiT8OmY4SixnfHFunulfRYYSrops8Lk0UWL/MuL7J1+PhpEnIyjmF265XT6TaEsLq\nkurXrx8KCwtRXFwMh8OBzMxMjBkTeKBRSy0SobYLOhvjUV5uuWT/rPWKF9qp9u/ytZ1xW9KvINR4\nxVGwJLgPZlB/sTMcRb3AOvy7jwZLapM+UrExBjd06olZI+4DX9VV3j6g8wCIivhDc6rSxHobA18X\nTsByOI3ISNUeQKc6t7UbeHNPRNq7y9s8daFnDDh+gEV5bg+IPg0SyzB444HfeK+ncT8pkcmIZ1Ng\nvdALfI03lchV3h2dTPEAgGYnhijcNjGmEM0+pxGdGwcGPYyvSgZv7ql6LroaesjXFWqSMOPWCUiO\n7uLzQz1cZdd4v2uIumiNwU0JaUhPGo4Ywftc3J48RLaaWK5575fcSDJo0tpS/cZuguvCDUGP4ytT\nwJtTVXXRKyYVABBtiMZN8b0xrtdoxBli1ed3mFR1EaX3FzK9Mw7dddfgGnEghAbv7/mya7wrZmqk\nmjd5Xwr3XCidKgBwNJjgLAreSA9NHoTB8YPRy5gmb/O8I9G6aKSZrseIxOGINqjji4ItEryiLvSi\nvwJ24rqgp+la/G5ABm6Man18N6yCwXEc5s2bhxkzZmD8+PHIyMhAWloaFi9ejJ9/lmanPHbsGEaO\nHImNGzfi5ZdfbnG2VJe41gd0moMy6M3XqV/waKMRv79H7VsRLFKgSyeaIIoMymtsMDD+DRLLMPID\nyYoaq301xiA6Qo8eSdEqYYmLiAr5QfYlWq8QrhDP0aNTPPp3D5DPq8CzJnqUydtyJ+ok8eAEEyxW\nqQ+v1WvrFBPhnRvMpbFfn4iYCPd2RV2kxMXhum5SfWtNW90UUTrvS9k1LjSLVXTp0SMmJehxqUnx\nuHvINZh4R09527VxUh167j8hxoR4k8Z1FX8XTlC4EN3TSAiNMeifJj2HKZ28jWSEzoTru0uNT3PW\nslaWCUCTfnsVvB6CNQQ/msZz1ruL1LjGGmLw7EMDcV96GjpHxvodp/xtjMF7LUaQnhUjL93vrwf3\nQJTB6zoWeZ13UFwz0mUBAO6gP8MKIYun4OL8OkJaaK7q524vYo3eZyFa75uQwqjKYlLk9nv+dt2j\ngz+XzSHsA/fS09OxadMmbN68WR6DMWvWLIwaNQqAZIVkZWXh8OHD2LNnD9atW9fsa/CVKUiMb33K\nWHNQCUZliqoH7OKlqRfmPupN8R3V93p0jUhCDOMVlwifwRsjuqbD4RLkjBQjEwlnaU8AwJCkWyEK\nDIT6BMRGGRAdofc+/ACiDRFBsjcAoV7bjZZolHp5fHViyIJxXUoXJEcGdy96Jg5U+nRv6poK0R4B\nV0M0KmvdKZxOdUxo4rWSrzVSF+neb8CAToOk+yi/BqLAontUD0SapJdNVNTFqFt6IjFW+h0fQDDi\nWe2yj+s7AADgqkhBbERoz5TI69A3uWfQ43p1jcNDY67H5BHXytuu75wK0WGE0Cg1jF3iTH695ju6\njgDAyFlOejECrkqpIeDLu0PkOSQZU+SBYUadV5xNughc00X6uwcadxPPddHcLtRK22+JH4S+qf5j\nebQQeZ2qVx/wOI1nNS2uJ6L1UeihaOSUsULe5kL5zxYfwYjBzZ1vAgBwlu4QXTpEuweu9E5NwH8l\nSfd+7suj4K2C97qMdl0IARr4G+Ikq8lZ2rN54lmfEPQwrcHGQl0niC49ronpJm9jfSYkdZlTAV7x\nt2Yj0TO6p3TpmkQwvEF7rE0ruKJHeg/pOgjWQ6MhWDpfBgvD+yALNY
mwHR4tN8icTlKPa7t5X5zr\nuyXghSFPYWrqA/K2CL23QbIeGo1f9xgNp0uQXUscy8B14Ub0a5yGaTfeB9uRURBtUeiRGC1NeKd4\n6WKMkSoB0cKeeytsx37lF9hOik6A9eAYOM4MDCn3HpB6rnpOj7dG/N1vn+PczZK/GN6pv5Vz2XSK\nikavugxYTvVFbqE7k8Z9LybOhNeH/y9+/V8jAQBdIjoDkHrnM/o/gLdGvILOllthO3InkqO7yIKh\nrIsInSnoiP+HUh/FX4fOhu8MrCP7XIdOhRMx7YapoSc28Dpc0zkefx86129XVNlQGFnpPFqLeyVG\nxeKGhkkYl5KBJc+MQIRRJzcMXSI647Xh8/DwgLsAeEVVzxjhPNcP1oNj4Mzvg/8d+iJenj5cPqey\nxxqpM8lWmmYaLYCpPR7Bi0Nm+W0fe8tNGGB7GI8PeACdogKnW6vgdYDLiGHib/12/bHf7+TPCdH+\nYhxnjMXLt72AqTdM8tvXPbI7os6MgL40H+/NGiH3tmP0Ufjvfo/gzeEvgynuB9vRkRjRN1X+nefe\nr/3tLWC5qKAWxq3Mfegv+ns5pgwdAOvBMXBduDF095xLB9ERAduRkX777DlD5M+cxrLSoiMCMQV3\na86bdnPnm/Dszc9BqO6qeuc5wYTZt85Er4oH4Mzvi8TSe2DStS45x5crcvJBDxzLyq6K+Ji2rZhg\nqNNqGcQao5GSHI/C+lokd/YvC8fqYNKZ0LdnEgApkBtl1EOeecBlQKRRauQ8QWi7aAXAIMoQCaNe\nJ99rjyS3Ga54WCL1EcGtA14H0RorNa6KoGqXmEhvT0UI7ZHwTMIYqfd/8QVrtFyW7okm9L61BzJu\n74m/uudnjDQacH23KJw6Xw8R0pQJid1icUGQLBGlmyHW/bnR1QiO5RDJRqJrQhRKKqyIidAj0ugu\nr+i99widSTVOQ4teyQmIjtCDY1iVFWIy6DH/Manx/f5saFMpiLwe8TFGxEb6u82eGT8ciw+fhF2w\nq6aTl6+nM+GZ+wMPNlUuHSo6jUBEAwTWLv3t3YMMk+PiVb9RTvFvUghGIPdccnwskqP9e9YPjvb6\n30NNI/dYxz27dMFen+nFukWnyEHqO25OwVqfBQQjdCbN5wkA9DoO0ZX7UV5Wimee+j2c3VmwPU3I\n/PJbXEg9hTNn8vC/ry3Fq/94Cf/O/RKfOhx44IFp0PWU7v3Uwl1IHdUfMXE8sv+1B7GpXWAprIQ+\n1oieD/cH656S/vF7+sPBO/H40s9gzsqHyIvgInWIfNMhWQwuB4o374Wt6iLAMEi+sxfi+iSi7nQl\nLv50DqIoQhepR9pjA1F+6AAYZz5uHT4eJQByl+xFr9/eAkBE1c8rwZ8R0XihDqNe+hXefvt15Oae\nxPnKAsT1TUJc3Gjc0ee/cDo3F4sX///2zjwgynJ7/J+ZYdgZkE1kEVEUccEdUMktrpgrXEWvZup1\nrVxyqUS+Zd+ytG96vdXtdjXNTLMsb9mvm7bp1dJETZOstMUV0QABkX0GmOf3xzADA4MMCiLwfP5i\n3vV5D+/7nOc85zzn/I2iomIydVn4P9SVr/7+EYOejDTJ5vzmU/iO6YitnwIbpQ2d/Dw5cyGP0IDW\nFiR5ZzRthaFQsmZeJEXaUlMd5rtF1YV7M0Z05nBBeSoGC4kAjVla1TZKEh/qQ+r1fHKdfuZcpSzQ\nduU5qYwjyTJRitpGiZ+n+eiuTXlo76Du/hy5blgx7WBT+5SUh8aBrNxibO2gcr48D03FR9on2IfT\nxbWXfDQqDIuUVkyX6SljSrS5E9TTxQkP/4pOblAPX0q8s7iSWt3sdrUzWGk6fUW8Ukd/N36+mI2P\nh2MNFoZDrXVRnMsTRtY0ZQV1WJxZamO6XlVcbJ1NI35LU0KWLCFjidiq77TQGd4LPbdeBFe5/oqj\njQM25fewVB8DwENjX+vC2arWVklKCGXZ1ac7nNVO4KjiP99eorjYfGT94i9nKSoeBMAXP1W00TgC\n/0yfxoPR5lM4xvdBqVDwyCMLuXTpAlu27OC15M18d/IYWZczmLd6AT4+hrb8428v4eLiglarZc6c\naQxYEGO4kALG39eZayWlnMguos3kQHzHBnPpg5+4eSaDVmEVz2KjVOEU6EbHuQZFnnXyGh/9+11a\nufTlt+N7UTnbEzLfkAKkrLiU0gIdqZ/8QvCsPti62VNWntbF+E56t3LkWnkbjGRl/EH7sb3wHx2C\nq4cb8+bNx8XFhUf3PcH5raeYMdONmHB/pj44gVWr/o+QkM68cHgdV4vT6TiwG/v37SXhwVno8q+z\n/AclDq2daeVm6D9G929HgJczYcEeNf9Db5MmrjBUtG7VsOsvakKpUBLh04fDxw2LYezUSpNSqGz6\nz+gymYOp3xLSqiKvULCfK8F+ruSXuHH2xq/8/p3hZVUplSyOD+M/R+2xdSlhVPtoggZ2MCmSByLa\noivRm+ovuDk7QnmNFQcbe/Q3PdHnu1Ka1g6hszdLuT6gTTgTB0VSWqbn8W8/N20vve6Lb1jFiN5W\neetO0sfRG7VKTadWFaPPv4T8mTNZv9LFI4Sfs85yXOtoUl6VE9xNDhnP0T9O0MmzLcK94uvx93Im\nOHAoF3IvE99xrNn9YtoN43LuFTPTfHi/AAb39MXBzgbHcme6qDIl1ce7J99eO84D7e7HwcaeV069\nYdpvn1sRjVKZKN8Is9+1TUn5OvmgVKiY8ZcHTJ37hI5juXDzEu1d23E+5yLOaieLU0ITO8WSfP0n\nPByqz3GP6zCC9MIMpnSeYNo2d0wXvjljh975JJqcXmTXsA4AzKtO2tvYE+UXwanrpxnXYSR//yDZ\nUN60nJI/2pkWo1ZmqH+U2W/7WmRhTLBpo7JBWa7wHNQOlOnLsFGqKNWXoUCBQqFACPNAWDuVPXpR\natH5G99pHFt+3sGDnSdAXoXC+0tIHJfPXsC+c6hJWQB88MG7HDpkqLmRkZFBQWaeYb2FgB5BrelU\npOFD97dZ/KcFFJYU8sKh59HdMPjRHihPFa9UKNHdLOba++cozdfiqHTgYuB5fPsNJjnzdwL6RWH8\n8FT2NuT+molzOzdsy1dSt/MKRKlQkVpyBYUSfNwdKLnQAVHyPX1ahYO6mJutU3D0MwyGSvQl7N//\nBZ988jE5RTmU5WhxFHmkXrmMp6cXISGGAJq/9pzK9rPvMzU+nmUPL2D+/MVs2vQO8eMmkuacR2z5\nN6JUKujVyTqfU11pkgrDaNZWHY3ebaZ1mcTBTwy1I1QqpWkkV3nBUD+fXvTzsRxy6ax24vG+85nz\n34OmbWEdPMujXQZUOz5+qHmIXuVRtIONPQvjelGk7c7mMwZbf1HPubyabOgoHww1dD5qmwqZ6S52\npex6ABonW8I6eHD6fJbBF3KL2Zy2Gn+md/mL2bb7/CK5zy/S9Pfxvf81WRiVZRHlF0GUX3mnrDQ4\neDNvFhPg7YSrnSNP9l1Y7
X4aWxce77vAbJtSqcChfCpKbSxeJMwVhqPagYR+j5m2ze72EJt/2g6A\nT1F4tfvM6DK52v/pllYU0N41kMmdx5ttGxoQxdCAKNPfQCULo0IWg/0HMNi/+v8YoJW9WzWfQmRX\nHyK7+gAD2fbFr4Ah99Hi+LBq59tUeS9cbJ3L/TWgv3kV3cUu2AYZpkVn9x5f7fy53afRw6ub2Tb7\nKrJQt/3VbF3IsID7GN+x9gjHxMOruKnLY0CbcPb/P0Mk0HMzBtSY58jb0dP0f0zLq1g57+ngwZTO\n49mZXFFX4tSpk3z//QneeGMrtra2LFw4j8rGmL2NA2pbG3w0renUyjBoCPUM4ec0w/cyun2M6dir\ne37De2BbEiclUHw5j7fe2kRg6/LpwSrTtlVDvrt6hjKmfQzjNvwPQgjUNipKr3ZEX6xioO9AvFzt\nSXLYh1KhRC/0ZKZfZ+/OXbz55nacnJxZvfpZdDpttev6OvuwvFwW/fpFcOjQQQ4c2MfmzdtxcWm4\nNWiVaZpO7/LBaWMrjMooFFQaSVqfNlylvP1nqNox9OroxYBubVgzL5LnZlbvFKuiLzS8ZPa2Khb8\nuTuLJoTRJeDWkU9WO4ItWBhVeWp6X5ZP6YV3fViJVaakqu2uNPVkmsaqhKXww9oUhrWysLmN9+JW\nGG0zJ3sbUyhtZdQK8/ei+gUqeqLw0Orz3LcnC+um7yrLomuQQWG0stL/6OjoSGFhzaOZgoJ8XFxc\nsLW15fLlS/z8808mqx/AVmmMqKvcE1tem6LXlmGjscPPuQ2fffYpAKP6B9ItrA/ZP1WsWi8rKsEp\nQEP+pRx0OYbsC/oiw/95YJ/OaG9epXt7d4pvplJSdANHu4piakZZFBYW4uDggKOjE9nZWRw9egSA\nwMB2ZGVl8ssvZ03H6cvrqI8ePY6XX15HaGjXu6YsoIlaGEoUlCEslrhsLOzUKtztDPPybna3XgVe\nlRHhbU3RRHXB3MKo6CSN03QXbt46Umh8eA/Ss3SmKa6ewZ5cyr21s9jGQrnZqswaFcpnqRe4QSau\ndjW/zBpHWzRt66cSWGULw1JkiLFzF0JhSA5XhdaO1U14S4qnMtY6gr0cPEjJSzVz5t8Jrs6G+3q3\nstw+o6WrVqqrTfMsndSDXT/kkImluH4D7vbVp8nqS3l6O3qRVXwDZ1tHpsX3oEwvrB40aTSudO/e\ng+nT/0JExAD69x9otj8iYgAff/whM2ZMoW3bQLp1627qIxQKpclPU9lfY5yCrfr/bz20HZd3/khi\n0jK6dOlGWtofONjZ8NcZs0l8IYFfXzsGSgU+Q4NwDfXCf2xnLr33I0JAgfc1xr0+ikUz47n221Ge\nXj6Pm8Vu2Dp5mSxjhUKBj6M3KXmpBAa1I7tjCA89NAlfXz/CwnoAYGNjw7PPruHvf38JrVaLvb09\nL7/8Ovb29oSEdMbJyYlRo+48y3ddaJIKw7D0lHrJcHunPDWtLz9fzCLA2xlvj6GUCT2D/PvX6RqV\no1HqgrFjUCqU2FpwngZp2jKyXTTdvbqYbV/Ycw452ptEtqk+l19bJ1lioQRqVQZ2b0Ovzg/y5eUD\npmmZhiKgtaET9nXXcB3DXLsly7OLRwhlf3SgJNMHx7AKWT3aYyZFJUUW667X1kmKGpzIVZnYKZZW\n9m6mUOE7JaZfW7S6Mob29rO43+jDsNT+bkEehAaO49OLDkT49DHbN7f7dMpEmUX51fZeqKy09h8K\nnciBK4cZHjgUpVJhVsLVGlauXGX2u1evimdQq9WsW2eet2nvxa84e/EiUStGotG4otG48vbbO037\nl89NYM/FrxhYxX+1dMISbOJVhHl1Ndvu6a7BN3I0duX5voxoOnqg6WhwMj/Y2RA6b+jg/wnAzBcN\nU9e+bQzrKt5+eyfZxTf4JjWJoQH38UBitMXn7dw5lI0b36q2PTPzOkII+vWLtHBWw9E0FcY9RHtf\njWm9hb2NHbHBI+/avdXloycHG8tRLgqFglHtqxek6uxevbCPkdo6Sa0VCgMM4bZ3QxZd27mzYmpv\nrul/4YNz39XYsSkVSsZ3Gsl7V36nf9cKJ2lXj5qzHdbWSWr11iU9dLZ1Ii54lFXHWoOdraqaP6sy\nxiipmtqvUqosxvf3qNI5Vqa290Jn5SpyVzvNXf1GjBaWQ03huiq1xfb09q7uGwJDgMbU+7uy6+oJ\ni/vBfPrTyEsP96ekSu13d/tWtyWLzz/fw6ZN/2LRoqV1PvdOaZIKw5i6+25mw70XsVEZRsrVcvvf\nAbVdS1dmXVnWu0lHfzdupBmmFm7VsUX39WdYHz+rp0BqVZ6l1mfJvZsYpypra39dqH0gca/Kov6/\nkVB/L2PMgUV0FmThWY+ZKEaMGMWIEfU3AKkL947XuA40/kTUvUFDdAxqlWW/h/HDC27V3uL+xsbY\nvqrRPJVRKBR1CjKoKZTU6LsIcg20uL+xsbmLCsMoi7Yu/hb3NzYNIosarBXjtHAb5/pNx3Ev0SQt\nDGNioqppjVsatU093CkrIx6nVJRRUFJAB9cgLuam0P6e7SQN03OO9dgxVPZrPNd/BcVlxRSUFNDe\ntR2Xcq/QwbVdvd2rPmmITrKyU/v5AYkUlhZRUFJIkGsgKbmpdHBrV2/3qk8qZFF/30hla+W5/ivQ\nlmkpKCkgUNOWq/nX7tmBRH3QJBWG0cJo6QrjVs7N+sDN3s0sXDLYrfbstI2FNRbGneBu72bmJ7q3\nZVH/70XlZ29l70YrKlbq36vKAhreCq+68LI5KwtoslNS5S9vy9YXDTJ6qoy1kS/3AkZZODaQLO6F\niDxrMc3bN5AsmhINoTBaMk2nR7BAS7cw3O1b4ah2IMDFcnjl7RLqbsj9dC+tc6kNTwd37G3s6l0W\nHVzbWQxZvpfxcvTEzsaOAGff2g+uA/7Ovrio62ctye2Qn5/P7t3/rtM53o5e2Kls8Xfx5YMP3kOr\nrZ+gDU8HDzzt3evlWk0JhWiCoUbTPlxMcanW6nQEzRl3D0eys2692K6uCCHQC73FtQn3MlIWFTSU\nLBozJc8ff1xj+fIlbNv2fp3OM8oiPn4sb765HY2mbgtrLWFM5FhXWZSVlaFSNd67dKdlrJuoD0M6\nvY00REemUCialHVhRMqigoaShaIRYxQ3bHiNa9euMnPmg/TtG8Gjjy7i3Xe3c+DAV5SUlDJo0BBm\nzpxLcXExK1cmcP16Bnq9noULF3DpUiqZmddZuPBh3NzceOWVf5lde+vWzXz77SF0Oi3duoXxxBOJ\nAFy9msratavJyclBpVKxatWL+Pr68d672/nyy89QKpVERg5k3rz5LFw4jwULlhAS0pmbN3OYPXsa\nu3Z9wmeffcqRI4fR6bQUF2t58cW/kZCwjPz8PEpLS5kz52Giosoz9n72KTt37kCpVNChQ0eWLl3O\n9OmT2bnzI1QqFYWFBeW/dzeK4mmSCqOS11sikTQCH537lFMZP9brNXt5d+fPwaNr3F
85vTnAd98d\nJTU1hU2btiGEYPnypfzwQzI5Odl4enrx0ksvA+DgoKBvX8H777/HP/6xEY2mekXA8eMnMWPGbABW\nrVrJkSOHGTAgimeffYpp0/5KVNRgSkpK0Ov1HD16hMOHv2HTpm3Y2tqSl5dXQ4srlOvPP//Itm3v\n4+zsjF6vZ82adTg6OnLzZg7z5hmuf+HCed55Zyv/+tcWNBoNeXl5ODo60rt3H5KSDhMVNZh9+75k\nyJD7G81KaZIKQ1oYEonk+PFjfPfdcWbOfBAhBEVFxaSmphAW1pN//vMVNmx4jf79o4iOvo+iojwM\nI0zLfcbJk8d5993taLXF5OXl0b59B3r27E1m5nXT6F+tNviyTpw4zqhRY7C1NUQQWpP8r1+/CJyd\nDf4fvV7Pxo2vkZx8CqVSQWbmdW7cyObUqRMMGXK/SaEZrzt69DjefXc7UVGD2bv3PyxfXr2y492i\nSSoMiUTSuPw5ePQtrYG7gRCChx6awdixcdX2vfnmOyQlfcvGja/x228/Eh//UI3X0el0rF//Elu2\nvIOnpxdbtryBTqejJuVicPtWn5pTqVSm/GKG8ytwqFQf/quvPicnJ4e33tqBUqkkPn4sWq2uxswV\n3bv3IC3t/0hO/h69Xk9QUOMtnm2SUVJyRkoiaXlUTW8eERHJnj2fUFRkSCtuGKnfIDMzEzs7O4YP\nH8HkyVM5c+ZM+flOFBQUVLuuTqdDoTBkwy0sLOTgwf2m4729W3Po0EEASkpK0GqLCQ833FerNRRe\nys3NBaBNGz9++cVwrwMH9tX4HPn5+bRq5Y5SqeT770+Qlmao89GnTzgHDuwjN/em2XUBYmJG8r//\n+z+MGjXW4jXvFk3Swujg3o7T6WfxtJCGWSKRNE+qpjd/9NFFXLp0iYcf/itgUChPP72K1NQr/POf\nr6BUKrCxUfPCC4YMt2PHxvL444vw9PQyc3o7OzszZkwc06ZNok0bX0JDK5IwPvXUs6xdu5rNmzei\nVqtZtepFIiL6c+7cb8yaNQ1bWzWRkQOZO/dRJk9+kKefXsEXX3xGnz79anyO4cNHsHz5UubMmUZw\ncAiBgYZFoEFB7Zk2bSYLFsxFpVLRsWMIiYnPlJ/zAJs3byA6unoy0btJkwyrzdXm89WZIwzw7Wex\nrGNLwsvLhevXa3K6tSykLCqQsqigOcjiwIF9fPvtIZ566tk7uk6LDKvV2DnXueaERCKRNEVefnkt\nR48msW7dK43dlKapMCQSiaSlsHjxE43dBBNN0uktkUgkkruPVBgSiUQisQqpMCQSiURiFQ2uML75\n5htGjBhBTEwMb7zxRrX9Op2OJUuWMHz4cCZNmsS1a9caukkSiUQiuQ0aVGHo9XpWrVrFm2++yaef\nfsqePXs4f/682TH//ve/cXV15csvv2T69OmsXbu2IZskkUgkktukQRXG6dOnCQwMxM/PD7VazahR\no9i/f7/ZMfv37ycuzrC0PyYmhqSkpIZskkQikUhukwZVGOnp6bRp08b0u3Xr1mRkZJgdk5GRgY+P\noWi6SqVCo9GQk5PTkM2SSCQSyW3QoArDmkXkVY8RQjSpcpgSiUTSUmjQhXs+Pj5mTuz09HS8vb2r\nHZOWlkbr1q0pKysjPz8fV9faK2Ld6RL35oSURQVSFhVIWVQgZVE/NKiF0b17d1JSUrh69So6nY49\ne/Zw//33mx0zdOhQdu/eDcDnn39OZGRkQzZJIpFIJLdJgycf/Oabb3jhhRcQQjBhwgTmzp3Ls8Qs\nGwAACZ5JREFUq6++Svfu3Rk6dCg6nY4nnniCs2fP4ubmxvr16/H392/IJkkkEonkNmiS2WolEolE\ncveRK70lEolEYhVSYUgkEonEKqTCkEgkEolVNDmFUVtuquZGYmIiAwYMYMyYMaZtN2/eZObMmcTE\nxDBr1izy8iqqiT3//PMMHz6ccePGcfbs2cZocoOQlpbGtGnTGDlyJGPGjGHbtm1Ay5SFTqcjPj6e\n2NhYxowZw2uvvQZAamoqEydOJCYmhqVLl1JaWmo6vrnna9Pr9cTFxfHwww8DLVcWw4YNY+zYscTG\nxjJhwgSgnr8R0YQoKysT0dHRIjU1Veh0OjF27Fhx7ty5xm5Wg/Ldd9+JM2fOiNGjR5u2vfTSS+KN\nN94QQgixceNGsXbtWiGEEAcPHhRz5swRQgiRnJws4uPj736DG4iMjAxx5swZIYQQ+fn5Yvjw4eLc\nuXMtUhZCCFFYWCiEEKK0tFTEx8eL5ORk8dhjj4m9e/cKIYRYuXKleO+994QQQuzYsUM888wzQggh\n9uzZIxYvXtwobW5I3nrrLbFs2TIxb948IYRosbIYNmyYyMnJMdtWn99Ik7IwrMlN1dzo27cvGo3G\nbFvl/FtxcXEmGezfv5/Y2FgAevToQV5eHpmZmXe3wQ2El5cXoaGhADg5OdGhQwfS09NbpCwAHBwc\nAMOIubS0FIVCwbFjx4iJiQEMsti3bx/Q/PO1paWl8fXXXxMfH2/advTo0RYpCyEEer3ebFt9fiNN\nSmFYk5uqJZCdnY2npydg6Eizs7MB87xcYJBPenp6o7SxIUlNTeWXX36hR48eZGVltUhZ6PV6YmNj\nGThwIAMHDiQgIACNRoNSafikfXx8TM/b3PO1rV69mieffNKUUujGjRu4urq2SFkoFApmzZrF+PHj\n2bVrF0C9fiNNqqa3kEtGbokl+TS3vFwFBQUsWrSIxMREnJycany+5i4LpVLJxx9/TH5+PvPnz69W\nNgAqnreqLEQzytd28OBBPD09CQ0N5dixY4Dh+ao+c0uQBcDOnTtNSmHmzJkEBQXV6zfSpBSGNbmp\nWgIeHh5kZmbi6enJ9evXcXd3BwwjhLS0NNNxaWlpzUo+paWlLFq0iHHjxhEdHQ20XFkYcXZ2pl+/\nfvzwww/k5uai1+tRKpVmz2uURV3ztTUFvv/+e/773//y9ddfo9VqKSgoYPXq1eTl5bU4WYDBggBw\nd3cnOjqa06dP1+s30qSmpKzJTdUcqToSGDZsGB999BEAu3fvNsng/vvv5+OPPwYgOTkZjUZjMkWb\nA4mJiQQHBzN9+nTTtpYoi+zsbFOkS3FxMUlJSQQHBxMREcHnn38OmMti2LBhzTZf29KlSzl48CD7\n9+9n/fr1REREsG7duhYpi6KiIgoKCgAoLCzk8OHDdOrUqV6/kSaXGsRSbqrmzLJlyzh27Bg5OTl4\nenqycOFCoqOjeeyxx/jjjz/w9fXllVdeMTnGn3vuOQ4dOoSDgwNr1qyha9eujfwE9cPJkyeZOnUq\nnTp1QqFQoFAoWLJkCWFhYSxevLhFyeLXX38lISEBvV6PXq9n5MiRPPLII1y5coWlS5eSm5tLaGgo\na9euRa1Wt5h8bcePH2fLli1s2LChRcriypUrLFiwAIVCQVlZGWPGjGHu3Lnk5OTU2zfS5BSGRCKR\nSBqHJjUlJZFIJJLGQyoMiUQikViFVBgSi
UQisQqpMCQSiURiFVJhSCQSicQqpMKQSCQSiVVIhSFp\n0kycOJG4uDhGjRpF165diYuLIy4ujsTExDpfa/bs2Valu16xYgXJycm309w6cebMGb744osGv49E\nYi1yHYakWXD16lUmTJhwy+yjxlQRTYVdu3aRlJTE+vXrG7spEgnQxHJJSSR1ISkpibVr19KzZ0/O\nnDnD/Pnzyc7OZseOHaaCOgkJCYSHhwMwePBgtm7dSlBQEFOmTKFXr16cOnWKjIwMRo8ezeLFiwGY\nMmUKjz76KFFRUTzxxBM4Oztz/vx50tPT6d27N2vWrAEMuXmefPJJbty4QUBAAGVlZQwbNoxJkyaZ\ntTMzM5Nly5Zx48YNAKKiopg9ezavv/46hYWFxMXFERERQUJCAqdOnWL9+vUUFRUBsGjRIgYNGkRK\nSgpTpkxh9OjRnDx5Ep1OxzPPPEPv3r3viqwlLYQ7KdYhkdwrpKamisjISLNtR44cEV26dBE//vij\naVvl4jLnzp0TQ4YMMf0eNGiQuHDhghBCiMmTJ4tly5YJIYTIzc0V4eHhIjU11bTv0KFDQgghHn/8\ncTF16lRRUlIitFqtGDFihDh27JgQQohHHnlEbNq0SQghxJUrV0SvXr3Ezp07q7V98+bNYuXKlabf\nubm5QgghPvjgA7F06VKztsfGxoqsrCwhhBBpaWli0KBBIj8/X1y+fFmEhISIPXv2mJ59yJAhorS0\n1HohSiS1IC0MSbOmffv2dOvWzfT70qVLvPrqq2RkZKBSqcjIyCAnJwc3N7dq5z7wwAMAuLi4EBQU\nREpKCn5+ftWO+9Of/oSNjeFT6tKlCykpKYSHh3Ps2DGef/55APz9/U2WTFV69uzJO++8w7p16+jX\nrx9RUVEWjzt58iSpqanMmjXLlJBSpVJx5coVHB0dcXBwYOTIkQD0798flUrFpUuX6NChg7Xikkhu\niVQYkmaNk5OT2e8lS5bwzDPPMHjwYPR6PWFhYWi1Wovn2tnZmf5WKpWUlZXV6Thr6yz06dOH3bt3\nc+TIET788EM2b97M9u3bqx0nhKBr165s3bq12r6UlJRq2/R6fbOq9SBpfJqOB1AiqQVhRfxGfn6+\nKTvpzp07a1QC9UF4eLgprfTVq1c5fvy4xeNSU1NxdnZm5MiRJCQk8NNPPwGGWhfGNOYAvXv35ty5\nc5w4ccK07fTp06a/i4qK2Lt3L2AoUQoQGBhYvw8ladFIC0PSbLBmNJ2YmMjcuXNp06YNERERuLi4\nWDy/6rVq2ner455++mmWL1/Onj17aN++Pb179za7n5GkpCS2bduGSqVCCMGqVasAGDhwIG+//Tax\nsbFERkaSkJDA66+/ztq1a8nLy6OkpISAgAA2bNgAgKenJ7///jvx8fHodDrWr1+PSqWqVSYSibXI\nsFqJpIHQarWo1WqUSiXp6enEx8ezY8cOAgIC6v1exiipw4cP1/u1JRIj0sKQSBqICxcusGLFCoQQ\n6PV6lixZ0iDKQiK5W0gLQyKRSCRWIZ3eEolEIrEKqTAkEolEYhVSYUgkEonEKqTCkEgkEolVSIUh\nkUgkEquQCkMikUgkVvH/AcQ/YGad+SX7AAAAAElFTkSuQmCC\n",
-            "text/plain": [
-              "\u003cmatplotlib.figure.Figure at 0x7f971b401110\u003e"
-            ]
-          },
-          "metadata": {
-            "tags": []
-          },
-          "output_type": "display_data"
-        }
-      ],
-      "source": [
-        "with tf.Graph().as_default():\n",
-        "  hp = tf.contrib.training.HParams(\n",
-        "      learning_rate=0.05,\n",
-        "      max_steps=max_steps,\n",
-        "  )\n",
-        "  train_ds = setup_mnist_data(True, hp, 500)\n",
-        "  test_ds = setup_mnist_data(False, hp, 100)\n",
-        "  tf_train = autograph.to_graph(train)\n",
-        "  (train_losses_, test_losses_, train_accuracies_,\n",
-        "   test_accuracies_) = tf_train(train_ds, test_ds, hp)\n",
-        "\n",
-        "  with tf.Session() as sess:\n",
-        "    durations = []\n",
-        "    for t in range(burn_ins + trials):\n",
-        "      sess.run(tf.global_variables_initializer())\n",
-        "      start = time.time()\n",
-        "      (train_losses, test_losses, train_accuracies,\n",
-        "       test_accuracies) = sess.run([train_losses_, \n",
-        "                                    test_losses_, \n",
-        "                                    train_accuracies_,\n",
-        "                                    test_accuracies_])\n",
-        "      if t \u003c burn_ins:\n",
-        "        continue\n",
-        "      duration = time.time() - start\n",
-        "      durations.append(duration)\n",
-        "      print('Duration:', duration)\n",
-        "\n",
-        "    print('Mean duration:', np.mean(durations), '+/-', np.std(durations))\n",
-        "    plt.title('MNIST train/test losses')\n",
-        "    plt.plot(train_losses, label='train loss')\n",
-        "    plt.plot(test_losses, label='test loss')\n",
-        "    plt.legend()\n",
-        "    plt.xlabel('Training step')\n",
-        "    plt.ylabel('Loss')\n",
-        "    plt.show()\n",
-        "    plt.title('MNIST train/test accuracies')\n",
-        "    plt.plot(train_accuracies, label='train accuracy')\n",
-        "    plt.plot(test_accuracies, label='test accuracy')\n",
-        "    print('test_accuracy', test_accuracies[-1])\n",
-        "    plt.legend(loc='lower right')\n",
-        "    plt.xlabel('Training step')\n",
-        "    plt.ylabel('Accuracy')\n",
-        "    plt.show()\n"
-      ]
-    },
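The removed cell above is the AutoGraph benchmark: autograph.to_graph(train) rewrites the Python train function's loops and conditionals into graph ops before the session executes them. As an illustration of the kind of rewrite AutoGraph performs (a hand-written sketch, not AutoGraph's actual output; assumes TF 1.x), a Python while loop lowers to tf.while_loop:

    import tensorflow as tf

    def summed_below(n):
      # Graph-mode form of: while i < n: total += i; i += 1
      i = tf.constant(0)
      total = tf.constant(0)
      _, total = tf.while_loop(
          cond=lambda i, total: i < n,
          body=lambda i, total: (i + 1, total + i),
          loop_vars=(i, total))
      return total

    with tf.Graph().as_default():
      result = summed_below(tf.constant(5))
      with tf.Session() as sess:
        print(sess.run(result))  # 0+1+2+3+4 = 10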
-    {
-      "cell_type": "markdown",
-      "metadata": {
-        "colab_type": "text",
-        "id": "A06kdgtZtlce"
-      },
-      "source": [
-        "# Eager"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": 0,
-      "metadata": {
-        "colab": {
-          "autoexec": {
-            "startup": false,
-            "wait_interval": 0
-          }
-        },
-        "colab_type": "code",
-        "id": "hBKOKGrWty4e"
-      },
-      "outputs": [],
-      "source": [
-        "def predict(m, x, y):\n",
-        "  y_p = m(x)\n",
-        "  losses = tf.keras.losses.categorical_crossentropy(tf.cast(y, tf.float32), y_p)\n",
-        "  l = tf.reduce_mean(losses)\n",
-        "  accuracies = tf.keras.metrics.categorical_accuracy(y, y_p)\n",
-        "  accuracy = tf.reduce_mean(accuracies)\n",
-        "  return l, accuracy\n"
-      ]
-    },
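The predict helper removed above leans on two Keras ops whose math is simple to check by hand. A minimal NumPy sketch with made-up probabilities (not data from the notebook; Keras additionally clips and normalizes predictions, which this omits):

    import numpy as np

    y = np.array([[0., 1., 0.], [1., 0., 0.]])          # one-hot labels
    y_p = np.array([[0.1, 0.8, 0.1], [0.3, 0.4, 0.3]])  # predicted probabilities

    # categorical_crossentropy per example: -sum(y * log(y_p))
    losses = -np.sum(y * np.log(y_p), axis=-1)
    print(losses.mean())          # what tf.reduce_mean(losses) returns

    # categorical_accuracy per example: do the argmaxes agree?
    print(np.mean(np.argmax(y, -1) == np.argmax(y_p, -1)))  # 0.5 here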
-    {
-      "cell_type": "code",
-      "execution_count": 0,
-      "metadata": {
-        "colab": {
-          "autoexec": {
-            "startup": false,
-            "wait_interval": 0
-          }
-        },
-        "colab_type": "code",
-        "id": "HCgTZ0MTt6vt"
-      },
-      "outputs": [],
-      "source": [
-        "def train(ds, hp):\n",
-        "  m = mlp_model((28 * 28,))\n",
-        "  opt = tf.train.MomentumOptimizer(hp.learning_rate, 0.9)\n",
-        "  train_losses = []\n",
-        "  test_losses = []\n",
-        "  train_accuracies = []\n",
-        "  test_accuracies = []\n",
-        "  i = 0\n",
-        "  train_test_itr = tfe.Iterator(ds)\n",
-        "  for (train_x, train_y), (test_x, test_y) in train_test_itr:\n",
-        "    train_x = tf.to_float(tf.reshape(train_x, (-1, 28 * 28)))\n",
-        "    train_y = tf.one_hot(tf.squeeze(train_y), 10)\n",
-        "    test_x = tf.to_float(tf.reshape(test_x, (-1, 28 * 28)))\n",
-        "    test_y = tf.one_hot(tf.squeeze(test_y), 10)\n",
-        "    if i \u003e hp.max_steps:\n",
-        "      break\n",
-        "    with tf.GradientTape() as tape:\n",
-        "      step_train_loss, step_train_accuracy = predict(m, train_x, train_y)\n",
-        "    grad = tape.gradient(step_train_loss, m.variables)\n",
-        "    opt.apply_gradients(zip(grad, m.variables))\n",
-        "    step_test_loss, step_test_accuracy = predict(m, test_x, test_y)\n",
-        "\n",
-        "    train_losses.append(step_train_loss)\n",
-        "    test_losses.append(step_test_loss)\n",
-        "    train_accuracies.append(step_train_accuracy)\n",
-        "    test_accuracies.append(step_test_accuracy)\n",
-        "    i += 1\n",
-        "  return train_losses, test_losses, train_accuracies, test_accuracies\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": 40,
-      "metadata": {
-        "colab": {
-          "autoexec": {
-            "startup": false,
-            "wait_interval": 0
-          },
-          "height": 789
-        },
-        "colab_type": "code",
-        "executionInfo": {
-          "elapsed": 56025,
-          "status": "ok",
-          "timestamp": 1531163800231,
-          "user": {
-            "displayName": "",
-            "photoUrl": "",
-            "userId": ""
-          },
-          "user_tz": 240
-        },
-        "id": "plv_yrn_t8Dy",
-        "outputId": "68be955d-61dd-43e4-b540-3794e3c8f990"
-      },
-      "outputs": [
-        {
-          "name": "stdout",
-          "output_type": "stream",
-          "text": [
-            "Duration: 4.2232978344\n",
-            "Duration: 4.2386469841\n",
-            "Duration: 4.24286484718\n",
-            "Duration: 4.24036884308\n",
-            "Duration: 4.25758385658\n",
-            "Duration: 4.23242998123\n",
-            "Duration: 4.4213449955\n",
-            "Duration: 4.29613113403\n",
-            "Duration: 4.28209114075\n",
-            "Duration: 4.24192905426\n",
-            "Mean duration: 4.26766886711 +/- 0.055508619589\n"
-          ]
-        },
-        {
-          "data": {
-            "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYwAAAEcCAYAAADUX4MJAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsnXdgFGX6x78zW7KbTSE9JIA0pQkIooCgqBx2qiK/O0XU\n8zyFAw/w8MSCFcuJCHqKoFiwIHIgIgooaGjSCU1aaCEJ6W1btszM74/ZmZ2tWchuQjbP55/s7szO\nvDPZeb/vU97nZQRBEEAQBEEQ9cA2dQMIgiCI5gEJBkEQBBESJBgEQRBESJBgEARBECFBgkEQBEGE\nBAkGQRAEERIkGARBEERIkGAQRIjs3r0bt99+e1M3o14KCwvRtWtX8Dzf1E0hogwSDKLB3HzzzejZ\nsyeqq6s9Ph85ciS6du2KoqIiAMC///1vdO3aFQcPHpT3yc/PR9euXeX348ePx/Lly+X3CxYswNCh\nQ9G3b1/ceOONmDZtGgDgrrvuQt++fdG3b190794dvXr1Qp8+fdC3b18sXLjQp43vvfceZsyY0aDr\n7NevH3766acL+s6HH36IuXPnYufOnRgyZEiDzi/hfY/8wTBMWM5FEErUTd0AIjpo06YN1qxZg/vu\nuw8AcPz4cdhsNo+Oi2EYtGrVCu+88w4+/vhjj8/9sXLlSqxevRqfffYZ2rRpg4qKCmzcuBEA8MMP\nP8j7jR8/HqNGjcLdd9/doGsQBCHsHW1OTg6efPJJOBwO6sSJZg9ZGERYGDlyJFauXCm/X7lyJUaP\nHu2z3+jRo3Hs2DHs3r273mMeOnQIgwcPRps2bQAAKSkpGDt2rN99g1W42bx5MxYsWIAff/wRffr0\nwahRowCIQjN37lz8+c9/xlVXXYWCggKsWLECd9xxB/r27Ythw4bhm2++kY/jbSXcfPPNWLx4MUaM\nGIFrrrkG06ZNg91ul7fX1tbi7Nmz6N69Ox599FGUlpbKVlBZWRkEQcDChQsxbNgwDBgwAFOnTkVt\nbS0AwG6341//+hf69++Pa665BmPHjkVlZSXmzp2LPXv24OWXX0bfvn3xyiuv1HsfS0tL8fjjj6N/\n//649dZb8e2338rbDhw4gLvvvhtXX301Bg8ejDfeeCPo+QHAZDLhmWeeweDBgzFkyBC888478v3P\nz8/H+PHj0a9fPwwcOFC2CInogCwMIiz07t0bq1atwqlTp9C+fXusXbsWX331FebOneuxn06nw2OP\nPYa3334bX331Vb3HfPXVV5Geno7+/fuje/fuYNkLH+Ncf/31eOyxx5Cfn48333zTY9vq1auxaNEi\ndOjQATzPIyUlBQsXLkSbNm2we/duPPLII+jVqxe6desGwNcaWrt2LRYvXgytVov/+7//w8qVKzFu\n3DgAwJYtWzBgwADodDosWrQIM2bMwG+//SZ/99NPP8XGjRvx5ZdfIikpCa+88gpefPFFzJkzBytX\nroTJZMLmzZuh0Whw5MgRxMTEYOrUqdi7dy9GjhyJe+65J6TrnzZtGrp06YL58+fj5MmTeOihh9C2\nbVsMGDAAs2fPxoQJEzBixAhYrVacOHECAAKeHwBmzJiB9PR0bNiwAWazGY899hiysrJw7733Yt68\neRg8eDCWLFkCu92OQ4cOXfD/i7h0IQuDCBsjR47Ed999h61bt6Jjx45IT0/3u9+9996L8+fPY/Pm\nzUGPN2LECDz33HPYunUrxo8fj+uuu85vfKIhjB49Gp06dQLLslCr1RgyZIhs0fTr1w+DBg0Kag09\n8MADSE1NRUJCAm666SYcOXJE3vbbb78FjVssW7YM//znP5Geng6NRoNJkyZh3bp14HkearUa1dXV\nOH36NBiGQffu3WEwGC74+s6fP499+/bhySefhEajQdeuXTF27FisWrUKAKBWq5Gfn4+qqiro9Xr0\n6tVL/tzf+SsqKrB582bMnDkTMTExSE5OxoQJE7BmzRr5e4WFhSgpKYFWq0Xfvn0vuM3EpQtZGETY\nGDFiBO6//34UFBRg5MiRAffTarWYOHEi5s2bhzlz5gQ95l133YW77roLHMfhl19+wfTp09GjRw8M\nGjQoLG3OzMz0eJ+Tk4P3338fZ86cAc/zqKurQ5cuXQJ+PyUlRX6t1+tRVlYGQHSRbdu2DU8//XTA\n7xYVFeEf//iHbDUJggC1Wo3y8nKMHDkSxcXFmDZtGoxGI4YPH45p06ZBpVJd0PWVlZUhMTERer1e\n/iwrKwuHDx8GAMyePRvz5s3D7bffjrZt22LSpEm48cYbfc4/YsQITJ06FYWFhXA6nRg8eLDcZkEQ\n0Lp1awCi9fHOO+/gnnvuQatWrfDggw82OLZEXDqQYBBhIysrC9nZ2di0aRNmz54ddN8xY8bgo48+\nws8//xzSsVUqFW699VYsXLgQJ06cCJtgKF1MdrsdTzzxBP7zn/9g6NChYFkWkyZNChofCcTBgwfR\npk0bJCUl+ZxHonXr1pg9ezb69Onj9xiTJk3CpEmTUFRUhL/97W/o2LEj7r777gsKnqenp6OmpgYW\niwWxsbEARKtDsv7atWsni/a6deswZcoU7Ny5Ezqdzuf8HTp0wA033ICYmBjs2LHDbztSUlLw8ssv\nAwD27NmDhx56CNdeey3atm0bcpuJSxdySRFhZfbs2fjss8+g0+mC7qdSqfCPf/wDixYtCrjPypUr\nkZOTA7PZDEEQkJOTg5MnT8pukwshNTUVhYWFQTt/h8MBh8OBpKQksCyLnJwcbN269YLPBYjuqBtu\nuEF+n5KSgurqaphMJvmzcePG4e2335bTjisrK7FhwwYAwI4dO3D8+HHwPI/Y2Fio1WrZukhNTcW5\nc+eCnl+6zszMTPTp0wdvv/027HY7jh49iuXLl2PEiBEAgO+//14OZsfHx4NhGLAsG/D8aWlpGDRo\nEGbPng2TyQRBEHDu3Dns2rULgBjTKSkpAQAkJCSAZdmLijsRlyYRtTCKi4sxY8YMlJeXQ6VSYezY\nsXjggQc89tm5cycmTpwoj0CGDRuGiRMnRrJZRJhRjjS9R5LBRsN33XUXFi5cCKPR6Hf/uLg4LFiw\nAKdOnQLHccjKysILL7zg4xcPZcR922234fvvv0f//v3Rpk0brFixwud7BoMBzzzzDJ544gk4HA7c\ndNNNGDp0aMBjBjtvTk4OXnrpJfl9x44dceedd2Lo0KEQBAFr1qzBhAkTAAAPP/wwysrKkJKSgttv\nvx1Dhw5FeXk5Zs2ahZKSEhgMBtxxxx1yJ//AAw/gqaeewtKlSzFixAg888wzQds2Z84czJo1C9df\nfz0SExPxxBNPYODAgQDEDLLXX38ddXV1yM7Oxty5c6HVaoOe/4033sBbb72FO++8ExaLBW3btsUj\njzwCQLSsJDFJTU3FM888g+zs7KD/G6L5wERyxb2ysjKUl5ejW7duMJvNGDNmDN5//3106tRJ3mfn\nzp1YvHgxFixYEKlmEE
SjUlFRgVGjRtUb1CeI5kZEbcW0tDQ5HdFgMKBTp04oLS2N5CkJoskxGo1B\ng90E0VxptKB3QUEBjh496tf/nJubi1GjRiE9PR0zZsxA586dG6tZBBF22rdvj/bt2zd1Mwgi7ETU\nJSVhNpsxfvx4TJw4EX/60598trEsC71ej5ycHMyePRvr1q2LdJMIgiCICyTi6QtOpxNTpkzByJEj\nfcQCEF1VUo74kCFD4HA4fIrYedMIGkcQBEF4EXGX1MyZM9G5c2c5I8Sb8vJypKamAhDr2gBAq1at\ngh6TYRiUlRmD7tNSSEuLp3vhgu6FG7oXbuheuElLi2/Q9yMqGHv27MHq1atxxRVXYNSoUWAYBlOn\nTkVRUREYhsG4ceOwbt06fP3111Cr1dDpdD61hwiCIIhLg0aJYUQCGjGI0OjJDd0LN3Qv3NC9cNNQ\nC4OmYBIEQRAhQYJBEARBhAQJBkEQBBESJBgEQRBESJBgEARBECFBgkEQBKHAZDJh5crlF/XdGTP+\nCbPZVP+OLhYvXoilS7+4qHM1BSQYBEEQCozGWqxc+a3fbTzPB/3um2++A4MhLhLNuiSgFfcIgiAU\nLFjwHoqKCvHww/ehX7/+GDhwED75ZBFSUlKRl3ccS5Ysw9NPP4myslLY7TaMHftnDB8+CgAwduwI\nfPzxElgsFjz55BT07HkVDh3aj7S0DLz++hxotdqA5z1x4hjeeut12Gw2ZGdn4+mnZyEuLg7ffrsU\nq1atgFqtRvv2HfDCC69i3749mD9/jmvdEwb//e8ij2V4IwUJBkEQlyzLNuZh19GGLYmgUjHgOPf8\n5Gu6puPemwNXxH788ck4c+YUFi/+EgCwb98eHDnyB5YsWSavAT9z5izEx8fDZrPhb397AEOG3IyE\nhAQA7oWrCgrO4cUXX8NTTz2D559/Gr/9thG33HJbwPO+8soLmDbtKfTufRU+/vhDfPLJQkyePA1f\nfvkZli9fDbVaLbu7li79AtOn/xtXXtkLdXV1QYUonDRLl9T3m042dRMIgmhBdO/eQxYLAFi27Cs8\n+OBf8Pe/P4TS0lIUFOS7triFqXXrLHTqJApTly5dUVxcFPD4ZrMJZrMJvXtfBQC47bY7kZu7DwDQ\nufPleOGFZ7B+/U9gWXGZ3p49e2P+/LexfPlSGI21jbYMbrO0MBatOoTOrQcirVXkTTCCIJqOe2/u\nHNQaCIVwlAZRrlG/b98e7N27GwsXfgqtVovJk/8Ou93u8x3lqJ9lVX73URKoStN//jMPubl7sWVL\nDj799CN88cW3uP/+B3Hdddfj99+34O9/fwjvvPM+2rW77CKvLnSapYUBAAVloWciEARBhEpsbCws\nFkvA7WazCfHx8dBqtTh79gwOHz7kd78LKdNnMMQhISEBBw7kAgDWrfsRV10lrl1fUlKMPn2uxuOP\nT4HZbILVakFhYQE6duyE++6bgC5duiE//0zoF9gAmqWFAQAFZWb0uTytqZtBEESUkZCQiJ49e2PC\nhP9D//7XYeDAQR7b+/e/Dt999z88+OBf0K7dZbjyyp6Kre4YhhiQDp2ZM1/AW2+9BpvNhqysbMyc\nOQtOpxMvvfQczGYzAAHjxt0HgyEOixZ9gL17d0OlUqF9+44YMGBQvccPB82yWu3w6atwbbd0PDby\nyqZuSpNDlTjd0L1wQ/fCDd0LNy2yWq1Wo0JJpbWpm0EQBNGiaJYuqYTsMpTXNk4aGUEQBCHSLC0M\nc/pO8J23wGbnmropBEEQLYZmKRgSFbV1Td0EgiCIFgMJBkEQBBESzVIwkrViOm1JTW0Tt4QgCKLl\n0CwFIzsuGwBQYa5p4pYQBBFtNKS8OQAsW/Y1bDab322TJ/8dx44dvehjNzXNUjCS9YkAgCorWRgE\nQYSXYOXNQ+Hbb7+GzRad7vJmmVabFp8EAKi1U3kQgiDCi3d584kTp+Crr5bg119/hsPhxA033IiH\nH34UdXV1eP75f6OsrBQ8z2PChEdQWVmO8vIyTJ78GFq1aoV58z4IeJ6ff16LL774FAAwYMAgPP74\nZPA8j9dffxnHjh0BwODOO0fg3nv/7LfEeVPQLAWjdWIyAMDoJJcUQUQzK/J+wL7Sgw06hoplwPHu\nghZ90ntiTOe7Au7vXd58167tKCjIx6JFn0MQBDz11DTs35+L6upKpKam4c033wEAWCxmxMYa8M03\nX+Pddz90lTv3T3l5ORYseA+ffPIl4uLiMXXqJGzZkoO0tAyUlZXis8+WAoBcztxfifOmoFm6pDol\ntwUAmJmKJm4JQRDRzs6dO7Br1048/PB9ePjh+5CffxYFBfno2LEzdu/eiQUL3sP+/bmIjTW4viFA\nWebcH0ePHkbfvv2QkJAIlmUxbNhtyM3dh6ysbJw/X4R33nkLO3b8Lh/TX4nzpqBZWhiZ8ekAp4Zd\nU9nUTSEIIoKM6XxXUGsgFBpaS0oQBIwf/yBGjBjts+3jj7/A779vxYcfvodrrx2ABx98JORj+ivj\nFx8fj08//Ro7dvyOFSuWYePGn/H008/7LXHeWGtgKGmWFgbLsNA6kiHEmGB1RGdwiSCIpsG7vHn/\n/gOwZs33sFrF+nXl5WWoqqpCeXk5YmJicMstt+HPf74fx48fc33f4KouG5ju3a/E/v37UFtbA47j\n8Msv63DVVX1RU1MNnucwZMhNeOSRx3HihHhMfyXOm4JmaWEAgA7xsKMUJaZKtE/KaurmEAQRJXiX\nN584cQrOnDmDxx57CIAoKM899zIKCs7hv/+dB5ZloFZr8OSTTwMARowYhSefnILU1DSfoLdU8jwl\nJRV///skTJ78dwDAwIGDMXjwDcjLO4HZs1+EIPBgGAaPPTY5YInzpqBZljcHgH9+tQBFqv24r8ME\nXNehR1M3p8mg0s1u6F64oXvhhu6FmxZZ3hwAWunECy+urWrilhAEQbQMmq1gpBrEyXtlJkqtJQiC\naAyarWBkJoiT96rqyNQkCIJoDJqtYGS3EgXDaCfBIAiCaAyarWBkuMqDWLmmSS8jCIJoaTRbwTBo\nYwEBcIDW9iYIgmgMmq1gsAwLhteCY2x+Z0wSBEEQ4SWiglFcXIwHHngAd9xxB4YPH47PP//c736v\nvPIKbrnlFowcORJHjhwJ+fhq6AC1A1Ybre1NEAQRaSI601ulUuHpp59Gt27dYDabMWbMGAwaNAid\nOnWS98nJyUF+fj7Wr1+P/fv3Y9asWVi2bFlIx9cyMbCrjKgx2xCra7aT1gmCIJoFEbUw0tLS0K1b\nNwCAwWBAp06dUFpa6rHPhg0bMGrUKABA7969YTQaUV5eHtLxdaweDCug3EiZUgRBEJGm0WIYBQUF\nOHr0KHr16uXxeWlpKTIzM+X3GRkZKCkpCemYsepYAEC5iVbeIwiCiDSN4scxm82YMmUKZs6cCYPB\n4LHNX8BaKtAVjLS0eCTHJeBcNWCFrcE1UpozLfnavaF74YbuhRu6F+Eh4oLhdDo
xZcoUjBw5En/6\n0598tmdkZKC4uFh+X1xcjPT09HqPW1ZmRCyrAwAUlFe02OJiVFjNDd0LN3Qv3NC9cHPJFx+cOXMm\nOnfujAkTJvjdPnToUHz33XcAgNzcXCQkJCA1NTWkYycbxCUQq620tjdBEESkiaiFsWfPHqxevRpX\nXHEFRo0aBYZhMHXqVBQVFYFhGIwbNw5DhgxBTk4Ohg0bBr1ej9deey3k46fFiYJRS+VBCIIgIk5E\nBePqq68OaV7F888/f1HHz4gTLRETRxVrCYIgIk2znekNAGl6UTDsrJFmexMEQUSYZi0YerUOKl4H\nQWuGxeZs6uYQBEFENc1aMABAJySAibHCaLU1dVMIgiCimuYvGEwcGEZAhYUm7xEEQUSSZi8YWlYL\nALDY6pq4JQRBENFN8xcMlQYAYCLBIAiCiCjNXjBiVC4Lw06CQRAEEUmav2CoYwAAFoe9iVtCEAQR\n3TR7wdCpRQvD6iALgyAIIpI0e8HQa0QLo85JabUEQRCRpNkLRqwsGOSSIgiCiCTNXjAkC8NGgkEQ\nBBFRmr1gGGLENTFsPAkGQRBEJIkawbBzJBgEQRCRpNkLRrxWFAwH72jilhAEQUQ3zV4w9FoxhkEW\nBkEQRGRp9oKhkyfu2cDTmhgEQRARo9kLhlR8UGCcqKihyXsEQRCRotkLhlRLCiyP8xXmpm0MQRBE\nFNPsBUPFqsBCBUblREmltambQxAEEbU0e8EAgBiVDlA7YKVlWgmCICJGVAiGXqUDo3LQut4EQRAR\nJCoEI1YTC6idsNhoLgZBEESkiArBMGhiwTACzDaKYRAEQUSKqBCMeK0eAGB2UlotQRBEpIgKwTBo\nDQAAq9PSxC0hCIKIXqJCMGLVooVhJQuDIAgiYkSFYBg0sQAAG08xDIIgiEgRFYIhWRh2gZZpJQiC\niBRRIRgJMfEAAE5lAcfzTdwagiCI6CQqBCPLkAkAYPUmWG1cE7eGIAgiOokKwUjQxkMlxICJNVJ5\nEIIgiAgRFYLBMAwMSAITY0GNlVJrCYIgIkFUCAYAxLFJYBig1FTR1E0hCIKISqJGMOLVCQCACmt1\nE7eEIAgiOomoYMycORPXXXcdhg8f7nf7zp070a9fP4wePRqjR4/G+++/f9HnStSKglFlI8EgCIKI\nBOpIHnzMmDEYP348ZsyYEXCffv36YcGCBQ0+V6uYVoAZqLHXNvhYBEEQhC8RtTD69euHhISESJ5C\nJlnfCgBgcpJgEARBRIImj2Hk5uZi1KhRePTRR5GXl3fRx0mLTQIAmDljuJpGEARBKIioS6o+evTo\ngV9//RV6vR45OTmYNGkS1q1bd1HHitfpIDjVsKmonhRBEEQkaFLBMBgM8ushQ4bgxRdfRHV1NVq1\nalXvd9PS4j3eMxo1hF0aOFQ2n23RTku73mDQvXBD98IN3YvwEHHBEAQh4Lby8nKkpqYCAA4cOAAA\nIYkFAJSVebqerDYnwGnghNlnWzSTlhbfoq43GHQv3NC9cEP3wk1DhTOigjF9+nTs2LED1dXVuPHG\nGzF58mQ4HA4wDINx48Zh3bp1+Prrr6FWq6HT6TB37tyLPpdOq4Lg1EBgONg5B7QqTRivhCAIgoio\nYMyZMyfo9vvuuw/33XdfWM7FMAzUgg4CAIvTAq0qMSzHJQiCIESaPEsqnGjZGACA2UH1pAiCIMJN\nVAmGjhVX3qutMzVxSwiCIKKPqBIMaeW99w4sgpOnMucEQRDhJLoEQxsjv66xUVYEQRBEOIkqwcjW\nXSa/5gRaeY8gCCKcRJVgZMalwlnSFgAJBkEQRLiJKsGI02sBgQEAcDwJBkEQRDiJKsHQaVUQBPGS\nyMIgCIIIL1ElGDEaldvCIMEgCIIIK1ElGFoNC0gWBrmkCIIgwkpIgvHjjz/CZBInw82bNw9//etf\ncejQoYg27GIQLQzxkpxkYRAEQYSVkATjgw8+QFxcHA4cOIAtW7Zg1KhReOWVVyLdtgtGq1EBPAW9\nCYIgIkFIgqFWizUKt27dirFjx2L48OGw2WwRbdjFEKNhKehNEAQRIUISDIZh8P3332PNmjUYOHAg\nAMDhcES0YReD1iPozTdxawiCIKKLkATj2Wefxdq1azF27Fi0bdsWZ86cQf/+/SPdtgtGrWLBgoLe\nBEEQkSCk9TD69u2L999/X37fvn17PPfccxFrVENQs+IlUdCbIAgivIRkYbz++uswGo1wOp34y1/+\ngquuugqrVq2KdNsuCjWrAgDwZGEQBEGElZAEY9u2bYiPj8eWLVuQkZGBdevWYfHixZFu20WhUYmC\nQRYGQRBEeLmgiXu7du3CsGHDkJGRAYZhItWmBqFWiS4pypIiCIIILyEJRkpKCp599ln8+OOPGDRo\nEJxOJzju0uyQta4YBgW9CYIgwktIgjFnzhx07twZc+fORWJiIoqLi/HQQw9Fum0XhUYtuqTsHK24\nRxAEEU5CEozk5GTcf//9MBgMyMvLQ2ZmJsaMGRPptl0UGpdLyu4kwSAIgggnIaXVHjx4EFOmTIFW\nq4UgCHA6nXj33XfRo0ePSLfvgolRawAANuelN7GQIAiiOROSYLz66quYPXu2PMt7+/btePnll7F0\n6dKINu5i0GlEwSALgyAIIryE5JKyWq2yWADAgAEDYLVaI9aohhCjcbmkKIZBEAQRVkISDL1ej+3b\nt8vvd+7cCb1eH7FGNQS9RgsAsJGFQRAEEVZCcknNnDkTTzzxBLRasTN2OByYP39+RBt2scRqtYAd\ncJBgEARBhJWQBKNXr15Yv349Tp8+DUEQ0KFDB9xyyy347bffIty8C0cfowZMgIMnwSAIgggnIQkG\nAGg0GlxxxRXye0EQItKghhKrjQEAOCiGQRAEEVYuek3vS7U0iCHG5Ta7RGeiEwRBNFeCWhh5eXkB\ntzkv0RhBrEswnFQahCAIIqwEFYxHH3004LaYmJiwNyYcGHTiPAwnxTAIgiDCSlDB2LhxY2O1I2wY\nXEJG1WoJgiDCy0XHMC5VpBgGVaslCIIIL1EnGFqNCgLPws6aYeeonhRBEES4iDrBAABVdTvwagt2\nFu9p6qYQBEFEDREVjJkzZ+K6667D8OHDA+7zyiuv4JZbbsHIkSNx5MiRsJzXYGsDAKi1G8NyPIIg\nCCLCgjFmzBh8/PHHAbfn5OQgPz8f69evx0svvYRZs2aF5bx6jQ4AYHPaw3I8giAIIsKC0a9fPyQk\nJATcvmHDBowaNQoA0Lt3bxiNRpSXlzf4vAaXYJjtdQ0+FkEQBCHSpDGM0tJSZGZmyu8zMjJQUlLS\n4OPG6UgwCIIgwk2TCoa/elThKDkSFyOWXrc6bA0+FkEQBCEScvHBSJCRkYHi4mL5fXFxMdLT00P6\nblpafMBtrVNbAeWAE46g+wXCzjnwxf4VGNbperRNzLrg7zc2F3ON0QrdCzd0L9zQvQgPEReMYFVt\nhw4dii+//BJ33HEHcnNzkZCQgNTU1J
COW1YWOANKzbMQBMBkswbdLxCbCrZh7YnfsOXMLrxxfXgC\n8ZEiLS3+oq4xGqF74YbuhRu6F24aKpwRFYzp06djx44dqK6uxo033ojJkyfD4XCAYRiMGzcOQ4YM\nQU5ODoYNGwa9Xo/XXnstLOeN02sAXgU7d3FZUmaHuPysyWEOS3sIgiCigYgKxpw5c+rd5/nnnw/7\neRMMWoBTw666OMHgqQ4VQRCED1E50zsxLgYCr4JDcJcGKbdWYMf50GZ+8wIPAGCZqLw9BEEQF0WT\nBr0jRaJBC3AqcIJV/mzW728AANrGZyMrLjPQVwEAnCQYuDQXiSIIgmgKonIIHaNRgRXUEBgnzhkL\nPSrXStZDMHiQhUEQBOFNVFoYAKBmtHAywOu75uGWy26SPw9lnQy3S0oVsfYRBEE0N6J2CK1hNfLr\nXcX75NehLN0qCYaKLAyCIAiZqO0RdSq9/Fq5XKuDr3+NDCmGEY5Z5wRBENFC1ApGK1WK/FoZtwhl\nrW+BLAyCIAgforZHTNG6S4ywrPsyQxEMjmIYBEEQPkStYGTo3amzVoc7vdYRimDwlCVFEAThTdT2\niK1iDXCWtwYAOBWZUaEIhtMV5yCXFEEQhJuo7RENOg0c57r4fB6KS0oKjDMkGARBEDJR2yMaXAUI\nvXGGkCXCp1jIAAAgAElEQVRld4kKS1lSBEEQMtErGDo1IPheXiguKYerym3gwuwEQRAtj+gVDL0G\n4C9OMOwuK4T3M8nvaOUJvLN3AaxOq882giCIaCZ6BUOnBsAAgqdb6UJiGP7KiLybuwgnqk9hx/m9\nYWknQRBEcyFqBUPFstDHqHzcUiEJBud07Ru4jIhADiuCIFoYUSsYgJgp5R34DqU0iCQqoRQqJAiC\naClEvWAIvLdLKvTig1yQUuhkYRAE0dKIbsHQqyF4Bb5DKz7Iefz1i0CCQRBEyyK6BUOnkWMYrOtS\nL6SWlL8sKQmSC4IgWhrRLRiK1NoYVgcgxFpSLsvCKXAQAlgS5JIiCKKlEd2CoZy8x4kLKtVnYQiC\n4FEOPZQlXRsbk92M8+aSpm4GQRAtjKgWDH2MGoJLMBw2FipGVa+F4S0Qmwp/xz9/m4kamzFi7bxQ\nntn2Kl7ZMScka4kgCCJcRLVg1Nk5MBobAMBm0ULNqOsNentnRi0/8T0cvBMHyw97fB7IVdUYSFaS\ng6s/gE8QBBEuolowEmI1YLRiCQ/epocKGtQ564J+hw+QGcXg0itESPNECIJoTKJaMG7qmw2GFS0B\nwa6H4FTDytXBZuewcPVhFJabfb4TbO6Fkksh6B1KxhdBEES4iGrBUCmWZtVy8bDZWFiddVi78yy2\nHy7Bf77yrQcVapC7KV1SEqFMQiQIgggXUS0YADD96okY1u5GXNHqcjhsLHiBh91Vvtxo8YwBcDyH\n1afWhXTcxnIH8QKPUkuZX4FyCmRhEATReES9YHRMbI9Rne9AWis9BE4NAMgXDoBtVeLjVPr9/C5s\nLdoR0nG5Rhrdrz/7K17c/h/8fn63zzZySQG1diNKzKVN3QyCaBFEvWBIxMaoAac4F+MUvxsxV+zz\n2cdo941pSHjHLEKNdTSUPSX7AQCHKo74bCPBAJ7e8jJe2vFWUzeDIFoELUYwDDqNbGHIsKFbCd7x\ngkshQ4kEw82lEFMiiGinxQhGrE4NeAkGE+NtUQTudLznbzS6YPiLYVDQW+ZSEHCCiHZalGAIrvIg\nEqw+sAvKG+/OubE6a4YJPP+Dgt5uSDwJIvK0HMGIUfsYEIzOgnOlJuUn8ist6ykuzqa2MPzQWKVB\nmoO7J9CES4IgwkeLEQxlqXMZlQOzFu9UfODuGPVqnceuDq/RPMc3blFCf112fTGMrYU78N/cj33m\nlhypOI5lx1eFJARFpmL849ensOP8ngtpbqPTWEkIBNGSaTGCEatTgyvPgiP/Ctj+6A8AYNSi1eCv\n89ep9R7vvTtnrpHdQf5mltfnhsktP4Q/Ko/B7LB4fP7e/o+QU7AVxZb601G3nRcF9ZvjKy+gtY0P\nJQAQROSJuGBs2rQJt912G2699VYsXLjQZ/vKlSsxcOBAjB49GqNHj8by5csj0o5YnRoAC2dxR/B1\nBgAAoxI7mYpam+/+3hYG5y0YTT+ira+TtDnFCYoNcZ9J1gnLqOrZs2m5FP4fBBHtqOvf5eLheR4v\nv/wyPv30U6Snp+Oee+7B0KFD0alTJ4/97rzzTjz77LORbApiNIoOz5UtpTaYwPTKwYdbq9Cv9ZUo\nV1nlXXReguEdYG6siXvBqC/obeNEIQxkiYRyDbzLbaViLm1j9FKIKRFEtBNRwThw4AAuu+wyZGdn\nAxCFYcOGDT6C0RhBVYZhMLhnazh5HtsPl0DgVECMGSyAYuzAD9U74CjqCE2WuL9PDMPHJdVIWVJy\nIF68R8p4RL0WhkswuAD72UNY31wKJrOXumBcAgJOENFORHuBkpIStG7dWn6fkZGB0lJfv/n69esx\ncuRIPPHEEyguLo5Yex6+sxsevqOb+MZ7Eh8ANq5Kfu0tGD4xjAvooOqcdTAFmEXu5J04VXMmYNFD\n76Ra7gIEo04SjADHDmU9DU52SV3igkEWBkFEnIhaGKFYDjfffDPuuusuaDQaLF26FE899RQ+++yz\ner+XlhbfoLapoAUPz9iFKsEtGMnxCR7bzJwJKSkG+b2d57D5UDEcTh7jhnUJeq57v5kBAFg27gOf\nbR/vWYp1eTl4/JrxuKnjdT7b1WrRlabRqpGWFo86h3s9D61O3BboXkgWRHxiDNKSxX2U/xN9vLre\n+6g96Tq/StXgex5JEhJFgb+U29jY0L1wQ/ciPERUMDIzM1FUVCS/LykpQXp6usc+iYmJ8ut7770X\nb70VWl2gsrKLXzJ1wfQhmL//CE7XBj6GYPcM8p6qysdHO5a53xdV4cgffwAAbr4qK6Tz+mvzznNi\nrah9547gyviePtudnDjCt9ucKCszwuJwx1lWHlmLUd1uhana11LgBR42pyiI5ZW1iOfEc9tclXoB\noLyqFmWa4Pex1iJaRoLANOieR5qyylp0TmnY76K5wws8BEGAihXFvSXfCyV0L9w0VDgj6mfo2bMn\n8vPzUVhYCLvdjjVr1mDo0KEe+5SVlcmvN2zYgM6dO0eySQAArUaFWI0+6D7eLikA+CU/x/2GcY/U\nrTYnVm05jYIyk8936kNy9YSa5ePtevnm0Gq/+9kVwqAMeludVr/7BMLqWqHwUgx6K914NHEPeGPX\nfEzNiWzyCNGyiaiFoVKp8Nxzz+Hhhx+GIAi455570KlTJ8yfPx89e/bETTfdhCVLlmDjxo1Qq9VI\nTEzEa6+9FskmyfgTBCU6dUzwAzDuzmrN72fx4/az+P1wMV7/+8CAXymrsiAtKdbjM6kjrr/DEwXK\nWzACxSGk+IX3d6yKJWrrW99cuf+lOM9BKRj1TaS0OutgcViRok+KdLOajAJTUf07EUQDiKhgAMA
N\nN9yAG264weOzKVOmyK+nTZuGadOmRboZPsRpDEG3/7b3PBDMCFFYGL/uKwQAlFZZfXZTjuif+nAb\npo/rix4dkuXPWFkwgge9pbN575cS678DVLqeLE4rquqqkaRr5SEY9hCC3lL7bSFYI42NRwJAPSnG\nL/7+JowOE+bdOBtq1v2zLzGXotRajp6p3SPWzsamOZRyIZonl56foZFI0rUKuj2voB6fp8LCsNrc\nnVVRdSU2F/4OO+dAta0GT26apfiOgEOnKzwOU59geOM9klZ2fkqk+AUAfHzoCzy7bTZMDvNFWxj+\nBEMQBNQpjtfYKDPV6nPpGR2iu9A7PfqlHW9hwYFPfWbD+8PqtOLzP75BhbXyIlrbeDjJPUdEiBYr\nGCm65OA7CAycFZngqlMx54aX8UC3cR6bGdZ/B/W/wxux9NhKzN37Ac6bSzw3MoJP9VlVkBhGjckG\nh9Pzc2/XVSBXkY3znb2+pXAHamw18vtQ0mqlOIeDd/iI2rLjqzB90/Mot1b4+2rEUbraQk1zDnS/\nQonnfH9yHXYU78HHh74MrYFNxKXoPiSig4i7pC5Vkr0sDN6mAxujHC0zcJy8CgCgZbXINHhmd0Hl\n+VBe2y0duXnlyCs7DyQA+cYC6FRecRKWh3e1cqnkhj8LY+p7WxHT3Qw2zl1LyltYvEuWSNT5EYzV\np9Z6vA9l4p5ytGrn7B4z4DcVbgMAnKw+g1R9Sr3HCjceMYwQR9UNEQyTy0qxOOu3RpoSEgwiUrRY\nCyNZ5+n7F2zeAQu3H7isxoo4lWc6GqPiPPbpnJ2I7NQ42AWl6HjXU/f1LdfvkmI8DiUJRvuEdgAC\nlzgPJeZQn2BwPOfRrkDHbKpJc6FaGMoONFCZFBtf//2S7r3qEq+rVZ9gVFirkF9b0EitIZqaalsN\nqhWehYbQYgXDO+gt2Dyzl27sm4Ur2ohzRJ7+cDvmfn3U9yAKK6NVXAwMejWgltJQVT6dE8PwqLN7\nfsa6TI5QO13JJRWj0gIAnAHcSsoYRiCCuaQsDisWH/Z0vfhzcwFNt3iRMp4T7P4p4xPK4LiHGDpD\nEAzXdarYS08wlIHu+mJTXx/7H+bt+zDSTWpSjlXm4VD5kaZuxiXBM1tfxTNbXw3LsVqsYDAMg8d6\nPSi/97YwDDo1+nV1u6GKyn3dEIzKgZf+ei1u6puN3p1TEafXgNG4C/6dr/QKnDMCrHWeoz+3heFp\nfQRKE5U6Rq1LMCQLw8bZsf7Mr6hzCUWgzl1JMAtjw7lNyC075PFZiaUMB8oOB2xTY6M8b7BAr1Iw\njledxObC3wF4phjbQ7IwXIJxCVoYSvGrb2GtKlsN6jibj1X2R8UxbMjfFJH2NTbzcxfigwOfNHUz\noo4WKxgAPFIp777OM62ydWoskhM8YxBclVccQ+1Em7Q4jL+lCzRqFgadCtCIHQ/DAHvzvIPePCw2\nz4dZFSCGYbJ4duZyDMMlJLKF4Xrolx9fhVWnfsL3p34C4D+GIZERmwYAcChcTA7OgV/yc2C0i356\nf4Kz4MCn+PDgZzhZfcbj86bymXtM3Ati5Zgd7jpey45/h6XHVsLssHgISSguPOmeMD4Vvpoeh4fb\nLfj/w+K6bm9h+e/+j7Ei7wd50EEQ3rRowVCSluBZO0rNAu3S46BWsejdKQWPDu8OR35X8DYdeKvo\nzrqqS6LHdzQ6p0dQ21jnNS+DEXDwVAXyCmtQWi1uqzWLwuAtGEYvwTBZXYs9ebmkpIf+ZM0ZAJBT\nPoN1gFL8xq7oMH49twUr89bg08Nfi00N0imWeC28FErAOBJ4xDCCpNWa/KTMztj8AnaV7JPfhyIY\nUgFJK+c736bYXIKcgm1NNgdC6WoLZmEIgiALRqC5K6GkGEcLds7hMVcq2gj377HFZkn5Y1BWf2wt\n2gEAiFHHILWVHv+degM0alFX+1xxO5Zu6AxD+3P4teRnDL4q1eP7lapTgKKfL60xAcpYOcNDEIDZ\nS/YgwaDFv/7cB+fKjFAleqbLWm1O5Jd6urOksiOSsGjlGIYTJocZJRaxxIqaVcPisOCcsTDgdcZp\n4qBlNXLHAQCVtmoAwDmT7/c0rMbDL27xesCCWTNKzhmLkBGbKre9oXi4pIJYGJYAHeCPp3+WX4ci\netJcDmU9L4k3d78LG2dHRmwauiZfXu+xwo0zRAvDwTtk951yP2XHYnKYomZGfH3p1s9ufRVmpwX/\nvfnNRmpR46J8RsIhHi3ewojXxgEAErTx+EvXu/HMtdNwW/uh6JHSFQBksQDERZgm3NYV2UliSm4d\n79lRnnEcgsCpwBld2707IUWWlIk9j+NFJfJndo7DkvXHsOdYGWYu3I6PfvAM2Dk5Hk6Od1sYrGRh\nOPDt8VXyflW2Gry+az6OVeUFvOZYjR7JuiRU1rmr80rBd3/ZWglazwyxyjpRXCQrxBrC5L2ztefw\n+q53sPDg5363VxltKKu+sJGeMs7zw+l1KLf4n1Bn5epvX7CYjyAIKLGUyddpddb5PHyShVJsrn/Z\n20igFMxgguGRAKDYz6xIFfZnkYWTs7XnsOb0zxGzxpTHrS8T0NyAFOkySwUWHVyCKtfzcCmiHOiF\nI9bY4i2MGf0m40TVKXRKbA8AyIrLRFZcZtDv6F2FCyVTluM5LD22AkauGoI1EXCIdagY1hWgFmJh\nZyxol2HA2VPA4GvisYdZi2+LDgCMmJ1VXmvGmX2F+HVvYMvgx9/PIqOD+ANgoQEgPvSVio6y0lol\nj4SVZBkyUWQW1xqJVeuRrE9CsaUU5dZKrMxbA6tD7PAkwVC6pBK0caioc5+jwjVRT8Wq4OSdIc32\nliygI5XH/W6f/t+tAIDF/7653mNJeE9iXH74R9zdfqTPfqH45INZGHtKcvHJH1/L7zmBg513yG5B\nQExe4AUelbYqf4eIOMpFsoK5pJTW4cZzm+HknfhL13tQY6uVP1fGfCLBm7vfBQB0T+6CDontwn58\np8e9qH+uESA+wxea/fbl0W9xovoUVAyLh6+874K+21jYudB+F6HS4i2MZF0S+re+2mcGdjCk9b6l\nEefRqjxsO78LACDYdRAE1211CUasVtz/nps64L1/3oD4BHEExGhtYFwlRjiNCWCD/UMFfLflNL76\n5RgAYOfhcgDixD2T3YxkXRK6Jl3uVywA4OqM3vLrU/kWJKpFK2jx4S+RW3YQx6rF43I8L8dLJOK9\nLAyp85dmqQeyMEwOM/aUiOXbA5Uw8b7GMkvgWeMlljIsObJMdjF5xy10AVxddSFZGIEF41DFMZ/P\njHZPl2FyjHg/KwOMNo9UHsdHB5eIAl9XhUkbZ2BL4fZ62xUqTiE0C0Ppnssp2IatRTvh4J0egmEK\nQTA2FfyOY5WBrdhQECDAyTuxMX8TamzhKz+uFIlgqeOemWWe+31z7Du8vef9oOepC1I2JxR4gceB\nssMRTRpRXlc4ztPiBeNi0KtFC0N6+DSKkYmejQMEl/
i4BEOvliwOAbE6NZLjFSm8CjeVpp17rsdl\nGfEY3Ku1z34Wm/jjLCqzQsWo4OCdMNqNiNfGITkmcLmTGJW7+u7BE0YcPyUe52ztOY/9HByHqe9u\n8VjqL8HltpMos1bAZDfL0xKVMYw6uxN1dvGHOW/vh1h8+EscrTwBdZBUVMmFoG57DC9sfyOgFfLR\nwSXYfn431p39FYCviR2r9V8tMpQ5KcqHnuM5HCz/Qy7O6G8sccbrvsVqREuxIkCZlPdyP8K+soPI\nNxZgvys1+etjK+ptV6g4L8LCkKhz1qFaaWEEWB0SEDvg38/vxjfHV2J+7sKQyssEghd4bC3aif/l\n/YBFAVyVF4PSDRXMwlC6IVfk/YBFB5fI7w+UH8bJmjNBxUD6/VucVvzvxOoLDp7nFGzDhwc/w/IT\n/pcoCAeOEO9FqJBgXARS3ENKQVVWfb217+W4tovo0hJngwN6jdhZSx1cWqJSMNyjHFXKeQzskYGJ\no67ErIeuwd1DFGufS8Ii/RVYsGBRW2eEU+CQoI2Dw+w5+VBJnMbd6QucBtZa/6NxhhXAXnbAY/Tl\nbWEAwKmaM3JnoXRJTXx7E6bM2wxe4GUXmNlhCVpcUSreqE4XO+HD5X4mScI98pUsGu+AZqBg+rmK\nwD7m7DhRlJWdx8Zzm7HgwKdYmbcGgKd7Ttr/lCsrTULqpPy5v5QWmCAAWpUmYHsuFs+02sAdg9lP\nwP7fW17C/rKD8vtgFsaPZ37BF0fcC4ntU3zvQrFxdnkGcr4xfDPPleVygsUwlMkLW4t2IrfsIIrN\npR7t8rYk/XGq5gw2ntss/15C5axrtv3hCv+/93CgFHRySTUR0ixx95wF9yhEzaoQqxM7LrVa7CSl\ntTUkF4pe5+6ADLHukTdvTMLfhveQJwwmGrRITxbdWR2yXJ22LBgMnE5GDvSWlfPYsivwjztGUMxs\nd2qgsicG3FedVugRRFXOio9jxJpRO4r3yHNDvF1STk7A3L0L5PecwHn8WJXBdgAwSi4wTrwXoUyi\nA3wD9NJs7RqTDUvWHYOlzoFqkw1nSgPHFeQUY8WDddbVeW0q3IbcskMe7spOiR2gYlTIry2A1WmV\nvyfNafE3Ij1ZfVp+beftcsJCOAk1SypQHaxDik4rmGB4lxTJU1zbhWLjbBdcrTkUPF1SF2Ztvbzj\nLfx6bov8vjaIYAhepX9MfiyzIlMx9pX6F1XJMyH9vywOK7459l2DqiGXmEux/syv8v20k0uq6VGz\nasSq9XK8QBkwbROXJfv2e14udsrSyFcaESs7z1idCq1ixP3aZvq6VGK00r+IR0KsBnBZLe0zWoHn\nGFmEzhU5INTF+XxfQnC4JyEKPAsVFxd0EakCxahco1LLbazKF91eylngUozA5nCLjHIE/sXGQ/hh\n+0n5/XPbPBfJkiYpCrxLMAK4Obw9QzUWT6GycXYsP/49nt3+MjYVbcXKLSfF+SyqwA9Ksi4JDBiY\nHWbwAo8jFZ7usEUHPwerOHOsRo9WMYk4XZuPJzfNwqeuYLjUZn9ip3Rf2TmHd4WxsBCyS8qPhSEh\n/U4tF1Cy/nTNWZ/PVp9ah8m//tuvi0b5v7VxdlkwvDvfhhCqGybQvZAqAQBA7QXEVjR+LMdXd76N\njw75z6RSueJ60v9u7dkN2FS4DYsPfxXyOb15ffd8rDr1E/5wxd3IJXWJEK+N97EwhrW7EV2TL5dn\nb0sTo6RsGs5P/jsncFCzamhYNTR+PBXSSIETeKQk6sGoxXON6H8FILj/fYJDi9v6XBGwvTaz4uBO\nDVgwaBvfJuD+J8vd2Vqck8Hw9L+g7vBAOM939D02Zwcv8O7Z6V7B+zrOiqLKwMvXykF2WTA8O90q\now3Lfs0DL/UprpjH4TOe8QK7045DFUfAs3ZoLzuKU/wu1JrtYLwE4/FeD8mv4zVxSNYlodRajg35\nm/De/o+wr/SAx/5KC0PLajwqHR921SuSihfaOLtPuqgyTnS2tAq/7PXtZC+UIlMxlhxZJrvAQg16\nB0sjzTZkQqvSXpAv/ry5xCdLbu2ZDeAFHqf8iIkyA8vO2SNSZsXOhSgYAa5TmQAQzMLwRs0ETuyo\nqPO1ciWxlP5fVod4H6v87Bsq0rMjXZuHS6oB8SYJEoyLJF5rgNlhAcdz8j/p8iQx5iA9BNLnsoXh\n6vyVI0CO56BiVNCyWr8/bqnGlCAI6N05Ra5VlRrXCjq1wrXhiEHrFIOniHDuhzG/xIwXBjyFbNMQ\nCPZYgAF6p/YIeH2M3v1gf/5THhb+7zQEcyJ8x/kitVYL3l1xANDYoO/3i+ex1A45xVjim40n8NbS\nfdifV+4jGJVmz07tvysPYu2OfNSaxftZXiM+WBVG8aEQHKIYHj5bCqvdAYFTga+LRbHqEEqM1T4W\nhkHjjvXEavRIj02F0W7yqZ2luAL5lValRasYt2BwAg+O5+SHkRd4n7pWysmQq7efxKlid4fgLS5O\njkdFTf0j/PdyP8L287uxqUAsMa8UiR/P/AKjzb9AW4NYGMm6JMSq9bA4rDhaeQKTNs6o1+UkQEB5\nABeKt+sRgFc5FltY1op38k6PtWeUz1GwVSUDueeU1s6FCEawisf+3ExS3Ez6vbBs4LVxLhZPC4Nc\nUk1GvDYeAgSYHBb5Hy9ZEtJD4BYMsUOT6h15+JsFJ1QMC41K43cugKCwMG7vfxlSUljX+eOg07qt\nBsGuQ2ZyLFSu+RlcTQrq9ruXxt19tBSp+mTE2kSrwmx14I+9Bqh5/5lFUsBePLjnz8Rxzncm88b9\nZ5FfYoIqsVz+7NrMvuILlcMjuA8A63adxR9nqjBv+QG5DIrkkjpTKprvuSfK8d6KgzhVVOvxXXOd\nuH+VSez8JKsnv7QKRrsJgtUAriwbYAScNp72sTCUgqFX6eTaWkWm837vhTK4rlVpEKd1f1/sMCs8\nOhnl/5EXeE/fNst5LL7lHfP46ufj+NcH23D6vOc1eyN1ZFKGmrdVsTV/N+ycHW/smo/NivRdY5AM\nqDitAVomBhanFd+d/BEAsO7sxoD7S6nSyjk6Skot5T6f1Sg6YBtnv6B09kB8/sc3eGXHHLnGWUNd\nUkqCxjC8xD5QRQEAKLX63gvJOpS8CNJAM5yCYQ/RVRkqJBgXSbxGypQyyg+9JBisK5gljTiklFZ/\nJRmszjqoWRW0XuU3JKQfDy9w0KhZpCQzYBkWsWo9DFq3hSHYY5CRrIeaEQVDcMQAzhjYT/cA8gai\ntNqKFZtO4dAp8eGuNtmx+0gljLuvB28LHMsQD8bg0eHd8ehwsUCj83xHOMtbe+xSXF3jOq9bxDJi\nxeA9o3YA3isUKiyOo/mukSgv/hwZlsO+E2WY/78D2Hu8zKc5VpuYumu0ivdXcLrOqXaAUfEQnFpw\ntWLZlmO2XWDUTgic+6fOcu77tvC7E+CtogAEyqhR/l9ES9DzwSu2eLZRKRhWZx0ECLK7gmF5D/H0\nniT3W26R2
O780GYPS0LlLRh6tQ7Hq04i31iApcdWYHdJLowWO44WBp6JrmV0OF/mgNXhDuaXmMvw\n780v+V0/o7Xr/+s9ek5yWWD+Zr1vK9opv7b5qZh7MewpFef6nDWKrj9HiC6pGntgUZYyIWvtRtTY\narH65Np6Kxr4EwzJ7VTmRzyVrjzR0yAlAISv+rPTI+hNLqkmQ6q1U2otlwVDK1sYomBIIyzvGIZ3\nh6NiVNCoNKixG7Hx3GaPbbwsGOJfo92EOI0BLMNCrxCMB4f1RnysFipB7DylUTVX1hYPDL4O+hgV\n1vx+1qeMOsACXPBJdXcO6IABPTKRKqcDM4hTeWZZ5Ve4On2lMNh0EHgWsQbBQyDE/dzvD5wtAiC4\nM8BYDut2es5zUFJSZcX+vAq54xVc7We04ohRx+qRrE4DBAZmeI1+BeDpD/a43zq1yDsWPGvpWKH7\nYeccLK5OFydBSpaJdzFGKY7x+dqjWL5dLHAYKy3AxXIe114TYATr5HhY6hx44ZOd+GW3773wHpl7\n19Kqc9o8XEKfHP4Kp4trRGsvAJxdDTjVAAOYXPG5irpKGB0mfHlEnDOitKQyDRkAgJ/zc7DkyDL5\nNyp1kiavSaS8wONg+R/y82F12MI7ac3121aOqpceW4kKi/+YQHVd4EWFLotvCzWrRq3NhC+OfIu1\nZzdi+Ynvsfrk2oBVAcyKmAgvCHj3fweghvjb8mepKEvWmJ2WiFgYlFZ7idAmLgsAUGAsCuiSktCy\n7iypM7X5+PbEKo/tKpeFAQD/O7Haw+8qPaBSR2+0m+XRj+QSiFFpcX3PtgDcJUOULqWMZD06tPas\nxqtEUAhG2/g2MDCey9cmxYsWkkHv3q+Dqg+cFa3hrBDnnJQbxc5BGatYsek04NRAq3P6rIF+x3Vt\nkJKgAxNjgb7vr8joc0zuSBmVE8fPBR9hf/j9YberidNA4FkwWvEBTI1LRFK8HoLdbTk5Czu7rlUD\nTtkUpxrxbBKuSOoc8FzKTBmjmccVSZ3wxuBZGNnxDgDAqpM/eexv42z442wVck4cwU6H+L82Vrvi\nSSznIarKAKdUYBIA6uwczhYbkV9iwle/nMD5Cv+uJOlzKcGiZ2o3AKJlU+iaByNxrqpCtPYCYLOq\nIHDi78c7OJ5fVo3P1h7FiQL3/0XKyqu1G7H9/G6UWsphcVhlq8nG2cHxPH7dWwCrzYlauxGcwKF9\ngp5oOOIAACAASURBVPhb3ZtXHHQdk/rILzHi87W+cxi8rYrdhQd89gEQdBW6VrpEJGjjUWs3oszl\nTtp+fjfWnt0ou+u8UVoYNSY79p0oh80h/l/8xVKUc3bqnHXyIMDJcXByDRMNwY94kmA0IZJgnDMV\nyqmUsmB41aSJUQS939nru9KZaGG4R7nVNvdDyStcUg7OgTquTnaHSSM8pR/8xvbXAABu63a1/Jle\n67kYlA+u2IGW1eDf10xB70zPGEVGsmhZGHRud9O9N3ZFRu110NSJrh9G5cSgnplIT3Ffh7MqHQKn\nhsBy0Lg+7p7cBQDQ/8pUvPH4QDx0t3gfazVnkJHsmhGvtfmMhNtlxEGjEn+ukoBJGWOJOgPAq8Cw\n4kPSp0MWurZLguBw3ffaJDgrxPPI7isXAqfG+UoLLk8ILBjSGicAUFhqxQ/bzmDOV39g8Xdn/O5+\ntrQam/cXQRXvtm4cVnd9MaV4StkzgiDg+Y/d7poft5/F5+vcJUmKK7zcHa6B/t7jZeAFQR5JsjZR\n7MtrjSg0esZkfij6Fow6SMqthREtDH+onMjJLYLD6bYwtu2rggbuCgKv75qHf22eJcdV6pw2/LQ9\nH0vWH8dHPx3Egv3igkaJajE12+qou6BUT++YwSuf75FdeEq8j2nQxsLisOD1XfOw8ODn2FYklvGp\nCiIYBnUsErTxqLJV+0zGlFxw3u2p42zILTuE/NoCVyKHIKfB+0u3Vrqk6jibey4KI3gMHi4Gd4IN\nzcO4JIjTGtAqJhGFxiJ5wphkSXinCmoVLil/D4hoYbgfVGU9ImVareRzTYgR3RvXZw8AALR2CQcA\n3NbxBjzVbwqGX+Eu4hejVWFI7yz8c6y7nhQA9OjgKiXisjCkH1m7BK90W9dzEatztzG9lR4v/bU/\n+nfJdl2EExNu64qh/cTYRrZpiChEvAoO3o6+XcRzSQFnB28HyzAex9QoPEOsoQa9O6Wg7xVpmP/E\n9XjhoWsRFyvu0L2Dq/S2a7TMcFqPjLAEXRwG9cyUG945qxX6XS7eIz2rR5u0OGRX34K72t6FyzPT\nUVJpwffrA1s0jEIwNueWYMWmUzhbbISxxn/n+vnPf+Do2SpZsADI1k5KksYjhiH9rw+f9g0cl1S5\nXRw1Fs8OR9lXlVVbsf246LbatV+0hn7cfgJnazw7U9bg6xaRKisDgMkICLz/a2Jj6qBuc8xzXXpe\nBdbhntTp/du2cXbkl4jnPG07iHMmsT0qh8s9p+LgVEysyyuowRfrj+G3fYU+9czeWroP73zraSl4\nj8Klxcm800ctDisOVRzFOWMh9pcdwpdHv8X6M7/6WBgjO90uv47V6OUqzd712Q5VHMU3x1bKMTQA\ncvHSRQc/xxu756OwugxgBDByNWpfwVC6pGzO8LrnpGOFGs8JFRKMBtAmLgs1diPK6yqhYdWyZeHj\nkvLKkvJGxag8ROb387vk0YskGIIgyJN/pKBia0MGXrp5Oib2flj+LsMwaJfQBizDYtq9vXHnwMuQ\naNCCYRj06pSCuf8YhFZxYkd2W3+xUqjUmUkxlnZe8zPaxouioFb5/lx6tBU74lv6t4ZaxcqB41v6\ndUC7jDhkJsXDzjnkY0uCIVlFyrIbyh83a6jBwCsz8Y8xPRGn97IKwGPGn/ugXZY4ur3vph5gBPf9\ni9fGISMpFhkpomWk02rwyJ09wTIsumW3xkt/vRYzx/wJt19+Azq71m231/qWP5HvqZc7rX/3DDw3\noR9iNQGSBVgOtRbPQL+0BHDHbIOHu7DCWonCcjPeXrY/4PkBYEXOKZRWiVZGrcXunpMCYMfhEhTX\n1Hich9GbwTMBgvjn28uv7Sf6yq/PFtqgjg2c6aPJOu3hchR4Vs7K80ed04bjLheWco7Clj01oguR\n5WB1uNs4+8td2Li3EJ+vO4YPvjsEQRDwyY9HsGTdMfxxpgoHT1WgpDJw+77fKqYAe5ezt9itOFzu\nWUBylWtlSo+qzGp3XM6gjgXnDNw9bir83cPF0ye9F2IYd8ZhbuU+j1iVOFdJQG5eOTieh51zeIhI\nHWfzOJ7VduGuui0H3BalFOBWpjF7u04vBhKMBtA2XnRzVNZVedQx8rEwWLdLyh8qRgWHYvWz3SW5\n2OvK/JBiF5zAyyZ0kmLiWNe0znJ5C2+u7JiCu4d08giQJsbFYOb4qzH13t7o0T4ZN/fNxg0dPS2P\nrDh3BtT8G19DnNY9ipw0+kpMH3eV/D5BL25jY0SzXfLVJsXG4oWHrk
VKnAECBNn8lgRD2i/QDGVG\nZ8aVHVL8XpeTd6LrZUnQ6DioGRWu6pSJtqnuhz1VJ1ozIy+/BQBwU9vroVVpMfmqRzCm83CPY/W5\nXAxcQ1BhSpfpSFSL91LgGXBVab4n59T489DL0aF1AjKT3fdFa0uFI190t0mxlaREdycpxYlsvN1j\nguahgkJ89pPSDy8AfmY9m6wOvP7lXgDAb/sK3fswYhVjyT0nCQYbJ7q6HIWd4Ch01yRL4LPAlSkG\nBAoXXU0N0DnG/b/1hxQnAgDwKhhNQWZoMwJqXbPxVayiq3HEiGVgWA4HTisyzBQd7JGzVfjw+8PY\nfOA8ck7lQttlF8A68dOOfJQHWTfF5uDkOSh3d74LALD7RAF2FfiuRQ8AYy6/S3596A/Fb9GmwoFz\ngRMvAICHYrKknYWzzu2eO2M+6XE9ds6OX3YXYP7yA/jqtwN4dturHseqc9o8LACr7cKsDUEQsPhH\n9xo6UhJErSN8VYABEowGIcUxAPeoH3BniUhIMYxApcfVrMrHjD5SeQIAwMMdw3BbGIHrQIVCaqIe\nPTuKnfH9t3TB2GuuFdvhEjoNq8bjvR7C9Ksn+cRjru6S7nZlQbRGtCotDpQdhiAIsq9WsqqkYL4U\nRI2VBUOaGe32Dzt5p3xt6rQiLM37xm/7pYfB7LDAoIkFwzAea1Ok6sX29UnviblDXkGPFLEjvyKp\ns89Kch2zEtCvSxr+b+jl6JKdgSSdmBwg2HXgje7rFATAfrInBJsBCQat6z66LYwJfYaDd3XWOh3Q\nKk6L63q5V2S8vpM4SdLBOaBz1RLjbTowGjvyCsWBABNjgf7adVBnncTrfx/gc93VJjtKq61Yt/Oc\nPDKWrB9GbYeKUePqTqIYMCrX78acgPuvHiofQy3o5fRlEQa8yfV74tS4odOVuFn1NzhL2vqcH3DF\nlyR4Vly22BK4JI00adIuKOam1BmgZrSAygmrXXE8ZSYd60Su8ycwhhrEdNkDVWIFVClF2LS/CDMW\nuEt3eMByWPDdIew4kQ8A6JjYAQBwpCwPjNbXJZSh6oCuSe543bY97s71fKkDzoLgKydyikHemUIr\nHA7FhD+hwmP+j5134Gi+6HbcVrJNHvnzZvH3dryoHFa7u42WOv/WYbG5FL8VbAXnyqKTz+e1pPNJ\n1+RQk90ENatGliH4Gj+hQoLRAC5LcD9UbeLd4uHdyUrvt5/f7fc48Zo4H/+lVE5CDnpDkJdRVVoY\n4UCr0uLpa/6J5wf8S/7sytRu6Jh4WQjf1aBnSjeU11Uip3CbbDlIQiFZXmaHBQwYxLpKw0uCoSyN\n7uAdiNfGyYK7p3S/h59Z6iTdxdosMLgKI2YrrCKdokZWfcvBsgyDiaN74pZrxP9lgk48HqOxe8RF\nBEsCuIps/N/N7uB4crwOzhLRrdcpuS2GDxBH8tdfnYSX/tpf7gCnXz0RE/7UGxpWDTvngJQNLdh1\ngNoOyVro3Uf8q2mTh/SkWEwfdxWm3NPLo70vf7oLVpsTLCvei/atXZMI1Q7EaWLx+IjeHgMWwRaL\nHm2y5ffdsjIxqKf7t9o+Mx4JRTfCunsYAAadshPx/+2deVwV57nHfzNzVg5nAQ77JqsiKosKLkQR\nCbihUEEbkza9as1iNKJZDPfT2BtTc29MbZO0ualNW5PWW1vbmn760U+allSjDcFoJGpQEzSKGAHZ\nZD/bvPePOTPMcEBRIQq833/kzHZmXs+8z/u8z/P+nonR/nBcSgSxCyNm4lAjtKNnEah0bcKB2LyQ\nZ30Yhro02M5NBukVNBen37odwv9znCMLcGqg5TTCPlk8R+69qAIvgfO5Bm1Cec9+WYbX1cYOT80B\nlsdn5xtBVN0gThWOfCp00Jyx7/gU59LDSyW0n2A0e674/se14Nv84LjiKYUjIvcwyk8rZWoYlgdr\nUK7zOPWVkH7tsvd4daLBOHSyGqdk3laHree9IITg9FeNuN5uw39/8ir2fvFX7PzgQzzx08NobhOO\n612t8kRVvbBWyd4OL9YAa5fyd3S7UINxB8g77iCvniyk3lNS/ckf6DihY5sTniHN/U+0jke0eQyu\ndtTBxbsU6zDEvHq5NzNYhBlD4Kfvv57GjciLnge9So/3L37gIYciehodjg6oWZW0XXxeeYaXg3dC\nzaolowIA+y/8Q1rcJV+kxhMeXc5ueLmrH4qyLHdKsv9EAO6OThYAJg4tCu6LQk5aT4U4J8/DcSkB\ntmM5MGq8kZuYBL1Kj+MNn0KrgSx7Tuh4NawGNt4OTkVAeAZwaIRaGyoHOJZBcoxyCiw0hMMvq18G\nF9ijydTR7cS89AhpiifIXweDTgVG7YC3xuD2tnqmRhKCQ+Bn6mlPs16PlfenwnI9Gd2fT0eo1YA1\niydImXI+Ri1iQk1ISwiAUSecp2I0eHC2ctoSALzcWQrTxgfC4owCf90ffKcyfXvt0gSEWg1wQmiL\nU+e63OfqBM0xWZxHrhIgBtcZlhfaCnAnORCwxkb856/+7TFxJ8ZXGLUdxKHBB5/UKfbbKtNhvzRO\n+nypxoGNPz2G7opZsJ1JUxwrZdO5biBFL4/nuFRwNgiG2KoR+gLWolyf4xINjCxxQBrocE5FfZc/\nHDqHr6624vi5a1i/cz/+96v/wVN7/iBNW50i74PRt+HVsv/D2x8cx4ef9coWY3gcOXkVzV1taGkB\nPjndf0bYrUANxh2SEiBYbrm30dtA9Bdj2Dj5MZSkFcNP7yv9EERxOwKC6/ZWRfC7svEcfLQWqZO8\nV/D38kOUKQLX7W3SQjRRuVM0EDaXHQQ9noetjykpAgI1q1ZU5/vo6lH84tTbwn638XQSJzocnSAg\nkocxzicWLMNibrjnSPhWmBqUgimBybBfHA9Xqy/CtNGwXxoH+4UJCPBR1htJTwgEwGBFttAJ6VQ6\nTAlMRrujA1931ErZc+J0mU6lQ7ezG3odAxWrgr9RmAoK8ldhTLARKq5noOHiXSit/lBow8gz7pgE\nD4Bgycwo6Tgn78SP1qSB4ZySDL3ObTC81QY8tUxIsxYHGeK6hycz85EcEoOiObEIdD9XsJ/wL8ey\neHTJBPga3OnbZnOfv+HiolQ8/70p8DXpoFa5f/O8crBkNnGIDjH1dK7u/RaDl5AGLXbyYBA/vu9p\nGDUjPA+r6wQXcBnahE+gDvvS80CGF9pIZQfH6wGeAyE9XgPf5Q1X3RjpsxhXInYvmLz0eHZFirTP\n18uIZx5I8UjD7o8N30rF+sw8lKQVY1PaI9CwGqj8lOtgGLcop+gpWfV+SAoRPFaGcyoMEKPtxNa3\nj+Hn+07BbrootIOswBqjckI38d+4pjqDcvu7ioC38F08/lZeBbA8DGoDHsicOKDnuBnUYNwhDycs\nx6bJjyNeNsKVT0k9NXktNJwGP83c5nFugN4qjTASfYVOJ94nBmatMEprsV33kH0eihrIg0GgQRgd\n17QJQntioF/DypMBWMmA/
O3Ce/jHpYNSpyiiYlWSYRCpbDwHF++SYhdO3ikV3BFXW3upvbC78DUU\nxC68o+dgGRb/kbgCrvoIwKHDipgHhU7GqUVyrFVxbEyoGT8vnoU5KT1TPqKnea2rUbagU+u+Rz06\nHZ1w8k7o1RpMiRXiDQ8tiMJTy1MUWTPvnj+gWPWvHV8Ofdr7CJh6AloNJ02e2HkHCCecJxoMsY1N\nssJXa5NXIdFvHLLC7wMABPh4Yd3SSTAZNPDWq7FtzTT853d61u4I1xE6S51KA2+1AepeZXYtBh3G\nBAm/VdFgsHaD4hiby4as1DBpPQJxqcCxjJRhpvfiwTEcfHUWNNtasLZgAnpj1rsNmU89NGMqhe/x\nEbyHmRN65ubjI41YlhMBhgGCzT4AGCllnCVqyVtI9he+g3Tr8eO1M1GYGYONy5IwNsJHkrbZtCwV\n8eEWxaJWAODbTXDWek7VhvlZMDFaeJ9NGiMWx8zzOCY8SI+XH50uxXXWJa9GpNVtiDnl4lbOKFud\n7u4COK5v3S1GY4M1uhaKZAmGl1brR/pZMXPczaeXBwI1GHeImlMj2p2DLSKfkopyxwF6v2yA0rAs\niV2ADSmPYEZImlR7QszRH+sTi0luZdlYS/9zqncTUTdKlFUWn1deXW7FuEJp9AugzxWzalYlZZPN\nDElHSsAkt8hjh7Sa2cm7cNadFDBWtkKbY7lBEbMDgJhQoSO0WnSYkxqKB7LjoNV4SnHrtSrFd4oB\n94auRpmWmNCBe6n0sPMOdDm7oWbVUgfvQDe0Gk6hVdRbIkakjalX1qJ2OaQ4j2ggwtyDEPkUY7Ah\nEI8nrZRUAnoT5OsFL51yND0teCqMam+M840DwzAeK4XlnuDksYLhnheRg3mRWZg/Rgi021x2RAYZ\nERUidPrfnjMO6wsnSW2i0bmgZlUwaUxotbchJd4q1H2RxTbkXqgIq+1GTFq1lBoOACZvFcbHCt/j\nbxA8Ko4I3yMmU0QGGvG9xBVYFLgMq+6bDR+jFgumRSIiUGi7ZMcyeF9YiCBfL7Asg4mRymCxr7cR\n8yZ7LvIUi6SJzAnP8DhmyewIWC16RITo3OfoEC4aDFYZz2Hdiz6FMs3C70vFMSB83122JvQr7Fg3\nQ/qs1fasHwo0WqBTaW8azxsINxYRotwW/cUsfpC+CTaXHS8fex2AMptKzaqkeXipWJHbYIij3s+u\nnUZqwOAErwYbeQxHw6qlTlT+I51gTYCaVSEveh4qG8/ifK8ypwCgYtWSweAYVqon3mpvV3gYVS0X\noGI4D2M9WDz17RR02Zww6NT4Ts7YAZ/nrxeyz75ur8Wl60I2k9o9DSfGZq7bWxFkCJQMhqi51OXq\nCVwa1F6KHPpoc6RUX6Ld0aGQyhc1y/y9BA/oO+OXY1xQNCJ1Y275ueVMD56C6cFTpM8+WsELKIpf\nAgaMwoPJmBiMyEAjwgK8wTKxKHOvphZrS+j1AGzA3ORIcCyH02c10rN4qw0waY3gW3l0ODqx+Tsp\n2PX5WVx2O1ydzi6EegfjSi814a9RiRBrj0fjghPN7sSQaP9ATP3WePyr9Qucb70AG+lC8bIkRAYa\noWZVmJ84BX2xZpEyVrN4egxe6ZEeQ0yAFeGWQMCtWB9likSzrUURNxJ5dNL3sPPUO4izRONccxXO\nd55BCuLgbQDQIigla9zTUHERBrQ4HHAQb7R1OMB4CVO7S2ZG4WKZHo0QastzLAO5/23RmkEIQYej\nE1rZLXAckZIIArwFoxRqUAqG3g7UYHyDiFIeWeH39VtDAAAs7ikp0cNgGAYaTo2pQSn9nnO3iTZH\nSh2KXaHu2jNqFUeV88ZkwaI19Wkw1CwnqXWyDCtTDW2VgoI2lx1NthYEewcNSX1sANCqOWjVt17c\nx1fvCwaMpKAK9AgFymNPk6zjpfUtf/zir4qYBeAp4xBhDEOEMQwHa/6N5u4WmZClQ5LOFo2VmlVh\nSUIOrl0b3Bz8J5JXobajHskBnvPhDMNIo3QAUjXHvV/+FVMCk9HtErwq0auWDyTaHR0wu41Pq70N\nH1w9jMv2nhgFT3iYtSbJYCRZE/FZw+fSuSJO3olmt6Cgj9aMlAh/uOqm4fznFxBhDJNSyW8F+X3O\nH5ONtKAUWPV+aOpuRrfThrzoXOn5ezPROh6vZm5DafWHONdchX9dPoLJAUm40n4VWk4DjuXAEhYc\nw4FwDrAuHmqiAt+tAWtswv88NhV+Zh3iIr3ReFWs1kjAdxjBaLvAqJyIMIbBwTtwpukLhVy70ZtD\nu7t2jtifPJmy5pafvzfUYAwBNxNUWxqXd8P9ooch1hlgh8HMIcdyyI6cjb1fKIUV+3ODk/0n4lhd\nBc40KUuiBhoCEGWKxNnmLxHo5S9Ne8hLXIoSKfJU2nsFNatCiHeQx2gYgJTCCQi1QkRj6uSd+MMX\n7yqOtbnsYMBIMSwtp5WmPeTV29rs7ah3y6sHeCljLINNkCFQGvTcjLG+cbBozWixXcf2Y6+jobtJ\n4VH3HpGbNEKndt3Wio9rPdPP5ZlzsZYoWHQWHKr5N2plhZPsLofkYfi4g/STA5Lga/GGH26gpXYD\nQgxBWBh1P8b6xCHGMkbanhM5Z0Dn916T9crxnwPomc5jGAZmrQkttuvgCQ8dp0VSaDhOtzXBxrYB\nMEqGQPwtJIWNwXV7C6o7LiPMGIKGLiGlV66N5WfRYoyPBcebAbO7P+mrhOytMuQ90Ycffoh58+Yh\nNzcXO3fu9Nhvt9tRXFyMnJwcLF++HF9/7SkmNty4U41/i9YMHafD541CVkR/dRruNWaHzsCCMdko\nilsibZN7GHJ0Ki2eSF7tsT3ZfwJWTXgQK8YuRUboNMnD6KsmcvAAO69vmm+P/Vaf2+WdXoDeCj+9\nLzZP3dDvdWaGpkt/q1gOZnen2tDZk/Pfam/DsboKsAw7JOnWt4tepcP65O8DABrcAx957EXbayBh\n1vZ4GH1fr6fttCot/NwGQV6LxME7PBa3MgyD9LAUKZHkVmEYBgui7lcYi1ul3eGpNCz3IEXD2uHo\nhIpVYVyQkHFZ11mP67ZWnGxQrlL3NRjh6yU8X5h3iCRGKn9HCFzSlJTlNp+9L4bUw+B5Hlu3bsWu\nXbsQEBCAwsJCzJ07FzExPRlFf/rTn2A2m/H+++/jwIED2L59O37yk58M5W0NOao+Aty3AsuwCDT4\nS4v36js9iwjdizAMg4XROYptfB8yF3JK0orh5J041XAGHY5OWN3TKmJnKc6Tn7h2yuPcCX7jPLbd\nC0SbI/Fa5kt45vAPFenWetmUlDg1E24MQV50Lg5f+VgKXmtYNUxaE+aEZUDHafHP6kNI9Bsnqab+\nq+aIx3fGW2I8FozebQINAZgbPgttjnYcrf1UsU9uMNSsWurQ93/1jz6vZVB7warzRUN3E8waEwxq\nYVBWJyvSVN1Wg5r2r8Ey7G0biKFgbsQs1HXW41TDGagYzmMGQjRuLuJCp7NLSlr49e
f/1+f1vFR6\n+GgtONd8HtHmSKl/+MMX+6Rjvmy5AAAesaY7ZUgNxsmTJxEZGYnQUCHtcOHChSgtLVUYjNLSUqxf\nvx4AkJubixdeeGEob+kbId4nBvMis5B8BwFqcdQAAMvi8wfjtu4KvUuQ9kacVpJ3rHLE/P86mdFc\nHp+PWWEz+jz+XoFjObx83w8VUxJsP/XQ542Zi/sjMrH+4HMAgB/P3goGDBiGwZKY+cgKvw9mrUmK\ne8lH4WaNCQ7eccfpxEPFt+IWgSc8jtZ+ijhZhp98hP1f05+FQe2FsT6xONdc5XENHadDRkg67gud\nhsrGL5DoN06q4d3bePKExxhThMdU0N3EpDHi0Un/geu2Vmg5DT68UqaQ6rDIpH4SfOMRY4mCXqVH\nl7NvzayxPnGItUQhK/w+cCwneeF9VQQ0qL0GdSAxpAajrq4OwcE988yBgYE4dUo5Uqyvr0dQkNB4\nHMfBZDKhpaUFFsu9417fKizDIq+PPOxboTBuMbScBkvjFkvu+nAkziIsMhvonG9veqeBzh+Tfc8b\nC5H+XlT59Ir8WLPGBC2nURoZ2WjZr9fiudzIrD7z/e81WIbFj2dthUrWHvLqdOLzPZG8GicbKlHb\nUY+/XXhP2r9ywoOSqsKMEGEhYn/TkfMis5DZR0rrvYD4nDd6Fx4cVwiGYfB40kqcvPY5ciIz8fTh\nHwIQ4ikTrAmI8xEMr5i+31+qNADMCp0+SHcvMKQGo3eBkYEcQwgZtFz64Yy/lx9WTnjwbt/GHWPV\n++GnmdskYcPbYWlcHv785d+wZuJ3pfUow5G04Mm40lGL2f28xFtnPHfD3z7DMJgckITj9Z9hScx8\nzAm7NzvGvui9TiHRbyzeu1gqZRkBgmFJ9p8A+ANTA1PgIi64iKtP48AwDMb5xOFss5BNtSw+H+lB\nqQodseGCWH9mTliG9P8fbY6UtNz+O+N5qFiVlHnWmyiTclGeWWPCjJCpmBqYgkDD7QX7+4MhA+nV\nb5OKigq8/vrr+NWvfgUAUtB7zZqe9K7Vq1dj3bp1SEpKgsvlQkZGBsrK+lGjpFAoFMpdY0gn+iZO\nnIjq6mpcuXIFdrsd+/fvx9y5cxXHzJkzB/v2CcGa9957D9Omeco6UygUCuXuM6QeBiCk1f7oRz8C\nIQSFhYVYs2YNXnvtNUycOBFz5syB3W7H008/jTNnzsBisWDHjh0ICwu7+YUpFAqF8o0y5AaDQqFQ\nKCODeyf3jEKhUCj3NNRgUCgUCmVAUINBoVAolAEx7AzGzbSpRholJSWYMWMG8vJ6BAuvX7+OlStX\nIjc3F6tWrUJbW8/K3xdffBE5OTlYsmQJzpw5czdueUiora3Fd7/7XSxYsAB5eXl45513AIzOtrDb\n7SgqKkJ+fj7y8vLws5/9DABQU1ODZcuWITc3Fxs3boTT6ZSOH2l6bb3heR4FBQV49NFHAYzetsjK\nysLixYuRn5+PwsJCAIP8jpBhhMvlItnZ2aSmpobY7XayePFiUlVVdbdva0j55JNPSGVlJVm0aJG0\n7eWXXyY7d+4khBDyi1/8gmzfvp0QQsjBgwfJ97//fUIIIRUVFaSoqOibv+Ehor6+nlRWVhJCCGlv\nbyc5OTmkqqpqVLYFIYR0dnYSQghxOp2kqKiIVFRUkCeffJIcOHCAEELI888/T37/+98TQgjZvXs3\n2bJlCyGEkP3795MNGzbclXseSn7zm9+QTZs2kUceeYQQQkZtW2RlZZGWlhbFtsF8R4aVhyHXT7us\nDgAACDZJREFUplKr1ZI21UhmypQpMJmUQmqlpaUoKCgAABQUFEhtUFpaivx8QXcqKSkJbW1taGho\n+GZveIjw9/dHQkICAMBgMCAmJgZ1dXWjsi0AQK8X5EXsdjucTicYhkF5eTlyc4WV0wUFBfjnP/8J\nQPl7yc3NHXELY2tra3Ho0CEUFRVJ2z7++ONR2RaEEPC8ssTxYL4jw8pg9KVNVV9ff4MzRiZNTU2w\nWoXaB/7+/mhqEkTp5LpcgNA+dXV1fV5jOFNTU4OzZ88iKSkJjY2No7IteJ5Hfn4+Zs6ciZkzZyI8\nPBwmkwksK7zSQUFB0vP2p9c2Uti2bRueeeYZSVajubkZZrN5VLYFwzBYtWoVli5dir179wLAoL4j\nw6qAEqFLRm5IX+0z0nS5Ojo6sH79epSUlMBgMPT7fCO9LViWxbvvvov29nasXbsW58+f9zhGfN7e\nbUFGkF7bwYMHYbVakZCQgPLycgDC8/V+5tHQFgCwZ88eySisXLkSUVFRg/qODCuDERQUpAhS1dXV\nISBgcMW1hgN+fn5oaGiA1WrFtWvX4OvrC0AYIdTW1krH1dbWjqj2cTqdWL9+PZYsWYLs7GwAo7ct\nRLy9vTF16lR89tlnaG1tBc/zYFlW8bxiWwQGBsLlcqG9vR1ms/kmVx4efPrpp/jggw9w6NAh2Gw2\ndHR0YNu2bWhraxt1bQEIHgQA+Pr6Ijs7GydPnhzUd2RYTUkNRJtqJNJ7JJCVlYW//OUvAIB9+/ZJ\nbTB37ly8+65Q6rOiogImk0lyRUcCJSUliI2NxcMPPyxtG41t0dTUJGW6dHd3o6ysDLGxsUhPT8d7\n7wmy4PK2yMrKGrF6bRs3bsTBgwdRWlqKHTt2ID09Ha+88sqobIuuri50dAjV/To7O3HkyBHEx8cP\n6jsy7KRB+tKmGsls2rQJ5eXlaGlpgdVqxbp165CdnY0nn3wSV69eRUhICF599VUpMP7CCy/g8OHD\n0Ov1eOmll5CYOHzlwOUcP34cDz30EOLj48EwQnGh4uJiTJo0CRs2bBhVbXHu3Dls3rwZPM+D53ks\nWLAAjz32GC5fvoyNGzeitbUVCQkJ2L59O9Rq9ajRazt69Ch+/etf48033xyVbXH58mU88cQTYBgG\nLpcLeXl5WLNmDVpaWgbtHRl2BoNCoVAod4dhNSVFoVAolLsHNRgUCoVCGRDUYFAoFAplQFCDQaFQ\nKJQBQQ0GhUKhUAYENRgUCoVCGRDUYFCGNcuWLUNBQQEWLlyIxMREFBQUoKCgACUlJbd8rdWrVw9I\n7vq5555DRUXF7dzuLVFZWYm///3vQ/49FMpAoeswKCOCK1euoLCw8Ibqo6JUxHBh7969KCsrw44d\nO+72rVAoAIaZlhSFciuUlZVh+/btSE5ORmVlJdauXYumpibs3r1bKqizefNmpKWlAQBmz56NXbt2\nISoqCitWrEBKSgpOnDiB+vp6LFq0CBs2bAAArFixAo8//jgyMjLw9NNPw9vbG+fPn0ddXR1SU1Px\n0ksvARC0eZ555hk0NzcjPDwcLpcLWVlZWL58ueI+GxoasGnTJjQ3NwMAMjIysHr1arzxxhvo7OxE\nQUEB0tPTsXnzZpw4cQI7duxAV1cXAGD9+vWYNWsWqqursWLFCixatAjHjx+H3W7Hli1bkJqa+o20\nNWWUcCfFOiiUe4Wamhoybdo0xbaPPvqIj
B8/npw6dUraJi8uU1VVRTIzM6XPs2bNIhcuXCCEEPLA\nAw+QTZs2EUIIaW1tJWlpaaSmpkbad/jwYUIIIU899RR56KGHiMPhIDabjcybN4+Ul5cTQgh57LHH\nyC9/+UtCCCGXL18mKSkpZM+ePR73/tZbb5Hnn39e+tza2koIIeSPf/wj2bhxo+Le8/PzSWNjIyGE\nkNraWjJr1izS3t5OLl26RMaOHUv2798vPXtmZiZxOp0Db0QK5SZQD4MyoomOjsaECROkzxcvXsRr\nr72G+vp6cByH+vp6tLS0wGKxeJw7f/58AIDRaERUVBSqq6sRGhrqcdz9998PlUp4lcaPH4/q6mqk\npaWhvLwcL774IgAgLCxM8mR6k5ycjN/97nd45ZVXMHXqVGRkZPR53PHjx1FTU4NVq1ZJgpQcx+Hy\n5cvw8vKCXq/HggULAADTp08Hx3G4ePEiYmJiBtpcFMoNoQaDMqIxGAyKz8XFxdiyZQtmz54Nnucx\nadIk2Gy2Ps/VarXS3yzLwuVy3dJxA62zMHnyZOzbtw8fffQR/vznP+Ott97Cb3/7W4/jCCFITEzE\nrl27PPZVV1d7bON5fkTVeqDcfYZPBJBCuQlkAPkb7e3tkjrpnj17+jUCg0FaWpokK33lyhUcPXq0\nz+Nqamrg7e2NBQsWYPPmzTh9+jQAodaFKGMOAKmpqaiqqsKxY8ekbSdPnpT+7urqwoEDBwAIJUoB\nIDIycnAfijKqoR4GZcQwkNF0SUkJ1qxZg+DgYKSnp8NoNPZ5fu9r9bfvRsf94Ac/wLPPPov9+/cj\nOjoaqampiu8TKSsrwzvvvAOO40AIwdatWwEAM2fOxNtvv438/HxMmzYNmzdvxhtvvIHt27ejra0N\nDocD4eHhePPNNwEAVqsVX375JYqKimC327Fjxw5wHHfTNqFQBgpNq6VQhgibzQa1Wg2WZVFXV4ei\noiLs3r0b4eHhg/5dYpbUkSNHBv3aFIoI9TAolCHiwoULeO6550AIAc/zKC4uHhJjQaF8U1APg0Kh\nUCgDgga9KRQKhTIgqMGgUCgUyoCgBoNCoVAoA4IaDAqFQqEMCGowKBQKhTIgqMGgUCgUyoD4f001\n1ZxdsABYAAAAAElFTkSuQmCC\n",
-            "text/plain": [
-              "\u003cmatplotlib.figure.Figure at 0x7f96f1241810\u003e"
-            ]
-          },
-          "metadata": {
-            "tags": []
-          },
-          "output_type": "display_data"
-        },
-        {
-          "name": "stdout",
-          "output_type": "stream",
-          "text": [
-            "test_accuracy tf.Tensor(0.99, shape=(), dtype=float32)\n"
-          ]
-        },
-        {
-          "data": {
-            "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYwAAAEcCAYAAADUX4MJAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsnXdgFGXex79TtiabZJNsGoGEBEihhRJAQBQQQaqABQue\n4h3HqYeK7ThFz0NRz8N2nHIoqKe+dyIKJ4KA0gQpofcaSO/JJluSrTPvH7PTtiSBJIIwn792Z6c8\n88zs83t+9SFYlmWhoKCgoKDQAuSVboCCgoKCwq8DRWAoKCgoKLQKRWAoKCgoKLQKRWAoKCgoKLQK\nRWAoKCgoKLQKRWAoKCgoKLQKRWAoKCgoKLQKRWAoXHfs378ft91225VuxjVPv379UFJScqWbodCO\nKAJDQWDUqFHo3bs36uvrZdunTJmCzMxMlJWVAQD+9Kc/ITMzE8eOHRP2KSoqQmZmpvB95syZWLVq\nlfB96dKlGD16NPr374+bb74Z8+bNAwBMnDgR/fv3R//+/ZGdnY0+ffqgX79+6N+/P5YtWxbQxiVL\nluDZZ59t030OHDgQ33///SUd869//Qtvv/028vLycNNNN7Xp+jz+fXStcejQISQnJ1/pZii0I/SV\nboDC1UVycjLWrVuH++67DwBw9uxZOJ1OEAQh7EMQBKKiovDOO+9g+fLlsu3BWL16NdauXYtPP/0U\nycnJqK2txZYtWwAA3333nbDfzJkzcfvtt2P69OltugeWZUO25XLZvn07nn76abjd7nY/99WK1+sF\nRVFXuhkKVxGKhqEgY8qUKVi9erXwffXq1Zg6dWrAflOnTsWZM2ewf//+Fs95/PhxDB8+XJhtxsTE\n4M477wy6b3OVanbs2IGlS5di/fr16NevH26//XYAnKB5++23cc899yAnJwclJSX45ptvMH78ePTv\n3x9jxozBl19+KZzHX0sYNWoUVqxYgcmTJyM3Nxfz5s2Dy+USfrdYLCgsLER2djZmz56NqqoqQQuq\nrq4Gy7JYtmwZxowZgyFDhuDJJ5+ExWIBALhcLjzzzDMYPHgwcnNzceedd6Kurg5vv/02Dhw4gIUL\nF6J///545ZVXgt7z448/juHDhyM3NxczZ87E+fPnhd+cTidef/11jBo1Crm5ubjvvvuEdu/fvx8z\nZsxAbm4uRo4ciTVr1gh9JdVqVq9ejXvvvVf4npmZiS+++AJjx47F2LFjAQCvvvoqbr75ZgwYMADT\np0+XPXOGYbB06VKMGTNG+L2yslI4V3FxsdAPb7zxBkaOHInhw4fjL3/5i9BWs9mMOXPmIDc3F4MH\nD8b9998f8h1QuLIoAkNBRt++fWG323HhwgUwDIMNGzZg8uTJAQO5VqvFnDlz8NZbb7XqnGvWrMHy\n5ctx/PhxMAxzWW278cYbMWfOHIwfPx6HDh0SBkEAWLt2LV555RUcPHgQiYmJiImJwbJly3Dw4EG8\n9tpreO2113Dq1Clhf38tYcOGDVixYgU2b96M06dPy4Tmzp07MWTIEGi1Wnz44YeIi4vDoUOHcPDg\nQZhMJnz66afYsmULvvjiC+zYsQMRERF4+eWXAXADss1mw44dO5CXl4eXX34ZGo0GTz75JAYMGIAF\nCxbg4MGDeOGFF4Le80033YQffvgBu3btQnZ2Np5++mnht9dffx0nT57El19+iby8PDzzzDMgCALl\n5eWYPXs2HnjgAezZswdr1qyRmQv98e+LLVu2YNWqVVi/fj0AoE+fPvj222+xb98+TJo0CU888YQw\n2K9YsQLr16/HRx99hAMHDmDRokXQarUB533zzTdRWFiIb7/9Fps2bUJlZSX++c9/AgA+/vhjJCQk\nYO/evdi1axeefPLJkG1VuLIoAkMhgClTpmDNmjX4+eefkZaWhri4uKD73XXXXSgvL8eOHTuaPd/k\nyZOxYMEC/Pzzz5g5cyaGDh0a1D/RFqZOnYr09HSQJAmapnHTTTcJGs3AgQMxbNiwZrWhBx54ALGx\nsYiIiMDIkSNlwmXbtm3N+i1WrlyJJ554AnFxcVCpVHj00UexceNGMAwDmqZRX1+PixcvgiAIZGdn\nIywsrNX3NW3aNOh0OuG8p0+fhs1mA8uy+Oabb/DCCy/AZDKBIAjk5ORApVJh7dq1GDZsGMaPHw+K\nohAZGdmswPDn97//PQwGA9RqNQBg0qRJiIiIAEmSePDBB+FyuXDx4kUAwKpVq/Dkk08iJSUFAJCR\nkYHIyEgAcm1x1apVmD9/PgwGA/R6PWbPni2YI2maRnV1NUpKSkBRFAYMGNDqtir8sig+DIUAJk+e\njPvvvx8lJSWYMmVKyP3UajUeeeQRvPvuu1i8eHGz55w4cSImTpwIr9eLH3/8EU899RR69uyJYcOG\ntUubExISZN+3b9+O999/HwUFBWAYBg6HAxkZGSGPj4mJET7rdDpUV1cD4Aa9Xbt2Yf78+SGPLSsr\nw2OPPQaSJIVjaJpGTU0NpkyZgoqKCsybNw9WqxWTJk3CvHnzWuUbYBgGb731FjZu3Aiz2QyCIEAQ\nBMxmM1wuF1wuFzp37hxwXHl5edDtrcW/L1esWIFVq1YJfWK322E2mwEAFRUVLV6rrq4OTU1NMt8U\nwzCCQHn44YexZMkSzJo1CwRB4M4778Ts2bMvu/0KHYeiYSgEkJSUhE6dOuGnn37Crbfe2uy+06ZN\ng9VqxQ8//NCqc1MUhbFjxyIjIwPnzp1rj+YCkJs/XC4XHn/8cfz2t7/F7t27sW/fPowYMaJZ/0go\njh07huTkZBiNxoDr8CQmJuLDDz9EXl4e8vLysG/fPhw+fBhxcXGgaRqPPvoo1q1bh//+97/Ytm2b\nYEpryXm+du1abN26FZ9++in279+PLVu2CPdgNBqh0WhQVFQUtD3BtgOAXq+Hw+EQvvNCQIq0Xfv3\n78dHH32E9957D/v27cO+ffsQHh4utCMhISHktXiMRiN0Oh2+++47oY/279+PAwcOAADCwsLw3HPP\n4ccff8TSpUvxySefYM+ePc2eU+HKoAgMhaAsWrQIn376qWCPDgVFUXjsscfw4Ycfhtxn9erV2L59\nO+x2O1iWxfbt25Gfn48+ffpccrtiY2NRWlra7ODvdrvhdrthNBpBkiS2b9+On3/++ZKvBXDmqBEj\nRgjfY2JiUF9fD5vNJmy7++678dZbbwlhx3V1ddi8eTMAYO/evTh79iwYhoFerwdN04J2ERsbKziF\ng2G326FWqxEREYHGxkYsXrxYGMwJgsC0adPw+uuvo6qqCgzD4PDhw3C73Zg0aRJ2796NDRs2wOv1\nor6+HqdPnwbAOaI3bdoEh8OBwsJCfP31183ev91uB03TiIqKgsvlwpIlS2C324Xf77zzTrz77rso\nLCwEAJw5cwYNDQ2yc/Baw6JFi1BXVwcAqKysxM6dO4U+5oWOXq8HRVFKdNZVSocKjD//+c8YOnQo\nJk2aFHKfV155Bbfeei
umTJkisxsr/PJIZ5adO3dGz549g/7mz8SJExEXFxcQessTHh6OpUuXCtE8\nixcvxl/+8hf0798/5PVDMW7cOLAsi8GDB2PatGlBjwsLC8Pzzz+Pxx9/HIMGDcL69esxevTokOds\n7rrbt2+X+S/S0tIwYcIEjB49GoMGDUJ1dTV+85vfYPTo0Zg1axYGDBiAGTNm4OjRowCAmpoazJ07\nFwMGDMDEiRMxePBgTJ48GQDnN9mwYQMGDx6MV199NeDat99+OxITEzFixAhMnDgR/fr1k/3+3HPP\noUePHrjjjjswePBgLF68GCzLIjExEcuWLcOKFSswaNAgTJ06VRAYDz74IFQqFYYNG4b58+cH/Df9\n++LGG2/EjTfeiLFjx2L06NHQ6XQyk9VDDz2E2267Tbj3F154QdBgpOd6+umnkZKSgrvuugsDBw7E\nrFmzUFBQAAAoKCjAgw8+iH79+uGee+7Bfffdh9zc3JDPROHKQXTkinv79+9HWFgYnn32Waxduzbg\n9+3bt+OLL77AsmXLcOTIEbz66qtYuXJlRzVHQeGSqK2txe23396iU19B4XqhQzWMgQMHIiIiIuTv\nmzdvFmLp+/btC6vVipqamo5skoJCq7Farc06uxUUrjeuaJRUVVWVTL2Nj49HZWUlYmNjr2CrFBQ4\nUlNTkZqaeqWboaBw1XBFnd7BrGHXS9kFBQUFhV8bV1TDiI+PR0VFhfC9oqIiZJKYlI6oFdSefHpo\nFdad3QwtrcG/p79zWee468s/CJ+jdVFYOvm19mpeh1FurcLj618CAKy8+wMAwHu7V2Bn0T7EhcVg\nycTg5S8W/7wMe0sOAQA+mvI3RGgNwm9bLvyMpfs+DzjmnxNfgSksJmD71cxft76D41Vn0D2mK169\nhSugWGGtwlxfn/nzybS3oFfphO8sy+LulY8I33vG9cBLI6/erOjT1efx4hZ5fg7/Xizc9g6OVZ5B\nt+hULBrzHABgT/FBvLXrQ2hoDT6+/e+4d9UfAQDzhv4OQzrLAyQ2nNuGFQe/RDAitRH4cMob7X07\nbeJ8bQH+/CPXpi/vel82fj2z8VUU1otVfVfe/YHw/3/hprnok5CFnYX78N6eFQCA/9y5BBQpRpF9\neWwtvj65Puh1aZLGx1MXQ0Or2+U+OlxgNOdTHz16NL744guMHz8ehw8fRkRERKvMUQRBoLra2p7N\nbFdcDi8AwMN4L6udDU6L7HtaRGrI85hMhqumL87WivH4fJtcLq4v3J7gffG//O8FYQEAP58/jAh1\nOPIqDuGejGm4UFUa9FrPblyEV4Y+DzWlEra1R194GS/+e+YbDIjPQWZ090s+nmVZfHZqJeocZmhp\nLYYkDEBOXG8AgMvtey/cDKqrrWhwWvDS7tAD2+5zR0CRFI5UH8eMjGmosFfJfj9RdRZPr38FRk0U\nHup5L1Tt3Bdt5Xx5YGlzvk1a6Ll96grww8ndyDH1wt4CLrLM6XHi6e/FqLG3dn2Il4Y8ixJbGc6Y\nz+PuHrejrC60r7PBYUFpRS3UlBpWlw3fFHyLMUmjkBSeEPKYULi8bnx+aiVGdxmBlIjLS4Yss1Xg\nrYMfCN/nb3wD47uOQVZ0DwCAjhAnBUZNlOy5LdnzKSLVEahsFJ/9/gsnUeeoR4GlCNO7TUJhbVnI\na3sYD/acP4rsmAysPLsGjw6beVn3wNOhJqmnnnoKM2bMwMWLF3HzzTfj66+/xn//+1+hEBxfvmHM\nmDF48cUX8dJLwWdavzYogutWL+O9rONP18kT2rSUps1t+iWoaaoN2Eb6XjEWgRMHs6Memwq3Bpzj\n3UPLsLt8H87W56O2iYvbD6P1sv3s7kYUWppPGLscTtSexq7yffjH4dB5Jc3R4LJgb8UBnKu/gGM1\nJ/Hh8c9C7ru1eCfcjDvk7yW2Mrx/ZAV+LstDsbUUp+vOBuxTZC3FkZoTOGM+H+QMV5bqIO8Dj0Ed\nLnz+8Ni/Acjf+zJ7hWz/U3Vnsfz459hZugd1DjOsruDCMCEsHgBQ43tvVp9fh7ySw/jPmW8u6x4O\nVh3Bgaoj+Nv+f1zW8QDwwdGP0eRpEr5faCjEksMfCd/rnGbhc7TWCDfjEb43uhtRaC2Gw+sUtlXa\nq/Hxif/D1uKdsLhsqG2qAwECNCmf/9/ShQsHr2qsgcPjxPaSXZd9DzwdKjAWL16MnTt34vjx49i2\nbRumT5+OGTNm4O677xb2efHFF/HDDz/g22+/lcX9/5ohfeoiCxb1Ti6JqcRaJggQs6MeDU75C+/y\nulBm4/4kp83cH+d3vbjZgJdl4GE8KLaGnklcaRiWwa7yfQHbSV54sty9syyLQksxGJbB2gsbA/b3\nSIRsibUMlY3VoEkaTw98LGDfat+g0J6YnQ0t79QMlhADGQCwLF90kROeUnOTFL7PaiT3d67+Ak76\nBMYfc34XcMy6i5uEgabOYYa5qW33cbkwLIMiSwk8jAe7g7wP/D35TyCqG2tR66hDplGu1fGDoFSA\n1DTVweLiEief6Pd7YfsNibkYkjDAtw8nrEps/H+mddkDdRYHzFZxcFZToinnWM1JVNirUGGvwvGa\nUzhWcxIuryvYaQBwE8Yia0nISUFtkxkMy6CuyYyUiM5Qkyq4GTfOmy8AAAbG5+Dtm1+FilTJjiuQ\nTJRqHbWobqpFrC4a83OfELaPSx2N7lFpALixxeqyoT1Qakl1ABQh2hef//lVPNL3Ybx/ZDluSMzF\n/Vl34oVdiwAA/xz1N2G/5ce/wPHaU3h24B9xvv4iwlVh6GzoBIAbbL8v2IwNBZvx/KB5l6VadzTH\na06h2MqZj6QaEW+q5U2TJ2pP44OjHyNKEykIUym8YAGANfmcXTZBHyczPWkpDRxeJ2qbmcFeLv7m\nwEuluT9mo2+Wyc8WvSGq9kZrolDrMMs0ttXn1wHgZtAmnei7yTB2wxnzeRRZS/Fz6V7c3HkY3j20\nDGEaHZ7tP7dN93IplFTb8PW2fPTKteObC2sQ7e6GelXg831p9+v4x8jX4fKKg2iMNhr5DVwxwz6m\nnrjQUACXb5A1aiJR3VSLY9UnhP2rm2phddlAExQiNWLYvppSIUYXDQAotVRj87bDqDJxpiuPn7Zf\nU98EmiYRFS7X3p9+n5uFr/jTKADA7hOioFp69JOA+xmfegsmpAUvn/Ppyf/iQNWRoL8BwIu7X8Or\nw56Hh/UiRmtEbVMdah11WHKE0z6KS904qKqGs5EGqRX7a2fZXuFzibUcNrcdnQ2doKXFe1GTavyY\nVw5oASfjgtXdPuZJpTRIB0CT8rIGxVbOlhtsxsVzvJbLcr/QUAizox6JYfGCY4thGRyrOQkAqG66\nOvNUpIObRjIrc/pmYIxvdl3la38wYQFwNld/0iJTZbMs3uzQnMnjcuEFRqSac7x7GQb/XH0Mu49X\nNHeYgKUZgcELE6uLqzZ74Dw3+52UNhYP97offWI5DZsiKURro4Ka+LKM3WW+Cj2
tw9RuEwBwfepl\nvKhpqkVhfQnqHOaA49uTH/cX46utnCns3a+O4Eh+LfZc4LSgGoZ7529LvSXgOLu7USYwaJIW3odY\nXQxujRDXw6B8GkaDRHOrbaqD1WVFuDoc63aJfhI1qUa4iqsEfKqkCscuVgmze/++fHbpbsxbIi8X\n4/aIAtza6EJxlQ2H85t/7rVN9SF/a05Y8Jgd3H2HqcKgIlWwuxuF30rKXVjyzTGA4cYBLREecPze\n4uMAuH7TkKLAyC9qxLHz3LmdXmez7+WloGgYbeRg1VEcrjqG32TPEAZ4f0e/nhZNDwv3/F34vDZ/\nA1SUGuNSRwnbVp37FgD3AvCaitnRgFJbOYDmB6S28lPJLhRbS3FfVvDFjZpD2q4GlxX/Pvkl7s+6\nEw4PN5vmBYaOEmtTqSk1Mo3dcbRGnD1KNQyeblFdZfbZWF00SqylMpPNt/kbcOrAGUxOvQ1ZMT0u\nuf08dc564R7mb34LpohwHL+QjANnqtE9ORKxUeKzLKm24b+bz+GeW3qgUyw3UAWzrf/f6VWYkTEN\nNjdXg8nubsT8D3fDbKgHHQ/0NfVCYlg8DldxS94SBIlYXUxQv0QPYzpUkr6w2LzYcsgFJHOai9Ut\nPofTdecwNGlQ0Ps8U2TGpn3F+O3EbOg0wYeB7y5sxL7yY4jWRYAFgyaPAwzLQEfr0MXQCet/5Aaw\niUNTUW/jJgaNbANAAKSGKw+SY+qF7wt+lJ3X4rLCzXD7EwyNysYqwTz57bZSnDsHqLOiQBnqg/rv\nCswVsLhsiNfHYceuKuh81dBVlApNjZxK64EThEo0LTV6mvDmyjzcOSIT8UbRH3ax3IIIvRoxkVqY\nbU7Qnc4BXhrnSnrjbHE9QDbvh9xbuR+RWgOmpN+GAksRNhRsxm+y72l1octKO1f8sbLGBcbrN3+n\n5JOnRisF0k9mXLRdAEFx/4ldR8VCkkXlTQDDTdzqbHYcbWgfc7aiYbSRTYVbcaDqCCokUQz+g56W\nFgdJ6X4bCrdg7YUNcHsDbZycwOAeD6+uA8EHpPbiy7NrsKt8nzC4Xwr87FnnE457Kw6g2FoKh5cb\nOPg+kTr0RiYPl5magOAaRh9TT9ASM5+aVCMxLB6ltjLBhry1ZCeKGkqxvfTyigzy1DvEGaOFqEC+\n9TzUXblZ3PnSBtTbnCiv5Qb+XccqcLLAjAUf7QXjGyD4fsiNF8NAfy7LQ4W9SuhXFiyqnZXCgPDN\n1iJsPViCKenj0cXQCQ9l34P+cYGFGRl7BGpKw1FRLXGA1rpQWcP1gcPjkJnE/IMnpCz+8jAOnavB\njqPlQX93ed34vmAzapxVOFt/HufqL6DEVoYyewXyGy5ia8lOYd+iSiu8DHf/UgcuADQ1EtDV9ZJt\nyztbJGgYXpf8+Z8r4N4X98VeiNMmYKZk8kKAAOuhcc56Gm7GDT0ihdk3AOw8VIn3vuQ09WqrBYRK\n7l84XVGK91cfR53ER7Hw0/145oNd8DIMzBYHVJ3yoepyBkv/dxyb9hWDoFoOXNlUuBV/+7+DePvA\nv3Cs5hS2l+zCgdLgdfFU9Wny+63mBvIT5y0wN8jffY1Fvi/rDRTsfPs276rDFz+Iz7vW7BH2P3i+\nAjtPFrR4H61BERhtwOayo8TniJaqvP720pbIbygI2BatjRKcn1KkA0JNUy12Fe2XRWC0B8EG7Zaw\n+GykRk2kuM1lFTQMN+NBdWOtYCKY3fs3mJw+DoTfK9jolt/LY31/Cx2tlcWdqygaGdHd4WG9OF/P\nCVM+qv2c+cJlR6fxbfaHiqoBaBfqbS7MW/Iznv9wLxiGhcPF9xOLjScOg2VZ4fgp6eOQFpkqnKPc\nXik/Z0QNCJ/AOHDSjM82nQXp0YM8PwKEMxLDOw1BnF4eYu48MRSfb8jHwk8PCNusNq8wMDS6HbL2\nHyo/jcIKuU/m+MVavL3yCDxeboCvqGvEiYI6vPb5ASz6/AAaHW7YHW68vqZ15epBMDhUykdvMSDU\n8uf3zdZiNBWnwmsxCtvWHzgnmCrh8csP8H1nHeGYYnoAbnsYWA93f16nFt4GsU+OHyUBVsxnqKl3\ng/VwAsjqsoOK5sxJjJObxNCmUphtTahsaAARJjeJ/ufHc9h58ajYDC8n3DUarp+cpwI1NakScbqo\nHh7fu731cAE+3x184mI52w2sS9SaTpRx1YpZLw2WEf8LjsMjMH2oXNDCK/4HXOf7wms2Cd+rKuX/\nI9ZLi8KU9Mq0rbagCIw2cMZ8Toj2kJpH/DWMlgawiw2FAdvi9LEy5zn/WWr6ef/Ix3hn93JsKNhy\n6Y1vBvdlCAyrywY1qUKYSlT36xz1cHjEtRe+OP2VIDDUPp8EyZfr9g35/iY3qSOPhyZpZBi7AeCE\nLcuywozV4XWi1B581hwMl9uLd746ggNnquD2umXhi1JUyeewcqtoIqq3OYWZKt35DL6r+g92leeh\nys6FOIarw2W+nHJflE+vmCzuvg31osnB98d+e+VhnCo04/3VnGmqpyEnRKvFQdLjJgCfwLA4GoX+\nI1gKDOXEe9+JA1d5rR1vfXkExy6Ik5tth0qx+L+Hca6kAedLGlBQYcWWAyUoaQxddl2EharLKexo\n+hpUdDkItQMEKY6iLEPgTIEVTU4PwrSiJkGonGh0OcEyZOCsmSXRN51z6m8+UIxV2/KF+2UdeoS7\nk4RdGUuMrC/AUABDgWUIUFE1oBO4/xVjjQIA0PFFIDudwr/2rYIme7dMuG07fwQHveuE7zf04drg\nZrn3ShPENEawcl8ly3LDqdnuAGkI5T8i4a0XB/oGt28/6QAPYGhGV4zom4S//eEGDO7MCY5ErZgH\nwro1gEU8D+uUR9yxXhpd431CmvSgU6f2GeoVH0YbkKr8cg1DPuC2NAC7/MLuJqeNQ2pEF5lpKEoT\nGRB/bvY5NWsvMby0zuJAk9OD+Gg9ln17AgMz4zAoK17S3tC5AaGwumwwqA0yX0NNUy0cXid0tA5N\nniY0eRxCX/D78WY3kiDhZb0B0RxScx6PiqCFaJgGpwVuxi0L07S57LL9nS4viqqs6J4cFXCus8X1\nOJpfi6P5tfjDnWkBv/P4z9BqLQ7UWZzQqCgQJs7xes58AYXWYrD2CLhdrExgFPpsyEZtFDcrptyI\nCKdg81LgB72Saq7dZqsTRZVWrF9LgYwcANajBtwhMnUZCnFGPSxeCnZXk/B+UA4jPLoaWJkGeBkG\nBEHgpRV5Ie+Px9bkxs5j5SBi7MF3YAmA8PU1yYCK5e6LNJi5dkrQUlo4fPemogkIUwfahXp7I8BQ\niNLr4K/TZaYYcSS/FicKuPdbN4h7HxmrEXfn3gzSmAG3U4UP8+SJjNyASwBeFUCK5ijGFgXEcpMI\nOqEIrEsDggBuH52AtIiu6GQKw6INX0F6x+kpWuw+CpiiVagD0D0pBv4GPoIQ37pbB3XCT17f5Idk\nQGgb4Q9xhsuLcBdmgfWooEq6CELLXV
Wn0iI+NhJl7lqwXhI39U0GSRCIjdRhZu8pGGTuiazoHnhs\nK6ddPjV9ENSMAbVkHzRaaHgjo/HFD2KeTlJUBIb2TMLXZgIE7YGZKQ/QWC8HRcNoA6fN56H1OXGr\nmzFJ8Xb8UHhZr0ybGBDPzSwJyexJRakQrgpDfkOBEDHFz3qks/L8sgb87f8Owtoot9+6PQxOFNSB\nZVk8/+FeLFieh1XHf8AR51Ys/d8J2b5v7l+CmqZabC3eiY+OfSY48BiWxbJvT2DHEbkDbfme71Hv\nbADrVqOkUvyjbCneAZvbjnh9LGhGhxJbGTYUcAsL8b4LXnDwUVBVjfIoMA2pxrELtWh0SKNqVIjw\nJX5ZXVaYG+V/Tj589Wh+LQ6fr8Fnm87gtc8P4uDZwNXl+EEapAcf5zeTrOfngDxX0oCSahv0nYtA\n0Nxv52vKQBAsvA0xOFVYL3umpyqLxPtmKNAqBjodEKbWYdygLrJzO1xefLnlPAACTIMJrD0SrEuH\nOGNg3gbLUOjVNRrw0mh0O1Bj40wtTWYuyotVNaKm3oFNecWCGQqkB+qMfSCjKqHJ3g1N9m6A4vq3\nuMoGc/hR0DEVINjA4UHmyyUYkL5XlI4vgiFFrpWEq8W1yylKslaK2gm7ywmCodApNjDyJ6NLoGAH\nOIGRlhiNiARjAAAgAElEQVSJgQn9kJucHbQv/jxzAGjItQHGHin7Tqg54b+h9kswhgosPbUU9uij\nsn2i4zxIufEw6lScmJh2Y2AghXSScnP/RMBnUgozNoEgGTBN4v3H601gHb57ZSl4azlNiX93/jAp\nB0nRvhBhhkZEuCh8KZJCdkyGrJxISkws0pOMGJTQHzf36INeadGytv12Qm9kpxpBsiqQYRa4GBcy\njZcfDMKjaBiXSaO7CXUOM7JjMpBffxE2yaDtZeWDC2/HDwXDMKBICl4vJ2j4wZAgCFAEBS/rBU1Q\n6GFMx4GqIzhafQK9Y7MFDUQ6K3/3q6OwNbmxYW8RGJZFo8ODh8ZnYdW2fPywvxgP3pYJp69ExY7a\nzaDjAHexfK3remcDPjr2GYp9SU9uxg01pUZdgwN7TlZiz8lK3NiXe+Evlluwr/IQKANQcSEKlMEM\nSv7uIpyKhMtVC1KiLPCCYnzXMSi3V2JK+m34+4F/BvRNQVkTlnzFLf6j85mRfz5aCcvFElAEBYvL\nhq+2nwb04P6wJINGdxOKq2x45yt5WOPhczXo38Mk21ZYyfUdGVkDgg6tWRF+AoMzlbBwxB4TttU5\n6kCouBntP1cfQ+oQ8bmwak4wqUgVWC8FimLg8Dpg0IRhZGYnbMiTZ62fKgw0afxhSi98vP4Uiqok\nZjuGwg09E7DnqApNHgd2nSkEogDGZgRwEaS2EQs/3Y9GJ9f+4X0ScaTuEDyRtaAixUkOGV4PpsGE\nw/kVUKVyiWNhap0Q2QUAzrP9oe4mlnEB6QVJEuCnRy6dPAQ1QiMOmCBYIXeOCrMAlBfGsHDQtDgI\nTus6Fd179hKimAgAzz8wEIdrSWw8fRCM1YiYSO4loqlAYXZzn87o1ikSUfnhqPVYfecgsHTuVHx9\nmsDO0j0BCYPBcisA4GTtaVQ5xfsxGcICdyLEc4XrKcGf4lJx/co0GkDquP5TU2poVBSanL4EVpdc\nc9artMLEjPXSiAoLrlHOyJiGgoYiIbhEaF+UDkOy40HrR8GlL0fn6BiQBIkovR5mJzd5TDYkBj3n\npaBoGJdJjYN7KUy6GE71lti+Q2kYj/R9OOi5vCwjMz9Js0t5kw1FUrgnk1thjtco3D7BJHWEe3U1\nAOVG3qlKbMwrxo6j5SivteOH/dzs78CZIGs4awKd5sU2UYvgHZR86CShbsLn+3/A01+sxMJ/54HU\nNIFx6OCtTBUcd6zEQWfwJskcdgDw2ffn8b+dF2FQh+OJ/nNkdXqkM7MlXwVGm1TWObExrxiMS40G\npxVHL3CmCcb3JzxfWY2SahtIQ60wcwaAXQUn8PK/d+F/h/bjpf9swtnqYpwvqYdeQ6Nr99AZuwBA\n6KwgdFb86T4x+onQy40phIq7Vve4JBAEUFYbGAJ9rsgGlqHgVdlgc9uhpTUwRekw766+sv3UKhK3\n5op9Mu+uvkhJMODpe/phwW8GijsyFExROhjDwgDKAw/BvWs9Yrr62miBQ10F0liJxFQr7hndHdOH\nB840CQ2npZU7xbwGqbAYn3oL0BAvP4bywItAcyuvGetVOrzwwEB0T46EXis+f0JnA6FyIUytFfx9\nKRGdMbrrDegSb4BGTeGlB3Px1mPDkJYUgYmZN8KdnwMVTTdbdHRoTy7RNT5STOZ7ov8cJMVGYEbG\nVDwuyQpviQq7/H+iJpsv3udm3IAvYoklfNFwjWIRTQ2lxh+n90Fmlyj8/ZGhePauQbJwey2lgdM3\nhoSrtVCrgi9Re2OnIZiZfVdAP5AEgdmTe2LWkHGY0+chIWBGahb1FzKXgyIwLhPeyR2ri4GW1sq0\nCH8fRpPP8SsNDZXiZtwhI5NI3zE0QUNLaUGTtCAgeGd6k8cBt9eNqsZqoNtuaHruQq1FbM/zH4qZ\noaU1gYMYoWls1jH//PJdyDtViTordx+qlJPYbfkBTYn7QcWWglA7wTp9zm7ejMGSoF2cKWDrT06w\njPzezxZZsX6P6OyXRoSxMnt9kAHCJ5S8Tq64HENyAzU/ayuqqcOpmnPQZO2DOp3TMgi9BZqsPFQl\nr8Em80rUxP+Id4/9A7UWJ/p2i4FHVxV4HR80qwFBstD2/hlpSRHolRaNznHhmDou0HRCgMC8qUPx\ntzlDkRyvD/j91MUGwUkNQEg0M0nMTS88MBB/fXgwpgzvKmwz6Lk+Cdep0DVRHBBZhoRBr4LJYABB\nMoiOYaGjtHjzkVugp/WgDPXQZOVB0/0Q6uN+RoOnFlpt4N9+UA6n1VKRokmwn6m35MYIvP77IaBI\n8XmQ+sCIMpqkkeoT/lGaSKQlRWD+/QNkznAeNaUS3nv//0ZKggGRvixstYrCX2cNwt/m3CDbZ/bk\nbAzKEqtb82ZNad0xaR5H18guCKP1AXXJgiEt9gdAFqUXDIfXGRCCKzWFqSk1uiZG4Nl7+yM6QovM\nFCNidaIqrqXFSWdStNyE1hakk8/2qEmnCIzLhHdyx2qjoaU1Mj8FP2uK13PmD16YSF+6EZ1uwH2Z\ndwCAEBabFJaAv94gX+GNn0jQJAWCIGBQhcPisoJhGVk01v78Ylh9zl5S24RQtXPqfILEaBBfHlLT\nhLe/OhR0fwCwO51Y+r8TKKvhzk/oxJknGc7lLagZ32zKN5jr1DSeGfQHOI4PBevSyQZJAABLwu1h\nsOVgCfJLG3D8oiQT1z/U0h+GxLBeCWDdajDwAj6HNC8wqiwWnK7jIpqoKG4AJOhQGgSLzO5aVDXV\nIDs6A0/0myMrvQEAsWGSPzDBYN5dOXh51iCwFPfMe0WJORNRmkioSBoxkVoYwoJYfBlKJjzvzZw
O\nAIiJ4NquVpFIS4pAXJQOOg0tPCfp8/I/H0EQQnCAxWOGQcMN/iZ9YPn3Wkd9UBOph+ImEmRELQiW\nwpP9/4D7/RI4Y6N0MvlNRnGz8Elp44RtYbQeD/e6Hw/1vBeT08XtwXJ7VKRKeIcpsnnreHJcuCBA\neIZkJ2DOFDH0lNdspIUNpUETNEnjqQGPYG6/2SGvw9+LVLtqDXyFgM7hSfhN9gzM7vUg+sSLpt5g\nUVaxkvdMS2vh9D0XLRUY6HG5SDWMYAEkl4oiMC4TqYaho7RwMx5htsSbpLr5in/xAkHqBM2K7iGU\nz270aSBJ4QmI0Ynx6oCYNU6RFEqrbdCSeljdNhw6L58B/XvzMdQ1ijM+QheoScy81WeKIBhQ3UWt\nQ9XlDC7EfBX6Zn3Zrkfya+Efa0+buPpREwdk4ZHbewl2XRVNITk6Gl189bBovwJqg3pw9tTPN53F\nq58dwFtfiv4Gf23En37d45FkCuNCCwFouh/mjvMJDIZwwUKKobXanK3QxAQvI9I9RY+vyrnaPT1j\nMtHdmCYk4fFInbcurwuFlmK8lvcOCiycmW9wsigwpLNGKkgeDRhKMM+pSBpRvrwVmiLx6u8G4405\nQ2W7L3x4EF56MBcRIWzaN/l8SfzskQULg4oT3v6CDwDeP7IcR6qPB2yvcdQBKgdIvQ3xqmR0i+oK\nLa0Vssr5d1caiBEex/lZhiQOEO4jTKWHURuFgfE5gvYkPV4adq2mVMJ26cDWVuQCQz5Qx4fFoVN4\naFv+gLi+IX9rDkFgGJIxKKE/+sZlY+508VyaICYtqcBQS/4f0j5qKxpFw7g64J3cERoDNL6Xklcp\necGh80l0fjtNUvhjzu/QP64PsmMyBDOMwydQgtlJGZ+mQBM0FizPQ0kZZ756f+0B2X5u1oll6w4L\n3wltI/qkiy/kotlDcHM/bvAmdFbYabmDMpjJQPyNExiFFVYx1r5JvlZ7H1M2BmSYkBjLvex8WfPo\nCK5vKFY+gzRFBnEi+khLMGJk8nCMTBgtbJtwQ4rweVBWHBKM+oAY/pRon3mCdsvMJYTaCZguIhgT\nRkcJs9x+vnUr/GfDpETQO70u/OPwRyixlQlVhWO0opCPUIt263sypiMrugf6SbK2WS8lxNtThLz9\niTFhiPQTDHqtCikJBoRiYAbnV5AODHzQRLTWGPQYvt1SKu1V0Edxs+qsBLGvn+z/B/SOzcJNyUMD\njnEwTYhQGxCliRRm0KHs5A9k340exm6YkTFN2Kajdbg/6y5kRffAjIypIe+xJWb1vBc5pl5CUU6D\n5BkEGyQJgsCtKSODnitSI+/rgfE5mJJ+GwBgbs5spEWmIi0yBX/M+R2eGvCosB+fQBoqdDU2iPDO\nMfVC5/AkDO80BARB4MGe9yI7JkOoDdYe6CTmN52iYVw5mnxCQEdphZeSV/X5AYhXLfnkNYqgkBnd\nHQ/3uh80SYvJeE7O4Rh0VSyJhgFAmFUTanmoLkEygCTKJyNNh8nDuoKMqgQVUwozSsTw2yDhkoHX\nFWeS94xJ40IojRWCOcpIibbjGzvdgISweBAEgbQk7g/HC0OSt3n7JTnRFPc9XKfC/Pv7y+oZdUuM\nxh09JmNaFlcFlCCA6Tely47vEm+QtREApt+UBS2lBaltBEF5oWODD5hSLG4uDHV81zFC5VP/SBq3\npIS10+uUZdbTBCUTEtI/ZYzOiMdyfitoWQA4k5RPw6DIdvj7+bpAWpCQHzCpED4zf/qaeoEFi/65\nnKAMV4uDTEpEZ8zp81DIwUa89+ZLtncKT8Tj/WbLZvcGdThiddF4LOe3goZyOQyIz8Hvej8gvHMR\nEg3Df40IninptwlFLKWoSJXMn3JH98mCcMmI7oanBjyCpwY8iszo7kiLTMFknwmLD3XPjA4euhps\nMa6UiM7406AncI9PiCaExeHRvg/LNKS2EtGMtnU5KALjMmBYFnU2K1QkDYqkBNugU9AwuLwKq110\nSgOBVWzF4oKcH0BH63DgTBVe/mSfkHfAD16sLymIL33gLzCmjOgiC/3M7haGiCgvND0OQZ1+DEsO\nf4Q3+UVgWiioBshfrthoNdSpp6DpfhiaFC45qHuMOAuVvuB8e/k/r94nCFS0/FUb2b8TeqfF4Jl7\n+qF7chT+8lCu8JtKkgX+tzk3YPGjw2THEgAXXukn+LQqDQzqMCHOPkmdgpao8/W9NGLFX8OQJlZW\nNsqjZwxqg6ySribIn1IqUITkMoQOgmgNyeGcKSpGy5nApNopb0LrFtU18MAg9PVVyT1v4TSP5gaW\n3Ph+su/8s/f6+kztZ3r0R9rPsn5pR1o74PJtkZrZCIKAXmIS0rcQWcQLaqvbBj2tQye/pQf4SaNs\n0vALIu2LYH6US6XD8zB++uknLFq0CCzLYvr06Zg9W+5wKisrw5///GfU1dUhKioKb775JuLjAyX/\n1cSP+0tQZbFCreW6T+d7ELxg8LAeUASFH/LKoE4VTVLSGd+u4+X4Pu8ikCqag1irEZuPl6CwworC\nCiuyUqMFH4aHlwW8fd8vL8Bk1GDUoDjsKOOcvRaXFS5G7tw0O+u5UhwSgTGmy83IieuFN/cvke2r\nV+kER76bcYMy+ZKytJypZ2DXFOT5cp2ksxh+sOVLfky7KR3WRjf0XapwSJKQHqFX40lJKKlJUgVW\nWpBQWh2Wh5//d4oxQOrJoQgK6ZFdhSTKPp07o1Nxb/zkDlz1LtPYHafN54QS4M0JDGnme4lNXnbE\noA6Xze51QRyW/KAOcFFNvJ+nJUdvczzZfw6qm+oEE4ha1gbufcyM7o75uU9AS2tAgMCLu18Peq7U\nSC5xsMHF2eGbs3XPyJwGFiz2VnAmUX7QZ4Xn3rwQlPazQdV+M2kprRVEYT5tiCAILLxhvtB2Pa2D\nxWWFhlK3GB0lnSwYg9R/e3HIM/CynhbP01FI+yJYbbpLpUM1DIZhsHDhQixfvhzfffcd1q1bh/z8\nfNk+b7zxBqZOnYpvv/0Wjz76KBYvXhzibFcPRZVWEJQXXrevTr1Pw3j74AdwMx54GS9nw/dFDPE+\nDelL8/X2CyitFjOUWYbEN9+bcbqIm/Gabb6y4L7h8WKZzXcObpDhtQn+pf/3qS+xs2y3cD6ryxa0\nxIe622GAFAdEkiCRGtElYD+pw1ZaD4onQmLrldqMeQHHx4lHhqkx944+0Gha/6q1NEvlX/zBWfLZ\nnNPrlKn+iYY4GLVRQvE6KXwJdEFgSEwp/jMx6aBQ5icwItThsnLjwSJRpNFKM0ZmIj6aO39bNAwt\nrUVng1hXSaphSEMpkw1JiNXFCKVUghGtNcp8D81F06hIWgjmAAI1DLKZPAlA/h/oMA2jlYJI77Pv\n0yQNozZK8F/w70Jr8hakzz7Y/URqDCF9Sb8E7WneAjpYYBw9ehQpKSno1KkTVCoVJkyYgM2bN8v2\nyc/Px5AhQwAAgwcPDvj9aoQiCYASywdLVfgfj52Gm/
Fwhcj8on1on5OTYVgu81Zig2cdepmdf2Ne\nMSrqGsH4ykY3WDkBMaA7p311T+FeBOlAJ7W9W11W2SI1PISxXKZhhKobJZ2NVNgDcxSkL2JQDcPv\n1Wrt+gAAoAoRMTM3ZzZyTL3RO5YrC6Gm5YJlYFIf9I7NQnZMBrJjMpAemYKMzlEy53jPmExMShsr\nzKJ5k5Q0MmV27wdk5324l7igD78uSW58P6REdMaQxFxZXwWbnUsHkv7dEpAcz5mMyHacdUo1jFCm\nh9vTxwfdriJpmKQ5AS2YLqSDJP8eiBpG64eU9h7MeCiSwm2po3F/ZvPruuTG90NqRBdM6ipfMY+f\n4Blb4VeRTiY66n7agqGdhXKHmqQqKyuRmCg6ueLj43Hs2DHZPpmZmdi0aRNmzpyJTZs2obGxEQ0N\nDYiMbL/klUuhsrEalfYq9DGJ64t7GS/2Vx5GuDoMMVojKMpXh943EDklTtGvd5xBeGYTAEJWrhgA\nVqw7DQpqJETr4XR5uX1YX66Fnz2+uMqGPy/bA20uCwJiJcy4yDDADnTtpENBCaBX64FG0dbDVUoN\nw4WGQlQ1BWZ1A8DtNyfj+1IuoopfT0JF0rIiidI/fqldHlEFyGdx4ZLPjJ8PQ9h+CQIjlIaREd0N\nGdHdhO/SGfp9mXdCRamgpbV4VJJRn5KgRVykATUOTksalzoKaZGp2F/J3b/ZGejDSDYkYU6fB4Wy\nEfF6E+7Pugufn1opmLumpN/GFRL0I9jsXNoXakoFb4hktbYgHbjUIQTumJSbsbFwK5o8TQhXhcly\nDWJ1MSjyLbHbUjQNHWRW7b0MgRGh6RgNAwAmpo1tcZ+smB5BF9sq9i1Z0NmQ3OI51NTVLTDaW4vr\nUIHRmlnls88+i4ULF2L16tUYOHAg4uPjQVEt/5FMpvbriItlDVjwr134y+9uwF/3vAkA+GjKm3j1\no0PomhSJrr3N+PepL4X9E2unAQkAvDRMJgN6etOxmq98Tbnh8no4E4Ff2Ofhs3UB0UJC9U82uCrP\na/jJJgOevnsk8puOA2UAVNwf1D8qJVytR6zeCKvLhnUXNwU9Z2Q0CXBjA4Z0zYHJZICaVsPtEgXG\n6G7D8PmRbwAAlY3ytRwM6jAkxEchOSIRJZZypCclQqviBpnBKX1wpPo4hnfNlT2jwal9sK/yINdF\nJNXs84uOCm/V842yiKG5Rl+YbrDjjPow1Dg44RkfEwWT0YB4t9xM0DnehCideGysVwwbNpkMiHOI\nExiKINEtuVPQwTEhxhi0DSZ9NKob65CcEIvhzoE4Xnsao7oNbbf3ONYptjc+Jkpotz86lYYTGBq9\nIDBMJgO6xCTiYBXnlEoyRcMUEbpdsW7xt86mOJhMBtzSbTjWnNqIYWn9Wrwnoy4S5qYGdE1MANke\nkWKt4FL6+Zb04fghfwduzRwGU2zzx0nfk0RjbLuOS+1BNMNpzjG64O/lpdKhAiMhIQFlZWJNosrK\nSsTFxcn2iYuLwz/+wUXvNDY2YtOmTQgPb1lSV1e338pzH6w6ggabC/9ceRjwBTOcLazCyYt1OHmx\nDr0ZefnwixW10CYAjJdCdbUViVQyRnW+EVuKd3DVJwkGHg9Au4xgGULMcfDTIoZkx+MwSwBgYdBp\nkZMdj9QEg69SqRwtTUNPE2iycyakeht3/2F+AiNMFY4Hs+7DS7tfR4MzeB9VmH2z5LTb0FWTjupq\nK2i/V2FI9GAQWTQ+O7Uy4DxhqnBUV1vxZM4f0ORxwFrvhhVcu/oY+uL5QQlICIuTPaNMfRaeHzQP\nakoFHa1r9vlZrc5WPd8muyjgGm3c9YMdR7HiLNBmcaPaY4XTLndsN1oYuG3isVZJaZXqaiscdtGM\np6f1qK0JngnssHmDtuFPA59Ak8eBhjoHeoX3xoLBTyFeH9du77HDJravyeoB4oP3Bf+c9ZQerw9/\nEQC3n54V/3N2iwfVId4dALBbRTOmysU9y9EJI9Ensg/i1aYW7+n53Hlwel2orb20bOrLxWQyXFI/\nT0geh2GmoTCyMS0eJ+0L0qVq13GpvXh12PPQUGpUV1vbLDQ6VLz37t0bRUVFKC0thcvlwrp16zB6\n9GjZPmazWdBE/vWvf2H69Okd2aQAVp37FsWxawCwcHvFQaTW0gQirB7agRtxplae9KXpxS1KI3Wm\npho4xzHd6TwI2g2vh0CSMdJXNZRH1CLuvaU7Fxnk29TFFInfT+6JsYO6IJjfUKfmrsWbA/jIK38N\nI0IVjhitUWai8IefWXaWhPr5JywRBCH7HRAzhw2+DF4trQ0wyxAEgaTwhIDZN789VhfTYiZr0Azp\nFvZrLgpFaibizV3+Ic6qFiKWpH6BULkG/tfy3873FUEQQt5Ke6FqhQ8DENdmMagNMKjDBTNKrFZS\npqKF0hRSk5RRw90TSZBCKZyW0NG6NuVddDQqShW0rEowpObTjnLit5UoTWS7FB4EOlhgUBSFBQsW\nYNasWZg4cSImTJiA9PR0vPfee9i6dSsAIC8vD+PGjcO4ceNQV1eHOXPmdGSTAthavBNeqgmg3Sis\nEGcHtRY7VMnnQJAsqCi5L4CvYc94SWw7VAqHywPGJzxI38IpnppOiI7QyipW8lnPE25IwS0DO0Or\nocAHiUoHsHf+OBzvPX6j/JqEPHafTxL0H7wM6nAQBCFzRPeKycSwJHGJSb54oXSQeajnvUJsP480\n8oYAgUlpY9E9Kg03JcvzItqb1trBpWGpzfkDpEEJ/D13MSRjeNJgDEro36rMWqmturnY/PYov3A5\nSNsXyocBiD4rg0qeac9nIhMgWizTIRWuVypc9GohKTwBA+L6IsfUC10jW877+bXT4XkYI0aMwIgR\nI2Tb5s6dK3weO3Ysxo5t2UHV4RByE0V1gz1gm6c2EXSMGFZJ6uz498YzWLPjAqyohdZXB411q+Gt\nTEF0sgasVRxA/v7IMFSZGxEbyQ04Xi8raBjSMhF8ZdL3Hr8Rz+3eAACIjfCtSyxoGJwTV6+WD168\nw9KgNqDWFzI6NGkw+pp6IlITifUXf8AZM2fykg4ycXoTHup5DxbuFcOapb+rKBUGxOcIizt1JC3F\n8vNIhWxzA5c0N4IXgiRB4p7M0Nqs/9xfrmGE1pDao8Db5SAV7s0N+E6GExj+CYZGbSQogoKaUrWo\n+VxKAMO1Dk3SmNXrvivdjF8MJdPbB+GX/Xy+3MyV25DANsp9K14z54+xNLoBiXmKcHEDSpIpDGlx\n8toycUa9UC4jK9WIYBoGT7hOkhRk8C0cwwsMn4bhb97hB05pxAavNvsPJP61q/gQPF7TkIdqtl9x\nuFBkGrkcigR9XAt7ckgTIZvTMKTFA1syPfGYfAlx6ZFctrQ0ciiYhsGbWH6JfgpGa58VHzLqXwyP\nJEikRCQjTteyWSnKV0KlV0zm5TRV4VeMsuIej59wKKpqgLqrXIjwdZwAgLFFwlstht2xXvEP27dL\nF4wZOBApC
QYYk3pg2fFdQS+ZnhQJgltMLmTNGx6+fEGAhqGSz2j5QTRYPR3/gcTfzxGm0uOVoX8W\nqoxKZ9XBqm22N4/0nYUGl6XViU5SgdFc1jQ/6ANotd8gShOJhUPnC3ZpaRhxMB/Gi0OegcPjaJds\n2stB+iyb81/xBCth8oc+s1p1LaM2CouGLQgIuFC49rkuNYyLDUV4ctvzOFEtqdrpX1+JZBCml89a\npYvcM049ZIYLSQhtosGErokRIAkCRq28qmsoWhQYvA+D9PNh+Jmk+MFemrDDzz79naHSWSmPURsl\n2PlJghTs4cEGmPaGIqlLyoqVmqGac5R3jQzMZG8N0Vqj8FykgiaYhqGh1ELxwiuB1G/RnFDk7ydY\nNrRepWvWoS8lUmNo8Z1VuPa4Lp/4tpKdcDFufHTsc2EbQXrlNUpJBrSKBXw5eRRB4Y+398Oyk1wu\ngX8mM0CgK3JhiG3EIEmBttYm86iIVgoM3358VrdUCPSOzcaozjcGXJefcfo7Q5tzjvJoKQ1cXleL\nS1ReCaRmvOYGL5qkcX/WXSFXNbxULiVr/Zeitaa2ZwY8hr0VBzDwF/BFKVx7XJcCIymMq0HkgmQt\naz8NIz5aI8t81tIaGHSi+Sc90YiTkrJYqQkGPD1qVMC1WiswWjtb899PRYnfH8i6S9AOpCF+oU1S\nLV9TFeLYqwFpoEBLpbxvSBzY7O+tgVu73YFGT+Aa6Fea1prCkg1JSDYktbyjgkIQrkuTVNAoD5LB\ntBGirXvCsM6yOkvcetrioKShxcFq4cOD8Nx9/YNeix+sW4o7D+b0ljXPZ/7yH+SlA7lUY5CaHEST\nVKCjsyV438nVKDDkGkbHh3dOSBsDgFs/4mqlNf4LBYXL5brSMHYfr0DnuPCgBfe0WmDs4GR8v923\ngWACNAxaMqPVSArfRUdooVGFHrAWj1jYYjJaixoGQQTdL0IbqEkAwZ3e0t8Xj1jY/PWEywa/7tWA\nLHGvHesyhWJk8nDkxve7KmsGAcDfR7z8i/SDwvXL1TcKdBAWuwsffsetijVheqDAYDsfxpGabOG7\nw+uUVX/VUhrZoCmtlKpRN/8nbc1KV6H+6FpKA4fXKeQS+O8XFaKAm8zp7Zt1EhKFsrWrb/ECg8HV\nZ7eXmaR+AQ2DIIirVlgArSvHraDQFq4bgeFwS0t6B3d+fnzi/4TP58wXZL/paJ3MHCQ1SbW0BkBr\nCPOJaOkAACAASURBVKWBPDXgUewqy0NuAudIlwktUgWtSouHsu9Bo9+aFTpaC5qg4GG9wjHJ4YmY\nlDYWWSGWkQwG79y/Gh29MpOUMrNWUOhwrnmB4fYwUNEkHE5RSJTUNLR4HL9GL0+YSi8brLWq9rUV\nEyEERlJ4Au7oMVn4Lh0keS1iYEK/gOO42bABVpdV8FUQBIFxqaMD9m2+Xb6lYf1WobsakIXVXoUm\nMwWFa41r2um9+3gFfv/3bThxsQ4Ol6hhnCura+YoDtbPBKOndbLBWku3rxO4tQllJEEGXew+GCkR\nnZEYZKH7S4G/1tVokqJbmemtoKDQPlzT07LvdhcAALYfKcONfRK50FnCK5T8YL0UtxBSK9CpdDKn\nt1atgpCk0Q6QAdWLQsOvatfSalqzet4bIPguFT5K6qrUMCRC4kplWCsoXE9c0wJDit3phDZnGwja\nDdbLDTSsR9VqgRFG62UmEJqkMWt8ulCBtq2EMkk1R0sO2PZwBMfoolFiK7uiWcyhkN5fe5YKV1BQ\nCM51IzAsDjsImouOEoSE34p4BAjZjJwmaSE7WK/SyWaxNEnjhj6JaC8uRcPg8V+voiOYkTEVJl0M\nbk0Z2eHXulQUrUJB4ZfluvnHNbnk5iOWIQG/Nbf91zKWRkX51w9qb5v55WgYWdHd27UNwYhQGzC1\n24QWFz1SUFC49rluBQYYEp4KMbO7d2x2gMCQRkX5F2Vr77j/SzGpdI9KQ7gqTFj0RkFBQeGXoMNN\nUj/99BMWLVoElmUxffp0zJ49W/Z7eXk5nnvuOVitVjAMg3nz5uGmm25ql2uzhAf8ehNNbr9kPYZC\ndlQvPDLiTqF0xuv73gVgFnaROrlV5KXXYboULsUk9Xi/3wuObwUFBYVfig7VMBiGwcKFC7F8+XJ8\n9913WLduHfLz82X7fPDBBxg/fjxWr16Nt956Cy+//HK7XLvR3YiGtG+hSjsKsCwcnkCTVFyUDhpa\nDYIgQBBEwBKl0sJ+ejr4uhNtha//dCkmH4IgrvulMRUUFH55OlTDOHr0KFJSUtCpE+ecnTBhAjZv\n3oz09HRhH4IgYLNxa0xbLBbEx7ctb4CnxMYtpUrHlsNTx8LrdsvuNkKnxR3D02XH3J1xO0z6GKy9\nsBEAV8jt2YF/xMWGIsToomX7tlexu+cGzsWR6uPIjslol/Ndb/y218wW63QpKCi0Dx0qMCorK5GY\nKEYSxcfH49ixY7J9HnvsMcyaNQufffYZHA4HPv7443a5tsVlFT5X06cR5omS/R4drg8oGKim1BiX\nOhqbCrfC6XWBJmikRHRGSkTngPO3VzG+hLA4JIQFlkVXaB394npf6SYoKFw3dKjAaE39oXXr1mH6\n9Ol48MEHcfjwYTzzzDNYt25di8eZTM0nrTmq7cLnuoj9sJcMBiSH6DTakOfQqrRwel3QazUh94mN\njoApuvk2/FK01BfXE0pfiCh9IaL0RfvQoQIjISEBZWVlwvfKykrExcXJ9lm1ahWWL18OAMjJyYHT\n6URdXR2io+UmIH+qq63N/l5YUyb7bnXZIXVbE14y5DlI1ldwz0OE3MdS70C1t/k2/BKYTIYW++J6\nQekLEaUvRJS+EGmr4OxQ42/v3r1RVFSE0tJSuFwurFu3DqNHy4vfJSUlYdeuXQCA/Px8uFyuFoVF\na6h1mGXfNTp5RrfUoe0PnxDWnNlJSRpTUFC43uhQDYOiKCxYsACzZs0Cy7K44447kJ6ejvfeew+9\ne/fGyJEj8dxzz+GFF17AJ598ApIk8cYbb7TLtZ1eeVRUdrcwnBCtVOgcHjpLmmqFwGhrjSYFBQWF\nXxsdnocxYsQIjBgxQrZt7ty5wuf09HT85z//affrerxyjYJWuwGJwMhsZk0IoRx4u7dKQUFB4dfL\nNWtXaWiULyjkJpwAAJMuBrG6GKQGiXziEUp6B0mOG5k8XDiPgoKCwvXENVt80O50Qerltrs59WJq\nt4noa+rZ7LG8wPAGERh39JiM6d0nKdVRFRQUrjuuSQ3D4fLAw8hNUjafwGhN/kRzGgaglNJWUFC4\nPrkmBUZJlR0g5E5pm5vLJm9NlVmqBYGhoKCgcD1yTQqM4iorCEI+2PNRU63RMAhFYCgoKCgEcE0K\nDLPNGaBh8LSmBhTVjA9DQUFB4XrlmhQYDTYXQLAwaePw0pBnZb+1hw9DQUFB4Xrk2hQYdhdAMFBT\ndLOLIoWiiyEZAJBsSGphTwUFBYXrh2syrLbB7gKMLGiSChQYRMu3PL
7rGMSHxaGfSamEqqCgoMBz\nTQoMi90FgmBAkRRokoaaUsN1CU5vNaXCDYkDO7qZCgoKCr8qrjmTFMOysNidACE6r/W0uB63Slmp\nTkFBQeGyuOYEhr3JLUQ38cuoSgVGey18pKCgoHC9cc0JDN7hDUBY91q6XnZ7rcWtoKCgcL1xjQoM\nLgfDX8NQkbQgRBQUFBQULo1rTmBYbFKBwd2emuKqEOppfcjjFBQUFBSa55oTGMFMUg4vV+pcr9KF\nPE5BQUFBoXk63AP8008/YdGiRWBZFtOnT8fs2bNlv7/22mvYu3cvCIJAY2MjzGYz8vLyLvt69TYn\nCD+TVKO7CYDc+a2goKCgcGl0qMBgGAYLFy7EJ598gri4ONxxxx0YPXo00tPThX3mz58vfP78889x\n6tSpNl3TItEw+BIf3aPSkN9QgF6xWW06t4KCgsL1TIcKjKNHjyIlJQWdOnHrZ0+YMAGbN2+WCQwp\n3333HR5//PE2XVPu9OYExviuY5Ae1RVZzSzLqqCgoKDQPB3qw6isrERiYqLwPT4+HlVVVUH3LSsr\nQ2lpKYYMGdKma9bbHdBm7QMg+jAokkJ2TIay8JGCgoJCG+hQDYNlg5cYD8a6deswduzYVg/qJpMh\n6Hartw5Qcet3h+t1Ife7lrge7rG1KH0hovSFiNIX7UOHCoyEhASUlZUJ3ysrKxEXFxd03/Xr1+Ol\nl15q9bmrq60B29weBvZGBny5QZfDG3S/awmTyXDN32NrUfpCROkLEaUvRNoqODvUJNW7d28UFRWh\ntLQULpcL69atw+jRowP2u3DhAiwWC3Jyctp0PWujS/ad92EoKCgoKLSdDtUwKIrCggULMGvWLLAs\nizvuuAPp6el477330Lt3b4wcORIAp11MmDChzdeTOrwBgFSyuhUUFBTajQ7PwxgxYgRGjBgh2zZ3\n7lzZ98cee6xdrtVgkwsMpW6UgoKCQvtxTdls6u1OIQcDUExSCgoKCu3JNTWiXii1yE1SisBQUFBQ\naDeumRGVYVkcvVALvVa8JTfjuYItUlBQULi2uGYExsot52Gxu9A1SQwbczPuK9giBQUFhWuLa0Zg\n7G/4Cepuh3BzPzGz3O1VBIaCgoJCe3HNrFfaFHUGFAC1WswUVzQMBQUFhfbjmtAwjteIFW6bPA7h\ns0sRGAoKCgrtxq9eYJRbq/HB0Y+F702eJuFzj6jgVXEVFP6/vTsPbKpKHz7+TdK0LC2b3QCZikVB\nsAqoLMKUdYChBVoBFas4U6SAQNlEFgXGqQNYmAr8FBVBQUBRXwGFMOpYQUAqKIIwLDrgQGmRlq3Q\njaTJPe8fLSmhQFJoUtM+n79yb05Ozn2g98k5595zhRDl5/UJI+PsRYftgpIeRmTjjrQLbVsZTRJC\niCrJacLIysryRDtumk6vOWxv+PVzAG73byTLmQshRAVymjAGDhzI2LFjSUtL80R7yu16V0LJOlJC\nCFGxnCaMr7/+mh49erBgwQL69u3L6tWrycvL80TbXHLJZr7mflkWRAghKpbTs6qvry8xMTF8+OGH\nvPzyy7z99ttERkaSlJTE2bNnPdHGGzJbr93DkIQhhBAVy6WzamZmJv/85z+ZNGkSHTt2ZOnSpdx2\n220MGzbM3e1zymIrfgaGKvJ12C8r1QohRMVyeuPeyJEj+eWXX3j88cdZu3Yt9evXB6Bt27Zs2rTJ\n7Q10xlwyh6Hl18VQ77R9vyw8KIQQFctpwhgwYAC9evXCYCj7i33jxo1Ov2Dr1q3Mnj0bpRQDBw4k\nISGhTJlNmzbx+uuvo9frad68OfPnz3ex+Vf2MIwO+w0y6S2EEBXKacKoW7cuBQUFBAQUL+p38eJF\nDhw4QMeOHZ1WrmkaSUlJLF++nODgYAYNGkSPHj0IDy+9oe748eMsXbqUDz/8EH9/f86dO1euA7CU\n9DCU1c9hvwxJCSFExXI6bpOcnIy/v79929/fn+TkZJcq37dvH2FhYTRu3Bij0UhUVBSpqakOZT76\n6COeeOIJ+3c0aNCgPO2nSCt5jvdVcxgyJCWEEBXL6VlVKeVwA5xer8dms7lUeVZWFg0blq4eGxIS\nQnZ2tkOZY8eO8b///Y8hQ4bw+OOPs23bNlfbDpSuF9Whxe0O+6WHIYQQFcvpkFTt2rX56aefuP/+\n+wH46aefqFWrlkuVK6WclrHZbKSnp7N69WpOnjxJXFwcJpPJoVdzI5dv3PMzOA5JSQ9DCCEqltOE\nMXnyZEaPHk2zZs0AOHLkCK+99ppLlYeGhnLy5En7dlZWFsHBwQ5lQkJCaNOmDXq9nttvv52mTZty\n7Ngx7r333hvWHRRU8qAkHw3McGeDO6gd/Ef+fbS4hxLYIICgBgE3qKHqsMdCSCyuILEoJbGoGE4T\nRps2bTCZTOzduxelFG3atKFu3bouVR4REUF6ejqZmZkEBQVhMplISUlxKNOzZ09MJhMxMTGcO3eO\n48eP06RJE6d1nz6dC0ChuXixwaJLiu53dbUnjIsXLnHalutSO71ZUFCAPRbVncSilMSilMSi1K0m\nTpceoFS3bl26dOlS7soNBgMzZswgPj4epRSDBg0iPDycRYsWERERQbdu3fjjH//It99+S1RUFAaD\ngeeff97lhARgVcXP7a7h44tRX3o4cqe3EEJULKcJ4/Dhw8yaNYvDhw9jsVjs+w8dOnSDT5WKjIwk\nMjLSYV9iYqLD9tSpU5k6dapL9V3NqornMHx9jPjoSg9HL5PeQghRoZz+DP/b3/7G+PHjCQsL45tv\nviEhIYEJEyZ4om0usSorStPhZ/DBx6GHIQlDCCEqktOEYbFY6NixI0opgoODmTBhQrkvfXUnqyoC\nzYDBoHe4/NeglyEpIYSoSE7PqvqSE2/dunU5fPgw58+fJzMz0+0Nc5WN4oThY3A8FOlhCCFExXI6\nhxEVFcX58+dJSEhgyJAhaJpWZg6iMtlUEcrmg4/B8el6ch+GEEJUrBsmDE3T6NixI/Xr1ycyMpJd\nu3ZhNptdvqnOE2xYQfPFUKaHIQlDCCEq0g3Pqnq9nhdeeMG+bTQaf1fJQlMams6KshnK9DBkSEoI\nISqW05/h4eHhZGRkeKIt5VakFd+DgWbAoJchKSGEcCencxjnzp2jf//+PPDAAw5rSC1cuNCtDXOF\nueR53sVzGI4JQhKGEEJULJcmvaOiojzRlnIzW0tuJLziKqm764XzS85Rh0tshRBC3DqnCSM2NtYT\n7bgpFq00YVwekkpsk4CmtEpslRBCVE1OE0ZiYuI1f63/voakSnsYOp1OJryFEMINnCaMbt262V+b\nzWa++OILh0esViaz7XIPwweDQYaghBDCnco9JPXII48watQotzWoPC4nDJ1mQC9zFkII4VblvpRI\np9P9bi6ztZQkDKPeWMktEUKIqq9ccxhKKX7++Wc6duzo9oa54pK1eA4joIZrj4wVQghx88o1h2Ew\nGIiPj6d169ZubZSrLhQUA
FDPxWeMCyGEuHluv6x269atzJ49G6UUAwcOJCEhweH9devWkZycTGho\nKABxcXEMGjTIpbovFBQCUN+/5i21UQghhHNO5zCGDBnChQsX7Ns5OTnExcW5VLmmaSQlJbFs2TI2\nbtyIyWTi6NGjZcpFRUWxbt061q1b53KyALhYWDwk1cBfehhCCOFuThNGQUGBwzO269WrR15enkuV\n79u3j7CwMBo3bozRaCQqKorU1NQy5ZRS5WhyqcKi4oRRt1aNm/q8EEII1zlNGJqmUVAyVwCQn5+P\nzWZzqfKsrCwaNmxo3w4JCSE7O7tMuS+//JIBAwYwbtw4Tp065VLdAFatuB1+Bqcja0IIIW6R0zNt\ndHQ08fHxDBkyBIAPPviA/v37u1S5Kz2H7t27Ex0djdFoZM2aNUyZMoUVK1a4VL+1ZLVaPx8/l8oL\nIYS4eU4TxogRIwgODubrr79GKcXjjz9OTEyMS5WHhoZy8uRJ+3ZWVhbBwcEOZa4c7nr00UeZP3++\nS3UHBQWAQYENgm+rU7xdTVXnY7+axKKUxKKUxKJiuDSWExsbe1NXS0VERJCenk5mZiZBQUGYTCZS\nUlIcypw+fZqgoCAAUlNTadasmUt1nz6di7nIAnowFxRx+nRuudtXFQQFBVTbY7+axKKUxKKUxKLU\nrSZOp3MYY8eOJScnx759/vx5xo0b51LlBoOBGTNmEB8fT3R0NFFRUYSHh7No0SI2b94MwMqVK4mO\njiYmJoZVq1YxZ84clxtvU8VzGDWMcqe3EEK4m9MexokTJ6hXr559u379+qSnp7v8BZGRkURGRjrs\nS0xMtL+eOHEiEydOdLm+K11OGH4+vjf1eSGEEK5z2sOw2WwOV0UVFRVhsVjc2ihXaap40lt6GEII\n4X5OexidO3dmwoQJDB06FIAVK1aU6TFUFhsyJCWEEJ7iNGFMnDiRt956i7lz5wLFa0u1b9/e7Q1z\nhYYNpekx+sgDk4QQwt2cDkkZjUbGjBnD66+/zp/+9Cc+++wzpk+f7om2OaVhA01vfzyrEEII97lh\nD8NqtfL111/zySefsHfvXqxWK8uWLfvdrFar0EDpr/kIWSGEEBXruj2MOXPm0LVrV9asWUN0dDTf\nfPMNdevW/d0kCwCFDZ0q9zOghBBC3ITr9jA++OAD2rRpQ0JCAh06dAD43f2SVzoNNJm/EEIIT7hu\nwti+fTsbNmwgOTmZCxcuEBMT4/Kig56idDZ0yBVSQgjhCdcdz6lTpw5xcXGsXbuW119/nQsXLnDp\n0iXi4uJYs2aNJ9t4fToNPdLDEEIIT3BpAqBFixa8+OKLbNu2jbi4uGs+06JS6DR0ShKGEEJ4Qrke\nJGE0Gunbty99+/Z1V3tcpikNdEp6GEII4SFee4lRka0IQBKGEEJ4iNcmjEtFJQlDJwlDCCE8wWsT\nRmFR8QKIBulhCCGER3htwrhklR6GEEJ4ktcmDHPJkJSPrlzz9kIIIW6S2xPG1q1b6dOnD71792bJ\nkiXXLff555/TokULDhw44FK9l6wlQ1J66WEIIYQnuDVhaJpGUlISy5YtY+PGjZhMJo4ePVqmXH5+\nPqtWrSrXOlXSwxBCCM9ya8LYt28fYWFhNG7cGKPRSFRU1DVv+lu4cCHDhw/HWI4HIZlLLqv1kR6G\nEEJ4hFsTRlZWFg0bNrRvh4SEkJ2d7VDm0KFDnDp1ii5dupSrbnPJpLdBLz0MIYTwBLeebZVSTt+f\nPXs2r7zyisufuczoV5zravv5ERQUcPONrAKq+/FfSWJRSmJRSmJRMdyaMEJDQzl58qR9Oysri+Dg\nYPt2fn4+R44c4amnnkIpxZkzZ3j22Wd54403aNWq1Q3rPncxHwBl03H6dK57DsALBAUFVOvjv5LE\nopTEopTEotStJk63JoyIiAjS09PJzMwkKCgIk8lESkqK/X1/f3/S0tLs20899RTTpk2jZcuWTusu\nKhmSMsqQlBBCeIRbz7YGg4EZM2YQHx+PUopBgwYRHh7OokWLiIiIoFu3bg7ldTqdy0NSFs0KgNEg\nCUMIITzB7WfbyMhIIiMjHfYlJiZes+x7773ncr0W2+UehjxASQghPMFr7/S2lvQwfKWHIYQQHuG1\nCaPIVpIwfKSHIYQQnuC9CUN6GEII4VFemzAuD0n5SQ9DCCE8wusThgxJCSGEZ3hvwlDFCaOGJAwh\nhPAIr00YNs0GgJ+PbyW3RAghqgevTRiXexgyhyGEEJ7htQnDpop7GDXKsSS6EEKIm1cFEoYMSQkh\nhCd4bcLQkB6GEEJ4kvcmjJI5jJoy6S2EEB7hvQkDDaXA6CN3egshhCd4ccKwgfLa5gshhNfx2jOu\n0mnoNENlN0MIIaoN700Y0sMQQgiPcvsZd+vWrfTp04fevXuzZMmSMu+vWbOGfv36ERMTQ1xcHEeP\nHnWpXqWThCGEEJ7k1jOupmkkJSWxbNkyNm7ciMlkKpMQ+vXrx4YNG1i/fj3Dhg1jzpw5LtWtdBo6\nJUNSQgjhKW5NGPv27SMsLIzGjRtjNBqJiooiNTXVoUzt2rXtrwsKCtDrXWySTkPnvSNqQgjhddx6\nTWpWVhYNGza0b4eEhLB///4y5VavXs3y5cuxWq2sWLHCtcp1NulhCCGEB7k1YSilXCoXFxdHXFwc\nJpOJxYsXM3fuXOcf0mnodQaCggJusZXeT2JQSmJRSmJRSmJRMdyaMEJDQzl58qR9Oysri+Dg4OuW\n79u3L7NmzXJar02zgQ50Ss/p07kV0lZvFRQUUO1jcJnEopTEopTEotStJk63TgJERESQnp5OZmYm\nFosFk8lEjx49HMocP37c/nrz5s3ccccdTustshUByByGEEJ4kFt7GAaDgRkzZhAfH49SikGDBhEe\nHs6iRYuIiIigW7durFq1irS0NIxGI3Xq1OGVV15xWq+lJGHokTkMIYTwFLcvxBQZGUlkZKTDvsTE\nRPvrF154odx1mq2SMIQQwtO8ckznUpEFkIQhhBCe5JUJw1xUvLS5JAwhhPAcr0wYl6zFPQyDThKG\nEEJ4ilcmDPschiQMIYTwGO9MGCVzGAYZkhJCCI/xzoRhLZ7DMOjkaXtCCOEpXpkwLPY5DEkYQgjh\nKd6ZMGyXexgyJCWEEJ7ilQnDXHKnt49eehhCCOEpXpkwikqukpIehhBCeI5XJozLQ1LSwxBCCM/x\n0oRRPOktCUOI6iMvL4916/7fTX32+efHk5+fV8Etqn68MmEUXe5hyJCUENVGbu5F1q37+JrvaZp2\nw88mJy+gdm1/dzTrlrn6oLnfA6/8iX55eXOjwVjJLRFCeMqbb77GyZOZxMfH8eCD7enYsRPvvvs2\nt90WyJEjv7By5UdMm/Ycp09nY7GYGTx4CP36xQAweHB/li1bSUFBAc89l0hERGv+85+fCAoKYe7c\nf+Lr6+vwXd9+u40VK5ZhtVqpW7cuM2e+TP369SksLOTVV5P5+edD6HR6/vrX4XTp0o
3vvtvBkiWL\n0TSNevXqsWDBYt55Zwm1atXi8cefBGDo0MdITl4IKJ57LpE2bR7kwIH9zJkzn5Url/Pzzwcxm810\n7dqD+PgEAA4dOsCiRf+ksPASvr6+LFiwmMmTxzFhwvM0a3YXAKNGDWPy5GnceWczt/8beHfC0EvC\nEKIyfPT1Eb4/nF2hdT7UIphHu1//pDdq1FiOHfuVd95ZDcCePbs5dOggK1d+RGhoKADTp88iICAA\ns9nM8OFD6dKle8lT5nT2ejIyTvDSS3OYMuUFZs6cxpYtX9OrVx+H77r//jYsWbIcgI0b1/P+++8x\nevQ4li9fSkBAACtWrAGKh8lycnJITv4HixcvIzQ0lNzcaz/dT6crbcOJE+m88MLfmDRpCgAjRowm\nICAATdMYN24Uv/56hD/84Q5mzZpOUtIrNG/egoKCAvz8/OjXL4ZNmz4jMXESJ06kY7UWeSRZgJcm\njCKteEjKKHMYQlRrLVu2sicLgI8+ep9t274BIDs7m4yMdMLDGwOlwz4NGzYiPLz4BNu8eQtOnTrJ\n1bKzTzFz5gLOnj2D1WqlYcNGAPzwwy7+/vc59nL+/v58++022rRpa29HQMC1H4N65dBTSEgo99zT\nyr6dmvoFn322HpvNxrlzZ/nf//4HQGBgEM2btwCgVq1aAHTr1oPly5cxevR4TKbP+POf+7kYrVvn\n9jPu1q1bmT17NkopBg4cSEJCgsP7y5cv5+OPP8bHx4cGDRowe/ZsGjZseMM6i0omvWVISojK8Wj3\nZjfsDXhKjRo17K/37NnNjz/+wJIly/H19WXs2BFYLJYyn7ly+EmvN1yzzKuvzmPIkKd4+OHO7Nmz\nm3fffRu49nzD9eYgDAYDmlb63pXfU7NmTfvr3347yZo1q1m2bCW1a/sze/ZLWCxmrje14edXg4ce\nas+2bVvYvPkrli5dee2CbuDWSW9N00hKSmLZsmVs3LgRk8nE0aNHHcq0bNmStWvX8umnn9KrVy+S\nk5Od1nu5h+ErQ1JCVBu1atWioKDguu/n5+cREBCAr68vx48f48CB/1yznCuTzPn5+QQGBgLwr39t\ntO9v164Dn3zyoX07NzeXe++9j71793Dq1G8AXLx4ESjuyfzyy2EAfv75ML/9VtqTubIN+fn51KxZ\nk1q1anPu3Fm++24HAGFhd3D27BkOHz4EQEFBgX1yPzp6AAsWzOeee1pdt0fjDm7tYezbt4+wsDAa\nN24MQFRUFKmpqYSHh9vLtGvXzv66devWbNiwwWm9Vq14DsNXehhCVBt16tQlIuJ+nn76cdq3f5iO\nHTs5vN++/cOsX/8Jf/nLE/zhD2Hce2/EFe+Wzh9cOZdwPfHxw3nxxSkEB4fQsuW99mTw9NPDSEl5\nhaFDH8NgMPDXvyYQGdmV559/genTn0MpRf36DUhJeY0uXbrz+ecm4uPjaNGiJU2ahF2zDc2a3cVd\ndzXnqaceo1Gjxtx33/0A+Pj48NJLc3j11WTMZjM1atRgwYLF1KhRg+bNW1C7dm2iojw3HAWgU268\npuuLL75g+/btJCUlAfDpp5+yf/9+XnzxxWuWT0pKIigoiJEjR96w3omfzifj0lGGhIyhc6s/VHi7\nvUlQUACnT197kq26kViUkliUqoqxOHPmNImJI3n//U/K9bniCwBunlt7GOXJRZ9++ikHDhxg5Urn\n43GXexiB9evccgCqAolBKYlFKYlFqaoUi/Xr17Nw4UKmTZvm8eNya8IIDQ3l5MnScbusrCyCMd1e\n6gAAERdJREFUg4PLlNuxYwdLlixh1apVGI3Oh5msyopSUJhXVOV+OZRXVfz1dLMkFqUkFqWqWiw6\ndepBp049AMp9XLeaYNw66R0REUF6ejqZmZlYLBZMJhM9evRwKHPw4EFmzZrFG2+8Qf369V2q16pZ\nQTPg6yt3egshhKe4tYdhMBiYMWMG8fHxKKUYNGgQ4eHhLFq0iIiICLp168a8efMoLCxk3LhxKKVo\n1KgRixcvvmG9xQlDTw1JGEII4TFuvw8jMjKSyMhIh32JiYn21++++26567SqIpRmoIZREoYQQniK\nVy4+aFNWUHr8pIchhBAe45UJQ8MGmoEavrI0iBDVxa0sbw7w0UcfYDabK7BF1Y+XJgyZwxCiurnR\n8uau+PjjDzCbL1Vgi8rPZrNV6vffKq/8ia50GigDPgavzHdCiJtw9fLmzz6byPvvr2Tz5n9TVGQl\nMrIr8fEJXLp0iZkzp3L6dDaapjF27BiOHcvgzJnTjB07knr16rFw4RsOdS9fvpRvv92GxWLm3nvv\nY/Lk6QBkZmYwb95scnJyMBgMJCXNpVGjxqxevYIvv/wXer2eDh06MWLEaMaOHcGYMRNo3rwFFy7k\n8MwzQ/n448/41782smPHdiwWM5cumZk7959MnTqJvLxcrFYrw4ePpHPnLkDxMiRr1qxGr9cRHn4X\nEydO4emnh7BmzVoMBgMFBfkl2+swGDz/g9krEwaAXknvQojKsvbIRvZk76/QOtsER/BIs+jrvn/1\n8ubff/8dGRnpvP32eyilmDJlIj/9tJecnHMEBgaRnLwAgJo1dTz4oOLDDz/g//7vLerUqVOm7oED\nH+Mvf3kGgKSkmezYsZ2HH+7MSy+9yNChf6Vz5y4UFRWhaRrffbeD7du38vbb7+Hr63vd5cyvXI7k\nwIH9vPfeh/j7+6NpGnPmzKdWrVpcuJDDiBHF9f/661FWrVrOG2+8Q506dcjNzaVWrVq0bfsAaWnb\n6dy5C1999SVdu/aolGQBXpwwDPK0PSGqtV27dvL997uIj49DKUVh4SUyMtK5777WvP76Qt588zU6\nduxMz55/pLAwl+Ilzq+9+sTu3bt4//2VmM2XyM3N5c47w2ndui1nzpy2//q/fFPxDz/sIiqqn33V\nW1cW/3voofb4+xc/8U/TNN566zX27t2DXq/jzJnTnD9/jj17fqBr1x72hHa53ujoAbz//ko6d+7C\npk0bmDLl2ksreYLXJgy9ThYeFKKyPNIs+oa9AU9QSvHUU3+hf//YMu8tW7aKtLRveeut1/jll/0M\nHvzUdeuxWCykpCTzzjurCAwM4p13lpQsRX7t5FK85FHZBQwNBgNKafY6r3Tlcub//vfn5OTk8O67\nq9Hr9Qwe3B+z2XLdpZQiIu7n1KlX2Lv3RzRNo2nTO697LO7mtZMAvqqm80JCiCrj6uXN27fvgMn0\nGYWFhQAlv9TPc+bMGfz8/OjVqw9DhjzJwYMHSz5fm/z8/DL1WiwWdLri1XALCgrYsiXVXj44OIRt\n27YAUFRUhNl8iXbtir/38gR66XLmjTl8uPi7Nm/+6rrHkZeXR/36DdDr9fz44w/2lXAfeKAdmzd/\nxcWLFxzqBejduy9/+9sLREX1L3/gKpBX9jDMhx/iD/XvqOxmCCE86OrlzZ99NpFjx44xcuRfgeKE\nMmNGEhkZJ3j99YXo9Tp8fIz84x/Fq2X37
x/Dc88lEhgY5DDp7e/vT79+sQwd+hgNGzZyeBLeiy++\nxLx5s1m69C2MRiNJSXNp374jR478wrBhQ/H1NdKhQycSEp5lyJA4ZsyYxhdf/IsHHnjousfRq1cf\npkyZyPDhQ2nWrDlhYU0BaNr0ToYOjWfMmAQMBgN33dWc6dNnlXzmzyxd+iY9e/aq8LiWh1uXN3eX\nfpM+pc1dgYwdeF9lN6XSVbWF1W6FxKKUxKJUVYjF5s1f8e2323jxxZduqZ7f9fLm7iR3eQshqoMF\nC+bx3XdpzJ+/sLKb4r0JI6Cmr/NCQgjh5caPn1zZTbDz2knv+gF+ld0EIYSoVrw2YdTzlx6GEEJ4\nktcmDOlhCCGEZ7k9YWzdupU+ffrQu3dvlixZUub9H374gUceeYRWrVrx5ZdfulxvPX9JGEII4Ulu\nTRiappGUlMSyZcvYuHEjJpOJo0ePOpRp1KgRc+fOpV+/fuWqu570MIQQwqPcepXUvn37CAsLo3Hj\nxgBERUWRmppKeHi4vUyjRo0A0OnK3mp/PQ1vq42fPG1PCCE8yq09jKysLBo2bGjfDgkJITs7+5br\nfXVCl1uuQwghRPm4NWG46yby2jVl4UEhhPA0tw5JhYaGcvLkSft2VlYWwcHBFVL3rd7iXpVILEpJ\nLEpJLEpJLCqGW3sYERERpKenk5mZicViwWQy0aNHj+uW98JlrYQQotpw++KDW7du5R//+AdKKQYN\nGkRCQgKLFi0iIiKCbt26sX//fsaMGcPFixfx8/MjKCiIDRs2uLNJQgghboJXrlYrhBDC87z2Tm8h\nhBCeJQlDCCGESyRhCCGEcInXJQxna1NVNdOnT+fhhx92WDrlwoULxMfH07t3b4YNG0ZubunTxF5+\n+WV69erFgAEDOHToUGU02S1OnTrF0KFD6du3L/369eO9994DqmcsLBYLgwcPJiYmhn79+vHaa68B\nkJGRwaOPPkrv3r2ZOHEiVqvVXn7ChAn06tWLxx57zOFS96pC0zRiY2MZOXIkUH1j0b17d/r3709M\nTAyDBg0CKvhvRHkRm82mevbsqTIyMpTFYlH9+/dXR44cqexmudX333+vDh48qKKjo+37kpOT1ZIl\nS5RSSr311ltq3rx5SimltmzZooYPH66UUmrv3r1q8ODBnm+wm2RnZ6uDBw8qpZTKy8tTvXr1UkeO\nHKmWsVBKqYKCAqWUUlarVQ0ePFjt3btXjRs3Tm3atEkppdTMmTPVBx98oJRSavXq1WrWrFlKKaVM\nJpMaP358pbTZnd599101adIkNWLECKWUqrax6N69u8rJyXHYV5F/I17Vw7hybSqj0Whfm6oqe/DB\nB6lTp47DvtTUVGJjYwGIjY21xyA1NZWYmBgA7r//fnJzczlz5oxnG+wmQUFB3HPPPQDUrl2b8PBw\nsrKyqmUsAGrWrAkU/2K2Wq3odDp27txJ7969geJYfPXVV4Dj/5fevXuTlpZWOY12k1OnTvHNN98w\nePBg+77vvvuuWsZCKYWmaQ77KvJvxKsShrvWpvI2586dIzAwECg+kZ47dw6A7OxsQkND7eVCQkLI\nysqqlDa6U0ZGBocPH+b+++/n7Nmz1TIWmqYRExNDp06d6NSpE02aNKFOnTro9cV/0qGhofbjvTIW\nBoOBOnXqkJOTU2ltr2izZ8/m+eefty9gev78eerWrVstY6HT6Rg2bBgDBw7k448/BqjQvxGveqa3\nkltGbuha8SnPKsDeID8/n8TERKZPn07t2rWve3xVPRZ6vZ7169eTl5fH6NGjyzw2AEqP9+pYKKWq\nTCy2bNlCYGAg99xzDzt37gSKj+/qY64OsQBYs2aNPSnEx8fTtGnTCv0b8aqE4c61qbzJbbfdxpkz\nZwgMDOT06dM0aNAAKP6FcOrUKXu5U6dOVan4WK1WEhMTGTBgAD179gSqbywu8/f356GHHuKnn37i\n4sWLaJqGXq93ON7LsQgJCcFms5GXl0fdunUrueUV48cff+Trr7/mm2++wWw2k5+fz+zZs8nNza12\nsYDiHgRAgwYN6NmzJ/v27avQvxGvGpIq79pUVcXVvwS6d+/O2rVrAVi3bp09Bj169GD9+vUA7N27\nlzp16ti7olXB9OnTadasGU8//bR9X3WMxblz5+xXuly6dIm0tDSaNWtG+/bt+fzzzwHHWHTv3p11\n69YB8Pnnn9OhQ4fKabgbTJw4kS1btpCamkpKSgrt27dn/vz51TIWhYWF5OfnA1BQUMD27du5++67\nK/RvxOuWBrnW2lRV2aRJk9i5cyc5OTkEBgYyduxYevbsybhx4/jtt99o1KgRCxcutE+M//3vf2fb\ntm3UrFmTOXPm0KpVq0o+goqxe/dunnzySe6++250Oh06nY4JEyZw3333MX78+GoVi59//pmpU6ei\naRqaptG3b19GjRrFiRMnmDhxIhcvXuSee+5h3rx5GI1GLBYLkydP5tChQ9SrV4+UlBRuv/32yj6M\nCrdr1y7eeecd3nzzzWoZixMnTjBmzBh0Oh02m41+/fqRkJBATk5Ohf2NeF3CEEIIUTm8akhKCCFE\n5ZGEIYQQwiWSMIQQQrhEEoYQQgiXSMIQQgjhEkkYQgghXCIJQ3i1Rx99lNjYWKKiomjVqhWxsbHE\nxsYyffr0ctf1zDPPuLTc9bRp09i7d+/NNLdcDh48yBdffOH27xHCVXIfhqgSMjMzGTRo0A1XH728\nVIS3+Pjjj0lLSyMlJaWymyIE4GVrSQlRHmlpacybN4/WrVtz8OBBRo8ezblz51i9erX9gTpTp06l\nXbt2AHTp0oXly5fTtGlTnnjiCdq0acOePXvIzs4mOjqa8ePHA/DEE0/w7LPP0rlzZyZPnoy/vz9H\njx4lKyuLtm3bMmfOHKB4bZ7nn3+e8+fP06RJE2w2G927d+exxx5zaOeZM2eYNGkS58+fB6Bz5848\n88wzLF68mIKCAmJjY2nfvj1Tp05lz549pKSkUFhYCEBiYiKRkZGkp6fzxBNPEB0dze7du7FYLMya\nNYu2bdt6JNaimriVh3UI8XuRkZGhOnTo4LBvx44dqmXLlmr//v32fVc+XObIkSOqa9eu9u3IyEj1\n66+/KqWUGjJkiJo0aZJSSqmLFy+qdu3aqYyMDPt727ZtU0op9dxzz6knn3xSFRUVKbPZrPr06aN2\n7typlFJq1KhR6u2331ZKKXXixAnVpk0btWbNmjJtX7p0qZo5c6Z9++LFi0oppT766CM1ceJEh7bH\nxMSos2fPKqWUOnXqlIqMjFR5eXnq+PHjqnnz5spkMtmPvWvXrspqtboeRCGckB6GqNLuvPNO7r33\nXvv2sWPHWLRoEdnZ2RgMBrKzs8nJyaFevXplPvvnP/8ZgICAAJo2bUp6ejqNGzcuU+5Pf/oTPj7F\nf0otW7YkPT2ddu3asXPnTl5++WUAbr/9dntP5mqtW7dm1apVzJ8/n4ceeojOnTtfs9zu3bvJyMhg\n
2LBh9gUpDQYDJ06coFatWtSsWZO+ffsC0LFjRwwGA8eOHSM8PNzVcAlxQ5IwRJVWu3Zth+0JEyYw\na9YsunTpgqZp3HfffZjN5mt+1s/Pz/5ar9djs9nKVc7V5yw88MADrFu3jh07dvDJJ5+wdOlSVq5c\nWaacUopWrVqxfPnyMu+lp6eX2adpWpV61oOofN4zAyiEE8qF6zfy8vLsq5OuWbPmukmgIrRr186+\nrHRmZia7du26ZrmMjAz8/f3p27cvU6dO5T//+Q9Q/KyLy8uYA7Rt25YjR47www8/2Pft27fP/rqw\nsJBNmzYBxY8oBQgLC6vYgxLVmvQwRJXhyq/p6dOnk5CQQMOGDWnfvj0BAQHX/PzVdV3vvRuVmzFj\nBlOmTMFkMnHnnXfStm1bh++7LC0tjffeew+DwYBSiqSkJAA6derEihUriImJoUOHDkydOpXFixcz\nb948cnNzKSoqokmTJrz55psABAYG8t///pfBgwdjsVhISUnBYDA4jYkQrpLLaoVwE7PZjNFoRK/X\nk5WVxeDBg1m9ejVNmjSp8O+6fJXU9u3bK7xuIS6THoYQbvLrr78ybdo0lFJomsaECRPckiyE8BTp\nYQghhHCJTHoLIYRwiSQMIYQQLpGEIYQQwiWSMIQQQrhEEoYQQgiXSMIQQgjhkv8PZHg4l1eLyCQA\nAAAASUVORK5CYII=\n",
-            "text/plain": [
-              "\u003cmatplotlib.figure.Figure at 0x7f96f7389490\u003e"
-            ]
-          },
-          "metadata": {
-            "tags": []
-          },
-          "output_type": "display_data"
-        }
-      ],
-      "source": [
-        "with context.eager_mode():\n",
-        "  durations = []\n",
-        "  for t in range(burn_ins + trials):\n",
-        "    hp = tf.contrib.training.HParams(\n",
-        "        learning_rate=0.05,\n",
-        "        max_steps=max_steps,\n",
-        "    )\n",
-        "    train_ds = setup_mnist_data(True, hp, 500)\n",
-        "    test_ds = setup_mnist_data(False, hp, 100)\n",
-        "    ds = tf.data.Dataset.zip((train_ds, test_ds))\n",
-        "    start = time.time()\n",
-        "    (train_losses, test_losses, train_accuracies,\n",
-        "     test_accuracies) = train(ds, hp)\n",
-        "    if t \u003c burn_ins:\n",
-        "      continue\n",
-        "    train_losses[-1].numpy()\n",
-        "    test_losses[-1].numpy()\n",
-        "    train_accuracies[-1].numpy()\n",
-        "    test_accuracies[-1].numpy()\n",
-        "    duration = time.time() - start\n",
-        "    durations.append(duration)\n",
-        "    print('Duration:', duration)\n",
-        "\n",
-        "\n",
-        "  print('Mean duration:', np.mean(durations), '+/-', np.std(durations))\n",
-        "  plt.title('MNIST train/test losses')\n",
-        "  plt.plot(train_losses, label='train loss')\n",
-        "  plt.plot(test_losses, label='test loss')\n",
-        "  plt.legend()\n",
-        "  plt.xlabel('Training step')\n",
-        "  plt.ylabel('Loss')\n",
-        "  plt.show()\n",
-        "  plt.title('MNIST train/test accuracies')\n",
-        "  plt.plot(train_accuracies, label='train accuracy')\n",
-        "  plt.plot(test_accuracies, label='test accuracy')\n",
-        "  print('test_accuracy', test_accuracies[-1])\n",
-        "  plt.legend(loc='lower right')\n",
-        "  plt.xlabel('Training step')\n",
-        "  plt.ylabel('Accuracy')\n",
-        "  plt.show()\n"
-      ]
-    }
-  ],
-  "metadata": {
-    "colab": {
-      "collapsed_sections": [],
-      "default_view": {},
-      "last_runtime": {
-        "build_target": "",
-        "kind": "local"
-      },
-      "name": "Autograph vs. Eager MNIST benchmark",
-      "provenance": [
-        {
-          "file_id": "1tAQW5tHUgAc8M4-iwwJm6Xs6dV9nEqtD",
-          "timestamp": 1530297010607
-        },
-        {
-          "file_id": "18dCjshrmHiPTIe1CNsL8tnpdGkuXgpM9",
-          "timestamp": 1530289467317
-        },
-        {
-          "file_id": "1DcfimonWU11tmyivKBGVrbpAl3BIOaRG",
-          "timestamp": 1522272821237
-        },
-        {
-          "file_id": "1wCZUh73zTNs1jzzYjqoxMIdaBWCdKJ2K",
-          "timestamp": 1522238054357
-        },
-        {
-          "file_id": "1_HpC-RrmIv4lNaqeoslUeWaX8zH5IXaJ",
-          "timestamp": 1521743157199
-        },
-        {
-          "file_id": "1mjO2fQ2F9hxpAzw2mnrrUkcgfb7xSGW-",
-          "timestamp": 1520522344607
-        }
-      ],
-      "version": "0.3.2",
-      "views": {}
-    }
-  },
-  "nbformat": 4,
-  "nbformat_minor": 0
-}
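The benchmark cell deleted above follows a standard burn-in-then-measure timing pattern: time `train()` over several trials and discard the warm-up runs before reporting statistics. Distilled into a reusable helper, the pattern looks like this sketch (`run_trial` is a hypothetical stand-in for the dataset setup and `train()` call in the deleted cell):

import time

import numpy as np


def benchmark(run_trial, burn_ins, trials):
  """Times run_trial() over `trials` runs, after `burn_ins` warm-up runs."""
  durations = []
  for t in range(burn_ins + trials):
    start = time.time()
    run_trial()
    if t < burn_ins:
      continue  # Warm-up runs are excluded from the reported statistics.
    durations.append(time.time() - start)
  print('Mean duration:', np.mean(durations), '+/-', np.std(durations))
  return durations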
diff --git a/tensorflow/contrib/autograph/examples/notebooks/dev_summit_2018_demo.ipynb b/tensorflow/contrib/autograph/examples/notebooks/dev_summit_2018_demo.ipynb
index 0702273..86e38c3 100644
--- a/tensorflow/contrib/autograph/examples/notebooks/dev_summit_2018_demo.ipynb
+++ b/tensorflow/contrib/autograph/examples/notebooks/dev_summit_2018_demo.ipynb
@@ -1,49 +1,20 @@
 {
-  "nbformat": 4,
-  "nbformat_minor": 0,
-  "metadata": {
-    "colab": {
-      "name": "Dev Summit 2018 - Autograph",
-      "version": "0.3.2",
-      "views": {},
-      "default_view": {},
-      "provenance": [
-        {
-          "file_id": "1wCZUh73zTNs1jzzYjqoxMIdaBWCdKJ2K",
-          "timestamp": 1522238054357
-        },
-        {
-          "file_id": "1_HpC-RrmIv4lNaqeoslUeWaX8zH5IXaJ",
-          "timestamp": 1521743157199
-        },
-        {
-          "file_id": "1mjO2fQ2F9hxpAzw2mnrrUkcgfb7xSGW-",
-          "timestamp": 1520522344607
-        }
-      ],
-      "collapsed_sections": []
-    },
-    "kernelspec": {
-      "name": "python2",
-      "display_name": "Python 2"
-    }
-  },
   "cells": [
     {
-      "metadata": {
-        "id": "g7nGs4mzVUHP",
-        "colab_type": "text"
-      },
       "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "g7nGs4mzVUHP"
+      },
       "source": [
-        "# Experimental: TF Autograph\n",
+        "# Experimental: TF AutoGraph\n",
         "**TensorFlow Dev Summit, 2018.**\n",
         "\n",
-        "This interactive notebook demonstrates **autograph**, an experimental source-code transformation library to automatically convert TF.Eager and Python code to TensorFlow graphs.\n",
+        "This interactive notebook demonstrates **AutoGraph**, an experimental source-code transformation library to automatically convert Python, TensorFlow and NumPy code to TensorFlow graphs.\n",
         "\n",
         "**Note: this is pre-alpha software!** The notebook works best with Python 2, for now.\n",
         "\n",
-        "> ![alt text](https://lh3.googleusercontent.com/QOvy0clmg7siaVKzwmSPAjicWWNQ0OeyaB16plDjSJMf35WD3vLjF6mz4CGrhSHw60HnlZPJjkyDCBzw5XOI0oBGSewyYw=s688)\n",
+        "\u003e ![alt text](https://lh3.googleusercontent.com/QOvy0clmg7siaVKzwmSPAjicWWNQ0OeyaB16plDjSJMf35WD3vLjF6mz4CGrhSHw60HnlZPJjkyDCBzw5XOI0oBGSewyYw=s688)\n",
         "\n",
         "### Table of Contents\n",
         "1. _Write Eager code that is fast and scalable._\n",
@@ -53,37 +24,39 @@
       ]
     },
     {
+      "cell_type": "code",
+      "execution_count": 0,
       "metadata": {
-        "id": "uFcgBENZqkB2",
-        "colab_type": "code",
         "colab": {
           "autoexec": {
             "startup": false,
             "wait_interval": 0
           }
-        }
+        },
+        "colab_type": "code",
+        "id": "uFcgBENZqkB2"
       },
-      "cell_type": "code",
+      "outputs": [],
       "source": [
         "# Install TensorFlow; note that Colab notebooks run remotely, on virtual\n",
         "# instances provided by Google.\n",
         "!pip install -U -q tf-nightly"
-      ],
-      "execution_count": 0,
-      "outputs": []
+      ]
     },
     {
+      "cell_type": "code",
+      "execution_count": 0,
       "metadata": {
-        "id": "Pa2qpEmoVOGe",
-        "colab_type": "code",
         "colab": {
           "autoexec": {
             "startup": false,
             "wait_interval": 0
           }
-        }
+        },
+        "colab_type": "code",
+        "id": "Pa2qpEmoVOGe"
       },
-      "cell_type": "code",
+      "outputs": [],
       "source": [
         "import os\n",
         "import time\n",
@@ -96,170 +69,172 @@
         "import six\n",
         "\n",
         "from google.colab import widgets"
-      ],
-      "execution_count": 0,
-      "outputs": []
+      ]
     },
     {
-      "metadata": {
-        "id": "ZVKfj5ttVkqz",
-        "colab_type": "text"
-      },
       "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "ZVKfj5ttVkqz"
+      },
       "source": [
         "# 1. Write Eager code that is fast and scalable\n",
         "\n",
         "TF.Eager gives you more flexibility while coding, but at the cost of losing the benefits of TensorFlow graphs. For example, Eager does not currently support distributed training, exporting models, and a variety of memory and computation optimizations.\n",
         "\n",
-        "Autograph gives you the best of both worlds: write your code in an Eager style, and we will automatically transform it into the equivalent TF graph code. The graph code can be executed eagerly (as a single op), included as part of a larger graph, or exported."
+        "AutoGraph gives you the best of both worlds: you can write your code in an Eager style, and we will automatically transform it into the equivalent TF graph code. The graph code can be executed eagerly (as a single op), included as part of a larger graph, or exported."
       ]
     },
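The next cells walk through this workflow step by step. As a compact preview, converting and running an Eager-style function with the pre-alpha `tf.contrib.autograph` API used in this notebook looks roughly like the following sketch:

import tensorflow as tf
from tensorflow.contrib import autograph


def square_if_positive(x):
  if x > 0:
    x = x * x
  else:
    x = 0
  return x

# to_graph returns a graph-building version of the Python function.
tf_square_if_positive = autograph.to_graph(square_if_positive)

with tf.Graph().as_default():
  result = tf_square_if_positive(tf.constant(9))
  with tf.Session() as sess:
    print(sess.run(result))  # 81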
     {
-      "metadata": {
-        "id": "snaZRFdWd9ym",
-        "colab_type": "text"
-      },
       "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "snaZRFdWd9ym"
+      },
       "source": [
-        "For example, autograph can convert a function like this:"
+        "For example, AutoGraph can convert a function like this:"
       ]
     },
     {
+      "cell_type": "code",
+      "execution_count": 0,
       "metadata": {
-        "id": "9__n8cSIeDnD",
-        "colab_type": "code",
         "colab": {
           "autoexec": {
             "startup": false,
             "wait_interval": 0
           }
-        }
+        },
+        "colab_type": "code",
+        "id": "9__n8cSIeDnD"
       },
-      "cell_type": "code",
+      "outputs": [],
       "source": [
         "def g(x):\n",
-        "  if x > 0:\n",
+        "  if x \u003e 0:\n",
         "    x = x * x\n",
         "  else:\n",
         "    x = 0\n",
         "  return x"
-      ],
-      "execution_count": 0,
-      "outputs": []
+      ]
     },
     {
-      "metadata": {
-        "id": "gq0eQcuReHET",
-        "colab_type": "text"
-      },
       "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "gq0eQcuReHET"
+      },
       "source": [
         "... into a TF graph-building function:"
       ]
     },
     {
+      "cell_type": "code",
+      "execution_count": 4,
       "metadata": {
-        "id": "sELSn599ePUF",
-        "colab_type": "code",
         "colab": {
           "autoexec": {
             "startup": false,
             "wait_interval": 0
           },
-          "output_extras": [
-            {}
-          ],
-          "base_uri": "https://localhost:8080/",
-          "height": 413
+          "height": 431
         },
-        "outputId": "bb0c7216-1ca3-4da1-d1fb-589902cdcd1a",
+        "colab_type": "code",
         "executionInfo": {
+          "elapsed": 69,
           "status": "ok",
-          "timestamp": 1522345737505,
-          "user_tz": 240,
-          "elapsed": 243,
+          "timestamp": 1531750911837,
           "user": {
-            "displayName": "Dan Moldovan",
-            "photoUrl": "//lh5.googleusercontent.com/-Rneh8xjecyk/AAAAAAAAAAI/AAAAAAAACB4/c5vwsJpbktY/s50-c-k-no/photo.jpg",
-            "userId": "112023154726779574577"
-          }
-        }
+            "displayName": "",
+            "photoUrl": "",
+            "userId": ""
+          },
+          "user_tz": 240
+        },
+        "id": "sELSn599ePUF",
+        "outputId": "2858bde5-ae05-4c32-be01-7770ac914f02"
       },
-      "cell_type": "code",
-      "source": [
-        "print(autograph.to_code(g))"
-      ],
-      "execution_count": 0,
       "outputs": [
         {
+          "name": "stdout",
           "output_type": "stream",
           "text": [
             "from __future__ import print_function\n",
             "import tensorflow as tf\n",
-            "from tensorflow.contrib.autograph.impl import api as autograph_api\n",
-            "from tensorflow.contrib.autograph import utils as autograph_utils\n",
             "\n",
             "def tf__g(x):\n",
-            "  with tf.name_scope('g'):\n",
+            "  try:\n",
+            "    with tf.name_scope('g'):\n",
             "\n",
-            "    def if_true():\n",
-            "      with tf.name_scope('if_true'):\n",
-            "        x_1, = x,\n",
-            "        x_1 = x_1 * x_1\n",
-            "        return x_1,\n",
+            "      def if_true():\n",
+            "        with tf.name_scope('if_true'):\n",
+            "          x_1, = x,\n",
+            "          x_1 = x_1 * x_1\n",
+            "          return x_1,\n",
             "\n",
-            "    def if_false():\n",
-            "      with tf.name_scope('if_false'):\n",
-            "        x_1, = x,\n",
-            "        x_1 = 0\n",
-            "        return x_1,\n",
-            "    x = autograph_utils.run_cond(tf.greater(x, 0), if_true, if_false)\n",
-            "    return x\n",
+            "      def if_false():\n",
+            "        with tf.name_scope('if_false'):\n",
+            "          x_2, = x,\n",
+            "          x_2 = 0\n",
+            "          return x_2,\n",
+            "      x = ag__.utils.run_cond(tf.greater(x, 0), if_true, if_false)\n",
+            "      return x\n",
+            "  except:\n",
+            "    ag__.rewrite_graph_construction_error(ag_source_map__)\n",
             "\n"
-          ],
-          "name": "stdout"
+          ]
         }
+      ],
+      "source": [
+        "print(autograph.to_code(g))"
       ]
     },
     {
-      "metadata": {
-        "id": "j74n-8hEe6dk",
-        "colab_type": "text"
-      },
       "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "j74n-8hEe6dk"
+      },
       "source": [
         "You can then use the converted function as you would any regular TF op -- you can pass `Tensor` arguments and it will return `Tensor`s:"
       ]
     },
     {
+      "cell_type": "code",
+      "execution_count": 5,
       "metadata": {
-        "id": "AkVaY0-dfEbH",
-        "colab_type": "code",
         "colab": {
           "autoexec": {
             "startup": false,
             "wait_interval": 0
           },
-          "output_extras": [
-            {}
-          ],
-          "base_uri": "https://localhost:8080/",
           "height": 53
         },
-        "outputId": "4ffe3757-c44d-424c-c2a8-7ddc973bfcce",
+        "colab_type": "code",
         "executionInfo": {
+          "elapsed": 83,
           "status": "ok",
-          "timestamp": 1522345737841,
-          "user_tz": 240,
-          "elapsed": 257,
+          "timestamp": 1531750911965,
           "user": {
-            "displayName": "Dan Moldovan",
-            "photoUrl": "//lh5.googleusercontent.com/-Rneh8xjecyk/AAAAAAAAAAI/AAAAAAAACB4/c5vwsJpbktY/s50-c-k-no/photo.jpg",
-            "userId": "112023154726779574577"
-          }
-        }
+            "displayName": "",
+            "photoUrl": "",
+            "userId": ""
+          },
+          "user_tz": 240
+        },
+        "id": "AkVaY0-dfEbH",
+        "outputId": "f04541ad-b1d3-4663-bf27-4d902648283d"
       },
-      "cell_type": "code",
+      "outputs": [
+        {
+          "name": "stdout",
+          "output_type": "stream",
+          "text": [
+            "g(9) = 81\n",
+            "tf_g(9) = 81\n"
+          ]
+        }
+      ],
       "source": [
         "tf_g = autograph.to_graph(g)\n",
         "\n",
@@ -272,77 +247,72 @@
         "\n",
         "  print('g(9) = %s' % g(9))\n",
         "  print('tf_g(9) = %s' % tf_g_result)"
-      ],
-      "execution_count": 0,
-      "outputs": [
-        {
-          "output_type": "stream",
-          "text": [
-            "g(9) = 81\n",
-            "tf_g(9) = 81\n"
-          ],
-          "name": "stdout"
-        }
       ]
     },
     {
-      "metadata": {
-        "id": "trrHQBM1VnD0",
-        "colab_type": "text"
-      },
       "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "trrHQBM1VnD0"
+      },
       "source": [
         "# 2. Case study: complex control flow\n",
         "\n",
-        "Autograph can convert a large chunk of the Python language into graph-equivalent code, and we're adding new supported language features all the time. In this section, we'll give you a taste of some of the functionality in autograph.\n",
-        "Autograph will automatically convert most Python control flow statements into their correct graph equivalent.\n",
+        "Autograph can convert a large subset of the Python language into graph-equivalent code, and we're adding new supported language features all the time. In this section, we'll give you a taste of some of the functionality in AutoGraph.\n",
+        "AutoGraph will automatically convert most Python control flow statements into their graph equivalent.\n",
         "  "
       ]
     },
     {
-      "metadata": {
-        "id": "u0YG3DPgZxoW",
-        "colab_type": "text"
-      },
       "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "u0YG3DPgZxoW"
+      },
       "source": [
         "We support common statements like `while`, `for`, `if`, `break`, `return` and more. You can even nest them as much as you like. Imagine trying to write the graph version of this code by hand:"
       ]
     },
     {
+      "cell_type": "code",
+      "execution_count": 6,
       "metadata": {
-        "id": "xJYDzOcrZ8pI",
-        "colab_type": "code",
         "colab": {
           "autoexec": {
             "startup": false,
             "wait_interval": 0
           },
-          "output_extras": [
-            {}
-          ],
-          "base_uri": "https://localhost:8080/",
           "height": 35
         },
-        "outputId": "6c244ee4-b141-4ad6-eefa-cfffa71f33c6",
+        "colab_type": "code",
         "executionInfo": {
+          "elapsed": 169,
           "status": "ok",
-          "timestamp": 1522345738402,
-          "user_tz": 240,
-          "elapsed": 483,
+          "timestamp": 1531750912183,
           "user": {
-            "displayName": "Dan Moldovan",
-            "photoUrl": "//lh5.googleusercontent.com/-Rneh8xjecyk/AAAAAAAAAAI/AAAAAAAACB4/c5vwsJpbktY/s50-c-k-no/photo.jpg",
-            "userId": "112023154726779574577"
-          }
-        }
+            "displayName": "",
+            "photoUrl": "",
+            "userId": ""
+          },
+          "user_tz": 240
+        },
+        "id": "xJYDzOcrZ8pI",
+        "outputId": "f392b475-bf87-4d90-919d-44f895ee9fc7"
       },
-      "cell_type": "code",
+      "outputs": [
+        {
+          "name": "stdout",
+          "output_type": "stream",
+          "text": [
+            "Sum of even numbers: 42\n"
+          ]
+        }
+      ],
       "source": [
         "def sum_even(numbers):\n",
         "  s = 0\n",
         "  for n in numbers:\n",
-        "    if n % 2 > 0:\n",
+        "    if n % 2 \u003e 0:\n",
         "      continue\n",
         "    s += n\n",
         "  return s\n",
@@ -358,77 +328,74 @@
         "  \n",
         "# Uncomment the line below to print the generated graph code\n",
         "# print(autograph.to_code(sum_even))"
-      ],
-      "execution_count": 0,
-      "outputs": [
-        {
-          "output_type": "stream",
-          "text": [
-            "Sum of even numbers: 42\n"
-          ],
-          "name": "stdout"
-        }
       ]
     },
     {
-      "metadata": {
-        "id": "_YXo4KOcbKrn",
-        "colab_type": "text"
-      },
       "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "_YXo4KOcbKrn"
+      },
       "source": [
         "Try replacing the `continue` in the above code with `break` -- Autograph supports that as well!"
       ]
     },
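For instance, a `break`-based variant of the loop converts just as cleanly. A sketch under the same pre-alpha API (`first_even` is a hypothetical example, not part of the notebook):

def first_even(numbers):
  result = 0
  for n in numbers:
    if n % 2 == 0:
      result = n
      break  # AutoGraph folds the break into the loop's termination condition.
  return result

tf_first_even = autograph.to_graph(first_even)

with tf.Graph().as_default(), tf.Session() as sess:
  print(sess.run(tf_first_even(tf.constant([1, 3, 6, 8]))))  # prints 6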
     {
-      "metadata": {
-        "id": "xHmC0rBIavW_",
-        "colab_type": "text"
-      },
       "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "xHmC0rBIavW_"
+      },
       "source": [
         "The Python code above is much more readable than the matching graph code. Autograph takes care of tediously converting every piece of Python code into the matching TensorFlow graph version for you, so that you can quickly write maintainable code, but still benefit from the optimizations and deployment benefits of graphs."
       ]
     },
     {
-      "metadata": {
-        "id": "UEHWGpBXbS7g",
-        "colab_type": "text"
-      },
       "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "UEHWGpBXbS7g"
+      },
       "source": [
         "Let's try some other useful Python constructs, like `print` and `assert`. We automatically convert Python `assert` statements into the equivalent `tf.Assert` code.  "
       ]
     },
     {
+      "cell_type": "code",
+      "execution_count": 7,
       "metadata": {
-        "id": "qUU57xlEbauI",
-        "colab_type": "code",
         "colab": {
           "autoexec": {
             "startup": false,
             "wait_interval": 0
           },
-          "output_extras": [
-            {}
-          ],
-          "base_uri": "https://localhost:8080/",
           "height": 53
         },
-        "outputId": "add3db4a-2077-4dd5-f7a7-a5b5a4529c26",
+        "colab_type": "code",
         "executionInfo": {
+          "elapsed": 56,
           "status": "ok",
-          "timestamp": 1522345738697,
-          "user_tz": 240,
-          "elapsed": 253,
+          "timestamp": 1531750912292,
           "user": {
-            "displayName": "Dan Moldovan",
-            "photoUrl": "//lh5.googleusercontent.com/-Rneh8xjecyk/AAAAAAAAAAI/AAAAAAAACB4/c5vwsJpbktY/s50-c-k-no/photo.jpg",
-            "userId": "112023154726779574577"
-          }
-        }
+            "displayName": "",
+            "photoUrl": "",
+            "userId": ""
+          },
+          "user_tz": 240
+        },
+        "id": "qUU57xlEbauI",
+        "outputId": "c9cd536a-4a95-4eb0-98c0-aafce5d79580"
       },
-      "cell_type": "code",
+      "outputs": [
+        {
+          "name": "stdout",
+          "output_type": "stream",
+          "text": [
+            "Got error message: assertion failed: [Do not pass zero!]\n",
+            "\t [[Node: f/Assert/Assert = Assert[T=[DT_STRING], summarize=3, _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"](f/NotEqual, f/Assert/Assert/data_0)]]\n"
+          ]
+        }
+      ],
       "source": [
         "def f(x):\n",
         "  assert x != 0, 'Do not pass zero!'\n",
@@ -444,61 +411,35 @@
         "      \n",
         "# Uncomment the line below to print the generated graph code\n",
         "# print(autograph.to_code(f))"
-      ],
-      "execution_count": 0,
-      "outputs": [
-        {
-          "output_type": "stream",
-          "text": [
-            "Got error message: assertion failed: [Do not pass zero!]\n",
-            "\t [[Node: f/Assert/Assert = Assert[T=[DT_STRING], summarize=3, _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"](f/NotEqual, f/Assert/Assert/data_0)]]\n"
-          ],
-          "name": "stdout"
-        }
       ]
     },
     {
-      "metadata": {
-        "id": "w5hBZaVJbck4",
-        "colab_type": "text"
-      },
       "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "w5hBZaVJbck4"
+      },
       "source": [
         "You can also use `print` functions in-graph:"
       ]
     },
     {
+      "cell_type": "code",
+      "execution_count": 0,
       "metadata": {
-        "id": "6NdzRKLEboRv",
-        "colab_type": "code",
         "colab": {
           "autoexec": {
             "startup": false,
             "wait_interval": 0
-          },
-          "output_extras": [
-            {}
-          ],
-          "base_uri": "https://localhost:8080/",
-          "height": 35
-        },
-        "outputId": "fb82dfc3-790f-4127-87f6-361805be9e9b",
-        "executionInfo": {
-          "status": "ok",
-          "timestamp": 1522345739013,
-          "user_tz": 240,
-          "elapsed": 247,
-          "user": {
-            "displayName": "Dan Moldovan",
-            "photoUrl": "//lh5.googleusercontent.com/-Rneh8xjecyk/AAAAAAAAAAI/AAAAAAAACB4/c5vwsJpbktY/s50-c-k-no/photo.jpg",
-            "userId": "112023154726779574577"
           }
-        }
+        },
+        "colab_type": "code",
+        "id": "6NdzRKLEboRv"
       },
-      "cell_type": "code",
+      "outputs": [],
       "source": [
         "def print_sign(n):\n",
-        "  if n >= 0:\n",
+        "  if n \u003e= 0:\n",
         "    print(n, 'is positive!')\n",
         "  else:\n",
         "    print(n, 'is negative!')\n",
@@ -512,62 +453,58 @@
         "    \n",
         "# Uncomment the line below to print the generated graph code\n",
         "# print(autograph.to_code(print_sign))"
-      ],
-      "execution_count": 0,
-      "outputs": [
-        {
-          "output_type": "stream",
-          "text": [
-            "1 is positive!\n"
-          ],
-          "name": "stdout"
-        }
       ]
     },
     {
-      "metadata": {
-        "id": "9u_Z3i3AivLA",
-        "colab_type": "text"
-      },
       "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "9u_Z3i3AivLA"
+      },
       "source": [
-        "We can convert lists to TensorArray, so appending to lists also works, with a few modifications:"
+        "Appending to lists also works, with a few modifications:"
       ]
     },
     {
+      "cell_type": "code",
+      "execution_count": 9,
       "metadata": {
-        "id": "MjhCQJVuiTNR",
-        "colab_type": "code",
         "colab": {
           "autoexec": {
             "startup": false,
             "wait_interval": 0
           },
-          "output_extras": [
-            {}
-          ],
-          "base_uri": "https://localhost:8080/",
           "height": 35
         },
-        "outputId": "dc320b87-595b-4392-d29c-994486fd8a0a",
+        "colab_type": "code",
         "executionInfo": {
+          "elapsed": 148,
           "status": "ok",
-          "timestamp": 1522345744470,
-          "user_tz": 240,
-          "elapsed": 5391,
+          "timestamp": 1531750912595,
           "user": {
-            "displayName": "Dan Moldovan",
-            "photoUrl": "//lh5.googleusercontent.com/-Rneh8xjecyk/AAAAAAAAAAI/AAAAAAAACB4/c5vwsJpbktY/s50-c-k-no/photo.jpg",
-            "userId": "112023154726779574577"
-          }
-        }
+            "displayName": "",
+            "photoUrl": "",
+            "userId": ""
+          },
+          "user_tz": 240
+        },
+        "id": "MjhCQJVuiTNR",
+        "outputId": "96bf9131-c7c1-4359-ee82-9c38575e7ab4"
       },
-      "cell_type": "code",
+      "outputs": [
+        {
+          "name": "stdout",
+          "output_type": "stream",
+          "text": [
+            "[0 1 2 3 4]\n"
+          ]
+        }
+      ],
       "source": [
         "def f(n):\n",
         "  numbers = []\n",
         "  # We ask you to tell us about the element dtype.\n",
-        "  autograph.utils.set_element_type(numbers, tf.int32)\n",
+        "  autograph.set_element_type(numbers, tf.int32)\n",
         "  for i in range(n):\n",
         "    numbers.append(i)\n",
         "  return autograph.stack(numbers) # Stack the list so that it can be used as a Tensor\n",
@@ -580,65 +517,62 @@
         "    \n",
         "# Uncomment the line below to print the generated graph code\n",
         "# print(autograph.to_code(f))"
-      ],
-      "execution_count": 0,
-      "outputs": [
-        {
-          "output_type": "stream",
-          "text": [
-            "[0 1 2 3 4]\n"
-          ],
-          "name": "stdout"
-        }
       ]
     },
     {
-      "metadata": {
-        "id": "UdG8ZFrkTAF2",
-        "colab_type": "text"
-      },
       "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "UdG8ZFrkTAF2"
+      },
       "source": [
         "And all of these functionalities, and more, can be composed into more complicated code:\n"
       ]
     },
     {
+      "cell_type": "code",
+      "execution_count": 10,
       "metadata": {
-        "id": "DVs6wt8NKaGQ",
-        "colab_type": "code",
+        "cellView": "code",
         "colab": {
           "autoexec": {
             "startup": false,
             "wait_interval": 0
           },
-          "output_extras": [
-            {}
-          ],
-          "base_uri": "https://localhost:8080/",
           "height": 53
         },
-        "cellView": "code",
-        "outputId": "0a4b8d08-8f65-4bbc-85ba-dc4c60563519",
+        "colab_type": "code",
         "executionInfo": {
+          "elapsed": 555,
           "status": "ok",
-          "timestamp": 1522345745186,
-          "user_tz": 240,
-          "elapsed": 658,
+          "timestamp": 1531750913176,
           "user": {
-            "displayName": "Dan Moldovan",
-            "photoUrl": "//lh5.googleusercontent.com/-Rneh8xjecyk/AAAAAAAAAAI/AAAAAAAACB4/c5vwsJpbktY/s50-c-k-no/photo.jpg",
-            "userId": "112023154726779574577"
-          }
-        }
+            "displayName": "",
+            "photoUrl": "",
+            "userId": ""
+          },
+          "user_tz": 240
+        },
+        "id": "DVs6wt8NKaGQ",
+        "outputId": "8729229c-4f08-4640-d3a1-0d3f9c697a87"
       },
-      "cell_type": "code",
+      "outputs": [
+        {
+          "name": "stdout",
+          "output_type": "stream",
+          "text": [
+            "The prime numbers less than 50 are:\n",
+            "[ 2  3  5  7 11 13 17 19 23 29 31 37 41 43 47]\n"
+          ]
+        }
+      ],
       "source": [
         "def print_primes(n):\n",
         "  \"\"\"Returns all the prime numbers less than n.\"\"\"\n",
-        "  assert n > 0\n",
+        "  assert n \u003e 0\n",
         "  \n",
         "  primes = []\n",
-        "  autograph.utils.set_element_type(primes, tf.int32)\n",
+        "  autograph.set_element_type(primes, tf.int32)\n",
         "  for i in range(2, n):\n",
         "    is_prime = True\n",
         "    for k in range(2, i):\n",
@@ -663,45 +597,36 @@
         "    \n",
         "# Uncomment the line below to print the generated graph code\n",
         "# print(autograph.to_code(print_primes))"
-      ],
-      "execution_count": 0,
-      "outputs": [
-        {
-          "output_type": "stream",
-          "text": [
-            "The prime numbers less than 50 are:\n",
-            "[ 2  3  5  7 11 13 17 19 23 29 31 37 41 43 47]\n"
-          ],
-          "name": "stdout"
-        }
       ]
     },
     {
-      "metadata": {
-        "id": "JQ8kQT99VqDk",
-        "colab_type": "text"
-      },
       "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "JQ8kQT99VqDk"
+      },
       "source": [
         "# 3. Case study: training MNIST with Keras\n",
         "\n",
-        "As we've seen, writing control flow in Autograph is easy. So running a training loop in graph should be easy as well!\n",
+        "As we've seen, writing control flow in AutoGraph is easy. So running a training loop in graph should be easy as well!\n",
         "\n",
         "Here, we show an example of such a training loop for a simple Keras model that trains on MNIST."
       ]
     },
     {
+      "cell_type": "code",
+      "execution_count": 0,
       "metadata": {
-        "id": "0CrtGWgwuLJr",
-        "colab_type": "code",
         "colab": {
           "autoexec": {
             "startup": false,
             "wait_interval": 0
           }
-        }
+        },
+        "colab_type": "code",
+        "id": "0CrtGWgwuLJr"
       },
-      "cell_type": "code",
+      "outputs": [],
       "source": [
         "import gzip\n",
         "import shutil\n",
@@ -754,66 +679,67 @@
         "\n",
         "def mnist_test(directory):\n",
         "  return dataset(directory, 't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte')"
-      ],
-      "execution_count": 0,
-      "outputs": []
+      ]
     },
     {
-      "metadata": {
-        "id": "2zu1U9Nqir6L",
-        "colab_type": "text"
-      },
       "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "2zu1U9Nqir6L"
+      },
       "source": [
         "First, we'll define a small three-layer neural network using the Keras API"
       ]
     },
     {
+      "cell_type": "code",
+      "execution_count": 0,
       "metadata": {
-        "id": "x_MU13boiok2",
-        "colab_type": "code",
         "colab": {
           "autoexec": {
             "startup": false,
             "wait_interval": 0
           }
-        }
+        },
+        "colab_type": "code",
+        "id": "x_MU13boiok2"
       },
-      "cell_type": "code",
+      "outputs": [],
       "source": [
         "def mlp_model(input_shape):\n",
-        "  model = tf.keras.Sequential([\n",
+        "  model = tf.keras.Sequential((\n",
         "      tf.keras.layers.Dense(100, activation='relu', input_shape=input_shape),\n",
         "      tf.keras.layers.Dense(100, activation='relu'),\n",
-        "      tf.keras.layers.Dense(10, activation='softmax')])\n",
+        "      tf.keras.layers.Dense(10, activation='softmax'),\n",
+        "  ))\n",
         "  model.build()\n",
         "  return model"
-      ],
-      "execution_count": 0,
-      "outputs": []
+      ]
     },
     {
-      "metadata": {
-        "id": "Wuqg3H8mi0Xj",
-        "colab_type": "text"
-      },
       "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "Wuqg3H8mi0Xj"
+      },
       "source": [
         "Let's connect the model definition (here abbreviated as `m`) to a loss function, so that we can train our model."
       ]
     },
     {
+      "cell_type": "code",
+      "execution_count": 0,
       "metadata": {
-        "id": "W51sfbONiz_5",
-        "colab_type": "code",
         "colab": {
           "autoexec": {
             "startup": false,
             "wait_interval": 0
           }
-        }
+        },
+        "colab_type": "code",
+        "id": "W51sfbONiz_5"
       },
-      "cell_type": "code",
+      "outputs": [],
       "source": [
         "def predict(m, x, y):\n",
         "  y_p = m(x)\n",
@@ -822,63 +748,63 @@
         "  accuracies = tf.keras.metrics.categorical_accuracy(y, y_p)\n",
         "  accuracy = tf.reduce_mean(accuracies)\n",
         "  return l, accuracy"
-      ],
-      "execution_count": 0,
-      "outputs": []
+      ]
     },
     {
-      "metadata": {
-        "id": "035tNWQki9tr",
-        "colab_type": "text"
-      },
       "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "035tNWQki9tr"
+      },
       "source": [
         "Now the final piece of the problem specification (before loading data, and clicking everything together) is backpropagating the loss through the model, and optimizing the weights using the gradient."
       ]
     },
     {
+      "cell_type": "code",
+      "execution_count": 0,
       "metadata": {
-        "id": "CsAD0ajbi9iZ",
-        "colab_type": "code",
         "colab": {
           "autoexec": {
             "startup": false,
             "wait_interval": 0
           }
-        }
+        },
+        "colab_type": "code",
+        "id": "CsAD0ajbi9iZ"
       },
-      "cell_type": "code",
+      "outputs": [],
       "source": [
         "def fit(m, x, y, opt):\n",
         "  l, accuracy = predict(m, x, y)\n",
         "  opt.minimize(l)\n",
         "  return l, accuracy"
-      ],
-      "execution_count": 0,
-      "outputs": []
+      ]
     },
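+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "Note that `opt.minimize(l)` takes the loss directly; under the hood this is shorthand for the two-step TF1 optimizer API, roughly (a sketch, assuming graph mode):\n",
+        "\n",
+        "```python\n",
+        "grads_and_vars = opt.compute_gradients(l)   # (gradient, variable) pairs\n",
+        "train_op = opt.apply_gradients(grads_and_vars)\n",
+        "```"
+      ]
+    },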
     {
-      "metadata": {
-        "id": "PcVRIacKjSwb",
-        "colab_type": "text"
-      },
       "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "PcVRIacKjSwb"
+      },
       "source": [
         "These are some utility functions to download data and generate batches for training"
       ]
     },
     {
+      "cell_type": "code",
+      "execution_count": 0,
       "metadata": {
-        "id": "RVw57HdTjPzi",
-        "colab_type": "code",
         "colab": {
           "autoexec": {
             "startup": false,
             "wait_interval": 0
           }
-        }
+        },
+        "colab_type": "code",
+        "id": "RVw57HdTjPzi"
       },
-      "cell_type": "code",
+      "outputs": [],
       "source": [
         "def setup_mnist_data(is_training, hp, batch_size):\n",
         "  if is_training:\n",
@@ -896,16 +822,14 @@
         "  x = tf.to_float(tf.reshape(image, (-1, 28 * 28)))\n",
         "  y = tf.one_hot(tf.squeeze(label), 10)\n",
         "  return x, y"
-      ],
-      "execution_count": 0,
-      "outputs": []
+      ]
     },
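+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "The two transforms at the end flatten each image into a 784-vector of floats and one-hot encode each label; a minimal sketch of the resulting shapes:\n",
+        "\n",
+        "```python\n",
+        "image = tf.zeros((28, 28), dtype=tf.uint8)\n",
+        "x = tf.to_float(tf.reshape(image, (-1, 28 * 28)))  # shape (1, 784)\n",
+        "label = tf.constant([3])\n",
+        "y = tf.one_hot(tf.squeeze(label), 10)              # shape (10,)\n",
+        "```"
+      ]
+    },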
     {
-      "metadata": {
-        "id": "2zEJH5XNjgFz",
-        "colab_type": "text"
-      },
       "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "2zEJH5XNjgFz"
+      },
       "source": [
         "This function specifies the main training loop. We instantiate the model (using the code above), instantiate an optimizer (here we'll use SGD with momentum, nothing too fancy), and we'll instantiate some lists to keep track of training and test loss and accuracy over time.\n",
         "\n",
@@ -913,33 +837,35 @@
       ]
     },
     {
+      "cell_type": "code",
+      "execution_count": 0,
       "metadata": {
-        "id": "UUI0566FjZPx",
-        "colab_type": "code",
         "colab": {
           "autoexec": {
             "startup": false,
             "wait_interval": 0
           }
-        }
+        },
+        "colab_type": "code",
+        "id": "UUI0566FjZPx"
       },
-      "cell_type": "code",
+      "outputs": [],
       "source": [
         "def train(train_ds, test_ds, hp):\n",
         "  m = mlp_model((28 * 28,))\n",
         "  opt = tf.train.MomentumOptimizer(hp.learning_rate, 0.9)\n",
+        "\n",
         "  train_losses = []\n",
-        "  train_losses = autograph.utils.set_element_type(train_losses, tf.float32)\n",
+        "  autograph.set_element_type(train_losses, tf.float32)\n",
         "  test_losses = []\n",
-        "  test_losses = autograph.utils.set_element_type(test_losses, tf.float32)\n",
+        "  autograph.set_element_type(test_losses, tf.float32)\n",
         "  train_accuracies = []\n",
-        "  train_accuracies = autograph.utils.set_element_type(train_accuracies,\n",
-        "                                                      tf.float32)\n",
+        "  autograph.set_element_type(train_accuracies, tf.float32)\n",
         "  test_accuracies = []\n",
-        "  test_accuracies = autograph.utils.set_element_type(test_accuracies,\n",
-        "                                                     tf.float32)\n",
-        "  i = tf.constant(0)\n",
-        "  while i < hp.max_steps:\n",
+        "  autograph.set_element_type(test_accuracies, tf.float32)\n",
+        "\n",
+        "  i = 0\n",
+        "  while i \u003c hp.max_steps:\n",
         "    train_x, train_y = get_next_batch(train_ds)\n",
         "    test_x, test_y = get_next_batch(test_ds)\n",
         "    step_train_loss, step_train_accuracy = fit(m, train_x, train_y, opt)\n",
@@ -956,173 +882,144 @@
         "  return (autograph.stack(train_losses), autograph.stack(test_losses),\n",
         "          autograph.stack(train_accuracies),\n",
         "          autograph.stack(test_accuracies))"
-      ],
-      "execution_count": 0,
-      "outputs": []
+      ]
     },
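+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "The `set_element_type` hints tell AutoGraph to stage each plain Python list as a typed tensor list, so the `append` calls work inside the converted `while` loop; `autograph.stack` then turns each staged list back into a single Tensor. A minimal sketch of the pattern in isolation:\n",
+        "\n",
+        "```python\n",
+        "losses = []\n",
+        "autograph.set_element_type(losses, tf.float32)\n",
+        "losses.append(tf.constant(0.5))\n",
+        "loss_tensor = autograph.stack(losses)  # float32 Tensor of shape (1,)\n",
+        "```"
+      ]
+    },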
     {
-      "metadata": {
-        "id": "cYiUQ1ppkHzk",
-        "colab_type": "text"
-      },
       "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "cYiUQ1ppkHzk"
+      },
       "source": [
         "Everything is ready to go, let's train the model and plot its performance!"
       ]
     },
     {
+      "cell_type": "code",
+      "execution_count": 17,
       "metadata": {
-        "id": "K1m8TwOKjdNd",
-        "colab_type": "code",
         "colab": {
           "autoexec": {
             "startup": false,
             "wait_interval": 0
           },
-          "output_extras": [
-            {},
-            {},
-            {}
-          ],
-          "base_uri": "https://localhost:8080/",
-          "height": 988
+          "height": 585
         },
-        "outputId": "f9d3eef3-5bea-45c1-ddf9-4edee73e4436",
+        "colab_type": "code",
         "executionInfo": {
+          "elapsed": 17094,
           "status": "ok",
-          "timestamp": 1522345800262,
-          "user_tz": 240,
-          "elapsed": 52391,
+          "timestamp": 1531750930585,
           "user": {
-            "displayName": "Dan Moldovan",
-            "photoUrl": "//lh5.googleusercontent.com/-Rneh8xjecyk/AAAAAAAAAAI/AAAAAAAACB4/c5vwsJpbktY/s50-c-k-no/photo.jpg",
-            "userId": "112023154726779574577"
-          }
-        }
+            "displayName": "",
+            "photoUrl": "",
+            "userId": ""
+          },
+          "user_tz": 240
+        },
+        "id": "K1m8TwOKjdNd",
+        "outputId": "9f63da19-c3bf-498b-cf00-29090bf3b4f0"
       },
-      "cell_type": "code",
+      "outputs": [
+        {
+          "data": {
+            "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYwAAAEcCAYAAADUX4MJAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvXeAVNXd//++ZdrONsqyNBUECxZQRBHUoKLoE+lP0F+i\nxMT4tRDFWBKVxG7UJPaK8mBBE40lQBAVFAQE6bAU6WWBZXvf6bec3x+3zu7M7iw7w+4Onxd/MDO3\nnXtn9rzPp5zP4RhjDARBEATRAnx7N4AgCILoHJBgEARBEAlBgkEQBEEkBAkGQRAEkRAkGARBEERC\nkGAQBEEQCUGCQRDtzLp16zBq1KiE9n399dfxxz/+sc3nIYhjgQSDaHeuvPJKnHvuuaitrY36fMKE\nCTjzzDNRXFwMAHjooYdw5plnYtu2beY+hw8fxplnnmm+nzp1Kj7//HPz/cyZMzF69GgMHToUl19+\nOe677z4AwNixYzF06FAMHToUZ511FgYPHozzzz8fQ4cOxTvvvJPK240Jx3FJ2bc15yGI1iK2dwMI\nAgD69u2LhQsX4sYbbwQA7NmzB+FwOKoD5DgOubm5ePnllzF79uyoz2Mxd+5cLFiwAB988AH69u2L\nqqoqLF26FADw5ZdfmvtNnToVEydOxP/+7/+m4tYIIm0gC4PoEEyYMAFz584138+dOxeTJk1qst+k\nSZOwe/dubNiwocVzbt++HZdeein69u0LAOjWrRumTJkSc9+WCh68/vrruOeee/DHP/4RQ4cOxfjx\n41FYWIh33nkHI0eOxBVXXIEff/zR3L+8vBx33nknhg8fjmuuuQafffaZuS0cDuOhhx7CRRddhLFj\nx0ZZTMax06dPx4gRI3DVVVfhww8/bPFeY7F//35MnToVF154IcaNG2eKJQAsX74c1113HYYOHYpR\no0bhvffeAwDU1NTgjjvuwIUXXojhw4fjpptuOqZrE+kJCQbRIRgyZAj8fj8OHDgAVVXxzTffYPz4\n8U06crfbjTvuuAMvvvhiQuecN28eZs+eje3bt0NV1Ta1cdmyZZg0aRI2bNiAQYMG4Xe/+x0YY/jh\nhx8wbdo0PPLII+a+9913H3r37o2VK1filVdewYsvvog1a9YAAF577TUUFRVhyZIlmD17NubNm2ce\nxxjDHXfcgUGDBmHlypV4//33MWfOHKxatapVbZVlGXfeeScuu+wyrF69Gn/+85/xwAMPoLCwEADw\n5z//GU899RQ2bdqEL7/8EhdffDEA4L333kPPnj2xdu1a/Pjjj7j33nvb9MyI9IIEg+gwTJgwAfPm\nzcOqVatw6qmnokePHjH3u/7661FSUoIffvih2fONHz8ejzzyCFatWoWpU6di5MiRbYpPDBs2DCNH\njgTP87j22mtRU1OD2267DYIg4Oc//zmKi4vh8/lQUlKCzZs344EHHoDD4cCZZ56JKVOmYP78+QCA\nb775BnfeeSeysrKQn5+PqVOnmtfYunUramtrceedd0IQBPTt2xdTpkzBwoULW9XWgoICBAIB3Hbb\nbRBFERdffDGuuOIK0xXndDqxb98++Hw+ZGVlYdCgQQAAURRRUVGBoqIiCIKACy644JifF5F+kGAQ\nHYbx48fjyy+/xNy5czFhwoS4+zmdTkybNg2vvPJKi66ksWPH4t1338WGDRvwxBNP4NVXX231aN2g\nW7du5mu3240uXbqY8RO32w3GGPx+PyoqKpCTkwOPx2Pu37t3b5SXlwPQXE49e/aM2mZQXFyMsrIy\nXHTRRbjoootw4YUX4u2330Z1dXWr2lpeXo5evXpFfWZvw6uvvoply5bhyiuvxNSpU1FQUAAAuPXW\nW3HyySfjlltuwdVXX90uCQBEx4UEg+gw9O7dG3369MGKFSswZsyYZvedPHkyGhoa8O233yZ0bkEQ\ncM011+CMM87A3r17k9HcuPTo0QN1dXUIBALmZyUlJabFlJeXh5KSEnObkQUGAL169ULfvn2xbt06\nrFu3DuvXr8fGjRsxc+bMVrfBfg3jOkYbzjnnHLz55ptYvXo1Ro8ejT/84Q8AgIyMDDz44IP47rvv\nMHPmTLz//vumK40gSDCIDsUzzzyDDz74AG63u9n9BEHAXXfdhVmzZsXdZ+7cuVi+fDn8fj8YY1i+\nfDn279+PwYMHJ7vZUfTs2RPnn38+XnzxRUQiEezatQuff/45xo8fDwD4n//5H7z99tuor69HaWkp\nPvroI/PYwYMHIzMzE7NmzUI4HIaiKNi7d2+TwHhLDBkyBBkZGZg1axZkWcbatWuxbNkyjB07FpIk\nYcGCBfD5fBAEAV6vF4IgANDiNIcPHwagiYcgCOY2gqC0WqLdsafFnnTSSXG3NWbs2LF455130NDQ\nEHP/zMxMzJw5EwcOHICiKOjduzcef/xxDB06NOFrtAb7eV544QU89thjuOyyy5CTk4N77rkHI0aM\nAADcddddeOyxxzB69Gjk5+dj8uTJmDNnDgCA53nMnDkTzz33HEaPHg1JktC/f3/cc889rWqLw+HA\nW2+9hccffxxvv/02evbsib///e/o168fJEnC/Pnz8fTTT0NRFPTv3x/PP/88AKCwsBBPPvkkampq\nkJOTgxtvvBEXXnhhUp4P0fnhUrmAUmlpKf70pz+hsrISgiBgypQp+PWvfx21z7p16zBt2jSzo7j6\n6qsxbdq0VDWJIAiCOEZSamEIgoCHH34YgwYNgt/vx+TJk3HJJZdgwIABUfsNGzas1T5agiAI4viS\n0hhGXl6ema7n9XoxYMAAM0uDIAiC6Fwct6B3UVERdu3aFTPgWFBQgIkTJ+K2227Dvn37jleTCIIg\niFaQ0hiGgd/vx9SpUzFt2jRcddVVTbbxPA+Px4Ply5fjmWeewaJFi1LdJIIgCKKVpNzCkGUZ06dP\nx4QJE5qIBaC5qowJTqNGjYIkSU2qljbmOGgcQRAE0YiUp9XOmDEDAwcOxM033xxze2VlJbp37w5A\nK4sAALm5uc2ek+M4VFQ0NLvPiUJeXhY9Cx16Fhb0LCzoWVjk5WW16fiUCsbGjRuxYMECnH766Zg4\ncSI4jsO9996L4uJicByHG264AYsWLcLHH38MURThdrvx0ksvpbJJBEEQxDFyXGIYqYBGDBo0erKg\nZ2FBz8KCnoVFWy0MKg1CEARBJAQJBkEQBJEQJBgEQRBEQpBgEARBEAlBgkEQBEEkBAkGQRCEDZ/P\nh7lzPz+mY//0pz/A7/clvP+7776DTz75qOUdOwgkGARBEDYaGuoxd+5nMbepqtrssX//+8vwejNT\n0awOQadcQOmR715AqDIHd1w8CdleZ3s3hyCINGLmzNdRXHwUt9xyI4YNG44RIy7Be+/NQrdu3bFv\n3x58+OGnePjhB1BRUY5IJIwpU36JceMmAgCmTBmP2bM/RCAQwAMPTMe5556H7du3IC8vH8899wKc\nzvj91d69u/H8888hHA
6jT58+ePjhx5CZmYnPPvsE8+f/B6Iool+//nj88b9i8+aNePXVF/RFuzi8\n8casqDXkU0WnFIzdVfsADvhg0SDcPTm1y20SBNF+fLp0H9bvatuSCILAQVGs+ckXntkD1185MO7+\nd955NwoLD+Ddd/8JANi8eSN27tyBDz/8FD179gQAzJjxGLKyshAOh/H//t+vMWrUlcjOzgZgrbpY\nVHQETzzxLB588M949NGHsWzZUowZc23c6z799OO4774HMWTIeZg9+2289947uPvu+/DPf36Azz9f\nAFEUTXfXJ598hPvvfwjnnDMYoVCoWSFKJp3aJVUfab5IIUEQRDI466yzTbEAgE8//Rd+85tf4fbb\nf4vy8nIUFR3Wt1jC1KtXbwwYoAnTGWecidLS4rjn9/t98Pt9GDLkPADAtddeh4KCzQCAgQNPw+OP\n/xmLF38NntfWVz/33CF49dUX8fnnn6ChoR48f3y68k5pYUQOnQnnKbvAvDXt3RSCIFLI9VcObNYa\nSIRklAZxu93m682bN2LTpg1455334XQ6cffdtyMSiTQ5xj7q53kh5j524lVp+sc/XkFBwSasXLkc\n77//f/joo89w002/wciRl2H16pW4/fbf4uWX38TJJ59yjHeXOJ3SwmD+HACA5CDBIAgiuWRkZCAQ\nCMTd7vf7kJWVBafTiUOHCvHTT9tj7teaMn1ebyays7OxdWsBAGDRoq9w3nlDAQBlZaU4//wLcOed\n0+H3+xAMBnD0aBFOPXUAbrzxZpxxxiAcPlyY+A22gU5pYagBrYCWH1Xt3BKCINKN7OwcnHvuENx8\n8/+H4cNHYsSIS6K2Dx8+EvPmfYHf/OZXOPnkU3DOOefatloxDC0gnTgzZjyO559/FuFwGL1798GM\nGY9BlmU8+eQj8Pv9ABhuuOFGeL2ZmDXrLWzatAGCIKBfv1Nx8cWXtHj+ZNApq9Xe/MQi+PsthuCQ\n8dpVT7X6i0knqBKnBT0LC3oWFvQsLE7IarXvPzoGQiQHTIigLlLf3s0hCII4IeiUgsFxHFyqppSV\nwep2bg1BEMSJQacUDADwIBsAUBGgOAZBEMTxoNMKRqagZUrtrYif20wQBEEkj04rGJeeoeVmbz96\npJ1bQhAEcWLQaQVjxOn9AAaEWOKVIQmCIIhjp9MKhsAL4FQnFK752ZMEQRCtoS3lzQHg008/Rjgc\njrnt7rtvx+7du4753O1NpxUMABCYE4yPQFaaLzlMEASRKM2VN0+Ezz77GOFwKIkt6jh0ypneBg7O\nBUkMoCEgoUuWq72bQxBEGtC4vPm0adPxr399iO+//xaSJONnP7sct9xyG0KhEB599CFUVJRDVVXc\nfPOtqK6uRGVlBe6++w7k5ubilVfeinudb7/9Bh999D4A4OKLL8Gdd94NVVXx3HNPYffunQA4XHfd\neFx//S9jljhvDzq1YLh4D4JcFap9fhIMgkhD/rPvS2wu39amcwg8B0W1Clqc3+NcTB44Nu7+jcub\nr1+/BkVFhzFr1hwwxvDgg/dhy5YC1NZWo3v3PPz97y8DAAIBPzIyvPj3vz/Ga6+9rZc7j01lZSVm\nznwd7733T2RmZuHee3+PlSuXIy8vHxUV5fjgg08AwCxnHqvEeXvQqV1SHkFbMKTCR7O9CYJIDevW\nrcX69etwyy034pZbbsThw4dQVHQYp546EBs2rMPMma9jy5YCZGR49SMY7GXOY7Fr108YOnQYsrNz\nwPM8rr76WhQUbEbv3n1QUlKMl19+HmvXrjbPGavEeXvQqS2MDIcHCAM17ai4BEGkjskDxzZrDSRC\nW2tJMcYwdepvMH78pCbbZs/+CKtXr8Lbb7+Oiy66GL/5za0JnzNWGb+srCy8//7HWLt2Nf7zn0+x\ndOm3ePjhR2OWOD9ea2DY6dQWRpZTU9/qABUWIwgiOTQubz58+MVYuPC/CAaDAIDKygrU1NSgsrIS\nLpcLY8Zci1/+8ibs2bNbP96rV5eNz1lnnYMtWzajvr4OiqLgu+8W4bzzhqKurhaqqmDUqCtw6613\nYu9e7ZyxSpy3B53awsh2e4EGoC7U/JdDEASRKI3Lm0+bNh2FhYW4447fAtAE5ZFHnkJR0RG88cYr\n4HkOoujAAw88DAAYP34iHnhgOrp3z2sS9DYqa3fr1h233/573H337QCAESMuxaWX/gz79u3FM888\nAcZUcByHO+64O26J8/agU5Y3B4CKigYs3rcK8w/PxymRS/Cnaye0d5PaBSrdbEHPwoKehQU9C4sT\nsry5QY+sLgCABpl+DARBEKmmUwtG36x8AECA0VKtBEEQqaZTC0ZXTxdAFRAWKK2WIAgi1XRqweA5\nHqKcBdXZAEVV2rs5BEEQaU2nFgwAcLMccLyKcl9tezeFIAgiren0gpHt1KL+e0vL2rklBEEQ6U2n\nF4zeOV0BAHtKy9u5JQRBEOlNSgWjtLQUv/71r/Hzn/8c48aNw5w5c2Lu9/TTT2PMmDGYMGECdu7c\n2apr9M/LAwAcqa5sc3sJgiCI+KR0prcgCHj44YcxaNAg+P1+TJ48GZdccgkGDBhg7rN8+XIcPnwY\nixcvxpYtW/DYY4/h008/TfgaeZm5AID6CNWTIgiCSCUptTDy8vIwaNAgAIDX68WAAQNQXh7tOlqy\nZAkmTpwIABgyZAgaGhpQWZm4tZDt1KbIB1U/1M45aZ0gCKJTcNxiGEVFRdi1axcGDx4c9Xl5eTl6\n9uxpvs/Pz0dZWeIBbCPozYQwGgJSchpLEARBNOG4FB/0+/2YPn06ZsyYAa/XG7UtVikro0BXcxg1\nUbqqGdoxjjCYwLe5Vkpn5ES853jQs7CgZ2FBzyI5pFwwZFnG9OnTMWHCBFx11VVNtufn56O0tNR8\nX1paih49erR4XnsxMSc8CDnC2H+oBrnuTl2At9VQYTULehYW9Cws6FlYdPjigzNmzMDAgQNx8803\nx9w+evRozJs3DwBQUFCA7OxsdO/evVXX8IpecI4I9hyhyXsEQRCpIqXD8Y0bN2LBggU4/fTTMXHi\nRHAch3vvvRfFxcXgOA433HADRo0aheXLl+Pqq6+Gx+PBs88+2+rr5GXmokauxNKNhzDh0n7IcDtS\ncDcEQRAnNikVjAsuuCCheRWPPvpom66T49LMLFUIo6o+TIJBEASRAjr9TG/AypSCI4x6f6R9G0MQ\nBJGmpIVgZOlzMThHBHX+cDu3hiAIIj1JC8EwLAzOEUIdWRgEQRApIS0EIy+jGwCA9/hR5yPBIAiC\nSAVpIRi9vb3AgQOfW47qAOVbEwRBpIK0EAy36AIDA+8O4oC4or2bQxAEkZakhWAAwPCeFwAAgq7i\ndm4JQRBEepI2gvGL08YBAHjJ28KeBEEQxLGQNoKR4cgAwplgnNzeTSEIgkhL0kYwAIBXBTCeBIMg\nCCIVpJVgcMwB8ApUprZ3UwiCINKOtBIMXi+NFVFoISWCIIhkk16CwbSig2GFJu8
RBEEkm7QSDEG3\nMMIK1ZMiCIJINmkmGJqFEZRC7dwSgiCI9COtBEPkNMHwR0gwCIIgkk2aCYYTABCUyCVFEASRbNJM\nMDQLI0AuKYIgiKSTVoLh5HXBkMnCIAiCSDZpJRgO3SUVIguDIAgi6aSVYLh4FwAgKJNgEARBJJv0\nEgzBAwAISMF2bglBEET6kVaC4RbcAICgTIJBEASRbNJLMETNwggq5JIiCIJINmklGB7dwggpZGEQ\nBEEkm7QSDJdDBFMEqiVFEASRAtJKMESBA5MdCKvkkiIIgkg2aSUYDpEHFAcijASDIAgi2aSVYIgC\nDyaLkFiEVt0jCIJIMmklGE6HACh6iXOavEcQBJFU0kowsjwOMFkvcS4F2rk1BEEQ6UV6CUaGA0zS\n6kn5JH87t4YgCCK9SDPBcAKyLhgRXzu3hiAIIr1IK8FwOwVwClkYBEEQqSCtBIPjOHjEDACAL0KC\nQRAEkUxSKhgzZszAyJEjMW7cuJjb161bh2HDhmHSpEmYNGkS3nzzzTZf0yt4AQANErmkCIIgkomY\nypNPnjwZU6dOxZ/+9Ke4+wwbNgwzZ85M2jWznF7UAqgPk2AQBEEkk5RaGMOGDUN2dnYqL9GEHHcW\nAKAu1HBcr0sQBJHutHsMo6CgABMnTsRtt92Gffv2tfl8mW43mCKgLkKCQRAEkUxS6pJqibPPPhvf\nf/89PB4Pli9fjt///vdYtGhRm87pdTvA6jNQI1aDMQaO45LUWoIgiBObdhUMr9drvh41ahSeeOIJ\n1NbWIjc3t8Vj8/KyYn/eNROsPANSRgMcWQxdPMfXJdYexHsWJyL0LCzoWVjQs0gOKRcMxljcbZWV\nlejevTsAYOvWrQCQkFgAQEVFbJcTUxSooQwIAHYeKcRpXU5tXYM7GXl5WXGfxYkGPQsLehYW9Cws\n2iqcKRWM+++/H2vXrkVtbS0uv/xy3H333ZAkCRzH4YYbbsCiRYvw8ccfQxRFuN1uvPTSS22+ZoZL\nBAtplktFsDLtBYMgCOJ4kVLBeOGFF5rdfuONN+LGG29M6jW9btGsJ0UFCAmCIJJHu2dJJZsMtwNg\n2m3JqtLOrSEIgkgf0k4wPG7RFAyFyQkdwxhrNtZCEARBpKFgeN0imNo6C+Optc/jlc1vp7JZBEEQ\nnZ52TatNBR6nCDBt7oWcoIVRFqhAWaAilc0iCILo9KSdhcHzHDwOLeitUAyDIAgiaaSdYABAhtsF\nAJDVxCwMgiAIomXSUjCy3JqFISVgYahMTXVzCIIg0oI0FQw3ACAsSy3uS24rgiCIxEhLwcj2aC6p\nkNSyYMiMBIMgCCIR0lIwMj2ahREhC4MgCCJpJCQYX331FXw+bQW7V155Bb/73e+wffv2lDasLeRk\naBZGRG456J1o6i1BEMSJTkKC8dZbbyEzMxNbt27FypUrMXHiRDz99NOpbtsxk5OhWxhK6ywMCoAT\nBEHEJyHBEEVtft+qVaswZcoUjBs3DuFwOKUNawtZXieYykFKIK3WHsMg9xRBEER8EhIMjuPw3//+\nFwsXLsSIESMAAFICAeX2IlMvQJhIaRC7SCgUACcIgohLQoLxl7/8Bd988w2mTJmCk046CYWFhRg+\nfHiq23bMeN0ioPIJCYA9hqGQS4ogCCIuCdWSGjp0KN58803zfb9+/fDII4+krFFtxevRLIxEBIMs\nDIIgiMRIyMJ47rnn0NDQAFmW8atf/QrnnXce5s+fn+q2HTNupwAwHmoiFoZKMQyCIIhESEgwfvzx\nR2RlZWHlypXIz8/HokWL8O6776a6bccMx3HgwENFAhYGIwuDIAgiEVo1cW/9+vW4+uqrkZ+fD47j\nUtWmpMBDAEPLMQl7gUKyMAiCIOKTkGB069YNf/nLX/DVV1/hkksugSzLUJSO3bkKnADGqVBbWEkv\n2sKgoDdBEEQ8EhKMF154AQMHDsRLL72EnJwclJaW4re//W2q29YmBE4AOBWhcPPCJlPQmyAIIiES\nEoyuXbvipptugtfrxb59+9CzZ09Mnjw51W1rEyIvgOMZGgLNTzC0i0SiS7oSBEGciCSUVrtt2zZM\nnz4dTqcTjDHIsozXXnsNZ599dqrbd8w4BAcAoMYfQn5Xb9z9yMIgCIJIjIQE469//SueeeYZc5b3\nmjVr8NRTT+GTTz5JaePaglMQAQWo9Yea3U+xTdxLJA2XIAjiRCUhl1QwGDTFAgAuvvhiBIPBlDUq\nGTj1+ld1gebbGT0Pg4LeBEEQ8UhIMDweD9asWWO+X7duHTweT8oalQzcorZMa12gJQvDFsOgUucE\nQRBxScglNWPGDNxzzz1wOvW1siUJr776akob1lY8DkMwAs3uFx3DIAuDIAgiHgkJxuDBg7F48WIc\nPHgQjDH0798fY8aMwbJly1LcvGOniycbqAXqwr5m91No4h5BEERCJCQYAOBwOHD66aeb71kLE+La\nm64ZWQCA+hYEQ6bSIARBEAlxzGt6d/TSINkuTTBCavMuKYWKDxIEQSREsxbGvn374m6TE1gvuz3J\ncmYCACJoIYZBFgZBEERCNCsYt912W9xtLpcr6Y1JJlkOTTBkNJ8lJavWyoEU9CYIgohPs4KxdOnS\n49WOpJPp1GZ3K0LzpUHCSsR8TRYGQRBEfI45htHRyXRogsGEMBRVhSSrqGloKh4h2fqMYhgEQRDx\nSVvBEHkRHBPBCRJCEQUvf7YF97+xCrW+aNE4UFpjviYLgyAIIj5pKxgAIDAR4BWEIwp2HtKEoSEg\nRe3jC1ulQ+yLKREEQRDRpFQwZsyYgZEjR2LcuHFx93n66acxZswYTJgwATt37kzq9QXOAfAqghHL\nchD4RunAvLUtKDcfICcIgjiRSalgTJ48GbNnz467ffny5Th8+DAWL16MJ598Eo899lhSry9wIjhB\nwaqtJRC6lsB5xjqE5UjUPpxoWRUkGARBEPFJqWAMGzYM2dnZcbcvWbIEEydOBAAMGTIEDQ0NqKys\nTNr1Rc4B8Aq+WXcYzoFbIORUY1/DXgDAtxuO4I7nlwG8AjXsBgAE5I5dgZcgCKI9adcYRnl5OXr2\n7Gm+z8/PR1lZWdLO7+Qd4HgVgFXGpCpUDQD4+Lu9iMgqwMuA5AIYcNRXgtpwXdKuTxAEkU60q2DE\nqkeVzJIjDl5bdc8epygLldgupoLjGZgigmMOVIdq8OdVf03a9QmCINKJhIsPpoL8/HyUlpaa70tL\nS9GjR4+Ejs3Ly2pxnwyXG5AAzmW5mioj5fDk8HCduR5SST/tQ0UAVN6Uz0TO3ZHobO1NJfQsLOhZ\nWNCzSA4pF4zmqtqOHj0a//znP/Hzn/8cBQUFyM7ORvfu3RM6b0VFQ4v78EzQ/s+w9q0JV2PJzjXg\ns6vgyq7S2qiKYLyVbpvIuTsKeXlZnaq9qYSehQU9Cwt6FhZtFc6UCsb999+PtWvXora2Fpdffjnu\nvvtuSJIEjuNwww03YNSoUVi+fDmuvv
pqeDwePPvss0m9vkvQFlHiXFYBQgaGf+76PHpHRQD42HWk\n/FIAHtENnkvrKSsEQRAtklLBeOGFF1rc59FHH03Z9Z2GYDi1dFk1kBVlbRgwVYh5fGWwGk+u+Qem\nnD4el/UZEXMfgiCIE4W0Hja7BC3obcQwlOr82DvGEYxSfxkUpqDEX56S9hEEQXQm0lswRK0EO+fU\nBaO2B/p5Tmu6oyqALxze5OMGyQ8ACMvNV7wlCII4EUhrwXCLRgxDn8EtO9DV0TQLi6k8uIYe6J99\nSlSswhfRlncNKyQYbUVVGRSV1hshiM5MmguGbmHoAW0mO83MqShUHqrKIPA8VKaamV0+3cIIkWC0\nmfvfWIXpr6xs72YQBNEG2nUeRqrxOJzWG1XQXE9cDMFgAhTGIOjbVKZC4ARTMMjCaDt1/kjLOxEE\n0aFJawvD47CWkeUUTTyatTB0wTDWxfBFdAsjRgxj/9E6vDF3G8IRWkODIIgTg7QWjAyn23zNq5p4\n8FxTo4rZXFKATTBMC6Pp6PivH27Ext0VWLmtpMk2giCIdCStBcMreszXAtMD4LEsDMZDsVkYsmpY\nGC0HvRWFArkEQZwYpLVguEXLwjAEg48VtlG1x8A3dklJ2gzx5oLe8QufEARBpBdpLRgem2A4OD2e\nwZresjHDQ/F7AAAgAElEQVTTW9HDEXuO1EBSZYQULR1XVmUoKsUqCII4sTlhBEPUBSNm0FsXkcMl\nWsziixV74dfjFwaUKUUQxIlOWguGyFvuJ5cuGDFjGLpLqs6nVazNyXKgIRItGDQXgyCIE520Fgw7\nTkGzNjjEF4yIpEUkvB6hiYURK7WWIAjiROKEEQw3r1sYMQoNMt3qYLpwhCXZzJDyihkArBTbpscm\nvakEQRAdkhNGMFy6haEqMZaA1YUCTNsWliWz8ODJ2X0BANWhmqhDhG7FcA9bBL+anmuAl9UEUOtL\nvlXV3IJaBEF0bE4YwRD1dNqv1xxtutEUDO3/kCybFsXJWbEFw3HqVnA8Q6G0LUUtbl8efnsN7nt9\nVdLPS3pBEJ2XE0YwOF63LNQYt8yiLYyILCMoaym1fTJ7AQCqQ7XRh4S1SYFBRks/tgaVFIMgOi1p\nLxhy2ckAgGxeXys8xjwMQBcTm2AoqgwA6JGhHdfYwjAEI8Dqk9zi9IZcUgTReUl7wegdvgjBddeg\nb9cu+icxYhgGuphEFBmyPtvbI7qR5chsIhjGefxqHVRG5UGawy4SKukFQXRa0rq8OQA89KsLUFUf\nQqhRVVnGAK6RdjDdwpAUGbKqvRZ5EV3dXXDUVwyVqeYCS5ygWSAyIjjqK8VJWb1TfCfHj2RbAXY3\nlEqKQRCdlrS3MFxOAb27e8Hb7jS48UqENl7VdGfdwuA4hoisTeITOAFO5oXMFDToqbbaBtl8uat6\nT0ra3l4kO85gPx25pAii85L2gmHA280JxQmoMYwr3cIApyKiaIIg8gJ27NUC4Ha3FCfIYLIIDjzW\nlW5Kq44w2Sup2q2KjmxgbNxdgcNllMRAEPE4cQSDbyZ2YWAExDkGSRcMgRPBIlqA2xAMSVYAQQYL\nZyCf749ifymK/aUpaXd7kGy3kV1LO2qWlKyoeGPuNjz+3vr2bgpBdFjSPoZhIMQQjOCmK6PTbG0W\nRlh3SfEcb2ZEVYdqMe+HA/jvqoPwXKRAVQR4oQXTG5cS6cwku1O3n491UBND6aDtIoiOxIljYTSO\ncAOAHO2aMkqDgGOIyDI4cJAkBiZpa2k0RHz476pCK36hiDAeoWwrf76zeg8KKran4jaOC8mPYXR8\nl1Q6uRQJIlWcOIIRw8JwORvVlTItDIaIIkHkRS27Sq8/JTNNKIwMKaaI4Fj0sq4A8HrB/2HWtjnJ\nvoXjht0llYyOVO0EQe9kx20IIh05cQQjhoXhaSIYepYUr0JSZIi8gFBENoVE1ifzRVkYrKmFYWDM\nFu9s2AUjGa6aqLTajioYHbRdBNGROHEEI4aF4XE1CuHYLAxZVSBwAgJh2YxzGIFwu4VxqFSLXZhi\nYqM23DkLE9o1QlHa3pGyTpAlRfNDCKJlTmjBcDtjC4bgrYfMFPgDCv46ZyMYixYMiFpAHLIT/oBm\nWRgzw+2zvjutYERZGG331US5pFrRMW/YVY7f/W0pSqpSn1BAFgZBtMyJIxgxYt4ZrmiXlOrPQaYj\nE3yXMiiiD4oSXbAwohoWhiYYTHaYLimj9lRYiZjn++7QcqhMBWMMSzcVobQ6kNR7ShX2zlNOhoVx\njC6pd7/aCcaAZZuL29yGlkimhaGoKpZtPor6QKTlnQmiE3HCCEastFp3Y5eUKmJkrwut92YV29gW\nBlMcZmaVYWGEbHGLXTV7sbN6Dw6U1OOjxXvwl1lrk3ErKSdeDONQaQO+WL6/1aNxtY1ZUrES3Foi\nLDWNKTVHMgXjhy0lmLNoN96c23kz5QgiFieMYHAxg95Np6FkOb3WG0MwDFFQNaHgTJeUPehtWBjR\niw4V+0oRCGnbVCh4o2A2NpVvPfYbOQ7YO3hFsVxSywuOYuHqQyipap2ldKwuqWPtwn/YWow7X1iO\nzXsrEj4mmS6pitogAOBAMVUyJtKLE0YwYloYjbOkAHgcGdYb1TiGB2OApGdCGYLBZKdtlT5NFEK6\nYAicdu4Sf5lZH5f31mFH9W7M3v5RW28npcSzMCKyJh6hSNMAf3Mcq0vqWBXj2/VFAICVW0sSPiap\nMW/zp0ZxESK9OGEEI1bQWxCafpYheszXzL52hsrb0mptMQzd+vAFNVdUSNYEY8wpV0DkRZT4y6zl\nNljT69U0hBFppfsk1dhFQra/1q2NcOTY3T0dNbaczJnenP6F2+91b1EtZi/cYT7D9mDVthJ8vmx/\nu12f6Pyc0ILBxVgbwy4YUYstMR6SEfQ2XFK2eRiBiPaZYWFkODzIEbvgaH05Xvz3Fu043upoGWOo\nrg/h/jdW4c15HcvXbe/o7C4pST42wYhXSyoQkrBxd0XcyXxMH6G3NobBmQLdijYmUzBiXP/ZjzZh\n1bZSbNlXlbTrtJbZC3fiqzWH2u36RGIEQhLmfLML5bprsyNx4ghGgr1OhsMuGBxGnN0TYy48CVB5\nax6GKIGpHKAKtpRbXTD0oLdbcKG8SoLMJJiuCcHqaINyEAdLtMqoW/e3XycSi6gYRpSFob1ubUA5\nnkvqrXnb8cbcbVi/qxwAsPtwDZ58fz3qfNFxoFjC3hzHECNPWgxj/9E6LFwdv1OWlPa3JimFuGPz\n5epDWFZQjDf/s629m9KElAvGihUrcO211+Kaa67BO++802T73LlzMWLECEyaNAmTJk3C559/nuom\nIcMl4vSTcmP2LN6oGAaPrAwHHCIPxmwuKVECZAcAzoxzGGJiWBhu0Q2oAjieAZw+UrZZGIX1R9Bg\nS7sMyiGsOroWSowZ45v2lmHBmn1tueVWERXDUJq6pEKtzUCKCnpbr38q1Kr/Fldq8yx2FNagsLT
B\nFNLGIYBV20pQVpOa1ORkdaJ//XCj+Zp10BgGTVLs2BhJMnX+jpeWndJqtaqq4qmnnsL777+PHj16\n4Be/+AVGjx6NAQMGRO133XXX4S9/+UsqmxLFq3+4DDzH4bPvm3bCjV1STgevWSeqzSXFK2BG0ULT\nwtA60bAew3AJLr04IbRSIrIzatGlN7bMxmnCRQC6AgDm7luIVcVrURWqwfgB10a16Z0d70LIqsGo\nwFPIznA1afPhsgYcrfBjxDk9W/8wYqDEmbgnHWMMI56FwXGa28b4LCJr5/WHpOgTcEBRhQ+zF+4E\nALz70JXNX9B0CSXeMaakWm2sU3aAvlpRGMSm+R5EB8HwnneAn0oTUmphbN26Faeccgr69OkDh8OB\n6667DkuWLGmy3/EuSGe6p2JYGA7eYb5mjIfLIUAUeIDxCMu64vNWQUJTMHQx8ellzr0OD5i+j2lZ\n6P93dWsl0asjWtqnxyWgMqi5pbZX7WzSJiFLG4nvKyuLeT+Pv7ces77c0bSjPUZYnIl78jHGMKLK\nm9teG9+DoUkRSXvhD+pJBbZzGKOu+NdQ8fyqt7GudJMVdNa3lQcq8fKmmagKNl6X3SIVy7J3UL1I\nyuz9YyEQknCwhFKNW4QzkiY6wq8lmpQKRllZGXr16mW+z8/PR3l5eZP9Fi9ejAkTJuCee+5BaWnq\nFiK6dvjJ+OVVp8XdfuGZPaLna6gcnKIuGCoPcCq6ZLnA8aqZHWVO3NNdSUY5kFxXDqDooqILhSEc\nkwZeBwAI6gKU5XGaIlIeqIzbvgOVzT8bo8NtK/FKg5hZUq2OYcQ+t5GIYFoY+nl9IQm+oCV+HGIn\nLdgpD1RgXVEBPtjxSZNtH+38DHtrD+CLvf+Ne/xx8+u34jLBsIxguPUpzC11NO219seTH2zAUx9s\nMOepELExfukdUC9S65JKRCGvvPJKjB07Fg6HA5988gkefPBBfPDBBy0el5eX1er2/P7686Pee3X3\njihw+Ntdl6Ffr2w4HQLcQgZCiuYr79Y1AxFJBSvlwfEq8rt5cIhXwQwxMFJleRV5eVnwq35wHIeT\n8/NNC8N0RelB7755eQAAf1hzXzkcAjiH9qwkVcL+0F5c1Oc88Hy0nleEapq97+wcD/K6eWNuKyyp\nxyNv/4iHfn0hzj61W7PPqaja+oP2ZrrNaxrfJifwLT7/3YeqwXEcTj+5C6oCVuefneMxjxV4DhIA\nt9uhfSZo9/vlj4fw5Y9W4Dgjw4luXa37inXtBsGyHhwO7TxOp4i8vCwonL4YliP+76akzpqh35rf\nViAkweMSY04MjXWuzCztee46VI1Fqw9h2i+GwCHGHreNu38+AGDBCxMSaosvEMEvH/kaYy/tj9sn\nDY66tj2dN7eLF12z3QmdM5mU12i/KyYIx/T32xaO9/XagsejeTk4jutw7U6pYPTs2RPFxVYdoLKy\nMvTo0SNqn5ycHPP19ddfj+effz6hc1dUtH3t5WBQG+EzBnTxiKir1UQii89BSAmAc4YQDkmaC0bl\nwXGAIOqdfyOXVEiSUFHRgEpfNbIdWSgr85kxDI5XwABwvHZsxMfAgYfC6YHysIQan2Wqv/jjLNx1\n3q0Y1PV0RGy1qUrrK5rct/0HVVJWDyGOu+H9/25HbUMYr/17M566dXizz6XaFliuqQmY1wyFdSuq\nPhTVjj1HarFs81H89ueDzM7vgVd/AKDFG2qqY5/P6GN9/jAqKhrQ0Cg7yiAQjKC21jrHrP9swbkD\numFAb+u3c7TassxM11lYRkVFAyKS9pyliBL3d2O/55Z+W2t+KkVOpgsOkcczH27EL0efhqsvPCnm\nvo3PtftgFQ4eqcG8lQcBAKf3ycawM7W/CZWxmNl8if7W9xdr1u2XKw/i9kmDo46zWyrl5Q1Qwslx\nXx4LtbUBVFQ4j9v18vKyktJfHC9CumtZVdWkt7utApRSl9S5556Lw4cP4+jRo4hEIli4cCFGjx4d\ntU9FhVW+YcmSJRg4cGAqmxTFWf20gPMVQ/tEfZ7j1DoizhWEUxS0CX66MDhdWmdkWg+qVXyQMYa6\ncD1yXTnarGjTwlCi/j941A+m8KaLKhRRcKAsOrW2Pqz9UHy2pV+DsERlR2E1bnluKXYVVpuf2V1S\nQTmIz/bMR0PEpzWTGXMaWk46jSo+aA96c34ArIlL6rl/bsKaHWUo2NfUnSYratxaUlYMI9ol1aQ9\nKovK1vrvqkL8dc7GqH0CsmUVMS76PEYFYaWZQEVr5mG8s2AH/vHxZmzao/12P16yF8s2H03o2K/W\nHDLFArC+j6/XHsKtf/selbq7Jl7cpzliVTMwsD/b9ophdAa2H6jCii2pL3bZHJwZw2jXZsQkpRaG\nIAh45JFHcMstt4Axhl/84hcYMGAAXn31VZx77rm44oor8OGHH2Lp0qUQRRE5OTl49tlnU9mkKAad\n0gUv/P4S5GZGj3Z6ZuZhn38XOF6By8EjIvOmMBRlaCPnxhaGwhT4JD9kpsDDe3GkrMF0WxmWhRHL\neG/hPrjPFgBBQddsF6rrw3ApQXDMiQGu83CArTNX9/NL1sjX7zqChogPWc5MfLxkLwDgw+WrAUcI\nkNxRncJXB7/DsqJVOOorwR+G3mF2ynwCQwSmMkCMACpvdtQH6gohnfEtxOL+CEeiXVqc2w+hSxlU\n9awm56ppCMfNkrJiGNp7o/RIYxSFtdjJ2Z+TKuiuD92JVhfSRLesPn7QO9EYhr0dWR4rQWLOot24\n/Pw+sQ5pFpdT+0I++16bgb1lfxVGX9A3KsgvySqcjpbTmuwDhsaps2Hbs23v9cuPpZjk8eLFT7VJ\ntj8b0rvd2mDGMIz/GcPhMh/65Hm1eGo7klLBAICf/exn+NnPfhb12fTp083X9913H+67775UNyMu\nXbKapqlOPP1qLN+5B1JJfzjO4iEKvDlBz8fpFpEhGODAmCYYxmh++x4/Nh/aAqGbkVarB70NS0MR\nwFTNwuia5UZ1fRicIIHJTuzcE4HrNGteh3FOJjnAOSSsKdmAq0+5XAsK8zL2ur+B53wguO7aqJG/\nsWTsoYYi/b0uGLYMjHjWhqKq8AxdCjXshqKeAwDYXa11aI7eBxEqvihqf9fZP4ITFBRHDgKITu39\nfN9cfW6J1pnaR/JxLQxBgpBbAaWqFwAOispa7OQCNsFo6FIAFA0BGKCoCoKKH+CA2kj8DB1FjR7R\nx3s2stx0XkpbaDwp0RBXvy3oH4ooCQmGZBOFmobo1R6jLYz2FYyOOHJujKqyFhMtUkajWmTrd5Vj\n5vyfcOXQPrhpzBnt0yadE2amd2vwONyI7DsfzJ8LABB5DpwjehKNkR2lveGhMsWsVMv02AVrlCUF\n3hb/UAWAV3WfPwNEOao2VUTVrlcf0gRDLusHMA4bywoAaB1K4zbZR+heUZuAaMRAmGlhcFh86HvM\nWPV0lLvLjk/Wrsm7QmZpEJG3OqzGabWGEMpoOtFob8
Mu7Pb9BHDaeaJcUk2ypLR9nP23wTlgK4S8\nIwA0AWtRMGwuqbCnBJxX8+f7pID5B6jy4ZgrIwLRa3o3Z21INpEoScL6Jo1Fx7i0L2QXjMQypYx5\nLABQ2qiisN36SMYqiolSWh1oMgGtvQUrEaQ41u7xoHEtst1HagEA63Y2zTA93pBgxOGZ2y7GpMv6\n47STciEIPDhno1RA1TbiU3koULG7qCp6m/4/7w4AnArOFdRFhNMFQ4HLof3PcQxQHOYxRqmRyqDW\n8alBL5i/C474iiGpsjY/QrT9IYqRqFFkWLW2qUw1O2qe4zB//9eojzRgdfH6mPdeI1nxFKP4oGHp\nAEBIit2BSUwTTCvXniHMQlChgvPolpIhDkoEkZNWg8+pMMXM6PB4fd4J79XOoygsZicXDMtm525Y\nGMPyz9MeR14RGLRYjp36SHQQsSZUi/21hdFus2Y6NPszLqlsWTBueW4pNu6O/4feeIEqNY6FkQj2\nTs5+vLbNOsfxSiFmjGHGO2tw72sroz5XErTMUj0PISwp2FFYbT4P+/Xs4nu8MWuR6e+NZnUEVx4J\nRhx6ds3AuEv6g+c4OAQOnKsZwWA8GBSs31MStc2wNMT8w3CctFsbsVf1wvCztJRbjlchijAtBfsK\nfuV1flTUBrFgk7Z2BgtmQY1o5zOsBrMIIgBnv59QHCwy39sXclpyeAVUPSbCc0A3txbs31a5I+a9\n19kEw+ioa8OWOyckWokKhrABgE9uwM5DNXjqgw3aB7b28Rna8cYf5+6afVAzy+E6Y6M54jRGwcw+\nQx7QXVJNO5nfv7QCr3+h1dvx68Iw/lRtljznCAGMNRGMunC0W+qx1X/Di5vehF+2xUD0Symq0qRU\ni6SoAKdC7HkQxbVWTCTbGz/rZ86i3XG3Nb4v08I4BsGwWxGSrKLWlnUWbgcLI968oHirODYEIman\nXV4bxO/+9j1WbYsuUb+vqM7MBmsrc77Zjec/KcCan0qbtPd4WBjltUE88OYq7C2qjfrcKl4ZLWSJ\nJKykGhKMBBAEHkp1tG+eRQkGh7As4XBFbfQ22z5Cdy2LRqnqhT7dvRh0UncAgCgycE6tc2cRt+nG\nWrPzKB6cuRp8Rj2YIoCFMsxyJIYY2F1SQtcyfF8zD/uOan9MQZtgzNv/FeoytE6L5zm4BK1zO1h/\nOGo/QAtu7w/9ZL43Rlp1YWtkLuVr1XUDIQlPrPmH+Xm1VInVRwrM95zNAuIztOMNwbB3xEfFjTjS\nUGyN6owZ8oarq5kYhpGZZVgYOa5s/YLasQH9/pik3XNdIwvDiPUEFMuCUvXJbw+seBSvFcyK2l+S\nVYh99sJx8m6gj/accjKdZipvLLo1M+ehqUvKsDAsKy5Rl5TdXbZ43SHc9/oqFJZqAtkeWVLxKg/E\niv3sOVKLe15dif+sOAAAWLNd68SNcjAGz3y0sUmGXCzqfGHUNMRO0zbYul/77Rws1n4TgXB0okGq\nWbDqIKrrw5g5/6eoz824lv6TtwSj9dc4WuHD58v2J+07J8FIgByvE9LBcxAptGUBKZYYaAFs1YpV\nGBZGyAsW0YLqnD5/g6kC3E4BXpfWiQgOm2CE3QAz4h4qwCngPH6ogSyYbiwADSFdMMRGMQNexfwf\ntD84Y10OA5nXOlSO40x/v8pU7KiyRr+MMby48S1Uy5YLxReQsLNqD/bVaUFvpa4buIw6bDlUhLte\n/gE1YWt0dCS0HxsjX4PTrQm7BcRn1gK8bJbgsLuGKt3b8fGuL6yRp25lGfenKGqLo+KAHESGwwOR\n10rOc4IClVkuKTWoTfybtW0OygNNV+Lzyz4Yf6EqYwjIQURUCXtrD5hpuYDWkQi52vGcU3vGWR5H\nVPpxY5oXjNguqZYsjFjuGskmCkfKtOdbU6+10e5ikY9TDMEXjC0YscR/2wHNqv16zeGkXPve11fh\n/jdWNbuPEUMzEkKOt2Bk6ll2TZ6TEXMzBUP7P9GK23aemrMBX605hI27E199sjlIMBIgv2sGHvv1\nCPx53HXWh3pw+qW7LwWLeMA5w+Yo+srzTtb2YTxCWy+LPpkqwO0U4eK1Ea8gKmZ8hEU85nnBK4Ao\ngeMYWETrcAzrwxcOAWBaJwwgvGeotl1yIcOt/QgbWw6GEPE8h5AcAqf/+2zPfEiKBEVVsXD9XjMV\nlelVeOsCIawqXqt/xkOp1WapL9tfgHhwDv3adgsjsw6us9aYHWLjWIKTt7l09OM4dwDgFCgqMztk\nvksphPzCJtf0SwFkOrVAP8e0uJCiqKZgsGCmue+yoqYdyfd1/4F76BKAU6GqDA229hX7rJIskqxq\n7QIA2QEOgNspQpaZfm/MDPAbZGVo34nj1K1wnBztBmw82pbMVQ2tDn7TngoU7LXmuDw6ex2mv/JD\nk3uwJz3U+bRnaIjDdxssd+Xxqlbrj1P/K5aFITRKgEgWzcVBGmfpBW3ttT/LzXsrzPkxycSr/602\nFifr+7EGMEDiFgZjDDsLqxGKyKabraVabIlCgpEgp/TMQj+9pAegFSn87f+ciRyvE0q5NstX6KGN\njjJdthGlKkIN299rFoZT0Kf/Cyo4l80lZRYsVK2ihYZPX9+29WAZ+NwKCF21YoQs5IUayAQnSvC6\ntX2DSgheMQMPXHCXdgpO74Q5hpAShlzfBVJlTzRIPtSG6/HV6kOYu0brzOSyk6HWafdaFwiA5/TM\nrf2DwfzapMafiovMjlGp64bIviHmLRqussZZXHyGDypjqAnV4utCrQilGtKqAwucNafBsEw4UYLn\nwm8R5GrNUanrtAI4T9llrnpoEJACyNTXY+f0DDRZZQhKIf06lmB0cWnZb/b4i3Y9GRAjUBlQbwvy\n76zeY7tOSLMmAUCMgEErLaMyBklS4Th5FzwXLgbfpRTiSbugTXRUAV6G2L0YYs/oEbSsMESUCBwD\nCsB5a82OKmxLLFi3sxyvfmGtA19U4YM/pE0U3by3An94bSWq6kJRnZzhglIUFTUNYRSWWgJ4vGIY\ngTguqVgWRpMU1iS565uL/5hZemosC0M7rrIuiNe+2IYH3159zG2org/h9f9sQ3mj0vweV+xZDcbz\nYY0sjETXhdm6vwr/+KQgytWVLCEmwThGpk0cjMv0yT0PTbgaALS1LwBkuT3RO0vWXA+m8nA7BTh0\nwdiozrO5pBpZGKaLS/9MtzCWFhwC77UCf0xyaIFiQTLrKIXkENyiG909WoBb5YxKu/ofsSzqa3oA\nYSWMo5V+0zLQhEs7T00giCMVesC6vqs1GVGQo1xwSnUvXOwZq20zBEO3FOQqK/7DGPCZrQhgeMcI\nvb36WuiCas1X0QnxNXonZ/3oxV4HIHTT4kKSIiGiSqZggIngeAWKwkz3GwtYgmEUioyVVszxShML\nY8XR1WY6bnXIcsFxDq3NxmSqyrogxJ5aDSzXaQVw9CoE5/Fh9U+lZsaXdpNWR6ooKjaUbYHYrRTu\ns9eYHX04g
UKSEUnFa19sQ70/ghVbiqMyoczzq6xJxlQy/NmL1x/B9gPNL/zVGgujsbulpa4x0Tkw\nDXHcYgDA8wycK2B20AFbuRQjHmTEkuz9bWFpfdRaNi3xr+/2YtOeCnzwTXTyQ7y4XGMLsLUxjCPl\n2mDHvjBbsgw3EoxjxO7yObVHHpyC5VLJz8mE02E9WjPrBzBdUkbgWoEEzhnSOmhVNK0IIbcSYm+t\nhARTBW3tCHswWD+nQ83U0nEVBzgOCEohHK30IygH4RHd5voeKq9nYhnrkSsOK4iuhBGKKFHBd6Md\nlfV+lNQYCxoJWhsBbU6JLhiGiDiYdi2tI2Xgs7UfrFw8AEqdJlyyKkfFBKDPPTHmsLgztG1yVU8z\n/lNcXY/NeysAm8Xi6H0QzgHbAFiikOnMwIotxZAlDuAVyKrNJSW5ENl9AQCY14o5D0VQwBhDQ0Tb\nluPMRnWoBosOfQ8AqI1YmVG8JwDnoDWQnFp5lkdmr2tyOk7P9LILPOeyRpolVQFsO2C5mzbzX6Cg\nfJs114VTwGXoqdVqdCVae1B5wY+F2HbAKBOjwtF/G/icCpTXBPHOAm2kaYxo2zoPIiwp+GTJXrz4\n6ZZmR66+QMQaVNiIlSUVq6wJn10F8DIW/FioXVeWwLm076W5GIO9w20sllHt6LYP7iErUMNrIh8M\nW22V4gh2MCzjr3M24tMYa+nEw6jj1bjN9vRi+/ca18JIUDBifSfJckOSYLSS+4ZOw+DuZ2NI3tnm\nZxzHobueqgoAORkZeHX6ZfjddYO0D6LmbAhwOQUInPUZ5wrYgujWr0LsZqXpCjxvncdmfXRvGA6A\n01JyAazacQRPfPkJwkoEHtENgRfgFJxgumWh8LovVhHNa+4trsK2I0Vw9tdcUizianQt/Yet8qb4\ncYJiWQL6viLTXW9iBJzHByGnGkpdNy1+oAuNpESQ49QzmcKZ2v2qIsJ6qrA3Wxe0iBuRQ/rzE2Ts\nKKwB74ox74FXTMEoLAri/a93aRYZr0BWmCnsTHFA1WNBxrViWxgyFJuF8asz/xcCJ2BbxU6tVlgk\nuryIkFWLqowt+sEx3B+6YHA2weAzGsx9V24rwcb9tnRovhabyreas/Yd/XbAfY42XyUsKVEj68bB\nUmPlQj6rBmLeUbjO2IgFPxaiqEJfo8V9bILhC0pRKx3a/eH7iuKnuG4J/gDPsG8BRzjKqoll4TRO\nGdUBXG0AACAASURBVC1R9sN15no4T92GuXrm1OJD38M95AfwXUpRVF/S5BwG9ooHDXql5IgiYfb2\nj7CzynIvylnac68QdkNVWVSBRimOBdMQiEB116ImmHhRQEMMGmui/bu0W2OqKRjG/CRtv+bSarcf\nrDLvO5Y4JGtOCwlGKxmQ2w+3D745yqIAgG4eSzCcghNOh2CO6KIsDKa5pK7rP8b8iBOU6DTdxqgC\nnCJvm59gCUbEGHTLxjbJHNn3d2mi5hE8UFy14Lx1OJS1WGuGKpjn++KHPVEdGot4rJnsvKKvMMgB\n4CAw2xyJRllhX63UgsOcI2LGIVRfLgDOtEIiTMKuI1r7QruG6seL5qi/78n6XIxAllXt1yitos+F\nUX1WlVoICvy6NXDoqCEO2tK4ZTU+lNbVWc9Hb0NIDmPzngrUhzXTfdzJ43CaMNw8X3lNED/sKAQA\ndPd0g4Nz4VBFDZYXFKNe1s7X3WG52VzQ3F1mMNyGZWFYLinnqdvhOtcKvBvZVgYO3mH63oXuWiE8\nMf8wnl/xEd7+cru5X60vjltEjD2qNrJyYsUwGGPYWbUHSw+vaLLtyffX4+G315hCYff1Hy5rwJqd\nxZAVFXW+6LphRdDmyAi5ZVGxhFgWRuNRcbWi/Zb4HMv6MuYNuU4rwMvbXjMXHWuMPYXYsDCe+Wou\nNpVvxZtb3zW3CbL2vQWcxXhj0cqoCgZGsLix66vSXwf3OatxOGdhzGvHwhDoxnEa+3Owx3tMC6PR\n/cQTjK37K/Hiv7fgrXnabyOWNiQrz4EEI0nYLQxXIzGxp+ACgNspINPpxbhTr7HtE7+sF1MFnHda\nd2t+Aq+YgdeI3tf07qIFcjlRMjvy+Qsi+GFrMer8WkfrPtsWuJOtWeWcIJsBdqUhFyycYbuWNlHN\neJ/tdWvioQuJ0T7thQAmi5pLqpGYGBZGfTCA0toG/Tjt56fKvFkKJcRrI3g1mNVkAp8xW1wqOg1y\neV+z7XVhv3VP9mtyKg5X1gJMqzZsuOB2HKnAa//ZhrV7tNIjny85gp8O6B06r+CNudvMLK5sZxYU\nSQAnyPh+81EE9LIpl+Zei1OyTjKvA6Dp5E6j7YIE3h2AGrTWi+dt4mK4Ag3CShgRSdFcEJJ2T0Ju\nBUqFHdhSaQW/lx9dYQbW7fEd3tYOzuU3S9JogsFQK0V3tC99ugW/+9v3eH3L/+GLfV/CF4m2vCrr\nAnCduwKf7JoHILpU+vyCDfiw5GX88V+f497XV2LpJqtqL6enR/NZtVHHxJrpbc1jYfj20DL4mR4r\nsv1d5Dpzo46xx5PsRFkYumCUBLV2uQUrAYVj1t/lT8HVUccZFkYTwQhoAwZFaJSFqBOQAiitDuDl\nz7Y0qTzcuMO3p2LbRbixBRavirNBlb6WixGzUE2LxroeBb07GD0yojOoAFvVyUZi4HZqP9QshxWI\nRTMWxjmn5GHcyH4xXVL6GkxwGX8IeufPFB4Ah/e+2hXl+zeQy0+K7pANwSg7Obo9hktK/+PPyXAB\nqhgd9LYJourPBp/hM+cqGGJiWBhLtxy2soxsM+IlFobY8wB8rBo8eM2NZVoY+ig9s0ZbA9yfY2uf\njJqAXnZEFwxr4SpN1DgmAuDQv4fW4dQFtM66rF7vcGSneQ+cIOvxnDB4JsIjujWxE2R4XCIkpnVA\nua4c3HXerdrt81qasxEEz/QPtGacCzJ4XeiU2vwm3wOXUQehizbvxUiPNmJKOV5nk98ObDXMdsmr\n4ehVCPf5S+GyDQbswuUe8gOcp23W2uVxQMg/hK/r5mBd6Sb4ghIqa4PmHAiDssbzVBwR8J4ANlZr\nMZoo102u5i6K9NkA99Al+HanlW4tqppA8hn15loqQPTI+l+7Psc3hUvMDlroWoJ5+79CNbRsMuO7\nNObG2JHi1AWzz9iuD4Qwc/1HELtrLqwcwVZpmbfNE8poiBKMQCQIX8TfpIJybSh+ActN5Vvxxx8e\nxxNz/4ut+6uwYqt2TaP/53kOBeXb8O/d88BYdLmbYEiGrKhY81NplHvqQHE99uhuP9mW1LC35gD+\ntesLKKrSRIhCakBzC9s+TkahTOA4VKs9UejltToDI2X27P5dMbBvDnqc0RubGw6a242smkynJRjN\nuaT69chFZobDCi732W8GkcMh7Ufn5LSAM++t1zp42/mYypkZXAAwKOdsbLIF2DleAThj/oXuRtM7\nJtcZG8Fk69ouhwCmCOAzfHD029Gk7XLJAAg51RC66nMXDDExr2V3Zf
FR2xwn70GY5SLT6YWf8dGC\nxqngM+t0V5XDFkuR8e/lO+E8xSbM9vsSFPD6SHLMsH74oASmxcIEW0kW0Qjm6+a/IwQnMsBxnGY1\neRR4XDwa5AjAAR6HC27RBQ4c6vijcJ3lg1KjLw5Wn49Tc06x2m6Ufom4oFTnm+nQAIOYd9Rsg1qb\nBzAtxhKWFGRlOBBq7F4y53hYHQDnkMA5bEvaNrJ0hBxNELxuh/m9fLjzU8gNXyOy7zwA0Vl9ZYEK\nDMjthyUbi7CjsNoUQkCb7GkXDHsRTk6UEcksQllNAHm5HiicdhznDkRlIBmdl0/yY1WxJkKXsf+n\nP/9GI2H9O919uBZ1IT+YIkAqOg3OU3aZKdONsXf8FeEybD9guf/s7WC8BMY4qHXdIORWwu+zLKvl\ngU/x1cpaBNddA3tc0V5aRlEVCLainKuOavOVkLcfqOhhDgzNET8PzNr+IQDg2n5XRnXitb4IPl6y\nF9/bLDQAeHrOBvO1PWj+8uaZAICzup0Bvy1lPBiWsZZ9As9QCcr+oWAKoNb2SNpERLIwkkSvTEsw\njHkLToeAGTddgFN7Wu6qHrkec0SQZROMU/O7xD232+HS4iH2UiM5WkZMSP9bzuP6AbILYs9CcKIU\n1YkbqasGP+3X/fpGp+uIxO3EAd3Npb93OQXzOMOtkuG0pQ0bM9sNq8ZMCbbiL11zdNcR49Et22Va\nLwAQVANwCa6oY3i3D5zbD45X9Vnv0efjmri/LOuI4xWAieA5Dh6XA1BEy5VmCIbkjC5HwqmAIwIH\n00bITI8PFVXVotrvB1M5uByi+T0D2sREo2MNBUS49ew0TpDBGWm0iojIgcHWvBxeMcUrvPsCaAkA\nDoSVMMKSApcTUUJg3C/QNO6hfWi4xmIXRfR6RG1yKLSOn8+sg5CnB9xtAfsVR3/E90dW4p/f7sHm\nvZVR82nW7i1sNAksenTr8zE8/PYarN5eAlUfwXO8ioKqzeY+isowf//X+GjnZ+ZnZufJR1sNxiDg\nHx9vRqWvQRN3Q0SKLMuoMliF8oAW77ALRkiJFpWGUNDswFVeAhQRql9LwqhnVrzEcInxuRXgnNbz\ntE84/XL38qhzu0Xtd8tn1kHIPwSfXpGBqQyctxZlmZZwheSQaWnxORX4v6VrmoiF7SmAzy2HxJp6\nCurD9VHJD5V1ISjQ3gsDNsF1+iYA8et6tRayMJJEpiP2WtoA4BasDvW5O6zO236M8WMDADWUEeXj\ndotO8BzX1D0BmCN4t+CCw5cPKfuwlhoatM7NAtlQ6rtAyNbjA4rW0ZnFEXseMjv6Jp2u2SjtmHNP\n7YbdpdGjwAyHC8afUeM2mi4po3JvVg04XgWTNZdZbpYLDbZ5CRKLWM/COMbbAOdA3dWhOKKuY3eN\n9evRBQfqbBaP7rrjVDdEgYPLoWea6Z1uhOkjcVs8B7wCzhEGxwG8rHWuiqRtq/b74OQVPQlBvy9b\n7MCwJP535CB4jHsQ5P+/vTMPr6LK8/631rvl3pt9D1khJEAgAcIWdmQTJGkWhRe1WxRFWxRwQXrU\nntHWmcYHp/vpx8exfbrtxWec0R573ufFcXoGX0VfEW1axBZwWFQSIAkhZM9dquq8f5yqU1X3XiAo\niCT1+QdS66lTt36/81vO77AEAKLfR+tJBu9qpsrOsHZCPvbM/dEQrVnl1kujdKRDSG4znxdm3EPr\nCYJP6mT3giLFVVY2FF6SR2Lns316jS2rUmjsPonG7pMAP4+6Hy0Wxnst/w/DBLNEjnWftX0ff3EK\nsHhc32l/E5BmAVE3FFXBn/Q0ZQNjBMydJ2BPrx21pXwrxDz28T3/AABQT1SiZkQGDKupX6XfEYnI\nAE+gIIrX/u8x1FXlQOOiIKoIEnXrx/YDoHEeA9eIv4BEJYQ+ngPALP0PAH86/SamFVYj2RXEZ1+0\nQ+bNb1guPISmaAaAcmiEwD3qA1jzqvpVY+kADa5yWhur/8OF9gcWopCKPgNUEWJmE5TmQrx3oBJ1\nVTnskLZQO3r7zcFmbyix1ZVojs7XwbEwLiO3jVqDtRWr4rbHBcF1rBaGWzSPCf91KrIj49jfXsk+\n4rahj84FgYdILItBxQp8y7lMoFpiD8aIlQniGEFg3Ke2IhMen/3H55XNQGJlfoZtH2LuJeV8gQ6t\nlSkgr0sCHxNjMRWsOXrlPb0xbTfdVUYW1aIpZbhr2Sh2bcMlBU2EKPCQJUGvEkyPjyIEmXMB4G0T\nEg1hzOmCRInw7F6Gu08U4z8do8+mVRSy4CqnB71pmyVb2zneuk93BSoCzhquEd1S0HqDCB80srgM\nhUGVgtKWixJ3JbuX26uAEzSo58z3YMR2fB7pvAI+UZzLmHhpVSZfRj/F7tC/xD0zQxf41gWtzOvR\nfbGjfoAuGMbJ5sRHdg6bx0EAga4Zw+IaMCbVmQJeGHYQn4TMkf8XZ6gVEvmqki5Cxit488MT+JsX\n99K5SYrI+j6il+ePrSLASVHW7/2qPSHgz6cP4L/+3IifvXYAB0/YYz/tfZ147vVPcbYr3hrsV0K0\nbIulb901u2wWlpjRCDGtGWImtQI5bxd+9QYtxmik5Z/oakJbxKz91hEyFZoJGdBE0IHgKIzLyPis\ncZiSMyFuO88JCY6mglHk6Y/VI1mFvYhUyfzgPYbLRxMR+nQa1G5rtggVqqLAsTgGEB8TIYawAphA\nNeIVNoyPsTdo20w0ARyoHzys2T94aymUZdOG29P6mIURG7wVWLtjS4hYra04Yhan4t294EX6MYwp\nzqJ+Y0MR6rW4NJWn/SPy9HyjbDofhgS97WxCogroQlDTrS6j76Sig3pCgZBwohnn6odP8kLgBfMZ\nBMUsPBmbxaUrO7ZGivFcutDo1Vd31HoDccv9GnEKEvGYmT+CAneSns4c8qH/41n6OVTo+twSIEbg\nhh/jhRvYOQDAiQlcXEaJllilYH1mfaARbRyuX4ee09qjB2rbzNGwoZxCoEJ3Zv5UjEmnc23CSgTy\nyA/Z+ZEv9Tk4hsIQFBrEVU3XrFEdIOEETMM9Z9QzUyQ9ecFUQLTvRWaBRbWw7ZlslxMj0DQN3Rq1\n9JRmGqP6n45jaGmn76IzRli3dnfhz5+fscV8DPqVEBRVs/32OTFqm+BpxBUNSJi6SOn6NvT5jnQc\nx1fBnWwA0R1O3BeOhXENcb4qkxzHsUwpm8IAkOo2zUyfvu+Bm8ZhTd14WyE9A1HgIXNm2mZsKq/N\nOlHjLQwDluranYolefXmDo2H1y0mXLbS6zKVkUcWY6wZe1mT2O0CzyGpc7Rtn0s4v8Jgqxhqpjst\nJZd+ZC5RhiQKTFm6hlM3lhrlIQg8C9hTIURAhAibbMgEsqufCS0lIlF/uP48gr+DChNNQDDB+he8\nKwS/TGMsIi9C4kXwvk5zBr0S605T6WjW1l8iOEEDQNBJ6MhR6w3aLCoA4APt+r4AzeQCVSYun24p\nhj1A1A2tJ8DOccs8OCkMG
R5oUaOWma4wdMGlfFUJF9HnlRgKw514ZUYIUXBiFEJvJpTTpbRqs369\n3sAh1nZDmRjtCGnU+gjIfrgFOsiJkAh4t+lKU9vyqJvUSEKwVihg82ki6O6L4Me/j587wgpYWtyB\nRBV0a5SwthhVEgAgrIXAwVSQysnhiJ4u0q8XxeftxxHiu6C05SB6YiQIAXojvTQOZ7lXibfc9nei\n2e4hJQRVJXHK2FYRQE5sDfZG+0BAQIjlW9Sv0xOhfcsSMAA6V0nrwtuNF67eOxAchfEtYA2OxmLU\nP/LGKIwMrxko9+gun8qiVMwdn4/a4QVx1xF5Dh7eku0SY2EE3JaYBps3kaBdFuFV4LPcR+Ph0yd+\nzR1mX6PdWgbFJQuILYUCIP6jMUqgCDwy1QpEjlWxXdaYT13WjITnWe/Rqa/V7RJkyBIPrSPTdoqq\n6BaGJACKTOMT/nZwHEE0bM+sEoJn2WS5aJhHR3fY/mECyEsNskmZPxi1xrYvYEmVLg0WgxMVljbL\n+tbmTlPs/WVZ1pfz9FDfe9TNLLTcLBe23TIWQuAcDdZG3eZgQ1DBu43yLh7WT5yg4YHVVYiQMDie\nQIIHasS0cgBTKah9SVCbS+g2MQLe3w4huQ2C6mGVio1Kxka1ZD90a1iRqFXCaeCDbbS6cVuePd4E\noF9feyQgm8ouooXNwYD+PoxFxgBAyGw076G/q7AaxpHTbQgVvQ0AiDaOgHKWWjRs5C4amXAyJE42\n+1ZXQD7Zg1HDMlkbkv0uJqi1sAuwxHi+7KJtUM9lAeAAVaJl8I0MJDEKLezBkgK9phqz0CxlhCTq\nau6N9OsWhm6hnSzV+9SqMOh5N5Wstr0rowqB2poPtbnYdq8e3RWodadAOZPH+r1d/hyvHvl3fFMc\nhfEtkJdEf8RjM0bH7TMsDLfooi4TnaJMM188yWVXJiWZMXECUMHrEUylIPEinrl7KsaU0OvwmtUl\nZZYhCR+eyASAfR/g4k2LhRAeyUm0HfWli/HszCcROlCHaONw5LoKzXMkIa4UCkAtFhvEtDACXpmu\nBWJcw+KSWla6ALnuYeZpTOjGW0cuQYZLFEAiHuYyMNpAYxg8lDN0wp9r2BEAQJeRJWkR2oK+RGxP\nN/DBwRZz3ghrn2ldTMgahxJ+PPvbGpdaVW5aaETjLMrO4l4SFHbv5CSZKYZ5tdmQ3SqIItvbxysQ\nfL0Ap0HrplaoVzLjJWYBSSPuRd/7sFwP2iI0pdZDUqBEeXYOADZXROtPQl+v/nsQo+D0kv1ZkXGI\nHKmG1pcEcATJfpkpjGx3nv5cIjghimBArwLbka6P4O3W0alOah0F5CTmumtKfSMmRsfRd6wPNIz0\nY63Pb1EYUXzRaVb/JRE3KzLJSWHw/rMQ0/T0bkWCZJTQFxQ2CVRWk1i9NQgKAl6ZlaAhETfrf06M\nsNG7UUyUKCJCagjhqAo+0AbeFQJUAZnBII2b68rKmOXv66zAoQP0fby57zhaO/qZwtB6kvX0bdou\nztcBIaUVhAATc8aA0wSmFA6dpXWsSNQFrd/IxqP7enWFYc0mg6BAwYUXkxoojsL4Fkh1p+Dv6x7D\n7aPXxu0zBIwsyCxv+4l1tSjLM2MIse4qq1Ay6Asr8ImmgJ9YnoPUgNucvGONIVhy57WuNFbKnGIq\nD06zWwrpQSqYeI6n9alCSVBOl7J2A/a0W8AapJYQ+mSGZTttgyhwtFS3Ygphq4XhcYnwy5YMNEPo\nRjzQQubzcuAgCRJy032oHp6OCaWmdURUEQLPQxYFaJ3pVGD79NURozK7QviwPf5EVBH//t4XUFqG\n2awMq8IAAAHm3wHdJQUAGZ40UxmrElyyiCdvn2S+CykCjifI8Adw45wyOjlTVyZzJmYjrIbN2JOx\ndK/SiLcaqQtGC1NhkSSbAk8TdJeUIdQs5VBO9dMRcpKWgUiEp7Emyyx6EpUBRbYIySgTRElCgM6W\nD3vAccC9KyvY+i9FQdrXdD6LgtxMe+wn1sIw4i9BVwAe68zrGBcM0QRbDEMLeaC25bPf1LmeXvzH\nXjNIThTJJuDFLEspeSLArSefcLzKhLhXS4NsJCiICqKqBjFJXx2yz2/ri96IRRgDgCIhrPUjElUh\nDTvM3lPQ6wKnSbqA1yCX6bXGoh7WFz3hPpwJtUIqOKK/Lxkk5NNTogmkAlrZluMAt0tEwO2DKNP+\n+/cj/wWoEtS2XNbHctkBCKmn0aN0mX1h6XcFA6+ueyEchfEt4ZeTErqmDJcUVRj0BWemUEG4bvRa\nLCmeb5scBMTMENcJeCX4JGvWlV3J8Kop1OIC4uGYcuw6GiFmmwmf0G8PmAvBALDXvALsEwiVeCtH\n4HlMr8qlAslou+DC/SvH4o4lleA4DgGX5XmNaxMe4QPTWbaIJEjgOA48z+He5VUYV2h1pwkQBU6P\nv3BmCitAZ3kbh/WZwt7WXk1E1OYys/cDT8y/rcqc53hz8StFgqpqyE33MSEuF1IhU5CejAW1w+Bx\ni6zv2kPnQECQ4be3CaAzigGwa/tc9P2JGY1QjOKSMZbJrz57Gaf08hj97X5EIhqdk6KnJfPufmj9\nSRg5LBmV+XROESdGzPiHYMR6aJ8E/BxdD4RwyPYnY9a4XL1iMkEwzZ75Zfw7rzYbnLsHQkYTBM2N\nTG9GwgQHI1gPjdYE41x0Do4R9F08kbphQmrETEuOuOhgQFeUvByBqCtytZNa2Sw2JqjwpOjVAfqC\nOHUmRAcEQhSn2nrpipFRN6C4zHIzYoSt+24qQgkqVOw+0MQC1NHGEeB5DjxxUYUhmgFvviPPzJQT\nFPsSBRE3tLCXPq8cYuO26CnqHvRJXmhSH4TMr6DyIag9AZCI1/atyWWf4AuiL1+rSKYVLijQuPOn\nK18KjsK4yhiL+filJDZSFwX6a6nJrMKi4nlx5/gkc2RdNzYX96+swrQxObYJdMYo2Ai4c+r54xux\nghIApo3JRllekNXI4sRInMJ49NYJmFOTh+oR6Wwbx3F214LFmkm03e+VML48A//4w5mWtrtQVZqG\nKaNpgb9kt2VGfIzbIsVFLTGZtygjAMkuUwEZLin2p8UymVJRgBEFetaZItvjFVZLyaLs3DEWn6BZ\nFYa9L41ArtaTjPJhKXHXBQCvaFhunF6sEfiwmU64Ks1Kwy/un2GLE7E26QojoK+/wid1oVdoocrC\niE/pM6e/6mpEe+QsSFTG4S/68HljB+1L0eLGCnuQnuzBD+brylGMMjcIc9vo/dCn9FOFokjweiTc\nsnAkND1773/wLr0ey2jTkwYkDby3GxwH5ChVcAkyunrtgkwLeYGoG5nJHowtobEF99h3bf2W7NXf\nn2XiY7SxHADHLEZXcjdLj40co4t7ifpvhE86B9HTD6JIaDsLLJtabCpPMQIihaD20PdoXE/K/QLH\nO7+09QH7Tej127SwG1oX/RYkuMHJYXNNmDN5UBXebm3pbZd68gDFBaL/LqXSA+AFDU
TlkROh5WKM\n9VjkokO2e9sGYRZIxLRmeF8HVC5qq5D9dXEUxlVmWm4tvl+5GuWpZSjM9iMvw3fBMsaAXWE8fMtE\nVJWmQxR4uCXLaD6m2JgxpwBAvMJIkHW17vpK8DzHsrU4Vz8CMQqjOCeAtfPLael1C7bgpW0mMGdZ\n4J6ek5mi+2CtzxxTHSLda5kFH9P2FDcVUrFzXZJdZuoxUQWmhAEzPREAJpTms7LfAMeCnDzHo7LQ\ndNVZLTSPZL9Xrs+sXBuIcRdGvhoJEpUw0lVL54ggPmXZrSuMmhEZmFkyDi7ehY9a6Mxoj+iB1y3i\nb2+rxTBSYzvPUBheyW4hcqpl5r3lXXRGOi0uOIDjCHhXP3PdkKgMTSPwy0m0/Iv/HBN4xj0MIdQX\npdlkRJFoZhyA22uXQiAy+jU9vVQVMboklQn6kBKCKOuz7BUZR5o68L8/OGJruzEq9rpFdITtpdON\nZ/G4JJqRxZsTH3m9ijLpC0DrS4LqPwWS1MbaAQBpoHEtKfc4iEDb3tMfxeiSNKT5/OBFBYvqqJIy\nrG4jeQAAQoSWJmHKWBfW1HWnmNYDAJ8eT5RLP2FtiCrUqiME4LzdZu2xvjL9nvR3KfjPgfN2IuD2\n4cHV1QCA1n5zFjq9d0xsy0L0VIm+3DPdJ+UdB/GcYwkG3wRHYVxlZEHGxOxq8ByP7y8aiR//YOJF\nz0lxJ2Pl8GXYMv5u+7VkgZWdOBemPvrKIipsq4vy2XFG/MDjElGSG7C7aGIo8NOApkeSUD0iPtie\nkPNU3r11YTmCxshfz5PPTDaFtzECCql2X3a2pU6XITSeuXsq/v7OyUyhSTEKI9Vtr2wqWCwM6yz4\nJMlnsz4Ml4ZHcCPoNa+Z7DUtB3dM2u+SCebaKF6LMgcAtaUIoY/nYFp5iem600SED9WyY9J0K04U\neNx8XSUKg6Y7zbA+slK8eHjuTfh+5Wrz4npbjbXMDXjVFAzKqVJInEWBxKz+CIBNliNRF8JRFSIv\nguvIB+8KQUg5oz+XBwGfzEa0Lx9+FZwUBYnKdAY9gNqKbGQlmckNRJFQlO3HjrtnAQDeP/0R5k2l\n7qHjjf14+vd/gdKab6tHZQT9PS4RTT2nbM/F0psFDhyvlzZJp242v5EFSMzEBgiKTcCnCFkYnlwG\nTg4jTPoARURuuk/vQw8kt4KRZW57PxEeoU+m256J/V+1KAxRQW5yEE+so++1NjgbRBHB+2g8ROZd\ntMAi4aGeyQfv7oOoZ34ZvycjC83AL/tYSfqCpFzbPkPx2+ZX6Rjl/+OKnl4gXX2gOArjOwTHcXGj\n9fMxq2AaSoJFtm1uSWCZM8aodU5NPratHY/66aXmgfoo/b4VVchJ8wKqBKGtDBVCTAorgMXF12F2\nQR22zbyDZkCdh4fXVONHN9NsIbUzsWLRNIKyZOp/NjJsMlLMEdyDE36IMemVmJpbazsv22dJk9U/\nghS/C5kpXqYwYl1SAi+gIf8maCEPtO5UiJb5I2qXmYHmO4/CUImKJI+pMFycKYRjlZMk8phTMB0i\nLyLHZ0/ppXA2C+p7M0qw1KJkioL2NGlrIUt3zKhwQtY4TM6egNrsGty3YizuvGEU0j1p4L+YxALs\ngmY5R5Uw1jeV/TkqP4cJoegXMVl7UReum0DbInXnsc1EFeCWJTyxrhar6uis8rMhfSEpRbYp46DF\nFUhUEa3n+pnSA4C3mvRZ2Cy+ISP8V7N9rNSNLGBa7iRb8wxlIvI8KyjJe6k147aU5bAtiWwRxpkc\nvgAAFtxJREFU8KLAI91jWqsF6SnYciNNcy0JFiGqKdjXoseHrNcIe01LzSqg9Wu7KmgBxaxAEHkZ\n1MIszciB0mJm6vkkD1vRT23T0131+AYb+SsyIkfNWJnVcrxr7A+QKZjXY0kiSoLBGYvL2U11NdGx\nl4ijMAYRLklA9MtRiDaVob50EQBaUrksP2gTikb8QBJ55iKSzoxCTdr4uGvKgoQVw29ga4Ofj/Jh\nKSjVM7u0zgxET5aiyluHm+ePYMeoGsGkbF2p6EI74DU/wAJ/Hu6q+n6c6Wyr06WPFg0BnHoelxQA\njEorR/jATJB+P7xuy8dicc/5ZR8k0bJuQBd9zpAaZusVGGuwGyNhI25i5XtlS/DszCfhERMnEFgd\nc0umFmFpbTn7Oy9m9GhVOt6Y63Ech5srV+HWypswtiwdkyqpciFdGQgfnIwcbTRSQxVI8ZsCLyCb\nQjw3OYWlWmvdabbFqB5YMYnFc1xRM8OLKBJ8bhF+r4yZJTU2F5zEuVj2HGBXGOlJSbhuYgGrZmDF\nNlK3JB4Ygxm3LGLliGUYlzHG3GfUPhN4/GjyJtv1rKnYVrebVcBLAm+LbWX6A6yfjBU0P2r5S/w1\nwLE4H5s5DqA4RtFb331mssfWt92WQlKGK9Fg0cQy9n/N4i61Zj0mu4IYhrHmSUb/kfhBnNG3WmeG\nrSpEW/s3D3w7CmMQ4ZIFQBOhnCpLKLhMU1VflIfnMFUPLC+ZWmRzD31TlJPDUe4ej9k1+Vh/QyUC\nPhkTK7JQmVaOB8bfg++PXoX1SysvGq8xmJozEZWp5Vh3fSVumFbEtpsuqXjT3Mg6AwC/165QNo3d\niDtG3wyP6LEpU8OdUZlWztYrCPpkyIKA0P6ZSGqahYnZ1XH34jguYRbcTXOHQxJ5jCy0VyPmOR4z\n86diTsF0SDECNccikD1SYgUUiyjyIH1BjOCn4oeLpuMnG6axfUGX6U4LyH4MyzLjLJolQ84q7GVB\nBukz/67Q2y8LMlaOWMa2zxtXauu/ZIsy3bC0GqW5QXAch3UxKeW2YK3NzWO4pARIvIiiQEHcPlHg\nkO3LtGULWgcZomYpkWMZVQsCh6ClfdZvpDhQaMbXABDF7r5RWwtsbQAAn5qNGcEl7G+rRZAacGFq\neTH7Oxw2r11XUQTrEGJ4bjqeXj+ZXt+iMGLffapo/i58kg8TR9KBhW0FSgAjcvQkFMIjcthirV9g\nkbaB4lSrHURcyGUEAOHDtXopCl1hCFSQPb9lJmRJQGfv5cnVZugW8eTKbEyuNH/sxcFCFMcP0i/I\n/6pYmXA7C3rz8RaG12X+vA03zMYVVTh2shNlafkAqHKwWV+qBOHQfKy7czr6iglOn+3FzQvK8S+7\njgCKC66o74Iz92OZP7EA8yfGz8wHgFUj6hNutwpJcYCZLRuXV+EP7xzDwknDEPDJyMgwlYTVOgzI\nfvgyLLP+LQLKOodEFHmo57LAJ3XSkicWhZtvsYh8MTGboMWasQrxmswq/IvkM+s+2Xzv1oQH+n9D\n2VvbZMYwaP+nulPQHaUuKTp5sRcuWcDT62bhRx/sjrsPz3M2C8Mq4CVBQoY3jZVIJxEXinMCKM0N\n4PjpLvzo5tlY/+vfQOtJxuiSVBz+6hyWTivCud4e7NZj81ZrkOM4rJo+Gvve/QN9HsENoxwjITy8\nfBL6tG4QjYfEi5CM9FuL8pRjftMLa4vxJ
z1hbNPy8Th4UMNHh1sRPjgFj24owzP7fgEAaD5jqV1F\neD1BQLvwMtADxLEwBhEu+SI/CE2wuWOIvtCvrCsawz1kdWd8HQwBWT4s+SJHfnMyPelYWDgHM/On\nxu2zpqL69WcbV5aO5TNLbcfZFAYAF+eFW3QhNeDGI2vHIz8jCafO0s89mPTNA4cXQ+RFXDdsFoB4\nd9X5KMkN4MHV1XGZbACQ6gli1Yh6ZHrSURwchorCFEwbnY07llSymFehv8Am4JfPLIHSRu9dmVJp\nu55X8uDGEfVIknwoCgyz7Svw03MkXrQLe1jWvdd4WMvSrJk3nC13y3vpxDPjt2i9htXCAOyJDR7Z\nFLRBj0UhWq0XYs+ei7XCc7xm7GjNrFF4aHU11lw3An9zywRwHAe1dRhIXwCFWX688OBsFOcEkOIz\nrZzYZA2rS2ndItO11tsfZen0RsxGZoM9jrmR+hR7xV+XJGBTzQbMyp+GAn8eJlZkQuA53LGkEmmW\n2MzyGcNt5xluViNu+E1wLIxBxMUsjFnjcvH2fjPzJLZEN8dx+NnGujgBeqncNHc4GqaXXFyBXQY4\njsPS0oXn3WcQ65KyYk25BZBwzkN3H7W+aisSBbUvP8tKF2FR8bzzlsa/FFySgJlZU21Kdd0SqgQq\ni+ohSEvhkz22/hpdnIZfbVmMzvB0c20PCzPyp2J63pQ4l2JxsBBPTfsbcBwXF7BPd6fiq65GWiLe\nQpJHwuphy7Fj33PoPkWVeWYqFbb5fovCVI15SnpKttda0ZkqBlUltjZpFrcaIQRZlnO8Me0zrpfm\nTsG88YWIZerobLz/12akBszz3LKA6MlSSHnH4jKZrO3IS0nB8PwuHGnqRG9/FHmBTJzsb2QLZFnL\nAkWOjkNwxP9gYdHcuDaUJRezxJH0oAe/fGg2ezaDuqocVgZ9XFk6XMHJOID/w9xq3wRHYQwiLqYw\nblk4Emvnl6OxtQefN3YgOzU+ZnEhwXpJbfkWlMWlYLikEiHFKM7Z1Xlxx2xcUYX9R9owZVR23L4r\nAcdxl0VZANbRazwXs5is8Y9Yzhd/ssZCrFRnVmFf6ydx271uEQX+dPy07m9x5wc0iypLz57z25Yx\npuLKr7/L2QV12NW4GwVJuZB66TtU9USFm8ob8Lu3Dphr1INaGLIl1hUbjJ+ZPxUCL2B2QV3C9n9/\n0UhUD09H9XBT6QR8MpSTZfBHCzB+9riE5wF0lvniyYX42WsHMG9CATwZSfiwZR/bb/0NPnvnXEji\nfHuixkXgOA4NZdfHpc5WFKVAVZOx973ZQPT838BAcRTGIOJiCgOgftzCbD8Ks88vCAYjfu/5Pxbr\n8pX/eG9dQrfO6OI0jC5Oi9t+LTCQ38W3wdiMUZiQNQ4dZ2R8atlupJJLotnOjGTTXfS3Ux7Gm4c/\nxFu99L2kJ5vK5KfTfwwOHP5jD11kyBhoT8+bgvTp5XjtnWP44nS3vo/uTPekoa3/LOy5azQetrRk\nwXnbLwo8xpfbLUyfW8LT66cgySslVKC3j74ZRzqOISAnYWyZn8ULVS0NxYFClKdQi4rjOFw/pRA5\nad6v7facN2xm3Dae4yDKvC3V+JvgKIxBhEsWsKF+NBudOZhcyMIw1qj2uMSEyuJaxyV/N0KVPMez\ncvDaTILb/4Eu0+pOYI1a3aLpnjRMzpiKt7APxTl268WwwhItaFVRlIpHi1Jx29+/BcBUJptrNuC9\nU3sxKbsm7pyvQ1YCS92gOnMMqjPN+IVh7Qm8gAcm3GM7Nja2djkQeA6CMLBMxIFwxRXG7t278dRT\nT4EQguXLl2P9+vW2/ZFIBA8//DA+++wzpKSk4Nlnn0Vu7sACfQ7xGKl2DpQlU4vw2RftbC2PRPSF\n9bURLsEFcC0x0Mmg3yY8x+HJ2yfh4JftKMk1lcC9y8fYStwYlOUHsfnGsRienziRYiBC0fDyB10B\nXF983ddq97UGz3PQNHLxAwfIFf1CNE3DE088gZdeegmZmZlYsWIF5s6di9JSU5O+9tprCAaD+NOf\n/oQ33ngD27dvx7PPPnslm+UwhPjejBJ8b0bJBY8xXDaJYjrXMo/eOoEF67+L5Kb7WGkOA2t8IJYL\nuQQvpBR9Hgm9/dG45IahgFsW0NN/eSrVAlc4rfbAgQMoLCxEXl4eJEnC9ddfj127dtmO2bVrFxoa\nGgAACxYswJ49e65kkxwc4lg5uwzXTSjA+htGXfzga4jinACqStMvfuAgIJFLyuAnd03FxJGZmFOT\nf95jBhuP3joBs6vzUDMigxX4HJ5/iZOfEnBFLYyWlhbk5JiLwGdlZeHTTz+1HdPa2orsbJp5IggC\nAoEAOjo6kJx85XP4HRwAGrtYPW/4xQ90+M7i8+iT+hIojtL8ZGyoj1/tcjBTnBNg8Z7RxWnYfONY\nlOR8xxVGbIntgRxDCBlwuQgHBwcHAJhQnonjE7pQNybn4gcPQS5Xht8VVRjZ2dk4dcqcKNbS0oLM\nzMy4Y5qbm5GVlQVVVdHT04Ng8OKa0Fr6YKjj9IWJ0xcmQ60v7lsdXzzTYKj1xZXiisYwxowZgxMn\nTuDkyZOIRCLYuXMn5s61z16cPXs2Xn/9dQDAm2++icmTJ1/JJjk4ODg4fE04MhC/0Tdg9+7d+MlP\nfgJCCFasWIH169fj5z//OcaMGYPZs2cjEongwQcfxKFDh5CcnIwdO3YgP3/oBKccHBwcrhWuuMJw\ncHBwcBgcfPdm9Dg4ODg4fCdxFIaDg4ODw4BwFIaDg4ODw4C45hTG7t27sXDhQixYsAAvvPDC1W7O\nFWfbtm2YOnUqli5dyrZ1dnbitttuw4IFC7Bu3Tp0WxYMfvLJJzF//nwsW7YMhw4duhpNviI0Nzfj\nlltuweLFi7F06VL89re/BTA0+yISiWDlypWor6/H0qVL8Ytf0JXWmpqasGrVKixYsACbN2+Goijs\n+E2bNmH+/Pm48cYbbanugwVN09DQ0IC77roLwNDtizlz5uCGG25AfX09VqxYAeAyfyPkGkJVVTJv\n3jzS1NREIpEIueGGG8jRo0evdrOuKB999BE5ePAgWbJkCdv205/+lLzwwguEEEL+6Z/+iWzfvp0Q\nQsjbb79N7rjjDkIIIfv37ycrV6789ht8hWhtbSUHDx4khBDS09ND5s+fT44ePTok+4IQQvr6+ggh\nhCiKQlauXEn2799P7rvvPvLGG28QQgh57LHHyD//8z8TQgh5+eWXyeOPP04IIWTnzp3k/vvvvypt\nvpL8+te/Jlu2bCF33nknIYQM2b6YM2cO6ejosG27nN/INWVhDKQ21WBjwoQJCATsJZ2t9bcaGhpY\nH+zatQv19XSd6LFjx6K7uxttbW3fboOvEBkZGaioqAAA+Hw+lJaWoqWlZUj2BQB4PLQ+UCQSgaIo\n4DgOe/fuxYIFdD2HhoYG/Pd//zeAwV+vrbm5Ge+88w5WrjTXff/ggw+GZF8QQqBp9hUNL+c3ck0p\n
jES1qVpbW69ii64O7e3tSE+nReUyMjLQ3t4OwF6XC6D909LSclXaeCVpamrC4cOHMXbsWJw9e3ZI\n9oWmaaivr8e0adMwbdo0FBQUIBAIgNertmZnZ7PnPV+9tsHCU089hYceeoiVFDp37hyCweCQ7AuO\n47Bu3TosX74cr776KgBc1m/kmloAgDhTRi5Iov4ZbHW5ent7sXHjRmzbtg0+n++8zzfY+4Lnefzx\nj39ET08P7rnnHhw7dizuGON5Y/uCDKJ6bW+//TbS09NRUVGBvXv3AqDPF/vMQ6EvAOCVV15hSuG2\n225DcXHxZf1GrimFMZDaVEOBtLQ0tLW1IT09HWfOnEFqaioAOkJobm5mxzU3Nw+q/lEUBRs3bsSy\nZcswb948AEO3LwySkpIwceJEfPLJJ+jq6oKmaeB53va8Rl9car22a4G//OUveOutt/DOO+8gHA6j\nt7cXTz31FLq7u4dcXwDUggCA1NRUzJs3DwcOHLis38g15ZIaSG2qwUjsSGDOnDn4t3/7NwDA66+/\nzvpg7ty5+OMf/wgA2L9/PwKBADNFBwPbtm1DWVkZbr31VrZtKPZFe3s7y3QJhULYs2cPysrKMGnS\nJLz55psA7H0xZ86cQVuvbfPmzXj77bexa9cu7NixA5MmTcIzzzwzJPuiv78fvb29AIC+vj689957\nGDFixGX9Rq650iCJalMNZrZs2YK9e/eio6MD6enpuPfeezFv3jzcd999OH36NHJzc/Gzn/2MBcb/\n7u/+Du+++y48Hg+efvppjBo1OBYF2rdvH9auXYsRI0aA4zhwHIdNmzahqqoK999//5Dqi88//xxb\nt26FpmnQNA2LFy/Ghg0b0NjYiM2bN6OrqwsVFRXYvn07JEkaMvXaPvzwQ/zqV7/C888/PyT7orGx\nET/84Q/BcRxUVcXSpUuxfv16dHR0XLZv5JpTGA4ODg4OV4dryiXl4ODg4HD1cBSGg4ODg8OAcBSG\ng4ODg8OAcBSGg4ODg8OAcBSGg4ODg8OAcBSGg4ODg8OAcBSGwzXNqlWr0NDQgOuvvx6jRo1CQ0MD\nGhoasG3btku+1u233z6gctePPPII9u/f/3Wae0kcPHgQ//mf/3nF7+PgMFCceRgOg4KTJ09ixYoV\nF6w+apSKuFZ49dVXsWfPHuzYseNqN8XBAcA1VkvKweFS2LNnD7Zv345x48bh4MGDuOeee9De3o6X\nX36ZLaizdetW1NbWAgBmzpyJl156CcXFxVizZg2qq6vx8ccfo7W1FUuWLMH9998PAFizZg3uvvtu\n1NXV4cEHH0RSUhKOHTuGlpYW1NTU4OmnnwZAa/M89NBDOHfuHAoKCqCqKubMmYMbb7zR1s62tjZs\n2bIF586dAwDU1dXh9ttvx3PPPYe+vj40NDRg0qRJ2Lp1Kz7++GPs2LED/f39AICNGzdixowZOHHi\nBNasWYMlS5Zg3759iEQiePzxx1FTU/Ot9LXDEOGbLNbh4PBdoampiUyePNm27f333yeVlZXk008/\nZdusi8scPXqUzJo1i/09Y8YMcvz4cUIIIatXryZbtmwhhBDS1dVFamtrSVNTE9v37rvvEkIIeeCB\nB8jatWtJNBol4XCYLFy4kOzdu5cQQsiGDRvIL3/5S0IIIY2NjaS6upq88sorcW1/8cUXyWOPPcb+\n7urqIoQQ8q//+q9k8+bNtrbX19eTs2fPEkIIaW5uJjNmzCA9PT3kq6++IuXl5WTnzp3s2WfNmkUU\nRRl4Jzo4XATHwnAY1JSUlGD06NHs7y+//BI///nP0draCkEQ0Nraio6ODiQnJ8edu2jRIgCA3+9H\ncXExTpw4gby8vLjjrrvuOogi/ZQqKytx4sQJ1NbWYu/evXjyyScBAPn5+cySiWXcuHH4/e9/j2ee\neQYTJ05EXV1dwuP27duHpqYmrFu3jhWkFAQBjY2N8Hq98Hg8WLx4MQBgypQpEAQBX375JUpLSwfa\nXQ4OF8RRGA6DGp/PZ/t706ZNePzxxzFz5kxomoaqqiqEw+GE57pcLvZ/nuehquolHTfQdRbGjx+P\n119/He+//z7+8Ic/4MUXX8Tvfve7uOMIIRg1ahReeumluH0nTpyI26Zp2qBa68Hh6nPtRAAdHC4C\nGUD+Rk9PD6tO+sorr5xXCVwOamtrWVnpkydP4sMPP0x4XFNTE5KSkrB48WJs3boVf/3rXwHQtS6M\nMuYAUFNTg6NHj+LPf/4z23bgwAH2//7+frzxxhsA6BKlAFBYWHh5H8phSONYGA6DhoGMprdt24b1\n69cjJycHkyZNgt/vT3h+7LXOt+9Cxz366KN4+OGHsXPnTpSUlKCmpsZ2P4M9e/bgt7/9LQRBACEE\nTzzxBABg2rRp+M1vfoP6+npMnjwZW7duxXPPPYft27eju7sb0WgUBQUFeP755wEA6enpOHLkCFau\nXIlIJIIdO3ZAEISL9omDw0Bx0modHK4Q4XAYkiSB53m0tLRg5cqVePnll1FQUHDZ72VkSb333nuX\n/doODgaOheHgcIU4fvw4HnnkERBCoGkaNm3adEWUhYPDt4VjYTg4ODg4DAgn6O3g4ODgMCAcheHg\n4ODgMCAcheHg4ODgMCAcheHg4ODgMCAcheHg4ODgMCAcheHg4ODgMCD+P4xSKOOE0RxSAAAAAElF\nTkSuQmCC\n",
+            "text/plain": [
+              "\u003cmatplotlib.figure.Figure at 0x7f97f1e98d90\u003e"
+            ]
+          },
+          "metadata": {
+            "tags": []
+          },
+          "output_type": "display_data"
+        },
+        {
+          "data": {
+            "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYwAAAEcCAYAAADUX4MJAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvXeAHMWZ/v/pNGlnc5S0ymmFUE6WEAgQ2UJkGxtjsMEG\nbDD+YnNwZ3PnH+fD2GcwnDFHMBmcwETLIiMJ5YByzqvd1eY0eTr9/uie7p7dlRACHQ7z/LM73dVV\n1dXd71NvqLcE0zRNcsghhxxyyOFjIH7eHcghhxxyyOHvAznCyCGHHHLI4ZiQI4wccsghhxyOCTnC\nyCGHHHLI4ZiQI4wccsghhxyOCTnCyCGHHHLI4ZiQI4wccviUWL16NXPmzDmmsg899BC33377Ce5R\nDjmcGOQII4dPjTPPPJNx48bR2dmZdfyiiy6ipqaGhoYGAO68805qamrYvHmzU6a2tpaamhrn99VX\nX81LL73k/H7kkUeYO3cukydP5vTTT+e2224DYN68eUyePJnJkydz0kknMX78eCZNmsTkyZN57LHH\nTuTt9glBEE5I2Rxy+FuC/Hl3IId/DFRXV7NgwQKuuuoqAHbt2kUqlcoSjoIgUFRUxAMPPMATTzyR\ndbwvvPLKK7zxxhs888wzVFdX09bWxvvvvw/AX/7yF6fc1VdfzcUXX8xll112Im7tnwa6riNJ0ufd\njRz+hpHTMHL4THDRRRfxyiuvOL9feeUVLrnkkl7lLrnkEnbu3MnatWs/ts4tW7Ywe/ZsqqurASgt\nLeWKK67os+zHJSx46KGHuPXWW7n99tuZPHky8+fP58CBAzz22GPMmjWLM844g+XLlzvlm5ubuemm\nm5gxYwbnnnsuL774onMulUpx5513Mn36dObNm5elMWWu/d73vsfMmTM566yzeO655z72XgG6u7u5\n8cYbmTlzJjNmzODGG2+kqanJOd/V1cW//uu/cuqppzJjxgxuvvlm59y7777LxRdfzJQpUzjnnHNY\nunQpYGl/K1asyBqHjEmsvr6empoaXnrpJc444wyuvfZaAG699VZmz57NtGnTuPrqq9mzZ0/Wvd97\n772ceeaZTJ06lauuuopUKsUNN9zACy+8kHU/8+fP57333jume8/h7wM5wsjhM8GECROIxWLs27cP\nwzB48803mT9/fi9BHggEuPHGG7n//vuPqc5XX32VJ554gi1btmAYxqfq46JFi7jkkktYu3YtY8aM\n4brrrsM0TT788EO+853vcNdddzllb7vtNvr378/SpUt58MEHuf/++1m5ciUAv/71r6mrq+O9997j\niSee4NVXX3WuM02TG2+8kTFjxrB06VKefvppnn32WZYtW/ax/TMMg8suu4zFixfzwQcfEAgEuPvu\nu53zt99+O6lUioULF7J8+XJHwG/atIk777yTO+64g3Xr1vH8888zYMCAI7bTU6Nbu3YtCxcudLS+\nOXPm8M4777B8+XJOOukkfvjDHzpl7733XrZt28Yf//hH1qxZw+23344oilx88cW89tprTrkdO3bQ\n3Nx8zL6dHP4+kCOMHD4zXHTRRbz66qssW7aMYcOGUVFR0We5L33pSxw+fJgPP/zwqPXNnz+fu+66\ni2XLlnH11Vcza9asT+WfmDp1KrNmzUIURc477zw6Ojr49re/jSRJXHDBBTQ0NBCNRjl8+DDr16/n\nhz/8IYqiUFNTwxVXXOEIxDfffJObbrqJ/Px8Kisrufrqq502Nm3aRGdnJzfddBOSJFFdXc0VV1zB\nggULPrZ/RUVFnH322fh8PkKhEDfccIOjiTU3N7N06VLuvvtuwuEwkiQxdepUAF566SUuv/xyZs6c\nCUBFRQVDhw49pjERBIFbbrmFQCCAz+cD4NJLLyUYDKIoCt/97nfZsWMH0WgU0zR5+eWX+fGPf0x5\neTmCIDBx4kQURWHu3LkcPHiQ2tpaAF577TUuuOACZDln9f5HQu5p5vCZYf78+Xzta1+jrq6Oiy66\n6IjlfD4f3/nOd3jwwQe57777jlrnvHnzmDdvHrqu8+677/KDH/yAsWPHcsopp3zi/pWWljr/BwIB\niouLndl2IBDANE1isRgtLS0UFhYSDAad8v3792fr1q2AJbyrqqqyzmXQ0NBAU1MT06dPByyNwzAM\npk2b9rH9SyaT3HPPPSxdupTu7m5M0yQej2OaJo2NjRQWFhIOh3td19jY+Klm8t57MQyD+++/n7fe\neouOjg4EQUAQBDo6Okin06TTaQYOHNirDp/Px/nnn8/rr7/Od7/7XRYsWMCvf/3r4+5TDn+byGkY\nOXxm6N+/PwMGDGDJkiWcc845Ry176aWXEolEeOedd46pbkmSOPfccxk9ejS7d+/+LLp7RFRUVNDV\n1UU8HneOHT582NGYysvLOXz4sHMuEwUG0K9fP6qrq1m9ejWrV69mzZo1rFu3jkceeeRj233yySc5\ncOAAL730EmvXrnV8AqZp0q9fP7q6uohGo72uq6qq4tChQ33WGQqFSCaTzu+WlpZeZbwmqjfeeIMP\nPviAZ555hrVr1/L+++87ZsXi4mL8fr+jRfTExRdfzOuvv86KFSsIBoNMmDDhY+85h78v5Agjh88U\n99xzD8888wyBQOCo5SRJ4uabb+bxxx8/YplXXnmFxYsXE4vFME2TxYsXs3fvXsaPH/9ZdzsLVVVV\nTJo0ifvvv590Os2OHTt46aWXmD9/PgDnn38+jz76KN3d3TQ2NvL88887144fP55wOMzjjz9OKpVC\n13V2797dyzHeF2KxGIFAgHA4TGdnZ9YMvby8nNNOO42f/OQndHd3o2maY666/PLLefnll1m5ciWm\nadLU1MS+ffsAqKmpYcGCBWiaxubNm3nrrbey2uzpY4rFYvh8PgoKCojH49x3330OoQiCwKWXXsq9\n995Lc3MzhmGwYcMGVFUFYOLEiQiCwL333ntUDTOHv1/kCCOHTw3vDHXgwIGMHTu2z3M9MW/ePCoq\nKnqF3mYQDod55JFHOPPMM5k2bRr33XcfP/nJT5g8efIR2/808NZz3333UVdXx6mnnsr3vvc9br31\nVsdHcPPNN9O/f3/mzp3L9ddfz8UXX+xcJ4oijzzyCDt27GDu3LnMmjWLu+66q0/NoCeuueYaEokE\nM2bM4Morr+xlZvrFL36BLMucf/75nHLKKTz77LOARVL33HMP99xzD1OmTOHrX/+6owHdeuut1NbW\nMn36dH7zm99w4YUXHvGewdIS+vXrx2mnnca8efOYNGlS1vk77riDUaNGcfnllzNjxgzuu+++LNK5\n+OKL2b17t0OuOfxjQTiRGyj927/9G4sWLaK0tJQ33nijzzI//elPWbJkCcFgkHvvvZcxY8acqO7k\nkEMOJxivvvoqL774Yq8Q2xz+MXBCNYxLL700a4FWTyxevJja2lrefvtt7r77bv7jP/7jRHYnhxxy\nOIFIJBL8/ve/58tf/v
Ln3ZUcThBOKGFMnTqVgoKCI55/7733HHV+woQJRCIRWltbT2SXcsghhxOA\npUuXMmvWLMrLy5k3b97n3Z0cThA+17DanuGJlZWVNDU1UVZW9jn2KocccvikmD17NuvXr/+8u5HD\nCcbn6vTuy32SS8yWQw455PC3ic9Vw6isrKSxsdH53djYeMTVwV6Yppkjlj6w+1AHtz2whLOmDeLW\nKycdsdyFP7BWLL/+y/m5cQSu+veFdMfSnDdzCN+9vO+1A5kxe+JHZ1NREvq/7N5x48ofLSAYUHjq\nrqOvifm/RmYsM3juJ+dRlO//VHXe98I6Fn1UR2VJiN/+6Ow+2/vpjbOYMLL8U7Xz94LMPT/6r3N5\n8vWtrNrayOCqfB66/cxPVe8JJ4yjBWHNnTuXF154gQsuuIANGzZQUFBwTOYoQRBoaYl8lt38u0V5\neb4zFoebrL+xeOqYxudwYzeK/I8TWe0di08Cv2KNQWdX4mOv37anBWFoyXH17/8S5eX5pFQD01T/\n5r+Vg3UdqGV5n6qOZMpaC2IYxhHvt+sYnu8/Glpbo6TTGgC6/ulyscEJJowf/OAHrFq1is7OTk4/\n/XRuueUWVFVFEAS+/OUvM2fOHBYvXszZZ59NMBjkZz/72Ynszj88kpkXwzi2SGlVM/6hCON4EfBZ\nn0EipX1s2cb2OGP/DgjDNE003cAwzL95jTyaUP9vGjpxKwj+ZqHrn+09n1DC+Lg8QQD//u//fiK7\n8HeFV5bsQ5FF5s0aclzXJ9M6AFqPl0TTDR59fSunnNyPiSNdDU7tMeMwTJPH39jG5FHlTKv5eNPg\nkbB6exMbdrdy/YUnIX4Ggmr7wQ4+3NjANy4Y84kJ7u3Vtai6wRdnDgFg4cqDxFMal80Z7pQJ+qw9\nIHYe6uRXf9rI9fPGkB/y9VlfY1u8z+PHCtM0eXrhDkZWFzF7fL9jvm7jnlbW7mzmG+ePQRSPPqYv\nLdpLSZGVB8swTVTNwKdIJFIaD7+yme64ypTR5Zw7bRCPvLaFc6cPomZwcVYde+q7+MvyA5w7fRDv\nravj+nljHGI9XpimydKGlQihbsy4Gz0ZswnDNE1eeGcXowYWMX1MZda1ndEUzyzcwVfPHkV5UZBn\n39zBkH4FnDahP0fC+l0trNja2Ov42wc/YEndCn404zaCspWRIJnWeGLBdubNHMLgqnyn7CtL9iHL\nIudOG8hv/7KNc6YPQhDg7dWH+OYXxyAKAk/+dTtnTBrAqIFFR+zLB+vr2X6gHd0wue6LJxHwSTy5\ncAszavpRVhTkpUV7+cYFYwgHlaOOoWGaPPfWTsYMLu41Ru3dSZ57aydfO2e0c+wXv19Pfsiq87Og\njlzywb8hvLH8AMBxE0bKJoyequfO2k7W7Wxh3c4WnrzTtWFqWna5uuYoq7Y1sWpbE9PuPH5b5yOv\nWUn65s8eStVnYO//799b0TcnDyth1snHLmQB/vC+tZdDhjBeXLQXIIswAn7rM0imdTbva2PBioNc\nOXdkVj2CYE1Qu2Kp47qHDJJpnQ83HebDTYc/EWE8+NImAM6ZNoiBFb0TEHrx15UHs9tUdXyKxKHm\nKFsPdADQHUtTkh9g4942Nu5ty3ovAH7+wkfohsmmvW0ArNhSzBmTq4+5v31hS9t2/rDzFQKjAyTW\nn+4cz2gY0YTK+x/V8/5H9VnCcEf7bl5eu5Y9eytpj2zmrmumsmhDA2xoOCJh7O7Yy0MLt2AmXOGv\n2xrGa3sXAlAfPcyIIiur73vr6li3s4Wt+9t5+DZ3hX3mmxxVXcjanS0E/DLLNh3GBEYMKCQUkFm9\nby8fRRfxmytvQBazReob+94iko7y7lsuIb+5+iCD+iusDz7HmlWDKY9PpaE1RuHivXz9vBqOhuaO\nBIs3NLB4Q0MvwvjtX7axo7YT5QN3/5JoQiWaUBH8cUw+/beYs0f8A8HRMHqYpOJHMLX01DAy139W\n+KyTCLR19y2sNUPrs62jte89F/Rnf+R9jYMsiUc890lgfMoxMY7R3OhFMq2zs30Pf2n4MwhW/6MJ\nFekomkrGrCnkdYKkEgpkz3y70xHePvgBunHs47Hy8DrrHymddTyaUDFMgx1te+hrHvzrDY9TL68H\nOUVje5y0mmnT4L9W3c/Le/6SVd4wDR5Y/yiBccuy6lM1g7Tumr/SejrrnFjYgjFkFRtatvTqQyRu\nXdfUHndqTKQ1UqqO/+RlyFUH2dDcO1/YmwfeY1nDqqx+mCbsi1i5vuSqg6iabtenO/3f3bG3z/e3\nuaNvDVc3dGqL30Cu3oUiZYt1sbCFwIQlJPx1fV77SZAjjL8RaJ+BQyp5BOdW0kMY3pdQ7aFhpNTP\nmjA+m3qCfstk1BntTRgN0UZuXfRvvFu7uNe5tOreX8/xTXvuvafZrOe4eMt8WsI4VptyR7KTrW07\nevUn84w/CZIpjf/Z8Bh7YzsR861913XDxMRECHXjG7GepfUre10nFjcSGLsSZeBOeloWf7n2N7y2\ndyEfNW/qdd2KhjV9Po+D3XZG3VS2hhRNqLy463We3v00UqmV+TcSt4R5d9p1Uguyagl9ezzEgg4a\nYo28V7skS1uOqwn3HvLbnf9VzeBQpN75HVOzha/cfy9SUSsv7sqO4gKIJlUEX5zD7VGHaFXNQNMM\nBNF+pj0GyUtOSO5zkyWRjnRH1m9w39E/7XqNB9Y/yrrmjb364TWJxpJu/S2JNkx/DKX/Pgrzss2p\nUqG1GDoldvWq75MiRxh/IzheQaRqOnsbupw6hGCEpsKlJDUrpfXuuk7W7Gy2CitJNh5yzRVb9reh\n6Qa76zqtvReSfQuj7liaw22xY+pPbVME5DRCsBvdMDnUHKUj0lvQ17VE+WhXizO7OhqKwn7kfnvZ\nl9rE4bYYLZ2uQFh52MrY+vq+N3td53Wm9iTDlGe89R47+Xn71BlN0dQedz7mhJo9O/44dMfTNLS6\nY9fXxKC2KdKLDH+8/B4e3vgknamurPtNqTrN8RY6U9kf//aWvexv7OxTgzkcdVOaC7Lb/2hCQ648\niFTSxO93vuwcb+uy3h2l2kojLxa0O+S7pXU7d3z4/9GWtASxZljvzKHmKA2tMZYcWM/zO17klT0L\n2LinlbSqs35XC3/e9i4dKYusDDF7DOtaoyypt7bHFQKWQFy5rQnTNNnZ7ppXBMW6LqNhSCWufyLz\n/bR0Jlm5y33HMwQJ1thn+g0QVd3nEjHakeyykXSUVdsbWbuj2Tm/uHYVgYlLSBbudrSvnYc62RV3\ntZGE5qaRB9hS56ac9457Q2uMpqSVHNLURYcwDhyO0NAa48N6a0vdPa3ZGoGmG/z10EL8J60ATJra\nrfdibeP6LHI50Nht/aOk8I34CKnEakslu3/Hg5wP428ExzNzBPjv59exYvNhbr9yIsm0hlxeRzJY\nx57O/QwMDONnz3/klA1OWsTjexYB5wHw4gd7WbDccgJ/4/yarFl3Bg3RRn76wRO
k9kzgkVvOR5Gl\nI/ZFNwx+8tQafKM2IxW1cChaw2//ZM0Ye9rI//2J1QB8/bzRnD7xyNuJAsgSyAP20KqH+NHjpVn1\ntdtCqMTf2+HoJYxkSifPY1ZJpjUK7JmYN6pMHrCLneHFrG5UmV41mcde30pDWxzdMJEqD9A+eAeH\nIoMZmJ9tO09oSVJ6iiJ/YdbxH/5mGZpu8ugPT0eRxSzCeLd2MZXBCn711OGse1IN913oSnXT2elu\n5BRPqfxy3aP0z6vi+5NvBGB72y4e2vxbzHSA6092d//LoDHu7gsu+JKOP+ZQpA653J1xx9UEISXI\n7f+7HDAQApZANVNB0jaJvrxnQZagTWgJuqIp7n56Dbphogzajmwnb3jwlbV8oWYgK3cdJDhpkdsH\nOTsqavPBRoKZva1MS3j+/t3dHPQvYX2LZ5ZtC92uWBowkIotwhBMKWvC9YclW/DXZNpyiVjVDKIe\noo157mOT9h7YCoJu6jz6149AzaToN2kOWxqYWNgKh4cBsC+6C7/hrm6PezSWw20xHv7ravwZ/7Os\ngt2VNTua8U9oQrSXnkiS1XBbd5IfP7GcoL3X1vvrGri8xnAIZdX2w6jFe61ZvpKmuSNOVbnCU9t+\nnzWeO2o7AAGl/16kEpf0eo778SCnYXzO2Nd1gOe2/4l46thnrpqh8dy2P7Gv6wArNlvCpqUraWkY\n9kee0JJEssIVvTNPj/qe0kDUeLnlCbZE1/Zq65FNTyHkdSH320c0cXRSy3y0UpE1o13ZvKJXmc5U\nF49vfg4hYKX7jnn6uKN9Ny9sf7GXXTxhRhBEE1Po3X5H0iKM4kBvwuiMuTPzbEI2Wd+yyTGneM1E\nctVBNCnBxpatmKbJwaYI3bE0CAa+wZaJ6EB3tlO5NdHGv3z4E/5r1f29+q77u5BKGhytRbXbEnwJ\nXtmzgEc2P9Wr33s79zv/d6W6s3xQjYkmYmqcxpgrCFoSrXadSZ7Y9Thi2DV3AFnaiOBLUhS2JNVH\nvJJVzjv7FgJxx8IiyKqjYfQPV2VdE1XjtEdSrs/D7wpNQVGpa445moFzXNJBcN9B7+wbyX0fssgC\nMIwonQdWEImriMXNCIodXSXoJNLudVnt2f/Xr36SSDRKV6o7q+8ZJOjEiBWgHh5i1eFzZ+OZb6on\npEIrIEA9NArINnF1x9IIfvf9y+qToCP4rHOCZCDJ7jsj5nVnXdNqa3sHuw+xtMPdbEzwWd93T7Oa\n1THrfTH17MndsEFH36PmWJAjjM8Z9617mJWH17KtfYdz7OOcxR81b2Jl41ruW/ewcywvoPQgjESW\nDd/7ASBlCzUx3ElajLBLX96rrbakJXxM1Z8l3PtCMmU77aJWyGRnuqNXmUc2PsWGls3I/Syh6J3d\n/3rD4yw/vIZaj50ZIClYH5Ep9m6/w+6fX8peKdyZ6uKJgw+gDLYitrwzUKmsntfrXubJLVYKbs0x\nSZkgWuW60910x9Ik7HvyCkJnKmpjY8tWDNMgriWIqO6+F2lVJ3DycnwjNtEcs4SLrhuI4Q4CE10b\nf0Z4uPW5Zo7udMTtu6RSn7DGLaJGHRu5LGY7pMViV6MQi5pZ3f2+p62ko1n1RGui3Xaqm/hGeyYP\nctrRMCQhW2TE1FiWJpcxKYFFBLGkmkUIercdLeSx6XuFqSBpDKrsOwrM0KN0HlxBJJ7GN3gbpiFg\nJKwFfwnNHXdBSXn+t+oeMP2byEowizAyGoZmaJiCjqkpmGlLqGa+F8GXIDB+qVufl0jsdvRua11O\nTHPvfWf3dnxDtmeNhXeMvO4OFY/PJeya0JDTNLZbdf732oeoVbdl9SMaV4lrvQnDuX+jhzVA+vQa\nRs4k9TeCtMdubpgm0lHWL/S0lYJlDoqnUwglCadMWnDr9M6SBEnD1F0hk0UmR4IufewCq2TGTyBb\nwiClZ9vldUPnUNQyUZmqJeD7cgKrRnY7qhSxZjaiYc1MbbOFZmh02U5Rb8QLQF2kAR0NufIQWtNg\nkmndIWKxwBLeJiYJLen2QUk5H3JXqtv5WCF7jBJq9nh5TRGdqS7HLNXs8T3s7NjDkNIqVN1ALGrO\nul4saEdvtcxyHzVvYkm9q5l1pSOItnYUnPIe2zy32ZnqpCJUTlLLJhzRQ27+UR9lnUNJUZTn46BH\ny0zvOxnfsC00x1qJhVWEvG5Ev0cwejSMuN3WndO+z71rHiCmxok6zl0zezYuq0SjKkKBdX6wOpO9\nyUNQ0IEgq5iaDyGvC/+Y1VltDanKp7ap94ZTzWtXoMbbePBn38McahKuGknzijUopSbJg1sYdOoP\nqV/zDIZQj2molM0cSPFJFqHse+9nbJ/4bbbVr2bfcxvJG1TE/ob1bB24ih/ffbfVgC5jpizzX6xp\nOy2rX8IUUygb0wy+fCxSyI9pxmnc+CeSnXWIqxNUnjEIfypE9+42/vT4U7znf5WioiLU+WEaP9iP\n5JMoP2UQgpzmwOL7GTD9GyhVm9jx65WEhxQTr+vGd9ZYmjavJdl5COQIReNLqDpjKIKSYv2GjTx6\n3wvsbtmHKIsMu2Yi+5/fSMXkcqLJycRUld2/XUf1haMJ2kQrKGnMZLYJSjDFLBPc8SJHGJ8Cj72+\nleqKMBd8YfDHll27o5n31tXx/740AZ/S2w+wfHMTYAmaX/xuPd+/YgKvr9iNIMCX5libSv158V40\n3aB0RG/BrWoGcbPLEXhr9zTQb6ibF0kIel4WMVvD8M4KAXx2qoy31rqmEUSjF2F8uLGBrQfa+fb8\nsTzxl+2I9uRTsGePqulKt39/YhVCqAsyoeO2SSKjYUTSroCIqjF+984u8kMKF8wcjOGLuqqwpIHm\n44/v70aVXVNLysgmjPakO1MT/AmSac1pSwy5kTc/enoRna3WjNtLCm2JLnYeyq7D6V86gW4YPPDi\nJmaMqaRRd/vRmewCe01aoydQYE/3Ps5ltkVOerZGkJkRNnXEeXbVe5AHp/afyYcNK+hOdRNIZZtw\nMuhIdlERKidhE7PW2h9faTOGp6+mJiPYBB4S84kF4hT4fM4MX2+vwIhZ791bm7YxavoUZzast1eC\npCEVtpHUUqRUnX2NbQiKyLLVUZIb57DCFFlt2DNpSSO1cY6nbQXDMKCznOSGOew2/OjmaLSG4Zj2\n7NdMB0husK6RShoRC1sZXFUAG9090zOonD2BZFOCC274Aet5lc51PhLNbQz68jTMhjkYUeh/6lxC\n47dgqDq7H11HwUjL1yQIAjvTFjGl2xMMu2ISgeIyzPc6efbFV2EwmLqMmbSEbv6IPIK+m5FKGoh0\nvUnj2+30O30cTWtWIPoGMmTObfgnLEZPpkltTlP3wQ5GXDeFSmEW3z3lbH665efW/dn3KfhS9t8E\nUkE7qbYEA848jeoL06jpA5TVnIevsgNl2Eb2PrmZxKgkgao2nv3lvdzz01/wh8hLpFIJ9JbhlExp\noWvPFnYWnsaqvRswdYNgZRhTlxAkHd
+odSTXneU8Y/XQSIoGtmWZ4I4XOcI4TpimycptTbCt6ZgI\n4+FXLTPDtgMdWautM2iLxsgQxu66LhZtqGeJ8RSmIfIl7sU0TRassGznlw/pHVmk6gYJwRVcta0d\nxPpZAr60wE+XR033mgPAQLRnhULm5bZZ509LNxPIbJ8t6r0I46mFlhlt3qwh2StqbdVX8xBGXUsM\nqbKOjDFEsEkrE6FUH3UFRDQd4911ll3+zMnV2dqRqGHi463Vh5BKDuMbYR1P6T0JwzWHCZJGMq1b\nhCEYCEGXnLrT3UAZYKIM3OVWIBi8unwXYAn3LA0jneJwa5yt+9vZur+dorFNYKdC6vD4Cxq6XZ9A\nl+2c13TDGZ9Qqpq4v85x5m7Z105STyIBs8pO48OGFXSlIwiq3uOZ2fdo15mJiNOaBpNXHEf3R7F8\nVgJmOoAgW/dbKJYTV/bhl9IOqZu6jJkIY2oKCV8zb66qdWamelcpYoF1D3E1wZZ97cTVJAISopKZ\nHRhoug6IzkTE1BS7jky4qfXXMHCseYKoW85eUUQ3PSHO4U5KigVkScA0rUhVpWkcauVmRJsIu9Jd\n4ANT9REor8BXFCDVbOeSalzHzqUbQVNIR1Ko3d3IFbVWqpRoAUIgSaikADlcjq5ECBcNYMee/UiD\nsTSMZAgVe2coAAAgAElEQVQjGUIz66lb9Ti61g5SAtlfweDScvbu66BqwjzARPSlkM0COjtqCQ8p\nwlcUoN3YwMrdE62bMUFrGoSpg1RxyHr37O/QVxBEjp2BEVuBmNdN5PA6utd/CIKK2mmS6ugCOQ+l\nSCQiFqGhQrICvWUoRSfvo+n9j6gfGKH90DpKJvWjguHUbqsiMG4ZgqRbwQ32M9ZaqvEPitOuu+/j\n8SLnwzhO9Ey/cawwj7RAX8wWCM2dthAXrY/Jigyx0BrrnUBN1QxU0bXPIqnUtViC4sJThhIMecxT\nGR+GqBOY/D5SiW3zNi3C0DQDwzSz7bWSSxh7Ovez6NAypLI6lOEbaenwmEQE3YlL1/EIDUCyTUHW\nj8yqdOt8c9wN/Yx6/AD3b3jIcS5a11njJPgS+Ea4TtGeJikvYZAhDN1AGbTD0sJMwa7Hnvn540i2\ncDRVW+OwP24hrwtlwF6nuriazHqKpmchmtdG3p5w+xDXrdmdphvOhyx1DrTqtwV0bVPENhdKiIYP\nRZTpSnWRTGsIHvtzRdCacOzptBZ/ZQgDTYZ0yNIoMuYIu63/N/kmCgQr3Uta6XDMhugKgyrzMbqL\nEf0JyyeUaUtXQLPGIqHHiSbSlilJV/jSmSMITFxMYOJifKOsSKHAycvwjV6LGIoQmLgYuf9+EHQC\n45ZaPht/FLGgzbnu9usHMe2sJue3MmgngqTzSsPvqShREATQO8vpPjgAvbMcMa8bQdSJaDYpaz5E\nyeqff9RHjCnvJNVcx8hvT2XIWTcQrCjC0HR8Q2zbv01oAbEAIxFGEE1SZoqUaj3nYZWlgIDRVUb9\nwu0U10xi+BWXUX3haEzN4LSRYyw3V7iLqgofCAZjq/sxwJM4URAN3k09ab0XyUICYhCtfgSCaODz\nPBdBCFptRYtIdyTorHuHEdePY+T1c8grr0HrtFaoC5LGa13/a9Wn+UD1kxcIkz+8hGjzRiJ12yke\nV4nZWo2ZyEdtsFauC/6EO8nQFQrkEnxS376rT4IcYRwn+lrcdSS0JToITPwAsbAl67jXuS30cEQf\naM+2cze1xwETqayO2m7LKWwa7uPTdANN8URYSBqHbDuwTxadWSzgkJPgSzjmCsg4lU10w6S1K5nt\n2/BoGL/66H95cfdr+IZtQS49TG17b4FuNYBr/pJTiIWtGCnbqZjRMOwxaEm4dURSrkbRmMg2TWSE\nrVdLKPIX9iKMTMy/dY1KMq2hGabjP0jvH2t1K2Bf57H3aq22GcMmE9leTJZBQku6z05Oo/pcYsi0\na5om27QPneMZ56Smm05biYjlx8kQxsHGiDV+ukxKMygJFNOe7LSc3vZzUvQwd07/PmXBUtY1bSCu\nJhyflqnL6HEr/YMYtLQMQUkR0ssYUTSUAsEimqjQ7BCQqcvkh3yOWapN2YNUbI2RqfkcX1Pc7Ka2\nvc0aE122khp2WvZFQUkC1n2Zmg9Ts7UyOY3cf58zjqbmw+ioRK21Yk3rY41Zi+wyaEk24Q+a9jWW\nEURrGYDokzH0JHHDevam6sfUPUYSqRMpKCPKIsm2CPGG7KALQdIsTSqlOSlDkkYcVbfGNuy3828l\n8zBSOr5CGUFWad/QCKbA2LIa8keU0LlvFSm/pVH7NR9SuJrI7iRp22el2d+JEixGSDRhJMLEGyJE\nOzsQ82yys18fMxVET+mIPgnRL5FuDBJr2YnePJBgXn/UaJp4g/Vd63ER0zQZVjCUkmkVtGx/hdCA\nAqSgQn2T6tQHOBqGaYhgikzNP527Z97Za6w/KXImqeNEz7QaALXddeyvTzBr5HAUWcQwTdbvamWX\nsRTBl8I3YgO6foZTPsuM4o0aCUY4rLaR8XRs3NtMR0RFzG/HN2wLhzPWJU8UxKY9bZgFUTAES1BL\nGrV1liaiyFLWLNgRut64bDUAStIS8IZMXXO0F2HEEmqfC+0aOj3CWe5hOpE0MGTE/A4E0URrHYA4\nYK9DJJpmsGxLA+vbXHNQdyoKFPRpt3c0DJsAvzr6Mt7Zv4yo1sq2/W3UHe6iJXWY/V3uoilkjb31\n3fQvy0OQVYxYAUbMcjTkF+qk6tyxUA+NtGZyeKJNMuTWVerY8zNrVvwnrcAUVcsMI2mOZlPb1kZM\nsEjQSAVI+ZNEEylWbWtyxj/SKRMwcQiktjlKoFrDTPtZta2J4tJimuIttGh1jqCRuwdxoD7G2KKx\nLD68hHV1u0lmggt0mURHPkopSOWHEIuaEUSTZERm8YZ65GQZpilRl96NMtg1H+UXKZjddhK+4u3O\ne2dqCoYdAXQgtpe60IcgWOGaq3c0kd49Cd+kd0FWEfwJS3NTfZjpgKUlFbZhJj35izQFENC7SlGA\nDc2b2W1rST1hZhb3ZQhB9SGHFEJVFax78vcU1BTiZwiioZA+WINv8A4aBx4kujRG08OrkcVWAkWD\n7f6bCIL1jI2khKabTnRVVypCLKVRSMhJRGimglSeMYT6hS+jbA4QGlhAyhQp8hcy8NRx7PvrWjb+\n+SEQBUZdWU1Mq6by5MvZ/7s/ASZyno/h10wkXF1Dqm45+xc8R94IEX9pyF33YgduGKkgwUFhgv3C\n7HxoNUqgjGDxUECkIDKRwZcdpv4v2zE0A8HMY8DkGkYVj2Br/21IAZGSSVY+MkO1EwymLcKQ++2z\nfJP2+IV8PvJ9R89BdizIEcZxomfiPt3Q+fna/wFg+66vc+NFJ7N002GeXriD/hNbwQcYEgnPegBv\nDLXgIQwrB46LB1/5iEElJb3CYREM8kM+IvE0O+vbCFRGMOMFVtiepNFt57/xKyKm5PVhZMI0M06
x\nUYihbqTSRkfAb2/Zh1LtWWVraxjrdmVrSQDN3RHIiJketnZB0jBVV8CbSVt9t4Xwxr1trGhfhNLf\nDQWNZKI5PGYYPVKElN/pjpMtZNvadRpbU0gFGnc89CEgIA/cgdLPQK0bgVK9B0HS2LCnlQ17mwlO\n0zE0BdNelCVlTFKOTV9xZtWCkrQCADIhlgemwIS3SeopVDsiTAzY8fSyiqiFaEtYZq3/+v1ylLGg\nNVdbZORP8r2H3wfVj2+MhmkIFuHrisfcZIKkYep5vLu2Dt+QBFIFNBa/j8+ORu3qMvn579ZTPcKA\nEvjj7j9jyvYs3RTRuotQALnM1czSCZln3twJgDK8nEhpI2JGjusy4aAbTpoFTcFI52OqCnKFu+pY\nDEb57RvbMUxLQImhCIEJS6wuaAoYMnpbP+sa2+8gdw90TJ5mMoxihhyyEBD4cuVNPL/5dacdof9u\n0HA0iAyJV593CmJBO4KskvxoNGd8fQ5rDlkmp6SQoPqrJxEQQ3SsPM1qt3wHcIARX/4acngHkhxm\nyJzbMFWLgMMnjySUCgHbCSlBQMNMhigcX05hjbvZ0ujpVwKQ5ytm0KUnOcdPH3cm+Wsklqo6pefN\n6hGZlMcPf/Rz/ufljwhOfTdraIecfROyJqLb79qgS6w6UzunYHRZ7QbS+YxWvkno238EQG0Yilbn\n46Sykfxpg/Xe5o8occcd8JsWKYghWwuzAz0CviMvuP0kyJmkjhM9NYxme/EUwOrtlkq/z07Z0RW3\nPhrTELPWA8Q0T+RSH07NDARRo7Y548j0HJd0CsLWiyLmdSGIJka0yI6McV9cRRYxPITRU8MwNcX5\nMDPn6hPZi9MyUVKxPhbvtXt8KkLPWO8+2hJN2THBdcfSjo9CShTjl3xE7Ygp12nXH1oH91lfdwQw\n7HmPTUKZ64yovZgv0yfJ7QOaYglYKZFdRpO5cJplMhF8KQZV5CMoKUxdol9xIaYukVCTpDSjlwYk\nqnl0pSOkdRVDsAlS87k+EZs0BVm1Z36C7SBOO/0XBNOZFRqpIL1gR1jV19rCV/aadATQ/Jjp7DUp\nGQIEMDqzd5zLmKRMtTdhmLZGYESzU5+bhuSkIDHV7Igv2bTqccYea11Ov9gpngpERhinuj8xmTVm\nEN+ddhWTS6xlzo3avqz7dbQ+XxLBH8eI53PV2aO5ft4Yrj9nalYf8hU3Q22GCH1DtyJIumvCsutD\ndgMA8ny2hpHuPe43zLOiP4JiftbxylAFXzt7FP9x7TR8PVwERrSIQRVh9/304AeXTWf2uH4Y8QKM\nuFun939ZEqgqDZHaPg0z7XdCrzd+uJbdj6+l6qxhDM0fglQ3BQyZorCP//z66VntZL6THGF8zujp\nw2iI9g4DzJCDKGUMltmE0Z3yOK+dUNc+nOJStiD0oiDfeoQZk4UeKbY+fiXtCEFJBiTVTQbo+BWs\n8yWhsPsh2W2kNDvssqvUmg3bGkYqbc+OPfDaoTMLpRw7dg+NwNR8Vqih2Nu0JR6YSZG/iC579ud1\nzJaGM05AeywywlfzuStae2hOVeHSrD44JKpbgtBn5qEKcfucq2GcNtZyHKKkGFQVRvAnMdMBqsvD\nln9BT6NqRtaCO61xMELamrZ/cOhDN4eRprjCTk4jD9yBGIw54ZboEoIvbZmQQvYCxcysug/CyNj0\nzXQAI9l3umovQQCOoAHQu0t7FBYI+qS+NQy7jxnzHcDs/jMYFHfNqpnV1hmU5RXafXClp5EMUxTO\nlqaFRjUPnH4PY0tr+GrNZUiiyPjhpVQXZffPebaagmlaa1YEAcx4mJrBxUiiyPQRgykLlCCLMpMq\nxnN6v9Pd6+PZAj4gWfdZErLfJ1l1vpOwL5R131747b1A8noQRnmwFJ8iMbgqH9MT7ZXeOx40n5OO\nRu8qRcKtt6a6wtJeDYnUllMY4B9ivZeeZyeJApUlIYxIKckNZzghv+ef/0V+/L8/Y9bsU7l50nVU\nYKXqLykIUBLOo+DwaaS2zbBuJWb199PuZZJBziR1nPDmBDJNk4Zo741aMoudRMkua4hZSe/aeoR+\nWoV7C1JB0qwgyZ4mKSAUykT72I7PVBCjuwQpvwMxvx2js5LX619yzgmBBMqAvWjNAx1tYES/MtbU\ntme1ldRS4AO1biT+0WudKKmkpiKIJnpnGXpbf3zDN3kcxiZyv/2YJugd5cjlDS7ZZcpoCromZt2n\noKQQ1TxSSYHSQDFN8WYQNQTbOW9qClXFhXR67tNLQJkP3Aq59Ttj6RdDCAh9ajkAQTGPLpoAN9QV\nXabAlw+m1a+64GKEtIqp+qgsCUKHhGqmSau6syq3Mj2eA7X9CA6tBaxEiMpAPP2zSF0qbUSusHwr\n7sI46/n5hm51H6pNGHpXH9sVO2s4BFJbZiGGO/HXrHVs8pBNGKmdUzBTHmJRA4wqqGFXtxUSbSbz\nrDDqPoRkpm+GZ0+Jr9RcxsPbNwMtWWOZQUAMuved6U86QFFpNokZpokiynxnwjezjs+pnoUoiAzM\nH8Db+5ew0dGIBNAUh6CMeIEVzAGIgsh/zPwXTNNEEiUi8TTPYq3OHhQeTO0u3VnAWJIXphsYUFrI\nLl1CUNKIpjVpKA4UAJZ14Cv9b+Cpv+5AGbKNEZXucwhIQSezzpSKCUiiO26Z8GC1YRh6mxU4kVnT\nlN45lSvPG41QVks0HUUUxKy8bFcN/xq/fnkzCVxLgCSJR9xPZu6g05g76DT7/oWsv/5UBUY0yuDO\neezYZ1kxPqudNXMaxnHCq2E89PJmlh305sI3eWt1LRv2WGaqhO7amJdsbOAHv1nGDx9exs5GTwqM\noxBG5pwv4LaZSbFQG7TTPkiuQDZsQZMJE21OWmTmnWlKth0YYOzAKlcjsDWE1qgdhaTLljARdeJJ\njZitTZi67NEi3BBOMRTB6C7FiJTY5/oQ1rpkCUw5TSaSRzaDpFSdPMme+fkTjoYRkP2UBSqsuPxQ\nt3WNrWHEYgJkZqGiq2GYhoBf8hOQ/S5ZeUgLICSFARPfiI1O/itTU5BECUH3I/iTHEpbfhwjUoxf\nkZBQMFDpSHXgG24984BZCAjEunrvlmZqiiOwM2QBONpeJmIo65oMKegKqe3TUBuGec65c7zivDyM\n7jLmyNeS2jrTLWObpExDdOzhXnxp6JepqL2MxPrTMZNhR/CqDUPRGgdz57Tvc8fU7znlja5S9O5i\nrj3pq9b9emar6r5xaI2DLU3UhALJ3r5W9RKGv1fK7SPt6xGQA5w9+HRqSkZy3divY6ZDVBRnk5Bp\nWuG2smffB1EQHeHtTTJZXZ6P0VnhaGahgFWmqiRkmQP9CcTCVoRUPpVhV7vpX1COmQ6S3jWF2cXn\nO8eDinsf3zz5qqy+OyHzpquBC4Jgj69AMmUwp3oWXxx2DuDuJW/1J8+a3HggScIxbUCW2aUv8zez\njsqnF4Lmd+r6LJDTMI4TXsLYcOgAgWKPM1jS+OP7vd
MyI2pZi98+OlBL5vtyBGsfZqfMMcVnkAZS\n26chV1o+hiitWAI0EyapYCZsf4Q/AZjEtThiogitYTi+/vsxRZ3+VQqqItEFDC4rwUzYaQWCEaev\nAKP6l9AgKhiiRgroiEUhz1LfC0tLaAIKC0UKggaGqNEMmKmQa4bx234a2TaJ6bITIRKc/L6V7E0A\nn2l9GPFuxel7hoiCcoCg7MfszkMq6CAw5V3XBxLVPFFNacyEPV6GjF+WCEgBYpLttLZJZkBxMeWF\n5ZRVRjncsMddh4IrkAv9BXTq1jM1Yvmoh0YhjxbxC3kkpC7WR62QWb2rlCADgE6MVO+Pe9zgKjZu\nVjENwVmfotYPR++01kQU0p90ohgj6AkB9chSI1JKuTIQv5yiRavP0h5GDypi5dYmGpu1LDt55h6O\ntD+SLIkEfYqTjfULY6vYUduBblQwqDLfycT7pTMMJ6tvLDmKaVVDgGx7uJkOotZamQgQNQomlAP1\nWRqGqAcJBrJFzbFsJBXyy5wzbSBD+xVwsDHCYqx3yegu4dSThvQyczntiQLzZg2hrDDA5FHlmILA\nTsqI0UhC7GDK6GmcMq4fi1f5EPyWGfCU6slZRBgOevrr6eqXJp3G3sV7OXf4ab3a/ebYq3h++4uc\nVDwJIRxkUKU1+fnxNVN5fel+5kzMznA8fngZm/a2MbgqH38fPgZZFKksDvGFkyqtRcJHwNfOGYUg\nwFVnj7Lv3zpumCY3XXwym/a0UlHUhz/sOJAjjOOE1+mdlTAMPLmaTKSyekQ79YaXDARfAqmkCcGU\n0ZN+O1bd7B0JBc7MOUMY86aP5C9rTDd1sZy2BbLghiEaohUWKqvopo6oBQCBkeo57PIvJL9fC/u7\nLRNKcSjsONsy2TIzff32vAk8vHk1TVFLW+mIRyAPJg/vx1nDJnL3ync4eVQeqxvfdrprpv0Y3SWY\nhkigooVo/UhL+NtOVG+4rtLvAAB+wRK2sYgCPpswbNJSBL/luI8VIgZjDlmYhkB7JIUpZaKabHVe\n0jB1GZ8iEZQDCJLtRLcjdvoVFfLtOeN4v7Z3csSM9jG8tD/rmi3C0BqHgO5DlkTyKSNBA43GXkxV\nIb1zCsnBlmTuy+cwdXg1GzfWYkRKHOe+1jDcIc1pNRXUh8vYF3P74kSSYZkZ7vnWDDRjCk+/s5kV\nmvuuDanMZ+XWJg4191jImVmUeAT7gSQKWcJRkUW+deHYXuXOmzGoz+sD/iM4UL3OXc//kh7o5XQ1\njmEZkyAIzla5M06qpHHdaHZ27USrH8k3bh5z1GsvPc3Vyu78+jSW7/LxwPpHOGfw6cz4wjgg25x2\n/qjZ+D0+hnDQoyF5CCPPH+Cn59zUZ5tTKicwpXJCr+PV5WG+c8m4XscHVoT5169NOeI9SJKAKAp8\ne/5Y5k6p5r+eW9dnuZKCALdcNt75ndEwTNN6v6bVVByxjU+KnEnqOOENq80IIjFtxzlnVvKWHsY3\nzLPdo3fXrSorT5NsBDET1voAb8RGFjL1KVabhUHLFKE1WpFD/lEfWZu/2AIZBGQziOCP4x+zyuqj\nZs0mFXsmnyELsGbw6ApGKmjnWXKJK98fxC/50AXLfNSVtMgvpAQJyZaAbI67EWJg29AN2TJlKBGU\nYZusML+MIOuR7hogKFpj195qvZK+wdtRBlob+CiCD0US0Vtck1p6/1jSuyfRGU31zjBqL37zySJB\nOWiNn5J0MuTmS5Y5r6iPlOgZQd4vz90vORMlZJgmJZnNHsBe7CaSyKQf13rPeDOOVL3Ds/+y6X52\niizik+2oqGSIacp8x/4N1kxXEAQUScHXY0/monzL1JPZutbnmDhs34PZt1SWJfFTRc18UgeqpId6\nXXM8W9VeUHUuh1/194raOhaMLB7G7JbxTCw52TmWeV/KxGqK/IX4PTnegh5S/LTb6h4vvCY3fx/5\n546ETD6549nO9+PwT0UYa3c0H/POcQBbD7Q75ZfVr+LB9Y+x6NAyHlz/GCnVE29tE0Y6Ypt1MkK/\n15oETyimLTTDTV/AsGeUYjCWlSJEtGc8gqSBnKbbb4UaFoes8hlBKYatqCKvfdtPHoKiWnWCE32h\nkD0LvnnC9c6MxIgUIShppPI6pIJ2ezcw2UkpoAzZSjRtEUbYZxGGX/JxwEM+Vr9sG3pmEZG9JsBU\nA4693IuS5BiqJWvG2NSH5q0IPkvDiJSSPlhDet/J6C0DMboqSKV1d92Eo6VpmJpHwxCsPToE0UBr\nGkSRYtmqi/wFWe3oHRVkhG2VhzAymkNbV5Iqv0tahm3Gc/ercG1AWtMgpGgV+b48u+5K+xpXewCL\nMBz7sikwrGBYFqH0lagyg4BPptJj4y7Jt94Hvd0itUtGfLHP6yRJ+FSE8UmEF1ihtgGlp4bxyYWZ\npBl07l7/8QWPgJdf+hOplKvdavUj0CNFTAudC1imrAwET7boz5IvdP3Yd9b07rkuHmX/9Z5wNYwc\nYRw3uqIpHn51Cz96fNUxlVc1nfv+sMEp/7udf2ZXxx5e3P0auzr20Kl6NqXxxzFNHD+AQxR9PS8x\n21fR1OheJwSijrlletVkTi+4xDouq/g9+xP0K7EEXc9QSO8aiJ6z0YyJQBEUfPb+CacNmMWY0lFO\nkYxQcyN2rBfvi0MtJ51cUYdUbS0AG5jfD0mUmFHVW6XOxPR7yUnvLkatHU1Bno/UzsluWU1mgDrN\nEaz0CAkFCAhhZJto9KYh6K3V2e1lNAwlZa9lsO7Xp9gaBm5Kc729Etk28hb63N3xUtunkd7t9qsq\nz1Lj8+QQX5w5BIAR1YUUhcKOZpcJLvBubas1DsJI5KEeHENB8yn4MpEwqp/k5lmkdkzP6rsiiRT5\n8537KAhlayleQpgwIjtqKuCTGFLlRjBl9pEw4wXcPv7HzB14GtPHWPcxdog7K5dFkZKCQK/6jxVe\nshk+wHoX+9v5lE4aUuwIuglcSGr3RBRJ7mXGOnlYySdu9+knH0GNt1G37H94+GFrkezvfvcc3/rW\n17n22q/y5JOPAZBMJvmXf/k+3/jGV7nmmitZuHAhL730B1pbW7jllhu59VbLpKS39yO9/QsUBQp4\n+unf8q1vXcOBxffTtOnPAEwaWUY61sbzj/yEa6/9KtdddzUNDVagygsvPMM111zJN77xVR599DcA\n3HLLDezcaUWfdXV1csUV8wFYuPAv3HXXndxxx//jtttuIZFIcOut3+G6667mmmu+wtKl1t4oM0+u\npLtuHQcW/4qDSx5g6YLHiMfjXHHFRYSD1viVF4hcccX8oxKP6DFJfdb4p/FhJNVjZ3bg43eX09zw\nN9GfsNIhZGyits9h6thiNtmLuX1qCWmlHUHWkIUApcUSHYaAronki4VowMk1QbbssGZANcUj6U5b\nAk0IRrN24qosCvOf189gb9ce/njQTcAnKCrhoEI0oeLvQRhCOjOzFcj35dOWbGdcmWsHlkQBvbOc\nfMqJ2CGTG
fIaXjSEfKmQiN6F6E+iNQxl9BlW7Pe4spOy9m8A+NcvzaIsXMi6Vh8v77MIRj00GjNW\nREF/H60NFWiNg5GrDmJqlvbgmizcmZR6aBR6exWhYWEUj3r+X9+awdqdLbyyxNK4fvSVmfxqxyLL\nxODJwqrIEtjpHjIRY0a8wJnRF/o9C6aS2TP/fnmVfHvcNQzKH0CRv5DZ4/tRWRwimlBRa2vQmgc6\ncfEZk9SVZ47gD+5eRUii4BAdgJnI1mgAZFnk4hEXsHhjA2r9CAKTJX51y2zASo8+sMJN5zBxRBnX\nnl/D03aW4IBP4tLThjFpZBmSKFKc73cWjQZ9fgRB4LovnsQVp49g+ZbDbD1g+UkkSeC8GYMYNbDo\nmKJweiLgk5EH7kAqaSSWH6BqqDWrrTIMXm1bReUpJqYJO9M6yiCVuLiLx3Z/gH+CvTo56OPt6Cre\n7rFf16SKcVw6Yt4R273pplvYu28vj//2BYJ+mTVrVlJXV8vjjz+LaZrcccdtbNy4gc7OdsrKyvnF\nLx6wxiIoMHWqyR//+Ht+/etHKSjIfg6yJHDZZV/m2muvJxJP88tf3M3y5Uu56eJZfLTgl3ztm9cx\ne/YcVFXFMAxWrlzO0qVLePzxZ/H5fEQivZOBWnDf5a1bN/Pss38kHA5jGAY/+9kvCYVCdHV1csMN\n32D27DlMGGDyx9YVjD3rFmKqzIzRhYRCISZPnsKm9av4+Y0z+fD9vzLo9LlI0pG1vIwyciI0jH8a\nwhCOsiFRX/i4zYISzqY1JigppEQxWo/V0uEwELdCJwdXF3KQ1Yj57ZSZZdZCsbi12tcvBtEAjZSb\nUVP2kySIqUvZ2VptDCjLo6L4JA7qk1m2qd6J9Mns+RASLPu8IsrcOP4b/O/WRkBFECxhmNbTjCwe\n7tTnUyQSKZPx+kWYFftY3vZ+VnsFvgIiiS7MtB+haYwznhUhd9Z77uAzKQ4UMaLKmtUWxz0fph1m\nmZlBqw3DEQJx1EOjUEb3bVM3U0HMVAhZErPiyPuV5hFQ3FTNAyvCBHblEVNSrjlQl/ErIrKc0T7S\nVuJDXXFsw7Loef378D9MKHcdwZXFlmDND1p+ogxZgDXmfkVi3PBS/uCJjpMlIYvo+oIii4SUIOoB\nq62AX3JCUHuGogIMKHeJLeCT8CkSowdZ2oNXQGSISpFFSgsDTsglWEQmCAKjBvbhwzkGeLUFr9lE\nsl7OvssAACAASURBVO9VEAQrd1MmlTnZ35/8KUI8JVEg6Lee2+rVq1izZjXf/OZVmKZJIpGkrq6W\n8eMn8pvfPMgjjzzEzJmzOeusU0kkbN9cH2q/KAqsW7ea3/3uOVKpJJFIhFEjRzJx4mS6OtuYPdva\nr0NRrDFcu3Y1X/zihfjspd35+fm96uyJadNmEA5b74xhGDz66ENs2LAeURRobW2ho6OdDRvWcdbc\ns9mWCoGaxh+w3rl58y7id797jtmz5/D22wu4444fH7Utx8T88cP5ifFPQxifFD0JwycqpD07waV0\n2xYqWInNArKPZIYwZBVREBDkTNK6MvoNHsRBczVSSSO0jUJHdXwOQTFIDEgZSQT7W/RLfuv1TuYh\n2NpFdbg/pw1w4+0VSeG2U77FB6/+GZ+S4ltf+CK/XW9F0fQXxnDR5AmUBIooCRQjmNZaDEEQuOak\nK1ENFcUjMH2ySCJl7fw3INhjNTBWfDxYkSU+z4KjYr8rdMaW1jC8aIjzO19xhWrGz+Bsh6r5SO+y\nzFmyLGaFFRrRAsRwt+MjUGSxVyoW78zdp0j4hSBxOe5ESpmqgiKJ+GSP2c7ug1fI5UtFdGtdWX6D\noyEv2HutBVhC0DsuVjtiVj/7Qk9C+Tj/gN/TRk9HcpZQFrPr9fb7k06eevVBkdAO1aAdquE/7zzz\niOUWbajn2Td30q8qn3/56iS+c7+Vb+rWG77gEPCngWmaXH31tcyff0mvc0888TwrVizj0UcfYteu\nzVxxxdVHrEfXNO6//xc8+eTzlJWV8+STj5FOW0EeR2oXeo+hJEnOam/rehfBoGuefeedN+ns7OSp\np15AFC0TUyqVdgg/U3OG/8eNm0Bj48/ZsOEjDMNg6NBhHA0nUsP4p/Fh6H1klz0avPtX17VEScaz\nBUU0kw7DdmIHFZ8zSxWVNGWFAYdUTE2hUClCNoIIgTiabpA0kgiGveLYFmppM+msp/BLfnyymJX+\n4bwhczllwIzenTVk0tu/wJTKCc5MMi/gY0TRUEoC1uwzbM/sg36JkBKksIfDt7TQ9jvIEiXB3qYT\nE3vmbkieaByyVro6foi+ftuJ5zIC0jtTVmSRoEf4pXZOJbVthpOCWpFEZ9V8xmneU9AGpCCC5K6+\nNhNhEARnbMFNV+FdxPSV6utJrj271/0eCflHIAxJElGU7D7JkpBFTn2h5wrcjyvvHfujOa5TPUyw\n4SP0+0Qi0z9ZErOIsCeZHStCoRDxuJuwc8aML7BgweskEta3aM3UO2htbcXv93POOefxla98jW3b\nttnX5xGL9Q56EdEQBCgoKCQej7No0XtO+YqKSj78cBH8/+3deXxU1f038M+9d2Yy2ReyEjBCEAWM\nAsomNMgiQcKSFKIsVm1Q3BGiCNIifUqr/YHlKTwqlmKlVV7Sal36M6htQUULYl0ALaKCYkggC4Ts\nyyz3PH/cmTuZbDMJmSQz+bxfr76aO3MzOXNk7ne+59zzPQCsVisaGxswdqz2d50T6FVV2he6pKRk\nHD+u/a133/1Xi7/jVFNTg+joGMiyjM8++wTFxdpNIddcMxbvvvsv2B03ljTUu9qakTELv/jFz5CZ\nOddjPzm/HIQEdf1/8z6TYXT0rozqJgHjnY8L9LBvLUqFMfkkyqqrAARr+0xD+7Y/69qh2Ft3CCkD\nTZgWNxifWBxbVzrWBBgagmBVamG122GxW2CUItEAINhkhFkxo1GthxxdqxW6C43HgCtM2HcmAWeh\nZQfNL/JOt994hT7xuiLnKrzz8WnccO1At3Puy74Sez76AZlt7A54X3Ya3vjwe/w4fTAMBoGYE8lI\n6+cakokLjcF31d9DrQ9zK2kAuLKv5uWTw4zux4C2O194iBGzr7sUj2w76Og7GUMHRmHssHiYTQom\npfXHZ9+U4e2PtbuvDAYZE0Yk4lRxNW64Vpvwbn6hHRAdjeLSH/RbcZ3lLNwDhpZhNL1gpQ2Kw/Uj\nB8JkkPGP/zQpid6GfpFmTB2djJIL9YgMNeHAl45V9HbV7ds/oF38w4KNuHHcJahtsKG8ugH9Isw4\nXlDh2N/EFfhWLRyJ4wUViPOwwKpp37dW7mHdbdfi0LESDLs0BufPu/YM6cqAMbh/BDLGDsS1l7d/\nf78zAzIoknv208kyFRERkUhLuxq33bYQ48Zdh3vvXY5Tp07h7rt/CkALKOvWbUBh4Wk8/fQWyLIE\ng8GIX/96AwBg7twsPPzwcsTGxmHLlm149JbROPhlMa4dMRBz5mTj1l
tvRlJSfwwb5vp3//Of/x9s\n2vQ4duz4PYxGIzZs+A3GjZuAEye+wdKlt8JkMmL8+IlYtuxeLFq0BOvWPYp33nkL11wzps33MWPG\nTKxenYc777wVQ4ZcjpQUrXbZoEGDceutufjt//t/UIUEUTAE98y/1vE7N2LHjmcxffoMj/20cNpl\nMBpkZE1qPxPpjD4TMOwdDBhNh6TCgo1AowrVUacJySfRqDrKYjvmHAySAbPHXIG97wNh4SrGDkvA\nB582aIvpVAVGgwwDTIChEjZo6WqwYkY1AHOQAaHGYJxvOA85SCuJ7RwCmjUyDc99qU1sR5paHytN\nv9p1335yXBhyM1suakrqF4qlmcNbPO4UHR6E22+8Qj/eMOVBt+dzhs7FocNVsBamICjW/QP/iwmr\nUdFYqd+R5KSViwaCJDOcMz4hZgNuv9G9fUaDjBCzAXfPc90jP2RApB4wjI45jFszXGU0mo+qhAe5\nByfn4rembWotw5BlCbdmXI7PvynzKmBIkoRbZriX8zjwZTEaLPYWGYZzTD9nyhC3x785XYHf7NJq\nGzkvnsMujcGwSz3fOdS0nERrQ0uDkiIwKCmixW2YXRkwZEnCzVMv83hecJMMo6mLmcN47LENbsc5\nOQuRk7PQ7bH+/ZMxdux4/TguLhxlZdWYP/9mzJ9/s/74ZQOicNkAbUj1jjvuxh133N3i7w0YMBBb\ntmxr8fiSJbdhyZLb3B675JJL8ac/vaQfO1/vxhtn48YbXZP5kZFRePbZP7b6/mbOzMS/votCeVUj\nJqa51vwcOfI5rr9+GkJDPe9pERUW1O5n/WIwYLShpt41BilLEiRZdZQB1z54+hyGI8MwyAaYFCOC\nDWZUWbS7Jupt9XoZa5NRhkEKgiQJ2GXt22WoY4evIKOCEGOIXozQVurKDgaEuYJBRBsZRncINgQj\nvOpK1Kv1LdYGRAZFtJr9yJKMX123Fke/rcCfoN3RpLRykfNmYri5pkUcASDM6Brisl+Id5UfaSXD\naHUMv5PXMOeF2K4K/XZGp7aGl5rOJ3h67821ty6jPT0xJBXUZsDoMyPhF8dxyfrd7zbho48O4skn\nt/Rse9CXAkaTPbj3HzmDlIRwpCS2/MYuhMAbJ97BB7YPIIeNhloTjeo6K2CyA6pZ2zcZTSe9HUNS\njrUNEaYIVFq0Mc16W4Ne9MxkUGCStAuW3VAHBUBEkHaRM5sUfW2EWh8CUedaHxAb7PrW2XSSuic4\n747pSOXLaHMUgg1NbkFu5SLqaYiitQtM8zH60CYBo+l6ioSQJsX3HHNMVbUtV5o3L/zmrbYmwYG2\nA0bTi3dHq4h6muNoS2cDzcVoOiTV1MVkGH2B89+i84q1YsWqnmtMM30m1NubFK/Z+dZxvPHh962e\n91Hxp/jn6X2QjFZHZVSgqs4CSCqEKusZhr4VqbPOk+NiHmkKR621DjbVhjpbvV6O2qBISIrSAkGQ\no8rpgOh+UGQJ/WND9Q2Y1NpITBntWk0sSzIWXp6Nm4e2vBOkuzk3u4+NbGXvhHa43XrZygWvMxmG\ncyhh2mhtTsOstFzwd/nAKIQYQ/QsbdwQbf5mUFIrmVonr2FtTYK3J7RJIb6Oftt2ZkfhIZ3LGDob\ncDojMtQEk9G1SND576Z5JkbuenP39J0Mo9mQVFVdy2+ZAHDgzMf6zwYDYAdQWdsAqZ+AJBRXcT/F\nimnXDMC732jrAYyKI8NwLAYrrTsHi90CYXUsvpMkJEVG4kgFYIMFI+PSMOuydMxcriA4yIC3/q39\nK8m6ZjRmDnatvgaAHzW5lbYn/XTWMMwcl4Kkfh27JdKt5EJrAcPTraetPJ+SGI7fPTCp1QvnM3np\nsFhVRDjWMeRdcy++vXASI/pdgYUTbK0Oz3T2M9pehtHWIGjTINGZfQqeWpEOo6HjLX4mL73TmVRn\nBAcZsOme6/Ry448vG49Gq/2ib+vtK3qohFW7+mzAaG1hnipUFNWc0Y8jwhSUAaiu14afTIoBjfq2\nmlaMuiwW755wZhiOW9kM2sX0bK1294yzrpIkAcFG1wRs7ojF2i2pjv8Cd111Gw6e+Q+mX3pdr/1A\nGRTZbeWxtzxlGJ6+ZbeVgUQ0Wdg2Kj4NpxtO49p+Wplqc5M1b0GKCVc6VrV39Vh+cHs1mbz4wHcm\nYISYO/ex7apd1zoivEmpE4Mic/7CC66Pf++LGH0nYNjdO7+2WcAQQuBc3Xk02i0wS2FoEDVw7pVS\n3dAIAwBFcmyPaTVBMjZiQHyYPodhUrTnzAYtQJyt1Uo06HWOmq0JaLp+AQAujbgEl0a0Xk7a3zXN\nMFobjvC0uYs3m78YZAPuGrMEZWVtlWloX2djdHsXYW8+7ryAUlt6X7joSwGjWQH+2gYb7KoKxXFP\n/rbXv8TnZV/ANASIkhJQLGrgqAQAq2prGTDMtQgJUiDJzoDhvgjvrVPawh1nwDAbFZQ3qT/VlzS9\nM6q1DKM3jGl3tAKrU2hwOwHDizGFjlQhpb6hX4QZZRUNCA9ufYOonuTzgLF//348/vjjEEJg/vz5\nWLZsmdvzZ8+exerVq1FdXQ1VVZGXl4fJkyd3eTtau622tsGm1zb65OsyKLGOrVCFNrlrNDqW6jdZ\nawEAsJkgSY6tVx0BI8jgWEltcJ8QXnx9Gi6cicDll0QBF5IAoNUKr4HMLcNoLWB4uGh2xxDd0IFR\nmH1dCkYPbbmlaXuS+oUi+0eD9HpOD908Er/9y2GPv7d68Sh8W1jZar0o6tvumD0c//jPacydeGlP\nN6UFnwYMVVWxYcMG7Ny5E/Hx8ViwYAGmTZuG1FRX0btt27Zh1qxZWLhwIU6ePIk777wT+/bta+dV\nO6fVgFFvRUSIybUK3BEYZKF9iA3O4W5HUHBmGMFKCCwAqi01gKT9jp5hKO4BY2hiIpKHaIHiipjL\nsHrMciSHJnXZ+/IHngKGp3jQHd/BJUnCj9NTPZ/YijkTB+k/jxgUg4HxYThdWtPupOXll0TrQYao\nqZgIs77TYG/j0wHUo0ePIiUlBcnJyTAajcjMzMTevXvdzpEkCTU1WgmDqqoqJCQktPZSF635HAbg\nmvgur3Ls1OYIDJLzVliDM5Boj+sbGtm1eYoaa22TDEP7HXOTDCM18lL0D3Wt1gSAS8IHtJi/CHSe\n5jA8DUn1ghGrDvGz5hJ5zacZRklJCZKSXN+mExIS8MUXX7idc//99yM3NxcvvPACGhoa8Pzzz/uk\nLc3nMABXwCi+4Cho5pjA/vpULUypgKw4Aogji5BaCRh6kHEU12saMG4amtVr73jqTp7u/fc8h+Fn\nfehnzSXylk8DhjeTfvn5+Zg/fz5uv/12HD58GKtWrUJ+fr7H34uL81yDvqngkJYLuyRFQVxcOGzf\nOfZWcFz8nWXHTWatrr/z8eEpcTh9BLhueAr2lR2BMFmR0j8UZwAMHhCLuLhw1BtdpcFTkhIQE9yx\ndnZGR/uiu9VYXcG6aVunX
jsQ+z45jauuSEBkWMv/PjdOuBRvHTyFMWlJ6BfZflG+1l6/pxgdE+hG\nk9Kj7ekNfdFbsC+6hk8DRmJiIs6cca1rKCkpQXy8e4XLV155Bc899xwAYOTIkWhsbER5eTliYtov\nxNbR2ycrK+tbPHa2rBplZdU478wwZOdeDY7yHxYLQs1G1Dke7xcWjGcfmoTvq7/HvjKguPw8RgyK\nxJkCQG1UUVZWjfoGV8mKxiqBsprO3ebpLWdhtd6sssJVkrppW5dMG4KbJg+Gpd6CsvqWCylzJg/G\nvOtSoFpsXr3H3tIXNpv276Wx0bt2+0Jv6YvegH3hcrGB06dzGGlpaSgoKEBRUREsFgvy8/Mxbdo0\nt3P69++PAwe0vRpPnjwJi8XiMVh0RmuT3s4hKYujLpHkGJISqgFCAHZhQ2iwUb9LyigbYTIqCHPs\n81BtrYXVsamSwbFwz9xk0tvQw7Wfeou27oKSJMljjaOeqIFERK3z6RVNURSsW7cOubm5EEJgwYIF\nSE1NxdatW5GWloYpU6Zg9erV+PnPf46dO3dClmX8z//8T5e2obSiHmFmY6tzGM7FexbHN0LnXVJQ\nZUBVYBM2hAUbcM6qfft1VkR17vNQY6lBiFFb2e2sJeVcuEcufW2tQfMd04gChc+/AqenpyM9Pd3t\nseXLl+s/p6am4qWXXmr+a13is2/K8NSrXyA4yICMMQNbPF9Tr627cGYYzklvqDIgZNiEHZEhJkj1\n2oK7CMd+FGGOIFFjrdVrSDkDhizJkCAhOaxv3TrbntZKmgeyhJgQnCqu7nCRRqLeLqDHTM5VarfL\n1jfaWpTDBoAaRwFC5/af+qS3UABVhk21Ys7ES1H+5ccoBRDpKCyoyApCDSGottSgzlYPo2xw26ti\n65QnfPiu/E9fyzCW3DAUybGhmOqopEsUKAI6YDTdx7uhlYBRGvI5vjwXAoujUrnzFlmoMoQqw6ra\ncGliBGKLJZSWa3tdOIWZQlFcp9WLuixqsNteFbLE+kBNdWdJ7d4gLFjbgpYo0AT0lc3WZKK7+Q5t\nMDagMeobbDv6vGsOo+mQlKrApmqRpNJShSDF5DY/Ue7YHQ8ALo/unasye4u+lmEQBaqADhhNM4wD\nXxa7PRfUpISPPofhzDCENofhvAOqylKtz184OSe+r44dgWmXuM/RkLu+lmEQBarAHpJqZx/vsHAJ\nztUBFpsKJeYslIhyCFUCIOlDUnbVjhpLLeIj3YvS3X3V7ThR+T0mJ/fe/St6C/YPUWAI6IBhs7e8\nldYpOFi4AobVDtOQIwAASXYEGVWGKlRcaKyAgEC0OdLt9weE98eA8P6+aHbAYYZBFBgCfEiq7QzD\nFOya09DnMJpy1IYqrdP22o4OiuraxvUhnMMgCgyBnWG0MSQlRxfjbJhrzwJLK3dQCUd5kMJqrbRJ\njJkBo7MYMIgCQ4BnGK0PSRmTvnc7Pnu+rsU5wqIVuztZqZ0bzYDRab1hRz0iuniBHTBayTDWLBmN\n/v3Cmj3qfp7JKGP2tcMAACcqTgHgkNTFemB+GtbfPqanm0FEFyGwh6RayTAGxIUhvCwIxU2315bd\nh6TSBvVDapwROAM02LXV4lHNJr2pY0Zd1rGtT4mo9wn4DMOQ9B2MqYfhzCIUWYLavCpcs4DRaLMj\nxuzaPlOChBCDd/sxEBEFqoAOGFa7DcaB38DQrxgwaHWjFEVCjbXG7TypWcCwWlW3gBFiCGa5DyLq\n8wL6KlituDZvks11SEkIhyJLqLI020xFsQNCm5iV6qKxaPplWikQx94WIUZmF0REAT2H0ShX6T9L\nQXVY/9MxsNitqLc1wCAZYLUCksGmD0n1D03E6uuX6xsfRZjC0FDfoO95QUTUlwV0hmEXrm0/JbN2\n62xFYyUAYHjUlbCVpGjPKTZAEgg3hbntkucsNhiscF8DIqLADhiSVf9ZCnIGjAoAQIw5ErBrq7kl\ng3ObVfeES5G051Vw6zQiosAOGHAFDNlchxMV3+O7ygIAQGxIDITq2C/aETCMzQOGrD1vd5Q5JyLq\nywJ6DkN1ZBjCrkAOq8T//Wyb/lxCWAx+lDYQh6q/ajPDMEjasV20XcSQiKivCOgMwxkw1NqIFs9F\nm6NwSZxjMZ7SesBw1o+KbLYXBhFRXxTgGYYNQgCiLhyIuOD2XHRQJIIUbRclSR+SMrqdM/+yOTAb\nzMhImdo9DSYi6sUCNmD885PTsAkLJFWB2hCqPy5Bwk1D58FsMMNs1AKEpGhzFAbHnIWT2WDG/Mvm\ndF+jiYh6sYAdknrpX98Cih2SasSg8MH64z8fl4f0AdcBAMwGxz6tbWQYRETkErABA3BkDnYFa3Mm\n64/FmGP0n4ONQY7zHHMYknuGQURELgE7JAUAUGxQHftaPDZ+FSobK2FSXFlEkJ5haENSzDCIiNoW\nwAFDhSSrUG1a1pAQEoeEEPcS284AoWcYSgB3BxHRRQrcISnHRDbUtoOAHjAcGYZz3QUREbUUsAFD\nMtcDAISl7TpQpmYZRfOV3kRE5BKwAUMO1kqYq3VtL7prPmfRfOEeERG5BG7ACNEChqhvvn+3iyIp\naLr5HjMMIqK2BdwV8u8ffo+TZ6ogBWu76rWXYUiSBKiKtoESmGEQEbUn4K6Qr3/4PQAgaLgNQpVw\n95yr2/8FVWbAICLyQsAOSUFSIUPB2GEJ7Z6mlzgH12EQEbUncAOGrEISXqzcVl1d0LyWFBERuQRu\nwJBUSN68PWYYRERe8XnA2L9/P2bOnImMjAxs37691XP27NmDzMxMzJkzBw8//HCX/F3JywxDbrJY\nj3MYRERt8+kVUlVVbNiwATt37kR8fDwWLFiAadOmITU1VT/nhx9+wI4dO/CXv/wFYWFhKC8v75o/\n7mWGkZoYjZNV2l4ZvK2WiKhtPs0wjh49ipSUFCQnJ8NoNCIzMxN79+51O+evf/0rFi9ejLAwbb1E\nTExMay/VcbI26e2Js2ItwAyDiKg9Pg0YJSUlSEpK0o8TEhJQWlrqds6pU6fw/fffY9GiRVi4cCE+\n+OCDrvnjkgrJi4BhNrgCBjMMIqK2ebxClpSUICGh/VtT2yKaLqNug91uR0FBAXbt2oUzZ85gyZIl\nyM/P1zOOzhGQZAFZ9RwwghRmGERE3vB4hZw/fz5GjRqFxYsXY8KECR168cTERJw5c0Y/LikpQXx8\nvNs5CQkJGDVqFGRZxoABAzBo0CCcOnUKV155ZbuvHRfX9gpuSFqgMshK++cBiC5yPZ8YH6Wt/vYz\nnt5jX8K+cGFfuLAvuobHgLFv3z7s2bMHv/vd77BhwwYsWbIE8+bN8yoDSEtLQ0FBAYqKihAXF4f8\n/Hxs3rzZ7Zzp06cjPz8fWVlZKC8vxw8//ICBAwd6fO2ysuq2n5RU7f+F3P55AFSLK0CcO1fj8e/2\nNnFx4R7fY1/BvnBhX7iwL1wuNnB6DBgmkwlZWVnIysrCZ599hry8PPz2t79FdnY27r
33XvTr16/N\n31UUBevWrUNubi6EEFiwYAFSU1OxdetWpKWlYcqUKfjRj36Ef//738jMzISiKHjkkUcQGRnZ6Tck\nSYCQtYCheDPp3WRIioiI2ubVoH1RURF2796NN998ExMmTEBOTg4++ugjLF26FK+//nq7v5ueno70\n9HS3x5YvX+52vGbNGqxZs6aDTW9JCKFVn3VkGLIXGyIFGRgwiIi84fGKevfdd+Obb77BwoUL8eqr\nryI6OhoAMHr0aOzZs8fnDewIu6rNXUgdyDDMzDCIiLziMWDMmzcPM2bMgKK0vPi++eabPmlUZ9nt\njruy9AyjY3dJERFR2zyuw4iMjERdXZ1+XFVVhYMHD/q0UZ3lzDDgyDAMXgQMs6HtLVyJiMjFY8DY\nuHGj2x1RYWFh2Lhxo08b1Vl21XF3lCPDULyYw+CQFBGRdzwGDCGE29oEWZZht9t92qjOcs1haO1T\nOCRFRNRlPAaM0NBQHDlyRD8+cuQIQkJCfNqoztLnMJyT3l4EDO6BQUTkHY9jNqtWrcJ9992HIUOG\nAABOnDiBp556yucN6wzXkJQWOIQX5c0jTOEwK0EYkzjal00jIvJ7HgPGqFGjkJ+fj8OHD0MIgVGj\nRl3Uwjpf0ie99ZXenst8KLKCJ9N/6ZclQYiIupNXC/ciIyMxefJkX7flojVfh+HVFq0AgwURkRc8\nBozjx49j/fr1OH78OCwWi/74V1995dOGdYbdLgDFCslUrz2gBu4OtERE3c1jwPjFL36BFStW4Ikn\nnsCOHTuwa9cuhIaGdkfbOsyuCgSNOAjZrK0bEQwYRERdxuMV1WKxYMKECRBCID4+HitXruy6TY66\nmF1V9WABAAaVi/KIiLqKx4Ahy9opkZGROH78OC5cuICioiKfN6wzVNV9wyazLbaHWkJEFHg8Dkll\nZmbiwoULWLZsGRYtWgRVVVtUm+0tbM0CRlRocA+1hIgo8LQbMFRVxYQJExAdHY309HR8/PHHaGxs\nvMjtU33HbhcQqgxJVjFWmY/Z4y/t6SYREQWMdoekZFnGz372M/3YaDT22mABAFa7DZKsop+cjNsm\nj0NwEPfoJiLqKh7nMFJTU1FYWNgdbbloFrt2269RMvVwS4iIAo/Hr+Dl5eWYO3currnmGrcaUlu2\nbPFpwzqjwd4IADDKDBhERF3Nq0nvzMzM7mjLRWu0OTIM2djDLSEiCjweA0Z2dnZ3tKNLNNgbAAAm\nmSXLiYi6mseAsXz58lZrLfXGISmLygyDiMhXPAaMKVOm6D83NjbinXfeQWpqqk8b1VnOgBHEDIOI\nqMt1eEjqxz/+Me655x6fNehiOO+SMimc9CYi6modrs4nSVKvvc3Womp3SXGfbiKirtehOQwhBL7+\n+mtMmDDB5w3rjEbHbbVmA4sOEhF1tQ7NYSiKgtzcXIwcOdKnjeqsRlXbByPM2Dv3HCci8mcBdVtt\no3AGjN5bvoSIyF95nMNYtGgRKisr9eOKigosWbLEp43qrEahrcMIN/XODZ6IiPyZx4BRV1eHyMhI\n/TgqKgo1NTU+bVRnWVEPoUoIMXIOg4ioq3kMGKqqoq7OtYtdbW0t7Ha7TxvVWVbRANhMMBqUnm4K\nEVHA8TiHMXv2bOTm5mLRokUAgJdeeglz5871ecM6wyo1QNiCoCgtV6YTEdHF8Rgw7rrrLsTHx2Pf\nvn0QQmDhwoXIysrqjrZ1iF21Q5WsELZwGJUOLy8hIiIPvNphKDs7u9ffLVVcVwoAEFYTFAYMF7dU\nkAAAFDhJREFUIqIu5/HK+sADD6CiokI/vnDhAh588EGfNqoz3jq1FwBgP5/EDIOIyAc8XllPnz6N\nqKgo/Tg6OhoFBQU+bVRnlNaVQVKNUCviOYdBROQDHgOG3W53uyvKarXCYrH4tFGdUW9rgKwaIUGC\nIjNgEBF1NY8BY9KkSVi5ciU++eQTfPLJJ8jLy0N6errXf2D//v2YOXMmMjIysH379jbPe/vtt3HF\nFVfgv//9r9ev3VS9rQGSaoSiyK3u30FERBfH46R3Xl4efv/73+M3v/kNAK221Lhx47x6cVVVsWHD\nBuzcuRPx8fFYsGABpk2b1mI/jdraWrz44oudrlElhECDrQGKGgIDh6OIiHzCY4ZhNBpx//334+mn\nn8YNN9yAv//971i7dq1XL3706FGkpKQgOTkZRqMRmZmZ2Lt3b4vztmzZgjvvvBNGY+d2ymu0WyAg\nALsBBk54ExH5RLsZhs1mw759+/C3v/0Nhw8fhs1mw3PPPed1JlBSUoKkpCT9OCEhAV988YXbOV99\n9RWKi4sxefJk7NixoxNvwbWXN+xGZhhERD7S5tfxJ554Atdffz12796N2bNn4/3330dkZGSHho2E\nEB6ff/zxx7FmzRqvf6c1DTYtYAhmGEREPtNmhvHSSy9h1KhRWLZsGcaPHw8AHZ5MTkxMxJkzZ/Tj\nkpISxMfH68e1tbU4ceIEfvKTn0AIgXPnzuHee+/Ftm3bMGLEiHZfOy4uXP/5glSm/WA3IMhkcHuu\nL+hr77c97AsX9oUL+6JrtBkwPvzwQ/zv//4vNm7ciMrKSmRlZXW46GBaWhoKCgpQVFSEuLg45Ofn\nY/PmzfrzYWFhOHjwoH78k5/8BI8++iiGDx/u8bXLyqr1n8+eLwcA2K0KpGbPBbq4uPA+9X7bw75w\nYV+4sC9cLjZwtjl+ExERgSVLluDVV1/F008/jcrKSjQ0NGDJkiXYvXu3Vy+uKArWrVuH3NxczJ49\nG5mZmUhNTcXWrVvx7rvvtjhfkqTODUk5tmZVbQrnMIiIfEQSHbhCW61W/POf/8Rrr72GP/zhD75s\nl0dlZdUoqCrEy9/+HcNiLkP+9/+E5WQaBpmHY+1PrunRtnUnfntyYV+4sC9c2BcuF5theFV80Mlo\nNGLWrFmYNWvWRf3RrrL7m9fwQ9VpfFd5CoA26W2x9c69OoiI/J1f31IUFRTp/oDdgPOVDT3TGCKi\nAOfXASM2OMbtWNgNqG2w9VBriIgCm18HjBbsnVspTkREnvl1wLCr7vMVwm5AanJED7WGiCiwdWjS\nu7exqe7DT4umXIGJIwb0UGuIiAKbX2cYNuGeYQzpH4MQs1/HQCKiXsuvA0bzISmzSemhlhARBT6/\nDhjlNfVux2YTswsiIl/x64BRXe++5oIZBhGR7/h1wLA3m8MIMjJgEBH5SkAFDFlm4UEiIl/x64Ch\nwhUwbGXJPdgSIqLA59ezxKpQIVQZDZ9NBwSzCyIiX/LrgGEXNkDI2v+IiMin/PpKq0JlsCAi6iZ+\nfbUVsEOofv0WiIj8hl9fbbUMg3MXRETdwa/nMATsgDBg2ugBGDs8vqebQ0QU0Pw8YKiAKuOmqakw\nGrhoj4jIl/x6SEpI2qS3ovj12yAi8gt+faUVjrukZInzGEREvua3AUMVKiAJSP77FoiI/IrfXm2d\nu+1JXIdBRNQt/PZqa3NsniSBk91ERN3Bb
wOGs1Kt7L9vgYjIr/jt1VYfkmKGQUTULfw4YDgzDAYM\nIqLu4LcBwy60DINDUkRE3cNvr7ZWZ4YhMcMgIuoOfhswnHMYCgMGEVG38NuAYbE7h6QYMIiIuoP/\nBgybFQCgyAwYRETdwW8DRqMzYDDDICLqFn4ZMKpqLbDYtCEpAzMMIqJu4Zf7YSx57C0kDq4AYjnp\nTUTUXXyeYezfvx8zZ85ERkYGtm/f3uL5nTt3IjMzE/PmzcNPf/pTnD171qvXLausAwAYZL+MeURE\nfsenAUNVVWzYsAHPPfcc3nzzTeTn5+PkyZNu5wwfPhyvvvoq3njjDcyYMQMbN2707sUlFQADBhFR\nd/FpwDh69ChSUlKQnJwMo9GIzMxM7N271+2csWPHIigoCAAwcuRIlJSUePfishYwjAoDBhFRd/Bp\nwCgpKUFSUpJ+nJCQgNLS0jbPf+WVV5Cenu7di+sZBucwiIi6g0+/ngshvD73jTfewH//+1+88MIL\nXp0vSdprh4cEIy4uvFPtCxR9/f03xb5wYV+4sC+6hk8DRmJiIs6cOaMfl5SUID4+vsV5Bw4cwPbt\n2/Hiiy/CaDR69+KOISnVBpSVVXdJe/1RXFx4n37/TbEvXNgXLuwLl4sNnD4dkkpLS0NBQQGKiopg\nsViQn5+PadOmuZ1z7NgxrF+/Htu2bUN0dLT3L+4YkjJxDoOIqFv49GqrKArWrVuH3NxcCCGwYMEC\npKamYuvWrUhLS8OUKVOwadMm1NfX48EHH4QQAv3798czzzzj+cUl56Q35zCIiLqDz7+ep6ent5jI\nXr58uf7z888/36nXlWRnhuHlEBYREV0UvywNAgBwTHqbDAwYRETdwS8DRnR4EOcwiIi6mV8GDINB\n1u+SCvL2rioiIroo/hkwZFnPMIKYYRARdQv/DBgGSV+4Z2KGQUTULfwyYCiya0jKzElvIqJu4ZcB\nw6BI+pCUmRkGEVG38NOA0WQOw8A5DCKi7uCXAUNRZEiyCqHKMBm40puIqDv4ZcAwKjKg2AC7AqPB\nL98CEZHf8curraJIkIyNENYgBgwiom7il1dbWRGQDDYIaxAUxS/fAhGR3/HPq63SCAAQVhNkSerh\nxhAR9Q1+GTBUgzNgBPVwS4iI+g6/DBh2uR4AINkZMIiIuotfBgxVbgAAGEVwD7eEiKjv8MuAcVoc\nAQAEqRE93BIior7DLwNGg1QJ27n+CFHjeropRER9hl8GDABQq2IQEsSyIERE3cV/A0ZdOIIZMIiI\nuo1/BgwBiPowmE2sI0VE1F38MmAEl6cBQkFYMEubExF1F78MGOvmLsaVg2Iwd+Kgnm4KEVGf4ZeT\nAEMGRCHv5pE93Qwioj7FLzMMIiLqfgwYRETkFQYMIiLyCgMGERF5hQGDiIi8woBBREReYcAgIiKv\nMGAQEZFXGDCIiMgrDBhEROQVBgwiIvKKzwPG/v37MXPmTGRkZGD79u0tnrdYLFi5ciVmzJiBm2++\nGWfOnPF1k4iIqBN8GjBUVcWGDRvw3HPP4c0330R+fj5Onjzpds4rr7yCyMhI/OMf/8Btt92GTZs2\n+bJJRETUST4NGEePHkVKSgqSk5NhNBqRmZmJvXv3up2zd+9eZGdnAwAyMjJw8OBBXzaJiIg6yacB\no6SkBElJSfpxQkICSktL3c4pLS1FYmIiAEBRFERERKCiosKXzSIiok7wacAQQnT4HCEEJEnyVZOI\niKiTfLqBUmJiotskdklJCeLj41ucU1xcjISEBNjtdtTU1CAyMtLja8fFhXd5e/0V+8KFfeHCvnBh\nX3QNn2YYaWlpKCgoQFFRESwWC/Lz8zFt2jS3c6ZMmYLXXnsNAPD2229j/PjxvmwSERF1kiS8GTe6\nCPv378evf/1rCCGwYMECLFu2DFu3bkVaWhqmTJkCi8WCVatW4auvvkJUVBQ2b96MAQMG+LJJRETU\nCT4PGEREFBi40puIiLzCgEFERF5hwCAiIq/4XcDwVJsq0KxduxbXXXcd5syZoz9WWVmJ3NxcZGRk\nYOnSpaiurtaf+9WvfoUZM2Zg3rx5+Oqrr3qiyT5RXFyMW2+9FbNmzcKcOXPw5z//GUDf7AuLxYKc\nnBxkZWVhzpw5eOqppwAAhYWFuOmmm5CRkYG8vDzYbDb9/ECv16aqKrKzs3H33XcD6Lt9MXXqVMyd\nOxdZWVlYsGABgC7+jAg/YrfbxfTp00VhYaGwWCxi7ty54sSJEz3dLJ/6z3/+I44dOyZmz56tP7Zx\n40axfft2IYQQv//978WmTZuEEEK899574s477xRCCHH48GGRk5PT/Q32kdLSUnHs2DEhhBA1NTVi\nxowZ4sSJE32yL4QQoq6uTgghhM1mEzk5OeLw4cPiwQcfFHv27BFCCPHYY4+Jl156SQghxK5du8T6\n9euFEELk5+eLFStW9Eibfen5558XDz30kLjrrruEEKLP9sXUqVNFRUWF22Nd+RnxqwzDm9pUgeba\na69FRESE22NN629lZ2frfbB3715kZWUBAK6++mpUV1fj3Llz3dtgH4mLi8OwYcMAAKGhoUhNTUVJ\nSUmf7AsACA4OBqB9Y7bZbJAkCYcOHUJGRgYArS/+9a9/AQj8em3FxcV4//33kZOToz/20Ucf9cm+\nEEJAVVW3x7ryM+JXAcOb2lR9QXl5OWJjYwFoF9Ly8nIA7nW5AK1/SkpKeqSNvlRYWIjjx4/j6quv\nxvnz5/tkX6iqiqysLEycOBETJ07EwIEDERERAVnWPtKJiYn6+w30em2PP/44HnnkEb2k0IULFxAZ\nGdkn+0KSJCxduhTz58/Hyy+/DABd+hnxaWmQria4ZKRdrfVPoNXlqq2txfLly7F27VqEhoa2+f4C\nvS9kWcbrr7+Ompoa3HfffS22DQBc77d5X4gAqtf23nvvITY2FsOGDcOhQ4cAaO+v+XvuC30BALt3\n79aDQm5uLgYNGtSlnxG/Chje1KbqC/r164dz584hNjYWZWVliImJAaB9QyguLtbPKy4uDqj+sdls\nWL58OebNm4fp06cD6Lt94RQWFoYxY8bgyJEjqKqqgqqqkGXZ7f06+6Kj9dr8wWeffYZ9+/bh/fff\nR2NjI2pra/H444+jurq6z/UFoGUQABATE4Pp06fj6NGjXfoZ8ashKW9qUwWi5t8Epk6dildffRUA\n8Nprr+l9MG3aNLz++usAgMOHDyMiIkJPRQPB2rVrMWTIENx22236Y32xL8rLy/U7XRoaGnDw4EEM\nGTIE48aNw9tvvw3AvS+mTp0asPXa8vLy8N5772Hv3r3YvHkzxo0bhyeffLJP9kV9fT1qa2sBAHV1\ndfjwww8xdOjQLv2M+F1pkNZqUwWyhx56CIcOHUJFRQViY2PxwAMPYPr06XjwwQdx9uxZ9O/fH1u2\nbNEnxn/5y1/igw8+QHBwMJ544gmMGDGih99B1/j0009xyy23YOjQoZAkCZIkYeXKlbjqqquwYsWK\n
PtUXX3/9NdasWQNVVaGqKmbNmoV77rkHp0+fRl5eHqqqqjBs2DBs2rQJRqOxz9Rr+/jjj/HHP/4R\nzz77bJ/si9OnT+P++++HJEmw2+2YM2cOli1bhoqKii77jPhdwCAiop7hV0NSRETUcxgwiIjIKwwY\nRETkFQYMIiLyCgMGERF5hQGDiIi8woBBfu2mm25CdnY2MjMzMWLECGRnZyM7Oxtr167t8Gvdcccd\nXpW7fvTRR3H48OHONLdDjh07hnfeecfnf4fIW1yHQQGhqKgICxYsaLf6qLNUhL94+eWXcfDgQWze\nvLmnm0IEwM9qSRF1xMGDB7Fp0yaMHDkSx44dw3333Yfy8nLs2rVL31BnzZo1GDt2LABg8uTJ2Llz\nJwYNGoTFixdj1KhR+Pzzz1FaWorZs2djxYoVAIDFixfj3nvvxaRJk7Bq1SqEhYXh5MmTKCkpwejR\no/HEE08A0GrzPPLII7hw4QIGDhwIu92OqVOn4uabb3Zr57lz5/DQQw/hwoULAIBJkybhjjvuwDPP\nPIO6ujpkZ2dj3LhxWLNmDT7//HNs3rwZ9fX1AIDly5cjPT0dBQUFWLx4MWbPno1PP/0UFosF69ev\nx+jRo7ulr6mPuJjNOoh6i8LCQjF+/Hi3xw4cOCCGDx8uvvjiC/2xppvLnDhxQlx//fX6cXp6uvju\nu++EEEIsWrRIPPTQQ0IIIaqqqsTYsWNFYWGh/twHH3wghBDi4YcfFrfccouwWq2isbFRzJw5Uxw6\ndEgIIcQ999wj/vCHPwghhDh9+rQYNWqU2L17d4u279ixQzz22GP6cVVVlRBCiL/+9a8iLy/Pre1Z\nWVni/PnzQgghiouLRXp6uqipqRE//PCDuPzyy0V+fr7+3q+//nphs9m870QiD5hhUEAbPHgwrrzy\nSv341KlT2Lp1K0pLS6EoCkpLS1FRUYGoqKgWv3vjjTcCAMLDwzFo0CAUFBQgOTm5xXk33HADDAbt\nozR8+HAUFBRg7NixOHToEH71q18BAAYMGKBnMs2NHDkSL774Ip588kmMGTMGkyZNavW8Tz/9FIWF\nhVi6dKlekFJRFJw+fRohISEIDg7GrFmzAAATJkyAoig4deoUUlNTve0uonYxYFBACw0NdTteuXIl\n1q9fj8mTJ0NVVVx11VVobGxs9XeDgoL0n2VZht1u79B53u6zcM011+C1117DgQMH8Le//Q07duzA\nCy+80OI8IQRGjBiBnTt3tniuoKCgxWOqqgbUXg/U8/xnBpDIA+HF/Rs1NTV6ddLdu3e3GQS6wtix\nY/Wy0kVFRfj4449bPa+wsBBhYWGYNWsW1qxZgy+//BKAtteFs4w5AIwePRonTpzAJ598oj929OhR\n/ef6+nrs2bMHgLZFKQCkpKR07ZuiPo0ZBgUMb75Nr127FsuWLUNSUhLGjRuH8PDwVn+/+Wu19Vx7\n561btw6rV69Gfn4+Bg8ejNGjR7v9PaeDBw/iz3/+MxRFgRACGzZsAABMnDgRf/rTn5CVlYXx48dj\nzZo1eOaZZ7Bp0yZUV1fDarVi4MCBePbZZwEAsbGx+Pbbb5GTkwOLxYLNmzdDURSPfULkLd5WS+Qj\njY2NMBqNkGUZJSUlyMnJwa5duzBw4MAu/1vOu6Q+/PDDLn9tIidmGEQ+8t133+HRRx+FEAKqqmLl\nypU+CRZE3YUZBhEReYWT3kRE5BUGDCIi8goDBhEReYUBg4iIvMKAQUREXmHAICIir/x/apbYj523\no60AAAAASUVORK5CYII=\n",
+            "text/plain": [
+              "\u003cmatplotlib.figure.Figure at 0x7f97f1330850\u003e"
+            ]
+          },
+          "metadata": {
+            "tags": []
+          },
+          "output_type": "display_data"
+        }
+      ],
       "source": [
+        "def plot(train, test, label):\n",
+        "    plt.title('MNIST model %s' % label)\n",
+        "    plt.plot(train, label='train %s' % label)\n",
+        "    plt.plot(test, label='test %s' % label)\n",
+        "    plt.legend()\n",
+        "    plt.xlabel('Training step')\n",
+        "    plt.ylabel(label.capitalize())\n",
+        "    plt.show()\n",
+        "  \n",
+        "\n",
         "with tf.Graph().as_default():\n",
         "  hp = tf.contrib.training.HParams(\n",
         "      learning_rate=0.05,\n",
-        "      max_steps=500,\n",
+        "      max_steps=tf.constant(500),\n",
         "  )\n",
         "  train_ds = setup_mnist_data(True, hp, 50)\n",
         "  test_ds = setup_mnist_data(False, hp, 1000)\n",
         "  tf_train = autograph.to_graph(train)\n",
-        "  (train_losses, test_losses, train_accuracies,\n",
-        "   test_accuracies) = tf_train(train_ds, test_ds, hp)\n",
+        "  all_losses = tf_train(train_ds, test_ds, hp)\n",
         "\n",
         "  with tf.Session() as sess:\n",
         "    sess.run(tf.global_variables_initializer())\n",
         "    (train_losses, test_losses, train_accuracies,\n",
-        "     test_accuracies) = sess.run([train_losses, test_losses, train_accuracies,\n",
-        "                                  test_accuracies])\n",
-        "    plt.title('MNIST train/test losses')\n",
-        "    plt.plot(train_losses, label='train loss')\n",
-        "    plt.plot(test_losses, label='test loss')\n",
-        "    plt.legend()\n",
-        "    plt.xlabel('Training step')\n",
-        "    plt.ylabel('Loss')\n",
-        "    plt.show()\n",
-        "    plt.title('MNIST train/test accuracies')\n",
-        "    plt.plot(train_accuracies, label='train accuracy')\n",
-        "    plt.plot(test_accuracies, label='test accuracy')\n",
-        "    plt.legend(loc='lower right')\n",
-        "    plt.xlabel('Training step')\n",
-        "    plt.ylabel('Accuracy')\n",
-        "    plt.show()"
-      ],
-      "execution_count": 0,
-      "outputs": [
-        {
-          "output_type": "stream",
-          "text": [
-            "Downloading https://storage.googleapis.com/cvdf-datasets/mnist/train-images-idx3-ubyte.gz to /tmp/autograph_mnist_data/train-images-idx3-ubyte.gz\n",
-            "Downloading https://storage.googleapis.com/cvdf-datasets/mnist/train-labels-idx1-ubyte.gz to /tmp/autograph_mnist_data/train-labels-idx1-ubyte.gz\n",
-            "Downloading https://storage.googleapis.com/cvdf-datasets/mnist/t10k-images-idx3-ubyte.gz to /tmp/autograph_mnist_data/t10k-images-idx3-ubyte.gz\n",
-            "Downloading https://storage.googleapis.com/cvdf-datasets/mnist/t10k-labels-idx1-ubyte.gz to /tmp/autograph_mnist_data/t10k-labels-idx1-ubyte.gz\n",
-            "Step 0 train loss: 2.244329 test loss: 2.2499208 train accuracy: 0.12 test accuracy: 0.161\n",
-            "Step 50 train loss: 0.64771986 test loss: 0.56013924 train accuracy: 0.82 test accuracy: 0.836\n",
-            "Step 100 train loss: 0.49011207 test loss: 0.42143965 train accuracy: 0.84 test accuracy: 0.879\n",
-            "Step 150 train loss: 0.3768609 test loss: 0.39319593 train accuracy: 0.88 test accuracy: 0.883\n",
-            "Step 200 train loss: 0.36007702 test loss: 0.37089333 train accuracy: 0.9 test accuracy: 0.881\n",
-            "Step 250 train loss: 0.182115 test loss: 0.28543878 train accuracy: 0.94 test accuracy: 0.915\n",
-            "Step 300 train loss: 0.2119576 test loss: 0.22305593 train accuracy: 0.92 test accuracy: 0.93\n",
-            "Step 350 train loss: 0.12932214 test loss: 0.29057172 train accuracy: 0.96 test accuracy: 0.906\n",
-            "Step 400 train loss: 0.22937602 test loss: 0.2200287 train accuracy: 0.92 test accuracy: 0.925\n",
-            "Step 450 train loss: 0.23444137 test loss: 0.19857481 train accuracy: 0.94 test accuracy: 0.94\n"
-          ],
-          "name": "stdout"
-        },
-        {
-          "output_type": "display_data",
-          "data": {
-            "image/png": "iVBORw0KGgoAAAANSUhEUgAAAe8AAAFnCAYAAACPasF4AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAIABJREFUeJzs3XmAFNW9Pvynlt5mYdhmQMHggnGN\nS9zCD0ElKug1edUY9ZoQTYze3GuiRk1uYjRqRHNj4n5NrhKjiUYlbihGQFRUFDSoKIvgICAO6+xL\n711V5/2jlq7qZaZnpnumZ3g+/zjTXV1dXSP91PecU+dIQggBIiIiGjLkwT4AIiIi6h2GNxER0RDD\n8CYiIhpiGN5ERERDDMObiIhoiGF4ExERDTEMb6JeOOigg3DllVdmPf6rX/0KBx10kGe766+/3rPN\ne++9h9mzZwMAtm3bhkMPPdR57osvvsCPfvQjzJw5EzNnzsTZZ5+NV199FQBw0003YdasWZg1axYO\nO+wwnHLKKc7v4XDY8x7JZBLz58/v9edavXo1Lr300oK2XbBgAebMmdPn97J19/rZs2fjhRde6PO+\niYY7hjdRL3366aee0Ewmk1izZk3WditXrsQnn3xS0D6vu+46TJs2DYsXL8bixYtxyy234LrrrsPO\nnTtxyy23YNGiRVi0aBHGjRuH3//+987vVVVVnv188sknfQrUI444Ag8//HBB2y5fvhxTpkzp83vZ\n+vt6oj0Zw5uol0444QQsWbLE+f3tt9/GV77ylaztrrnmGtx+++0F7bO+vh5HHnmk8/uRRx6JxYsX\nY/z48QUfV3NzM3784x/jo48+wkUXXQTAbAF48MEHMXPmTOi6jlWrVuHcc8/FrFmzcOaZZ2L58uUA\nzFaB0047DQBw//334ze/+Q2uuOIKfP3rX8d5552HxsZG533ee+89HHzwwVnv9cEHH+Bb3/oWTjvt\nNJx//vloaGgAAOzevRsXX3wxzjzzTJx66qm4++67cx5rPu+99x7OOecczJo1C9/+9redC6Vc++3u\ncSEE/vd//xczZ87EKaecgjlz5kDXdQDAwoULcdZZZ+GMM87AN77xDbz33nsFn3eiwcDwJuqlM844\nAy+99JLz+z//+U/MmjUr53ZCCCxatKjHfU6fPh1XXnkl/va3v2HTpk0AgHHjxkGSpIKPa+zYsbjm\nmmtw1FFH4YknnnAeF0Jg8eLFUBQFv/71r3HppZdi0aJFuPzyy3HTTTfl3NeiRYtw/fXX49VXX8WY\nMWPw7LPPAgA2bdqE2tpaTJgwwfNe4XAY//mf/4lrrrkGS5Yswfe+9z1cddVVAIBHH30Uxx13HF5+\n+WUsWLAADQ0NMAwj57FmikQiuOqqq3DDDTdg0aJF+OEPf4jrrrsOhmHk3G9jY2Pex1944QUsWrQI\nzzzzDJYsWYKGhgY8+eSTAIBbbrkFDz74IBYuXIibbroJr7/+esHnnWgwMLyJeun444/Hxo0b0dLS\nglgshlWrVmHKlCk5t73++uvxhz/8AYlEott9/v73v8d3vvMdLFiwAGeddRZmzJjhBEt/nXzyyc7P\n8+fPxxlnnAEAOOaYY5zqONOxxx6LCRMmQJIkHHLIIdi5cycAYMWKFTk/6wcffIBx48Zh6tSpAICz\nzjoLX3zxBXbs2IExY8bg7bffxvvvvw+/34+77roLdXV1BR376tWrMX78eBxzzDEAgJkzZ6KtrQ3b\nt2/Pu998jy9duhTf+ta3UF1dDVVV8e1vfxuvvPIKAGDMmDF46qmnsH37dhx77LH45S9/WdjJJRok\n6mAfANFQoygKTj/9dCxcuBCjR4/GiSeeCFXN/U/psMMOw3HHHYdHHnkERx99dN59BgIBXHrppbj0\n0kvR2dmJRYsW4fbbb8fEiRMxbdq0fh3vyJEjnZ8XLFiAv/3tb4hEIjAMA/mWNqiurnZ+VhTFaV5+\n5513cMkll2Rt39nZiYaGBk8LhN/vR2trKy655BIYhoFbbrkFjY2N+M53voOf/OQnBR17a2srRowY\nkXVsLS0tefeb7/Guri48/PDDmDdvHgBA13WMHj0aAPCnP/0Jf/rTn3Duuedir732wvXXX4/jjz++\noGMkGgwMb6I+OPPMM3H33Xdj1KhRPfbZ/vSnP8W5556LiRMn5ny+tbUV69evd6rWESNG4Pzzz8ey\nZctQX1/f7/C27d69GzfccAOefvppHHLIIfj8888xc+bMgl+vaRrWrFmT8yKkrq4O+++/P5577rmc\nr7388stx+eWXY8uWLbjsssucSronY8aMQXt7u/O7EAIdHR0YM2YMVFXNud+pU6fmfLyurg4zZszA\nd7/73az3+dKXvoTf/va3MAwD8+fPx7XXXotly5YVeGaIBh6bzYn64Oijj0ZjYyM2btzYY4VWV1eH\n73znO7j//vtzPh+Px3HllVd6wmLr1q34+OOPceyxx/bquFRVRTgczllRt7a2oqKiAvvvvz80TXMq\n0EgkUtC+V69ejYMOOgh+vz/rvY488kg0NTXh448/BgA0NDTgZz/7GYQQ+PWvf4133nkHgBmSY8eO\nhSRJ3R6r7YgjjkBzczNWrVoFwBxfMH78eEycODHvfvM9/vWvfx0vvPACYrEYAOCpp57C888/j9bW\nVnz/+99HOByGLMs48sgjezXWgGgwsPIm6gNJknDaaachFotBlnu+Bv7BD36Ap59+Oudze++9N/70\npz/hvvvuw5w5cyCEQFVVFX75y196RqAX4phjjsEf/vAHTJs2DW+++abnuYMPPhjTp0/HzJkzMWbM\nGPziF7/Ahx9+iNmzZ+O///u/e9y3fYtYvve67777cOuttyISicDn8+Gqq66CJEm48MIL8etf/xq3\n3norhBCYMWMGpkyZgh07dnheryhK1ntWVFTgnnvuwa233opoNIrRo0fjrrvu6na/I0eOzPk4AGzc\nuBHnnHMOADPYb7vtNowePRrTpk3Dt771LSiKAp/Ph9tuu61X551ooElcz5uIiGhoYbM5ERHREMPw\nJiIiGmIY3kREREMMw5uIiGiIYXgTERENMUPmVrGmpq6i7m/UqAq0tUWLus89Ec9j//Ec9h/PYXHw\nPPZfsc9hbW11zsf32MpbVbPvKaXe43nsP57D/uM5LA6ex/4bqHO4x4Y3ERHRUMXwJiIiGmIY3kRE\nREMMw5uIiGiIYXgTERENMQxvIiKiIYbhTURENMQwvImIaNh6443XCt723nvvxI4d23vc7sMP38cN\nN/y8P4fVbwxvIiIalnbu3IFXX11c8PZXXXUt9t57QgmPqHiGzPSoREREvXHXXb/D+vXr8Mgjc2EY\nBnbs2I6dO3fgnnv+iN/+9jdoampELBbDD35wOaZOnYYf//hyXHPNz7F06WuIRML44out2L59G668\n8lpMmTI153u89toSzJ
v3dyiKgoMOOgS33XYL6us34M47fwefzwe/349bbvktdu7cnvVYdXXuqU8L\nsceGd0c4gfc3NOLYg+sG+1CIiIa9f7z+GVZuaCzqPo87uA7nz5ic9/l///fZeO65f+D7378MDz/8\nIDQthT/+8c9oa2vF8cd/DWeccRa2b9+GG2/8BaZOneZ5bWPjbvzhD/fh3XeX44UXns0Z3tFoFA89\n9AAeeeQJVFRU4Oc//yneffddvPzyyzjnnPMwa9a/4YMPVqK1tQUvv7wg6zGGdx9ceecbaO2M46ZL\njsOk8X0/gURENDQccshhAIDq6hFYv34dXnzxOUiSjM7OjqxtjzjiKABAXV0dwuFwzv01NHyBiRO/\nhIqKCgDA0Ucfg/Xr1+PEE0/CH/7wP2ho+AJf//ppmDRp35yP9cceGd5b23YiPOFNSMnD0dwRZ3gT\nEZXY+TMmd1slDwSfzwcAWLJkETo7O/HAA39GZ2cnfvjD2VnbKkp6gREhRM79SZL3OU1LQZJCOPbY\n4/HnP/8Ny5cvw5w5N+PHP74652Nf/eqxff4se2R4f7ztCyjVbTBG70RLZ3ywD4eIiEpAlmXoup71\neHt7O/baa2/Isow333wdqVSqT/vfZ59J2LbtC0SjEVRUVGLVqg9x1VU/xrPPzsOUKSfi9NPPgBAC\n9fUbsGXLpqzHGN69dPykA7G4CZArO9DSwfAmIhqOJk3aD59+ugH33XcnKiurnMdPPnkGfvGLa/DJ\nJ2vxb//2TdTV1eGRR+b2ev+hUAhXXHEVrr32J5AkGUcccRSOPfZY7NzZghtv/AWqqqrg8/lw/fU3\nob7+06zH+kMS+doDykxTU1dR93fjit+ipTOCQyLn4yfnHlHUfe9Jamuri/632dPwHPYfz2Fx8Dz2\nX7HPYW1t7m7dPfY+7y+P2Q+SL4mmcOtgHwoREVGv7LHhPbFmPACgLdk2yEdCRETUO3tseI8JjQIA\nxBFGPKkN8tEQEREVbs8N74rRAADJH+egNSIiGlL22PAeW2FW3pI/xtvFiIhoSNljw3uME96svImI\naGjZY8M75AvCLwcg+eNoZuVNRDQs9WZJUNtHH32ItjbvnUjlsAyo2x4b3gAwMlDDypuIaJjq7ZKg\ntn/+88Ws8C43e+QMa7a6ijFojDWiqSt7UnoiIhra3EuCXnDBRbj99lvQ1dUFXddx9dU/w+TJB+Lx\nxx/Fm28uhSzLmDp1Gg455FAsW/YGtmzZjDlz7sD48eOz9pu5DOjVV1/nLANaWRkCIJdkGVC3PTy8\nxwItQKfWPtiHQkQ0rD332UtY1bimqPs8uu4rOHfyWXmfdy8J+uijf8YJJ/w/fOMbZ2PLls24994/\n4J57/oinnnoc8+cvgqIomD//WRx33NcwefKXcc01P88Z3LmWAf3ww/fx1ltLcc4552H27AuxaNHr\nJVkG1G2PDu/a0FgAQAysvImIhrM1a1ajvb0Nixe/DABIJMzu0pNP/jquvvq/cNpps3D66bN63E+u\nZUDr6zc4S362tOzClCknlWQZULc9OrzrKszwTildMISALEmDfERERMPTuZPP6rZKLjWfT8VPf/oz\nHH64dy2L6677JbZu/Ryvv74EP/nJf+Chh/7a7X5yLQMaCAScJT/XrFlZsmVA3fboAWt25Y1gFNE4\nZ1kjIhpO3EuCHnro4XjrrTcAAFu2bMZTTz2OcDiMRx6Zi0mT9sX3v38ZqqtrEI1G8i4lCniXAQWA\nVas+xEEHHYpnn52Hzs4OfPOb38QFF1yE+voNzmOnn36G81ix7NGV96hgDSQhQw5EEYmnUBXyDfYh\nERFRkbiXBP3hD3+E2267Gf/1Xz+EYRi4+urrUFVVhfb2Nlx22fcQClXg8MOPwIgRNTjqqK/ihhv+\nG7/97Z3Yf/8DPPvMtQzokUcehVgsihtv/AVGjaoBIJdkGVC3PXZJUHvZtp++fgvicQM/O+pa7L/3\niKK+x56ASwj2H89h//EcFgfPY/9xSdABEpCCkNQUIvHUYB8KERFRQfb48A4qIUiqhs4oJ2ohIqKh\nYY8P7wo1BABoj4YH+UiIiIgKs8eHd6XPvFevIxEZ5CMhIiIqzB4f3iMClQCAjjjDm4iIhoY9PrxH\nVZgj+Xa1tw3ykRARERVmjw/v0RXm7WE7OjrQHk4M8tEQERH1bI8P70qfOWBNUpNYvallkI+GiIio\nZwxvn9nnDSWFpvbY4B4MERFRAUo6Peodd9yBDz74AJqm4T/+4z9w+umnO88tX74cd911FxRFwfTp\n03HFFVeU8lDysm8Vk9QUWjvZbE5EROWvZOH97rvvYuPGjZg3bx7a2tpwzjnneMJ7zpw5ePjhhzFu\n3Dh897vfxcyZMzF58uRSHU5eITVo/qBoaOviRC1ERFT+Shbexx13HI44wlx6bcSIEYjFYtB1HYqi\noKGhATU1Ndhrr70AACeddBJWrFgxKOHtV/wAAJ9foK2NlTcREZW/koW3oijOYuXPPPMMpk+fDkVR\nAABNTU0YPXq0s+3o0aPR0NDQ7f5GjaqAqipFPcba2mqM1M3K2+8XaI8kMXZsFSSu690r+SbOp8Lx\nHPYfz2Fx8Dz230Ccw5IvCfrqq6/imWeewV/+8pd+7aetLVqkIzLZK78IISBLMiTFQCKpY+u2NlQG\nuTRoobgKUf/xHPYfz2Fx8Dz237BYVWzZsmX4v//7P8ydOxfV1ekDqKurQ3Nzs/P77t27UVdXV8pD\nyUuSJPhlP2TVXHi9jYPWiIiozJUsvLu6unDHHXfgwQcfxMiRIz3PTZw4EeFwGNu2bYOmaVi6dCmm\nTp1aqkPpkV/xAbIZ3h2R5KAdBxERUSFK1mz+8ssvo62tDVdffbXz2AknnICDDjoIp512Gm6++WZc\ne+21AIAzzzwT++23X6kOpUd+xY9kyhxpHo5xXW8iIipvJQvvCy64ABdccEHe54877jjMmzevVG/f\nKwHFjw6YS4IyvImIqNzt8TOsAYBf9kMXZmhHGN5ERFTmGN4w+7wNGIBksPImIqKyx/BGeqIWyDrC\ncYY3ERGVN4Y3zD5vAGZ4s/ImIqIyx/CG2ecNAKrPYJ83ERGVPYY3rPu8AYRCEitvIiIqewxvpPu8\nQyEgHNMG+WiIiIi6x/BGus87GABiCQ26YQzyEREREeXH8Ea68g5YS3tH46y+iYiofDG8Afhls89b\nVc2KO57UB/NwiIiIusXwRrryllUBwGw6JyIiKlcMbwABJQAAzrKgrLyJiKicMbwBhFQzvCXVrLjj\nSVbeRERUvhjeAIKKNVJNNu/xjiVYeRMRUflieAMIWpW3IZkVd4yVNxERlTGGN4CQGgIAGJJZecdZ\neRMRURljeAMIWgPWdCQBsM+biIjKG8MbgCqrUCQFmhXe7PMmIqJyxvAGIEkSgmoAKWGFNytvIiIq\nYwxvS1AJImkkAABxTtJCRERljOFtCaoBJHQrvDlJCxERlTGGtyWkBpHQk1BkNps
TEVF5Y3hbgkoQ\nAgKBoOCtYkREVNYY3hZ7opZgCIiyz5uIiMoYw9sSVM0pUitCQCSWGuSjISIiyo/hbQlZ85sHQwJJ\nzUAixaZzIiIqTwxvi115B4IGAFbfRERUvhjeFrvP2+c3wzvM8CYiojLF8LbYzeYqw5uIiMocw9ti\nV96yz+zrZngTEVG5YnhbglblLavmbWIMbyIiKlcMb4tdeUNheBMRUXljeFtC1mhzQzJDm+FNRETl\niuFtCTK8iYhoiGB4W+w+b3tNb85vTkRE5YrhbfHJKmRJdtb0TunGIB8RERFRbgxviyRJCClBZ01v\nneFNRERliuHtElQDiGlxKLLEypuIiMoWw9slqAYR1xJQFRmaJgb7cIiIiHJieLsErWZzRQE0Vt5E\nRFSmGN4uITUAAQHVLxjeRERUthjeLj7Fb/5XNRjeRERUthjeLn7ZBwCQVYGUzj5vIiIqTwxvF5+s\nAgAU1YCm9b/ybutK4MEX16G5I9bvfREREdkY3i4+xay8FaU4fd5PvFqP9z7Zjb8u3NDvfREREdkY\n3i4+u9ncZ0ArQrN5PKl7/ktERFQMDG8Xu89bUQwYQsAw2O9NRETlh+HtYjebS4rZZM5Z1oiIqBwx\nvF2c0eayGdq8XYyIiMoRw9vF7vO2K+9i9HsTEREVG8PbxWk2tyvvItwuRkREVGwlDe/6+nqceuqp\nePzxx7OemzFjBi666CLMnj0bs2fPxu7du0t5KAWxK2/I5ujwfjebC1buRERUfGqpdhyNRnHrrbdi\nypQpebeZO3cuKisrS3UIvebPCG8OWCMionJUssrb7/dj7ty5qKurK9VbFF1Ws3mxwlsqzm6IiIiA\nElbeqqpCVbvf/U033YTt27fjmGOOwbXXXgtJGtyUs6dHFZLdbM5mbyIiKj8lC++eXHnllZg2bRpq\nampwxRVXYPHixZg1a1be7UeNqoCqKkU9htraas/vcf9IAIBqLi6Gqqpg1ja94fObp9enKv3aT7kb\nzp9toPAc9h/PYXHwPPbfQJzDQQvvs88+2/l5+vTpqK+v7za829qiRX3/2tpqNDV1eR4Lx1IAgJSW\nBAA0t4TRVBPo83ukkpq1Pz3rvYaLXOeReofnsP94DouD57H/in0O810IDMqtYl1dXbj00kuRTJoh\nuXLlShx44IGDcSge9mhzQ+KANSIiKl8lq7zXrl2L3/3ud9i+fTtUVcXixYsxY8YMTJw4Eaeddhqm\nT5+OCy64AIFAAIceemi3VfdA8St2n7dZMevs8yYiojJUsvA+/PDD8dhjj+V9/uKLL8bFF19cqrfv\nE6fyBitvIiIqX5xhzUWRFEiQYMCsvDnDGhERlSOGt4skSfApPqfy7uk+7x3hXXjsk38grsUH4vCI\niIgADOJo83Lll33QhTVKvIc+7/s+eghdyTDGVdTi9H1PGYjDIyIiYuWdKagEkDQSAAC9m8p7W2MY\nXckwACBpJAfk2IiIiACGd5bairGIGREEv/oqtic3593ulfcbnJ8lzn9KREQDiOGdYXyFORe7pGpY\nrb2af0N3i/ogT+tKRER7FoZ3hnGV6YVUVPjzbifAe8CJiGhwMLwzjK+oTf8iCquoZTabExHRAGJ4\nZxhfOc75OYEINEPLvaGn8GZ4ExHRwGF4Z6j2V+EHX/4h9I4xgCTQGm/r8TXs8iYiooHE8M5h/5pJ\nMLpGAQCaYq05t/GMV8tTebNXnIiISoHhnYOqSBApc7BaLJV7KVLhSmbeKkZERAOJ4Z2DqsiAYU4+\nl8g7AYsnvYmIiAYMwzsHVZEhDAUAkNBzh3chzeZERESlwPDOQVUkQDfDO5knvHuD4U5ERMXE8M5B\nkiQo1pot21o6cm8kvNsTERENFIZ3Hgp8AICV9TuwsyWS9TxHkhMR0WBheOdhhzdkHZ2R7pvO2SxO\nREQDieGdhyqlwzsX4bpXzBD5lw4lIiIqNoZ3HnZ4S0qe6VFd3EFORERUagzvPHyKz5yIRdaR1Lqv\nrA2w8iYiooHD8M5DlRXAUCApOpKp7KZzd7HNZnMiIhpIDO88fKp1r7esI5nqofJmszkREQ0ghnce\n5ixrKiRFQ0LLUXm7f2blTUREA4jhnYeqyAVX3nqe8GZBTkREpcDwzkOWJXN+c1lHIpljxLn7VjEO\nWCMiogHE8M7DMIQ5YE0WSGiprOe9zeY9lNicw4WIiIqI4Z2HYQhAN+c3j2mJ7rdlnzcREQ0ghnce\nuiGcZUFjqXj2Bp5bxdi5TUREA6eg8F67di2WLl0KALj77rtx8cUX4/333y/pgQ023RAQyQAAIKKH\nu92Wfd5ERDSQCgrvOXPmYL/99sP777+PNWvW4MYbb8R9991X6mMbVIYhIBIhAEBMdGU9z1vFiIho\nsBQU3oFAAPvuuy9ee+01nH/++Zg8eTJkeXi3uJuVdzfh7VmYhM3mREQ0cApK4FgshoULF+LVV1/F\niSeeiPb2dnR2dpb62AaVIQREMggASCLXet7pwK7f1pZzxDkXLCEiolIoKLyvueYaLFiwAD/96U9R\nVVWFxx57DJdcckmJD21w6a5m86Scq8873VS+uy2CpvZY9hZ2djPDiYioiNRCNvra176Gww8/HFVV\nVWhubsaUKVPw1a9+tdTHNqgMwwAMFUJToSvRrOfdlTckkQ5q9zZW5c0KnIiIiqmgyvvWW2/FwoUL\n0d7ejgsvvBCPP/44br755hIf2uD60rhqAIBIhKCrkawAzry3O3ezub1taY6RiIj2TAWF9yeffIJv\nf/vbWLhwIc455xzcc8892Lp1a6mPbVBdcsbB+N7Mg+DTqwFZR0cyo49fSieyxMqbiIgGUEHhbYfP\nG2+8gRkzZgAAkslk6Y6qDFQGfTj56AkIiBEAgMZok+d54b63WxI5VyGxA53ZTURExVRQeO+33344\n88wzEYlEcMghh2D+/Pmoqakp9bGVhZAwP+fOsDe8vROziJwBzcqbiIhKoaABa3PmzEF9fT0OOOAA\nAMDkyZNxxx13lPTAykW1MgotALZ37fY8nll557rXO+lrARSJfd5ERFRUBYV3PB7H66+/jnvvvReS\nJOGoo47C5MmTS31sZWGkbzSAXM3mmaPNvQm9qf1ztO31OvwVYyBaTy71YRIR0R6koGbzG2+8EeFw\nGBdeeCHOP/98NDc344Ybbij1sZWFmmAVhACi1uIkH3zaiBfe3gJkNJvruje869s2AQCUmhb2eRMR\nUVEVVHk3Nzfjrrvucn4/5ZRTMHv27JIdVDmpCKpAVIJm6ACAB55fCwA4cLLrukcS6EqGcdt7D+Oc\nyf+GQ8cchNZ4KwBApHzs8yYioqIqeHrUWCw9g1g0GkUi0f0a18NFZVAFhAxN1z2Pp9y/S8DqjlXY\nEdmFBz5+GADQEm8DAIhkiH3eRERUVAVV3hdccAHOOOMMHH744QCAdevW4aqrrirpgZWLiqAPEBI0\n4Q3vpK65fhMQGQndaoe3prLyJiKioi
oovM877zxMnToV69atgyRJuPHGG/HYY4+V+tjKgll5S9AN\n74xqKS0d3lLGgDUhhFN5QzYY3kREVFQFhTcA7LXXXthrr72c31evXl2SAyo3duWti+6azYUnoBN6\n0pk+VZJ1DlgjIqKi6vOi3HtKNVkZVCGEbC5U4pIy3GEunAFtABDX4+mnFH2POVdERDQw+hzekiQV\n8zjKVkVQBSBBhze8tYzKO2GkB/DFtHR4S7LOAWtERFRU3Tabn3TSSTlDWgiBtra2kh1UOamw+ryF\n8PZdp3QNAdd2yTzhzT5vIiIqtm7D+4knnhio4yhbiixDEjIMpKC5J2KR3TOsGXkrb7DPm4iIiqzb\n8J4wYcJAHUdZkyUJAgZSWrqpXJJdt4pJQMod3qmoazsDBpjeRERUPH3u8y5EfX09Tj31VDz++ONZ\nzy1fvhznnXceLrjgAjzwwAOlPIx+kyADEEhprn5v1R3eAkmRDu+2RIfn9ULSQEREVCwlC+9oNIpb\nb70VU6ZMyfn8nDlzcP/99+PJJ5/EO++8g88++6xUh9JviiRDSAaSrvD2VN4QSBnp9c2d8DbM0ysk\n721mRERE/VGy8Pb7/Zg7dy7q6uqynmtoaEBNTQ322msvyLKMk046CStWrCjVofSbLCkABOJJVwgr\n3klaUsIV3vF28wctCAAQYOVNRETFU7LwVlUVwWAw53NNTU0YPXq08/vo0aPR1NSUc9tyoMgyJFmg\nI5JuGpdUb+Wtwd1sboV3yhwf+2w3AAAgAElEQVSPzsqbiIiKqeAZ1gbbqFEVUFWlqPusra0uaDtV\nMU+TUFzXOlblLTQVkj/puQu8PWk1m1vhDVkv+L2GouH82QYKz2H/8RwWB89j/w3EORyU8K6rq0Nz\nc7Pz++7du3M2r7u1tUW7fb63amur0dTUVdC2spABCdi2M31vu2SHt+5zqvB9qvZGQ3gHuhJh87lU\nABIAQ9IKfq+hpjfnkXLjOew/nsPi4Hnsv2Kfw3wXAiUdbZ7PxIkTEQ6HsW3bNmiahqVLl2Lq1KmD\ncSgFUWTzNHVE0/3akDXz/m3NvP6pwlhMm+AdnCecZnP2eRMRUfGUrPJeu3Ytfve732H79u1QVRWL\nFy/GjBkzMHHiRJx22mm4+eabce211wIAzjzzTOy3336lOpR+UxUF0IHOqGvaU1UDdBWQzHu4fQhA\nldOn0y/7ENet5nb2eRMRURGVLLwPP/zwbpcNPe644zBv3rxSvX1RqbIV3rH0oDQoGoSuOn3fighA\nkdN98j7Fh6iumE0bDG8iIiqiQWk2H2pUK5S7oq7R5opZeUtOePuhSunwViU1fZ+3zGZzIiIqHoZ3\nAXyKGcrhuN3nLZzK2x6spghvs7kqqxCaFeYyK28iIioehncB/NatYl0xK7xlA5IkzD5v2A/5PM3m\nqqxA6NbvisaVxYiIqGgY3gXwWfeX68KqoJ3bxNzh7Tebyi2qpMIwrN9lnUuTEBFR0TC8C1Dh91k/\nmRHszGvuCm9J+Jy+cQBmFW5V3pJVeXdGk7j/2dXY1hgekOMmIqLhieFdAL/PCm/JmkdNsSpwwzXj\nm65mNJur6cpcMdf0/ufyrVi1sRn3Pbt6AI6aiIiGK4Z3ARTJOk2SgCSlK293szkMNavZ3A53STYr\nb3s98GSKA9iIiKjvGN4FUOxbwCSByqAPvoDVg+2qvCXd22yuuprNoegw2OlNRERFMmQWJhlMslV5\nS5JAZciHsGrAACB0BYmNR0EZ2QRZqvbcKqZIKgAZQpedypuIiKgYWHkXIN1sbuDEr4yHL2D1fRsq\njLbxSG35CoRhB7b9GsXZxu7zdkjSwBw4ERENSwzvAshWEP/7qZNx5tcmweczk9i5jxuArouMZnPV\n2UbKvM+bVTgREfUDw7sAduU9fmwIkiRB8dmVtyu8hchoNndV3jL7vImIqHgY3gWQrSVBDWGGtqxa\no8Vdo811XXjmNreb0IWuAIoGwzDSO2SzORER9QPDuwB2Fa1b4S1Z93m7m80NQzgD2wCkg1xXIUlA\nyuDiJEREVBwcbV4AO5S/6NyGz9o3A0rKfMJwVd6GAclVUctOs7n537iWXguceuetj3dgQm0lDti7\nZrAPhYioLDC8C2D3eS/e+joAQLZWGfMMWDPSk7CYr/Fuc+fqe3AELhqQ4x1OYgkNjy7cAAD4yy9m\nDPLREBGVBzabF0B29WUDgJCyJ2nRDYHHXql3frfDW1LNKj2hJyBghntnJIkHnlsDg6POe6TpRs8b\nERHtYRjeBVAk72kSMMy7vQxvn/fGhnbXa8zntN2T0tsgXZl/UN+Enc2REh0xERENZwzvAmSGNwAr\nuNN93PGk7unztkebG51jobWMN3+Gd05znfeP9YhniIgoG8O7ALKsZD+oe4cLRGIpz+8KXK+xKnQD\n3hHnDO+esWeBiCgbw7sAco7KWxgZ/eAwB1c5r5HdK45Z94lL3srbYHj3iOdoePvX+t247I6l2N0a\nHexDIRpSGN4FyN9s7hV2Vd+K69TaQS/YbN5rDO/h7c8vfQLdEFi2eudgHwrRkMLwLkDmaHMATjXt\n5g7j5vZk+glhbtssbYLkT1cYKY6k7hFH5A9v/PMS9Q3DuwC5Ku9RVaFuX7P4vW3pX6yg362uQ/Co\nt5yHUymGd08Y3kRE2RjeBcjV5z1+VBWqQj4AQCjQw1w3OZrYAVbehWCzORFRNoZ3AZQczeaqrDrL\nfFZX+Lp9vcjRxA4AyZSe83FKY3jvGbhWD1HvMLwLIOf4ZnEv/1kdyg7vQyeNxrUXHoWvHTouu/KW\nNQACb3e8jHe2vwcAWPDOFsxd8ElRj3s4YHYTEWVjeBcgksq+jcW9/Gd1hT/r+ZHVfhy272jzOeE9\nzZI/DskfxxfJT/HEp88CAJ5ftgUr1u0q8pEPnLWbW7BibfGPn5U3EVE2hncBJo3YBwDw5VGTncfM\nZnPz5xGV6fAWmlmFj6kYCQCQ5exmcykQg+RPrzJmrxMOwGmKz2f+ss34+LPmPnyK0rrrHx9j7kvF\nbznggDUiomwM7wJU+6vwwIw7cOa+pzqPqa5Z1/yq7Axei6+ZisTGo3DUhP0BABUBNavZXArEIAVi\nzu+NkXQYdxdWndEkXnznc9z7zOr+faAS6unio7fKObxffGeLs+IZEdFAYnj3guIKbFVWPfNu71NX\nBQCoCYzAdbPOwKTx1QCAiqAv655wSUlB8qfD+4GP/+KsEa7p3YR3JJn3uXKR1Io7gr6cm83nL9uC\ntz7eMdiHMaSV8bUZUVljePeCu59blVQ4y2ZIwMGTRgEAamtCzs8AUBlSs/q8IQlP5d2aaIW692YA\ngN7N7WPhaCrvc+Wi2CPoyzm8iYgGC8O7F7Iqbye7JZxxwpfwzan74rJvHOp5TWXQlzUPOmTDCe+v\n7zPd3F9tAyBrWZV3Uk9i4ZZX0Z7oQGe0/CvvRLHD23U6/ufvH6KxPZZ/40HCC4y+4y1iRH3D8O4
F\n9/3e7j5vSQJURcbZ0/ZH7UjvzGuVOZrNIRmQ/HEoIoBzDzwLB4a+AknVIPnj0DIq73/Uv4CXtryC\nFzctQke4/MM7WeRZ49x93vUN7Zj32sai7r8YONlO37HZnKhvGN69oHbT551PZUjN7vOWDEi+JFQj\nCAAwdOt5SUBzVXHtiQ6s2LnS+b0jo897xY6VWLBpUS8/RWkVu/IWGVVtOS7mknnBRURUagzvXvBU\n3pKCQtK7MuiDENnN5lBSkI0AAEDT0o+7+7zvWzU3/RJJdgashQLm/h7f8DQWbX0dulE+M7UVu887\nM6zLsYk6VeRBekREPWF490L2aHMzSLrrtzNvFcucpCUBSQIk3QzvlDUOTZIM6Fafd2ckieZYi/Oa\nqBZzKu+qjBndolr59AMnSthsnuv3cqAxvPuNfd9EvcPw7gXPaHO5h8VILLIsZd/n7TMnaJE0c3IX\n3S5WJQOaYQbBtX98C7rQceDIAwAAsVQMHZEEAMCvKp77qbuS4d5/mCJyH0vxR5tn/l4e4a27Dox9\n3n0nCup8IqJMDO9eUFyBrcqq606xHsoG4X1e8pshbM/GJgzreUkgkdTxxqrt0GWzyq5UK+BX/Ihq\nMcQTZjDqhkBcT8/QFklF+vyZisHdtF30Pu/MyrtMwlvT0sfR3b35ROXglZUNWL+1bbAPg4qI4d0L\n7nW9PQPWemzyywhvnxnMwqq8Dd16Xjbw7Fub8bfFn0JSzI5wvxxEhRpCTIs5FZ4hBLqS6cAO55h7\nfSC5w9tdeacMDXd/+Ces2LEy18sKkt1s3uddFZW72uaANSpniZSOp17biN8/uWqwD4WKiOHdC1kD\n1iw9ZXfdqFDOx42kWXk74S0Z2N5kNoFLqtkRHrDCO6rFnYFRhiEQTrnDe5Arb1d4ufu8t3Y24LP2\nLXh8w9N933eZjjZ3BzYHrFE5K5fWKiouhncvSK5RNYprkpae3Pz94zC+60QkPj3G83gqYTbD233e\nkiTgXApY06X65QBCaghxLY6UZm5oCOFpKh/sZnMtT+WtGVquzXsl84unXAasuQepsc+7H6w/Z+bY\nBiLqHsO7j3yyAvf0qN0J+lVMCnwZRkcthKv/OxaVYRgCuqvytkmqGXw+KYAKXxACAklh9pUbRmaz\n+WBX3q4+by0d3rmWUu2tzLDOvO97sLgvWDjavP/K5aJsOOK5HZ4Y3n2UOT1qTxTFOtVGeluR8iMS\nT0HX0n3e6RdYlbcUQIVaYT1vPmYIIJxKjzAPJ7NDMvMfbCKl4911u5zqvZjczebJZPrnLtcxLnrv\niz7tO/N7Rx/gLyJNN7BkZQOice+88u7AZp93/7Fpt3TKpauJiovh3UfmwiSmQu5R9dnh7VqkROgq\nWjsTcPLUGm0OpPu8VRFASA1ab2pW45l93pnN5l/s7sIPf7cUb3603XnsuTc346EFn2D+si0FfT63\ndVta8doH2/I+7xlt7ro4CLtuYVv4Xu/fF8jRbD7AX0TPv7UZT762EX9fUu953N1Uzmbz/mN4l065\ntFZRcTG8+6jQ+7xtimIlvHuFMV3BLY+uRDhid3obTsVsh7cCPypUc8CbZFXjhiE8TdI7Irs8s6wt\nX7sLAPDU6585jzU0dgEANu3o7NVxA8Cd8z7C35fU521+y9fn7b7/3JD7tiJa1gxrA/w9tHFbBwCg\ntTPheZwD1orD/nOyabd0mN3DE8O7j1RZ6dWiCnblLQz7vxKc028FuiS5m83NKlsRfgTUgPVYesBa\nXDPv8z669itoT3Rgbct656V2FSP7EqhvMwPcp5qj4/vTbJ6vOvKMNk+6wtvVIiAUb/gV/J5Z93kP\nbFC2dZnHPbI64Hnc22wuEI2nsO7z1gE9tuGEAVM6bNUYnhjefaRIhU2PalNVO6itjQ1X5S7Sk7TY\n7CpbNgLpJnopfatYzArv0yedAgB4a9sK57VOv/A+a3DvqofwcdNa+K33T/ajSszXt5tvkhZ35d3X\n8M5s8hvoUcntYfO4R1T4PY+nXIP0NM3AnfM+xp1PfYT6hvYBPb6hzv6nM9AXZW5CCCz9cBt2tQ7u\nfAmlwlaN4Ynh3UeqrOLkoycAAA760qgCtvc2m8vCHd7Wn8E1YE3yJyAMCbLwwWc10UtyepKWmBaD\nLBQ89sIuHDhyf2xo24hdkd3m7uzAC5lBMn/Ty/D5zPdI9WPu8XwDX9yjzd39v+5BdYbct+VMM9/S\nEAKabmTNvFYq9mfOfD8tY5KWLTvN7oimMlxvvJw5zeaD2POweWcnHnulHr+a++7gHUQJsfIenhje\nfeSTFXzntC/jjh9NwWH7ju5xe6fytprNZeSqvN3hHYNIhqDrrv512Wo2N4CYFofQfdi8vRMnjDfv\nH/+0bZP5vN1vnjJHqTdGmyGpZgWZ7EezuZ5jGlAhhGeeb/e0oRHXKHhD6Wt4Zw9Yu/z3b2DO3z7o\n0/56w90FkDkoTcszYI0LbPRNb6vD9zc04sEX1xWlqozEzC6q4VqgsvIenhjefaTKKmRJwtiRuWdP\ny9o+Y7S5e7S63Q9uN5srqg7Jn4RIhKDpBnyKtYqY5K6844BuTtEaUioBAM+/vRHN7bF0FaOkB4nF\nVXOFMvfgqriW7hMvRGbl3RZvx/ee+ylWtb0PqGY4u0MtYbgCW+1bs3n2gDXzd7vSLaXWrvT88cmM\nFgv3eXT/LGekdyyhIZbo/2Q1A03TDWzd1TVg79fb6vCP89fivU92Y3cBTd2vf7itV5/FMATunPeR\n526NoYyV9/DE8O4j91SphVCt0eb2JC2q5FrW0x6wZjWLT5xo7lskQkhpRlazOWCGt6GZj8swt4+m\n4nhx+efpK20lHRqblXcANenp835iwzO4d9VD+KhpbUGfQc+oPjd1fI6ElsCyliUIHvkG4Is74W0I\nA5qhQYHVV9zHyjuzz3sgFwGx108Huq+8NU/l7Q3vK+5+C1fc/VaJjrB0HlrwCW55dOWA9eH3tTp0\n5k/Io7E9hsdfqcctj+afXz+ztaSxPYZ1W1rx10Wf9umYyg2ze3gqaXjffvvtuOCCC3DhhRdi9erV\nnudmzJiBiy66CLNnz8bs2bOxe/fuUh5K0Vz+le/hrP1metb2LoRdeUtWda1KKn71PWu61IxmcyVo\n9puKRAU03Ug3m9vN6rIBXegwUnZ4p5/fvKPTudIWcgp1FWMBAElEoY773FMl2iPUN1rN7T3pbrIH\nSTGgjGx0giypm8EXRJV1Avp2q1jSSHpaEIq95Gh3uqLp982cRU3zDFhzDTTsY7N5S6wVN7xzOza0\nbuzbDors/Q2NAFBQZVsMfa0OMy8oMxXy/0vmn2y4dX2w8h6eShbe//rXv7B161bMmzcPt912G267\n7basbebOnYvHHnsMjz32GMaNG1eqQymqI2sPxxn7fb3Xr3Oaza3wliDjgL1roMiS0w/ujDb3m1+Y\nduWdsjPErrytMBO6VZFb64VLio4dzRGzipEMQNYxOj
AKFx30LfP5jACt9JnN7YVOr6plfAnYI95t\nysgmZxR2QrdmiDMqrffuW+W9UnseoWNegz20qbezRdW3fYZH1j2BVB/mWe+KuirvjLECqTxzm/e1\ngnz1izfRlmjH3DWPFbS9EAJPLKnHui2lvT2tKuTreaMi6Gu+9HSPfUE5LHX765DHPu/hqWThvWLF\nCpx66qkAgAMOOAAdHR0Ih8M9vGr4UuzR5s7tZVbftyJD2KPN7T5t1aq8k0GkdAML3ramFrUGrNnL\nhcIKbwjF83wkrjkBH1KDOGj0gZ7nbVU+c0BbOJk7vNvi7Xh03ZOQrIuJzCrHvtf8lJFnw4hXQK5u\ncyrUlNXfrYgghC47y6D2VhhmOMk1zX16/b2rHsL7uz/CxwV2Dbh1uirvzJDwDNJzN6FrfWz+tVpy\nNFHYRcb2pghe/WAb7pz3UZ/erzvukfWZF2yl4q4OOyNJrN3ckndbz/H11I3ShzJ6uGXdnlh5G0Lg\nd3//EP9c8flgH0rJ9G6asF5obm7GYYcd5vw+evRoNDU1oaqqynnspptuwvbt23HMMcfg2muvzeov\ndBs1qgKq2rum6p7U1lYXdX/dGdlsNT/azeaKitraavhUGYmUt887FJKBlFlZ+/wqWtpTwCjXJC5W\neAvdrIpGjrA+hxXOmiGchU1GVY/AXnXWrWzW/u3PXREIAl1AzIjmPBfLPnkbK3evQuAIGfH3T0f1\niJBnu+QXZrhVh6ogkgHIwSg0w0BtbTVi7eaAMkX2QST9gJrM+R7vbVuFUcEafHns/p7Ho/EUKoLp\nqk8Zux1GR61nm978/Xyh3v+9U64vPUOSPK/3B9LHJrv6XYMVfmc7dytBT+8dCJj/FDVDK+g4O+Lp\nC7Fi/3/c1pluUQm5Pk8pqT7FeZ9fPLQEja1R3Hftydhv75qsbSOx9EVVVXXQeV2u44y7rrnyfY6a\n1phnm5he+N9tKGgKpy+cC/k8w+Ezh6NJfNrQjk8b2nHJN78y4O8/IP9mSv4Olsz7ZK+88kpMmzYN\nNTU1uOKKK7B48WLMmjUr7+vb2orb91ZbW42mpoEbTdvVZX1BWAEqdKCpqQuyLOWYpMX6YhYyOrvi\nUCUFCddrneZva8BaW6s5ktsO/65Iup9Y0hR0tVnPWzO0NTZ2QpIkdMbMintXuAlf7GyCX/Z5+vI7\nwzFnv1IwjJaWCJpC5nsmkjpefOdTqOOARFQ4rQApI4mmpi7s6jAHOukpCdD8kIKRrPNtCAN3vvMQ\nAOCBGXc4j2/Y2oY7nlyF807e32yokAClphkpyfBML9ubv19LR5dn+22NYUgSMKG2yrPdZ9s7cO/T\nH+Pq849EY0u6RSIWT3le3+EKuIireb2tPepsF0+mq+jujvWtxmVY9Nkbzu/23ydTMqXj3U9247iD\n69DWnv73UKz/j1es24VdLVEctl/61sfWtuiA/DuJu85vo9XPvnFLC6p82Y2Dja576Ztawmiq9uf9\n99zSkm7ty/c5OjLOZVNzz68ZSlpb0/8f9/R5Bvp7sVQiroWEBvrzFPsc5rsQKFmzeV1dHZqb002d\njY2NqK1NV05nn302xowZA1VVMX36dNTX1+fazbDh3ELk6vMGkNHnbQ1YU+1FjmVougG/Yo3Ylg0E\nfIrTbG73ecPwNpvHk5qzTYUagk/2eZ5/7q3NeGfNTqfPOqEncd1bv8b1Cx/CZ9s7nGN2z58uBSOe\npuKuWNJpAZAMn3MsQtagG4bTbA5dgdB8kBQdCc3bdJ7Qc98+ttIaLLVw5RanA1JSNchVfR/5HE15\nJ0/59V/+hRsf/lfWds8s/QyRuIZnlm5yms1HVPg8zea6YeCL3el/nO4+b/e98O6R/d01Xc5bu8Dz\ne0TLfaG6YPnneHThBjz56saSNO3OXfAJFiz/HM0d6XM1UPO25+qXzddk7668e1qOtZAxEpnvM9xW\n4ervx+mIJPHBp03FOZgBMtz+hrmULLynTp2KxYsXAwDWrVuHuro6p8m8q6sLl156KZJJ88t85cqV\nOPDAA0t1KGUhff+vNe847D5vKV1NWuEtK1Z1bshIaQYCavo+74BPTt8CZjWb6xrM6t0K51hCd6rz\nkBqCIiuQhAzJev6fK7bi4X+uzxpwFg5twd3/SPehulcFU0Y1oiWRHhxlGMK5QJCFz6m8JUWDpgkk\nrNHmwlAgUubFR1vcezUaTaXf372wiv1FLqvmY0I3L07kEd5+UPMiQcsK5lw6k7mvhA1hYHc0/cVk\nT6aj6Qa6oklUhXwI+BVPiK1YuxtrXQPFtDyD19yz2el5phAzRPbjLbHcg9B2tZih/vmu0t7j3tKR\n/ruUMrzdrXG5Lm7yjST3hHcPo80LGayV+d7D7YvffQ76MjPh//z9Qzzw/JohNfVvrgmlhpuShfdX\nv/pVHHbYYbjwwgsxZ84c3HTTTXjuueewZMkSVFdXY/r06c5tZKNHj+62yXw4kK0Ba3a/tV15T6yt\nAmA1nctWVW6FNwwFKV0gqKbv8w74FSek7cldNN2AJBSn2Tye0JyAt5cTlaB41wuHQEJPoNJeKxyA\n0FTPVbq78lZrt+PvDQ8imdJR39BuTlpih7er8oaiIaUbSOr2iHgZ0Mzwrm/x3pIW1dKh61772/4y\ntS8OjC6zGVcOeQc8aprAw2sfw8+W3YRIKuoJccMQePHtLc5kOJ3J3IG3cMur+M27v3fudbfvCtB0\nga5oCtUVPvhUb3hv3tHh2UfKM2DNtba5PUJd0pHQcg9Ei2vZrQ/5Rv/b/w/phijpl1OLq0uglMud\nunMkZ3jnCdGwK7x7Or5CgjgrvHvY519eXo///r/lPe63XLg/X19Gntu3Cw6lqX/zXSwPJyXt877u\nuus8vx988MHOzxdffDEuvvjiUr59WXEqbzugrdHml5xxMPbfewdeiSsw7AFp9n+FDE0zYOj23Oe6\n2Wwup8MdsKoj4Qp1pG/NspcTlYXqHW2uaBAQ2K9mknO/t4hXpudgR+4Q+cvL6/Gv9Y046/9NgqRo\nELoC3YCn8tZ1A0nDWr5UV2AkzGOYt+kZHD/hSAStVdJirvDuSHRiZMAcnGR/v8jWoDsRr4DQFUhB\n7/GkdANrms1j//mymwEAPzjsIhwz7ij8a/1uzH97C0LHCEABOhPp4HdXa+/sMCfvWLV7DY6qPdwJ\n70RKRziWwoSxlYgndU94B/2utdxlAy0j/gUlPgJSMIIW3Q/AHHyXTBmAZCB45FuYv6kT3z3sW1nn\nM7P1AzAHreVi37FgGKLHirM/8lXeiZSOrmgSY2sKm1WwJ+4gyZWx+YI3Ek+fn54uYgoZaZ35Pj0F\n/turd1rbGVDk8p/nyhPehkAP89rkNZRaJIbSsfZV+f+fN0yMqQlaP3mbzasr/Pi3KftClc1wkkc2\nIpwKQ7Kq8ZRuIJGy/keUzD5vJ9ytyjulGea93q5wloLm1fLY0Bjzd6E4zeZAetWyoBLElV/5sfmg\nrHtmrAqnI
hgd9C668q/1Zn/0xoYOc1CcrkI3BISRWXlbzea6DL3xSxBJM7Cjrv5cd3gv3rrUqdad\nudntixFdhYhXWp/JfZtQdoDtipjHZ37BC2cZ1Q5X5e2e6tReS317s9msbs+E12atJmZW3rInxGLW\nQLTbLjsBoTFtSFR/Dv8Bq+GbsAmrxItOU3hS0yH545D8CWzsyD0NbVw3gzKoBHDKPicCQM570qOp\nmPP31Q0BrciVhbs5tdm1drm7JeGOJ1bh539a4Zl5rj/0HirCfBcoUdd0sz1V3oWFd+ZtgIV98Wd2\nKQgh8PFnzWU3Ha773PYn1IbSLWdsNqeiGVUdwG2XneBUywq8k18okgJJ1RD48ofYEdllzaomIaUZ\nSKYEhCFDkg34fa6QFq5lPo2McA5EASFhTMhscpZyVN4AsGZjJ3738GdQjRAgG051J4RAOBVBlTWR\nS6akpluVt2p++en2RDEaNF044W1oMiBk6O3mYEU7oAHvILKPm9bipS3mGIms6V11FUas0hz17k+/\nRtMMyJL3f2FP8Lmmh+1IdDrv51621J4AJ2m9zl533V6UpLrSD58qw3AtwBK3ngv61Zwz7dmfMakZ\nkHxmOLfEWz2f3WZX3idNnIqJVXtnfwaYs9XNee9ObAq+CkBYK6sV98vJHUSeytsVjvZ88u5m6/5w\nh0GuUMn3GfU83RQ5t+1L5a27jyv//jOPb/naXbj3mdX466INPb7nQHJ/hP5c8w2lanYoXWj0FcN7\nAO01phIHpk6D3joOU+r+n+e5zBHGftkHVTFHmyeSulllywZURXaazYWr2VwYCiRfylkARA5GIeuh\n9LzoIqMyt5qk7XlzzIsD3ak8E3oSmqGhUq1AcrN5n6Q9hzoAJDTdDEddhaYJT5/3X15ej664GZS6\nZq+mZr7WDnXAW3kDwEeNa8xNnT5vb+UNAFIo3XSe0DSnYjyq1jzGlGEHp56ezAaAgMD7uz92nks/\nYU9ba430z2hTHFFhhjeQDji7sgr6FShq9rdh0khCNwxsbwxD8iec998VzZ4C2J7oJqQGnb9VKiPk\nP2hcjY5kJ8LyLsjVbVafd3Er77hrBTXPimnWZ3avsFasL3F3tZ85h735Prk/o2dq2j40my98dytW\nb2rJu02+VfIyZVbe9iDGTdtLv2hOb3gGBvbjNoVi/z9XSkPpQqOvGN4D7IpZU3H1cT/AtMO+1O12\nqqzCp5qVdyJlhndNtYq6kaH0wDO72Vw3zIFhAEJfXQqoCUj+BJRUFYQQ+OPzaxCNCUiygNPsbM8X\n7p5iVU734dn93RVqJTsCVIYAACAASURBVPTmCdA7R8OADsBuEk5Bks3Qjic1T5/3Z9s6sGqTGVS6\nZlXyVmVu94UDQDSjv7cl3obGaHO6/9OpvBWIlNns7p5mNZyMQkDgyNrD8c39Z5rnwqpaw7GU83q9\nrQ5CAG82LEdjW8QTRPY99kKyBt9l3F49wmo2B9Jf1vGEBglAwK84I+LdknoKTy/dhKde/wzwpZug\nd4R3ZW1rV95BNQjVuqVPM7zhvXLXh87PytjtVp934V9OWzq+wJ/XPo5wMpJ3tHE8zxzg9mduaDSv\n8tQJG/HytpcKfu/u9NRsnm+kuztce2w2z9hvNK7h6Tc24Z6nP05v002fd+b+3ecvc8rcZmtA11in\ni6w86D20cBS8nyE09Vyxu5XKEcN7gPl9Cg6eNKrb2eQAwCer8Cmy1WyuQ5FVCDkJQ04BkvWl4fR5\n604VDgDKSPP+eilZiVhCw/ufNmXdCy75zdCwQ1FYfeZ25b0zYgbNCL81QYDzeiu8EXFeH4lrnsob\nSFfYuqZ4Xp9wVd6fN5mVynXH/Bhn7GtOpdueaE9/Qcqu+9l17/EDQJc1rWuVr8IJPrvyjsRS6dHq\nsSroLXthV2wXfvXss3jh7S3pE23tLwnzizfzy7raVXl/vH0zVu/YjFhSRzCgQJYkyEqu8E5imTWo\nya68ge7DO6QE0pV3RrN5S7wN1b4qyEKFXNmRNWBtV2Q33tuZf33zf9TPx6rG1bjx5b/i9sdzbxfP\n009rn4+GRnNMgG/CJqxu/xDhWApPLKnvVxO6O0dyZUrmMqw276IwvWs2D8ey++u7azbPvIBwH1Pm\nc01Wd8OoEYFuj2mgGT3cklfwfoZQNbsn9HkP2Axr1DuqYt5fHEtqSGoGAlAQTnXhXflvgGwu4iKE\nq9lcl5wFFeRqs0lQSoUQtkfmWkHv+9IGpD4/DFLAXrnMHDls6GZzvH070spdq8ztIxMAtDmVM2Qd\nMFTEpU4oMEeoR42Uq/K2mrqtCwwtZVW2OZrN12zdBXUsMMJf5dzSFtPi6S8J2T52NT0JnSss7TnZ\nK32V8Cne4AvHNE+fubbjAKhjd0Ie0YpVG9OTB9n3w8eMMO54/37ElNEA9nKe19ROrK94BsrY/fBk\nwyLz/RJnOyPOJVflbUSrIFeEkdCTqAgoiCU0p88bAHZEssM77qq884V3OBnBmNAoSMlKdIR2Q0fK\nEzi3vncnAKAmMAL71UxCwJ7Uxz4uawBdomIbNm34ctYxAN5mczc7HKMJLf33ADD3pbVYs6kNmiHw\nvZkH5XxtT/L1ecuSBEOIvCuCefq8ezlgzT1ffa73BrxVW+b+3bPmuS/0hBDOQL5yC7nM0eZ9NZQC\nkc3mNGh8soqAT0VXxPyySfc3Cydw7C/7ZMqAcC2bKVeYVZIwZIStLys7PNW6bZBrmiFb4W3Ezfu8\nDatvWlENGMLA6uZPEMIIvPCKNWGIYc+/bo149pnNqCJe4am81boGyKN2Oc3Qeko2mxFzhLd7xLs7\nvJ2Kxj2TnD0Lnavytu9Dr/RVOLPI2f3F4VjK2b/QVAgtPdGNh7WNhhS2djag0f+x5+k2YxeSUgT+\n/dMLm8STOkKBdDcBACTWHwe9dbzzGYP281blXaFU5q68dbvPO2Teiw/vrWIpQ0Ncj6PaV4VqqRaS\nBBiBzpxNyvd/NBd3rLwPQgi8/uE2ayY2gdZ4m3ksqubchZApka/Z3AooTRee127aaf5/YfSjedId\nJO4+b7v1J6nluaDoplk7U+aXeFeOkfLdNptrmeGt53zOfftavhYDIQSefXOTZxbDgeAZbd6Ppu+B\nWqSmGPaE+7wZ3mXi9Emn4OBRBzr3OvtkFUG/4vzDE3L6S8eerGREyAy8aELzNM/KlWZ4G7qcbtbU\nXaOiJQEpEDOraWsCFfteclk2oBk6UkYKWjQEZ35Su9ncqnxl64vciFeiPZxIr3AGwDfhMwgrZFMp\nCaOqA1AlMzyT1rSpumFO8iKEOUNb0BXedsUl5HSfd2azPwBENKvyViucCxk7+CKxFKCmK3e4lk1N\nnwcjPSFOPkp2c3IslUDQnx5dDwBC8zvvsaO1AxV2ePviEJoPtYFx6Eh2eia+AbwD1pZ+YIb79tb0\ngCd7lrsqfyWqYN72JwKdeQcP7Yo2YmP7Jjz+Sj2WvN+AjmSXZzIcuT
J7MFVcS2Bly/KsVeeAdEBp\nugHZdZ99NGVdlAT7vmSonmcglWKHd54Q9FbehQ9YE0KgM9q7ZvPsyts1sM89IY/r4iffRcfnu7rw\nzxVbcftj+bs4SsGd17kGBvbEPb/AUFGs1oZyxvAuE//fAWfgJ0dfZt0iBqiy2WxuS0npL2A7qGsq\nrfCOa5B82TN1CUN2+vjcfeKSrEMKRK0mc8nZFgBk1XACUNcl1768fd72hCkiXoH2cDIdrjCb0u3K\nW+gKVEXGiKDZPG/fLhWOpszPofmh6cKpvONaHAnNACCQ8rUDwgxGu9nefTucHUqVvgrzVjtIzoC4\ncCzlDG4TKX+Oyl3Af9BKz2fPJQVr/vdPj4HeYYYnanYh4LcvqlyD6qxz8MTrG8zKXElBCkZhRKsw\n2mfeKpdZfcdc4b16o1khN7anJ5SxZ56r9lVBMsygFJLebRW0rvlT5+ftHebAwUlV5gBJKZQ9Teyz\nG1/EB13L4Nvn06zn7PBKaYZ5+6HFvmjpzz3NIk+zuT1oMpmnP7uvfd66Yc6alykz4LuvvHM3m7tb\nLqJGFz5sXJ31Pok8XROl1t8Ba3Z4r9ncUvKpeYuluwuw7vx10QZnEp5yx/AuM4p137JPVhH0eatl\nN2HIGGmFdyyhQW8dl7Uvs/K2vmxEOoilQBSSqkEkXTNluSpbu8/VHinuft4OTykQMydesSZnCfpV\nnDzCmkFMNiAk3ZqaVIJPlVFTYb5XJGmGVWs4BikQgxGvQDJleJrNkykdck0z9EA7KhP7mHO4Z1T+\nABC1Ku8qfyUkSYJPVhFJxLHgnS1WeNvN5n4AMoQhpcPfl4AywgxLvWW8s09ZT48U3m+vaucWPpEM\nQsTMufn9B6xG20irerIH1bmqe8jmjGxyVbvZzN01GmN85t9nc8fnnr+RHd6bGiLOHPbuPm+7X7/K\nX+lZtz39hZT9ZRxOpPvZd0fN/v0vjzBnN5QrO7NGnO+KmhPb5Ap2O7xSuuFtclfSa8c/99YmvPbB\ntqzX9iTfaPN05e0Nu2ff3ISXln+OsNTsdH/0ps9b13NX3lpGuOVbqx0AYnmazd2tBLvHvYSH1z6O\n19au97z2b1/8H/wHZy+GUyhDCM/FQ8Gv62cVav89GhrD+M2j72PTjg7c98xqRON9v3ArplxjI/py\nwZJM6Xjzox34y8vre964DDC8y4w9baoqKZ7KO4sho6bKbPKOJjSkPj8MX459A0YsPamK0CWn8naW\nEQUgWc3u9qxn9v4As9nZvlXJXXlnjVZXXCPMAVQEVewd2MfaRoMOzemHVhUZFX4zFCNWsOzobIYk\nCYh4BZKajpCSEd6VZr9gZXQ/81hzNJt3psyFEsYEzYrYp/iwszWM55dtMf/B+lyVN2BeaNjH75rn\nXa4I43jpAnNbpB8/cOJIp5lbaH7rIsDU5WswH5fTg+Ls1gF17834rKUBcrV5cWCER2Kczzw3G9q8\nM63t6mqF0GX88dkNCPnM/bv7vO1b9qp9Va7WAyNdfarZYbR5d5vzc7u1GEyNOhpGIgg51JX1ZaZI\n9oWZAXWfDVDqvjB/l1zN5prhad2RrM8djafw0vKt+PuSeuyM7Ma8T5/POV97Lkae+7ztqYTXbmnF\nR5+lBxf+c8VWvLBuGT6vfhnq3uY8+T32eXtmFzN6rLx1XXQ72txTeWu5K2+7p+mJ1z51LpQMYaAj\n1QZlRO5FZwpx3zOr8V93vdVta4emG57lMM337t993plTwP7+yVX46LNmvPnR9l7vq9iefXMTfnTn\nm9jZ4p06ubtBh24bt7Wjrcv8/zVfyLd0xEs6HXFfMbzLjF15GxCe8E6PJbcYCmoqrfCOpwChoEau\n9TRf64bkDFjzfPFat4l5mrqtn9uqV6fvv3Y1J2eFp6x7Xl8RUOFTFXMOckWH4QlvCRX/P3vfGW9H\nVa/9TN/19H5OzknvIR0SEjpEulIFiShYLyI2BEQR9PpD5aJX5d5XQbHAtYAIypULWABpIXRIg5De\nc0pO3XXKej+sMmv2npOQkJAE5vlAOHvKXrNm9jzr356/wfTMWax0xyDt5EUKSdiOhxjTYM+5eRSY\nJjgdgxEYq+w2H3D7YGkmKkxqERuqESB33eR9z/k51OD4GZztI2F5FehIjxDu/mRMxylzRvgxascA\nsf34rgGLndIG8VhnODZGNZaFPvlpkZvgZSphKnG0pVqwrm+DiGl7xENPoQskT5vT8LwDuXXqoBTz\nlhdQPO6rsAXKmMqRmBc/EwCwTYqZ9xeY7CuJg2QroJhF9GT7AxKnQo42MQijeQN0Rt5xU4fteMg5\nebylPwa10idSYXnnfCL58Su348mtS/D0tucwHAghIg8j2DDD30f+/Cf3Bd3PWgO18DU2Fsfx8Pra\nnmFL1uRzOR4JlXYtzXrfXZ33cAlroXFuhYhEtqCG/b4RAReWkRvHlOJbv34Bn//RUwGyGS488Xah\nlogfcC/Du9Uudnd4aMlGAMCK9cFFUVAlL/yaO3uz+O7/vIyb734RQDjJ9/Tn8dWfPov/vPe1sm0H\nGxF5H2Lgcp8e8QJu85OSl+CyKR8RWeeEqEjFDWiqItxXhq4G4rfE8RPWvLxvkYsabznWy+uwY9vx\n5zUPBT4D4Mufqi4AAqjB2vKERevS4eo0EU3xyds0NCTMIHl35ujLl1reXiDmXXRcEVsnpCRhTvXd\nxUNuH+pitejpz+P7v30Zjh20qDXTptYwczcTT2rqwv51drbD3dXC+qYbgOohldBx2xePRW1lDBk7\nA0MxAaIGLG9DYeSt+AI1gfkC/GQ3x4DjemhPt8EhDr551xMAgK5cD4jiwcumEDM1DGR4jbwjXLfC\n8jZTUtzft7x5XH989RjUaC1iO8cga4X6qwfXw8vSmv2/L1+BL972NJ5bSePv/YX+wHGinaylwXE9\nPLVlCfr1jZClCbjl3ZcpAIoL64h/iYXGa10rUIqubA8Gi0P4zSNv4KofP4Wt3ZlhNbdLX7YD2SJW\n71oLrWETVOba91gI47W1PfjRH1/Dd38d7o4W51IdvLTzFQxk/UUst4qD3+1hlf009DYa/y+zvAsS\nebthbnP/M0X1hFUnt9flHqF9xe6M561d9HmRPQHDLYzeLrRS5aJDEKW6GYFF2zBW86ad9J70MC3/\nsORH3tt+1cbesm0HGxF5H2KQyduSyLs53YA5jTNgKiwm66mIWzoqkqZI7DE0FfUVPkm7riLI29ky\nDsUNkwFIwiEy2Uj//0bvWwDoAsHfTv/fHLNMxHJlyzwRM6DrKrW8VQdEcaEpPB6uIcXc5isHX8fD\n6/+B3iJzKRcSKNgutnXmoCoqNnX3omh7Qq5UfAdhMWtOiEYBLhxk+k1c87MleHNzHwaGXMhtTxW9\n6LvMAboA4W5zVodOHJal7hIYGvMU6P6POGNnYaksN0Amb7Yw8VAU4QNSQt6KXqTKdVDgekR0U4Pq\nghCC7Sx5jeTSKBRdZLJ+j
Tx/6QvL20j6fd+lmDe3vFNGCiopDy2IVquOKch7xY4NAIA7HlwJQgj6\nCiWlSxqXf6WWt0tCrErNRUXSxMBQEUosCzVGX3KqomJd/4aApekRD//x4m345Yrf4cnXaDLQ+m0D\nw8a8S8l73dYB/PjV22GOXClkfVHiiXpdcq/L4C9xo/0N3Lf+TxhM+fFM/rIujXFvJstgtKwXf3O8\n2rUcAwU/L6DUba5YGcSP/Jv/5ap/H4ekKgO5MmRf8HZ6cpcuSDj25DbfuGMA/3X/soDrfTjyPpRy\nuEuH+HZi3p0lLU7DKjhMQyv77FBBRN6HGHi3MZd4Abd5OkGJQybvmKkFpBh1XUVrbYX4m3gaeofY\nKp9ocDtHiAYn/Bz+viGPQpjbHIA1eSn9n5KYNz1GBzQXRHVE85WYqSNp+eP86/q/+brmtonnV+7E\nt3/zIlxbw2CBveTYGF23xDvAPuelal2d0o+LaDSBTKUdxVy1ECBcriInn58vWlzPg6XSfXVDJu8M\nYiodu6gVB+ApLC9AsUXSXqAcDzSWbjHCdlxPiKcomgvXI9gyRInMy6XYi1AR96efuXeHbE7eKSGB\nC9XzrT5meadNSu6EoKScboiGDYgKkqXPhpyYtq2/F45EzqpnsnI6D6ahwnY9IfIiQzc8NFbHA+1n\nTxt5MmY3TAfgl8ABtAFNxsliTd86keCn6wrk05JhyAYA1m4rr4tWNV8NcHcQOvkshGEnt4ltnHxl\nK01uHSvvs7p3LX6+7C68YD8ItXon9Kb1cFwPHgsDFG0XetPGkkF6tIwS/n0EEBDuKRsv8fZIzm8n\nbC1n4e+N5X3lfzyOl1d34bkV5Tr85eMoP9ef1/zfbtX+9hV7Gnep5e0GLO/wY3mf8mSMl5mW73co\nl5lF5H2IgVvepJS848wFzcmbqIgZQfI2NEVYhAAATxUPKIXix39RalmHrDBD3OoyFATd5lTpTYei\nuVAU1gwFQNzUUBEP9oC2XZ6lreI1Fssjju5b1oxcbVt+80iWM3P9J9W0fz2uCkUliM/5BxQzD6J4\nAVc37bxGaDy9pDOb6xKYjFw1g373uv4NsD0HMS3BxufPnY0C8k4BLmxhvYfNkaHQc7oegcoFDVUX\nRdtFX44nDkpa2GyBMsAWXZt6ekA8BU7RD4koiivkTLmLO2kkqTEqhwYA5NwMVI/OPfdCFIlv+e0c\npLHCZHYU8svno4KwzHvdEfK8A5LL18vTcyUTCpK8xpuPQU+IOZRlcLn17xFPJPFpqor/emCZf97d\nSHgOZotlOR+q7ore67sDf4nzygoS8/MBuFXtegSKlYU17Um83hOMsfMXf3+BHpdVemGNewVG+5so\n2g4efHo9rvrxU1i5cRfUip7AsUrA8vYTqoazvPNOAf/+3K349crf7/aa9qTbrVZ24U3mPQPefsxb\nnvdk3F+YD7eYKP246Nr4+6YncNeqe3Y7vr2F63n42h1LcO9j4W11AZQ6YgJW9HCaCNt76LuxtoL+\n/sLc6283Uc0jBKs29r6jxi97i4i8DzHwhDW3JObNLW9D5a5XD5apo7bSJ0VdV8vIuxQyAQXIhoQ8\nCnsgd/m7EjFK3rL1yWO0MVNDMhaU7LSJLb6DJxEpngHD8jCiIeWXAknhQSK3PWX/5vNAR2MaJ8xs\nDYxXYdnqPMnMMjV/PjSnrDOb43rCbc47hf34lTsAUPUzOugY7E1UCtT2CljeQ12w3mBN4FwyTGbN\nP7NsOx54YpMYe9HxROa9fJ9UaIDioj9TxKadg+jNDQKOibVbB0RCG1RPlCxx8t64Nc+6z0neBcVD\nkRRgEDZ+fq3En1SejY5CEqZdA5PF8hXNgc403Tlx0fmk280YEd4WbnnHtLjwLshKev1539LnBNfd\nnwsmj9VtwD82/QuATzAXnzyOnstxxaLW3jyOnch5W+QtkvGYkp6iEtF5T7a89Za1UONZPLL1kcDx\nolQupMd63inikefpPX3hra1Q48GMZyhyzFsi72Es78c2P4nOXDde3PkqVvS8OSxp7l6m1IM14SX8\nz9q7sXGAVkS83WzzjTv9+/R2Er5K8wGyUmfEMG/NviKTc9DVl8fGnYPIOTlsGiwvS1R3Y3kPN/4u\nFs/mW12XAEYe8SMfwWObnsTTr2/H7//xVuixpXjxjU6ahf9WePjmQCAi70MMgZh3wG1OicVQfOvN\nKnWba6qwfADfsm6o9gl+WPIOURIjw7jNOUbUVfqHqwomtlcFysc42cRMXciJcnDxEz6GuKVjYmsD\nHGLjcxeNRnUFHWdBTiJ2Zbc3V3BTkU4YaKyOB65HJOUxy7syYYpriM96TKjQnbuQkoHjEphsMdLX\n9Dh6cr2iZGt2zZHivM6OUXD7a2ATGy/upPrvXBa11G0OACZbbK3fPuhnzGsOirYrVMrkudVVHVA9\nPL9qJ2761QtQDBq394h0P1TXLxdi5L1lexH5okv34d4JVmGQGeT3UQFxNbjwJ7UvT4nZKRiIW5oY\nLzQbpk7H1S/FefkCSTdcn7wNej5L8cm74PrWZU/Od3trjLxL1dOMjlV4YM1DcDwXhAAT26tw1GRa\nG19winCJC7evDs72MaKigTeMCUPfUAGPv7JVkJDcjc4cuRJqRXfA8pZDQNLFis5hgYQzBqphH1zA\naH3t0gLDldzmEnlb4eQtJ/r9v9fuxLLulaH7DVceV3RtaHV+WODxzc8AKPdqEELws9d/jf9dG1yo\n9PTnWRc8EiDm4cgvb7t4+LmNWL2ZlmxmbT+GvCvfF3rMvoCX5xUdF//96p34/gs/KRM7KvXWBGr3\nh1ns8NACfw4c1xNVDH9a81f86onnsKnTv++7s8K3sERBfr/fDUTkfYhBVcOzzbmVoTGZUUV1ETM0\n1ErkPba1UsiE0pPQ40c2paXPJHeYbJmXan5Lx7O9yzaPb6kVC4yBrI3KlIXjjmgX23lTkpipiZcc\nh6uxlxnLJm+pTeDoFkqSd6+6V4xHdpvLMWthgbsa0gkDDdWJwPWUlsNVJM3A9ei1NN5cnaIuccfz\nhCeBqDbuXf0AvcaqMWhPjQheuEv3W9e3EaYSA8nR+f33y+eXzRG3vAH4Cxtmeedt+sKvTib8/TUD\niurhjU19gOJC0VwQx4TrefA8iJg4J29uUaowaRmT7DYXjVmkBZur0wx5Bp4QZ+cNxC1dkLeiOeLe\n9hcly7tAnzdV84TbnBOXqcZD3ea9WXo8IUy6Vy+KEkYK/9nryXK3ugKTkXPe4wsxQ1wDNCcQ81ZT\nvbjnzT8LBb8f3PMq7n70TSxdxWK3hpSAVbMT1sQXBQm6HvGrGmRIuQWDdjl5F11byMNyD4ilxv3K\nDtUT3gWZvLmGQSl6MoMwSAIN8ToAwxPgcKpyj21+MqDBz/NKZPL9xV9XYe2urVjWvRKPbHxMfH7/\nk2tx9zPPIT7zceitawLqdjIxquke4bnY3p3BH59Yi+/9lraslWV4d2a7Qse4L8ixDP
9C0cP6Aerp\nKG3y44sJ2VjbtwHLi0/AGE1DII7r4anXt+FP/1obOIaHRGQJYPkdEZu6BErCf/YzuxGl6WFW/HCS\nvgcCEXkfYjihbSEA4JSO42GZ5daAcFUzy7u1jr4oJnVUY1RzhbAeAQh3LHe5A74rm26XasI7R8Dp\nbIPT2eZvl9zQJJeCN1SJRNHvuGVqJs4/kVoZU0ZS17HIqAZQLPjkHbf0gIAMjAIjW7pPU20Ccxpn\nYFrdJKzr34AhbTs7h3TxcsyaK615GtIJk1qBsopcSUb9rPH1qEun/HMxxboYUzVzXSL01wFfxtXQ\nDJhG8GfCSSTjZGEp/uKptT6FUpiaLITj66sXbQ95FhNorvGTDGO6IVnOvshMNu/QlzBbwMiWNyGA\n6rG+6l65d0K27ImniVp2wE+kKuboPbKYWA50h1U7EAzZQ1DsONA5GvYW1pVMc0SiD0+aMxCDxa5X\nJm/umvcGqJiOVrsNg3JrTql0atsQJVtVVYVlXfAYKTAvCl3EObAMSU9/1HI8ufVZPLLhn3A9V5RM\n9bA2nUqImI3sNlfCyrcUV7yMB/dgeXMPSEz1PUCK6olqD368O1gFNZ4RLm0ZWTuHQk7DB0efAcDv\nA5ArOPjt31eL/Rw3PKntjV1BFy8PXcge7KGcjd89/1TZsX99diNyBiVEo3VtoFc5J38lNgRr0guw\nJtM6/lK1uqyUUf+/ax8WvyEZL+54Bc9L/elLUXSL5fr/kuXN8asVv8OPX75dhMe4Vfzwhn/ihy//\nP2zxVkKv2waoDhzPw6/+7w08tGRjwAshpH+55e2RMg+kKpP3btrfdrPnbLgGPwcCEXkfYphcOwE/\nOf67mNVwBCyj/PYYnGBUDzFTQ1XKwq1XHI0vf5hm+fK4LQBBvgH3ouwelC1vosHeMBVeVs5WD24v\nrJyPKUnfhWxqBj588nh89zPzMGMctRZiElnlmfEbs6jbvLBsIYobJ4rtinR+njRycvvx9OsUjyXE\nlMfdrSnPCsubeNTytgzNT3aDH1fk1xC3NMwd7y88eDcxUzOggCa1aFKHXMI8Dbyvugw5NGCqscC2\ns0afiqq+2eLvQHvOgHyqi6JXBCEKWup8z0jcNH0vCCccx0Qmb9MsbE+lMe+Cr3QH1wAhCLjNdU3y\nTngl91+aJy7/6hQMxE0NMS3OzmvT5iu6DZe4UPKViO+a5hOo6kiWN53LJ1/y+8bLMW+e8ObsGAli\nGzDa30Rfwbcqq6sly3Dlb2BNfRquPgRNVaEqCoqk3PImqitCSfLcPrrxMVz1xNegN9FSL9cjwoPh\nZYOLq0DCWgi5Q/WEBSqTN/GYfKtr+6ED9jwljARkHf3BnA3Xc7FhYBNMLyUWMLe8eBs2D/ou7oJt\nU8liR0c2xyxCRn6PLN0kyc8SbM9tx5cfvwlPrH8hMNwEy81wulqhQRM6/4E4t+KhR6WJX2rp619a\n5PXYdBHVP1TwNQXYgpiXBZaSmWx5bx7ahqU7yrPOf7Xy9/jNyj+Iv4u2G9B8v/Wl/8Y1T92EXfle\n/HrFH7Az0yme9VIZ1NV9a6HX00UQJ+A1fesD+6iJwYDbnH+XJ2nYc0+G63riPnLIev6lynUyIvKO\nAADQVPYjUspdedzyVlRPuNJrKmJCwjBgeTOrNpDMEaKqxlGZNANxW0teCDA0VfrkbmoGFEVBY7Xv\n9pXJm7+EYqbGXKBKILNazlavTtPjWpK+znhpkhx3hauJId8t7lLL2zTUACnpsWLgHKauBRc2jNhM\nzYSmqXC8oOXNyVtX9fLYqpQ3YKlWYNOpI09ERW6cv13zr5d7PbTa7XijawOKjg14KtobZcvbpN4F\nkDLL2/OISNrjpMFnvQAAIABJREFULwlFt0EcAx4jb3gaFIVlC3P3eWkSIRfaAc1GB2huQNzS/fun\nOYiZmp87UIhTS5yoILaBIjJSzLsI4mpYsqwbdz9MXZM524/r8nixl6mE09kORSEYcCh5X/Ghqaiu\nCU6vmhhCwaRuV8NQy8ibXoODdFJ+PoOWqNEuNVlhiwsu7AJQFz63vF3J8k5qKcQd2kRGUT3YbJ4H\n7SHEtBgaN58HZ/toAIBDbL8Gmn1Hykj4vyvFQ6Ho4q3e9cg5eaSdVnj9dWIMXUO+KtiWHuZKdw0M\nZei4RJMdRhp6y1rE5vwdD+/6LYrI4c8rngxc85Cdpde1fip01RALKNntrbe+BcegnhAufyyseOn3\ns9T5E17dvAFf+q9nxCLHigWJqbQ3Oq9l93rpb3ht34bAdtlb4HgObNfG13/+HP7th/8Sn29l5ZM3\nPPtdvLDzZfxr6xLkmOVdCHNJMw8cH2NNrDqwWUkMBkrF1vZuxuceuwbLu94Qn8ltb6EHr4mrJAI0\ncY4QgnvefACvdvnhCdvx0McSE4frQX8gEJH3IYzm2gROmNWKL15whPgskE0eAqOEcDVVCcgbkuEs\nbwC1lbEAoafiQWICgNYaP0lNjudyWJLb3M821/06TEk0hYu4AEBVih4XsFRLM+Cl97OaYGVWsuWt\ny+QddJsbuio0vGWYqgFNU6h2t7SY4Mk3pmqUkbec9CeTsxibNN+xEMtbjWXxt/7f04Q1TxPudtNQ\nYeq+Z0V0RXNMZPIOtSCY5e1fqA04BmzH893mAGJxlLnNP3bqBEHufFvOy9JnytMRs3RhvampPjqn\njLy9giU8EKQYR8YblFzGRX9O2Hf15XwrLONkaEzZMcR+3EqLWRo8zd83zix/T6X3z9BUOGD3UnwH\n/d5kXAqTlHTVcwerpG1sIWeb8DIVYpz8he95HqAX4RXi+FjHlTAddqzqsg531PJOm0nYDoSHpugV\nxQKAex8qrKT4XXHPx7Iu6vKOF5vhDVXD3joGAPDcm742+NZeupghjo6BQWZpMsubX6Wa3hUoAyxk\ng7+/jJ1hc6RAgyFCF778bT/05vVAMQEvkxZiQaVqfRxLNvtlc+NHVOH8U/zcDzW1C/IP8ub/eQld\ng9TFbO8YgYQex/qSJjy25xPj7ct+gy8/eQN2ubQpztbuTKjVammmkKQNI0au9Oc4dCxyxjtA3d6y\nbsCDq/8BAPjjW38Wn/FjHdcLvEMAQElI5J230Vvow5Nbl+Dny+4Sn+8azIuZiCzvCACoxfzRRRNw\nxBh/tT4hdQTcwSoU3pwdekwpucdMLaiQJJM3CZKZkDdlkBOpONpqq6T9yxcSlaZvRfKXWMyULT//\n/LpE3tzy1lTNJ9mSxUVx3RHwWMIUb0nKY96moQlXOOCX9nDi0jU1IBwiX4OuKtjUOYTfPbpOfM7L\no/RQ8vYXKLy0SoYWIG95MVOSw6C6UIkuLNiKhCnlNDjCCiCOQd3mhJM3LwVzoai0tj5XcFDgbnMA\nlim7zTUoACxDCyTNAUCB5JDQaC5CwtIRN+j86rU70KmsgWKypKd8TMwDKcThEgdEp5nJil70xXDY\ngi3LuscRQtDv7PLbz7Lvz7t0e6ezCTuTV
Nr0kxMvw1ktF9Ahs/71pqHCUYKVA2JRKB5PAhhFjEx3\n4OjmuXRqpC588iKosHIezGKNyDsAaLKiYhQB2/QXSACgUMvbIx6G7AzSZgq27Sc2Op4jkTf9jsp4\nWlLCo9v6cixbv0jnmeTpwAekBc6OPhZGcA08vIS6yGWyo99B5X7zrx1L50hx8M+XttA+5ZkiuocG\nxBzpii5i5tzw1iq7oShAYeN4EMeEogDZYkHEkkvj/tttP8FLU5WA0Iw1+XmYE18AJ/A1W/rx6rrt\nbJ4NjKrsQHd+FwaKfqWCHMte2fMmrftnv+MbfrEUP/uLb81yFN2i0DRwPSJCFv7AWNWJ68sJK0RF\n7oVF9JqsbMBtvnEbnfNdhV6YE16A3voWc6F71I3O3iFpvQLENpnbnC0M8g5eWeu3C/WIhy2dQ3h+\nVac/3ihhLcJwiGtxFFfNg9dfH7q9lLwtUyuxvOWENXr7501uxPc/O5+2/pMs7/GtJf5MMMuCn1sr\nt7wbEv5Cg1tIPGv5e5+Zh7PmjQ0dK7e8AYiM5dIMYJJP4SOTzg1+oWR5u92tZePxX8R+9q0MQzWk\nemH/+3gs2ND0snri0fX+NRoh5C3PtxJI6C8JA6gudMUQZXQVSRNtKRqX16q6AuSbZZa3r89OAuSe\nLTjCbQ4Ahkl8kvc0aBpLAJOS5gACm+TAeSNmakjr/uIrhz5R1uQWLDEPXoH1ZlcGac9yzRPhEL5Y\n4q1fu3O7YKMgLF5ueXsKJYo3B/3yqE1bbdz10Dq2ncdXM7Br3gKgCNLjC4BYjL2U9SIUBUjoSVw0\n4TwoTgyKbqOphu4vXP/FGEBUmAodP7f+O6uepIsgT4Preb4YDot5d2W74REP9fE62K4nFp02sSWl\nO2Z5mwkpt4Fu4yEEl1Vf8DnK2QUUbRe/fGgVVm1hjXpcXWyX8wb4dRLHFGI7ikoT2VZv7sMdf10B\nWymI+VWhS25ztsBgdegkmxZz2DkwhKLtQW9eC60mqKrW7/o1y6qqBKRhAdAOaZK1nnPZ78s10Jyg\nZX49OT80kA35/cmu+tfWlau6FdyicJtD8VhIyQfPc+GLqEwxA89mioKuCkVzgqV10m9Qq+wRTXgc\nhzDLm97Hj7Z/Bl6mIuClsl0Pv3/CL9+7c/n/4DtLbsMDT/niMZHlHWFYqHu4Y2aJNWwZWlD3N1Aq\nRh/kptoE6qvi0FQ1QO6TO2rF/08fU4svXzjdj8ejNL5OURvzCZ94Kl08MJd5Q3UCR030CVa4iAGk\nErIrmrfwLL/YMXV+TJx386pImNA1Bc7WsdQqIeUxfuIRVFoVpaeDqRmi5EgJqXU3VCMgvfjZD07B\n5R+YIf7Ww8hb2t+TpEcntNUFd9RtqIqOuGR5z2+eCxAFesNmP6Pe1ZHJ2zR2yaw6a+ozfptXx0Au\nHyTvbGIjI2h6nzVNoeTLXtqJlEvdpooHz6afxS0dST2Jwlv0+lzFFpa3V4gL0RauVpbxBkUSk8hl\nYN+/dscu/Owvy/HEm5ScSYaFW3jZGrdwTN+789yrg4J8XOY2R7IXUF2MVuaCFBOB73hsgJYUcosx\nriawoycLt6hDt1xRiREgb0BkxOftIjziIR9jFmM+IfIKAFoOt2pjL/64lNbzt6aaqQyqwlu32uVu\n81hSkD/XyOdVBbativtJPy/gsZe34ull27F5Fy2Rq02m/aQ/j7vNFYgcCFsqeWT3dzBrY8122mKX\ne0Dyeep2J4SAe43VWAbEU0AKcXGN37l7KdZvH4AxgvUzcHTkXliEpNMIBzZ4GZ+mKgErmkP+zdhM\nuY84hig5zEreroxdImID3+1N57A8abDgFkTCGkqSyVJGUhCrIyzvrK+q6OqA7gT7juvhSWe268F1\nCfNuqMgXJE8Zu8ai7QbG8GrXcmjpXvEbEfu8S4jI+z2GcLe5VPIVEvMOWJYSudek/Bfr/KlNmDra\nJ3MAAUEYDpnc4WkBlzk9p2+5VyV88RiZ8MQChBGVSGarS6IuLnkDPA26ptDEKkUBoIAUEtAhuarZ\nS8ojBIs6TkBV/wwa72OQyZk37pARqJsHVZKrkhYBCb085i27zT2pfj5VojKnKABcFQ3VCUwbXYu5\nExtQHauC5VZBiQ8FMuppqZhfh6omhkQmLHENDOVtFGzfbd6XXI7p09l99VToqsK6zrH5GPMMVNZb\nmj8TBduFoijwhmhoxEYOipmHQhTAlmLezPLut/tgxIPkzc/Vn83h+VWd+PsKSt6lljePLTpMMCb/\n+kLs7CmIuHavthHfff5HUHU6B5br51qIlynJQzFzIt5tKQlk8g6Ia8BVCjBYtUYpefNcjbybD1iD\nzrYxrByPC9FQ8n19K81gbk01U8ubu80JJe+EpQuXdtKK+d4PI0jeXPdAdPDzCsiKen3672lzx4rf\nnS2XWqksROKYoGI7qng+sgUnQJwAkMl6ICBwPMePeceyIIUEAFXyDri49wnfclR0ByAqFM8MzLWm\nKqHlcmD3UYkPQq3sogtqT4NC6Dhkb1fGLre8lVjWJz+jnFjzbkGUivE5cnc14gOpy2CqlvjMcT04\nnoO8mxeqisTVoaiOmGN6fSXfwd3ujkcXAJoNuAZts+zySgJ/n7LjpTkChkmqO0CIyPsww5566IZa\n3oGYd3mdNycbz/MCCWvyQqBUfrB0eygUglhJrbpsrVt6+PG8QQh/iR49tQkfP20irr5oBkzNpCtu\nNv50wixrSiCTN38RVqUs6KqO6sKkQHtUUzVEOdCYmjZ888jrMK5q9LDXaGhqoJZ9XHMdPnDkCNx0\n2Vz/slUFhVVzoearsKDZF27hgh4yKhMJ6JqKL104HfOnUq+CiQR9YRh+0h0tFSOB8h7enAWOgf6h\nYHY9APSxzm10kaMGLG8A0OtYwhT7bOG0ZurZYQRAyTEPnSQAKNA1BQumNWFEFQ3Z7Mr3wUqweHKJ\n5a0YBVimImWrJ6Brip/YptlQFF+qlQghGVVoxW8Z2ibmwHN8qV23zw8ZKUZRWN4WErSch32HbrJY\nrsVkMJnHIGXRf9/Y0oVfPkL7NDudbSDFOAtNcPJmI0pQi7Ml2URj3sxtTsnbRdzSka70UBmjrV1L\nyZ+7r4u8RxD7DRb1XRhwugHFhcZEgyqsBGrScRAiuc0VOW4vJe0x0ti4Y1C4r0lJ7kHRsyl560W6\nwGChB+Fh01ykkiE04AYXWSqzvHUvjuKGSbC3jaLbmSWqN9MFDsnR3vSKS8chk3e2pH4bAPSGLYjN\noNnmhuWTYFKpggIFBafot2FlY/EKcShOnC7CJLc5j6kHLG/NoUTMUGrdK6oHKJS4HZewcj0ahvIX\nOPQ7MnlbkLfT3QJnRzubA7o9bmmR5R1heBT3QN56iaVoGcGENeLJ2xXpv1wmskSqkyGsLWCY5Q0A\nY6voD5vYFsa0BF3VMtEamoFvf+JI3HrF0SXnpeTI5V1jpoZjp7eIuHiVVcmuRRMNW2QYEnlfcfZ0\n
fP7caRjTSo/RNZW9YPh1aSJO1VSTQGOqBhWmbJkH57M0/p00E/jwiePQ3ugfo6kKvMFaJDedgOqY\n/7nc7IGjpabc2rcU+oJVOem4GnIFF7miCy3mZ1VziyWQxyBZ+l051vCFuc0NPRgWEYlVnobPnD0F\nNRUxen+IBuJqKHg5KLotXsS6ruITZ0zGNefT+9WT3wU9zsnbEucCAK1yF5qmv+W77l0dLbVJsVDQ\n67Yj1rLJF3MJkZYFAM9gCnBF+twkYjq83iZUDbIKDL0o9NKTajWyeceP+zJLTjGZNeZpuOXf5iPN\nyHv11h68vtFPsgJo8ppX4vZWrBwsNYZ7/rYJBP4C1CU0YU2zCsg4Q+iobGGeJhWEKFA1Rt6eDVM1\nYLOsZrHAqejBC7gPWsNmaJXsGow4aiuo0EvOLhey4fFuIkkFr9vZi9gUKpzCFy5y3NzziEgMEwtX\nISTjoChJ2Y6qYhnlJeENVSXoLw4grqThdnb4izUtaBUX3jgyMA5ZMjUjZYIHcmMAQPEEeRc3TMIM\n71xYmomiWxAxb1GD7eooFF0YqinKHh2X+Cp2IrFRh6J5yBSkeWTkrQ42wB1gZWWqC9vxqKdDs0Fc\ng3lwuOVNv38wa4v5cLvaUB1jybvsGY9behTzjjA89mR5lwovWKaGie30ITthVmvoS5KngHgEQQlR\nibiUEPIu7fTEccX0T6Bi2wkgmUqcefTIYcdqqDra6lOoqYiVfS6j1Hr33dYkKNTBj5dUz2pSScwc\n71tqhqYG6n0BX7iBZ30nDD9cUGZ5l2Sex8JKxbgbnhDELR1nzO/AvMmNWDituWxfSyuPmXPyVrhl\nzd2sRRfElPpCM3KXQx2Vdf6LWGQrexp0VaVubzlhx2KuVtfXntfZfSaOgSFniMqzshc5d5vH9TgS\nehy7cr2iJI+/zD+4YIw4f6eyRri947qJz35oqug0BwBoXYmCW4ACJZDMJ5frOBq1evM5Rt5snKpD\nCXj2tCTMup3wCnGkvUYqYcleuq/iL4DqUPJm46urjPueE83P6OcvfM8jovWqxvu6aw5yWQVLWJtM\ngy1aHTh0MZ2gNdod6XamSgfAU6FoLJud2DA1U2RNBxfQ/iKNz21N2gI8Ddtz23HHsrsgMvqlccLT\nxMKoM+tnO8ulcIBP3rwlqli4Sm5z3oa3Wm3CN46/KuCh4ffC1bLwiIdxDS04YVYrxrXUse22P5eA\nOG57Fx1vgLyZZXzVjE/jqhmfDswBvRd8gRLDUMaFpZk0YU3EvNn8cfJWTJFQ5rgeduWpp8kX86H/\nDhX8MSi6DS+TRmbVLH8Bwo7f5qyHogDeUGXAbS5yC3K2mA/iGJg7voWek2kimHpkeUfYDfbU67fU\nhZyMGWitT+G2Lx6DxaeMLy9XAkRrPyrmIFnGEonK33v6yJNRH68NxH5lWJqJq886ATd8bI7I+JXB\nm6+UeglKt/NyH8sILjgqmeWt6DbSyZBac0XOXA9+h6YpActbBpf7TOp+LH5PlndY9yQeo+eqcecd\nNwafPnsKmmuTqDQqA/uGldvFWekWfzEeN82vr1WkF7/O483SgqxdnVZ2PuL6lrdcIy7I3/W158e3\nV2H+lEY0pqtEaRBPaNOlhUtNrBo9+V7U1zOyZy/CU+YEdeCJ4oB4KmaOa0RTTQLfuuyowPa8W4Cl\nmaiuKF8EAUBRoyV7OUbeosENK9dzk53wFAfurkbs7M0hm7eF29RGHlrtdroAkcSBYixPQdHcMnf0\n8nW70DfAXMUa70jmBBZIwuOkuFTVLk5Jo6OizV9oen5M2iU2DCk8M5yXAQDqE3WoqYgJ1/1rXcuR\nJf2wJlBJUd/y1oU7l3sv7O0j4Q0wi5aR8zPbn4dLiBAb4QtX/syYY19FhtDx1+ktSFspGLoKj7e5\nZZamrdHjm5J1+OiiCWirpgaBYmVhjnsZWsUudk56n555lWaqb+rpFdfG3ea5IR1JI/heUHQbnsEs\nZ9tAf6YIS7NQCIl5wzGQLzq+qJLqoug6uH/NX+k1MhU7fo0i1q7QOm6xuJcaBdmOh60OFW5xu1tD\nLe+hrB2o8EjH6Hvig8eOwHc/PQ+WoUUx7wjDY97kJswaX4+vLZ4Vup1nVNdZ9Zg2uhZnL6Qu7GSM\nJWaFkTezvUvbBcqiJvKmM0Yvwk3zrw0mp5WgtjKGUc3h5M7JebiYOd8u9MdLkt7SJn0BKbqDdLyc\nvGXLu1RIxtBUv+SoBJwYZMtbLyFXTmAfnXQhxlaNwsiKkqYlAM6Y34HT5rXjU2dNLtv2hWlXBWr0\nwzL2E5rk1lc0jGzyCb+m+xhfI54n+kj3tEFvxw+O/ffgCT1VinlL99RgbnfPz3jXVBWfOmsK6lL+\nvXOKLAFLWrjUxqphezZ67R7qvuS92y3//GkjhYq0CsXTce6xNI9A04KLy135PliaiY+dOhFnzO8o\nmwvCwgCdPVRHnN8jTmI7MszqtC1s685Qy1tWA+WhBdvCSbOobn/CYG1Nx7wu+otzwn9pdZcIJ2ga\nK8nTnMACSSgPcnI26QJjRLoVlsmS+jwNHmhfe9uz0dMnJToNoxxYvXURLM2kSoeyVCk2+vMhW95C\n598RcyD2Y8f/c9OT6NPXQIlTD0ap5a1oHvQxNJuee5Fk8halWCo9vi5OibE6Sc9jtKyHVs3ugfQc\ncm8N11bo7suhM0sJ/Sf3vFn221fTPSC1G0CKFrxsBdZvH0BPn4OcUxAiLVzbgdgx5G0XGgwxxgIZ\nQme2G9PrpooWvdzyzhTZ74Qt1OIaj/v7mgeO6yGDXpCiBZJL0wx1fs/ZImkoZ0uuewOVTGggFgcq\nUxZMQ0XRdvdoYO0vhJs+EQ5ZWKaGK88tt644UkYS35p/HSrMVHhMOqS1J3/Zlbb+k6340pZ77wS+\n5R1O/pogb/riLiXvuJThHeY214gpvqd0gUHJV8XE+Cx0NFQFtnELf3duc+5Wntc8B/Oa54SOP27p\nuOD4saHbUrFYwPIPu0dJPQk4/vbqtH+9llcJe/0UWJOfB1GZFSBZhZahIaZbSOoJP8bo6dBUBTFL\ng9vTgqJuw+zw5SHh6mVd32TLyCkyYpeItyZO44V9hX5U6JXg7RsURUFh9UxY41+B7dlIWAZqrKQI\njWglnouck0M6UYdpo2sxbXQtHlqyEYWVR6FuyhoMEhazJ4Bjq+hoTQjPByfvXqaRbqoxbO/JImbq\nouc44MtbLpzSjounUNnadExanNWzpL1Aq1z6Ha45CKj1rCpAmmON11mzeD57oSeNhL/wJCo8uEhY\nOlzVLbG2gwsYTt5xRp7phBH4neaJn+XtDXBi8suYeLlVIJ9B+v+COgjVGoKXjwsPguyB4z9z/rsy\nNBW9/QRWo+/9Kap0DPUsVl2TKM/VCHw/I/8iyeP1tT348f8+g9gR6+EO1ACuIQiZQ6vugqIAxS3j\nAU9HvujCLChQzaLoC6ym+kEIdWsXii7i4HF5FzZT4RvoK
xeEGsxnAZjQ0rS6okKvQhcQ0DywHQ8O\n8ZUCs3lb/K54eCJrboNVxWrfPRWVcbqIzjssVBUbgDbiDeSKp5XNzYFAZHm/B1EXrxk2mexblx9V\n9hmnZbIbgi61yt8JRFx+mFOqSnncXkZCcmvL7U55rJlnm4dZtfzlf3TNSTh7zKmh35PYjdv8ncLU\ntYALN8z7kNb9a7JUU7jhAZoMN29C0NqX3eomW4BUxZi1ThSAKNBUBcmYgfqqONydI6ES2UrSAhYz\nAD+jHxAvYpng5Xr+ilgSs8bXi0XloglzUUmakXcLyDm5gJiPripCHpQjVhL394aqMdM8xf/A1QEo\naK5Jipp8txict7pUBTp7cxjMFuF2t+KktuMB+PKWDekKUXWRNMt/G7LkbYJtH4qvg9FOFzky2QkJ\nYJ6Mp9iIaVbwuXV05L0cYpVDVMSmpLJDBifvGHvuUnED8o8jD2r1FlbPFORbEaf7ajXbpQ575RoO\nAPWsKUYRhCWr3fjxuThnQfnikqvrmYYq7jl3mxdUujyrZ5Z3Q0U5eZeqNxJPRc7JY9naHiEA43bS\nZ3XZup7gHHDPgOwV83QoCi+1I1CT/SC5FFRCyZ9b3takpbAVapWv3iDVkrt8AcF6rTdSQZZRFvOI\nSZZ10XHhoCg8BnLuBPds8GeBjRhxg3fQo+SdSa6F0bwBm3qD7UoPFCLyfp9hREMKN867BjcvuEF8\nxt08Lvt3XP+5uOGoqwPH7U/y5pa1S8KTO9QSy7s05t2UpOpN9bHaQO05fzlzyzuMGPnLP6xjG/+e\nZIjlffHJ4zB+RFVACW5fQL9fETHN/kJ5b2ceFgAASw+St6oquPC4oDu+QrIkeQ9s/pKloQefMK5f\nPBvzpjQibUrk7Oplcyxn3E8f2YxPnDEJx83wBXZ4xj9A5+vKc6dhFksMvPCEsRjZQL8/5+QDSXma\npsLZOg7FdVP9awxJ2otp/vg4cTbX+Za36ypI6v51N1VVwiME67cPQFVUnNi+AIBfTiff0zE1I/zs\neAauugbQOefQG1g3L4mYDFWjiyJmeXuqLeLoHPa2MSDwUGxaRj/gmvNmubdJMQsgRKEd5cDIW/N/\nG0VlqGwMHfXU82GOXAU11ReYJ/n7AKDIiJfY1LXb0ZTG+NagZgPgaxYQ4ru9eYJWERkYqiEWdfXp\n8pBYaSIeHAMFL49ETIfCeoDzkM+ytUHyVpmSn+w14Za8Yuap7oHmwstUwjI1arm7vuVcTG1mx0jN\nhQIxawIt3Y8RqTbUWLWB8Sqai6ydp78VtmjpzxQDx1tSCaCzg4Z3+D1/bPNTeHzz037mPQn3KO5v\nROT9PkRDog6VVvnKmbvGLSWFpmRDYFtIXtY+QxXkHX5SlcfaWcy71FoZXdmBzx7xcXxp9hXB47ik\nNCPv0pp3gFs1VIq0FMJtHmJ5nzJnBK67ZFawZn4foCgKrr5oBmbWURWz0sQdAEiYMVHrbGkmkjFd\nkLKmBUkLAJKmFONn19DMFjgcfOlVmbLw6bOmoDImddjytLJEx3qplOeoiW1YMK1ZzB2AwPOTCLsG\naQ5ly5vfS/klHUbeimv6nhNO3jVJcbzreUhLY2itoZ6ATN5BIqajwkoHqiHkMVZYaehvLkL+9YXi\nM/k+xIzdh5scj8BQLKiJQSjxAXhKOXl7fY1I6Wm4Zl/g+NLKCQFXF2JKybgRVC5TWaxXImdu9QF+\nXH+4RLiiTscwsq4ON1xKQz1hiZIJk96zwWxRsjqZ2xw5VJgp8ZzIz5x8DYCfsEk8DXllANvct0SN\nNo9Db9hRrtYG0JJD/qyLOTviaRH+IPkkYqaGfNHBUM6fI0/3NQ9Kx2N0rGJzRFATr0KMe5mE5e0K\nsR5O/rmCFPPWHDom1QOxTdibJtFxSc/tfW89KMJYils+twcCEXm/j/GlC6ejrT6JY46gJQ+cvGWC\nGsvqoxtq4uUn2EfwOHRYpjbgW+Ya++2kQmq5p9VNLluAcLe5RuiPKszyPml2G7568UyMaPDJ6+On\nTUR7QwpjWqk1EbS8939ayOSRNbjsiPNxycTzsajjxLLtluRaNzUqQiMatygKNFULvDiC5E3nroy8\nSxwnSSNoeZeiPu6Tt0zEHBVSA5rQBUiAvP2xcs+HHDoI08h3XCI8LPwl3taQFDK6iZiBasn676j3\n3fiJmA5VUZHQ/WssXfAYugqST6Hw5mwU101FXaU/3ngIecvEWSi6mKgfDUVzoTdtgIci4iELkLRR\nCaIEO7uVhif880slmpoaIG9P9ZOkOJRA1QDvXS/FsaUua45OiW/e+A7RwS6szDNlMNlbqVaeyt8S\n2MghLXljShd79BqY5rzJa8jpta8k//TVANl5t3aXS6USxwCIhoZqdi8kmeOWDno9Zx45HjGTajP0\nZf2ySRKKQnq/AAAbb0lEQVQbCJxfHo+i29CbN7BrTIjx8Xtijl6OdblV9CBXFzkAckKbodN7Egif\nlNxzy6I/suaaYEXJgUJE3u9jTBtdi29/4ihhhXLXuKym9qULp+Nri2dhTMv+eyC55T0cefPt6YSO\nb19+JCrfpqv6iDHUHdZWzVyKIdZFzNQxqaM68PI5dnoLbrr8SBh6iOUdco79AV3VcXTLkaFjNAzV\n713NSJeTN19Y8aoCUzUQtyTVOmF5S33R6ZkCf6UMyfIOJW/frZowysm7UnqRlxJj6THyNXLrkkus\nAggo1vE6esfxfCEPRkS1FTGcMa8DC6Y14XPnTEW15Sccjm32a/m5cE9ausbSaxC1+P31cLvbxLMD\nhJO3vMAp2C7aY+MB0HI7opAyyxsAKqQmL9yKa2sIL1NEqbWmlv825PvUW/Sbhvglf/52N6RxkRyO\naU+34YNjTgsQZMqS480avEwaWsUuaPVbQBQvcDwAjKroQEqpFp2+xjbV4rR57SK0YW8eL/bV0n1C\nOnU48FBGQzUTKUr6IaW8QRu3jKqvQ8LSkc07yG0ZAQyy6xS96/05mDLC9x7yTPWkkfS9H9K+y7LP\n0jE4BkawBQ4kt7mha9QL4egY1ZzGly+cXrbodPUcTM1EOvHOQmtvFxF5RxDglrfspo5bOsa1VQ13\nyD5B3UPMW1jm8IZ/2YXg46dNxFXnHYGFU6hs4R7lW4eBoRnCZbuv53gnMHVNvMy5vCTPOOfZ2jUx\npg6lKIhJ8WruNi9VsCq1vNvSkmBMyAtVJtQwy1te1ISRe3wYy1tkrEtWolySyMvRbMcT18jjoYqi\nIBEz8IkzJqO5NonqmL+gbK3zn9EjxtJrr5LIPVGywCgNxUwf689XmEuYuJqwVfNFF0nTAnE1EVMP\nI++URN7cyjv32NE4//gxZfsSV99ziZFENos6ThL/z/UQZOudZCqRe/HkQGy/QiJfRVGwqOMEJGzf\nQ5OWyRsKiutoAqLesLnseAC4es7nME+7UCwARjdX4YLjx4pyPrenFflXj5PGT3uNDwfujeGWt73N\nn6ch0Bh52kxh
VEsFXI+gp9dD88CxwXOwRe8lp4zHF844DpW5CQAANUkt85SRRJznHYQtJFzd98rx\nksGqbrg1a2jioWvgklMmYOro2jLvQ09uV6gH5kAhIu8IAsfPpAlJs8aHtxvdXxhTORJAmHVIMb1u\nCh1P24K9Om/M1DFjXB3SZhKWZgaSrvYWPEZ6INzme4IpWd5claqmgvc7py8MTmxFtwhLiqNazHug\nqzqumvFptPfRspVSWhhd6ddUf2NxeQWCjHgIecsItbyHiXnLXp20Qq1dXv8L+Ja37bpoSNDnkBBg\nzsRgDgYAVPMFDILiOVzJri3V4o+x1PKWyPu4GS1oqfWvIWUl8K3510IfkhY4ro6PnEItyUVzR9De\n6LYpuqrFJaW9tnrqrm9IBpvoANSDcvq8Dnz9yC/jxBHHBM4vo3YoqONAXE2QCQBMrh+Lr5TkfJSF\nPzxdlNQBCP09aNLzLSc+AsBXPngs4GqC+NIhxxMoYlw8h+Wy0yYKWWRSjAnLXHZpA0DlllNwztgz\n/HOxPIiKBPME9jYF8hIA6k2Z2O7f95baCmiuf2/5d+SLDlRFRYfH+ruzkreUZHmHClY5BkYIqWMF\nbj99RovJbfQjVy+rfvHnItwDc6AQ1XlHEDh9XgcWTmt+227qfcWF4z+ICdVjMatxeuj2SbXj8b2F\n3wyWK+0FNFXDNXM+H3AN7y0Sehx9hf6DYnkbuubXm7JabRHzZqRTKxGX/DIxpSz6CTVjYbkZAD1l\n7D0i5WeOj24O96x8ceZnsHFwS5m7tBRhZYmyNR6WkAYA7eZErCg8g+aUb/0J8nY8zG2cgXV9GxDL\njMTZx00qO77GCo77psvmYiBbFHM1qmoEsCV8DHweZ42vx8dOnRgcu6WjLl6LOSNH47lupn3u6Zg/\npQknzaZCL6+s7mJSpdQzInsqvrZ4Nrr6cuhVN4nvT5oWvvrJo0TYoCXVhIUtR+GxzU/R87s6IHHC\nCGUatrxYg/icf9APSohGVYKJi6RE2lh87vj3Jox8PdZ61elpQoKFX266bC5Wb+7D5JE1UFdUwovv\nYseXPwfHTm/GP5dyOWBK3o01CXz90jlYuWEX/vPe10DsGHXtl1xDtVGHk9tnYkzlKNz94t+xsZMu\ndqaMqsH0TbUY0ZjCX5esByGK8C6kzBTGj/AXXifPbsPmt6rQyYVYuDgMlzw2LZCiJRZZKTOJmDK8\n5U1cHUdPbcIf/klbpBbfnIPYnL/BNQbE9pgxvOt/uGf9QCCyvCMIKIpywIkboC7Go5pn79aqTUuZ\nrfuCpmQjUua+kT9A3c5pI1VWc/5uwNJV1roRqGSJYaUxb9EUAcGyt9KSLz6DpIS9Dc3AhOqxGF9V\n7sLlGFc9Bie3Hzfsdo6w8IdcBx6WkAYAU5Nz8NkjPo6zR/v19kdNpkQ+sb0auqrjkknn47w5c0Q+\nggzZbQ4A7Y1pTB3lx65HVUqysiXPkio66ZW7qrkVP76+zf/Q1f0sZQCmqQUy5mXhoLilo70xHcgb\nOG56G1rqgs9jXPYGuDpOnOV/XzpBVdZ4YlmYlRhIOvR8adLW+iTOOYY1B5LqpsPCHzWDM+H2NqCt\nOE/McXtjGiczmVut6Lv+wyz3uso4KhL02r2S52DyyBpceMJYv1lKyTV091PCHVXZjqnG8aLne1NN\nAl+4YDqV2iWqyI8wVB2WZiIVN/CJMybhyxdOR3tjGpMa/C6AHz91Mlrrk2KR1TdUCCRHUstbCx0P\nAMAxkIobuP6js3Hy7DYACohtwVN5A52g5X3JxPOFpxAID58cKESWd4QIIfjY5ItQcIvvaAGxr9B1\nFW5nG2yjgCvPOx8AUJP21a+AYDKWXH5klpK3SJ0t/56rZn66/MO9wHnjzsIDax7CxJpxZdsaE37o\nZTjytkwd0+qCNevnHDMacyc2BKoBhgOPaZdm1nPwpD7e31lGW30SmzuHUFc1/MtWvi7iagGXv6Vr\nAZd02Eu7OdmI5mQjtmd2oqmy3LshW84nzhyB8SP8fToa0wAUmEocBZIVVutpR7ULyeOEEYcChS7M\nJCI6YWYrTpzVhhNnt+Hz/1WEmuqDSozQZ/mCo+bi0aWNWPyhCaFzoBerwIVd08N4ssZVjcZLna/5\n1QESqtIWyFZekkUt81HNVP50guT+lhdn3PuSZImH3kAN1FgWtudn4C+QmvzMbZyJf215BgBNPj12\nuh8uaapJYHl3HGqKJr8ljSRivN+BY+L85o/h3rcegJryLWuAVtmMba3EkZMace+W5diapS4U4hgB\nsaKjW47ElNqJeK2b9q1/N2PeEXlHiBACUzOHVak70DCYhKuzdRzqE9R6a29M4ZxjRokOaTweXB+v\nDVjbZoj4DDCsmN07wokjjgnGbSXIRCFaNZYgbKyqqgTaq+4Opmbg5gXfGHZxAAC5l06iL9sPBD//\n6AcmoK0hxayr0rHTfyulxjtfuXB2YB/TUAPkHQ/pLqcoCq6Z83m81rUC0+unlm3XVA03zbsWf1n3\nMOY1B88/aSQlNiPXgEJsA5QEJRdNU8X9VhUVcT2GrJMrkTv1O7DpJIbCiqNRlQ4nlTEtlbjinOHl\nluOZDgwZW2FU9pZpP3AsnnQBptVNxuyQMFgypkvtR6llfvyMFpxz7KhABYvrlmfX88WS21/ni+WE\nYGTFCDQmGtCSKs+h+dAxo5B5eRJeGKKqZykjAVPKjxhd3Q4vlxbkXZr1P7atEvW91YK8DcUs03pI\nmynoqg7HcyLLO0KE9zOSMQOXfmBCwPpUFAVnLRgl/q6NV+PauVehxqoWtdMAy1QPwbvUKyGARR0n\n4G8bH0d7upwggXIvwb6gcpjOdhy3f+kUhDlPYqaO044qb4RSio9Nvggv7HwFExpaA5/HTC0QTx7u\npW1qJuY2zRz2/PWJWnxy6uKyz6tSFlrrkujcUgN97Aa/R3XJjeTiIh111Th/8Ww8+vwmHD2Fkpii\nKHBcD4CCUU27n6fhoCkGiqtnY+KY6mFzH3Z3jRPaq1D7Vgp96BYKZJapBcIbANA7SGPSFSG9Cnij\nkXFVo8u2AfQ6bzjqK6GeBUPXcMmcU/DCE4/T79aswH6WqQXaKB83pTyMJHdPTJrhXRJrrCp05roD\nuQ8HGhF5R4hwCIJn/u8OYaSol3Tt8t9T7z57nz36VBzVNCvUnQr4mfEHEqX913eHay6eiQeeWheY\n+yObZuHIpvIOfg3VCZw4bQyeHqB61/EDYHGdd/wY/PTPWRQ3TII3SC3x0kXYpJrxWLVrNU4ffRLG\n1ldibFu4FT1hxL6Ve/L5U/YxPUpTVZw//Rj8YvkGuF10XsMWmJUpujiZPKqmbBscE1+c/CWMqKsu\n38awu/CWoer4ztHXI+fky/bTVQWktxVesg+jnWNw6YfKEyPlBWKlVU7eAK3+6Mx1v6sJaxF5R4jw\nHkLpy4n/fRAMbyiKMixxA8O7+A8WJnZU42sds/e8I8MHpx8JrO2GSzyMZ
uWP+xMzxtZhbGsVVm30\nPQSliYeXT/kICm4xkMAYhpHDtOfdEy47bRLufvRNXHRSeV7D28XMhmn45lFfxdeefx1AeF+Bs44e\nicqkhWOOaA58fvnpk/Dy6i6MaWh6R9LE1bEqhFG/rqtQMrUoLF+A6qnhYYFKSU2wKhHufeClm2Hh\nkwOFiLwjRHg/4GCw9x5Qmhl/uCFhxHHxxPMO6Hc01iSwamOv+LvU8k4YiVBteY5vXDoHb27uxbi2\nfVNIbKlL4tpLyj0Pe4vGZD14NnxYuMTQNZEhLmPhEc1YWELo+xO6poqSjPgwuvNT6yahVZuADVuK\n+MAJ4eWtnLwjt3mECBH2Cj/43AK4XnnSz26SzQ869kfM+72OxupgedecCeHW4XAY3VKB0S37ZnUf\nKBxKizZFAbhBP5znPWkkcN2xl9Me4lY4ZY6pot6RltSBW2iUIiLvCBHeA6geJpt4yqgavPRmF2aN\nrwvdfjBxqLnND0U0VvtW9e1XH79XMfxDFeYwCmXvJj555iS8sbGPlX3tObSkKsqwxA0A46vH4gfH\n/ntkeUeIEGH/4NjpLRjVVPG26qbfbZjvASI60JgyqhqTOqpx9NSm9wRxA76lezBx9NRmHD2VWsn7\nK6fz3SRuICLvCBHe01AVBR1N+67xfiDwqbMmY/32gVDVtAhBGLqGr148fKnZ4YTT53Xg2eXbUfUu\nqDjuDfzQ0qEYXBoeB3Qpd/PNN+PDH/4wLrroIrz++uuBbc8++yzOP/98fPjDH8Z///d/H8hhRIgQ\n4RDC/ClN+MjJ4/e8Y4T3FM4/fgx+eOXCQBOZQwEXn0wz6WVltsMBB8zyfv7557Fx40bcc889WLt2\nLa6//nrcc889Yvt3vvMd3HnnnWhsbMTixYvxgQ98AGPHjj1Qw4kQIUKECBHKILvQDyccsCXQkiVL\ncPLJJwMAxowZg/7+fgwNDQEANm/ejMrKSjQ3N0NVVRx33HFYsmTJgRpKhAgRIkSI8J7CAbO8u7u7\nMWWK322lpqYGXV1dSKVS6OrqQk1NTWDb5s2bd3u+6uoE9P0cI6uvP7RigYcronl854jm8J0jmsP9\ng2ge3znejTl81xLWSjV59xa9vdn9NBKK+vo0uroG9+s534+I5vGdI5rDd45oDvcPonl859jfczjc\nQuCAuc0bGhrQ3d0t/u7s7ER9fX3otp07d6KhYe/EByJEiBAhQoT3Kw4YeS9YsACPPvooAGDFihVo\naGhAKkVrTdva2jA0NIQtW7bAcRw8/vjjWLBgwYEaSoQIESJEiPCewgFzm8+aNQtTpkzBRRddBEVR\ncOONN+L+++9HOp3GKaecgptuuglf+cpXAACnn346Ro0atYczRogQIUKECBEAQCHvNBj9LmF/x2Gi\n2M7+QTSP7xzRHL5zRHO4fxDN4zvHYR/zjhAhQoQIESIcGETkHSFChAgRIhxmiMg7QoQIESJEOMwQ\nkXeECBEiRIhwmCEi7wgRIkSIEOEww2GTbR4hQoQIESJEoIgs7wgRIkSIEOEwQ0TeESJEiBAhwmGG\niLwjRIgQIUKEwwwReUeIECFChAiHGSLyjhAhQoQIEQ4zROQdIUKECBEiHGY4YF3FDmXcfPPNeO21\n16AoCq6//nocccQRB3tIhzRWr16NK664Ah//+MexePFibN++Hddccw1c10V9fT3+4z/+A6Zp4sEH\nH8RvfvMbqKqKCy+8EBdccMHBHvohg1tuuQUvvfQSHMfBZz7zGUybNi2aw71ALpfDddddh56eHhQK\nBVxxxRWYOHFiNIf7iHw+jzPPPBNXXHEF5s+fH83jXmDp0qX4whe+gHHjxgEAxo8fj09+8pPv/hyS\n9xmWLl1KPv3pTxNCCFmzZg258MILD/KIDm1kMhmyePFi8o1vfIPcfffdhBBCrrvuOvJ///d/hBBC\nfvCDH5Df/va3JJPJkEWLFpGBgQGSy+XIGWecQXp7ew/m0A8ZLFmyhHzyk58khBCya9cuctxxx0Vz\nuJd46KGHyB133EEIIWTLli1k0aJF0Ry+A/zwhz8k5557LvnTn/4UzeNe4rnnniOf//znA58djDl8\n37nNlyxZgpNPPhkAMGbMGPT392NoaOggj+rQhWma+PnPf46Ghgbx2dKlS3HSSScBAE444QQsWbIE\nr732GqZNm4Z0Oo1YLIZZs2bh5ZdfPljDPqQwd+5c/PjHPwYAVFRUIJfLRXO4lzj99NPxqU99CgCw\nfft2NDY2RnO4j1i7di3WrFmD448/HkD0e94fOBhz+L4j7+7ublRXV4u/a2pq0NXVdRBHdGhD13XE\nYrHAZ7lcDqZpAgBqa2vR1dWF7u5u1NTUiH2iefWhaRoSiQQA4L777sOxxx4bzeE+4qKLLsLVV1+N\n66+/PprDfcT3v/99XHfddeLvaB73HmvWrMFnP/tZXHzxxXjmmWcOyhy+L2PeMkikDvuOMNz8RfNa\njn/84x+477778Mtf/hKLFi0Sn0dz+Pbxhz/8AatWrcJXv/rVwPxEc/j28Oc//xkzZszAiBEjQrdH\n87hnjBw5EldeeSVOO+00bN68GZdeeilc1xXb3605fN+Rd0NDA7q7u8XfnZ2dqK+vP4gjOvyQSCSQ\nz+cRi8Wwc+dONDQ0hM7rjBkzDuIoDy089dRT+NnPfoZf/OIXSKfT0RzuJZYvX47a2lo0Nzdj0qRJ\ncF0XyWQymsO9xBNPPIHNmzfjiSeewI4dO2CaZvQs7iUaGxtx+umnAwDa29tRV1eHZcuWvetz+L5z\nmy9YsACPPvooAGDFihVoaGhAKpU6yKM6vHD00UeLOfzb3/6GY445BtOnT8eyZcswMDCATCaDl19+\nGXPmzDnIIz00MDg4iFtuuQW33347qqqqAERzuLd48cUX8ctf/hIADX1ls9loDvcBP/rRj/CnP/0J\n9957Ly644AJcccUV0TzuJR588EHceeedAICuri709PTg3HPPfdfn8H3ZVezWW2/Fiy++CEVRcOON\nN2LixIkHe0iHLJYvX47vf//72Lp1K3RdR2NjI2699VZcd911KBQKaGlpwXe/+10YhoFHHnkEd955\nJxRFweLFi3H22Wcf7OEfErjnnntw2223YdSoUeKz733ve/jGN74RzeHbRD6fx9e//nVs374d+Xwe\nV155JaZOnYprr702msN9xG233YbW1lYsXLgwmse9wNDQEK6++moMDAzAtm1ceeWVmDRp0rs+h+9L\n8o4QIUKECBEOZ7zv3OYRIkSIECHC4Y6IvCNEiBAhQoTDDBF5R4gQIUKECIcZIvKOECFChAgRDjNE\n5B0hQoQIESIcZnjfibREiHC44ZZbbsGyZctQKBSwcuVKzJw5EwBw3nnn4UMf+tDbOscdd9yB8ePH\nCz3rMHz0ox/Fr3/9a2iatj+GHcDOnTuxbt06zJ8/f7+fO0KE9yOiUrEIEQ4TbNmyBR/5yEfw5JNP\nHuyh7DUefPBBrF27Fl/60pcO
9lAiRHhPILK8I0Q4jHHbbbdhy5Yt2LZtG6699lrk83nceuutME0T\n+XweN954I6ZMmYLrrrsOs2fPxvz58/Fv//ZvWLhwIV5//XVkMhncfvvtaGxsxIQJE7BixQr89Kc/\nRV9fH3bs2IGNGzfiqKOOwg033IBCoYBrr70WW7duRVNTEzRNw4IFCwI9ijOZDL7yla9gYGAAjuPg\nhBNOwJlnnokf/ehHIISgqqoKl1xyCb797W9j48aNyGQyOPPMM3H55Zfj/vvvx9///ncoioKdO3di\n9OjRuPnmm2EYxkGc4QgRDk1EMe8IEQ5zbNmyBXfddRemTp2Kvr4+3HTTTbjrrrtw6aWX4vbbby/b\nf+3atTj33HPx29/+FpMmTcLDDz9cts/KlSvxk5/8BPfddx/uv/9+9Pf348EHH4TjOPjjH/+Ib37z\nm3jmmWfKjnv22WfhOA5+97vf4Q9/+AMSiQRaW1txzjnn4Oyzz8Zll12Gu+66Cw0NDbj77rvxxz/+\nEQ899BDeeOMNAMCyZctw66234r777sO2bdsOSy9DhAjvBiLLO0KEwxzTp0+HoigAgLq6Otxyyy0o\nFAoYHBxEZWVl2f7V1dUYN24cAKClpQV9fX1l+8yePRuapkHTNFRXV6O/vx+rVq3CkUceCQCor6/H\n7Nmzy46bNWsWfvKTn+ALX/gCjjvuOFxwwQVQ1aCNsHTpUuzYsQMvvPACAKBYLGLTpk3ieN4+debM\nmVi7dq3okxwhQgQfEXlHiHCYQ3YrX3PNNfjWt76F+fPn4/HHHxfNPGSUJqSFpb2E7eN5XoCIS0kZ\noL2M//KXv+CVV17BP//5T5x33nl44IEHAvuYponPfe5zOPXUUwOf33///fA8b7fjihAhAkXkNo8Q\n4T2E7u5ujBs3Dq7r4pFHHkGxWNxv5x49ejReeeUVAEBPTw9eeun/t3eHOAoDYRTHHyGYJlwAMAjg\nAFROSC0STCWCIJCYBhwOwxEqegIkuqLBbRN0LQaBxkBZsdkaDJutmeb/05PJ517eZCbz9bYmSRLF\ncazhcKggCOQ4jm63m2q1mh6Ph6SfVv97VJ/nuXa7XdH+z+ez7ve7Xq+X0jTVYDAobX6gSmjeQIUs\nFgvNZjO1Wi3N53MFQaAoikrZezqdKo5j+b6vTqcj13XfGnq329V6vVYYhqrX6zLGqN1uy3VdrVYr\nNRoNLZdLZVkm3/f1fD7leV7xVWq/39dms9HlclGv15MxppTZgarhqRiAj1yvV6VpqvF4rDzPNZlM\ntN1ui3fn/3U4HHQ6nbTf70vZD6gymjeAjzSbTR2Px+J/4tFoVFpwA/gbmjcAAJbhwhoAAJYhvAEA\nsAzhDQCAZQhvAAAsQ3gDAGAZwhsAAMt8AxJ5C+54P8QOAAAAAElFTkSuQmCC\n",
-            "text/plain": [
-              "<matplotlib.figure.Figure at 0x7f72fab5e290>"
-            ]
-          },
-          "metadata": {
-            "tags": []
-          }
-        },
-        {
-          "output_type": "display_data",
-          "data": {
-            "image/png": "iVBORw0KGgoAAAANSUhEUgAAAe8AAAFnCAYAAACPasF4AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAIABJREFUeJzsvXe8XVWZ///e5dTba3pCQiAJCSWE\nIJGmoSSgjsg4gmCb4Tf+dCwURUdEQXGs41gYFQvDiIyIiKIIJIAgEBJCgJBKertpt59z76m7fv9Y\nu55zboiQBCL783rllXt2WXvttfden6et55Fs27aJECFChAgRIhw1kF/vDkSIECFChAgR/jZE5B0h\nQoQIESIcZYjIO0KECBEiRDjKEJF3hAgRIkSIcJQhIu8IESJEiBDhKENE3hEiRIgQIcJRhoi8I7yp\nMW3aND796U9Xbf/iF7/ItGnTQsfdcMMNoWOWL1/OBz/4QQB2797NCSec4O3btWsXH/vYx1iwYAEL\nFizgkksu4bHHHgPgpptuYuHChSxcuJCZM2fy9re/3fudy+VC19A0jfvvv/9vvq/Vq1dz1VVXHdSx\nDzzwAF/72tde9bVcvNbz3wi46667+P73v/96dyNChFeE+np3IEKE1xsbN24kl8tRX18PCBJas2ZN\n1XErVqxg/fr1IZIeCZ/97Gd597vfzW233QbAqlWr+PCHP8zDDz/MV77yFe+4+fPn8+1vf5vTTjut\nZjvr16/n/vvv55JLLvmb7umkk07i9ttvP6hjly5dyvnnn/+qr+XitZ7/RsAHPvCB17sLESIcFCLN\nO8KbHm95y1t49NFHvd9LlizhxBNPrDruuuuu4+tf//pBtblp0yZOPvlk7/fJJ5/M4sWLGT169EH3\nq6+vj09+8pO89NJLXHHFFYCwAPz0pz9lwYIFmKbJypUrufTSS1m4cCEXX3wxS5cuBYRV4IILLgDg\n1ltv5atf/Sqf+MQnOO+883jve99LT0+Pd53ly5czffr0qmu98MIL/OM//iMXXHAB73vf++jq6gKg\nu7ubD3/4w1x88cWcf/75fO9736vZ18p7ueqqq1i4cCHz58/njjvu8PatXbuWSy+9lAULFvCBD3zA\nu85I26dNm8b+/fu9893fy5cv5/LLL+fqq6/mM5/5DAD33nsvF110ERdeeCFXXnkle/bsAcC2bb7x\njW8wf/58FixYwC9+8QtvrL74xS8CsH///pD15MknnwTAMAy++MUvsmDBAi644AI++clPVllMIkQ4\n3IjIO8KbHhdddBF//vOfvd8PPvggCxcurHmcbdssWrToFds855xz+PSnP82dd97J1q1bARg1ahSS\nJB10v9rb27nuuus45ZRT+PWvf+1tt22bxYsXoygKX/7yl7nqqqtYtGgRH/3oR7nppptqtrVo0SJu\nuOEGHnvsMdra2rjvvvsA2Lp1Kx0dHYwbNy50rVwux8c//nGuu+46Hn30UT70oQ9x9dVXA/C///u/\nzJ07l4ceeogHHniArq4uLMuq2VcXP/nJTxg/fjyLFi3il7/8Jd/97nfZt28fIISiq6++msWLF3P+\n+edzyy23HHD7gbB+/Xouv/xyvvvd79Lf389Xv/pV7rjjDh555BEmTpzIj3/8YwD+9Kc/sXr1ahYv\nXsx9993HXXfdxerVq0Ntff7zn2f69OksXryYn/3sZ3zuc59jcHCQJUuWsHv3bhYtWsQjjzzC1KlT\nWbly5Sv2LUKEQ4mIvCO86XH66aezefNm+vv7KRaLrFy5knnz5tU89oYbbuA///M/KZfLB2zzO9/5\nDldeeSUPPPAA73znO5k/fz533333Ienv2972Nu/v+++/n4suugiAOXPmeNppJU477TTGjRuHJEnM\nmDHDI85ly5bVvNcXXniBUaNGceaZZwLwzne+k127drF3717a2tpYsmQJzz//PPF4nP/6r/+is7Pz\ngH2+8cYb+dKXvgTAhAkT6OjoYPfu3Wzfvp3BwUHOPfdcQJitb7311hG3vxKSyaR3P21tbbzwwgue\nteO0007zxuepp55iwYIFxGIx6uvreeihh0LWlkKhwPLly/nIRz4CwKRJk5gzZw5PPvkkra2tbN26\nlUcffZRiscg111zD2Wef/Yp9ixDhUCLyeUd400NRFC688EIefvhhWltbOeuss1DV2p/GzJkzmTt3\nLnfccQezZ88esc1EIsFVV13FVVddxdDQEIsWLeLrX/8648ePf80TfXNzs/f3Aw88wJ133kk+n8ey\nLEYqVdDQ0OD9rSgKpmkC8Mwzz3gEFcTQ0BBdXV0hC0Q8HmdgYICPfOQjWJbFV77yFXp6erjyyiv5\n1Kc+dcA+r1mzxtO2ZVmmt7cXy7IYHBwM9U1VVVRVHXH7K6Gpqcn72zRNfvjDH/L4449jmib5fJ7J\nkycDMDg4SGNjo3dsOp0OtTM8PIxt21x++eXetkKhwBlnnMFJJ53EjTfeyK9+9Ss+//nPM3/+fG66\n6aZQexEiHG5E5B0hAnDxxRfzve99j5aWlpo+2yCuvfZaLr30UsaPH19z/8DAAC+//LKntTY2NvK+\n972Pp59+mk2bNh0yLa27u5sbb7yRe++9lxkzZrBjxw4WLFhw0OcbhsGaNWtqCiGdnZ1MmTKF3//+\n9zXP/ehHP8pHP/pRtm/fzr/+678yZ86cA17r+uuv58Mf/jDvf//7kSTJG4OWlhYymQyWZSHLMrqu\n093dPeL28ePHI8uyJ3xks9kRr/nQQw/x+OOPc9ddd9Ha2spvf/tbHnjgAe+6g4OD3rF9fX0kk0nv\nd1tbG4qicN9991FXV1fVtrs6IJPJcMMNN3D77bdz7bXXHnAMIkQ4lIjM5hEiALNnz6anp4fNmzdz\n+umnH/DYzs5OrrzyyhHNuKVSiU9/+tM8/fTT3radO3eyatWqEaPKR4KqquRyuZoa9cDAAOl0milT\npmAYBvfccw8A+Xz+oNpevXo106ZNIx6PV13r5JNPpre3l1WrVgHQ1dXF9ddfj23bfPnLX+aZZ54B\nYOLEibS3tyNJ0gH72t/fz6xZs5AkiT/84Q8Ui0UKhQLHHHMMo0eP5pFHHgHgd7/7HV/+8pdH3A7Q\n0dHBhg0bALjvvvuQ5drTWH9/P+PGjaO1tZXBwUEefvhhb2zmz5/Pgw8+iKZpFAoFrrjiCjZt2hQa\n93PPPZff/OY3ABSLRb7whS+wb98+7rvvPn70ox8BwgoyZcqUgxrvCBEOJSLyjhABkCSJCy64gLe+\n9a0jkkEQ//Iv/4Ku6zX3jR07lp/85CdeVPiFF17Itddeyxe+8IVQBPrBYM6cOfT09HD22Wd72qaL\n6dOnc84557BgwQIuu+wy5s+fzymnnOKtPX8lLF26NOTvDl4rFovxwx/+kFtuuYWLLrqIT3ziEyxc\nuBBJkrj88sv53ve+50
W4z549m3nz5h2wr1dffTWf+MQneNe73kWhUOCyyy7jS1/6El1dXfzgBz/g\ntttu48ILL+TPf/4zN998M5Ik1dwOwvJx88038+53v5tUKuUt8avEO9/5TjKZDBdccAGf+cxnuOaa\na9i/fz/f/OY3ufjiiznrrLO48MILec973sN73/teTj311ND5N998MytWrGDhwoW85z3vYcKECYwZ\nM4bzzjuPdevWceGFF3LRRRexZcsW/vmf//mgxjxChEMFKarnHSFChAgRIhxdiDTvCBEiRIgQ4ShD\nRN4RIkSIECHCUYaIvCNEiBAhQoSjDBF5R4gQIUKECEcZIvKOECFChAgRjjIcNUlaenuHD2l7LS1p\nBgcLh7TNNyOicXztiMbwtSMaw0ODaBxfOw71GHZ0NNTc/qbVvFVVeb278HeBaBxfO6IxfO2IxvDQ\nIBrH144jNYZvWvKOECFChAgRjlZE5B0hQoQIESIcZYjIO0KECBEiRDjKEJF3hAgRIkSIcJQhIu8I\nESJEiBDhKENE3hEiRIgQIcJRhoi8I0SIECFChKMMEXlHiBAhQoQIRxkOK3lv2rSJ888/n7vuuqtq\n39KlS3nve9/LZZddxo9+9KPD2Y0IESJEiBDh7wqHjbwLhQK33HIL8+bNq7n/a1/7Grfeeit33303\nzzzzDFu2bDlcXYkQIUKECBH+rnDYyDsej/Pzn/+czs7Oqn1dXV00NTUxZswYZFnm3HPPZdmyZYer\nKxEivGmhGxZL1+6jWDZe76542NuXZ822/te7G0cNXtjYy879wyxduw/Lsl/v7rxq9GWKrN8x8Hp3\nA4D9AwVWbekDoKyZPPdyN7Y98tjmSzovbOw54DFHGoetMImqqqhq7eZ7e3tpbW31fre2ttLV1XXA\n9lpa0oc8Z+xICd8j/G2IxvG143CN4d2PbOTXizdw3twc11x+6mG5xt+Kf/nm4wDc/+13oSiHTn/4\ne3wP9/Tm+NEf1ni/48k4F8075rBe83CNo/vcf3XzQpobEoflGn9rX+79+jv4+d0vsmzNPmRV4aK3\nTq55/I9/8SzPv9zNdVecytvnTHjF9o/Eu3jUVBU71JVuOjoaDnmlsjcjonF87TicY7hhu9BwN+wY\neMM9p737syTjh2YK+nt9D7dWaKobt/dz2tS2w3a9IzGOXXsz6K3pw3qNg0V3zzArN/YAsGnnAKcd\n117zuA3Oc3hh/X5mTWw+YJuHegzfUFXFOjs76evr8353d3fXNK9HiBDhtcE180lIr3NPqqEZ1uvd\nhTc8SroZ+m2aR/+YvZFcOJZtY5jiG1EPYAVqrheWgsHh8hHp18HgdSHv8ePHk8vl2L17N4Zh8MQT\nT3DmmWe+Hl2JEOHvGq6LTnrjcTdGRN6viHIFeRtHsc/bRb6kv95d8GBaticQqcrIH0mLY+bP5N44\n5H3YzOZr167lW9/6Fnv27EFVVRYvXsz8+fMZP348F1xwATfffDOf+cxnALj44ouZPLm2ryFChAiv\nHW9E8tYj8n5FaHp4jEzz6CfvQun11byDQWeWZeP+UuSRddn6VAyAzAE072x5iKZE4yHp48HgsJH3\nrFmz+NWvfjXi/rlz53LPPfccrstHiPCGwf6BAo3pOOmk+Nx6MkXSCdWbEGqhe6BAQzpGOukf0z1Y\noLk+QSJWHbiZzZUxLZvWxmRou+Wazd+A7H0kzOYDQyUUWaKp/rUHSFm2TVd3jgmj6pEliZ7BAk11\nCRLx8PMoayZ9QyXGtde9pusVSjq7e3OhbYPDJbJ5jaa6uLetN1MkGVdoSMcrm6BYNtiyJ8u49rqq\ndwOEANWXLTKmrbqvA0Ml4jGF/mzJu+dK2LZNV0+Ose11ntnZtm329OUZ116H5IxTXeBdz5cM9vbl\n6WxJeedYts2W3VniMZljRjfSkynSmI6FYiJ2dQ8zpq2OmFqbZGudUwslzbdmmJZV9bdl2WzenSGV\nUEknVOJxxfuOhgo6fZkidakYqYR/naV7V/B/G+7lIye8n4s7zjng9Q8VjpqAtQgRjkaUNZMbfvYs\njXVxvv+pswD499uWIQG3//v8mufohsXNd6xg9nHtfPQfZgLQny1x48+X8455k7jk7ClV51z7388A\n8D8VbbpKhvw6cLdpmWzL7mBq85SawsOR0Lw/++OlQPW4vBosfm4X9z6xlcvPO45Tj2vn33/6LBOm\nZ7A7N3LdnH+jOdEEwDf/70V2dg/z7Y/No7059aqvd/MdK+jLlkLbNuzKcO2tS0L3c8Mf7sYup/nF\nxy6vauOexzezZNdKmuoVvnvlZVX7n1i5h3v+spmvXHU64zvqve2mZXljB3DlBcdz3pzxVeev2TbA\n9+9dxZknjuaqd5wAwOMv7uH/Ht3E+88/jtOmdfLvP32W9iZfcFi5uZdfLd7I208dxwcvnAbAqi19\n3HqfiKq//v2z+c7dK5k+sZnPXSFWSGzcNci3fr2SudM7+fgls6r6kc2V+ffbljF1XBM3fHBOjdH0\nEdT8g0vvXFJ/YVMvP7l/rbddSuZomLUSuXkaVqaTz922jHHtddzy/73FO+avu5cAsKJ7JRefeGTI\nO0qPGiHCYYRmiAlhKK8BYDj+tQMZP4tlg7Juhvxre/pymJb9N/vcfBPhkWfvezf/ie+v/CkrulfW\n3K8bZs3tbyT8cevDfGHJLWimxsrNIsh21ZY+9vYXIFair/FZ+kuDdA3v8c7Z2S0ijQcOMrhJt2qb\nkSuJuxZ2De0hPmkDieNfrLm/qzdH4riXKI15ofY1MkVsoKsnrOFXmrbXjrAuf9veLADPrNnvbXtx\nUy8AK17uYSivIaWHyE+7H7mpx2lLRG4/8aI/Zv2Be3VzAGzYlfHb3LsRKV5gxYaemv3oHiwCsGVP\ntuZ+0zIxLfG+BX3uZoC8NSe+oC9bDJ0rN/Wjy3kxxoo4d09fPnRM2RDPOqkcuSVwEXlHiHAYURlf\ndDDapghSssnGtzGsiUm1NyMmt6DPc1XvWnoLB0524pL366F5P71HJF7al+/2tlkBf+PR4PN+ZOcT\nDGnD9BbD45zJlVEa/WVcRaOaaJUDBEC52DW8m2v+egNL9jxb+wDJIjZlNXJzd2iz+1yf6FpywPZ7\nC/6qHsuuHm83IK43EyasSvIeyVRdy6Li3rdhWpiWTWzsVtHGpA0j9jN4vf4KoaW/OMCSwu+Jz3hu\nxPMrz6nEf734E7723HeBcLR7Tisi1WWIH/8Cw8ZwVV8AJFXz/pbragsHZVMck1CqXReHCxF5R4hw\nGFG5tEc/iKU+Zd1Ead9LpvU5/mfdrwF/cnU1hf35Hn625k7+47n/8jQGoCoDl6d315hkNw5sYU3f\n+oO+l1eLxri/TjVI2G/0pWLuhAygW+EI6d5MESnuE0ZBD5MfgHIQEtOjO/8KwIPbH625X2ndj9q+\nl8TxYeuFu7xpW3YnALZR7QEtaQZFxSfvWn0cibzzAQKT6rIMpNbXJH+5xj2qTuC
XYdqifcVpq0Yf\na12vp6Ivq3qFCVtOjEzQwf6XtDD5dg3vYcfQLnoKfWim7l9L0fn+y98mOfNZlOZetiUfreoLgBQr\nB/7WqIWyeeSj0CPyjvC64I2UZvBwwqwgU10/OPKWG4RWtze3D/AnJ3epUG9RTMq6pYcmm0rh4EBW\n8x++9DNuW/2/r9ifkfBiz2q+8dz3KRrVpBCc6IPEFyTv16p5W7bFrSt/7hFg9X4bpW0vcsv+mvtf\nCbuGdnt/l4zw5NybKSLFAuRtVCeRMi2bIW2Y32/+MzktX7U/eI2JDeOq+g4gN4nnbNvhB1jWTWzb\nJlN2TMuKUUWufZkScr2vKeZr9NGNZu/LhImx4JiWpbosyZnL2Bd/gd3De6vOryWfuEuuTMuirJtI\nqmjLNqsDNF3BsxAwZe9zTNKphAgEfMkhb9saWRjakFvtPefKe1m+z3cZ5PScZzavJGJNyZLT86G+\nACEhDTV8zu83/5lfrL0LzXnHa1lgDhci8n6TwPUvWrZdMYGaNY97pW2vBat61/LJJz7PtuyOmvst\n2+Kl3rUU9dIBr23VEAAOVV9/uf433LT0m6/6fLcfQfI2TCtEriMJMJpmIsUFIbYmW4Cg2VycP1jy\n/YHByaaSED2zeeC3bdvkdJ9MKv3ouiGIwbKtmtqWi9vX3sXu3F5W9673zgHxXILm/JAGG+hfppzh\nzvX30F3oxbQsBofL2LZd9QxFX6rHqrfYz4bBzdy/9aGq4yzbxjBMYsesIzaxtrm2ss18SQ+ZTHcN\n++RdOSn3ZkpIcX/cCjUEGNO0+c7z/81fup7imb3Lvevphskftz7MZ578Mn0lIaTJkh+xXiwb7O4f\nRG7sQ2l0yLscDnzTdJMhLYdhi/5KEhR1v49lzWT7/iGkhE/Y+RoCxLDdR+yYtfQMD4W254o6iZlL\nSc70a04MlAZDxzyzdzm7zZeRm3tInb6IPbl92LbtRZAXk7tZMfCMr3lXQB2zleuXfImCXgwJoK5F\nJp2IYds2e/Ou8CXh2pJ0w2JgqESuqDNYyrIz/gyJ414C/JgDwzK4e+PveaFntX9fWt5/xnJ1v7Kl\noWqzeby25m1ZNn/peoqVgfaPJHlH0eZvAnQPFvjCT5/l4jMm8fLOQbbvG+J//n0+Dy7bwX1PbuPm\nf57LxFEN/PWlPdy5aCPXv382MyYJ0vjNXzbzyIouvvWxeXS8hsjZIO7fIibbv3Y9w5SmY6r2L937\nHHdv/D2N+jF0r5zOj649J7QsA2BTV4Zv/t+LfPySWcydLrLzPfp8F3c/tpkbPjgHPdVNS6KZ0XWv\nLnPfc/tFAJBhGajy3/aZvLxzkO/cvZIPLZzGceP9VIr5khEycRumTUyt1iZKuomUEGSwfVeZNW39\nXhCNaxbvCfgyQ5p3gBwf2LaY4eQQ0IYkSZR1k49/90nOOGEU557lB9Zc96On+MAFM5h/6niG8hrX\n3LqEc04eiz3xRV7u38R/nHUjsQOMgaZb/P//+SRnnTiGf3nHDD73k6VkpN0kRCBxyKToE7PFnwZv\nB6A50cSG5Z1s2JUhEVcoayafuewUZk5u5cWe1WzdrPDw091V0dvd+XDw0n/d8xLb9g3xb+85ke/+\n5iWuuGgikmKCbGFaJorsE+SqLX384Herue59JzNrShvb9w1xyy+fB+CbH5tHZ3OKVV27vON/8dBq\nxqnT/WsnXkJp9f3QtUzSmfKgR3j3P72de34Dn79iNt/69UpSpz8ROvalbfvZ2TlMc32cz922DOnY\n5SSm+wKQVKHxLVu3n98/v5LkTH9btpynLp6mpBlc/+Ol5EsGiVk+mQxr1Zp3X/ol1NR+8oqBbvhR\n0kPlAnJdmNAHy2F/76833AdAfLLQqH+y5EFSPafQ0SKeUXncc7yUAzlZ+x5iEzZj2LBpcAu5cnXf\nDNPi9kWrKDrmckm2QBZC4pduX06PE6TWcEwXeJ+5ze0PvsykUQ3stzZXxRIM6znyJdFfqYZQ8Z3f\nPUeb7FtB5MZ+5PQw2BJIdugeilr1+bWEuMOFSPN+E2CjE7X50LM72b5PfJCWbXPfk9sAPzr0waXC\nf7Z07T7v3EdWiIIxm7p8Te+1wtXmRlp77EbuZhFmuv6hamn2ryvFMfc+4ZeSve+vIjBm+cbd/PdL\nv+CW5f/5mvsa1BoPFktWi34/tGxnyOddKOkhzbsye5aLkmYguf492eLhZ3d6y1hcTb7HMZvHlXhI\nU3DJ0bAMFu34CwPNKwDxvIediPdn13fTlfMjfVEML3p2l6O1PLVqN893v0TeKJAp1Q7ScbEvI/Yv\nWbMPy7YZGCqHTI1lI0jezrNP+pO1bulelHDZuc+la/ezYWAzt6+9i8cHBUm8vCus+e2vIO91OwYp\nlk1+/dgG1NHbWbRGmFslSZivg1i0XBDzn5buAMS6eq/d/gLL973Attxmb5tml0NLBOzOTeJ/UwgE\ntczmPaVe729TEtaRxc+NUIBJ0dm0O0NXTw7dsFCawgFykmqA5L87f1ixKqQVA2RLeedehCY7arRN\nLOW/vzm9uo+u8UFp2093xo84HypWa+lBzTtoNZJi4t4yWYOd3TnvGVYimbb454unM2/mKN4+2yfI\nn6/9FXvG/L7KvVHWTZZt3hHaJsU0hgs6PYNFL9+BlvYtJA2NYoy27xtClqvzIeS0PANDzvtYg7yF\nZu5bshLTxfcjmXHn+v67nCtWZ4orRWbzCIcSlZGiUrzAQCHrmbfcCdUNsKn000LtwJRXCzenkSzV\nfv08U63j56t15ZST8CS47MM1t9nqa5N+g6biVxOIogdyJQfHslAyQj5vbQTyHtZySJK7QNsMCS8e\neRd8Yqg1BtlymKzcyF8Xe3P+RCkpJprmulWcyzb4wlqmXE3ewTEKEpebgSpE3gEBSDMsUDXkQKT2\nYDHnBWC5SMQVT4iT04JUKpPT7A1EsQ+X/D70JlcTm7iR4lh/nfJAKSx8uglz+lIr+dnqX4aEqoHC\nEHe+fA92zH+PJMW3mtiYge2Oz7aW5q0NBI4TRDE40lI/xaA3U/RiG1yhIISA1hcb5QsBVkEEBA47\nhNubKSI39DM0cTGmFCDvcjUhm5Lfn64B35ozXK6+n6DmXWt5m6aJ96w0AnnbisbZJ43lX981k7NO\nGlO1X2kJC2NlKUd8imOSdueCWJk9TuKaGcc0ITf1hPz6l54nhILebBHd9L8LN2ZgWM95Yyyp1fcg\nqToZR8gNCktSsQXbkpDrhjyXVqYQHs/GeEOkeUc4tIhVJNxPnvIUNy3/ukfq7oTvEnStmsG1siu9\nWnjBOCO8fqZDDF6QTsW1C3qRVervUNp3UyxXTxSGUjs46GAR/AC1V6F5uzm7K8k7XzIOSvMeChCv\npBj0Z/0J1jQtbNv2JlLN1MgXq33Kg+UwWelGONZhMKhNK4bXF89H3uATT7YGebtL2AAKZoA43Ykx\noKGEzeYWyROXED/Gj3LPFKsrMCVisq
fpuYFKlQUt9hd88t7W7U/87uQaxEDRHw/TMulveB65foBy\n0xZW9a1DM/yJfqhUYwJWDIYdTasQ8C3bloRqJ8g774zhPV+bQS2gPTvrg0dapy8pOn2ZkhfbYBuB\n4C6XuFS/j5Ll77cKIrmKaxbvzRRDhKZaooJXvkLz3jS4FSvhH7c34z/zXMDEbmbbkGyZTDDOooal\nwRVkhgvV38yY5Hh0S/e+p2DSFu/8eFhrjU3Y4AluFB33k6qxs1tsax6TJTFNuLdkW4yHkhTj25sp\nUTQDz0lLOPeV9yPTlWrNWVI133IQ0MylvTPBjCHFyyROehqAgYL/DZzVeiGtyRaKRumIBeNG5P1m\nQ0CajKsVmrdyZDXvkczmXiCRM2lVLrfaNbybItmQ9haEJudqbg9ix9CuUDRxELkAMb0as7k7gcdU\nudpsHiDQkTSUIT3Qf8UMBVaZlk1eL2AENJ+hgJbktl+pLZtWOFguUwoICLJBWQ8njwlOpBkt7PuE\nsHBQNH1hyU0sEgzyqQxYq4zyHa6hESZiCrudSHsMYbKsDCQKmnF39PmWCKxqrTWoea8f2EivuoHE\nCc+BLO47GImdr0HekmJ4VoVcYLy1DXNRiFN0iNHVzmPHrmJDYZV/vqPlDeVqvE+2BIpBT7ZAr5sg\nJBCZHdNF/Elw3EzE3+ZQC9aQKBE6rPmad5D8k5YgviB59xT6+MHKn3r3D9CdC0SmOwKKOdSKtuUU\nVCsVGsOagVmOcDGU10LzjLZsY2voAAAgAElEQVRtFi0J0Qc3ULJWamC5gryDEfbGUKM3Bjv2i/dR\nSfnHj5VEPIIuFVBkib5MMWzCdsZzqJwj4zyDWj5vggKSs9/oHYdeSHrjLzlj1p0V42V0T6TdmEZK\nTWLaZkjjP5yIyPvvEAOlQTQzaEoNkETg5Yx55C32u2ZzzSpXSemHMsmHa3IdSZu3bLe/Yn/l8ifX\nZOx+XJU+tqLtE9NI0dI/X/Mrfvly7dz6w4Go3FdjNvfIW5FCVox8yXCehY0yagd7ctVLbwAKhk/e\nUkVErGnZVcQ8FCC/2uQttO6gmX446ANWzCrNO6g51zKbBzX3vOFf39e8S9imgm0qlMxqn3cQtZaa\nxWOyPz6KDtgVS+L00Du6dzAgyFnV01qwv7XKoxYD1oOc5vfX1WpRDE+wcTXvuvxUrFwrshX3rDWu\nEKS2+W4J28bT8mwIERtA0m5Ckm36snl6B4uOUO2/N0nTIe9gwJfzHWtbT8Z2hJu8JvrQmy2FiClh\nC7N60KKU06sF3IGCP0Yll7wHRoMZQzHTDGnDXpayWm4C1zIwVNA9rdUcGIXZN576mMidviWzXRxb\n49sXAl8wsMA/xsoJ8pcSBU/zjiXEOOp7jmVS8ngAslqWtqYkvZli2IK2XUT2DRQDgmhgjLRts5x7\nEGOcSqj+flMV30dIKLTodSL0bSNGb6ZIWhWBevkaY3M4EEWbH4XY1T3ML/68nk9eeiKdLeGi9nm9\nwJeWfoPx9WP5wunX8H+PbOIvL/oaZnACiFVq3g5Db2m+l889bdO89VLv2B/9YS3nzxnPFRccf1B9\nfOCZ7by8c5Dr3z/b+1DveXwz2ZxGLqWBAiDxg3tXsdkpSPCpfzyJyWMasQhr3pWlI7tdf6/zcXUP\nFkKBa3nTn4SeXtPFI8v3ceOHTvMi1otGkUw5S4MzET350h4ef3EPLQ0Jpk9soXNyteb98PKdrHi5\nhxs/dNorWiFcYUOpMJs//uJu9vUXkFI54pM2cHfXBo4ZfQ0dHdNC5xfMvC9WKz7hqopMMbGXb6y4\nN3R8vkLzfu7lbh7ZvAncVNWySV+2xH/+5iVQyySmr6Bo+WSlxk1PAHIzuAXJ+4k1W2nMdHHh3An8\n/qmtDA6VGTfL13SDxPenZ3aI8+MlbC2JpOrsGxxC003iMSUsSOL4CZ3+j++oZ3dvDqV1L08Ul1G2\nXQ1JRBkPFzRu+eXzDAyVuPyiseLWJBnLtugeGgScSHTJH3NbjyPFNJ7esI1ZiX5mTWmrGVQUFCCW\nb9hLYgZItoK2+VSSJz8VIkPN0kgACScVpmzF0S2DsqlVuUJsSwJLDWt5FRpfPpNAaRVBcXv6ZEa1\npukPHJM0WxlmK/Gpqyi91Iytpfz2jJj4B+zoHeCWX66gN1MiMcrC/WpiOLWoy4M8u24/v35sM23j\nstDqdlJEUvfkMlzzvb/S2ZRkz2CWeDNgim9GNtPY2GTKQ7SlWnjmZT8S37+voNbqru0W53em2wG4\nc/09nNwxq3a8i2yKNtzgMMdaUVpzJraewDZU1NE76Fk3FmjwrmFmOmlKNEFJBLt2NI9l3fYBVm7d\nBzEorT4Lu1SHjEJ/UVhrmuvj5J0xHNf/DrYMlGHKWi/4rqkuTlmvuIf++fR0OMl0VIP+vAZpMUZ9\nmRId44UrIK8XSODniT9ciDTvoxD//fs17O7Nc/+S7VX7smUhDe52tJYgcYP/UUE1eXuk5Ex++/rD\n2vdjL4i2Hti6iJuWfjNkuq3EH57ezoZdmdBktvi5Lp5d3+1V7zEsg1Vb+ymUDTI5jXXbhfZkOaTq\nkXeFGd9dJuVOYNv2DbFuh29CzZm+VnnnY2vZ11/w2ga8NchlS5DDLxdtpKsnx+qt/fz2iS0hs7mr\nzdz7xFZ27B9mYFhM/I/tepIvLf1GTbO6YYj+xlQ51Hd3PIPEuLXGWveiJTRZ24gJE51kkUooJOMK\nQx3LveMkXZBVIUBGmmFy2x/XMRQ0dct+pLrascf3IzqIxy3vOXmacUwTpk5bwlSKvLBR+JT/vHQn\nz6zd7yWPUWWVklXh/5QNpJiOrSWxTRXd0ti0W5hcS3p4vNpTbRhogM2oVnE/8amrKdhhbV+KldnX\nX2D7viGyeY2N+4VmO8FJbtJfEM9/zrQOkulAycfhZmxLwlIK/OB3Ivgp6Au1ikIjdCO1g+M1Sj8Z\nu5wS5tsa5OvmsVY1oRWu69/gCUFuxrPyunnYhhr2V1eQt2vilhQD07LpaEqScuSQs8fOo8mY6B3b\n0C76Kak6tiWDrXiad1e+i+37hsgVdU8rbUk0M8Y6EXO4ha58F89u3k6uqLN70DeB1xtCECrbBbbu\nzrJsXbfXx6ljBOm675rrLtm63/fne/0P3CMO8br7zh53BhMbxmNjM1QerhKgZEMoIak5j3uBeZKq\nYVsSdrEejDj67uORZBu5LksqoVK2xHc0a2In582ayrFNk9kwuJlp08XzH8yL91wkh5EYk5jAgN6L\nlMgzujXtPceGRJpPvWc2tiV5yk1bU9IXnB3yTlvtTFBO8PqWLfrfaaGkM7vzJE5sn0FnXTtHAhF5\nH4UYcgJC6pPVfiP7gCUvCJnN3UxIru9VqTRlSbVNzot2Pk5faaAqorkWatbudYSDsiHuo9NZu+ul\nAK2INh9Z8xb34i6BclG2AxOxc7/BW3Ozk2mmVm1Wl02yAZPycCk8ybhc/IctDzJQGqyZdcowLZA
N\nCsmuqr7Hj3+exPTnvd+1AuLKtiBDu5T2+pSIKSiyhGwEAn1KQrovB8hINyykZA65MbBGOKC9h9Jo\n6qItJRYgb9MCbKRYmeZEI7aeQIqXqtJn7sntJ6HEmdQwAc0uhd6Vjsni2tZwC5gKyCb5ongPKrN8\n1cXS4n1QjFCZSxcJSbwbUkwjmw8kRTHFxDyrbTqqpNDPDuIxiX+7ZBbHT/K1HqtUJywA8ZJnBXH9\ntbGhiRyfOA2A/ny1sCNZCiCBWaE5O/uTagJFlogPC3J9dt/zvrAqmyT0Nuxio/C31iB/M9sqtErX\nv+1sb29OolllpjQdw+XT30NCrqO8+RQAzjhFVC5D1T2N2y6lMTPtKI0DyM3i25Ad8rzm1I+RUJKY\nvULI2Ws6Firn2zH6RzPJnOe0GXgXnb5ceubxpBMqaOJdcZMDlZx3Ttt6EqUX345VrAuR97hRTh4B\nh/iS8RjHNYtqeHkjXxWVPaV9lD+8DQNCaFUMMGOeddF2+iCpGsm44rXxrxefQjKhcsGkc0UDDb1M\n7KzHkp3+OO/8xLiwcClt+xjVmvb6m5QTzD6uQ5i9nW1j2tJV1gNVkZkxfpTTB92L1bDNGLppM731\nOD520j8TV0Yu9XsoEZH3UQg3pWFDuka6wVcIsAp+YO5yD9eXqCgyECAb+cDZygyrOjBDMzVRLEEO\ntx3KmuWQd8kh77FO3WOXICqXihkBn7duGV6gkrf8JlS9yaZsB5f4iD4G/es9gexfwSUvUqJA6rRH\nWbTjL962XLmCvKtyh9fI8mZaxI9byZ66p9haeDm0T2nuC/2u5VPXKGDbYDlZtSTFEOStSEiaX3fZ\ndLRGzfKfeX+5j8TMZUiq4S83CvrNAwFKdllMikrM94drugmqjiTbNMQbsEsppHiJTD6Q7U6y6Cn2\nMrZuNC1JQSZBa4LWuB3bkjB6JmBbigjGGhSknXeimG1TYczwOYK8Ee9lMmVWvXMtsrOkKFYmGxDS\nipYg77H1Yzix/QSM2BAtHWUkSfKIxb1HW0tCrOwJGG4msobisXTUif5nQwF8jvbs+DhtUw2Rr/ve\nJeQE8ZiCVaynM93OjuwuQd6ShSTbWIZ/vhCgrND5Vq4Fu9jgkYvSIN7rlqYYNjZJ1dHsFckjLtcq\nIyl6IChNwth/DBAonOFcI6UmUWQJa0jYyPP0e+MNYPaOp0FtAFsKPUM1bnrnx2Iylkve5QyWbVGS\nRTu2HgdkQdJObAKAGjP8sUMQn/usl+xZ7uVkd3FSu59tJnHcS6RmPYuk6khmjNaGROBaQEwnHlMo\nODEPKUX0rTMlNN5sOYuqytiyLtwWtqC5MbFjkWwFdew28vWbkRQD25KIqWIcG2PNSIkCclOvqG8e\n8Hm7z8H1a8tNfdhjnRUThloVVHskEJH3UYxaUeFBM26tJQth8hbHFsoOwcmSZ+6CEaIxAyjVIJ4l\ne5fzu81/Iu6UKHQTHlQm+we8oLr6VIzm+rgXqeznwq4OWOst9PmEqRiA7ZVelOoyJE56GjsogDj3\nIAX81K7mDWHylOurE9G4ZnMXlZp0Lf+pYfpJNoaMAye3qWV216WiiLB2J+eA5m1LgQxteYe8bf8e\n9pZ3ICkmetfxGN2TgLDmHXz+linGRFZNz/pSMjSSJz0FQL1aj1VOIUli+ZUXSZ7MY9kWY+pGe+lb\ng9HphprHLtWBkRBaqwQ9Q4Jsi6azpGr/MSQK47wJXU6UeEL/XxLTnwtZB1plx7edyntCK0DRsa40\nJxqZ2ijiMFIteeceAuRddDRvyRcw3ICidCzFqAZB3iUr8Jwd8rZ0550xYk6kcfC9EwlyknGFsm7S\nnmwjbxTIlYre+YYhuwMi/ne/rQpSsDVBCLGJGyFWoqlRXNclJUWWQBcEVrByoh+q7hEj+OlTvWVy\nAdO+KsvYWgoZGSvmm91BmHx1A5JyGjk9jNK2l8TMZ5AdQSKlJokpMmbJ1byz/HHrwxjNwuftWg2E\ni8dGaXfW5scCPnkH7rNetm8Fd738W4KYN2Yu7516iX8/ySFQdFRJCEiiLdcXrpGIyRSMIkkl4WXO\na3LqqWfKQyKHhaO5e5kiTJVYYTSSbLFOe1ospzNVYoo4/8KxFyFJoI7ewdi2tDf/ueMcU2XqnMC7\n2Litfl/N2EEVHDrUiMj7KEN/IUPylCeQW/bXXCccJCOj1gtVg7xdYrVtO+QTr6V5u/5qqC7WAHjL\nJJTGAZANr22fvG1PA3LN5om4Qkdziv6hEoZp+ZHyjoYeLIPZEyBeSRZtuZp3/NhVyE7mLjcgxtMw\nAm0EyTtoqQhOhi4KevgeNcMK+fprJWUIErxsB9s8sLAFIg7AUHJYpTS25ZyrGCTiCrIsYztadHnT\nqZ42plt+H12t08o3CpO1c76LEHk7yT0kxcS0bAzTIqsNeoFCti152rmUKLK/wmffnGyixZkw3XSu\nSBaWpHt+WDdCtzebc8bL0byNGLph0eFoS7HxIpuZXJ8N9bfDnoptg9wUWAoGDNvid0uiBVsT10qk\nxHlFowSWjLZtltBuXXOrI2C4qTjr4knGNAvhQx29E6VjlzceAGXNyXtQSiPJlne+uz+pCGIp6xYt\nSeH3HigP+uTtnO8SnEsGleZYs38Mck6YY+Vkgfp6cZ6vecvYegJsWJ9ZizJqJ5IEiuW7GWwthW0H\nnoOsE1fiKLLiLAGVSMuNSIkCx09o8oPLzBjDeY3ZTWcgqQbxY1cj1w1jJ7NOH5IidqMk+rJjaCeP\n7XrSfxCGK4CIMY5PWRsao+A35Uac10JKTXJK58zQNkm2ScpJLzmPa2lQO/YwNGoJBb1ISvXT5SbV\nBEklSaacJaZIQrM2VRJxcb5hWpT2jQ9dwzZjnvvw2JaJIj4hVqalIeG9h+51Fdm3HoTa0OO159rD\njIi8jzI8vnOZSBRw3Es10xAGyeBHf1hbtT84eWu2+LusmTy8fKfIchUMOlGq28+X/PaD5kkXwQpS\nUsqv4OOlHJRsQbrgVeJJxAR52zbc9sd1dGcdE6ZD8oWSwc/+tI7t+4ZYvlVIvHaAmAbcDGSBicI1\nobkfYFk3uOuRDdz57OPszPjpX3/1WKAkZg0ff7DYA8AdD7/MMxt2+PsDWt7zG3q4/+ltXoY1gBUv\nB7JG1bBkDBULWJbN/zz0Mt/9zUrW7u0CyRZBOs49SorQvFVZwpZ1UnIdVqbTm1SCfmQ3iMc2Yz75\nB4Uwl0D2noDpaeZim6abFAMa6JT6qb5Glyiw28ls5b5DxbzM48sdM6yreTt+U9fE6fZxy0AXS9fu\n8zVcI0ZXT47lS2JYxXqkej/gUJIgZTfz2TmfADOOlWsRVhGnbbmhnyFpPzNaj6cp0YBWEtey4jl+\nt/lPDJYzKHoDZt94QAqR992PbWbzfiG8NSTqGN/a4l03Ptl5FxzXgusxsYvChy4lnVgKR4BKqkkS\nMZmybnoWiKyWDWhsYdLxfMoBUhjVmgYkFIe8pXiJtM
NHSVX0W5Yc06/zPONOXeykEqg1YMvCwuCQ\ntyXrYc0dyA6oSDGd3WN+i5zyg62GCjpnj52HbVbTQVJJEFcV8jlQibNreE9ovyuY6Hum+hslv5JY\ncL16LeKb0Xo8lxx7MZIk0ZRoqNqfjqX9zHqm6iXsKSf3M1jOkI6F6y00JxrZm9/PQMOLYi4zVS+W\n4p7Ht1AeaGFy9p3e8dZwi5dpsi6pYhtxJFVHVWTf8uhp3lLoHqxcE+WNp4KexDBtNnVl+PkD6w95\nIaeREJH3UQYvwtGSR9C8fXJdvbWvan9wOYcZINpHV3RhmFY4KrZm1R0/GKyW5h3URCXVr9Lk19AN\nrNX1yFtm1mThk3txU6+fwcohnadW7eXZ9d3c8svneXHnDsDRLBHLSTzLgeFrIp3pDq8PAC9s7OXJ\nrmdZXlgUShm5bmcgM1egb66/2fXLu9jTm+dXj/vJN4JLjH58/1r+9MyOUCYwSxJ/nzdnfDga10Hf\ncJ6+TJElq/exbscgK7YJ4cQq1vtai2z4ZnPZICY596m7+Zb9PnqWF1MNkH+15l3YO9Zr31svr1ue\nyVnfNY2J6SkhzXt3r/PsHRLq2qvRtdsJ7nK1UvcenWdh9ApNRx23lb+8sMeL5G9K1lPWTdZvz2L2\nj64al3pzFJObJqEbFtZwC5IEckoIdW5ynrdPOFvcS05MY7uNjSLeApDxScMn7yKPPt/FcKmIbcP0\n8e00pfzJ2DYVLjhtgvfeubGK9YrQqpV0nqb6uDdeKVVohZpmehaIgVLGF5Zcn7kTeCincsSOXYXa\nIVZttNbVMXl0g9NH8b6NHS15edBd8vVQIfx1NjaGfttaCjlRIjZlFSYaKZf8HfK2HdO7jS0sHDbU\nxVNccf5xjG2vJ2ZWL29SZMVZlSJR6vOjqMubT0HvOs57zu3pZibEnWWkqk5RFXPP5I4OLxVqXQ3N\n+x+mLOSCSW8T/ayxfGx0U6OnOYPvv3aRVivJ2zGdpzYiyRa2odJQkRBmzqQp3t9mpsPLQJlMqCTk\nJHJcFwKPa4Gq4bcHMHomYGU7aUzHMEyLp1ftZdm6/fRnj0x+84i8jzJ4ZGHEvIQQQYQCoCo0ye99\n8kzGjvJfZMM2mDymkUmjGiiUDQzDqliPWi0cZEv+MqNaPu9g6khJMTzy9uoDy7XIW+GMmaOZM80h\nXOe6biajYPUeOT0szLmOyTc4oU0d3eH9PaqCvLsHi6GsX36DZu2/HXPgLv1lnt+/MnxOILCndi7j\ngHnc6d+0Cc186rJpVUdqZjkkhA3oTgnIYr0n8UuqCNBRFAkUHQXXzxj0AYoJzrUE2IbqTToN9YHP\nXNHF0idLEe3bEpYsyLikGb7mbsQo66YnxMjJgle8xBUWTC0WIka3L+75AP/90X9gUuMElPpBhu0e\n9sbEWH78nbO5/v2zAbDyTVXjIluOS8AwPdLxVg441291TNXZrF1V79pSfWuEbz1wBQwDhRjzZo5B\nkiTU3aeKZUKKiT12LRPGiOuVyqKm9LX/cBYA8+c1M3/2uEAwWIJETMEGGmMueQ/6AW+u5u1o7kpr\nN2rbPuQ6IYTc8E9v83IPWI5PefrxKe+7cjVvL6eMHo7GnzVugvf3tz8+jwmdghzV9n0YUtkjb1fz\ndp+Vi3Qsxa1Xn8Ox45qIqTIzx4r2bEOl05jBW8ecDvhLSs0+EX9QrzZgDY5G6fdzPnz742+lNS2+\nydiY7QzYuzm+ZSpffN+5/MvFM4BqzbshVs/ExrAZ28qFBZJxLS2hnPZSxZyUrqHNB2FrqVBFwpnH\ntPD2U8czPjlZXG+oDdW5P1mSOG5MBzYW31/zQz/4L0DeDTFfwDH7x5KMKzTVJzBMS9R4l6Ct6dBU\nX3wlROR9lMElC3dyrUTIh1rxosdUORSJbdg6MVUmnVTRdKeggHJgzXsoQN7lGpp3KFtWgLx9zdvv\nk0fejmTtfaTudR3ydgPzpGQOuW4IK9vmE1egv2ogAdKYOmfpiUMm/dlSyKzuJVEIEHZQcLED2ZTu\nWH936B6DwVmVZnWlfTdKu798zG1TUYTJOwjbktCscMrUYVMEuNmlOo+0pFiZZFxBloVA4+ZxxlKw\nLRkppnnpJstWwIXg3INhB56pE8ErGEEiIdWhSeKZarqFZrmFMWLifdATyCjIyYDP2yFRvaSCGcM2\nFc9c6xKrazaPqTJtyRaQID9KVGhS+o9lcvNEjxRcK0pojB0TsW5YHmkpDYNI8aInILgTaX8mXPEL\nwFQC5K255F1EqsuKwCzbJ8J0cRJG9zEAPLN/GT3SJm98VUVmVLodWZJZ2buGPnmrFxOQiiW9dzet\nOMVB9Kz/jjvjbzlL+jwyQBBZc6LJS0lslcWzzpQy3mqKSh+xvP2tnNJxovd7Zsdx3t8xVWFWy6zQ\n8UmPvMU4G/smc0LsLG99eqXw3ZRwnoNkc4w1jytnvFcc5xatGWrj7JaFvHvM+wHoqCCphrgjPIze\nSVxOcOnUd4b2B8n7golv4wunX0MlyhtOp7R2nve7M91BIjYyTbkCigvTDs95drE+RN5u8NtFoy6l\n+OLbwYyhBoJZ61RxDz3FXuRkwUmyI85RFZn6eB0fPuFyJmbeAbZMQzqGqsjohk1vtkRrQ7KqENTh\nQkTebzDolsEL3atGXPJVMv3JtbbPO1A4voJ8Y6rirSEGQLZEBKVTYSlb0MKm3Rqad1+gwENNzTto\nNlcM8k4ke9Eh76CJ17CdJTfOB5WIK0h1Wc8n7loO3OVZSpvwVZt943yTslJtogY/o5N7P2U9LJgk\nqHP6GPQHB9ZDl/2JqXISDd5DZWrP+JS1xKesCbTpkLcsoVNhTrNUdFsLZR3TLFdzjnvFFKR4mURM\n8QOeLH+JkK3HQfXJW7c1Z3mM4pnNTcIJQkLEJTWiSQWQRIpUL3LdUB3BSyItNSKlh4jPWYSUzHkC\nUbkozKlWoQEplUNp24OccM+Pe/fdFHdIIZHHzHQwpnwasiT7BXMMv7b4jBZhnVBNJ5LesDxBQB29\nk+QpTwrLhy15/s7eTMl7Z+aOOhWAMaXT/HE2Y9iGitLc65fRDFilEjEFK+dr/xa+5qwqMnElzsJj\nzmNYy/F88REUZy11OuYHU6WoR5UUitKQJxDalkIqoYAR9zK9uVAlX5sDMHSFhBJnsJxlq5NCdErT\nJIKQyg28a8qF3u+JjX5lrrgqc+boed56cPDJ0hUQsFSmpWbzDqeNyY1+8hcARXIEViksCfnr6yVa\n9anETfE8O5rDxOmSN8C5o89hQsPY0H41UBP+H45d6AsLQVgqdqGJy6dcwTWzP8bcUbOrqskFMXfU\n7NDv9x1/ifftg1jn71aQA0g6wlZSjXvvnRog2/p4WJMXFj4xfm5g2+mjT0XVxfuSTsaIKRKGKQJn\nK8fkcCIi7zcYFu94nP9Z93/cv/Xhmvu9IDFbqql5B3OaV5KvLNuUrTC5xlWZdDIG2BhNO/xoVaiK\nNpcb+1jc/
cfqvgQQIrMKn7c6ertXHxfAQiz18j5OtRSuUSyHydsNGDKHW3yTskNoqiJj4pN3a7KV\nhJIIpYN1NSZzqIVOyzFhBwQc19xp5ZrQd87wtlea+4Jthgs01Fiap7h542VPsDGHm/nQ8R/ANhVM\nWw/lHNesssiFbckhzTsekz1BwF0/DIARR1I16p01/yaaFyTkBqwFyRtVJyb5ZFkvi0lIbhhk7eAa\nj7xtM0beqaLVoDoR5bKN2tnlkVCx4JiF841IEsSPXYM6QQRTeZYRSQpN0ma2jQ4nKU88oFG17lnI\nl97yWT4y/QOUN84hVRJJRXTDCsUyiPHQUOwEsiRjWlaoZOqxzZP477d/i3GcGDonKIyBsxzPQTyu\nYA2OQt80h45Um3+Q5QsYFx9zPh+Y8T5/V66JZEz1a0obNu2pdqz4MI0Nzn2ZKumEeBZuJjcXHzxB\ntOUSgmHatCRb6C8OsGlwK82JJi8ILvhadaTamdQwgYXHnIcs++MXU2WSCVUkxnFwaufJ4hoBzTKm\nysyfcDafnfMJPjDjn0J9mtYqgs7M3rApOzPsv++9mZIXhNpWURmsMemblMc3d3IgjFQO2MVJ7TM4\nrmUKkiQRj/vvu7Z9JlY5yWzlHXz5LZ9leutxofPG1o/m+jmf8n7bxbqQ5u0+r2CKYzVQddHVvF2Y\nw63e30GN2nUDphNqiPzbm4+MyRwi8j6ieOz5Lrp6wqkphwoaDyz1g5y2O8kLdgyJZSuPrOjinsc3\ne8uhvCQNstCUlq3bz12PbOS3T2xhqKCFfd4V5Js3CuGkIgHNW27qJT55HWpnIA96KEDGJhYo4wjV\nAWuWbYfK5EmKEYo2V8eJ7E5mthUz60ySkuV9nHvl1aH2RE1rvw61p7kYcZ/YHD92XVL1lr5JG9/G\njq4yacXPmAR45KdvO9ExHVdq3k7U9daTwIxTeukcZDPBkBZ+ZkENasPgZm596o/c+9ctxBM1stsF\nNG+3kIax5zjmjj0RLAXN0vnLCr82s47uCCaSuE9bglhZWCVc4UMPrO/V40iK5UUoIxu+VcJdR+ya\n6yUTSbZIyP6k26AKv3HsmHX8pe8BhmNOX4yY9+wanWPASTiiashWjGLJoqUhUdNnbet+bEWQvO1S\n2iPvYKnaJI2MruskEVOxsh1YTlSxHtC8XcjJAoolnv/9T2/HtGxithCwOlLtSJJUpa25goxVrMPM\ntnGccoa3TxwrYWY7mBYkA0vxtFZJkpg35jRa4+K9NbonElMV7zovbupF1uqRFJN0Y9k737VqeX57\nQNtyMjNahb/YNWnbNqT0jnwAACAASURBVExrOZaSWaZgFJnaPLlm8Q5FVvjc3E/xrikLKrZLwrxs\nJLC1BAoqJ7YLAVRRwiQPMLlpkhfU6eLE9hOIbTsXfdf00PZgVbvebJGCM1e11CdCxzUHyLsj3Uot\n/MeZX+Rrb72h5r4g4oHnlwz8bfZOoLzqbYxPTmZUXW0BIRiBbpdTNc3mSlCgCZJ3haBuF/0I+CDJ\nu0pJXSoW2t4RkfffH/b05vj1Y5u56X+eC23/5cMb+MNT2/ijk6fc9dmokkJfpshv/rKZxc+JZTa6\nqfumV4e871y0kcdf3MOi5bt4fkNPyOddGdzh1om2yk6QkWx6Pu9ahelD/uBE0VtD7aLSbL5++wAl\no+RPtorOcN5J0lLWQRITsbb5VH8Nsmx6H+cwvdiWRGnVOZhZ5+OXA+StaiKBhy37EbxOn+rTMXRL\nx9bjFLJJfvC71aTUdCi5hr+EJ4ahy1X36Js7nWIMRh2y1iisCZIFWMSnP4fS0uMtWQHYYDzDw8/u\n8pa+ubBtKeTzdjNC2UYMWZbEGnDZ4PHnffK2EMk3Jo6qByTQ48LnHTCbG5r/2bpaaSJtihSwiu6T\ntvMcOjqcNe+uoBPQLprjzc44OlYBxe+jm9K0PuaTr6Tqjt88TqFk0NaYrE3eAW3ZM5sjfPluLedY\nYFJWHRLz/LPOulndsJDNMEkAyGaSkmbw4DIh7F7UcQVXTv8nprUI7TFoKhX37rgjSmm0jXOZnvTN\n6u77Z9swJu2n6cSWQxM7wPunXIm2YwZm/1hiqkzKuc79T29n5y4np3Z6nTjdUkgnVc49ZayXZAXC\na59PmSpMvP9w5jGcMdrv07njzwx0vur2PbQ42cckSfJIpLT2TK4c93FPuw1mF4yrI5ugAS47ay7Y\nMmec4I/DhXP9wLjeTNEjrvGdgqynTxTvUFOAvNuStcm7OdHkrYmvhfEd4t0MCl+1zOYH8oMDTJFP\nQ983GZBFeteKtoKat+dWIOxDNzPtmAP+OAQ17wWnC5fD204ZGyJvNxvckUBUVewIoViuvfZv/4CY\nLF3Tn0veiqzSE8gnPVzQ6Q/UL0a2KGtmyHReLBuUpaDmHSbkYUeDtMtpSJRANompCnXJcO7lifHj\n2aVtCpO/G6S07xhGG7Pon/DnqoC1fFlDUkyxbjemISkG/UMlLMumZJSRZJvpbZP5+GfO56tPbKef\nHpAt74PSEZnF7HLaXx8qW77ZPKb564fLKSRkSAhLREdTim5LCwWaqXZSRKzLplgj6yWmUCmXbVER\nyCHsKWMbSU2qZ1sOT7CoS6pCY0oBqoYkWSL5DAifbkX0uhtjEJfjDL94BvETlgc0b9kb/1s+IqKX\nZTuGpYhc4u4MLSkGthHnuHHNfPby2Vz/2FKkZE5MHE77Wjkwm7sJJGI6LQ0xioqF5Wb0slQScoJU\nyuCmj8zllj8+AAjTopsfqjXRChUp6t1o9JyjeadUn4ileAlUDb2QwrJt0kmVn3ziHfz48TgbSi+i\nNGRoUBspBsgqpHlrqZqatxfxK0tIkh+kqBsWsRqEI5kJT7iYPKaBK+efSl+fbyFprwim0ndNI3Hc\nS+h7BbknAqbYoJY3OqTNSSGTKMC4pk7MHuGLjqkyHQHTsV1hGscU39aHFkyjdetuFu0Sgkaw1vak\n0Q386NpzhGUFeNv4M2lPtYX93QcoV/Ctj83zAh49Td2Ih4g0SE6V91OJd501hVMmt4a01ffNn8q7\nz5rMt3+9kr39ec+d0tqQ4NZrziYVF8cGY0Nqrek+GHz5I3PRdCtErkGzubftAH5wgOPUuazrEgpR\nkLxdn/dImncwT4W2KRA3QVggPPeUsZw+YxTppMpTq/wA1WT8yFFqpHkfIVg1UpUCVaYxw3KLhMih\nYhCFkkFf0c/JLSt+SktX8ivr1oE1b6fghuf/U0zH5+1XParPT+WczvnO/mCqVD/pQn3MCc6p8Hkv\nyQo/va0lhd9WFVWSBoZLXtnIpkQ9qiLTXu8kvohp3sdZtovexGa7NZklV/O2QdWwveAmmTq5Ednx\ng7c3J0WQn+l/1JYernYkMi4JE2nZ4V3h47dJxhUM1zfsCACphIrlraUuh0zwthFnsiYKIXjJLZzx\nPnPs6SiWWKftEroiS2TKQ0hIjKpvce6gVhIVE0yVeFymPhUThUEUC1
s2sJVgoJjTDyeoTZcLtLW4\n5nKfHBpiDWS1Idqakshp8fyntvqaVGstDckQZnt3kp7deiqzW+eIcUgNI8m2l9WsLqkSjym0SZPR\nd8yEgQl8aMpVBNXFUGCSLdf0eYeIXJFFwiBElbRa0buSmfDM+lPGNFV9R5WBQ9bgaN7ffg22YyUI\nam5BIvdWKQT6EkRdYAKPq3LIxxn0N4MTsJZUkSSJ9nTAOmGE1x2nEiqyJCFJEv90/Lt5+4Szqu53\nJKiKHCLaWvcUJKr4K5C3JElV7cnOto7mJLphsddZMphOxqhLxjyiDa7jrmXyPxioilxlNamteR+Y\nvINCyiuZzYPHnth+AnVqmium/2NVm3WBQlCSJHn9DL67ifiRo9SIvI8QauUZr7Xd07wlhd6MT475\nUrXm7aIuJV4iTTdDRSrCmrfN5sw28Zdjcg6bzZ3lN6UpXuIKKWg293Ihq6QSKkk1GdK8dctgW2GD\nc7AFZswj/L5Myat85Urnk5pEQJJclyURU9AtQ0RKuyZ3JxmDu9YbVRdm4YD/s0FpEfV3FZ2OppQg\n74DmrZXcQLhAZivHZFly5CK1bT9Kx26x3MPSPHJXFYlEXMEoOwJATAv5usHGGhjDhORkp9604Res\nUBNiQgsUtFBkiaw2RH28zsvFbLqme1dIkiyRWMJUfFOuQ85lCuiKIF+9GCAMxyeXNftobnL8pwGz\nbGO8kbxeYG+xy8vHPGOUr9U1JeqIyeIeZdstpOFkbnPMo+lEgg/NfC9WOemZ122nIlk66QpbNnax\nAXXvKbSkwlHESSXBuPix6LunoioyTfV+JLoLNaAdKrLkFXoQmncN8jYSnvm2crKH2r7HukCyjrBZ\n1m+/MR7O8hXsl/gd9h8Hr2NraYrPBXzRAZ93Q9zXhG0zTN6viFfBg0HNVKkIbHu1cO91pxO3U1dJ\nskqck9pnVvnjXytqEXWlUHWg/alEtQl+pIC1hng93z7nZs4c+5aqNmu9ZxAm/1cSKg4lIvI+QhiB\nu6vglsNUZYW+rK9590gb+O2m+/0DA8TqlgYtaWaIUIOat9zcy7J9ItLbKjnLpGJlYorsmM2dNddy\niua0Y/JSqoO9MEVO6sZ4AwOlQTQnA1le9zOvGb0TsE0VJSau35sp8v/Yu/P4qMqzf/yfs81MJpls\nkAAJ+yabICgo4i5Qt69WWxUXcKlaRVu1daFUpbUPuFT9Wbva1trqQ12hllddeLpp1YLWlcUVtAjI\nkkD2zHaW3x9nmXMmM5mQZCYZ5vP+h8xkZnLmJMx1rvu+7uuOWevL7eA9uXqMeVyl+/CrDx7Gqzv+\nbZ6npJ7Y/knrzKBmNUZxFy/ZhVRCoB0DyvxQDc0zbG533rKDrjkkbZ6rcLsrWJTvRWNwM3a173Iy\nd0kU4VckaBFX8PZUrsdR3xSBz96yUo45vxO/5IMkCOb6Z8mcKxdFc7cjuwMUAGiqfYFiPq+4OLGk\nx+nnbL3fiNaGmGiNnESCieYeVrOaFr0egZB1fK7gXR4wA+nKj58xHx8NYGBxYs4x4JfNoXMAJdoQ\nCLGg01TEzmz9PslsEqO5A5V5UWF/gNt75IiC0GGeWBAEnFJ9DtQvx6KqPODMwbqzMzkp8/YOm4uY\nV3YhYlumIbLpaMS3j4PSNNK5uEgOIgBQXtJx7tGdOaWbUxUEAQtGXYzohzM7HFcyRRZR2mFnP8HZ\nIcuI+Z2LG89FQYoe+r3NfUHiHjbvjeAdjWnmErqkQCUIAr459RKcMvLkbv+MVFLOb2e4oHFfdLmH\nsv0phs2TL9DSCaYY4QAS9RoAg/dByT1s/t6n9dANsxfuftd2llu/bEJMtTJcw0BdY9jJAPeXJ/aA\n1ttLrAIq8zXNDy8De/WtqI/sT/xQV/C1h5dlQYbeWG0uMSpuhqJ4h819QgClwSLo4SDEUKPzGu7M\ne2d9G0q1oYjpcTyx/jWs/2C3s7etumeY9fqJIri6pjBiVqFdsbWOcnRlrbmOdsBufNGyHau2/MU8\nUCt428PmghL3NOZwF0KVyFaLVCWC0lJ7eU7iP09Ts13oZm1VKCUqsdvaBGCH+SErVdShrug980nW\n/2NZMiuW7QsdqWq7N/OW4tjXHIEMa3jWmuMHzEzTzLytD3YljrgRQVxXUe4aQraXfCnDzKYgpSF7\nIwvZmUqwq5TbjVZExCYYutnD2mn5GPfDiPuwH19ik/oP8xQ0JqqI7S077SmX2JbEOmDAXPs/sMgM\n3pE2H9o3HO08xh42d9bhC4lhUfu47OBk/32bc9YdPwztAJuuGtcdJCVR8BSs+WQRgwODoe0fAqO9\nFOquMdBVn7MbXjDFvvbuzMo5Blfm7Q48/qQ51YmV46C3mFXlyRciboospXyvV0y5GPH35gGaL2Xm\nndziMxv8nmJAd/DufnAZ6JqKSHXBlC2ZsuxMz3FPz9gXAuky784Up/g7AwBZTrwWg/dByJ15P7Rq\nA155dyfuXvmOp9HK8sfeRn2zOTcc0+PY1xxFZWkAJQEFQsRd9GP9J7IztiIFUtUObCsyd/s5acDp\nAOBtB2oF4UvGLwIMEXpbGUR/GIZoNfiQzbaZPsmHoF+GVl8LQdQhVe72PB+agoaWKN57y/xD/vN7\nr+PXaz7Axm3m4+zgamgydMEMmvubo4gLVvC2Mm9REJ0Mz3OeUvTrdg9ZuzPvErnEeZ8lQSvwuTJv\nLe7q2Caaeyy7s57wl0M9VePun2suvZGgt1ZgQukkSKFGSFWJZXSxz6bCMIC4nZl7Mm+/uYey9f7E\nYDPaNHsLy0TWO7bYWspTuQcQVZSUJC5A7A+BgUHz8a/sfx5hcb815SGgusIOgmaTFA0xRIw2xHeM\nxfTBiTXqdvAGzPXtRpv5evaccNAvozpoBqq2Fsks7LOCS0u7+Tu3i3wG+BJroO3gbVfXjq01f85h\nYwc6owLuoFhZGoAAYGhVx9854B16lCUhkXlrZuadnDFqmp5YrpNuODPpQzlV4RLQ8QPXPUfaWYGX\nnbFVliay/AGl5haVMsy/02CK4J343XXN6CHm//3Dxg3M8MhERul+T6LYu5k3kH4IORvsn+WTRQyz\nKtyryjpvhuK+6PLMSSuJkbVU3+/KcSSTPXPeuQverDbPESczKWmAPPQTfLjTu7ymTdgLsbzOmeON\najFEYqq5jlY30KaJEACE6meiWbaWFllV1MGA7GzacP74ryKyx9zooXaIjMtPnAXdMPBKXQPW7/3M\nyXy11lKIZXUIi/sQ9I81s1PV3NtWFAV8e958/PKjTzF1qoh3/55ocFLiC6IZifWPdr/o3U1WW0+7\nGMfOOiUVMVWHjihEeCtSJ9YOxsdNiZaR5vPNDz17pACAOd/tt+daXQ1GrOB95IwifN76mfVzXWug\nnUYuGgTr/BiajOKAbA25CmZBmL9jsxnJNSw4o2w2Pmr+wNmJKbLhWHO/agDtLQJQYgV915y3JEWc\nJVRicTNaVfPnuzPvb596P
Ja/vBX75E8hKDFUlgWxwzpGe8574UmH4uebXI1rrO5XQwYU47yTxiIU\n9OGdLwdgXcM/UOoL4ZRDvo5h1SFcEDYb5OwT/us89fCRI3HWCWbryTsunYn9zebWh9VNZlCwh8JP\nnjEUf39nBzTdQHFAdoYd506agj98bG7KcsnJhyEkDMCU0WbWfszUIRhUUYTRNWaf7GWXzkSFK6hV\nlRfh9kuPwODK1FXInjlvSUQsrsEwDKfaPDljVDU9MSef5kP1vmuPxusbduGZl825fk+Rmiu4JQc0\nd5CXU2Tw9187B63huJN1L7t0JprazIs+ewcrO4ja2Zq7u9hti7xVzJnMnjIYA8sCGF2ToiNZkvsW\nH43G1ljSnHfXC9Y6M6DU3BfdMHIbvAM+GcsunYnykB+KJODLfe2oTXMRaHNfdCkpRlnSFay53X/t\nHLz18V488Tdzu9p0GXqqi4NcYPDOEbswTSzdB6m0Ac2tewAkrh63la6Fuyg3psUQi5vLqEQB2Cuo\nKJaLIDQMhVi1y1xcJOowAAQUGYJsZn2TB0zA3z7ZZ3bv8oedtZhavVWQ5jOvnu250jZhPwRBgCDH\nYaiJhgMTBtdC/FhE3JpntTPvimAJmvfHzbXWmuQUpe1vbzH/muxqcSdwxs0GNIpVze5aQlJRFAK8\nsdvJrGPbJiIweb35GnIMorVlpN6ayFxDivke3t3/Nt7d/7Z5pyvzdobQ5Rj8483vG9EihII+TB0z\nEOs273aCfak+BOMGV2P9f8zzJEuCk50FjUrobaUQi5s9xwgATc0wg7ccd4oI/ZIPoiA4PbvF4ia0\nqOZzy1xz3n6fhOpQGfaFzfqD8jLRXLalJ4bNq0PeavD4DrOJSHFAdrLYE8dNw4mY5nmcX5FQWQpU\nxkc79w0vH4RqK3sqtiqFAWDW4MPx7KsfQ9s/BJNHVngyQ3e2NW5AotBt+qihngsxURBwyPBEtfWI\nwR23dxw5OH3w6ThsbjhD58mZt98nQdONRJerNMOZpUGf50PeHdB8nmHljnP0smQeQ6oP9oqQ31lf\nDQChoA+hoLeRjP1+3BcCK+bcDkkUUaIcWMFa8rntTFmJH2VJ8/2pmrR0hyyJqAz5sa85mnYIOVvc\nf0/2KE9n5DRLwVIWrKW4QAPM3/PIFH/Hydw1BRw2Pwg5w+ZWT+WInmKHKxd7yZdfkcwPJ1GDIvoQ\njWuQkpYYybIASUlsU1jXGIERC6BdS6x7tVtzhvzmB669WUMM7TAMwwrePkSsHbxkUUaFvxx14X0Q\ny/dCHmAPi7u2WlQVZ/mUvduYMyft7GGsojUchVhsZuYhV+FOyrWg9rB7WzmiH1vLk5QoxFAj9EgR\nEHd1B/OluPp27fhlX0BIFXsgKHFIrYOh7jSDn7OUyPp9+EQ/Lp9yEcT95m5DdsEaYFZdq9aOSoYu\neLL7BmsBgFi6z5mXtzd+QDwAI+aHEGzGjlZzyL0maSlSib1LkRxDaYld7Z0YNncXOk0UToTeYI6q\ndDXzce+6lG7tbUD2I/blKECXMLC8KG27R3exXbHcvXW86aQqWItZ65cVyRu8Az4JqmZkHDYHktY4\np8mQUs2P2wVv7u1dD4Q9kuD+PZX5Qx365OdCb2XeQOJiLpeZd3d4Mu8U1eBdybzNx2U+X+6Lg1R/\nS9nC4J0jTsGatYGCs/uTLWlLQ7tHud9ndmkSJA2KYG4Dam9q4ARvSXSGtQNyAPWNYQhqAO1qO1Td\nvD+shiEKIoKKGbTsIdKI0WbuYiQYgKZ4CuiqigagOdYC//h3nPvawq41yZriFGm1WNXmRofMW0Wj\n8hnE4hZUxMd4AkiqYOLeYcoZQg81QJDjHdbRFrn28lWsYUnPPLp1DGKRtca8bZIzn+tklFa2LMIq\nHrP+I8qS4BS6tEfi0PbVmIFb9cFd6mq0l0JvLYNUXg+p2mxp65f8zsWa3h6C6I9g475NKJKLMCxU\n63kPIcVV+e/XneO2P2R8kmvNtpgYdTiQzOeiCeeiSC7C5AET0j4mZm0vW1bs82Qi7vXSgiDg4gnn\n4uyxp3d7HW86SoqlYnbzEZ8ieoJOQJGg6ZmHzYH0WZV7Pa6U4jF2Zt3Y0vlFdjr2h36uM9RUpG4U\nZ6VjX8wV+/v+fXXGezHoyox9nS8VS9aF2J2x8U22MHhnQTiqoqXduyuY0yXMyvTiRlJ3LtVbgOEE\nb8Xa9UtUAV1GLK5Bttbl2vPjih28NRkCBNQ1heEXzMBoN2Zpj4dRJAcgSaK5VCfuh2EIaFWb8ceP\nVgEAtP2DPB9WA4OuTRosEVenOMOqKPcrIiL2Bh3OnLcVOBUVMdnMumvh3bIwOXifNOxYs2DKZjVZ\nEcvMPa6T23C650EXTVqAG2dcA3XXqMTx6e75bxHlYiLrtT+c7WAfMMzXtocYJUl0/qO3RVRA9SH+\n3ymIb0/sya3IImCIiG01h6ztna38kh/2SgB7HXZYi2B8+egOGzKU+q3aASXmbCBiaHLK5TElUuLi\npegAMp+ja2bivuN+2GlbSltxkeL5MEquDp9dMxNzhx/f5Z/dZUnLxlQtfebt90nQNHPY3C4sTCdd\n5uS+v7Pg3dDazeCdIvPuK+5h855edOVL5q2kec+pMu/OCtbc2/Wm09MLou5i8M6C6x96Ddc/9Jrn\nPrt6Fk7w9gZ3Q9CgR4LmGlZRcTbZ8CsSSopkCJKOPfti5iYMgt061GroIgnOhhRtERXhqIZia3/h\npqg51xpWwwhamar5QWj2zd7eth0fNXyKQfIIaPW1GDwgEVA9OyxZhg8yX3dAqd8pShs62Oc0QnEy\nb6dtpwpVtIbsFe/8kXsI8VuHXdlh/99xQ8xWlfb/PfcmAYD3P+DQkhqMLR8FGCnmvGF1RLOqdodV\nlzgZROyzqYhvH4ehhrk8ys4AZVFwisbs4VmtvhbaPnP4fNSQUqdHtxEtcvrFA4Bf9jkdLY32xEhA\nqsy3LJAI3s7fhC55AtL/G30Kjhx8OAJS9pbq2PPcQyqDnkrsQTnaaMHdrEiWBOiG4azEUBTJO2yu\nmHPebRHVHJXqJCBJXVjDW2o1jXFn92NqzIu5IQO6N8ztVyQU+eU++2B3S3Vx0l2DrL+T0mJfhkf2\nrXS/d/v3ka63eTK7F7zYyd9YV9eJ97b+ffmUp+xCG8MwnA+WRPA2/9XQMXhDC0DdNQbV46LYGfkC\nsLbLnDV5IF54C04wcipXreCtSCIgxWFEfE5L1XJ/KRoANESbMArmnLddLFUe8mPP/nZz/tgXRZEc\nwHdnX463yxow3bUcZXz5mMTxqTK+MuJknHTUZLz7aT3iqo6nt5hrzysrJWyPxc1kU1NwzNQhEMsF\n/CeyCZKswfBFYBhCh0zbfXt8xRgIgoAbzp0Gnyxi9/52zJo4CDe/9ifnfert3jluWRKwaOL52NL4\neYcLjbISH5paDRiGGfyrS8px+lEjURr0YfaUwSgOKLj27EPx8z9thLprDIQae7g8kX
nbRU32nuTj\nhpbhzGNGYV9TBDPGV+FnqzbA3GFcgN40EKK1I5tn2Nx1wTFj0FQkq7CaqMiDvsCGfebPOe+YyZ6i\no1NGmu1qX3rjC+e+AaW9u2/wLRdMx0dfNGDK6AGIxTV8/YQxB1Qo1VPupZR2sLH/litCfs8oix3I\nW9oT+5ink/yhe+uF0xGNe7OpMTVluPTUCc4GGwBw8hFD4fdJmDHeu/NWV10wd5wzrN/XejN4H35I\nFRbOH4+jJg/utdfMhuRs+vuLDkdTa+Iz1/130dn5GTE4hEtPnYBDhqcfteqrCzQG7yxStcSmCppm\nz3mbHxya4A3eEPREYxIkdtzy+yRnq0l7DbOdeTt7RUsCdCEOQwtiZ521UUdxJT4PA/sjDeZuZLrq\nZN5V5QEzeFsXEjXFQ1CsFOG4ad4sa3jpUAwOVmN3+15EP5yFY4+ag1DQh+Om1WD9B4lK7WDQgICw\ntbm9gLOPHY09cRn/ec8cNheUKBD3IVDi/aAt9lQrm+996hgzCE8YYfX/1v3QxXarUKxjRe+RVYfj\nyCGHdzj35SV+8z+rIQCCgepQGfyKhLlHJPp6H35IFYJ+Ge1R1cmU7Yst93CsnXkfOnoAJo9MVH+7\nq5zj28cDhoihlRVQRNmpcTDCJdAjRThhzHTPHL1znEWJC5KdrealwNETRnV4HODNEFJ1EOuJytIA\njp4yBIBZiX3aUSMyPKN3uTNve5jX3rSnqjzgyYrt77dFVFRXdF44l5xVpbsYOW5ajee2KAgd7jsQ\n44ZmnqLIld4M3pIo4sQZQzM/sI8lz0PbIympZJpKyPR30JWitmzo+zGdg5j7Cl/Tra+tYXNnj2Xz\nljlfavfztpc7WTtuOXt0W8HSZ+/HbDdOkTSn4GxHnVn1XVtqZtANkUa0W/PRRdY+t1X2jkuy+bo1\nJemvom8+4jpEP5wJI1zq3bQ+oCSGyJUwRH8EmpUZK7LobK0nyCoEXxRGzO/Zlxfo2s5DJa3mHLPe\n2DED6uxDKRS06wLMoFDiSz386QzJG4bntuyZ844797l55v00H+LbJmGYPsN6Qet+Q0R0w3E4b/xZ\nKX9+kc/nFA8CZuFdukpud/FVLqtac8Gdedvnede+xI5x7mFz9+890/RBbwaufCX1g6H7XMvlUHa6\nTaeyrfB+qzlkL7sCEsPmguBu2WmxN9+wMm87wxZE1QreiblQAAiIAc9rGFYWb2gydlrBe0SlOV+8\nP9Jo7kcNIGgFVLvNYeyzQzEiNAynjpyb9j0E5IDTKtL9HyIYkJ3g/Z+ItZtYuGPwhq/dXI8eD3To\nhdyV4F0ePgSRjXMQ+++UDt/r7EMped/idEt07Kvu5P9/dntUIJF5JweCVEU79lW49+VStwwFzGVP\n0Q+OcgJ4mb/jDlm2rhTP5Bv7nRquM2af50TmXeQ59+7fe6bCqT76XO1XCvECJpdD2U5ilmMcNu9F\numF45lI8mbfmLVhzb7cJwargtoKz0SHztrqLWXPefsneDMMM3ppgXQioMnZY2/UNG1AJn6hgQ/1m\nJxjYa4ZDRebws948ELfMPK/L7y8580bS7kh24xdFFlFkWM1gfFZns5i/Q1WwLMo4YegcDAqmn1eU\nZbFDoZqts0KT5Cvv9MHb/Dd52FwUEsHbnitLfs1Uy4DsD8p0u8h1PE4RRqzIXG5WuRdiJzsu2Mv4\netJoo78RBAGGYSRl3lbw3tcOvyIhFFSSNjFxZ96dz3l39fdwMGPwzi7nsz3HDp5PgT62e387rrjn\nn/j7267+1/HEsqrkgjVB6ph5G9awuW7vNuWLwO9zZ97mtZZTdWy9hu7KvJvaYigt9iHgk+GTzCD9\nft0mAMCospEA9OeBHQAAIABJREFUgPJQ9ypFk5tcOHtuW/RwCLIkmPv/Wpm37rOat8T9Kfv+njv+\nLBw39Oi0P7OzZRzJnbHcyoq9c8IBOXWBlz13XGQdWyITTPS/brcadSRn+ikzbyl1Jp+JfcEW19MX\nOdmvOXxQ560h84m9JMtd4S675rQHlgc6jES4f++ZMu+DbXqhOwrxHOTygqWvLqaZefeStz7aCwBY\n+ddPnPvcm444QyuiO/M2AAiJPautYFgSrwX8m6AM+wQ++WRnztvOvINyEIh3zLxrKspR7h/gVMi2\nurbpBIDRZWYR0qSRlTh99ghMH9e1Stprzz4UX9a3ej4EKkJ+nHroNLypbcWYkrF4+8P9MNpKofgT\nFfHmkjd7NzJft1oHdjZ3lep7t1wwHe9vrcfXjh8DUQT+ZT9WTP2zrz17Cl5Y/wVOn20VaLlesqqi\nCKOGhPD5LnP0IPkDIdV8qzNsbkVaWRJxwdxxad8DAFx51hSsa9iBrZFdnuHjZGcdMwqqpuOsY1IX\ntOWj75w/Df/3n+04+fBEEdSx02rQ0h6Hbhg4ekqiHuPCueMQ8MnY+mWip26m4D24MohTjhyOKaMq\nO33cwcyvSDhzzshO29MebIr8svmeh6R/zxfPH9/lTUk6M2N8FU6cXotjpw3p8WsdCAbvA7S18b+o\nC9fjqCHezQXswCYEWiGW1UPbMwLRlJm3GagF0XA2FnGG0q3g7YsOQoV/KBqKdwBSvMOcd5EcgBAX\nnNakLaq5DehXpo/F7JpEj+sLJ3wNL3z+NzRGm5znAeaQ8NeOTywDy+TwQ6pw+CEdA/3Xjp6Mq6uO\nwsdb67B+rbkft/sqNCgH0BSzh/SVbgZvMem22XMaSD1sPmFEhVOpfv5J4/Avc5dMKGLq4dXqiiAu\nPTWx/lpAYthbFARcd85UfPfnr6c8lmCKLlPJAf74aTU4cXpth8e5nXncGEzYfiZ+uWEfFhxydtrH\nBQMyFn7lkLTfz0dDBhTjklO869/H1pbh21/vuKzOXimwbXeLc1+mYXNBEHDeiWN74Ujz21ePHZ35\nQQeZTO/5pF6qmpclsU/+XzJ4H6AH3vkFAGDW4Bmebln2XHdgqtmcJdJa4Q3emrdgDYCZfetyIhu3\nhs1jcQ2KHgREQBOirszbqjZXJBTFi9BqBe9P2z+CKIiYPND7ITin5kjMqTkSb+95DwNTNFzpLe4P\nUPeSnqASRFMssZtXd7bLSw6YPlmCqplDy501TrAdWzsbr+5ch9HWlEEmiepz89+yksQUQ3Kmn7pg\nzTts3tWGVhWBciyddWPXHlzg3Bdt7o0/iAoJ//K7SdVVZ04ZAJKnWARRSxo2TypYgznsbcQDEKwm\nJPYcciSmQdB9ZvAWI4iq1lIxe523LCIoF6FNaoXgb8OeyJeYWDnes4mF2+GDDuvRe83Ep4hmP2rd\n8GTe7mpyo7uZd9J8kt8nOXPQXWn1eN74s/DVMachIHdvXbS3mYP3WFIOm9tz3vbwd+FNN2ad5ClY\n40cYFSYWrHWTmlRY1CGQGELSsLm9zjuRedubeiSGzc3gFo1rEKyGJHEjirC1TttemuWTRQSVIkCO\nQRpgNvaYOWh6z99UNwmC4HyIuueQPOuVX
ZttHIjkbPdAd0USBfGAAnfyum+3mKp5bqfaijIx523/\nfEbv3uYtWOvfG2QQZQuDdzfFda3zBwg6/vi3T7Hps30AXJm36FoTaFecJw2bb9vdgi92mvPcUSOM\ntri53tVu0qLIEkqUIATRgFS1A7IgY2rV5J6/qR6wP0QVxTtsbjNUxbOTU1clD5tne79cZ847xfda\n2uOe26kL1rpXbU5dx8ybiMG725Izb7ufucMKyA88/T6AdMPmKgZVFGFghbWHtWvplb0dZtQIO3tx\n25m3IotOYBT9EQwtHppoitJHjpo0CANKAzh8fLVzX1BJtAOVoXSrjaA7eI8cHMKF88b37EAzSZEo\nf+/iGZgwvByzJ3v34lZkEbMmVnsKopKHzZl4976JIyowqDKISSMrUFHau21iifIFL1u7SdW9WVgs\nufuVNY9tf3inK1i7Y9FMPP3uK3izHYAuosgvIRzVnK0129VE8LaboiiyiGI90XQk5Ov7db9nHjMK\nZyYtYXIPm/vl7q0td+/zfPslR2R9swdnnbfr1zRuaDluuXBGx8cKAq4+y+z89vQ/twBwVZsbicdQ\n7xo3tBx3XXVUXx8GUZ9i5t1NquEdNjdbV7rms63MuzJkZsTJvc0Bs1GLTxFRVGT9GgzRqdy2s+zW\nWBva4+3wiT5nWN0niyj3JdYvhtIUqvU1d8FadyrNgUTBmiSaLUaz3Xwh0XGte+Pe9pJBnfVqRJRF\nWc28V6xYgffffx+CIGDp0qWYOjWxdnPlypVYs2YNRFHElClT8P3vfz+bh9LrkofN46ruZNsAnK8H\nWMN6qea8BVmFJIoIBqzgrZutIOubIs6weVu8De1qGAEpALs1hSKLKJMSwbs0zaYbfc09593duWq7\nOMkO2tnecEBI7pd6gOSkJi3MvIkoG7KWeb/55pvYtm0bnnrqKSxfvhzLly93vtfa2opHHnkEK1eu\nxBNPPIGtW7fivffey9ahZEVyG8u4qnn7lVvBu9Rqv6m72qMaqnnNJPniePHzv6MdjQDMOe9ia39i\nSfdBgIDWeBva42FnO0/ALFgr8yeCd1mgf3ZOch9z8qYkXWVn3nZGm+3t9xLD5t2M3slLBhm7iSgL\nsvZJuG7dOsyda+5WNWbMGDQ1NaG11exzrSgKFEVBe3s7VFVFOBxGWVn6/Vb7Ql1jGI+t/djZDjJZ\nqszbvVOY0/LUCgLujUnsrFoYsAN/+XwtXtn5uvVY0Wk6IUkiipUgGqNNiGgRTxaryKI3ePeDOe9U\nZDExsNPtzFtK7K8N5K5Pc7eLxa0n9tU2gURUGLI2bF5fX4/JkxPLlyorK1FXV4eSkhL4/X5ce+21\nmDt3Lvx+P04//XSMGtV5v+aKiiBkuXeXCVVVpZ8rXrHyHWzZ3oiyUABXnNVxO8rikOJ5viCJiXXb\ngJN5y4qEqqoQJFkCYEAQAD3uAwLtHY+nrBhl1hy5IokYXl6DD+o+BQBUliSC9eDqEAJFiYA9fNAg\nVA3su3nvdOfRCNYC7wB6OIjSEn+n5zudygpzSkCWRef5AZ+E8cMruvV6mVx46kT86JE3cN68Q7r1\n+iWhAKqqQrj6nKn45aoNOHXO6C69TjbeS6HhOewdPI89l4tzmLNqc/cwZGtrKx5++GG89NJLKCkp\nwSWXXIKPPvoIEyZMSPv8hoaOwa4nqqpCqKtrSfv9fY1h69/2lI/b19CCOiVxf2tbzDNsPn54CB/s\nBMLhOOrqWtAeiSWK1TQZhi4msnPLVacfin+tM3+uKAoY5B+ED2AGb9lINKNoaQ5DiyZ+dVq72Ol7\nyabOzqMAH04oPh8vvl0HjDO6dYzhtqj1WnCe/7MbjoMgICvveVRVMX57y4kQRaFbr9/cHEZdXQtm\njhuIw7v4Opn+FikznsPewfPYc719DtNdCGRt2Ly6uhr19fXO7b1796KqytzcYuvWrRg2bBgqKyvh\n8/lwxBFHYNOmTdk6lG6xLzbSjdJ2GDbX9KRtPs3MW7NeJ2q0QvBHrBcXALXjdZMsys7wuiyJqA3V\nON9zL7tSJNFTCFWi9M9hcwCo8g0GNB/8Svf+1Ox13u4qc9GqPM+W3hqaL8StGIkoN7IWvOfMmYO1\na9cCADZv3ozq6mqUlJhBpra2Flu3bkUkYgazTZs2YeTIkdk6lG4xUqzTdY8edChYi2uA7B42N7Nq\nOxjvqFqDwNRXrRcSYcQ7NpdQRBmqtaRMEgUMK0kEb/dabrt/+IiQuctSd/t254IdfANK9wZ5ZDm3\nc91ERPkga8PmM2bMwOTJk7FgwQIIgoBly5Zh9erVCIVCmDdvHr7xjW9g0aJFkCQJ06dPxxFHHJH5\nRXPIvdRH1VX88aNVmO3aBlQ1OmbeYlFiqMQQzO87Veae1xZgtJVBLDYff+GEr+HThs9RVTQQmlYH\nwCxYqykZjONqZ0MWZcypmYUnsN45JgD47uGLu70eOVfsgjNfN1qjAole6WKWq8x7C+vUiCgXsjrn\nfdNNN3luu+e0FyxYgAULFmTzx/eIu8nGxvoP8cbut/HG7red76tJvc1jqg6xsinxfGgQBQGaYeCL\nvc3eFzdE6K3lQPUOAImtO4FEm1VZFCAKIs7vZH9nScxun+/eYGfe3a02l6zny3mSeff3iykiOjjk\nRzrTBxKZd+pGG8lz3jEtBiHYAr3NrArXoEIUBei6gR/8YZ33yboIvS310rhZE83+2ccfVpPy+/mm\nImQO6Q8o7V7v9UTm3b+D9xGHmPUcIwf3zzX3RHRwYW/zDARBgF/s2Jc7ntzbXG6EIBjQWiogBJuh\nG6qzx7Wn8xoAASLu/8ZXsOaLCMaVj/Z878hJgzC2tgyVKTZc+NkNxyY6teWJMbVluPvq2RhY1r3g\nbQ+79/fg/c2zJuO8ligGlhVlfjARUQ8xeKfhDJsLqbt6JQ+bq4K5lE2PBiHpkpN5R2Kad/03zD2m\ny0sCWDTp/JQ/e0CaQJevexdXl3c/oLl7m/dnkigycBNRznDYPI3EUjEBmqF3+H6HLUFFa9vOmB/Q\nRWiGBkkUsLehvUPmDZ2nvavsXuH9PfMmIsolRpE03FXDWlKWDXirzQ3DgC5Za7jjPhi6BNWIQxIF\nGAY6ZN727mCUmZ1550vBGhFRLjCKpJEp845riYC8e387oJidwIy4HzBEqIaayBalpODP4N1lSp7M\neRMR5RKjSBruOW/N6Dzz/vmfNkFQYgCAUn8I0BKZNwAIHQrWGIi6yqdIkCUBRT6WZxAR2Ri803A3\nadFTDZu75rwjMRWCEoUiKvjhJbNRO6AUcT0OwT67ycPmev9fn91fyJKI755/GM4/aWxfHwoRUb/B\ndCYDM/NOVbCWCOiabkDyx1DmC6G02I/yYDF2RXRIkpW+JxesaflZNd5XDhle0deHQETUrzDzTkN3\nNWlJOeftWuet6ToMKWoOmQPwS+YabdGa6xaS57w1XjMREVH3MXin47RHFVLPebuGzXUhBggGQtbu\n
XgEreAv2RiVi0rC51rHpCxERUVcxeKdhrxRLV7AW01QnOzdEs9K8WDG37fRbu3wJaTNvDpsTEVH3\nMXhnIKYpWPt8dyN+9Zy5B7kmmpXmQTt4S1ZmbQftDpk3h82JiKj7GLy7INWcN0QNb31sbt9pWMG7\nWDaDtzNszsybiIiygME7A90wUg6bu7umGZJZvNZh2FxUARgQ/OGkF2XmTURE3cfgnYFupMm8reBt\nGAYMKXnY3NoRTFQhDdoGsbjZ05iFTVqIiKgnGLwzMAwj5Zy3ORRuQDcMCLKdeZu7StnD5pBUSKX7\nAQCXTlqQk+MlIqKDH4N3BuaweYrMGwAkFf/e+SaU2q0AgGDSnLchqBCKWmDEFVQFB+bkeImI6ODH\n4J2BYaRYKmavAZdUPPnpaufu5DlvTYpADIRhREKQRc5zExFR72DwzkDXOxasybDntL33FyctFYvI\n9eY3wqEO+38TERF1F4N3BobRcT9vUU/MaeuRIud+RTSXgNnD5mFxHwBAiIYwpHgQJMOH+M4xEFiv\nRkREPcDgnYFhGNCT5rwF3cysBUmFEU0Eb8GKyvawuV1ULmh++CQfZukLoe4cl/2DJiKigxqDdwYp\nC9bsJiuSCgjmBPjo8Hzn285SMYtgPV4wmHITEVHPMXin8NH+TyH4zMYqqQrWjLgVjK3gbRgCSvUa\n5/uKKENxFagJujeYExER9QSDd5KWWCt++t5v4J/2CgAz8/7vnibPY1S79kxSIQgGYAiQRG9WHfKF\nnK9FnbuIERFR72HwTtIWbwcAp6hM1XTsaWjzPMauX7MzbxgCxKQzGfKVOF+LOnuZExFR72HwThLT\nY57bcVV35rVtumadNsnsXW4Gb++pLHUFbwHmELoB7+sQERF1B4N3koga8dyOxXVAMAvWoh8dgaJw\nLbR6c37bnXlLSeu/Qkpi2Dz5e0RERD3B4J2kPSl4x1XNybz15gEI7T0aajQAABCUqBO8haQzWepP\nBG8hKXgn3yYiIjoQ7NmZJBz3bt8Zs4bNDQMABLRH44Dqg6EqEAJt1lruFAVrimvOW2SwJiKi3sPM\nO0lY6zhsLgg6YJinqj1ilprr4WIIgTAEUYNhCB0CtGepGDNtIiLqRQzeSbyZt4G4pjtD4wAQjpql\n5kakGIJgQPBFAUPskHlLouR8bX+L5WpERNQbGLyTeDJvwUAsrgGCDlmUMLqmFLo5fg4jXJx4nCFA\nTMquJw+YABgCYtsmdPgeERFRTzB4JwnHXcFbVJ05b1EQ4VcS2bQeDSYel2LYPOQrwYTGi6DtGclh\ncyIi6lUM3knCqmvYXNSdanMR3uAN3fV1ig5rAKwit8SwORERUW9g8E4Sdi0VEyTVWectQoLf5w7Y\n7lPXMfMG4AyxC4zeRETUixi8k3gzbw2abkBwhs1dp8u9Q1iKOW8gEbyd2M2KNSIi6gUM3kncTVoE\n0W5ibgZvn2vY3NATpy7VUjEAmDKyEgBw2NiBnvs5BU5ERD3BJi1JYpqrt7lkB28doiBBkdJn3qnm\nvOfOHIZDhldgWHVJh+8RERF1F4N3kqh7Y5KkzFv2BG9vIE+VeYuCgBGDQx3uJyIi6gkGbxfDMBDX\n4s7txLC5DkmQkoK3O1innvMmIiLKBs55u8R11bttp5TIvCVBhCy5h8q9WXiqYfNkrFcjIqLewODt\nYs93S4JZmCaIGiCqEATAJ/o9mbe7YC3dsHk6zNGJiKgnGLxdolbwDohW9zRRg+Azq89LlNABF6wl\nG1xpvu7omrLeOWAiIipInPN2iVvFakVSEG1aCyCp5sYjAEqVEGQxdcGakWadd7KTZtSiOCBj+riB\nGR9LRESUDoO3i5N5C2aGLEgaBMXMvEt9pZCTsm33110ZNpclEXMOHdJ7B0xERAWJw+YuMavS3O8M\nm6vOsHmZrzT9UrE07VGJiIiyIWPw3rp1ay6Oo1+IWcPmPqMIgJV5W8Pm5f4yyHLP5ryJiIh6Q8bg\n/e1vfxsXXHABVq1ahXA4nOnhec0eNldgBm9zztvMvCuKyrwFa8jc25yIiCgbMs55P//88/jkk0/w\n4osvYuHChZg4cSLOPfdcTJ06NRfHl1N2gxZBV2BoIgRJBXwRGLqIkFKMqBRJ/URD5LA5ERHlTJfm\nvMePH4/rr78eS5YswdatW7F48WJcdNFF+O9//5vlw8stO/OGIQG6DLG4GWKgHdq+wVCUpA5rbhw2\nJyKiHMqYee/cuRN/+tOf8Je//AVjx47F1VdfjWOPPRYbN27EzTffjGeeeSYXx5kT9py3oUkwNAmC\nYt6v7hwHRfL2Nvd0WwOYeRMRUc5kDN4LFy7E17/+dfzhD3/AoEGDnPunTp2aceh8xYoVeP/99yEI\nApYuXep5/K5du/Cd73wH8XgckyZNwp133tmDt9E77A5rhi4CmnlqDEOAEQtAlrztURXZvc5b5Jw3\nERHlTMZh8zVr1mDkyJFO4H7iiSfQ1tYGALj99tvTPu/NN9/Etm3b8NRTT2H58uVYvny55/t33303\nLr/8cjz77LOQJAlffvllT95Hr7CXihmqBEO39u6OKwAESJLgqTZP7rbGYXMiIsqVjMH7e9/7Hurr\n653bkUgEt9xyS8YXXrduHebOnQsAGDNmDJqamtDa2goA0HUdb7/9Nk466SQAwLJly1BTU9OtN9Cb\n7DlvXXNl3roMSTSryd0BO3nZGIfNiYgoVzIG78bGRixatMi5fdlll6G5uTnjC9fX16OiosK5XVlZ\nibq6OgDA/v37UVxcjLvuugsXXHAB7r///u4ce6+z57x1VYSzFEyTnEDtnvNOzrwZvImIKFcyznnH\n43Fs3boVY8aMAQBs2rQJ8Xg8w7M6MgzD8/WePXuwaNEi1NbW4qqrrsLLL7+ME044Ie3zKyqCkGXp\ngH9uZ6qqQp7bwqfmMUqiD7D28jZ0CQFFQlVVCEUlifddFFDgXMIYAgYOKO7weoWiUN93b+I57Dme\nw97B89hzuTiHGYP39773PSxevBgtLS3QNA2VlZW49957M75wdXW1Z7h97969qKqqAgBUVFSgpqYG\nw4cPBwDMnj0bn376aafBu6GhPePPPBBVVSHU1bV47mtpN39GuN2A4Lf28tYlSKKAuroWxOJa4sGu\nixEYApoa2+EvwOQ71XmkA8Nz2HM8h72D57HnevscprsQyDhsPm3aNKxduxbPP/881q5dixdffLFL\nmfecOXOwdu1aAMDmzZtRXV2NkpISAIAsyxg2bJizTnzz5s0YNWpUV99L1tjV5mocTuYNXXIqy93z\n3JJnqRg7rBERUe5kzLxbW1vx5z//GQ0NDQDMYfRVq1bhtdde6/R5M2bMwOTJk7FgwQIIgoBly5Zh\n9erVCIVCmDdvHpYuXYolS5bAMAyMHz/eKV7rS1E9BlmUoaqAPedtqDJ8VtB2B2jJ9bVhCDBARESU\nGxmD9w033ICamhq89tpr+MpXvoLXX38dP/jBD7r04jfdd
JPn9oQJE5yvR4wYgSeeeOLAjjbL4loc\nPlFBXNUhfjEdyvCPEd5+CJSqjgMU7gI1QTAYvImIKGcyDptHo1HceeedqK2txa233orHHnsML774\nYi6OLeeiWgw+yYeYqkNRy1C291hA9UNJUSgnJbdKNRi+iYgoNzIG73g8jvb2dui6joaGBpSXl2P7\n9u25OLaci2kx+CQz81ZkCbpuBmR3NzWbuymLJAuoCAVydpxERFTYMg6bn3XWWXj66adx7rnn4rTT\nTkNlZSVGjBiRi2PLuZgeQ7lYigZVQzCgQNV0AHDmvN3cwfvsY0alDPBERETZkDF42wVngLmka9++\nfZg4cWLWDyzXDMNATIvDJ/kQ13Qosoj2iAogc+bNGW8iIsqljOmiu7vaoEGDMGnSJCeYH0xUXYUB\nwwzeqg6fLELVzcxbSbEVqOgJ3kRERLmTMfOeOHEifvKTn2D69OlQFMW5f/bs2Vk9sFyLWq1RFVGB\nqhlQZBGaZs15KykK1kT3rmIM30RElDsZg/eHH34IAHjrrbec+wRBOOiCt92gRbY28VZkyZnzTpV5\ne4fN9RwcIRERkSlj8H788cdzcRx9zt4ONBG8RSd4y3IiUFeVB1DXGPEOmzPzJiKiHMoYvC+88MKU\nc9wrV67MygH1leTM2yeLUK1hc9k1RL78yqMQi2t4+p9bnftYsEZERLnUpQ5rtng8jvXr1yMYDGb1\noPqCvZe3CHN+293HXHb1MZcl0dkaVI8UQQyEUSQX5fBIiYio0GUM3rNmzfLcnjNnDq688sqsHVBf\nienmsLmExLC5rUM3Nfs5Hx+B4NCdOPb4g2v+n4iI+reMwTu5m9quXbvw+eefZ+2A+krMybzNU+Ju\nzCKLqZbGGTCixZD3TIFPUlJ8n4iIKDsyBu9LLrnE+VoQBJSUlOC6667L6kH1BSd4GzIA3ZN5y510\nTzv4VrwTEVF/lzF4/+Mf/4Cu6xCtoq14PO5Z732wiOnu4B3zbEYipxk2JyIi6gsZo9LatWuxePFi\n5/ZFF12El156KasH1RfsgjUY5ilxr+2WUgybc3UYERH1lYzB+9FHH8WPf/xj5/bvfvc7PProo1k9\nqL4Qt9Z5Q7fmvBV3wVr6wfGDsVUsERH1bxmDt2EYCIVCzu2SkpKDMmBFtCgAQLCCtzvzdq/ztjHx\nJiKivpJxznvKlCm44YYbMGvWLBiGgVdffRVTpkzJxbHllB287cxbUdzrvDnnTURE/UfG4H3bbbdh\nzZo12LBhAwRBwJlnnolTTjklF8eWU1HVCt6anXm7C9YOvpEGIiLKXxmDdzgchqIouP322wEATzzx\nBMLhMIqLi7N+cLlkZ96GZgZt91KxUNDX4fFVZQEAQG3VwXUeiIio/8s4Hnzrrbeivr7euR2JRHDL\nLbdk9aD6gp1566oZvH2yiOVXHolLTjkEIwaHOjz+lCOH44KTx+HKMybl9DiJiIgyBu/GxkYsWrTI\nuX3ZZZehubk5qwfVFyJaBIqoQNPM24osYsiAYhx/WG3KxyuyhHkzh6XMyomIiLIpY/COx+PYujWx\ng9bGjRsRj8ezelB9IaJFEZD8iKvWHt6ddFUjIiLqSxnnvL/3ve9h8eLFaGlpga7rqKiowL333puL\nY8upqBpFQPYjxuBNRET9XMYINW3aNKxduxarVq3CkiVLUF1djWuuuSYXx5ZTyZm3z9UelYiIqD/J\nmHm/9957WL16NV544QXouo4f/ehHmD9/fi6OLWd0Q0dUi8Ev+xHXmHkTEVH/ljZC/eY3v8Fpp52G\nG2+8EZWVlVi1ahWGDx+O008//aDbmMTuax6Q/IjHzYo1Bm8iIuqv0mbeDz74IMaOHYs77rgDRx11\nFICDt4931FrjHZADaGPmTURE/Vza4P3yyy/jT3/6E5YtWwZd13H22WcflFXmABCx1nj7JbNgTRBS\n7yRGRETUH6RNL6uqqnDVVVdh7dq1WLFiBb744gvs3LkTV199NV555ZVcHmPWOZm3VbDmk6WDdpSB\niIjyX5fGhmfOnIm7774br776Kk444QT8/Oc/z/Zx5VRYjQCAWbCm6hwyJyKifu2AolRJSQkWLFiA\np59+OlvH0ye8mbfG4E1ERP0aoxSA9ngYAFAkB9DcHkdx4OCqpiciooMLgzeAdtUM3qLuRzSmoao8\n0MdHRERElB6DN4D2eDsAIBoxT0dVeVFfHg4REVGnGLwBtFmZd6SNwZuIiPo/Bm8kMu+WVvM2gzcR\nEfVnDN5IzHk3NZnd1TjnTURE/RmDN4C2eDsUUUFLmxm8K0L+Pj4iIiKi9Bi8YQ6bFytBRGPmpiQ+\nhduBEhFR/8XgDXPYPCgXIRLX4FNEiGyNSkRE/VjBB2/d0BFWIwgqRYjFNfiZdRMRUT9X8ME7rEZg\nwECxHEQ6NoGoAAAYmElEQVSUwZuIiPIAg7ddad6so6E5Cr+PwZuIiPq3gg/eMc3co3zL9jYYADNv\nIiLq9wo+eMd1M3gbunkqGLyJiKi/Y/DWVfMLBm8iIsoTDN5W5g3dDNo+peBPCRER9XMFH6ni1pw3\nDPNUBFiwRkRE/RyDtzPnbWfeDN5ERNS/MXhzzpuIiPIMg7cz583gTURE+YHBW/MOmzN4ExFRf5fV\n4L1ixQqcf/75WLBgATZs2JDyMffffz8WLlyYzcPolDNsbhWsiSI3JSEiov4ta8H7zTffxLZt2/DU\nU09h+fLlWL58eYfHbNmyBf/5z3+ydQhdkrxUTNP0PjwaIiKizLIWvNetW4e5c+cCAMaMGYOmpia0\ntrZ6HnP33XfjxhtvzNYhdEksqcOapht9eThEREQZZS1419fXo6KiwrldWVmJuro65/bq1asxa9Ys\n1NbWZusQukR1qs3NzLu4SOnDoyEiIspMztUPMoxERtvY2IjVq1fj0UcfxZ49e7r0/IqKIGS5d4vJ\nqqpCkD63bugi5h85Al89aTwkznsfkKqqUF8fQt7jOew5nsPewfPYc7k4h1kL3tXV1aivr3du7927\nF1VVVQCA9evXY//+/bjooosQi8XwxRdfYMWKFVi6dGna12toaO/V46uqCqGurgXN7ebrGrqEuTNq\nsH9fa4Znkpt9Hqn7eA57juewd/A89lxvn8N0FwJZGzafM2cO1q5dCwDYvHkzqqurUVJSAgA45ZRT\n8MILL+Dpp5/Gz372M0yePLnTwJ1NqqvaXBILfuUcERHlgaxl3jNmzMDkyZOxYMECCIKAZcuWYfXq\n1QiFQpg3b162fuwBi7matMgSh8uJiKj/y+qc90033eS5PWHChA6PGTp0KB5//PFsHkannI1JdAmy\nxMybiIj6v4KPVqquWg1aBBaqERFRXij44B3T4xAMs4qdmTcREeWDgo9WcSt4CwJboxIRUX5g8NZU\nCKw0JyKiPFLwESuuxwFDYqU5ERHlDQZvPW4tEyv4U0FERHmioCOWYRiIaXFAl1lpTkREeaOgg7dq\naDBgwGCDFiIi
yiMFHbxjWsz8QpcgcdiciIjyREFHLDt4G5rEYXMiIsobhR28rb7mhsaCNSIiyh8F\nHbHszFtn5k1ERHmkwIM3M28iIso/BR2xYnoi82a1ORER5YvCDt4sWCMiojxU4MHb3stb5FIxIiLK\nGwUdsRLrvGXOeRMRUd4o6IjlLBXTRQ6bExFR3ijs4O3qsMaCNSIiyhcM3gCgSdzPm4iI8kZBR6zE\nsLkERSnoU0FERHmkoCOWe9hcYcEaERHliYKOWFFnqZgEHzNvIiLKEwUdseJWhzWDmTcREeWRgo5Y\nMVfmrchS3x4MERFRFxV08I46c94iFLmgTwUREeWRgo5Yqq5CggRAgI/Bm4iI8kRBRyzVUCEKMgAw\n8yYiorxR0BErrschwpzrZvAmIqJ8UdARK66pruDNgjUiIsoPBR28VUOFYJingJk3ERHli4KOWKqu\nQrAybxasERFRvijoiBXXVQgG57yJiCi/FGzEMgzDzLw5bE5ERHmmYCOWqqvmF8y8iYgozxRsxIpr\nVvDW7cyb1eZERJQfCjd423t5W8PmLFgjIqJ8UbARy868DZ1z3kRElF8KNmLFrMwbmghBACRR6NsD\nIiIi6qKCDd6qlXnrugBFFiEIDN5ERJQfCjZ423t5G5oIRSrY00BERHmoYKOWXbCmaQJ8CivNiYgo\nfxRu8LaHzTWBmTcREeWVgo1acd0O3iIUpWBPAxER5aGCjVpxa85bUwXIYsGeBiIiykMFG7Xcw+ay\nzEpzIiLKH4UbvK2CNV0TITHzJiKiPFKwUcvpbW6IkCVm3kRElD8KN3jbvc11ETKrzYmIKI8UbNSy\nm7TAENkalYiI8krBBm9nP29dhMTMm4iI8kjBRq2Ys6uYBJmZNxER5ZGCDd5x97A5C9aIiCiPyNl8\n8RUrVuD999+HIAhYunQppk6d6nxv/fr1eOCBByCKIkaNGoXly5dDzOGSrYgaNb/QJBasERFRXsla\n1HrzzTexbds2PPXUU1i+fDmWL1/u+f4dd9yBhx56CE8++STa2trw6quvZutQUgqrEQCAocssWCMi\norySteC9bt06zJ07FwAwZswYNDU1obW11fn+6tWrMXjwYABAZWUlGhoasnUoKUXiZvCGJjPzJiKi\nvJK1qFVfX4+KigrndmVlJerq6pzbJSUlAIC9e/fi9ddfx/HHH5+tQ0kprEYhQLCqzZl5ExFR/sjq\nnLebYRgd7tu3bx+uvvpqLFu2zBPoU6moCEKWe2/f7XA8Ap/kRzsElJYEUFUV6rXXLjQ8dz3Hc9hz\nPIe9g+ex53JxDrMWvKurq1FfX+/c3rt3L6qqqpzbra2tuPLKK3HDDTfgmGOOyfh6DQ3tvXp8YTUC\nBQoAIBqNo66upVdfv1BUVYV47nqI57DneA57B89jz/X2OUx3IZC1YfM5c+Zg7dq1AIDNmzejurra\nGSoHgLvvvhuXXHIJjjvuuGwdQqci8QgU0QcALFgjIqK8krXMe8aMGZg8eTIWLFgAQRCwbNkyrF69\nGqFQCMcccwyee+45bNu2Dc8++ywA4IwzzsD555+frcPpIKxGUSGXAgAL1oiIKK9kdc77pptu8tye\nMGGC8/WmTZuy+aM7FddVqLoKQTffPoM3EVHfevnlv+OEE07u0mN/8pP7ce65C1BTU5vlo+q/CjJq\nRa0GLbv2xgBw2JyIqC/t2vUl/va3tV1+/PXXf7egAzeQw2rz/iSimcHb0MzqdS4VIyLqOw88cA8+\n/HAzHn30N9B1HV9+uRO7dn2JBx/8Be66607U1e1FOBzG5ZdfhTlzjsV1112F73znFvzzn39HW1sr\nvvhiG3bu3IFvf/u7mD17jvO6qqpi+fIfdHj+J598hPvvvweiKGDKlGm49trrU95n/5zRo8di1aqn\n0NjYiOnTD8eTT/4v2tvbcd11N+Ldd9/Gyy//HbquY/bsObj11u+ipaUFd955G9ra2lBSUoI77vgf\nXH75Rfj9759AMBjEhg3v4cknV2LFih93+5wVZPCOWsEb9rB5DtuyEhH1Z0//Ywv+89HeXn3NmROq\ncd5JY9N+/4ILFmL16qdx2WVX4pFHHoaqxvGLX/wWDQ37MWvWUTj11DOwc+cO3H77EsyZc6znuXv3\n7sF99z2E9ev/jT//eZUneLe0NKd8/oMP3oebb16KsWPH4Uc/ugO7d+9KeV86W7duwRNPrIbP58O7\n776NX/zitxBFEeeddxauvfabeOKJxzFr1myce+4CPPXUSrzzzls47rgT8dpr/8L8+afgtddewbx5\nX+nROS3I4G33NTc08+0z8yYi6j8mTpwMAAiFSvHhh5uxZs1qCIKI5uamDo+dOvUwAObyZHcXz86e\n/8UX2zB27DgAwO2335n2vnTGjh0Hn89crRQIBHDddVdBkiQ0NjaisbERn3zyEa644hoAwPnnXwQA\nqKmpxW9/+0vMn38K3n33bXzjG1cf+IlxKczgrSU2JQFYsEZEZDvvpLGdZsm5oChmD46//vUlNDc3\n4+c//y2am5txxRULOzxWkhLNu5KbgaV7fqpNsFLdJwiJxE5V1Q7Ht3v3Ljz11Er87ncrEQwGsXDh\nedZrSTAM3fNaY8eOw759+/Dhh5sxatQY+P3+zk9CBgUZtSL2piR25s2CNSKiPiOKIjRN63B/Y2Mj\nhgypgSiKeOWVfyAejx/Q66Z7/siRo7B5s7ni6a677sR///t5yvuKi4uxb5/ZbGzjxvdTvn5FRQWC\nwSA+/vgj7N69G/F4HBMnTsLbb/8HAPDcc6vw4ot/AQCcdNI8PPDAPZg375QDeh+pFGTwthlx88qH\nmTcRUd8ZMWIUPv74Izz00P2e+0844ST8+9+v4vrrr0FRURGqq6vx6KO/6fLrpnv+9dffhJ/97P/D\nNdd8A6FQKUaOHJXyvjPPPAf3338vbr75egwcWNXh9ceNG4+ioiCuueZy/P3v/4ezzjoHP/zhD3Hu\nuRdg06YNuO66q/Dvf7+G448/EQBw8snzsHfvXhx++MyenTAAgpGq6Xg/1Jvt5uJaHNf89hnojdWA\nIeKWC6ZjwojOe6tTamyn2HM8hz3Hc9g7eB57rrNz+Pzza7B79y584xvfPKDXS6Ug57wVSYHeMNi5\nzcybiIiy6Z57/gdffrkTd911X6+8XkEG72SsNiciomy69dbbevX1CjLl1HXvTAEL1oiIKJ8UZPCO\nxr1VjRw2JyKifFKQUSvWIXgz8yYiovxRkME7OfOW2B6ViIjySEFGrWjc2/mGmTcRUd96+eW/H/Bz\n3nvvHTQ07M/C0fR/hRm8Y0mZN+e8iYj6zIFuCWp7/vk1BRu8C3KpWMdhc2beRER9xb0l6PnnX4gV\nK36IlpYWaJqGG264GWPHjsP//u/v8cor/4Qoipgz51hMnDgJr776Mj7//DP8z//ci8GDzd4dfbEN\n6OWXX+VsAxqLReD3F2VlG1A3Bm+w2pyIyLZ6y1/w7t6Nvfqa06sPxTljz
0j7ffeWoL///W9x5JFH\n4//9v6/i888/w09+ch8efPAXePLJ/8Vzz70ESZLw3HOrMHPmURg7djy+851bnMAN9M02oOeff6Gz\nDejixVfiZz/7VVa2AXVj8AabtBAR9RcbN25AY2MD1q59AQAQjZobSZ1wwsm44YbFmDfvFMyfn35j\nj77YBrS5uTkn24C6FWTwrgz54ZNF6IYBVTMgCgzeREQAcM7YMzrNkrNNUWTceOPNmDJlquf+m276\nHrZt+y/+8Y+/4lvf+iZ+/es/pHz+wbwNqOfYe+2V8sghwyvw1IrT8fBNJ+DXN5/Q14dDRFTQ3FuC\nTpo0Bf/618sAgM8//wxPPvm/aG1txaOP/gYjRozEZZddiVCoDO3tbSm3Ej2YtwH1nLNefbU8Iksi\nBEHgfDcRUR9zbwn69a+fj507t2Px4itwzz3/g8MOm4GSkhI0NjbgyisX4dvfvhqTJ09BaWkZDjts\nBm677VZ89tlW57X6YhvQ+++/x9kGdOHChVnbBtStILcEBbj1XW/heew5nsOe4znsHTyPPZd8Druz\nDWjy66VSkHPeRERE2dbb24C6MXgTERFlQW9vA+rGCV8iIqI8w+BNRESUZxi8iYiI8gyDNxERUZ5h\n8CYiIsozDN5ERER5hsGbiIgozzB4ExER5Zm8aY9KREREJmbeREREeYbBm4iIKM8weBMREeUZBm8i\nIqI8w+BNRESUZxi8iYiI8kxB7ue9YsUKvP/++xAEAUuXLsXUqVP7+pD6tU8++QSLFy/GpZdeiosv\nvhi7du3CLbfcAk3TUFVVhR//+Mfw+XxYs2YN/vCHP0AURZx33nk499xz+/rQ+417770Xb7/9NlRV\nxTe/+U0ceuihPIcHIBwOY8mSJdi3bx+i0SgWL16MCRMm8Bx2UyQSwRlnnIHFixdj9uzZPI8H4I03\n3sD111+PcePGAQDGjx+PK664Ivfn0Cgwb7zxhnHVVVcZhmEYW7ZsMc4777w+PqL+ra2tzbj44ouN\n2267zXj88ccNwzCMJUuWGC+88IJhGIZx//33GytXrjTa2tqM+fPnG83NzUY4HDZOP/10o6GhoS8P\nvd9Yt26dccUVVxiGYRj79+83jj/+eJ7DA/T8888bv/71rw3DMIwdO3YY8+fP5znsgQceeMA455xz\njFWrVvE8HqD169cb3/rWtzz39cU5LLhh83Xr1mHu3LkAgDFjxqCpqQmtra19fFT9l8/nw29+8xtU\nV1c7973xxhs4+eSTAQAnnngi1q1bh/fffx+HHnooQqEQAoEAZsyYgXfeeaevDrtfmTlzJn7yk58A\nAEpLSxEOh3kOD9Bpp52GK6+8EgCwa9cuDBo0iOewm7Zu3YotW7bghBNOAMD/z72hL85hwQXv+vp6\nVFRUOLcrKytRV1fXh0fUv8myjEAg4LkvHA7D5/MBAAYMGIC6ujrU19ejsrLSeQzPa4IkSQgGgwCA\nZ599FscddxzPYTctWLAAN910E5YuXcpz2E333HMPlixZ4tzmeTxwW7ZswdVXX40LLrgAr7/+ep+c\nw4Kc83Yz2B22R9KdP57Xjv72t7/h2Wefxe9+9zvMnz/fuZ/nsOuefPJJfPjhh7j55ps954fnsGue\ne+45HHbYYRg2bFjK7/M8ZjZy5Ehcd911OPXUU7F9+3YsWrQImqY538/VOSy44F1dXY36+nrn9t69\ne1FVVdWHR5R/gsEgIpEIAoEA9uzZg+rq6pTn9bDDDuvDo+xfXn31VfzqV7/Cb3/7W4RCIZ7DA7Rp\n0yYMGDAAQ4YMwcSJE6FpGoqLi3kOD9DLL7+M7du34+WXX8bu3bvh8/n4t3iABg0ahNNOOw0AMHz4\ncAwcOBAbN27M+TksuGHzOXPmYO3atQCAzZs3o7q6GiUlJX18VPnl6KOPds7h//3f/+HYY4/FtGnT\nsHHjRjQ3N6OtrQ3vvPMOjjjiiD4+0v6hpaUF9957Lx5++GGUl5cD4Dk8UG+99RZ+97vfATCnvtrb\n23kOu+HBBx/EqlWr8PTTT+Pcc8/F4sWLeR4P0Jo1a/DII48AAOrq6rBv3z6cc845OT+HBbmr2H33\n3Ye33noLgiBg2bJlmDBhQl8fUr+1adMm3HPPPdi5cydkWcagQYNw3333YcmSJYhGo6ipqcFdd90F\nRVHw0ksv4ZFHHoEgCLj44otx5pln9vXh9wtPPfUUfvrTn2LUqFHOfXfffTduu+02nsMuikQi+P73\nv49du3YhEonguuuuw5QpU3DrrbfyHHbTT3/6U9TW1uKYY47heTwAra2tuOmmm9Dc3Ix4PI7rrrsO\nEydOzPk5LMjgTURElM8KbticiIgo3zF4ExER5RkGbyIiojzD4E1ERJRnGLyJiIjyTME1aSHKN/fe\ney82btyIaDSKDz74ANOnTwcAfO1rX8NXv/rVLr3Gr3/9a4wfP97pZ53KwoUL8fvf/x6SJPXGYXvs\n2bMHn332GWbPnt3rr01UiLhUjChP7NixAxdeeCH+9a9/9fWhHLA1a9Zg69atuPHGG/v6UIgOCsy8\nifLYT3/6U+zYsQNffvklbr31VkQiEdx3333w+XyIRCJYtmwZJk+ejCVLluDwww/H7Nmzcc011+CY\nY47Bhg0b0NbWhocffhiDBg3CIYccgs2bN+OXv/wlGhsbsXv3bmzbtg1HHnkkbr/9dkSjUdx6663Y\nuXMnBg8eDEmSMGfOHM8exW1tbfjud7+L5uZmqKqKE088EWeccQYefPBBGIaB8vJyXHTRRbjzzjux\nbds2tLW14YwzzsDll1+O1atX469//SsEQcCePXswevRorFixAoqi9OEZJuqfOOdNlOd27NiBxx57\nDFOmTEFjYyN+8IMf4LHHHsOiRYvw8MMPd3j81q1bcc4552DlypWYOHEiXnzxxQ6P+eCDD/DQQw/h\n2WefxerVq9HU1IQ1a9ZAVVU888wzuOOOO/D66693eN6///1vqKqKP/7xj3jyyScRDAZRW1uLs88+\nG2eeeSYuu+wyPPbYY6iursbjjz+OZ555Bs8//zw++ugjAMDGjRv///bu2CW1MIzj+NcONQQRQi3W\nYnBsjDoSBFKNOVaEo0M4REO4HGyrKQin5ob+gDBaoiVyECEipakhWkKkQKFoiERPd5DOzYxLlysX\njvw+4+F5X97tx/PyHh7S6TSHh4eUy2VP3jKI/A/qvEU8bmJiAp/PB8DQ0BC7u7u8vb3x8vLC4OBg\nW73f78c0TQACgQBPT09tNZZlYRgGhmHg9/t5fn7m5uaG6elpAIaHh7Esq23d1NQUe3t7bGxsMDc3\nx8rKCj09rT3CxcUFDw8PXF5eAlCr1bi/v3fXf4xPnZyc5O7uzp2TLCK/KbxFPO7ztbJt22xvbzMz\nM8P5+bk7zOOzrw/Svnv28l2N4zgtQfw1lKE5y/j4+JhiscjZ2RnLy8scHR211PT19bG+vs7CwkLL\n90wmg+M4fzyXiDTp2lyki1Qq
FUzTpNFocHp6Sq1W69jeY2NjFItFAKrVKldXV201uVyObDaLZVnY\ntk1/fz/VahWfz0e9XgeaXf3HVb3jOOzs7Ljd//X1Na+vr7y/v1MoFBgfH+/Y+UW6iTpvkS6SSCSI\nx+MEAgFWV1exbZuDg4OO7L20tEQ2myUWizE6Oko4HG7r0IPBIKlUiv39fQzDIBKJMDIyQjgcJplM\n0tvby9raGre3t8RiMRqNBvPz8+6o1FAoxObmJqVSCdM0iUQiHTm7SLfRr2Ii8iOPj48UCgWi0SiO\n47C4uMjW1pb73/m/ymQy5PN50ul0R/YT6WbqvEXkRwYGBjg5OXHnE8/OznYsuEXk76jzFhER8Rg9\nWBMREfEYhbeIiIjHKLxFREQ8RuEtIiLiMQpvERERj1F4i4iIeMwvRph4T/csGFUAAAAASUVORK5C\nYII=\n",
-            "text/plain": [
-              "<matplotlib.figure.Figure at 0x7f72f867ef90>"
-            ]
-          },
-          "metadata": {
-            "tags": []
-          }
-        }
+        "     test_accuracies) = sess.run(all_losses)\n",
+        "    \n",
+        "    plot(train_losses, test_losses, 'loss')\n",
+        "    plot(train_accuracies, test_accuracies, 'accuracy')"
       ]
     },
     {
-      "metadata": {
-        "id": "HNqUFL4deCsL",
-        "colab_type": "text"
-      },
       "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "HNqUFL4deCsL"
+      },
       "source": [
         "# 4. Case study: building an RNN\n"
       ]
     },
     {
-      "metadata": {
-        "id": "YkC1k4HEQ7rw",
-        "colab_type": "text"
-      },
       "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "YkC1k4HEQ7rw"
+      },
       "source": [
         "In this exercise we build and train a model similar to the RNNColorbot model that was used in the main Eager notebook. The model is adapted for converting and training in graph mode."
       ]
     },
     {
-      "metadata": {
-        "id": "7nkPDl5CTCNb",
-        "colab_type": "text"
-      },
       "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "7nkPDl5CTCNb"
+      },
       "source": [
         "To get started, we load the colorbot dataset. The code is identical to that used in the other exercise and its details are unimportant."
       ]
     },
     {
+      "cell_type": "code",
+      "execution_count": 0,
       "metadata": {
-        "id": "A0uREmVXCQEw",
-        "colab_type": "code",
         "colab": {
           "autoexec": {
             "startup": false,
             "wait_interval": 0
           }
-        }
+        },
+        "colab_type": "code",
+        "id": "A0uREmVXCQEw"
       },
-      "cell_type": "code",
+      "outputs": [],
       "source": [
         "def parse(line):\n",
         "  \"\"\"Parses a line from the colors dataset.\n",
@@ -1137,7 +1034,7 @@
         "    A tuple of three tensors (rgb, chars, length), of shapes: (batch_size, 3),\n",
         "    (batch_size, max_sequence_length, 256) and respectively (batch_size).\n",
         "  \"\"\"\n",
-        "  items = tf.string_split([line], \",\").values\n",
+        "  items = tf.string_split(tf.expand_dims(line, 0), \",\").values\n",
         "  rgb = tf.string_to_number(items[1:], out_type=tf.float32) / 255.0\n",
         "  color_name = items[0]\n",
         "  chars = tf.one_hot(tf.decode_raw(color_name, tf.uint8), depth=256)\n",
@@ -1169,23 +1066,21 @@
         "  dataset = dataset.repeat()\n",
         "  if training:\n",
         "    dataset = dataset.shuffle(buffer_size=3000)\n",
-        "  dataset = dataset.padded_batch(batch_size, padded_shapes=([None], [None, None], []))\n",
+        "  dataset = dataset.padded_batch(batch_size, padded_shapes=((None,), (None, None), ()))\n",
         "  return dataset\n",
         "\n",
         "\n",
         "train_url = \"https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/extras/colorbot/data/train.csv\"\n",
         "test_url = \"https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/extras/colorbot/data/test.csv\"\n",
         "data_dir = \"tmp/rnn/data\""
-      ],
-      "execution_count": 0,
-      "outputs": []
+      ]
     },
     {
-      "metadata": {
-        "id": "waZ89t3DTUla",
-        "colab_type": "text"
-      },
       "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "waZ89t3DTUla"
+      },
       "source": [
         "Next, we set up the RNNColobot model, which is very similar to the one we used in the main exercise.\n",
         "\n",
@@ -1193,17 +1088,19 @@
       ]
     },
     {
+      "cell_type": "code",
+      "execution_count": 0,
       "metadata": {
-        "id": "9v8AJouiC44V",
-        "colab_type": "code",
         "colab": {
           "autoexec": {
             "startup": false,
             "wait_interval": 0
           }
-        }
+        },
+        "colab_type": "code",
+        "id": "9v8AJouiC44V"
       },
-      "cell_type": "code",
+      "outputs": [],
       "source": [
         "def model_components():\n",
         "  lower_cell = tf.contrib.rnn.LSTMBlockCell(256)\n",
@@ -1227,12 +1124,13 @@
         "  Returns:\n",
         "    A Tensor of shape (max_sequence_length, batch_size, output_size).\n",
         "  \"\"\"\n",
-        "  hidden_outputs = []\n",
-        "  autograph.utils.set_element_type(hidden_outputs, tf.float32)\n",
+        "  hidden_outputs = tf.TensorArray(tf.float32, size=0, dynamic_size=True)\n",
         "  state, output = cell.zero_state(batch_size, tf.float32)\n",
+        "  initial_state_shape = state.shape\n",
+        "  initial_output_shape = output.shape\n",
         "  n = tf.shape(chars)[0]\n",
         "  i = 0\n",
-        "  while i < n:\n",
+        "  while i \u003c n:\n",
         "    ch = chars[i]\n",
         "    cell_output, (state, output) = cell.call(ch, (state, output))\n",
         "    hidden_outputs.append(cell_output)\n",
@@ -1261,50 +1159,51 @@
         "    A Tensor of shape (batch_size, 3) - the model predictions.\n",
         "  \"\"\"\n",
         "  (chars, length) = inputs\n",
-        "  chars_time_major = tf.transpose(chars, [1, 0, 2])\n",
+        "  chars_time_major = tf.transpose(chars, (1, 0, 2))\n",
         "  chars_time_major.set_shape((None, batch_size, 256))\n",
         "\n",
         "  hidden_outputs = rnn_layer(chars_time_major, lower_cell, batch_size, training)\n",
         "  final_outputs = rnn_layer(hidden_outputs, upper_cell, batch_size, training)\n",
         "\n",
         "  # Grab just the end-of-sequence from each output.\n",
-        "  indices = tf.stack([length - 1, range(batch_size)], axis=1)\n",
+        "  indices = tf.stack((length - 1, range(batch_size)), axis=1)\n",
         "  sequence_ends = tf.gather_nd(final_outputs, indices)\n",
+        "  sequence_ends.set_shape((batch_size, 128))\n",
         "  return relu_layer(sequence_ends)\n",
         "\n",
         "def loss_fn(labels, predictions):\n",
         "  return tf.reduce_mean((predictions - labels) ** 2)"
-      ],
-      "execution_count": 0,
-      "outputs": []
+      ]
     },
     {
-      "metadata": {
-        "id": "JjK4gXFvFsf4",
-        "colab_type": "text"
-      },
       "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "JjK4gXFvFsf4"
+      },
       "source": [
         "The train and test functions are also similar to the ones used in the Eager notebook. Since the network requires a fixed batch size, we'll train in a single shot, rather than by epoch."
       ]
     },
     {
+      "cell_type": "code",
+      "execution_count": 0,
       "metadata": {
-        "id": "ZWQMExk0S6X6",
-        "colab_type": "code",
         "colab": {
           "autoexec": {
             "startup": false,
             "wait_interval": 0
           }
-        }
+        },
+        "colab_type": "code",
+        "id": "ZWQMExk0S6X6"
       },
-      "cell_type": "code",
+      "outputs": [],
       "source": [
         "def train(optimizer, train_data, lower_cell, upper_cell, relu_layer, batch_size, num_steps):\n",
         "  iterator = train_data.make_one_shot_iterator()\n",
         "  step = 0\n",
-        "  while step < num_steps:\n",
+        "  while step \u003c num_steps:\n",
         "    labels, chars, sequence_length = iterator.get_next()\n",
         "    predictions = model((chars, sequence_length), lower_cell, upper_cell, relu_layer, batch_size, training=True)\n",
         "    loss = loss_fn(labels, predictions)\n",
@@ -1319,7 +1218,7 @@
         "  total_loss = 0.0\n",
         "  iterator = eval_data.make_one_shot_iterator()\n",
         "  step = 0\n",
-        "  while step < num_steps:\n",
+        "  while step \u003c num_steps:\n",
         "    labels, chars, sequence_length = iterator.get_next()\n",
         "    predictions = model((chars, sequence_length), lower_cell, upper_cell, relu_layer, batch_size, training=False)\n",
         "    total_loss += loss_fn(labels, predictions)\n",
@@ -1340,16 +1239,14 @@
         "  # Here, we create a no_op that will drive the execution of all other code in\n",
         "  # this function. Autograph will add the necessary control dependencies.\n",
         "  return tf.no_op()"
-      ],
-      "execution_count": 0,
-      "outputs": []
+      ]
     },
     {
-      "metadata": {
-        "id": "iopcs5hXG2od",
-        "colab_type": "text"
-      },
       "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "iopcs5hXG2od"
+      },
       "source": [
         "Finally, we add code to run inference on a single input, which we'll read from the input.\n",
         "\n",
@@ -1357,17 +1254,19 @@
       ]
     },
     {
+      "cell_type": "code",
+      "execution_count": 0,
       "metadata": {
-        "id": "DyU0wnnAFEYj",
-        "colab_type": "code",
         "colab": {
           "autoexec": {
             "startup": false,
             "wait_interval": 0
           }
-        }
+        },
+        "colab_type": "code",
+        "id": "DyU0wnnAFEYj"
       },
-      "cell_type": "code",
+      "outputs": [],
       "source": [
         "@autograph.do_not_convert(run_as=autograph.RunMode.PY_FUNC)\n",
         "def draw_prediction(color_name, pred):\n",
@@ -1389,16 +1288,14 @@
         "  draw_prediction(color_name, pred)\n",
         "  # Create an op that will drive the entire function.\n",
         "  return tf.no_op()"
-      ],
-      "execution_count": 0,
-      "outputs": []
+      ]
     },
     {
-      "metadata": {
-        "id": "Nt0Kv5OCHip0",
-        "colab_type": "text"
-      },
       "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "Nt0Kv5OCHip0"
+      },
       "source": [
         "Finally, we put everything together.\n",
         "\n",
@@ -1406,56 +1303,543 @@
       ]
     },
     {
+      "cell_type": "code",
+      "execution_count": 22,
       "metadata": {
-        "id": "-GmWa0GtYWdh",
-        "colab_type": "code",
         "colab": {
           "autoexec": {
             "startup": false,
             "wait_interval": 0
           },
-          "output_extras": [
-          ],
-          "base_uri": "https://localhost:8080/",
-          "height": 668
+          "height": 415
         },
-        "outputId": "61f4af1d-c81e-44db-9079-1a7b8ed8ce58",
+        "colab_type": "code",
         "executionInfo": {
+          "elapsed": 15536,
           "status": "ok",
-          "timestamp": 1522345877153,
-          "user_tz": 240,
-          "elapsed": 75500,
+          "timestamp": 1531750946373,
           "user": {
-            "displayName": "Dan Moldovan",
-            "photoUrl": "//lh5.googleusercontent.com/-Rneh8xjecyk/AAAAAAAAAAI/AAAAAAAACB4/c5vwsJpbktY/s50-c-k-no/photo.jpg",
-            "userId": "112023154726779574577"
-          }
-        }
+            "displayName": "",
+            "photoUrl": "",
+            "userId": ""
+          },
+          "user_tz": 240
+        },
+        "id": "-GmWa0GtYWdh",
+        "outputId": "2e7a9856-9809-43a3-8b43-3c8514ea43e9"
       },
-      "cell_type": "code",
+      "outputs": [
+        {
+          "name": "stdout",
+          "output_type": "stream",
+          "text": [
+            "Test loss 0.138294\n",
+            "Colorbot is ready to generate colors!\n",
+            "\n",
+            "\n"
+          ]
+        },
+        {
+          "data": {
+            "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQwAAAENCAYAAAD60Fs2AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAACL9JREFUeJzt3F+IlXUex/Gv2ziiBRGVOQaFd2JBzOg5aiH+IZGoJgmM\n/uhVGIlgFE0QEYHdFQaRGBJ10VX0D5TAi8jKomGmILsYjEAkmBwbRIxKGDV/e7G7w8ouux9jd911\nX6+rcx6e85zveS7e/J7zb0ZrrRVA4A8XewDgf4dgADHBAGKCAcQEA4gJBhATDC6Kp59+urrdbt13\n3301OjpaK1euvNgjERCMS9yaNWtqeHj4Yo9xnq+++qqGh4frs88+q7fffruqqmbMmHGRpyIhGPxH\n/fbbb/XDDz/U9ddfX7NmzbrY43CBBOMS9tRTT9XExERt2bKlBgYG6vXXX69vvvmm7r///up0OrV+\n/foaHR2d3n/Tpk318ssv1wMPPFADAwP18MMP18mTJ6uq6vTp0zU0NFRLly6tTqdTGzZsqBMnTlRV\n1eTkZG3ZsqWWLl1a69atq3feeWf6mDt37qxt27bV0NBQLVmypN5777169tln6+DBgzUwMFA7d+78\nm7kPHz5cmzZtqk6nU3fffXft37+/qqrGx8er0+lM7/fMM8/UrbfeOn1/aGio3nzzzX/tSeR8jUva\n6tWr2/DwcGuttWPHjrVut9sOHDjQWmvtiy++aN1ut504caK11trGjRvb2rVr2/fff9+mpqbaxo0b\n244dO1prrb311lvt0UcfbVNTU+3cuXNtbGys/fLLL6211h566KG2ffv2dvr06Xbo0KG2bNmy6ed8\n5ZVX2k033dQ++uij1lprU1NT7f33328PPvjg9IwjIyNt5cqVrbXWzpw509auXdt2797dzpw504aH\nh1t/f387cuTI9OsZGxtrrbW2bt26dvvtt7fDhw+31lpbtWpVO3To0L/rVNJas8L4P9D+/HOhvXv3\n1qpVq2rFihVVVbV8+fK6+eab69NPP53e9957760bbrihent764477qhDhw5VVVVPT0+dPHmyjhw5\nUjNmzKhFixbV5ZdfXseOHauvv/66nnzyyZo5c2YtXLiwNmzYUHv27Jk+Zn9/f61Zs6aqqnp7e//h\nrAcPHqxTp07VI488Uj09PbVs2bJavXp1ffDBB1VVtWTJkhodHa3jx49XVdW6devqyy+/rPHx8fr1\n119r4cKF/6Kzxt/Tc7EH4D/n6NGjtW/fvvr444+r6k8hOXv2bC1fvnx6n2uuuWb69uzZs+vUqVNV\nVXXPPffUsWPH6oknnqiff/65BgcH6/HHH6/Jycm68sora/bs2dOPmz9/fo2NjU3fnzdvXjzj5ORk\n9fX1nbdt/vz5NTk5WVVVnU6n9u/fX9ddd111u93qdru1Z8+e6u3trcWLF1/A2eD3EIxL3F9/+tDX\n11fr16+v7du3X/Bxenp6auvWrbV169Y6evRobd68uRYsWFC33XZb/fTTT3Xq1KmaM2dOVVVNTEzU\n3Llz/+4M/8zcuXNrYmLivG1Hjx6tBQsWVFVVt9utF198sfr6+qrT6dTAwEA999xz1dvbW91u94Jf\nFxfGJckl7tprr63x8fGqqhocHKz9+/fX559/XufOnaupqakaHR2tH3/88Z8eZ2RkpL777rs6d+5c\nzZkzp3p6euqyyy6refPmVX9/f7300kt1+vTp+vbbb+vdd9+twcHB3zXvLbfcUnPmzKnXXnutzp49\nWyMjI/XJJ5/UnXfeWVVVN954Y82aNav27t1bnU6nrrjiirr66qvrww8/PO8NUf49BOMSt3nz5tq1\na1d1u93at29f7dq1q3bv3l3Lly+v1atX1xtvvDH9Hsc/WgkcP368tm3bVosXL6677rqrli5dOh2F\nHTt21Pj4eK1YsaK2bdtWjz322HmXORdi5syZ9eqrr9aBAwdq2bJl9fzzz9cLL7wwvcKo+tMq46qr\nrpq+1PlLKBYtWvS7npPcjNb8gQ6QscIAYoIBxAQDiAkGEPuv/R7GxN7+iz0C/F/rG/z6b7ZZYQAx\nwQBiggHEBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQE\nA4gJBhATDCAmGEBMMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiAkGEBMM\nICYYQEwwgJhgADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCA\nmGAAMcEAYoIBxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBi\nggHEBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQEA4gJ\nBhATDCAmGEBMMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiAkGEBMMICYY\nQEwwgJhgADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCAmGAA\nMcEAYoIBxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBiggHE\nBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQEA4gJBhAT\nDCAmGEBMMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiAkGEBMMICYYQEww\ngJhgADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCAmGAAMcEA\nYoIBxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBiggHEBAOI\nCQYQEwwgNqO11i72EMD/BisMICYYQEwwgJhgADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBi\nggHEBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiP0RoqNMBlokHDIAAAAASUVORK5CYII=\n",
+            "text/plain": [
+              "\u003cmatplotlib.figure.Figure at 0x7f97ee42bb90\u003e"
+            ]
+          },
+          "metadata": {
+            "tags": [
+              "id1_content_0",
+              "outputarea_id1",
+              "user_output"
+            ]
+          },
+          "output_type": "display_data"
+        }
+      ],
       "source": [
         "def run_input_loop(sess, inference_ops, color_name_placeholder):\n",
         "  \"\"\"Helper function that reads from input and calls the inference ops in a loop.\"\"\"\n",
@@ -1502,419 +1886,52 @@
         "     \n",
         "    # Run the inference loop.\n",
         "    run_input_loop(sess, inference_ops, color_name_placeholder)"
-      ],
-      "execution_count": 0,
-      "outputs": [
-        {
-          "output_type": "stream",
-          "text": [
-            "('Successfully downloaded', 'train.csv', 28010L, 'bytes.')\n",
-            "('Successfully downloaded', 'test.csv', 2414L, 'bytes.')\n",
-            "Step 0 train loss 0.37890616\n",
-            "Step 10 train loss 0.18515904\n",
-            "Step 20 train loss 0.0892782\n",
-            "Step 30 train loss 0.07883155\n",
-            "Step 40 train loss 0.08585831\n",
-            "Step 50 train loss 0.09302989\n",
-            "Step 60 train loss 0.089012615\n",
-            "Step 70 train loss 0.07275697\n",
-            "Step 80 train loss 0.06644974\n",
-            "Step 90 train loss 0.0854013\n",
-            "Test loss 0.13216865Colorbot is ready to generate colors!\n",
-            "\n",
-            "\n",
-            "\n"
-          ],
-          "name": "stdout"
-        },
-        {
-          "output_type": "display_data",
-          "data": {
-            "text/plain": [
-              "<IPython.core.display.HTML object>"
-            ],
-            "text/html": [
-              "<link rel=stylesheet type=text/css href='/nbextensions/google.colab/tabbar.css'></link>"
-            ]
-          },
-          "metadata": {
-            "tags": [
-              "outputarea_id1"
-            ]
-          }
-        },
-        {
-          "output_type": "display_data",
-          "data": {
-            "text/plain": [
-              "<IPython.core.display.HTML object>"
-            ],
-            "text/html": [
-              "<script src='/nbextensions/google.colab/tabbar_main.min.js'></script>"
-            ]
-          },
-          "metadata": {
-            "tags": [
-              "outputarea_id1"
-            ]
-          }
-        },
-        {
-          "output_type": "display_data",
-          "data": {
-            "text/plain": [
-              "<IPython.core.display.HTML object>"
-            ],
-            "text/html": [
-              "<div id=\"id1\"></div>"
-            ]
-          },
-          "metadata": {
-            "tags": [
-              "outputarea_id1"
-            ]
-          }
-        },
-        {
-          "output_type": "display_data",
-          "data": {
-            "application/javascript": [
-              "window[\"b102d936-3379-11e8-ac70-0242ac110002\"] = colab_lib.createTabBar({\"contentBorder\": [\"0px\"], \"borderColor\": [\"#a7a7a7\"], \"tabNames\": [\"RNN Colorbot\"], \"initialSelection\": 0, \"location\": \"top\", \"contentHeight\": [\"initial\"], \"elementId\": \"id1\"});\n",
-              "//# sourceURL=js_e223a56194"
-            ],
-            "text/plain": [
-              "<IPython.core.display.Javascript object>"
-            ]
-          },
-          "metadata": {
-            "tags": [
-              "outputarea_id1"
-            ]
-          }
-        },
-        {
-          "output_type": "display_data",
-          "data": {
-            "application/javascript": [
-              "window[\"b103532a-3379-11e8-ac70-0242ac110002\"] = window[\"id1\"].setSelectedTabIndex(0);\n",
-              "//# sourceURL=js_b8c6a821fb"
-            ],
-            "text/plain": [
-              "<IPython.core.display.Javascript object>"
-            ]
-          },
-          "metadata": {
-            "tags": [
-              "outputarea_id1"
-            ]
-          }
-        },
-        {
-          "output_type": "display_data",
-          "data": {
-            "application/javascript": [
-              "window[\"b105b28c-3379-11e8-ac70-0242ac110002\"] = google.colab.output.getActiveOutputArea();\n",
-              "//# sourceURL=js_44805e254b"
-            ],
-            "text/plain": [
-              "<IPython.core.display.Javascript object>"
-            ]
-          },
-          "metadata": {
-            "tags": [
-              "id1_content_0",
-              "outputarea_id1"
-            ]
-          }
-        },
-        {
-          "output_type": "display_data",
-          "data": {
-            "application/javascript": [
-              "window[\"b106197a-3379-11e8-ac70-0242ac110002\"] = document.querySelector(\"#id1_content_0\");\n",
-              "//# sourceURL=js_a63d3c6c47"
-            ],
-            "text/plain": [
-              "<IPython.core.display.Javascript object>"
-            ]
-          },
-          "metadata": {
-            "tags": [
-              "id1_content_0",
-              "outputarea_id1"
-            ]
-          }
-        },
-        {
-          "output_type": "display_data",
-          "data": {
-            "application/javascript": [
-              "window[\"b1069f44-3379-11e8-ac70-0242ac110002\"] = google.colab.output.setActiveOutputArea(window[\"b106197a-3379-11e8-ac70-0242ac110002\"]);\n",
-              "//# sourceURL=js_7e203b8bce"
-            ],
-            "text/plain": [
-              "<IPython.core.display.Javascript object>"
-            ]
-          },
-          "metadata": {
-            "tags": [
-              "id1_content_0",
-              "outputarea_id1"
-            ]
-          }
-        },
-        {
-          "output_type": "display_data",
-          "data": {
-            "application/javascript": [
-              "window[\"b1070f38-3379-11e8-ac70-0242ac110002\"] = window[\"id1\"].setSelectedTabIndex(0);\n",
-              "//# sourceURL=js_d53293d4a7"
-            ],
-            "text/plain": [
-              "<IPython.core.display.Javascript object>"
-            ]
-          },
-          "metadata": {
-            "tags": [
-              "id1_content_0",
-              "outputarea_id1"
-            ]
-          }
-        },
-        {
-          "output_type": "display_data",
-          "data": {
-            "application/javascript": [
-              "window[\"c6d90d5c-3379-11e8-ac70-0242ac110002\"] = google.colab.output.setActiveOutputArea(window[\"b105b28c-3379-11e8-ac70-0242ac110002\"]);\n",
-              "//# sourceURL=js_3000dc2c05"
-            ],
-            "text/plain": [
-              "<IPython.core.display.Javascript object>"
-            ]
-          },
-          "metadata": {
-            "tags": [
-              "id1_content_0",
-              "outputarea_id1"
-            ]
-          }
-        },
-        {
-          "output_type": "display_data",
-          "data": {
-            "application/javascript": [
-              "window[\"c6da872c-3379-11e8-ac70-0242ac110002\"] = google.colab.output.getActiveOutputArea();\n",
-              "//# sourceURL=js_4136f669a3"
-            ],
-            "text/plain": [
-              "<IPython.core.display.Javascript object>"
-            ]
-          },
-          "metadata": {
-            "tags": [
-              "id1_content_0",
-              "outputarea_id1"
-            ]
-          }
-        },
-        {
-          "output_type": "display_data",
-          "data": {
-            "application/javascript": [
-              "window[\"c6dac868-3379-11e8-ac70-0242ac110002\"] = document.querySelector(\"#id1_content_0\");\n",
-              "//# sourceURL=js_2f70dd9aee"
-            ],
-            "text/plain": [
-              "<IPython.core.display.Javascript object>"
-            ]
-          },
-          "metadata": {
-            "tags": [
-              "id1_content_0",
-              "outputarea_id1"
-            ]
-          }
-        },
-        {
-          "output_type": "display_data",
-          "data": {
-            "application/javascript": [
-              "window[\"c6db07d8-3379-11e8-ac70-0242ac110002\"] = google.colab.output.setActiveOutputArea(window[\"c6dac868-3379-11e8-ac70-0242ac110002\"]);\n",
-              "//# sourceURL=js_7226726048"
-            ],
-            "text/plain": [
-              "<IPython.core.display.Javascript object>"
-            ]
-          },
-          "metadata": {
-            "tags": [
-              "id1_content_0",
-              "outputarea_id1"
-            ]
-          }
-        },
-        {
-          "output_type": "display_data",
-          "data": {
-            "application/javascript": [
-              "window[\"c6dcc6fe-3379-11e8-ac70-0242ac110002\"] = window[\"id1\"].setSelectedTabIndex(0);\n",
-              "//# sourceURL=js_72e7709865"
-            ],
-            "text/plain": [
-              "<IPython.core.display.Javascript object>"
-            ]
-          },
-          "metadata": {
-            "tags": [
-              "id1_content_0",
-              "outputarea_id1"
-            ]
-          }
-        },
-        {
-          "output_type": "display_data",
-          "data": {
-            "image/png": "iVBORw0KGgoAAAANSUhEUgAAAVQAAAFZCAYAAADHDNdrAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAB9JJREFUeJzt3E1Lle0ax+HTF4jeEAyMBhE0DawI\nwsCH0AIlaGBWNJBo0CDoA0TQhmDXuKAGDioiCA2KlEAlnl05FD9Co8BeaGCQoBDa2jPZsXt4Bvu/\n0+o4Rmvd1zW4rsmP84bFamo0Go0C4H/WvNYHAPhVCCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKDy\nUxgeHq5Dhw7V4OBgPXz4sHp7e+vWrVt15cqVOnnyZN2/f78ajUbdvn27+vr6qqenp65du1YrKytV\nVfXhw4e6cOFC9fX1VV9fX01PT1dV1dzcXHV3d9eDBw/q+PHj9ccff9TExMRaXpWfWOtaHwD+zuvX\nr+vOnTs1MTFRbW1tdf78+dW16enpGh8fr/b29hobG6upqal6/Phxbdy4sS5evFgjIyM1NDRUly5d\nqv3799fw8HC9efOmTp8+XVNTU1VV9enTp2pubq5nz57V5ORk3bhxo44dO7ZW1+UnZkJl3Zudna2D\nBw9WR0dHbdiwoQYHB1fX9u7dW+3t7VVV9fLlyxocHKytW7dWa2trnTp1qp4/f16Li4s1MzNT586d\nq6qqXbt21YEDB1an1OXl5Tpx4kRVVe3Zs6fevXv3Yy/IL8OEyrr3+fPnamtrW/2+ffv21c//+Xxh\nYaHu3r1bjx49qqqqlZWVam9vr4WFhWo0GnXmzJnVvYuLi9XV1VVVVS0tLbVp06aqqmpubq6vX7/+\nX+/Dr0tQWfe2bNlSi4uLq98/fvz43X0dHR3V29tbQ0ND3zxfXl6ulpaWevLkSW3evPmbtbm5ufyB\n+W155Wfd6+zsrJmZmZqfn68vX77U2NjYd/cdOXKkxsfHa2lpqaqqRkdH6+nTp9Xa2lqHDx+u0dHR\nqqpaWlqqy5cv1/v373/YHfg9CCrrXmdnZw0MDNTAwECdPXu2enp6vrvv6NGj1dPTUwMDA9Xf318v\nXryo7u7uqqq6evVqzc7OVn9/fw0MDNTOnTtrx44dP/Ia/Aaa/B8qP4NGo1FNTU1VVfXq1au6efPm\nX06qsFZMqKx78/Pz1dXVVW/fvq1Go1GTk5O1b9++tT4W/BcTKj+FkZGRunfvXjU1NdXu3bvr+vXr\ntW3btrU+FnxDUAFCvPIDhAgqQMi6+WH/kX8eXesjAPytf/3jz79cM6EChAgqQIigAoQIKkCIoAKE\nCCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQI\nKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgq\nQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpA\niKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCI\noAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIig\nAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAC\nhAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKE\nCCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQI\nKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgq\nQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpA\niKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCI\noAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIig\nAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAC\nhAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKE\nCCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQI\nKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgq\nQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpA\niKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkBI\nU6PRaKz1IQB+BSZUgBBBBQgRVIAQQQUIEVSAEEEFCBFUgBBBBQgRVIAQQQUIEVSAEEEFCBFUgBBB\nBQgRVIAQQQUIEVSAEEEFCBFUgBBBBQgRVIAQQQUIEVSAkH8D1Aj8lNhhe7QAAAAASUVORK5CYII=\n",
-            "text/plain": [
-              "<matplotlib.figure.Figure at 0x7f72f402e850>"
-            ]
-          },
-          "metadata": {
-            "tags": [
-              "id1_content_0",
-              "outputarea_id1",
-              "user_output"
-            ]
-          }
-        },
-        {
-          "output_type": "display_data",
-          "data": {
-            "application/javascript": [
-              "window[\"c70592aa-3379-11e8-ac70-0242ac110002\"] = google.colab.output.setActiveOutputArea(window[\"c6da872c-3379-11e8-ac70-0242ac110002\"]);\n",
-              "//# sourceURL=js_25c3aaf79a"
-            ],
-            "text/plain": [
-              "<IPython.core.display.Javascript object>"
-            ]
-          },
-          "metadata": {
-            "tags": [
-              "id1_content_0",
-              "outputarea_id1"
-            ]
-          }
-        },
-        {
-          "output_type": "display_data",
-          "data": {
-            "application/javascript": [
-              "window[\"c70842c0-3379-11e8-ac70-0242ac110002\"] = google.colab.output.getActiveOutputArea();\n",
-              "//# sourceURL=js_984c56b816"
-            ],
-            "text/plain": [
-              "<IPython.core.display.Javascript object>"
-            ]
-          },
-          "metadata": {
-            "tags": [
-              "id1_content_0",
-              "outputarea_id1"
-            ]
-          }
-        },
-        {
-          "output_type": "display_data",
-          "data": {
-            "application/javascript": [
-              "window[\"c708dec4-3379-11e8-ac70-0242ac110002\"] = document.querySelector(\"#id1_content_0\");\n",
-              "//# sourceURL=js_e0451a1217"
-            ],
-            "text/plain": [
-              "<IPython.core.display.Javascript object>"
-            ]
-          },
-          "metadata": {
-            "tags": [
-              "id1_content_0",
-              "outputarea_id1"
-            ]
-          }
-        },
-        {
-          "output_type": "display_data",
-          "data": {
-            "application/javascript": [
-              "window[\"c7092726-3379-11e8-ac70-0242ac110002\"] = google.colab.output.setActiveOutputArea(window[\"c708dec4-3379-11e8-ac70-0242ac110002\"]);\n",
-              "//# sourceURL=js_7aa23d7385"
-            ],
-            "text/plain": [
-              "<IPython.core.display.Javascript object>"
-            ]
-          },
-          "metadata": {
-            "tags": [
-              "id1_content_0",
-              "outputarea_id1"
-            ]
-          }
-        },
-        {
-          "output_type": "display_data",
-          "data": {
-            "application/javascript": [
-              "window[\"c7099044-3379-11e8-ac70-0242ac110002\"] = window[\"id1\"].setSelectedTabIndex(0);\n",
-              "//# sourceURL=js_5722756ddb"
-            ],
-            "text/plain": [
-              "<IPython.core.display.Javascript object>"
-            ]
-          },
-          "metadata": {
-            "tags": [
-              "id1_content_0",
-              "outputarea_id1"
-            ]
-          }
-        },
-        {
-          "output_type": "stream",
-          "text": [
-            "Give me a color name (or press 'enter' to exit): \n"
-          ],
-          "name": "stdout"
-        },
-        {
-          "output_type": "display_data",
-          "data": {
-            "application/javascript": [
-              "window[\"c7baac12-3379-11e8-ac70-0242ac110002\"] = google.colab.output.setActiveOutputArea(window[\"c70842c0-3379-11e8-ac70-0242ac110002\"]);\n",
-              "//# sourceURL=js_cdd622e58f"
-            ],
-            "text/plain": [
-              "<IPython.core.display.Javascript object>"
-            ]
-          },
-          "metadata": {
-            "tags": [
-              "id1_content_0",
-              "outputarea_id1"
-            ]
-          }
-        }
       ]
     },
     {
-      "metadata": {
-        "id": "AHJ2c47U-A5W",
-        "colab_type": "text"
-      },
       "cell_type": "markdown",
+      "metadata": {
+        "colab_type": "text",
+        "id": "AHJ2c47U-A5W"
+      },
       "source": [
         "# Where do we go next?\n",
         "\n",
-        "Autograph is available in tensorflow.contrib, but it's still in its early stages. We're excited about the possibilities it brings — write your machine learning code in the flexible Eager style, but still enjoy all the benefits that come with running in graph mode. A beta version will be available soon -- stay tuned!"
+        "AutoGraph is still in its early stages, but is available in [tensorflow.contrib](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/autograph). We're excited about the possibilities it brings. New versions will be available soon — stay tuned!"
       ]
     }
-  ]
+  ],
+  "metadata": {
+    "colab": {
+      "collapsed_sections": [],
+      "default_view": {},
+      "last_runtime": {
+        "build_target": "",
+        "kind": "local"
+      },
+      "name": "Dev Summit 2018 - Autograph",
+      "provenance": [
+        {
+          "file_id": "1wCZUh73zTNs1jzzYjqoxMIdaBWCdKJ2K",
+          "timestamp": 1522238054357
+        },
+        {
+          "file_id": "1_HpC-RrmIv4lNaqeoslUeWaX8zH5IXaJ",
+          "timestamp": 1521743157199
+        },
+        {
+          "file_id": "1mjO2fQ2F9hxpAzw2mnrrUkcgfb7xSGW-",
+          "timestamp": 1520522344607
+        }
+      ],
+      "version": "0.3.2",
+      "views": {}
+    },
+    "kernelspec": {
+      "display_name": "Python 2",
+      "name": "python2"
+    }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 0
 }
diff --git a/tensorflow/contrib/autograph/examples/notebooks/workshop.ipynb b/tensorflow/contrib/autograph/examples/notebooks/workshop.ipynb
index e8f16b4..e7dfb13 100644
--- a/tensorflow/contrib/autograph/examples/notebooks/workshop.ipynb
+++ b/tensorflow/contrib/autograph/examples/notebooks/workshop.ipynb
@@ -11,6 +11,24 @@
           }
         },
         "colab_type": "code",
+        "id": "u3B7Uh50lozN"
+      },
+      "outputs": [],
+      "source": [
+        "!pip install -U -q tf-nightly"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 0,
+      "metadata": {
+        "colab": {
+          "autoexec": {
+            "startup": false,
+            "wait_interval": 0
+          }
+        },
+        "colab_type": "code",
         "id": "qWUV0FYjDSKj"
       },
       "outputs": [],
@@ -62,7 +80,7 @@
         "# ...into graph-building functions like this:\n",
         "def tf_g(x):\n",
         "  with tf.name_scope('g'):\n",
-        "    \n",
+        "\n",
         "    def if_true():\n",
         "      with tf.name_scope('if_true'):\n",
         "        x_1, = x,\n",
@@ -76,7 +94,7 @@
         "        return x_1,\n",
         "\n",
         "    x = autograph_utils.run_cond(tf.greater(x, 0), if_true, if_false)\n",
-        "    return x\n"
+        "    return x"
       ]
     },
     {
@@ -101,14 +119,14 @@
         "# Generate a graph-version of g and call it:\n",
         "tf_g = autograph.to_graph(g)\n",
         "\n",
-        "with tf.Graph().as_default():  \n",
+        "with tf.Graph().as_default():\n",
         "  # The result works like a regular op: takes tensors in, returns tensors.\n",
         "  # You can inspect the graph using tf.get_default_graph().as_graph_def()\n",
         "  g_ops = tf_g(tf.constant(9.0))\n",
         "  with tf.Session() as sess:\n",
         "    print('Autograph value: %2.2f\\n' % sess.run(g_ops))\n",
-        "    \n",
-        "  \n",
+        "\n",
+        "\n",
         "# You can view, debug and tweak the generated code:\n",
         "print(autograph.to_code(g))"
       ]
@@ -155,10 +173,10 @@
         "print('Original value: %d' % f([10,12,15,20]))\n",
         "\n",
         "tf_f = autograph.to_graph(f)\n",
-        "with tf.Graph().as_default():  \n",
+        "with tf.Graph().as_default():\n",
         "  with tf.Session():\n",
         "    print('Graph value: %d\\n\\n' % tf_f(tf.constant([10,12,15,20])).eval())\n",
-        "  \n",
+        "\n",
         "print(autograph.to_code(f))"
       ]
     },
@@ -194,7 +212,7 @@
         "  return x * x\n",
         "\n",
         "tf_f = autograph.to_graph(f)\n",
-        "with tf.Graph().as_default():  \n",
+        "with tf.Graph().as_default():\n",
         "  with tf.Session():\n",
         "    try:\n",
         "      print(tf_f(tf.constant(0)).eval())\n",
@@ -233,7 +251,7 @@
         "      n += 1\n",
         "      print(n)\n",
         "  return n\n",
-        "    \n",
+        "\n",
         "tf_f = autograph.to_graph(f)\n",
         "with tf.Graph().as_default():\n",
         "  with tf.Session():\n",
@@ -247,7 +265,7 @@
         "id": "NqF0GT-VCVFh"
       },
       "source": [
-        "Appending to lists in loops also works (we create a `TensorArray` for you behind the scenes)"
+        "Appending to lists in loops also works (we create a tensor list ops behind the scenes)"
       ]
     },
     {
@@ -268,15 +286,15 @@
         "def f(n):\n",
         "  z = []\n",
         "  # We ask you to tell us the element dtype of the list\n",
-        "  z = autograph.utils.set_element_type(z, tf.int32)\n",
+        "  autograph.set_element_type(z, tf.int32)\n",
         "  for i in range(n):\n",
         "    z.append(i)\n",
         "  # when you're done with the list, stack it\n",
         "  # (this is just like np.stack)\n",
-        "  return autograph.stack(z) \n",
+        "  return autograph.stack(z)\n",
         "\n",
         "tf_f = autograph.to_graph(f)\n",
-        "with tf.Graph().as_default():  \n",
+        "with tf.Graph().as_default():\n",
         "  with tf.Session():\n",
         "    print(tf_f(tf.constant(3)).eval())\n",
         "\n",
@@ -327,7 +345,7 @@
       "source": [
         "tf_g = autograph.to_graph(fizzbuzz)\n",
         "\n",
-        "with tf.Graph().as_default():  \n",
+        "with tf.Graph().as_default():\n",
         "  # The result works like a regular op: takes tensors in, returns tensors.\n",
         "  # You can inspect the graph using tf.get_default_graph().as_graph_def()\n",
         "  g_ops = tf_g(tf.constant(15))\n",
@@ -384,7 +402,7 @@
         "  return x\n",
         "\n",
         "\n",
-        "with tf.Graph().as_default():  \n",
+        "with tf.Graph().as_default():\n",
         "  with tf.Session() as sess:\n",
         "    print(sess.run(square_log(tf.constant(4))))"
       ]
@@ -396,7 +414,7 @@
         "id": "_R-Q7BbxmkBF"
       },
       "source": [
-        "#### Now some exercises. Convert the TensorFlow code into AutoGraph'd Python code."
+        "#### Convert the TensorFlow code into Python code for AutoGraph"
       ]
     },
     {
@@ -439,8 +457,10 @@
       "source": [
         "@autograph.convert()\n",
         "def square_if_positive(x):\n",
-        "  ... # \u003c\u003c\u003c fill it in!\n",
-        "  \n",
+        "\n",
+        "  pass # TODO: fill it in!\n",
+        "\n",
+        "\n",
         "with tf.Session() as sess:\n",
         "  print(sess.run(square_if_positive(tf.constant(4))))"
       ]
@@ -517,7 +537,7 @@
         "    x = tf.cond(tf.greater(x, 0), if_positive, lambda: x)\n",
         "    return x\n",
         "\n",
-        "with tf.Graph().as_default():  \n",
+        "with tf.Graph().as_default():\n",
         "  with tf.Session() as sess:\n",
         "    print(sess.run(nearest_odd_square(tf.constant(4))))"
       ]
@@ -539,8 +559,10 @@
       "source": [
         "@autograph.convert()\n",
         "def nearest_odd_square(x):\n",
-        "  ... # \u003c\u003c\u003c fill it in!\n",
-        "  \n",
+        "\n",
+        "  pass # TODO: fill it in!\n",
+        "\n",
+        "\n",
         "with tf.Session() as sess:\n",
         "  print(sess.run(nearest_odd_square(tf.constant(4))))"
       ]
@@ -578,7 +600,7 @@
         "      x = x + 1\n",
         "  return x\n",
         "\n",
-        "with tf.Graph().as_default():  \n",
+        "with tf.Graph().as_default():\n",
         "  with tf.Session() as sess:\n",
         "    print(sess.run(nearest_odd_square(tf.constant(4))))"
       ]
@@ -612,8 +634,8 @@
         "def square_until_stop(x, y):\n",
         "  x = tf.while_loop(lambda x: tf.less(x, y), lambda x: x * x, [x])\n",
         "  return x\n",
-        "    \n",
-        "with tf.Graph().as_default():  \n",
+        "\n",
+        "with tf.Graph().as_default():\n",
         "  with tf.Session() as sess:\n",
         "    print(sess.run(square_until_stop(tf.constant(4), tf.constant(100))))"
       ]
@@ -635,9 +657,11 @@
       "source": [
         "@autograph.convert()\n",
         "def square_until_stop(x, y):\n",
-        "  ... # fill it in!\n",
-        "    \n",
-        "with tf.Graph().as_default():  \n",
+        "\n",
+        "  pass # TODO: fill it in!\n",
+        "\n",
+        "\n",
+        "with tf.Graph().as_default():\n",
         "  with tf.Session() as sess:\n",
         "    print(sess.run(square_until_stop(tf.constant(4), tf.constant(100))))"
       ]
@@ -672,8 +696,8 @@
         "  while x \u003c y:\n",
         "    x = x * x\n",
         "  return x\n",
-        "    \n",
-        "with tf.Graph().as_default():  \n",
+        "\n",
+        "with tf.Graph().as_default():\n",
         "  with tf.Session() as sess:\n",
         "    print(sess.run(square_until_stop(tf.constant(4), tf.constant(100))))"
       ]
@@ -707,7 +731,7 @@
         "def argwhere_cumsum(x, threshold):\n",
         "  current_sum = 0.0\n",
         "  idx = 0\n",
-        "  \n",
+        "\n",
         "  for i in range(len(x)):\n",
         "    idx = i\n",
         "    if current_sum \u003e= threshold:\n",
@@ -715,10 +739,10 @@
         "    current_sum += x[i]\n",
         "  return idx\n",
         "\n",
-        "N = 10\n",
-        "with tf.Graph().as_default():  \n",
+        "n = 10\n",
+        "with tf.Graph().as_default():\n",
         "  with tf.Session() as sess:\n",
-        "    idx = argwhere_cumsum(tf.ones(N), tf.constant(float(N/2)))\n",
+        "    idx = argwhere_cumsum(tf.ones(n), tf.constant(float(n / 2)))\n",
         "    print(sess.run(idx))"
       ]
     },
@@ -739,12 +763,14 @@
       "source": [
         "@autograph.convert()\n",
         "def argwhere_cumsum(x, threshold):\n",
-        "  ...\n",
         "\n",
-        "N = 10\n",
-        "with tf.Graph().as_default():  \n",
+        "  pass # TODO: fill it in!\n",
+        "\n",
+        "\n",
+        "n = 10\n",
+        "with tf.Graph().as_default():\n",
         "  with tf.Session() as sess:\n",
-        "    idx = argwhere_cumsum(tf.ones(N), tf.constant(float(N/2)))\n",
+        "    idx = argwhere_cumsum(tf.ones(n), tf.constant(float(n / 2)))\n",
         "    print(sess.run(idx))"
       ]
     },
@@ -784,10 +810,10 @@
         "    current_sum += x[i]\n",
         "  return idx\n",
         "\n",
-        "N = 10\n",
+        "n = 10\n",
         "with tf.Graph().as_default():  \n",
         "  with tf.Session() as sess:\n",
-        "    idx = argwhere_cumsum(tf.ones(N), tf.constant(float(N/2)))\n",
+        "    idx = argwhere_cumsum(tf.ones(n), tf.constant(float(n / 2)))\n",
         "    print(sess.run(idx))"
       ]
     },
@@ -980,43 +1006,50 @@
         "def train(train_ds, test_ds, hp):\n",
         "  m = mlp_model((28 * 28,))\n",
         "  opt = tf.train.MomentumOptimizer(hp.learning_rate, 0.9)\n",
-        "  \n",
+        "\n",
         "  # We'd like to save our losses to a list. In order for AutoGraph\n",
         "  # to convert these lists into their graph equivalent,\n",
         "  # we need to specify the element type of the lists.\n",
         "  train_losses = []\n",
-        "  train_losses = autograph.utils.set_element_type(train_losses, tf.float32)\n",
         "  test_losses = []\n",
-        "  test_losses = autograph.utils.set_element_type(test_losses, tf.float32)\n",
         "  train_accuracies = []\n",
-        "  train_accuracies = autograph.utils.set_element_type(train_accuracies, tf.float32)\n",
         "  test_accuracies = []\n",
-        "  test_accuracies = autograph.utils.set_element_type(test_accuracies, tf.float32)\n",
-        "  \n",
+        "  autograph.set_element_type(train_losses, tf.float32)\n",
+        "  autograph.set_element_type(test_losses, tf.float32)\n",
+        "  autograph.set_element_type(train_accuracies, tf.float32)\n",
+        "  autograph.set_element_type(test_accuracies, tf.float32)\n",
+        "\n",
         "  # This entire training loop will be run in-graph.\n",
         "  i = tf.constant(0)\n",
         "  while i \u003c hp.max_steps:\n",
         "    train_x, train_y = get_next_batch(train_ds)\n",
         "    test_x, test_y = get_next_batch(test_ds)\n",
-        "    # add get next\n",
+        "\n",
         "    step_train_loss, step_train_accuracy = fit(m, train_x, train_y, opt)\n",
         "    step_test_loss, step_test_accuracy = predict(m, test_x, test_y)\n",
+        "\n",
         "    if i % (hp.max_steps // 10) == 0:\n",
         "      print('Step', i, 'train loss:', step_train_loss, 'test loss:',\n",
         "            step_test_loss, 'train accuracy:', step_train_accuracy,\n",
         "            'test accuracy:', step_test_accuracy)\n",
+        "\n",
         "    train_losses.append(step_train_loss)\n",
         "    test_losses.append(step_test_loss)\n",
         "    train_accuracies.append(step_train_accuracy)\n",
         "    test_accuracies.append(step_test_accuracy)\n",
+        "\n",
         "    i += 1\n",
-        "  \n",
-        "  # We've recorded our loss values and accuracies \n",
+        "\n",
+        "  # We've recorded our loss values and accuracies\n",
         "  # to a list in a graph with AutoGraph's help.\n",
-        "  # In order to return the values as a Tensor, \n",
+        "  # In order to return the values as a Tensor,\n",
         "  # we need to stack them before returning them.\n",
-        "  return (autograph.stack(train_losses), autograph.stack(test_losses),  autograph.stack(train_accuracies),\n",
-        "          autograph.stack(test_accuracies))"
+        "  return (\n",
+        "      autograph.stack(train_losses),\n",
+        "      autograph.stack(test_losses),\n",
+        "      autograph.stack(train_accuracies),\n",
+        "      autograph.stack(test_accuracies),\n",
+        "  )"
       ]
     },
     {
@@ -1042,14 +1075,17 @@
         "  train_ds = setup_mnist_data(True, hp, 50)\n",
         "  test_ds = setup_mnist_data(False, hp, 1000)\n",
         "  tf_train = autograph.to_graph(train)\n",
-        "  (train_losses, test_losses, train_accuracies,\n",
-        "   test_accuracies) = tf_train(train_ds, test_ds, hp)\n",
+        "  loss_tensors = tf_train(train_ds, test_ds, hp)\n",
         "\n",
         "  with tf.Session() as sess:\n",
         "    sess.run(tf.global_variables_initializer())\n",
-        "    (train_losses, test_losses, train_accuracies,\n",
-        "     test_accuracies) = sess.run([train_losses, test_losses, train_accuracies,\n",
-        "                                  test_accuracies])\n",
+        "    (\n",
+        "        train_losses,\n",
+        "        test_losses,\n",
+        "        train_accuracies,\n",
+        "        test_accuracies\n",
+        "    ) = sess.run(loss_tensors)\n",
+        "\n",
         "    plt.title('MNIST train/test losses')\n",
         "    plt.plot(train_losses, label='train loss')\n",
         "    plt.plot(test_losses, label='test loss')\n",
diff --git a/tensorflow/contrib/autograph/impl/api.py b/tensorflow/contrib/autograph/impl/api.py
index c7401c7..f7fe3de 100644
--- a/tensorflow/contrib/autograph/impl/api.py
+++ b/tensorflow/contrib/autograph/impl/api.py
@@ -99,6 +99,7 @@
   Returns:
     A decorator that wraps the original function.
   """
+
   def decorator(f):
     """Decorator implementation."""
 
@@ -109,8 +110,7 @@
     @wraps(f)
     def py_func_wrapper(*args, **kwargs):
       if kwargs:
-        raise NotImplementedError(
-            'RunMode.PY_FUNC does not yet support kwargs')
+        raise NotImplementedError('RunMode.PY_FUNC does not yet support kwargs')
       # TODO(mdan): Add support for kwargs.
       return py_func.wrap_py_func(
           f, return_dtypes, args, kwargs, use_dummy_return=not return_dtypes)
@@ -231,7 +231,10 @@
 
   Returns:
    A function with a signature identical to `o`, but which when executed
-  creates TF a graph that has the same functionality as the original entity.
+    creates a TF graph that has the same functionality as the original entity.
+  Raises:
+    ValueError: If the converted function defines or refers to symbol names that
+    are reserved for AutoGraph.
   """
   program_ctx = converter.ProgramContext(
       recursive=recursive,
@@ -256,6 +259,19 @@
       compiled_node.__dict__[key] = val
   compiled_fn = getattr(compiled_node, name)
 
+  # Needed so that the source map attribute is available to the context
+  # manager that rewrites runtime errors.
+  #
+  # Note that compiler.ast_to_object attaches the source map 'ag_source_map__'
+  # symbol to the compiled module.
+  source_map_attribute_name = 'ag_source_map'
+  if getattr(compiled_fn, source_map_attribute_name, None) is not None:
+    raise ValueError('cannot convert %s because it has an attribute '
+                     '"%s", which is reserved for AutoGraph.' %
+                     (compiled_fn, source_map_attribute_name))
+  setattr(compiled_fn, source_map_attribute_name,
+          compiled_node.__dict__['ag_source_map__'])
+
   if verbose:
     logging.info('Compiled output of %s:\n\n%s\n', e, compiled_src)
 
@@ -292,7 +308,7 @@
   conversion.entity_to_graph(e, program_ctx, arg_values, arg_types)
 
   code = '\n'.join(
-      compiler.ast_to_source(dep, indentation)
+      compiler.ast_to_source(dep, indentation)[0]
       for dep in reversed(tuple(six.itervalues(program_ctx.dependency_cache))))
 
   return program_ctx.required_imports + '\n\n' + code
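
For orientation, a minimal usage sketch of the new source-map contract, mirroring
the test added to api_test.py below; `autograph.to_graph` is the public entry point
and the attribute name comes from the diff above. Illustrative only, not part of
the patch:

    from tensorflow.contrib import autograph

    def double(x):
      return x * 2

    tf_double = autograph.to_graph(double)
    # The compiled function now carries the source map that the error-rewriting
    # machinery uses to point runtime errors back at the original Python source.
    assert hasattr(tf_double, 'ag_source_map')
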
diff --git a/tensorflow/contrib/autograph/impl/api_test.py b/tensorflow/contrib/autograph/impl/api_test.py
index 9943093..4de7df6 100644
--- a/tensorflow/contrib/autograph/impl/api_test.py
+++ b/tensorflow/contrib/autograph/impl/api_test.py
@@ -206,8 +206,8 @@
       return x
 
     with self.test_session() as sess:
-      x = api.converted_call(
-          test_fn, False, False, {}, constant_op.constant(-1))
+      x = api.converted_call(test_fn, False, False, {},
+                             constant_op.constant(-1))
       self.assertEqual(1, sess.run(x))
 
   def test_converted_call_method(self):
@@ -274,8 +274,8 @@
         return self.x
 
     with self.test_session() as sess:
-      tc = api.converted_call(
-          TestClass, False, False, {}, constant_op.constant(-1))
+      tc = api.converted_call(TestClass, False, False, {},
+                              constant_op.constant(-1))
       # tc is now a converted object.
       x = tc.test_method()
       self.assertEqual(1, sess.run(x))
@@ -305,6 +305,13 @@
     # Just check that it is parseable Python code.
     self.assertIsNotNone(parser.parse_str(compiled_code))
 
+  def test_source_map_attribute_present(self):
+
+    def test_fn(y):
+      return y**2
+
+    self.assertTrue(hasattr(api.to_graph(test_fn), 'ag_source_map'))
+
 
 if __name__ == '__main__':
   test.main()
diff --git a/tensorflow/contrib/autograph/impl/conversion.py b/tensorflow/contrib/autograph/impl/conversion.py
index 776d19f..7bd0ba3 100644
--- a/tensorflow/contrib/autograph/impl/conversion.py
+++ b/tensorflow/contrib/autograph/impl/conversion.py
@@ -28,26 +28,27 @@
 from tensorflow.contrib.autograph.converters import break_statements
 from tensorflow.contrib.autograph.converters import builtin_functions
 from tensorflow.contrib.autograph.converters import call_trees
+from tensorflow.contrib.autograph.converters import conditional_expressions
 from tensorflow.contrib.autograph.converters import continue_statements
 from tensorflow.contrib.autograph.converters import control_flow
 from tensorflow.contrib.autograph.converters import decorators
-from tensorflow.contrib.autograph.converters import ifexp
+from tensorflow.contrib.autograph.converters import directives
+from tensorflow.contrib.autograph.converters import error_handlers
 from tensorflow.contrib.autograph.converters import lists
 from tensorflow.contrib.autograph.converters import logical_expressions
 from tensorflow.contrib.autograph.converters import name_scopes
+from tensorflow.contrib.autograph.converters import return_statements
 from tensorflow.contrib.autograph.converters import side_effect_guards
-from tensorflow.contrib.autograph.converters import single_return
 from tensorflow.contrib.autograph.converters import slices
 from tensorflow.contrib.autograph.core import config
 from tensorflow.contrib.autograph.core import converter
+from tensorflow.contrib.autograph.core import errors
 from tensorflow.contrib.autograph.pyct import ast_util
 from tensorflow.contrib.autograph.pyct import inspect_utils
+from tensorflow.contrib.autograph.pyct import origin_info
 from tensorflow.contrib.autograph.pyct import parser
 from tensorflow.contrib.autograph.pyct import qual_names
 from tensorflow.contrib.autograph.pyct import transformer
-from tensorflow.contrib.autograph.pyct.static_analysis import activity
-from tensorflow.contrib.autograph.pyct.static_analysis import live_values
-from tensorflow.contrib.autograph.pyct.static_analysis import type_info
 from tensorflow.python.util import tf_inspect
 
 
@@ -157,7 +158,8 @@
         program_ctx=program_ctx,
         arg_values={},
         arg_types={'self': (c.__name__, c)},
-        owner_type=c)
+        owner_type=c,
+        rewrite_errors=False)
     if class_namespace is None:
       class_namespace = namespace
     else:
@@ -231,6 +233,8 @@
     ag_internal = imp.new_module('autograph')
     ag_internal.converted_call = autograph_module.converted_call
     ag_internal.utils = utils
+    ag_internal.rewrite_graph_construction_error = (
+        errors.rewrite_graph_construction_error)
     # TODO(mdan): Add safeguards against name clashes.
     # We don't want to create a submodule because we want the operators to be
     # accessible as ag__.<operator>
@@ -239,11 +243,17 @@
   _add_reserved_symbol(namespace, 'ag__', ag_internal)
 
 
-def function_to_graph(f, program_ctx, arg_values, arg_types, owner_type=None):
+def function_to_graph(f,
+                      program_ctx,
+                      arg_values,
+                      arg_types,
+                      owner_type=None,
+                      rewrite_errors=True):
   """Specialization of `entity_to_graph` for callable functions."""
+
   node, source = parser.parse_entity(f)
   node = node.body[0]
-
+  origin_info.resolve(node, source, f)
   namespace = inspect_utils.getnamespace(f)
   _add_self_references(namespace, program_ctx.autograph_module)
   namer = program_ctx.new_namer(namespace)
@@ -256,7 +266,7 @@
       arg_types=arg_types,
       owner_type=owner_type)
   context = converter.EntityContext(namer, entity_info, program_ctx)
-  node = node_to_graph(node, context)
+  node = node_to_graph(node, context, rewrite_errors=rewrite_errors)
 
  # TODO(mdan): This somewhat duplicates the call rename logic in call_trees.py
   new_name, did_rename = namer.compiled_function_name(f.__name__, f, owner_type)
@@ -272,22 +282,13 @@
   return node, new_name, namespace
 
 
-def _apply_transformer(node, context, converter_module):
-  # TODO(mdan): Clear static analysis here.
-  node = qual_names.resolve(node)
-  node = activity.resolve(node, context.info, None)
-  node = live_values.resolve(node, context.info, config.PYTHON_LITERALS)
-  node = type_info.resolve(node, context.info)
-  node = converter_module.transform(node, context)
-  return node
-
-
-def node_to_graph(node, context):
+def node_to_graph(node, context, rewrite_errors=True):
   """Convert Python code to equivalent TF graph mode code.
 
   Args:
     node: AST, the code to convert.
     context: converter.EntityContext
+    rewrite_errors: Boolean, whether or not to rewrite the error traceback.
 
   Returns:
     A tuple (node, deps):
@@ -295,28 +296,33 @@
         * deps: A set of strings, the fully qualified names of entity
             dependencies that this node has.
   """
-  # TODO(mdan): Verify arguments for correctness.
+  # TODO(mdan): Insert list_comprehensions somewhere.
 
-  node = _apply_transformer(node, context, ifexp)
+  node = converter.standard_analysis(node, context, is_initial=True)
   # Past this point, line numbers are no longer accurate so we ignore the
   # source.
   # TODO(mdan): Is it feasible to reconstruct intermediate source code?
   context.info.source_code = None
-  node = _apply_transformer(node, context, decorators)
-  node = _apply_transformer(node, context, break_statements)
-  node = _apply_transformer(node, context, asserts)
+
+  node = converter.apply_(node, context, decorators)
+  node = converter.apply_(node, context, directives)
+  node = converter.apply_(node, context, break_statements)
+  node = converter.apply_(node, context, asserts)
   # Note: sequencing continue canonicalization before for loop one avoids
   # dealing with the extra loop increment operation that the for
   # canonicalization creates.
-  node = _apply_transformer(node, context, continue_statements)
+  node = converter.apply_(node, context, continue_statements)
   context.info.namespace['len'] = len
-  node = _apply_transformer(node, context, single_return)
-  node = _apply_transformer(node, context, lists)
-  node = _apply_transformer(node, context, slices)
-  node = _apply_transformer(node, context, builtin_functions)
-  node = _apply_transformer(node, context, call_trees)
-  node = _apply_transformer(node, context, control_flow)
-  node = _apply_transformer(node, context, logical_expressions)
-  node = _apply_transformer(node, context, side_effect_guards)
-  node = _apply_transformer(node, context, name_scopes)
+  node = converter.apply_(node, context, return_statements)
+  node = converter.apply_(node, context, lists)
+  node = converter.apply_(node, context, slices)
+  node = converter.apply_(node, context, builtin_functions)
+  node = converter.apply_(node, context, call_trees)
+  node = converter.apply_(node, context, control_flow)
+  node = converter.apply_(node, context, conditional_expressions)
+  node = converter.apply_(node, context, logical_expressions)
+  node = converter.apply_(node, context, side_effect_guards)
+  node = converter.apply_(node, context, name_scopes)
+  if rewrite_errors:
+    node = converter.apply_(node, context, error_handlers)
   return node
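
A hedged sketch of the per-pass convention this pipeline relies on: each converter
module exposes transform(node, context), as visible in the removed
_apply_transformer helper, and converter.apply_ is assumed to re-run the standard
analyses between passes (its body is not shown in this diff). A hypothetical
hand-rolled equivalent:

    # Hypothetical helper, not part of this PR: chain converter passes by hand.
    def run_passes(node, context, converter_modules):
      for m in converter_modules:
        # Each pass returns a rewritten AST. Analyses must stay fresh between
        # passes, which converter.apply_ is assumed to handle internally.
        node = m.transform(node, context)
      return node
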
diff --git a/tensorflow/contrib/autograph/impl/conversion_test.py b/tensorflow/contrib/autograph/impl/conversion_test.py
index f527929..207225a 100644
--- a/tensorflow/contrib/autograph/impl/conversion_test.py
+++ b/tensorflow/contrib/autograph/impl/conversion_test.py
@@ -79,10 +79,12 @@
     self.assertTrue(f in program_ctx.dependency_cache)
     self.assertTrue(g in program_ctx.dependency_cache)
     self.assertEqual('tf__f', program_ctx.dependency_cache[f].name)
-    # need the extra .body[0] in order to step past the with tf.name_scope('f')
-    # that is added automatically
+    # need one extra .body[0] to step past the try/except wrapper that is
+    # added automatically, and another for the with tf.name_scope('f') that is
+    # also added automatically
     self.assertEqual(
-        'tf__g', program_ctx.dependency_cache[f].body[0].body[0].value.func.id)
+        'tf__g',
+        program_ctx.dependency_cache[f].body[0].body[0].body[0].value.func.id)
     self.assertEqual('tf__g', program_ctx.dependency_cache[g].name)
 
   def test_entity_to_graph_class_hierarchy(self):
diff --git a/tensorflow/contrib/autograph/pyct/anno.py b/tensorflow/contrib/autograph/pyct/anno.py
index 92f1370..1a52110 100644
--- a/tensorflow/contrib/autograph/pyct/anno.py
+++ b/tensorflow/contrib/autograph/pyct/anno.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-"""Handling annotations on AST nodes.
+"""AST node annotation support.
 
 Adapted from Tangent.
 """
@@ -21,36 +21,87 @@
 from __future__ import division
 from __future__ import print_function
 
-from enum import Enum
+import enum
+
+# pylint:disable=g-bad-import-order
+import gast
+# pylint:enable=g-bad-import-order
 
 
-class NoValue(Enum):
+# TODO(mdan): Shorten the names.
+# These names are heavily used, and the fully qualified anno.<Class>.<KEY>
+# spellings get verbose.
+# TODO(mdan): Replace the attr-dict mechanism with a more typed solution.
+
+
+class NoValue(enum.Enum):
 
   def __repr__(self):
     return self.name
 
 
 class Basic(NoValue):
-  """Container for annotation keys.
+  """Container for basic annotation keys.
 
   The enum values are used strictly for documentation purposes.
   """
 
-  QN = 'Qualified name, as it appeared in the code.'
+  QN = 'Qualified name, as it appeared in the code. See qual_names.py.'
   SKIP_PROCESSING = (
       'This node should be preserved as is and not processed any further.')
   INDENT_BLOCK_REMAINDER = (
-      'When a node is annotated with this, the remainder of the block should '
-      'be indented below it. The annotation contains a tuple '
-      '(new_body, name_map), where `new_body` is the new indented block and '
-      '`name_map` allows renaming symbols.')
-  ORIGIN = ('Contains OriginInfo objects specific to the annotated node. See '
-            'origin_information.py for definition.')
+      'When a node is annotated with this, the remainder of the block should'
+      ' be indented below it. The annotation contains a tuple'
+      ' (new_body, name_map), where `new_body` is the new indented block and'
+      ' `name_map` allows renaming symbols.')
+  ORIGIN = ('Information about the source code that converted code originated'
+            ' from. See origin_info.py.')
+
+
+class Static(NoValue):
+  """Container for static analysis annotation keys.
+
+  The enum values are used strictly for documentation purposes.
+  """
+
+  # Deprecated - use reaching definitions instead.
+  # Symbols
+  # These flags are boolean.
+  IS_LOCAL = 'Symbol is local to the function scope being analyzed.'
+  IS_PARAM = 'Symbol is a parameter to the function being analyzed.'
+
+  # Scopes
+  # Scopes are represented by objects of type activity.Scope.
+  SCOPE = 'The scope for the annotated node. See activity.py.'
+  # TODO(mdan): Drop these in favor of accessing the child's SCOPE.
+  ARGS_SCOPE = 'The scope for the argument list of a function call.'
+  COND_SCOPE = 'The scope for the test node of a conditional statement.'
+  BODY_SCOPE = (
+      'The scope for the main body of a statement (True branch for if '
+      'statements, main body for loops).')
+  ORELSE_SCOPE = (
+      'The scope for the orelse body of a statement (False branch for if '
+      'statements, orelse body for loops).')
+
+  # Static analysis annotations.
+  DEFINITIONS = (
+      'Reaching definition information. See reaching_definitions.py.')
+  ORIG_DEFINITIONS = (
+      'The value of DEFINITIONS that applied to the original code before any'
+      ' conversion.')
+  DEFINED_VARS_IN = (
+      'Symbols defined when entering the node. See reaching_definitions.py.')
+  LIVE_VARS_OUT = ('Symbols live when exiting the node. See liveness.py.')
 
 
 FAIL = object()
 
 
+def keys(node, field_name='___pyct_anno'):
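+  """Returns the frozenset of annotation keys set on node."""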
+  if not hasattr(node, field_name):
+    return frozenset()
+  return frozenset(getattr(node, field_name).keys())
+
+
 def getanno(node, key, default=FAIL, field_name='___pyct_anno'):
   if (default is FAIL or (hasattr(node, field_name) and
                           (key in getattr(node, field_name)))):
@@ -88,3 +139,19 @@
         key,
         getanno(from_node, key, field_name=field_name),
         field_name=field_name)
+
+
+def dup(node, copy_map, field_name='___pyct_anno'):
+  """Recursively copies annotations in an AST tree.
+
+  Args:
+    node: ast.AST
+    copy_map: Dict[Hashable, Hashable], maps a source anno key to a destination
+        key. All annotations with the source key will be copied to identical
+        annotations with the destination key.
+    field_name: str
+  """
+  for n in gast.walk(node):
+    for k in copy_map:
+      if hasanno(n, k, field_name):
+        setanno(n, copy_map[k], getanno(n, k, field_name), field_name)
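
A minimal sketch of the new keys/dup helpers, using the same plain-ast nodes as
the tests below; illustrative only:

    import ast

    from tensorflow.contrib.autograph.pyct import anno

    node = ast.Name()
    anno.setanno(node, 'spam', 1)
    assert anno.keys(node) == {'spam'}

    # dup copies annotations recursively under a new key, leaving the old one.
    anno.dup(node, {'spam': 'eggs'})
    assert anno.getanno(node, 'eggs') == 1
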
diff --git a/tensorflow/contrib/autograph/pyct/anno_test.py b/tensorflow/contrib/autograph/pyct/anno_test.py
index f2c0c8c..5ef4da6 100644
--- a/tensorflow/contrib/autograph/pyct/anno_test.py
+++ b/tensorflow/contrib/autograph/pyct/anno_test.py
@@ -32,22 +32,27 @@
   def test_basic(self):
     node = ast.Name()
 
+    self.assertEqual(anno.keys(node), set())
     self.assertFalse(anno.hasanno(node, 'foo'))
     with self.assertRaises(AttributeError):
       anno.getanno(node, 'foo')
 
     anno.setanno(node, 'foo', 3)
+
+    self.assertEqual(anno.keys(node), {'foo'})
     self.assertTrue(anno.hasanno(node, 'foo'))
     self.assertEqual(anno.getanno(node, 'foo'), 3)
     self.assertEqual(anno.getanno(node, 'bar', default=7), 7)
 
     anno.delanno(node, 'foo')
+
+    self.assertEqual(anno.keys(node), set())
     self.assertFalse(anno.hasanno(node, 'foo'))
     with self.assertRaises(AttributeError):
       anno.getanno(node, 'foo')
     self.assertIsNone(anno.getanno(node, 'foo', default=None))
 
-  def test_copyanno(self):
+  def test_copy(self):
     node_1 = ast.Name()
     anno.setanno(node_1, 'foo', 3)
 
@@ -58,6 +63,22 @@
     self.assertTrue(anno.hasanno(node_2, 'foo'))
     self.assertFalse(anno.hasanno(node_2, 'bar'))
 
+  def test_duplicate(self):
+    node = ast.If(
+        test=ast.Num(1),
+        body=[ast.Expr(ast.Name('bar', ast.Load()))],
+        orelse=[])
+    anno.setanno(node, 'spam', 1)
+    anno.setanno(node, 'ham', 1)
+    anno.setanno(node.body[0], 'ham', 1)
+
+    anno.dup(node, {'spam': 'eggs'})
+
+    self.assertTrue(anno.hasanno(node, 'spam'))
+    self.assertTrue(anno.hasanno(node, 'ham'))
+    self.assertTrue(anno.hasanno(node, 'eggs'))
+    self.assertFalse(anno.hasanno(node.body[0], 'eggs'))
+
 
 if __name__ == '__main__':
   test.main()
diff --git a/tensorflow/contrib/autograph/pyct/ast_util.py b/tensorflow/contrib/autograph/pyct/ast_util.py
index c4f82d1..86e3f56 100644
--- a/tensorflow/contrib/autograph/pyct/ast_util.py
+++ b/tensorflow/contrib/autograph/pyct/ast_util.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-"""Copy an AST tree, discarding annotations."""
+"""AST manipulation utilities."""
 
 from __future__ import absolute_import
 from __future__ import division
@@ -20,53 +20,60 @@
 
 import ast
 
+import collections
 import gast
 
 from tensorflow.contrib.autograph.pyct import anno
 from tensorflow.contrib.autograph.pyct import parser
 
 
-class CleanCopier(gast.NodeVisitor):
-  """Copies AST nodes.
+class CleanCopier(object):
+  """NodeTransformer-like visitor that copies an AST."""
 
-  The copied nodes will ignore almost all fields that are prefixed by '__'.
-  Exceptions make some annotations.
-  """
+  def __init__(self, preserve_annos):
+    super(CleanCopier, self).__init__()
+    self.preserve_annos = preserve_annos
 
-  # TODO(mdan): Parametrize which annotations get carried over.
+  def copy(self, node):
+    """Returns a deep copy of node (excluding some fields, see copy_clean)."""
 
-  def generic_visit(self, node):
+    if isinstance(node, list):
+      return [self.copy(n) for n in node]
+    elif isinstance(node, tuple):
+      return tuple(self.copy(n) for n in node)
+    elif not isinstance(node, (gast.AST, ast.AST)):
+      # Assuming everything that's not an AST, list or tuple is a value type
+      # and may simply be assigned.
+      return node
+
+    assert isinstance(node, (gast.AST, ast.AST))
+
     new_fields = {}
     for f in node._fields:
-      if f.startswith('__'):
-        continue
-      if not hasattr(node, f):
-        continue
-      v = getattr(node, f)
-      if isinstance(v, list):
-        v = [self.generic_visit(n) for n in v]
-      elif isinstance(v, tuple):
-        v = tuple(self.generic_visit(n) for n in v)
-      elif isinstance(v, (gast.AST, ast.AST)):
-        v = self.generic_visit(v)
-      else:
-        # Assume everything else is a value type.
-        pass
-      new_fields[f] = v
+      if not f.startswith('__') and hasattr(node, f):
+        new_fields[f] = self.copy(getattr(node, f))
     new_node = type(node)(**new_fields)
-    if anno.hasanno(node, anno.Basic.SKIP_PROCESSING):
-      anno.setanno(new_node, anno.Basic.SKIP_PROCESSING, True)
+
+    if self.preserve_annos:
+      for k in self.preserve_annos:
+        anno.copyanno(node, new_node, k)
     return new_node
 
 
-def copy_clean(node):
-  copier = CleanCopier()
-  if isinstance(node, list):
-    return [copier.visit(n) for n in node]
-  elif isinstance(node, tuple):
-    return tuple(copier.visit(n) for n in node)
-  else:
-    return copier.visit(node)
+def copy_clean(node, preserve_annos=None):
+  """Creates a deep copy of an AST.
+
+  The copy will not include fields that are prefixed by '__', with the
+  exception of user-specified annotations.
+
+  Args:
+    node: ast.AST
+    preserve_annos: Optional[Set[Hashable]], annotation keys to include in the
+        copy
+  Returns:
+    ast.AST
+  """
+  return CleanCopier(preserve_annos).copy(node)
 
 
 class SymbolRenamer(gast.NodeTransformer):
@@ -78,7 +85,11 @@
   def _process(self, node):
     qn = anno.getanno(node, anno.Basic.QN)
     if qn in self.name_map:
-      return gast.Name(str(self.name_map[qn]), node.ctx, None)
+      new_node = gast.Name(str(self.name_map[qn]), node.ctx, None)
+      # All annotations get carried over.
+      for k in anno.keys(node):
+        anno.copyanno(node, new_node, k)
+      return new_node
     return self.generic_visit(node)
 
   def visit_Name(self, node):
@@ -92,6 +103,7 @@
 
 
 def rename_symbols(node, name_map):
+  """Renames symbols in an AST. Requires qual_names annotations."""
   renamer = SymbolRenamer(name_map)
   if isinstance(node, list):
     return [renamer.visit(n) for n in node]
@@ -101,6 +113,7 @@
 
 
 def keywords_to_dict(keywords):
+  """Converts a list of ast.keyword objects to a dict."""
   keys = []
   values = []
   for kw in keywords:
@@ -110,10 +123,7 @@
 
 
 class PatternMatcher(gast.NodeVisitor):
-  """Matches a node against a pattern represented by a node.
-
-  The pattern may contain wildcards represented by the symbol '_'.
-  """
+  """Matches a node against a pattern represented by a node."""
 
   def __init__(self, pattern):
     self.pattern = pattern
@@ -175,11 +185,98 @@
         if v != p:
           return self.no_match()
 
-
 def matches(node, pattern):
+  """Basic pattern matcher for AST.
+
+  The pattern may contain wildcards represented by the symbol '_'. A node
+  matches a pattern if for every node in the tree, either there is a node of
+  the same type in pattern, or a Name node with id='_'.
+
+  Args:
+    node: ast.AST
+    pattern: ast.AST
+  Returns:
+    bool
+  """
   if isinstance(pattern, str):
     pattern = parser.parse_expression(pattern)
   matcher = PatternMatcher(pattern)
   matcher.visit(node)
   return matcher.matches
 
+
+# TODO(mdan): Once we have error tracing, we may be able to just go to SSA.
+def apply_to_single_assignments(targets, values, apply_fn):
+  """Applies a function to each individual assignment.
+
+  This function can process a possibly-unpacked (e.g. a, b = c, d) assignment.
+  It tries to break down the unpacking if possible, which makes it equivalent
+  to passing the assigned values to apply_fn in SSA form.
+
+  Examples:
+
+  The following will result in apply_fn(a, c), apply_fn(b, d):
+
+      a, b = c, d
+
+  The following will result in apply_fn(a, c[0]), apply_fn(b, c[1]):
+
+      a, b = c
+
+  The following will result in apply_fn(a, (b, c)):
+
+      a = b, c
+
+  It calls apply_fn on each resulting single assignment, allowing callers to
+  process them individually.
+
+  Args:
+    targets: Union[List[ast.AST], Tuple[ast.AST, ...], ast.AST], should be
+        used with the targets field of an ast.Assign node
+    values: ast.AST
+    apply_fn: Callable[[ast.AST, ast.AST], None], called with the
+        respective nodes of each single assignment
+  """
+  if not isinstance(targets, (list, tuple)):
+    targets = (targets,)
+  for target in targets:
+    if isinstance(target, (gast.Tuple, gast.List)):
+      for i in range(len(target.elts)):
+        target_el = target.elts[i]
+        if isinstance(values, (gast.Tuple, gast.List)):
+          value_el = values.elts[i]
+        else:
+          idx = parser.parse_expression(str(i))
+          value_el = gast.Subscript(values, gast.Index(idx), ctx=gast.Load())
+        apply_to_single_assignments(target_el, value_el, apply_fn)
+    else:
+      apply_fn(target, values)
+
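+# For example (sketch), for the statement `a, b = c`, the call below results
+# in apply_fn(a, c[0]) and apply_fn(b, c[1]):
+#
+#   stmt = parser.parse_str('a, b = c').body[0]
+#   apply_to_single_assignments(stmt.targets, stmt.value, apply_fn)
+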
+
+def iter_fields(node):
+  """Yields the values of the fields of a node, in sorted field name order."""
+  for field in sorted(node._fields):
+    try:
+      yield getattr(node, field)
+    except AttributeError:
+      pass
+
+
+def iter_child_nodes(node):
+  """Yields the AST nodes directly contained in the fields of a node."""
+  for field in iter_fields(node):
+    if isinstance(field, gast.AST):
+      yield field
+    elif isinstance(field, list):
+      for item in field:
+        if isinstance(item, gast.AST):
+          yield item
+
+
+def parallel_walk(node_a, node_b):
+  """Walks two ASTs of the same structure in parallel, yielding node pairs."""
+  todo_a = collections.deque([node_a])
+  todo_b = collections.deque([node_b])
+  while todo_a and todo_b:
+    node_a = todo_a.popleft()
+    node_b = todo_b.popleft()
+    todo_a.extend(iter_child_nodes(node_a))
+    todo_b.extend(iter_child_nodes(node_b))
+    yield node_a, node_b
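+
+
+# Example sketch: pairing each node of an AST with its counterpart in a clean
+# copy of the same tree.
+#
+#   for original, copied in parallel_walk(node, copy_clean(node)):
+#     assert type(original) is type(copied)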
diff --git a/tensorflow/contrib/autograph/pyct/ast_util_test.py b/tensorflow/contrib/autograph/pyct/ast_util_test.py
index 3afa04a..981e398 100644
--- a/tensorflow/contrib/autograph/pyct/ast_util_test.py
+++ b/tensorflow/contrib/autograph/pyct/ast_util_test.py
@@ -19,7 +19,10 @@
 from __future__ import print_function
 
 import ast
+import collections
+import textwrap
 
+from tensorflow.contrib.autograph.pyct import anno
 from tensorflow.contrib.autograph.pyct import ast_util
 from tensorflow.contrib.autograph.pyct import compiler
 from tensorflow.contrib.autograph.pyct import parser
@@ -29,53 +32,66 @@
 
 class AstUtilTest(test.TestCase):
 
-  def test_rename_symbols(self):
-    node = ast.Tuple([
-        ast.Name('a', ast.Load()),
-        ast.Name('b', ast.Load()),
-        ast.Attribute(ast.Name('b', None), 'c', ast.Store()),
-        ast.Attribute(
-            ast.Attribute(ast.Name('b', None), 'c', ast.Load()), 'd', None)
-    ], None)
-    node = qual_names.resolve(node)
-    node = ast_util.rename_symbols(
-        node, {
-            qual_names.QN('a'):
-                qual_names.QN('renamed_a'),
-            qual_names.QN(qual_names.QN('b'), attr='c'):
-                qual_names.QN('renamed_b_c'),
-        })
+  def setUp(self):
+    super(AstUtilTest, self).setUp()
+    self._invocation_counts = collections.defaultdict(lambda: 0)
 
-    self.assertEqual(node.elts[0].id, 'renamed_a')
-    self.assertTrue(isinstance(node.elts[0].ctx, ast.Load))
-    self.assertEqual(node.elts[1].id, 'b')
-    self.assertEqual(node.elts[2].id, 'renamed_b_c')
-    self.assertTrue(isinstance(node.elts[2].ctx, ast.Store))
-    self.assertEqual(node.elts[3].value.id, 'renamed_b_c')
-    self.assertTrue(isinstance(node.elts[3].value.ctx, ast.Load))
+  def test_rename_symbols_basic(self):
+    node = parser.parse_str('a + b')
+    node = qual_names.resolve(node)
+
+    node = ast_util.rename_symbols(
+        node, {qual_names.QN('a'): qual_names.QN('renamed_a')})
+
+    self.assertIsInstance(node.body[0].value.left.id, str)
+    source, _ = compiler.ast_to_source(node)
+    self.assertEqual(source.strip(), 'renamed_a + b')
+
+  def test_rename_symbols_attributes(self):
+    node = parser.parse_str('b.c = b.c.d')
+    node = qual_names.resolve(node)
+
+    node = ast_util.rename_symbols(
+        node, {qual_names.from_str('b.c'): qual_names.QN('renamed_b_c')})
+
+    source, _ = compiler.ast_to_source(node)
+    self.assertEqual(source.strip(), 'renamed_b_c = renamed_b_c.d')
+
+  def test_rename_symbols_annotations(self):
+    node = parser.parse_str('a[i]')
+    node = qual_names.resolve(node)
+    anno.setanno(node, 'foo', 'bar')
+    orig_anno = anno.getanno(node, 'foo')
+
+    node = ast_util.rename_symbols(node,
+                                   {qual_names.QN('a'): qual_names.QN('b')})
+
+    self.assertIs(anno.getanno(node, 'foo'), orig_anno)
 
   def test_copy_clean(self):
-    ret = ast.Return(
-        ast.BinOp(
-            op=ast.Add(),
-            left=ast.Name(id='a', ctx=ast.Load()),
-            right=ast.Num(1)))
-    setattr(ret, '__foo', 'bar')
-    node = ast.FunctionDef(
-        name='f',
-        args=ast.arguments(
-            args=[ast.Name(id='a', ctx=ast.Param())],
-            vararg=None,
-            kwarg=None,
-            defaults=[]),
-        body=[ret],
-        decorator_list=[],
-        returns=None)
+    node = parser.parse_str(
+        textwrap.dedent("""
+      def f(a):
+        return a + 1
+    """))
+    setattr(node.body[0], '__foo', 'bar')
     new_node = ast_util.copy_clean(node)
-    self.assertFalse(node is new_node)
-    self.assertFalse(ret is new_node.body[0])
+    self.assertIsNot(new_node, node)
+    self.assertIsNot(new_node.body[0], node.body[0])
     self.assertFalse(hasattr(new_node.body[0], '__foo'))
 
+  def test_copy_clean_preserves_annotations(self):
+    node = parser.parse_str(
+        textwrap.dedent("""
+      def f(a):
+        return a + 1
+    """))
+    anno.setanno(node.body[0], 'foo', 'bar')
+    anno.setanno(node.body[0], 'baz', 1)
+    new_node = ast_util.copy_clean(node, preserve_annos={'foo'})
+    self.assertEqual(anno.getanno(new_node.body[0], 'foo'), 'bar')
+    self.assertFalse(anno.hasanno(new_node.body[0], 'baz'))
+
   def test_keywords_to_dict(self):
     keywords = parser.parse_expression('f(a=b, c=1, d=\'e\')').keywords
     d = ast_util.keywords_to_dict(keywords)
@@ -113,6 +129,52 @@
     self.assertNoMatch('super(Foo, self).__init__()',
                        'super(Bar, _).__init__(_)')
 
+  def _mock_apply_fn(self, target, source):
+    target, _ = compiler.ast_to_source(target)
+    source, _ = compiler.ast_to_source(source)
+    self._invocation_counts[(target.strip(), source.strip())] += 1
+
+  def test_apply_to_single_assignments_dynamic_unpack(self):
+    node = parser.parse_str('a, b, c = d')
+    node = node.body[0]
+    ast_util.apply_to_single_assignments(node.targets, node.value,
+                                         self._mock_apply_fn)
+    self.assertDictEqual(self._invocation_counts, {
+        ('a', 'd[0]'): 1,
+        ('b', 'd[1]'): 1,
+        ('c', 'd[2]'): 1,
+    })
+
+  def test_apply_to_single_assignments_static_unpack(self):
+    node = parser.parse_str('a, b, c = d, e, f')
+    node = node.body[0]
+    ast_util.apply_to_single_assignments(node.targets, node.value,
+                                         self._mock_apply_fn)
+    self.assertDictEqual(self._invocation_counts, {
+        ('a', 'd'): 1,
+        ('b', 'e'): 1,
+        ('c', 'f'): 1,
+    })
+
+  def test_parallel_walk(self):
+    ret = ast.Return(
+        ast.BinOp(
+            op=ast.Add(),
+            left=ast.Name(id='a', ctx=ast.Load()),
+            right=ast.Num(1)))
+    node = ast.FunctionDef(
+        name='f',
+        args=ast.arguments(
+            args=[ast.Name(id='a', ctx=ast.Param())],
+            vararg=None,
+            kwarg=None,
+            defaults=[]),
+        body=[ret],
+        decorator_list=[],
+        returns=None)
+    for child_a, child_b in ast_util.parallel_walk(node, node):
+      self.assertEqual(child_a, child_b)
+
 
 if __name__ == '__main__':
   test.main()
diff --git a/tensorflow/contrib/autograph/pyct/cfg.py b/tensorflow/contrib/autograph/pyct/cfg.py
index 6663287..25fec7f 100644
--- a/tensorflow/contrib/autograph/pyct/cfg.py
+++ b/tensorflow/contrib/autograph/pyct/cfg.py
@@ -64,11 +64,19 @@
     self.prev = frozenset(self.prev)
 
   def __repr__(self):
-    return compiler.ast_to_source(self.ast_node).strip()
+    if isinstance(self.ast_node, gast.FunctionDef):
+      return 'def %s' % self.ast_node.name
+    elif isinstance(self.ast_node, gast.withitem):
+      source, _ = compiler.ast_to_source(self.ast_node.context_expr)
+      return source.strip()
+    source, _ = compiler.ast_to_source(self.ast_node)
+    return source.strip()
 
 
 class Graph(
-    collections.namedtuple('Graph', ['entry', 'exit', 'error', 'index'])):
+    collections.namedtuple(
+        'Graph',
+        ['entry', 'exit', 'error', 'index', 'stmt_prev', 'stmt_next'])):
   """A Control Flow Graph.
 
   The CFG maintains an index to allow looking up a CFG node by the AST node to
@@ -82,6 +90,11 @@
   because these are shared, and wiring them would create a reverse path from
   normal control flow into the error nodes, which we want to avoid.
 
+  The graph also maintains edges corresponding to higher-level statements
+  like for-else loops. A node is considered a successor of a statement if
+  there is an edge from a node that is lexically a child of that statement to
+  a node that is not. Statement predecessors are defined analogously.
+
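+  For example, in `x = 0; while x: y = 1; z = 2`, the While statement has the
+  node for `x = 0` among its statement predecessors and the node for `z = 2`
+  among its statement successors.
+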
   Attributes:
     entry: Node, the entry node
     exit: FrozenSet[Node, ...], the exit nodes
@@ -89,6 +102,10 @@
         error (errors propagated from function calls are not accounted)
     index: Dict[ast.Node, Node], mapping AST nodes to the respective CFG
         node
+    stmt_prev: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST
+        nodes to their predecessor CFG nodes
+    stmt_next: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST
+        nodes to their successor CFG nodes
   """
 
   def __repr__(self):
@@ -96,9 +113,8 @@
     for node in self.index.values():
       result += '  %s [label="%s"];\n' % (id(node), node)
     for node in self.index.values():
-      if node.next:
-        result += '  %s -> {%s};\n' % (id(node), ', '.join(
-            repr(id(n)) for n in node.next))
+      for next_ in node.next:
+        result += '  %s -> %s;\n' % (id(node), id(next_))
     result += '}'
     return result
 
@@ -108,6 +124,8 @@
   REVERSE = 2
 
 
+# TODO(mdan): Rename to DataFlowAnalyzer.
+# TODO(mdan): Consider specializations that use gen/kill/transfer abstractions.
 class GraphVisitor(object):
   """Base class for a CFG visitors.
 
@@ -130,26 +148,22 @@
     out: Dict[Node, Any], stores node-keyed state during a visit
   """
 
-  def reset(self):
-    self.in_ = {
-        node: self.init_state(node) for node in self.graph.index.values()
-    }
-    self.out = {
-        node: self.init_state(node) for node in self.graph.index.values()
-    }
+  def __init__(self, graph):
+    self.graph = graph
+    self.reset()
 
   def init_state(self, node):
     """State initialization function. Optional to overload.
 
     An in/out state slot will be created for each node in the graph. Subclasses
-    may overload this to control what that is initialized to.
+    must overload this to control what it is initialized to.
 
     Args:
       node: Node
     """
-    del node
-    return None
+    raise NotImplementedError('Subclasses must implement this.')
 
+  # TODO(mdan): Rename to flow?
   def visit_node(self, node):
     """Visitor function.
 
@@ -161,6 +175,14 @@
     """
     raise NotImplementedError('Subclasses must implement this.')
 
+  def reset(self):
+    self.in_ = {
+        node: self.init_state(node) for node in self.graph.index.values()
+    }
+    self.out = {
+        node: self.init_state(node) for node in self.graph.index.values()
+    }
+
   def _visit_internal(self, mode):
     """Visits the CFG, depth-first."""
     assert mode in (_WalkMode.FORWARD, _WalkMode.REVERSE)
@@ -169,7 +191,6 @@
     elif mode == _WalkMode.REVERSE:
       open_ = list(self.graph.exit)
     closed = set()
-    self.reset()
 
     while open_:
       node = open_.pop(0)
@@ -186,12 +207,10 @@
         if should_revisit or next_ not in closed:
           open_.append(next_)
 
-  def visit_forward(self, graph):
-    self.graph = graph
+  def visit_forward(self):
     self._visit_internal(_WalkMode.FORWARD)
 
-  def visit_reverse(self, graph):
-    self.graph = graph
+  def visit_reverse(self):
     self._visit_internal(_WalkMode.REVERSE)
 
 
@@ -244,8 +263,16 @@
     # TODO(mdan): Too many primitives. Use classes.
     self.leaves = set()
 
+    # Note: This mechanism requires that nodes are added in lexical order (top
+    # to bottom, depth first).
+    self.active_stmts = set()
+    self.owners = {}  # type: Dict[Node, FrozenSet[Any]]
+    self.forward_edges = set()  # type: Set[Tuple[Node, Node]]  # (from, to)
+
     self.finally_sections = {}
-    self.finally_section_subgraphs = {}  # Values are [begin_node, exit_nodes]
+    # Dict values represent (entry, exits)
+    self.finally_section_subgraphs = {
+    }  # type: Dict[ast.AST, Tuple[Node, Set[Node]]]
     # Whether the guard section can be reached from the statement that precedes
     # it.
     self.finally_section_has_direct_flow = {}
@@ -275,6 +302,7 @@
     if isinstance(first, Node):
       first.next.add(second)
       second.prev.add(first)
+      self.forward_edges.add((first, second))
     else:
       for node in first:
         self._connect_nodes(node, second)
@@ -285,6 +313,7 @@
       raise ValueError('%s added twice' % ast_node)
     node = Node(next_=set(), prev=set(), ast_node=ast_node)
     self.node_index[ast_node] = node
+    self.owners[node] = frozenset(self.active_stmts)
 
     if self.head is None:
       self.head = node
@@ -299,6 +328,25 @@
 
     return node
 
+  def begin_statement(self, stmt):
+    """Marks the beginning of a statement.
+
+    Args:
+      stmt: Hashable, a key by which the statement can be identified in
+          the CFG's stmt_prev and stmt_next attributes
+    """
+    self.active_stmts.add(stmt)
+
+  def end_statement(self, stmt):
+    """Marks the end of a statement.
+
+    Args:
+      stmt: Hashable, a key by which the statement can be identified in
+          the CFG's stmt_prev and stmt_next attributes; must match a key
+          previously passed to begin_statement.
+    """
+    self.active_stmts.remove(stmt)
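+
+  # Visitor methods bracket the nodes that belong to a statement between
+  # begin_statement and end_statement (see AstToCfg.visit_If, visit_While and
+  # visit_For below), so that node ownership is recorded via active_stmts.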
+
   def add_ordinary_node(self, ast_node):
     """Grows the graph by adding an ordinary CFG node.
 
@@ -505,11 +553,35 @@
     for node in self.node_index.values():
       node.freeze()
 
+    # Build the statement edges.
+    stmt_next = {}
+    stmt_prev = {}
+    for node, _ in self.forward_edges:
+      for stmt in self.owners[node]:
+        if stmt not in stmt_next:
+          stmt_next[stmt] = set()
+        if stmt not in stmt_prev:
+          stmt_prev[stmt] = set()
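+    # An edge that exits a statement (its source is owned by the statement,
+    # its target is not) contributes a statement successor; an edge that
+    # enters a statement contributes a statement predecessor.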
+    for first, second in self.forward_edges:
+      stmts_exited = self.owners[first] - self.owners[second]
+      for stmt in stmts_exited:
+        stmt_next[stmt].add(second)
+      stmts_entered = self.owners[second] - self.owners[first]
+      for stmt in stmts_entered:
+        stmt_prev[stmt].add(first)
+    for stmt in stmt_next:
+      stmt_next[stmt] = frozenset(stmt_next[stmt])
+    for stmt in stmt_prev:
+      stmt_prev[stmt] = frozenset(stmt_prev[stmt])
+
+    # Construct the final graph object.
     result = Graph(
         entry=self.head,
         exit=self.leaves,
         error=self.errors,
-        index=self.node_index)
+        index=self.node_index,
+        stmt_prev=stmt_prev,
+        stmt_next=stmt_next)
 
     # Reset the state.
     self.reset()
@@ -523,8 +595,6 @@
   A separate CFG will be constructed for each function.
   """
 
-  # TODO(mdan): Figure out how to deal with closures.
-
   def __init__(self):
     super(AstToCfg, self).__init__()
 
@@ -577,6 +647,13 @@
     self.builder.add_continue_node(node, try_node, guards)
 
   def visit_FunctionDef(self, node):
+    # We also keep the FunctionDef node in the CFG. This allows us to determine
+    # things like reaching definitions via closure. Note that the function body
+    # will be stored in a separate graph, because function definitions are not
+    # the same as function calls.
+    if self.builder is not None:
+      self.builder.add_ordinary_node(node)
+
     self.builder_stack.append(self.builder)
     self.builder = GraphBuilder(node)
 
@@ -622,7 +699,7 @@
     )
     if try_node is None:
       raise ValueError('%s is not enclosed by any FunctionDef' % node)
-    self.builder.add_error_node(node, try_node, guards)
+    self.builder.add_error_node(node, guards)
 
   def visit_Assert(self, node):
     # Ignoring the effect of exceptions.
@@ -637,6 +714,7 @@
     # targets of jump statements like break/continue/etc. Since there is no
     # statement that can interrupt a conditional, we don't need to track their
     # lexical scope. That may change in the future.
+    self.builder.begin_statement(node)
 
     self.builder.enter_cond_section(node)
     self._process_basic_statement(node.test)
@@ -650,8 +728,10 @@
       self.visit(stmt)
 
     self.builder.exit_cond_section(node)
+    self.builder.end_statement(node)
 
   def visit_While(self, node):
+    self.builder.begin_statement(node)
     self._enter_lexical_scope(node)
 
     self.builder.enter_section(node)
@@ -670,8 +750,10 @@
       self.visit(stmt)
 
     self.builder.exit_section(node)
+    self.builder.end_statement(node)
 
   def visit_For(self, node):
+    self.builder.begin_statement(node)
     self._enter_lexical_scope(node)
 
     self.builder.enter_section(node)
@@ -693,6 +775,7 @@
       self.visit(stmt)
 
     self.builder.exit_section(node)
+    self.builder.end_statement(node)
 
   def visit_Break(self, node):
     self._process_exit_statement(node, gast.While, gast.For)
@@ -722,12 +805,13 @@
 
   def visit_With(self, node):
     # TODO(mdan): Mark the context manager's exit call as exit guard.
-    self._process_basic_statement(node.items)
+    for item in node.items:
+      self._process_basic_statement(item)
     for stmt in node.body:
       self.visit(stmt)
 
 
 def build(node):
-  builder = AstToCfg()
-  builder.visit(node)
-  return builder.cfgs
+  visitor = AstToCfg()
+  visitor.visit(node)
+  return visitor.cfgs
diff --git a/tensorflow/contrib/autograph/pyct/cfg_test.py b/tensorflow/contrib/autograph/pyct/cfg_test.py
index 00afadd..9d0a85d 100644
--- a/tensorflow/contrib/autograph/pyct/cfg_test.py
+++ b/tensorflow/contrib/autograph/pyct/cfg_test.py
@@ -25,9 +25,13 @@
 
 class CountingVisitor(cfg.GraphVisitor):
 
-  def __init__(self):
+  def __init__(self, graph):
+    super(CountingVisitor, self).__init__(graph)
     self.counts = {}
 
+  def init_state(self, _):
+    return None
+
   def visit_node(self, node):
     self.counts[node.ast_node] = self.counts.get(node.ast_node, 0) + 1
     return False  # visit only once
@@ -51,8 +55,8 @@
 
     graphs, node = self._build_cfg(test_fn)
     graph, = graphs.values()
-    visitor = CountingVisitor()
-    visitor.visit_forward(graph)
+    visitor = CountingVisitor(graph)
+    visitor.visit_forward()
     fn_node = node.body[0]
 
     self.assertEqual(visitor.counts[fn_node.args], 1)
@@ -74,8 +78,8 @@
 
     graphs, node = self._build_cfg(test_fn)
     graph, = graphs.values()
-    visitor = CountingVisitor()
-    visitor.visit_reverse(graph)
+    visitor = CountingVisitor(graph)
+    visitor.visit_reverse()
     fn_node = node.body[0]
 
     self.assertEqual(visitor.counts[fn_node.args], 1)
@@ -94,7 +98,7 @@
     return cfgs
 
   def _repr_set(self, node_set):
-    return set(repr(n) for n in node_set)
+    return frozenset(repr(n) for n in node_set)
 
   def _as_set(self, elements):
     if elements is None:
@@ -110,14 +114,35 @@
       matched = False
       for cfg_node in graph.index.values():
         if repr(cfg_node) == node_repr:
-          if (self._as_set(prev) == set(map(repr, cfg_node.prev)) and
-              self._as_set(next_) == set(map(repr, cfg_node.next))):
+          if (self._as_set(prev) == frozenset(map(repr, cfg_node.prev)) and
+              self._as_set(next_) == frozenset(map(repr, cfg_node.next))):
             matched = True
             break
       if not matched:
         self.fail(
             'match failed for node "%s" in graph:\n%s' % (node_repr, graph))
 
+  def assertStatementEdges(self, graph, edges):
+    """Tests whether the CFG contains the specified statement edges."""
+    for prev_node_reprs, node_repr, next_node_reprs in edges:
+      matched = False
+      partial_matches = []
+      self.assertSetEqual(
+          frozenset(graph.stmt_next.keys()), frozenset(graph.stmt_prev.keys()))
+      for stmt_ast_node in graph.stmt_next:
+        ast_repr = '%s:%s' % (stmt_ast_node.__class__.__name__,
+                              stmt_ast_node.lineno)
+        if ast_repr == node_repr:
+          actual_next = frozenset(map(repr, graph.stmt_next[stmt_ast_node]))
+          actual_prev = frozenset(map(repr, graph.stmt_prev[stmt_ast_node]))
+          partial_matches.append((actual_prev, node_repr, actual_next))
+          if (self._as_set(prev_node_reprs) == actual_prev and
+              self._as_set(next_node_reprs) == actual_next):
+            matched = True
+            break
+      if not matched:
+        self.fail('edges mismatch for %s: %s' % (node_repr, partial_matches))
+
   def test_straightline(self):
 
     def test_fn(a):
@@ -171,7 +196,7 @@
         ),
     )
 
-  def test_branch_straightline(self):
+  def test_if_straightline(self):
 
     def test_fn(a):
       if a > 0:
@@ -189,6 +214,10 @@
             ('(a > 0)', 'a += -1', None),
         ),
     )
+    self.assertStatementEdges(
+        graph,
+        (('a', 'If:2', None),),
+    )
 
   def test_branch_nested(self):
 
@@ -219,6 +248,14 @@
             ('(a > 2)', 'a = 4', None),
         ),
     )
+    self.assertStatementEdges(
+        graph,
+        (
+            ('a', 'If:2', None),
+            ('(a > 0)', 'If:3', None),
+            ('(a > 0)', 'If:8', None),
+        ),
+    )
 
   def test_branch_straightline_semi(self):
 
@@ -236,6 +273,10 @@
             ('(a > 0)', 'a = 1', None),
         ),
     )
+    self.assertStatementEdges(
+        graph,
+        (('a', 'If:2', None),),
+    )
 
   def test_branch_return(self):
 
@@ -257,6 +298,10 @@
             ('a = 1', 'a = 2', None),
         ),
     )
+    self.assertStatementEdges(
+        graph,
+        (('a', 'If:2', 'a = 2'),),
+    )
 
   def test_branch_return_minimal(self):
 
@@ -273,6 +318,10 @@
             ('(a > 0)', 'return', None),
         ),
     )
+    self.assertStatementEdges(
+        graph,
+        (('a', 'If:2', None),),
+    )
 
   def test_while_straightline(self):
 
@@ -291,6 +340,10 @@
             ('(a > 0)', 'a = 2', None),
         ),
     )
+    self.assertStatementEdges(
+        graph,
+        (('a', 'While:2', 'a = 2'),),
+    )
 
   def test_while_else_straightline(self):
 
@@ -312,6 +365,10 @@
             ('a = 2', 'a = 3', None),
         ),
     )
+    self.assertStatementEdges(
+        graph,
+        (('a', 'While:2', 'a = 3'),),
+    )
 
   def test_while_else_continue(self):
 
@@ -339,6 +396,13 @@
             ('a = 2', 'a = 3', None),
         ),
     )
+    self.assertStatementEdges(
+        graph,
+        (
+            ('a', 'While:2', 'a = 3'),
+            ('(a > 0)', 'If:3', ('a = 1', '(a > 0)')),
+        ),
+    )
 
   def test_while_else_break(self):
 
@@ -364,6 +428,13 @@
             (('break', 'a = 2'), 'a = 3', None),
         ),
     )
+    self.assertStatementEdges(
+        graph,
+        (
+            ('a', 'While:2', 'a = 3'),
+            ('(a > 0)', 'If:3', ('a = 1', 'a = 3')),
+        ),
+    )
 
   def test_while_else_return(self):
 
@@ -389,6 +460,13 @@
             ('a = 2', 'a = 3', None),
         ),
     )
+    self.assertStatementEdges(
+        graph,
+        (
+            ('a', 'While:2', 'a = 3'),
+            ('(a > 0)', 'If:3', 'a = 1'),
+        ),
+    )
 
   def test_while_nested_straightline(self):
 
@@ -411,6 +489,13 @@
             ('(a > 0)', 'a = 3', None),
         ),
     )
+    self.assertStatementEdges(
+        graph,
+        (
+            ('a', 'While:2', 'a = 3'),
+            ('(a > 0)', 'While:3', 'a = 2'),
+        ),
+    )
 
   def test_while_nested_continue(self):
 
@@ -437,6 +522,14 @@
             ('(a > 0)', 'a = 3', None),
         ),
     )
+    self.assertStatementEdges(
+        graph,
+        (
+            ('a', 'While:2', 'a = 3'),
+            ('(a > 0)', 'While:3', 'a = 2'),
+            ('(a > 1)', 'If:4', ('a = 1', '(a > 1)')),
+        ),
+    )
 
   def test_while_nested_break(self):
 
@@ -451,16 +544,21 @@
 
     graph, = self._build_cfg(test_fn).values()
 
-    self.assertGraphMatches(
+    self.assertGraphMatches(graph, (
+        (('a', 'a = 2'), '(a > 0)', ('(a > 1)', 'a = 3')),
+        (('(a > 0)', 'a = 1'), '(a > 1)', ('(a > 2)', 'a = 2')),
+        ('(a > 1)', '(a > 2)', ('break', 'a = 1')),
+        ('(a > 2)', 'break', 'a = 2'),
+        ('(a > 2)', 'a = 1', '(a > 1)'),
+        (('(a > 1)', 'break'), 'a = 2', '(a > 0)'),
+        ('(a > 0)', 'a = 3', None),
+    ))
+    self.assertStatementEdges(
         graph,
         (
-            (('a', 'a = 2'), '(a > 0)', ('(a > 1)', 'a = 3')),
-            (('(a > 0)', 'a = 1'), '(a > 1)', ('(a > 2)', 'a = 2')),
-            ('(a > 1)', '(a > 2)', ('break', 'a = 1')),
-            ('(a > 2)', 'break', 'a = 2'),
-            ('(a > 2)', 'a = 1', '(a > 1)'),
-            (('(a > 1)', 'break'), 'a = 2', '(a > 0)'),
-            ('(a > 0)', 'a = 3', None),
+            ('a', 'While:2', 'a = 3'),
+            ('(a > 0)', 'While:3', 'a = 2'),
+            ('(a > 1)', 'If:4', ('a = 1', 'a = 2')),
         ),
     )
 
@@ -481,6 +579,10 @@
             ('range(0, a)', 'a = 2', None),
         ),
     )
+    self.assertStatementEdges(
+        graph,
+        (('a', 'For:2', 'a = 2'),),
+    )
 
   def test_for_else_straightline(self):
 
@@ -502,6 +604,10 @@
             ('a = 2', 'a = 3', None),
         ),
     )
+    self.assertStatementEdges(
+        graph,
+        (('a', 'For:2', 'a = 3'),),
+    )
 
   def test_for_else_continue(self):
 
@@ -530,6 +636,13 @@
             ('a = 2', 'a = 3', None),
         ),
     )
+    self.assertStatementEdges(
+        graph,
+        (
+            ('a', 'For:2', 'a = 3'),
+            ('range(0, a)', 'If:3', ('a = 1', 'range(0, a)')),
+        ),
+    )
 
   def test_for_else_break(self):
 
@@ -555,6 +668,13 @@
             (('break', 'a = 2'), 'a = 3', None),
         ),
     )
+    self.assertStatementEdges(
+        graph,
+        (
+            ('a', 'For:2', 'a = 3'),
+            ('range(0, a)', 'If:3', ('a = 1', 'a = 3')),
+        ),
+    )
 
   def test_for_else_return(self):
 
@@ -580,6 +700,13 @@
             ('a = 2', 'a = 3', None),
         ),
     )
+    self.assertStatementEdges(
+        graph,
+        (
+            ('a', 'For:2', 'a = 3'),
+            ('range(0, a)', 'If:3', 'a = 1'),
+        ),
+    )
 
   def test_for_nested_straightline(self):
 
@@ -602,6 +729,13 @@
             ('range(0, a)', 'a = 3', None),
         ),
     )
+    self.assertStatementEdges(
+        graph,
+        (
+            ('a', 'For:2', 'a = 3'),
+            ('range(0, a)', 'For:3', 'a = 2'),
+        ),
+    )
 
   def test_for_nested_continue(self):
 
@@ -629,6 +763,14 @@
             ('range(0, a)', 'a = 3', None),
         ),
     )
+    self.assertStatementEdges(
+        graph,
+        (
+            ('a', 'For:2', 'a = 3'),
+            ('range(0, a)', 'For:3', 'a = 2'),
+            ('range(1, a)', 'If:4', ('b += 1', 'range(1, a)')),
+        ),
+    )
 
   def test_for_nested_break(self):
 
@@ -655,6 +797,14 @@
             ('range(0, a)', 'a = 3', None),
         ),
     )
+    self.assertStatementEdges(
+        graph,
+        (
+            ('a', 'For:2', 'a = 3'),
+            ('range(0, a)', 'For:3', 'a = 2'),
+            ('range(1, a)', 'If:4', ('b += 1', 'a = 2')),
+        ),
+    )
 
   def test_complex(self):
 
@@ -704,6 +854,17 @@
             ('range(1, a)', 'a = 3', None),
         ),
     )
+    self.assertStatementEdges(
+        graph,
+        (
+            ('b = 0', 'While:3', 'range(1, a)'),
+            ('(a > 0)', 'For:4', 'a = 2'),
+            ('range(0, a)', 'If:5', ('(a > 3)', 'a = 2')),
+            ('(a > 2)', 'If:7', ('b += 1', 'a = 2', 'range(0, a)')),
+            ('(a > 3)', 'If:8', ('a = 2', 'range(0, a)')),
+            ('(a > 0)', 'For:17', 'a = 3'),
+        ),
+    )
 
   def test_finally_straightline(self):
 
@@ -785,6 +946,24 @@
         ),
     )
 
+  def test_with_straightline(self):
+
+    def test_fn(a):
+      with max(a) as b:
+        a = 0
+        return b
+
+    graph, = self._build_cfg(test_fn).values()
+
+    self.assertGraphMatches(
+        graph,
+        (
+            ('a', 'max(a)', 'a = 0'),
+            ('max(a)', 'a = 0', 'return b'),
+            ('a = 0', 'return b', None),
+        ),
+    )
+
 
 if __name__ == '__main__':
   test.main()
diff --git a/tensorflow/contrib/autograph/pyct/compiler.py b/tensorflow/contrib/autograph/pyct/compiler.py
index 24c4517..c172ab2 100644
--- a/tensorflow/contrib/autograph/pyct/compiler.py
+++ b/tensorflow/contrib/autograph/pyct/compiler.py
@@ -30,9 +30,49 @@
 import astor
 import gast
 
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import ast_util
+from tensorflow.contrib.autograph.pyct import origin_info
+from tensorflow.contrib.autograph.pyct import parser
+
+
+def _build_source_map(node, code):
+  """Return the Python objects represented by given AST.
+
+  Compiling the AST code this way ensures that the source code is readable by
+  e.g. `pdb` or `inspect`.
+
+  Args:
+    node: An AST node of the original generated code, before the source code is
+      generated.
+    code: The string representation of the source code for the newly generated
+      code.
+
+  Returns:
+    Dict[CodeLocation, OriginInfo], a mapping between the user and AutoGraph
+    generated code.
+  """
+  # After we have the final generated code we reparse it to get the final line
+  # numbers. Then we walk through the generated and original ASTs in parallel
+  # to build the mapping between the user and generated code.
+  new_node = parser.parse_str(code)
+  origin_info.resolve(new_node, code)
+  source_mapping = {}
+  for before, after in ast_util.parallel_walk(node, new_node):
+    # Need both checks because if origin information is ever copied over to new
+    # nodes then we need to rely on the fact that only the original user code
+    # has the origin annotation.
+    if (anno.hasanno(before, anno.Basic.ORIGIN) and
+        anno.hasanno(after, anno.Basic.ORIGIN)):
+      source_info = anno.getanno(before, anno.Basic.ORIGIN)
+      new_line_number = anno.getanno(after, anno.Basic.ORIGIN).line_number
+      source_mapping[new_line_number] = source_info
+  return source_mapping
+
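+# An illustrative sketch (assumes `node` carries anno.Basic.ORIGIN annotations
+# set by origin_info.resolve; `generated_code` and `line_number` are
+# hypothetical):
+#
+#   source_map = _build_source_map(node, generated_code)
+#   origin = source_map[line_number]  # OriginInfo for that generated line
+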
 
 def ast_to_source(node, indentation='  '):
   """Return the source code of given AST."""
+  original_node = node
   if isinstance(node, gast.AST):
     node = gast.gast_to_ast(node)
   generator = astor.codegen.SourceGenerator(indentation, False,
@@ -42,11 +82,16 @@
   # In some versions of Python, literals may appear as actual values. This
   # ensures everything is string.
   code = map(str, generator.result)
-  return astor.source_repr.pretty_source(code).lstrip()
+  code = astor.source_repr.pretty_source(code).lstrip()
+  source_mapping = _build_source_map(original_node, code)
+
+  return code, source_mapping
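+
+# ast_to_source returns a (source, source_mapping) pair; callers that only
+# need the source text can unpack it as `source, _ = ast_to_source(node)`.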
 
 
-def ast_to_object(
-    node, indentation='  ', source_prefix=None, delete_on_exit=True):
+def ast_to_object(node,
+                  indentation='  ',
+                  source_prefix=None,
+                  delete_on_exit=True):
   """Return the Python objects represented by given AST.
 
   Compiling the AST code this way ensures that the source code is readable by
@@ -56,15 +101,30 @@
     node: The code to compile, as an AST object.
     indentation: The string to use for indentation.
     source_prefix: Optional string to print as-is into the source file.
-    delete_on_exit: Whether to delete the temporary file used for compilation
-        on exit.
+    delete_on_exit: Whether to delete the temporary file used for compilation on
+      exit.
 
   Returns:
     A module object containing the compiled source code.
+
+  Raises:
+    ValueError: If ag_source_map__ is already in the namespace of the
+      compiled module.
   """
-  source = ast_to_source(node, indentation)
+  # code_source_mapping does not yet include the offsets from import statements.
+  source, code_source_mapping = ast_to_source(node, indentation=indentation)
 
   with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f:
+    # TODO(znado): move into an _offset_source_map() helper function.
+    # Need to offset the generated line numbers by the number of import lines.
+    if source_prefix:
+      num_import_lines = source_prefix.count('\n') + 1
+    else:
+      num_import_lines = 0
+    source_mapping = {}
+    for line_number, original_position in code_source_mapping.items():
+      source_map_key = origin_info.CodeLocation(
+          file_path=f.name, line_number=line_number + num_import_lines)
+      source_mapping[source_map_key] = original_position
     module_name = os.path.basename(f.name[:-3])
     if source_prefix:
       f.write(source_prefix)
@@ -72,4 +132,27 @@
     f.write(source)
   if delete_on_exit:
     atexit.register(lambda: os.remove(f.name))
-  return imp.load_source(module_name, f.name), source
+  compiled_node = imp.load_source(module_name, f.name)
+
+  # TODO(znado): Clean this up so we don't need to attach it to the namespace.
+  # TODO(znado): This does not work for classes because their methods share a
+  # namespace.
+  # This attaches the source map which is needed for error handling.  Note that
+  # api.to_graph copies this source map into an attribute of the function.
+  #
+  # We need this so the ag_source_map__ variable is available to the call to
+  # rewrite_graph_construction_error in the except block inside each function
+  # that handles graph construction errors.
+  #
+  # We cannot get the rewritten function name until it is too late, so
+  # templating is hard. Attaching the map to the outermost function also
+  # cleanly fixes the issues encountered with nested functions.
+  source_map_name = 'ag_source_map__'
+  if source_map_name in compiled_node.__dict__:
+    raise ValueError('cannot convert %s because it has namespace attribute '
+                     '"%s", which is reserved for AutoGraph.' %
+                     (compiled_node, source_map_name))
+  compiled_node.__dict__[source_map_name] = source_mapping
+
+  return compiled_node, source
diff --git a/tensorflow/contrib/autograph/pyct/compiler_test.py b/tensorflow/contrib/autograph/pyct/compiler_test.py
index 98cdc15..e29fa93 100644
--- a/tensorflow/contrib/autograph/pyct/compiler_test.py
+++ b/tensorflow/contrib/autograph/pyct/compiler_test.py
@@ -59,14 +59,14 @@
                 value=gast.Str('c'))
         ])
 
+    source, _ = compiler.ast_to_source(node, indentation='  ')
     self.assertEqual(
         textwrap.dedent("""
             if 1:
               a = b
             else:
               a = 'c'
-        """).strip(),
-        compiler.ast_to_source(node, indentation='  ').strip())
+        """).strip(), source.strip())
 
   def test_ast_to_object(self):
     node = gast.FunctionDef(
diff --git a/tensorflow/contrib/autograph/pyct/origin_info.py b/tensorflow/contrib/autograph/pyct/origin_info.py
index b3c6a43..614e346 100644
--- a/tensorflow/contrib/autograph/pyct/origin_info.py
+++ b/tensorflow/contrib/autograph/pyct/origin_info.py
@@ -17,10 +17,16 @@
 from __future__ import division
 from __future__ import print_function
 
-from collections import namedtuple
+import collections
+
+import gast
+
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.python.util import tf_inspect
 
 
-class CodeLocation(namedtuple('CodeLocation', ('file_path', 'line_number'))):
+class CodeLocation(
+    collections.namedtuple('CodeLocation', ('file_path', 'line_number'))):
   """Location of a line of code.
 
   Attributes:
@@ -31,8 +37,9 @@
 
 
 class OriginInfo(
-    namedtuple('OriginInfo', ('file_path', 'function_name', 'line_number',
-                              'column_offset', 'source_code_line'))):
+    collections.namedtuple('OriginInfo',
+                           ('file_path', 'function_name', 'line_number',
+                            'column_offset', 'source_code_line'))):
   """Container for information about the source code before conversion.
 
   Instances of this class contain information about the source code that
@@ -50,3 +57,44 @@
     """
     return (self.file_path, self.line_number, self.function_name,
             self.source_code_line)
+
+
+# TODO(znado): Consider refactoring this into a Visitor.
+def resolve(node, source, function=None):
+  """Adds an origin information to all nodes inside the body of function.
+
+  Args:
+    node: The AST node for the function whose body nodes will be annotated.
+    source: Text, the source code string for the function whose body nodes will
+      be annotated.
+    function: Callable, the function whose body nodes will be annotated with
+      an OriginInfo annotation with key anno.Basic.ORIGIN. If it is None, then
+      only the line numbers and column offset will be set in the annotation,
+      with the rest of the information being None.
+  """
+  if function:
+    _, function_lineno = tf_inspect.getsourcelines(function)
+    function_filepath = tf_inspect.getsourcefile(function)
+  else:
+    function_lineno = None
+    function_filepath = None
+  source_lines = source.split('\n')
+  for n in gast.walk(node):
+    if hasattr(n, 'lineno'):
+      # n.lineno is relative to the start of the enclosing function, so we
+      # need to offset it by the line at which the function is defined.
+      source_code_line = source_lines[n.lineno - 1]
+      if function:
+        source_lineno = n.lineno + function_lineno - 1
+        function_name = function.__name__
+      else:
+        source_lineno = n.lineno
+        function_name = None
+      anno.setanno(
+          n, anno.Basic.ORIGIN,
+          OriginInfo(function_filepath, function_name, source_lineno,
+                     n.col_offset, source_code_line))
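+
+
+# A usage sketch (parse_str is from the pyct parser module, which is not
+# imported in this file):
+#
+#   source = tf_inspect.getsource(f)
+#   node = parser.parse_str(source)
+#   resolve(node, source, function=f)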
diff --git a/tensorflow/contrib/autograph/pyct/qual_names.py b/tensorflow/contrib/autograph/pyct/qual_names.py
index da07013..fb81404 100644
--- a/tensorflow/contrib/autograph/pyct/qual_names.py
+++ b/tensorflow/contrib/autograph/pyct/qual_names.py
@@ -30,6 +30,7 @@
 import gast
 
 from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import parser
 
 
 class Symbol(collections.namedtuple('Symbol', ['name'])):
@@ -89,7 +90,8 @@
       if not isinstance(base, (str, StringLiteral, NumberLiteral)):
         # TODO(mdan): Require Symbol instead of string.
         raise ValueError(
-            'For simple QNs, base must be a string or a Literal object.')
+            'for simple QNs, base must be a string or a Literal object;'
+            ' got "%s" instead' % type(base))
       assert '.' not in base and '[' not in base and ']' not in base
       self._parent = None
       self.qn = (base,)
@@ -113,6 +115,22 @@
     return self._parent
 
   @property
+  def owner_set(self):
+    """Returns all the symbols (simple or composite) that own this QN.
+
+    In other words, if this symbol was modified, the symbols in the owner set
+    may also be affected.
+
+    Examples:
+      'a.b[c.d]' has two owners, 'a' and 'a.b'
+    """
+    owners = set()
+    if self.has_attr() or self.has_subscript():
+      owners.add(self.parent)
+      owners.update(self.parent.owner_set)
+    return owners
+
+  @property
   def support_set(self):
     """Returns the set of simple symbols that this QN relies on.
 
@@ -122,7 +140,7 @@
 
     Examples:
       'a.b' has only one support symbol, 'a'
-      'a[i]' has two roots, 'a' and 'i'
+      'a[i]' has two support symbols, 'a' and 'i'
     """
     # TODO(mdan): This might be the set of Name nodes in the AST. Track those?
     roots = set()
@@ -231,3 +249,9 @@
 
 def resolve(node):
   return QnResolver().visit(node)
+
+
+def from_str(qn_str):
+  node = parser.parse_expression(qn_str)
+  node = resolve(node)
+  return anno.getanno(node, anno.Basic.QN)
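+
+
+# For example (sketch):
+#
+#   qn = from_str('a.b[c]')
+#   assert qn.owner_set == {from_str('a'), from_str('a.b')}
+#   assert qn.support_set == {from_str('a'), from_str('c')}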
diff --git a/tensorflow/contrib/autograph/pyct/qual_names_test.py b/tensorflow/contrib/autograph/pyct/qual_names_test.py
index 264afd5..c793c2b 100644
--- a/tensorflow/contrib/autograph/pyct/qual_names_test.py
+++ b/tensorflow/contrib/autograph/pyct/qual_names_test.py
@@ -30,6 +30,15 @@
 
 class QNTest(test.TestCase):
 
+  def test_from_str(self):
+    a = QN('a')
+    b = QN('b')
+    a_dot_b = QN(a, attr='b')
+    a_sub_b = QN(a, subscript=b)
+    self.assertEqual(qual_names.from_str('a.b'), a_dot_b)
+    self.assertEqual(qual_names.from_str('a'), a)
+    self.assertEqual(qual_names.from_str('a[b]'), a_sub_b)
+
   def test_basic(self):
     a = QN('a')
     self.assertEqual(a.qn, ('a',))
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/BUILD b/tensorflow/contrib/autograph/pyct/static_analysis/BUILD
index bcf2dac..92eacba 100644
--- a/tensorflow/contrib/autograph/pyct/static_analysis/BUILD
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/BUILD
@@ -19,8 +19,9 @@
     srcs = [
         "activity.py",
         "annos.py",
-        "cfg.py",
         "live_values.py",
+        "liveness.py",
+        "reaching_definitions.py",
         "type_info.py",
     ],
     srcs_version = "PY2AND3",
@@ -28,6 +29,7 @@
     deps = [
         "//tensorflow/contrib/autograph/pyct",
         "//tensorflow/contrib/autograph/utils",
+        "//tensorflow/python:util",
         "@gast_archive//:gast",
     ],
 )
@@ -46,23 +48,32 @@
 )
 
 py_test(
-    name = "cfg_test",
-    srcs = ["cfg_test.py"],
+    name = "live_values_test",
+    srcs = ["live_values_test.py"],
     srcs_version = "PY2AND3",
     tags = ["no_windows"],
     deps = [
         ":static_analysis",
         "//tensorflow/contrib/autograph/pyct",
         "//tensorflow/python:client_testlib",
-        "@gast_archive//:gast",
     ],
 )
 
 py_test(
-    name = "live_values_test",
-    srcs = ["live_values_test.py"],
+    name = "liveness_test",
+    srcs = ["liveness_test.py"],
     srcs_version = "PY2AND3",
-    tags = ["no_windows"],
+    deps = [
+        ":static_analysis",
+        "//tensorflow/contrib/autograph/pyct",
+        "//tensorflow/python:client_testlib",
+    ],
+)
+
+py_test(
+    name = "reaching_definitions_test",
+    srcs = ["reaching_definitions_test.py"],
+    srcs_version = "PY2AND3",
     deps = [
         ":static_analysis",
         "//tensorflow/contrib/autograph/pyct",
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/activity.py b/tensorflow/contrib/autograph/pyct/static_analysis/activity.py
index 4d7b0cb..a0182da 100644
--- a/tensorflow/contrib/autograph/pyct/static_analysis/activity.py
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/activity.py
@@ -12,7 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-"""Activity analysis."""
+"""Activity analysis.
+
+Requires qualified name annotations (see qual_names.py).
+"""
 
 from __future__ import absolute_import
 from __future__ import division
@@ -59,9 +62,10 @@
     self.parent = parent
     self.add_unknown_symbols = add_unknown_symbols
     self.modified = set()
+    # TODO(mdan): Completely remove this.
     self.created = set()
     self.used = set()
-    self.params = set()
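+    # Maps each parameter to the entity that owns it (e.g. a FunctionDef).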
+    self.params = {}
     self.returned = set()
 
   # TODO(mdan): Rename to `locals`
@@ -106,37 +110,23 @@
     self.modified |= other.modified
     self.created |= other.created
     self.used |= other.used
-    self.params |= other.params
+    self.params.update(other.params)
     self.returned |= other.returned
 
   def has(self, name):
-    if name in self.modified or name in self.params:
+    if name in self.modified:
       return True
     elif self.parent is not None:
       return self.parent.has(name)
     return False
 
-  def is_modified_since_entry(self, name):
-    if name in self.modified:
-      return True
-    elif self.parent is not None and not self.isolated:
-      return self.parent.is_modified_since_entry(name)
-    return False
-
-  def is_param(self, name):
-    if name in self.params:
-      return True
-    elif self.parent is not None and not self.isolated:
-      return self.parent.is_param(name)
-    return False
-
   def mark_read(self, name):
     self.used.add(name)
     if self.parent is not None and name not in self.created:
       self.parent.mark_read(name)
 
-  def mark_param(self, name):
-    self.params.add(name)
+  def mark_param(self, name, owner):
+    self.params[name] = owner
 
   def mark_creation(self, name, writes_create_symbol=False):
     """Mark a qualified name as created."""
@@ -226,37 +216,56 @@
     elif isinstance(node.ctx, gast.Param):
       # Param contexts appear in function defs, so they have the meaning of
       # defining a variable.
-      # TODO(mdan): This may be incorrect with nested functions.
-      # For nested functions, we'll have to add the notion of hiding args from
-      # the parent scope, not writing to them.
-      self.scope.mark_creation(qn)
-      self.scope.mark_param(qn)
+      self.scope.mark_write(qn)
+      self.scope.mark_param(qn, self.enclosing_entities[-1])
     else:
       raise ValueError('Unknown context %s for node %s.' % (type(node.ctx), qn))
 
     anno.setanno(node, NodeAnno.IS_LOCAL, self.scope.has(qn))
-    anno.setanno(node, NodeAnno.IS_MODIFIED_SINCE_ENTRY,
-                 self.scope.is_modified_since_entry(qn))
-    anno.setanno(node, NodeAnno.IS_PARAM, self.scope.is_param(qn))
 
     if self._in_return_statement:
       self.scope.mark_returned(qn)
 
+  def _enter_scope(self, isolated):
+    self.scope = Scope(self.scope, isolated=isolated)
+
+  def _exit_scope(self):
+    self.scope = self.scope.parent
+
+  def _process_statement(self, node):
+    self._enter_scope(False)
+    node = self.generic_visit(node)
+    anno.setanno(node, anno.Static.SCOPE, self.scope)
+    self._exit_scope()
+    return node
+
+  def visit_Expr(self, node):
+    return self._process_statement(node)
+
+  def visit_Return(self, node):
+    self._in_return_statement = True
+    node = self._process_statement(node)
+    self._in_return_statement = False
+    return node
+
+  def visit_Assign(self, node):
+    return self._process_statement(node)
+
   def visit_AugAssign(self, node):
     # Special rules for AugAssign. In Assign, the target is only written,
     # but in AugAssign (e.g. a += b), the target is both read and written.
     self._in_aug_assign = True
-    self.generic_visit(node)
+    node = self._process_statement(node)
     self._in_aug_assign = False
     return node
 
   def visit_Name(self, node):
-    self.generic_visit(node)
+    node = self.generic_visit(node)
     self._track_symbol(node)
     return node
 
   def visit_Attribute(self, node):
-    self.generic_visit(node)
+    node = self.generic_visit(node)
     if self._in_constructor and self._node_sets_self_attribute(node):
       self._track_symbol(
           node, composite_writes_alter_parent=True, writes_create_symbol=True)
@@ -265,44 +274,38 @@
     return node
 
   def visit_Subscript(self, node):
-    self.generic_visit(node)
+    node = self.generic_visit(node)
     # Subscript writes (e.g. a[b] = "value") are considered to modify
     # both the element itself (a[b]) and its parent (a).
-    self._track_symbol(node, composite_writes_alter_parent=True)
+    self._track_symbol(node)
     return node
 
   def visit_Print(self, node):
-    current_scope = self.scope
-    args_scope = Scope(current_scope)
-    self.scope = args_scope
-    for n in node.values:
-      self.visit(n)
-    anno.setanno(node, NodeAnno.ARGS_SCOPE, args_scope)
-    self.scope = current_scope
+    self._enter_scope(False)
+    node.values = self.visit_block(node.values)
+    anno.setanno(node, anno.Static.SCOPE, self.scope)
+    anno.setanno(node, NodeAnno.ARGS_SCOPE, self.scope)
+    self._exit_scope()
     return node
 
+  def visit_Assert(self, node):
+    return self._process_statement(node)
+
   def visit_Call(self, node):
-    current_scope = self.scope
-    args_scope = Scope(current_scope, isolated=False)
-    self.scope = args_scope
-    for n in node.args:
-      self.visit(n)
+    self._enter_scope(False)
+    node.args = self.visit_block(node.args)
+    node.keywords = self.visit_block(node.keywords)
     # TODO(mdan): Account starargs, kwargs
-    for n in node.keywords:
-      self.visit(n)
-    anno.setanno(node, NodeAnno.ARGS_SCOPE, args_scope)
-    self.scope = current_scope
-    self.visit(node.func)
+    anno.setanno(node, NodeAnno.ARGS_SCOPE, self.scope)
+    self._exit_scope()
+    node.func = self.visit(node.func)
     return node
 
   def _process_block_node(self, node, block, scope_name):
-    current_scope = self.scope
-    block_scope = Scope(current_scope, isolated=False)
-    self.scope = block_scope
-    for n in block:
-      self.visit(n)
-    anno.setanno(node, scope_name, block_scope)
-    self.scope = current_scope
+    self._enter_scope(False)
+    block = self.visit_block(block)
+    anno.setanno(node, scope_name, self.scope)
+    self._exit_scope()
     return node
 
   def _process_parallel_blocks(self, parent, children):
@@ -321,94 +324,75 @@
       self.scope.merge_from(after_child)
     return parent
 
+  def visit_arguments(self, node):
+    return self._process_statement(node)
+
   def visit_FunctionDef(self, node):
-    if self.scope:
-      qn = qual_names.QN(node.name)
-      self.scope.mark_write(qn)
-    current_scope = self.scope
-    body_scope = Scope(current_scope, isolated=True)
-    self.scope = body_scope
-    self.generic_visit(node)
-    anno.setanno(node, NodeAnno.BODY_SCOPE, body_scope)
-    self.scope = current_scope
+    # The FunctionDef node itself has a Scope object that tracks the creation
+    # of its name, along with the usage of any decorators accompanying it.
+    self._enter_scope(False)
+    node.decorator_list = self.visit_block(node.decorator_list)
+    self.scope.mark_write(qual_names.QN(node.name))
+    anno.setanno(node, anno.Static.SCOPE, self.scope)
+    self._exit_scope()
+
+    # A separate Scope tracks the actual function definition.
+    self._enter_scope(True)
+    node.args = self.visit(node.args)
+
+    # Track the body separately. This is for compatibility reasons; it may
+    # not be strictly needed.
+    self._enter_scope(False)
+    node.body = self.visit_block(node.body)
+    anno.setanno(node, NodeAnno.BODY_SCOPE, self.scope)
+    self._exit_scope()
+
+    self._exit_scope()
     return node
 
   def visit_With(self, node):
-    current_scope = self.scope
-    with_scope = Scope(current_scope, isolated=False)
-    self.scope = with_scope
-    self.generic_visit(node)
-    anno.setanno(node, NodeAnno.BODY_SCOPE, with_scope)
-    self.scope = current_scope
+    self._enter_scope(False)
+    node = self.generic_visit(node)
+    anno.setanno(node, NodeAnno.BODY_SCOPE, self.scope)
+    self._exit_scope()
     return node
 
-  def visit_If(self, node):
-    current_scope = self.scope
-    cond_scope = Scope(current_scope, isolated=False)
-    self.scope = cond_scope
-    self.visit(node.test)
-    anno.setanno(node, NodeAnno.COND_SCOPE, cond_scope)
-    self.scope = current_scope
+  def visit_withitem(self, node):
+    return self._process_statement(node)
 
+  def visit_If(self, node):
+    self._enter_scope(False)
+    node.test = self.visit(node.test)
+    anno.setanno(node, NodeAnno.COND_SCOPE, self.scope)
+    anno.setanno(node.test, anno.Static.SCOPE, self.scope)
+    self._exit_scope()
     node = self._process_parallel_blocks(node,
                                          ((node.body, NodeAnno.BODY_SCOPE),
                                           (node.orelse, NodeAnno.ORELSE_SCOPE)))
     return node
 
   def visit_For(self, node):
-    self.visit(node.target)
-    self.visit(node.iter)
+    self._enter_scope(False)
+    node.target = self.visit(node.target)
+    node.iter = self.visit(node.iter)
+    anno.setanno(node.iter, anno.Static.SCOPE, self.scope)
+    self._exit_scope()
     node = self._process_parallel_blocks(node,
                                          ((node.body, NodeAnno.BODY_SCOPE),
                                           (node.orelse, NodeAnno.ORELSE_SCOPE)))
     return node
 
   def visit_While(self, node):
-    current_scope = self.scope
-    cond_scope = Scope(current_scope, isolated=False)
-    self.scope = cond_scope
-    self.visit(node.test)
-    anno.setanno(node, NodeAnno.COND_SCOPE, cond_scope)
-    self.scope = current_scope
-
+    self._enter_scope(False)
+    node.test = self.visit(node.test)
+    anno.setanno(node, NodeAnno.COND_SCOPE, self.scope)
+    anno.setanno(node.test, anno.Static.SCOPE, self.scope)
+    self._exit_scope()
     node = self._process_parallel_blocks(node,
                                          ((node.body, NodeAnno.BODY_SCOPE),
                                           (node.orelse, NodeAnno.ORELSE_SCOPE)))
     return node
 
-  def visit_Return(self, node):
-    self._in_return_statement = True
-    node = self.generic_visit(node)
-    self._in_return_statement = False
-    return node
-
-
-def get_read(node, context):
-  """Return the variable names as QNs (qual_names.py) read by this statement."""
-  analyzer = ActivityAnalyzer(context, None, True)
-  analyzer.visit(node)
-  return analyzer.scope.used
-
-
-def get_updated(node, context):
-  """Return the variable names created or mutated by this statement.
-
-  This function considers assign statements, augmented assign statements, and
-  the targets of for loops, as well as function arguments.
-  For example, `x[0] = 2` will return `x`, `x, y = 3, 4` will return `x` and
-  `y`, `for i in range(x)` will return `i`, etc.
-  Args:
-    node: An AST node
-    context: An EntityContext instance
-
-  Returns:
-    A set of variable names (QNs, see qual_names.py) of all the variables
-    created or mutated.
-  """
-  analyzer = ActivityAnalyzer(context, None, True)
-  analyzer.visit(node)
-  return analyzer.scope.created | analyzer.scope.modified
-
 
 def resolve(node, context, parent_scope=None):
   return ActivityAnalyzer(context, parent_scope).visit(node)
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/activity_test.py b/tensorflow/contrib/autograph/pyct/static_analysis/activity_test.py
index bc22be0..e940516 100644
--- a/tensorflow/contrib/autograph/pyct/static_analysis/activity_test.py
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/activity_test.py
@@ -52,18 +52,18 @@
     other = activity.Scope(None)
     other.copy_from(scope)
 
-    self.assertTrue(QN('foo') in other.created)
+    self.assertTrue(QN('foo') in other.modified)
 
     scope.mark_write(QN('bar'))
     scope.copy_from(other)
 
-    self.assertFalse(QN('bar') in scope.created)
+    self.assertFalse(QN('bar') in scope.modified)
 
     scope.mark_write(QN('bar'))
     scope.merge_from(other)
 
-    self.assertTrue(QN('bar') in scope.created)
-    self.assertFalse(QN('bar') in other.created)
+    self.assertTrue(QN('bar') in scope.modified)
+    self.assertFalse(QN('bar') in other.modified)
 
   def test_copy_of(self):
     scope = activity.Scope(None)
@@ -157,7 +157,8 @@
     """Assert the scope contains specific used, modified & created variables."""
     self.assertSymbolSetsAre(used, scope.used, 'read')
     self.assertSymbolSetsAre(modified, scope.modified, 'modified')
-    self.assertSymbolSetsAre(created, scope.created, 'created')
+    # Created is deprecated; we no longer verify it.
+    # self.assertSymbolSetsAre(created, scope.created, 'created')
 
   def test_print_statement(self):
 
@@ -215,12 +216,6 @@
         (),
         (),
     )
-    self.assertScopeIsRmc(
-        anno.getanno(call_node, NodeAnno.ARGS_SCOPE).parent,
-        ('a', 'a.b', 'a.c', 'a.d', 'foo'),
-        ('a.c',),
-        ('a',),
-    )
 
   def test_call_args_subscripts(self):
 
@@ -241,12 +236,6 @@
         (),
         (),
     )
-    self.assertScopeIsRmc(
-        anno.getanno(call_node, NodeAnno.ARGS_SCOPE).parent,
-        ('a', 'a[0]', 'a[b]', 'a[c]', 'b', 'c', 'foo'),
-        ('b', 'c'),
-        ('a', 'b', 'c'),
-    )
 
   def test_while(self):
 
@@ -362,20 +351,20 @@
     self.assertScopeIsRmc(
         anno.getanno(if_node, NodeAnno.BODY_SCOPE),
         ('a', 'b', 'c', 'a[c]'),
-        ('a', 'a[b]', 'd'),
+        ('a[b]', 'd'),
         ('d',),
     )
     # TODO(mdan): Should subscript writes (a[0] = 1) be considered to read "a"?
     self.assertScopeIsRmc(
         anno.getanno(if_node, NodeAnno.ORELSE_SCOPE),
         ('a', 'e'),
-        ('a', 'a[0]', 'd'),
+        ('a[0]', 'd'),
         ('d',),
     )
     self.assertScopeIsRmc(
         anno.getanno(if_node, NodeAnno.ORELSE_SCOPE).parent,
         ('a', 'b', 'c', 'd', 'e', 'a[c]'),
-        ('a', 'd', 'a[b]', 'a[0]'),
+        ('d', 'a[b]', 'a[0]'),
         ('a', 'b', 'c', 'd', 'e'),
     )
 
@@ -416,10 +405,6 @@
     fn_def_node = node.body[0].body[0]
 
     self.assertScopeIsRmc(
-        anno.getanno(fn_def_node,
-                     NodeAnno.BODY_SCOPE).parent, ('b', 'i', 'f', 'c', 'a'),
-        ('f', 'b', 'c', 'i'), ('f', 'a', 'b', 'c', 'i'))
-    self.assertScopeIsRmc(
         anno.getanno(fn_def_node, NodeAnno.BODY_SCOPE), ('x', 'y'), ('y',), (
             'x',
             'y',
@@ -452,7 +437,7 @@
     self.assertScopeIsRmc(
         anno.getanno(fn_node, NodeAnno.BODY_SCOPE),
         ('a', 'a[0]'),
-        ('a', 'a[0]'),
+        ('a[0]',),
         ('a',),
     )
 
@@ -518,47 +503,6 @@
         anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('b',), (('')),
         (('a', 'b')))
 
-  def test_get_read(self):
-
-    def test_fn(x, y):
-      z = test_fn(x, y)
-      return z
-
-    node, ctx = self._parse_and_analyze(test_fn)
-    node = node.body[0].body[0]
-    read_vars = activity.get_read(node, ctx)
-    self.assertEqual(read_vars, set(map(qual_names.QN, ('test_fn', 'x', 'y'))))
-
-    def test_fn2(x, y, z):
-      z += test_fn2(x, y, z)
-      return z
-
-    node, ctx = self._parse_and_analyze(test_fn2)
-    node = node.body[0].body[0]
-    read_vars = activity.get_read(node, ctx)
-    self.assertEqual(read_vars,
-                     set(map(qual_names.QN, ('test_fn2', 'x', 'y', 'z'))))
-
-  def test_get_updated(self):
-
-    def test_fn(x, y):
-      z = test_fn(x, y)
-      return z
-
-    node, ctx = self._parse_and_analyze(test_fn)
-    node = node.body[0].body[0]
-    updated_vars = activity.get_updated(node, ctx)
-    self.assertEqual(updated_vars, set(map(qual_names.QN, ('z'))))
-
-    def test_fn2(x, y, z):
-      z += test_fn2(x, y, z)
-      return z
-
-    node, ctx = self._parse_and_analyze(test_fn2)
-    node = node.body[0].body[0]
-    updated_vars = activity.get_updated(node, ctx)
-    self.assertEqual(updated_vars, set(map(qual_names.QN, ('z'))))
-
 
 if __name__ == '__main__':
   test.main()
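The updated expectations above encode a behavior change: a subscript or attribute write such as `a[0] = 1` now marks only the composite symbol (`a[0]`) as modified, no longer its parent `a`. A toy visitor with the same convention, independent of the autograph code (WriteScope and _qn are illustrative names; index rendering is deliberately elided):

import ast

class WriteScope(ast.NodeVisitor):
  """Records written symbols as QN-like strings."""

  def __init__(self):
    self.modified = set()

  def visit_Assign(self, node):
    for target in node.targets:
      self.modified.add(_qn(target))
    # Deliberately not descending into targets: writing a[0] is not
    # considered a modification of 'a' itself under the new semantics.

def _qn(node):
  if isinstance(node, ast.Name):
    return node.id
  if isinstance(node, ast.Subscript):
    return '%s[...]' % _qn(node.value)  # index rendering elided
  if isinstance(node, ast.Attribute):
    return '%s.%s' % (_qn(node.value), node.attr)
  raise NotImplementedError(type(node))

scope = WriteScope()
scope.visit(ast.parse('a[0] = 1'))
assert scope.modified == {'a[...]'}  # 'a' itself is absent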
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/annos.py b/tensorflow/contrib/autograph/pyct/static_analysis/annos.py
index b929b35..5eefecf 100644
--- a/tensorflow/contrib/autograph/pyct/static_analysis/annos.py
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/annos.py
@@ -21,6 +21,9 @@
 from enum import Enum
 
 
+# TODO(mdan): Remove.
+
+
 class NoValue(Enum):
 
   def __repr__(self):
@@ -50,10 +53,3 @@
   ORELSE_SCOPE = (
       'The scope for the orelse body of a statement (False branch for if '
       'statements, orelse body for loops).')
-
-  # Type and Value annotations
-  # Type annotations are represented by objects of type type_info.Type.
-  STATIC_INFO = (
-      'The type or value information that should be asserted about the entity '
-      'referenced by the symbol holding this annotation, irrespective of the '
-      'execution context.')
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/cfg.py b/tensorflow/contrib/autograph/pyct/static_analysis/cfg.py
deleted file mode 100644
index 4acc4ed..0000000
--- a/tensorflow/contrib/autograph/pyct/static_analysis/cfg.py
+++ /dev/null
@@ -1,446 +0,0 @@
-# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Control flow graph analysis.
-
-Given a Python AST we construct a control flow graph, with edges both to the
-next and previous statements (so it can easily walk the graph both ways). Its
-nodes contain the AST of the statements. It can then perform forward or backward
-analysis on this CFG.
-"""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from collections import namedtuple
-import functools
-import operator
-
-import gast
-
-from tensorflow.contrib.autograph.pyct import anno
-from tensorflow.contrib.autograph.pyct.static_analysis import activity
-
-
-class CfgNode(object):
-  """A node in the CFG."""
-  __slots__ = ['next', 'value', 'prev']
-
-  def __init__(self, value):
-    self.next = set()
-    self.prev = set()
-    self.value = value
-
-
-class Cfg(namedtuple('Cfg', ['entry', 'exit'])):
-  """A Control Flow Graph.
-
-  Each statement is represented as a node. For control flow statements such
-  as conditionals and loops the conditional itself is a node which either
-  branches or cycles, respectively.
-  Attributes:
-    entry: The entry node, which contains the `gast.arguments` node of the
-        function definition.
-    exit: The exit node. This node is special because it has no value (i.e. no
-        corresponding AST node). This is because Python functions can have
-        multiple return statements.
-  """
-  pass
-
-
-class CfgBuilder(gast.NodeVisitor):
-  """Construct a control flow graph.
-
-  Construct a CFG starting from a FunctionDef node.
-  Usage:
-    cfg_obj = CfgBuilder().build_cfg(fndef_node)
-  """
-
-  def __init__(self):
-    # The current leaves of the CFG
-    self.current_leaves = []
-    # TODO(alexbw): generalize to break, return, continue, yield, etc.
-    # A stack of lists, tracking continue statements
-    self.continue_ = []
-    # A stack of lists tracking break nodes
-    self.break_ = []
-
-  def set_current_leaves(self, cfg_node):
-    """Link this cfg_node to the current leaves.
-
-    This is the central function for building the CFG. It links the current
-    head cfg_nodes to the passed cfg_node. It then resets the head to the
-    passed cfg_node.
-
-    Args:
-      cfg_node: A CfgNode instance.
-    """
-    for head in self.current_leaves:
-      head.next.add(cfg_node)
-      # While we're linking the CFG forward, add backlinks
-      cfg_node.prev.add(head)
-    self.current_leaves = [cfg_node]
-
-  def build_cfg(self, node):
-    """Build a CFG for a function.
-
-    Implementation of building a CFG for dataflow analysis. See, e.g.:
-    https://www.seas.harvard.edu/courses/cs252/2011sp/slides/Lec02-Dataflow.pdf
-
-    Args:
-      node: A function definition the body of which to analyze.
-    Returns:
-      A CFG object.
-    Raises:
-      TypeError: If the input is not a function definition.
-    """
-    if not isinstance(node, gast.FunctionDef):
-      raise TypeError('input must be a function definition')
-    entry_cfg_node = CfgNode(node.args)
-    self.current_leaves = [entry_cfg_node]
-    self.visit_statements(node.body)
-    exit_cfg_node = CfgNode(None)
-    self.set_current_leaves(exit_cfg_node)
-    return Cfg(entry_cfg_node, exit_cfg_node)
-
-  def visit_statements(self, nodes):
-    for node in nodes:
-      # Check for control flow
-      if isinstance(node, (gast.For, gast.While, gast.If, gast.Try, gast.Break,
-                           gast.Continue, gast.With)):
-        self.visit(node)
-      else:
-        expr = CfgNode(node)
-        self.set_current_leaves(expr)
-
-  def generic_visit(self, node):
-    raise ValueError('unknown control flow')
-
-  def visit_If(self, node):
-    # TODO(alexbw): change this to use immutable tuples instead of lists
-    # The current head will hold the conditional
-    test = CfgNode(node.test)
-    self.set_current_leaves(test)
-    # Handle the body
-    self.visit_statements(node.body)
-    body_exit = self.current_leaves
-    self.current_leaves = [test]
-    # Handle the orelse
-    self.visit_statements(node.orelse)
-    self.current_leaves.extend(body_exit)
-
-  def visit_While(self, node):
-    test = CfgNode(node.test)
-    self.set_current_leaves(test)
-    # Start a new level of nesting
-    self.break_.append([])
-    self.continue_.append([])
-    # Handle the body
-    self.visit_statements(node.body)
-    body_exit = self.current_leaves
-    self.current_leaves.extend(self.continue_.pop())
-    self.set_current_leaves(test)
-    # Handle the orelse
-    self.visit_statements(node.orelse)
-    # The break statements and the test go to the next node
-    self.current_leaves.extend(self.break_.pop())
-    # Body and orelse statements can reach out of the loop
-    self.current_leaves.extend(body_exit)
-
-  def visit_For(self, node):
-    iter_ = CfgNode(node.iter)
-    self.set_current_leaves(iter_)
-    self.break_.append([])
-    self.continue_.append([])
-    self.visit_statements(node.body)
-    body_exit = self.current_leaves
-    self.current_leaves.extend(self.continue_.pop())
-    self.set_current_leaves(iter_)
-    # Handle the orelse
-    self.visit_statements(node.orelse)
-    # The break statements and the test go to the next node
-    self.current_leaves.extend(self.break_.pop())
-    # Body and orelse statements can reach out of the loop
-    self.current_leaves.extend(body_exit)
-
-  def visit_Break(self, node):
-    self.break_[-1].extend(self.current_leaves)
-    self.current_leaves[:] = []
-
-  def visit_Continue(self, node):
-    self.continue_[-1].extend(self.current_leaves)
-    self.current_leaves[:] = []
-
-  def visit_Try(self, node):
-    self.visit_statements(node.body)
-    body = self.current_leaves
-    handlers = []
-    for handler in node.handlers:
-      self.current_leaves = body[:]
-      self.visit_statements(handler.body)
-      handlers.extend(self.current_leaves)
-    self.current_leaves = body
-    self.visit_statements(node.orelse)
-    self.current_leaves = handlers + self.current_leaves
-    self.visit_statements(node.finalbody)
-
-  def visit_With(self, node):
-    for item in node.items:
-      self.set_current_leaves(CfgNode(item))
-    self.visit_statements(node.body)
-
-
-# TODO(alexbw): once CFG analysis occurs at a block level,
-# this extra class will not be necessary
-class PropagateAnalysis(gast.NodeVisitor):
-  """Port analysis annotations from statements to their enclosing blocks."""
-
-  def __init__(self, analysis):
-    self.transfer_fn = analysis.transfer_fn
-    self.in_label = analysis.in_label
-    self.out_label = analysis.out_label
-    super(PropagateAnalysis, self).__init__()
-
-  def visit_If(self, node):
-    # Depth-first.
-    self.generic_visit(node)
-    incoming = anno.getanno(node.body[0], self.in_label)
-    incoming |= anno.getanno(node.test, self.in_label)
-    outgoing = anno.getanno(node.body[-1], self.out_label)
-    outgoing |= anno.getanno(node.test, self.out_label)
-    if node.orelse:
-      orelse_outgoing = anno.getanno(node.orelse[-1], self.out_label)
-      outgoing = self.transfer_fn(outgoing, orelse_outgoing)
-    anno.setanno(node, self.in_label, incoming)
-    anno.setanno(node, self.out_label, outgoing)
-
-  def visit_For(self, node):
-    self.generic_visit(node)
-    incoming = set(anno.getanno(node.body[0], self.in_label))
-    incoming -= set((anno.getanno(node.target, anno.Basic.QN),))
-    outgoing = anno.getanno(node.body[-1], self.out_label)
-    if node.orelse:
-      orelse_outgoing = anno.getanno(node.orelse[-1], self.out_label)
-      outgoing = self.transfer_fn(outgoing, orelse_outgoing)
-    anno.setanno(node, self.in_label, frozenset(incoming))
-    anno.setanno(node, self.out_label, outgoing)
-
-  def visit_While(self, node):
-    self.generic_visit(node)
-    incoming = anno.getanno(node.body[0], self.in_label)
-    incoming |= anno.getanno(node.test, self.in_label)
-    outgoing = anno.getanno(node.body[-1], self.out_label)
-    if node.orelse:
-      orelse_outgoing = anno.getanno(node.orelse[-1], self.out_label)
-      outgoing = self.transfer_fn(outgoing, orelse_outgoing)
-    anno.setanno(node, self.in_label, incoming)
-    anno.setanno(node, self.out_label, outgoing)
-
-  def visit_With(self, node):
-    self.generic_visit(node)
-    incoming = anno.getanno(node.body[0], self.in_label)
-    for item in node.items:
-      incoming |= anno.getanno(item, self.in_label)
-    outgoing = anno.getanno(node.body[-1], self.out_label)
-    anno.setanno(node, self.in_label, incoming)
-    anno.setanno(node, self.out_label, outgoing)
-
-
-# TODO(alexbw): Abstract the CFG walking machinery into a superclass
-# which is parameterized on which fields it selects when walking.
-# TODO(alexbw): Abstract the application of dataflow analysis
-class Forward(object):
-  """Forward analysis on CFG.
-
-  Args:
-    label: A name for this analysis e.g. 'active' for activity analysis. The AST
-      nodes in the CFG will be given annotations 'name_in', 'name_out',
-      'name_gen' and 'name_kill' which contain the incoming values, outgoing
-      values, values generated by the statement, and values deleted by the
-      statement respectively.
-    transfer_fn: Either the AND or OR operator. If the AND operator is used it
-      turns into forward must analysis (i.e. a value will only be carried
-      forward if it appears on all incoming paths). The OR operator means that
-      forward may analysis is done (i.e. the union of incoming values will be
-      taken).
-  """
-
-  def __init__(self, label, source_info, transfer_fn=operator.or_):
-    self.transfer_fn = transfer_fn
-    self.source_info = source_info
-    self.out_label = label + '_out'
-    self.in_label = label + '_in'
-    self.gen_label = label + '_gen'
-    self.kill_label = label + '_kill'
-
-  # TODO(alexbw): see if we can simplify by visiting breadth-first
-  def visit(self, node):
-    """Depth-first walking the CFG, applying dataflow info propagation."""
-    # node.value is None only for the exit CfgNode.
-    if not node.value:
-      return
-
-    if anno.hasanno(node.value, self.out_label):
-      before = hash(anno.getanno(node.value, self.out_label))
-    else:
-      before = None
-    preds = [
-        anno.getanno(pred.value, self.out_label)
-        for pred in node.prev
-        if anno.hasanno(pred.value, self.out_label)
-    ]
-    if preds:
-      incoming = functools.reduce(self.transfer_fn, preds[1:], preds[0])
-    else:
-      incoming = frozenset()
-    anno.setanno(node.value, self.in_label, incoming)
-    gen, kill = self.get_gen_kill(node, incoming)
-    anno.setanno(node.value, self.gen_label, gen)
-    anno.setanno(node.value, self.kill_label, kill)
-    anno.setanno(node.value, self.out_label, (incoming - kill) | gen)
-
-    if hash(anno.getanno(node.value, self.out_label)) != before:
-      for succ in node.next:
-        self.visit(succ)
-
-  def get_gen_kill(self, cfg_node, incoming):
-    """Calculate Gen and Kill properties of a CFG node in dataflow analysis.
-
-    A function which takes the CFG node as well as a set of incoming
-    values. It must return a set of newly generated values by the statement as
-    well as a set of deleted (killed) values.
-
-    Args:
-      cfg_node: A CfgNode instance.
-      incoming:
-    """
-    raise NotImplementedError()
-
-
-class Backward(Forward):
-  """Backward analysis on CFG."""
-
-  def visit(self, cfg_node):
-    # cfg_node.value is None for the exit node, which will be visited only once
-    if not cfg_node.value:
-      for pred in cfg_node.prev:
-        self.visit(pred)
-      return
-
-    if anno.hasanno(cfg_node.value, self.in_label):
-      before = hash(anno.getanno(cfg_node.value, self.in_label))
-    else:
-      before = None
-    succs = [
-        anno.getanno(succ.value, self.in_label)
-        for succ in cfg_node.next
-        if anno.hasanno(succ.value, self.in_label)
-    ]
-    if succs:
-      incoming = functools.reduce(self.transfer_fn, succs[1:], succs[0])
-    else:
-      incoming = frozenset()
-    anno.setanno(cfg_node.value, self.out_label, incoming)
-    gen, kill = self.get_gen_kill(cfg_node, incoming)
-    anno.setanno(cfg_node.value, self.gen_label, gen)
-    anno.setanno(cfg_node.value, self.kill_label, kill)
-    anno.setanno(cfg_node.value, self.in_label, (incoming - kill) | gen)
-    if hash(anno.getanno(cfg_node.value, self.in_label)) != before:
-      for pred in cfg_node.prev:
-        self.visit(pred)
-
-
-def run_analyses(node, analyses):
-  """Perform dataflow analysis on all functions within an AST.
-
-  Args:
-    node: An AST node on which to run dataflow analysis.
-    analyses: Either an instance of the Forward or Backward dataflow analysis
-      class, or a list or tuple of them.
-
-  Returns:
-    node: The node, but now with annotations on the AST nodes containing the
-    results of the dataflow analyses.
-  """
-  if not isinstance(analyses, (tuple, list)):
-    analyses = (analyses,)
-  for analysis in analyses:
-    if not isinstance(analysis, (Forward, Backward)):
-      raise TypeError('not a valid forward analysis object')
-
-  for child_node in gast.walk(node):
-    if isinstance(child_node, gast.FunctionDef):
-      cfg_obj = CfgBuilder().build_cfg(child_node)
-      for analysis in analyses:
-        if isinstance(analysis, Backward):
-          analysis.visit(cfg_obj.exit)
-        elif isinstance(analysis, Forward):
-          analysis.visit(cfg_obj.entry)
-  for analysis in analyses:
-    PropagateAnalysis(analysis).visit(node)
-  return node
-
-
-class Liveness(Backward):
-  """Perform a liveness analysis.
-
-  Each statement is annotated with a set of variables that may be used
-  later in the program.
-  """
-
-  def __init__(self, source_info):
-    super(Liveness, self).__init__('live', source_info)
-
-  def get_gen_kill(self, node, _):
-    # A variable's parents are live if it is live
-    # e.g. x is live if x.y is live. This means gen needs to return
-    # all parents of a variable (if it's an Attribute or Subscript).
-    # This doesn't apply to kill (e.g. del x.y doesn't affect liveness of x)
-    gen = activity.get_read(node.value, self.source_info)
-    gen = functools.reduce(lambda left, right: left | right.support_set, gen,
-                           gen)
-    kill = activity.get_updated(node.value, self.source_info)
-    return gen, kill
-
-
-class ReachingDefinitions(Forward):
-  """Perform reaching definition analysis.
-
-  Each statement is annotated with a set of (variable, definition) pairs.
-  """
-
-  def __init__(self, source_info):
-    super(ReachingDefinitions, self).__init__('definitions', source_info)
-
-  def get_gen_kill(self, node, incoming):
-    definitions = activity.get_updated(node.value, self.source_info)
-    gen = frozenset((id_, node.value) for id_ in definitions)
-    kill = frozenset(def_ for def_ in incoming if def_[0] in definitions)
-    return gen, kill
-
-
-class Defined(Forward):
-  """Perform defined variable analysis.
-
-  Each statement is annotated with a set of variables which are guaranteed to
-  be defined at that point.
-  """
-
-  def __init__(self, source_info):
-    super(Defined, self).__init__(
-        'defined', source_info, transfer_fn=operator.and_)
-
-  def get_gen_kill(self, node, _):
-    gen = activity.get_updated(node.value, self.source_info)
-    return gen, frozenset()
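The deleted Forward/Backward classes implemented the textbook iterative gen/kill scheme that the new cfg.GraphVisitor-based analyses replace. A self-contained sketch of the forward variant over a toy three-node CFG (node names and the gen/kill sets below are made up for illustration):

def forward_fixpoint(succ, gen, kill):
  """Iterates out[n] = gen[n] | (in[n] - kill[n]) to a fixpoint."""
  preds = {n: set() for n in succ}
  for n, successors in succ.items():
    for s in successors:
      preds[s].add(n)
  in_ = {n: set() for n in succ}
  out = {n: set() for n in succ}
  changed = True
  while changed:
    changed = False
    for n in succ:
      in_[n] = set().union(*(out[p] for p in preds[n]))
      new_out = gen[n] | (in_[n] - kill[n])
      if new_out != out[n]:
        out[n] = new_out
        changed = True
  return in_, out

# x = 1; if c: x = 2; print(x)  ->  nodes A -> {B, C}, B -> {C}
succ = {'A': {'B', 'C'}, 'B': {'C'}, 'C': set()}
gen = {'A': {('x', 'A')}, 'B': {('x', 'B')}, 'C': set()}
kill = {'A': set(), 'B': {('x', 'A')}, 'C': set()}
in_, out = forward_fixpoint(succ, gen, kill)
assert in_['C'] == {('x', 'A'), ('x', 'B')}  # both defs of x reach the print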
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/cfg_test.py b/tensorflow/contrib/autograph/pyct/static_analysis/cfg_test.py
deleted file mode 100644
index 428ebbed..0000000
--- a/tensorflow/contrib/autograph/pyct/static_analysis/cfg_test.py
+++ /dev/null
@@ -1,303 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Tests for cfg module."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import functools
-
-import gast
-
-from tensorflow.contrib.autograph.pyct import anno
-from tensorflow.contrib.autograph.pyct import parser
-from tensorflow.contrib.autograph.pyct import qual_names
-from tensorflow.contrib.autograph.pyct import transformer
-from tensorflow.contrib.autograph.pyct.static_analysis import cfg
-from tensorflow.python.platform import test
-
-
-class CFGTest(test.TestCase):
-
-  def _parse_and_analyze(self, test_fn):
-    node, source = parser.parse_entity(test_fn)
-    entity_info = transformer.EntityInfo(
-        source_code=source,
-        source_file=None,
-        namespace={},
-        arg_values=None,
-        arg_types=None,
-        owner_type=None)
-    node = qual_names.resolve(node)
-    return node, entity_info
-
-  def _check_anno_matches(self, node, anno_name, var_names):
-    if isinstance(var_names, str):
-      var_names = (var_names,)
-    qual_vars = set()
-    for var_name in var_names:
-      if isinstance(var_name, str):
-        if '[' in var_name or ']' in var_name:
-          raise ValueError('Annotation matching not supported with subscript.')
-        if '.' not in var_name:
-          qual_vars.add(qual_names.QN(var_name))
-        else:
-          attrs = var_name.split('.')
-          this_qn = functools.reduce(qual_names.QN, attrs[1:],
-                                     qual_names.QN(attrs[0]))
-          qual_vars.add(this_qn)
-    self.assertEqual(anno.getanno(node, anno_name), qual_vars)
-
-  def test_reaching(self):
-
-    def f(x):
-      print(x)
-      while True:
-        x = x
-        x = x
-      return x
-
-    node, ctx = self._parse_and_analyze(f)
-    cfg.run_analyses(node, cfg.ReachingDefinitions(ctx))
-    body = node.body[0].body
-    # Only the argument reaches the expression
-    def_in = anno.getanno(body[0], 'definitions_in')
-    # One element, x, from arguments
-    self.assertEqual(set(type(d[1]) for d in def_in), set((gast.arguments,)))
-
-    while_body = body[1].body
-    def_in = anno.getanno(while_body[0], 'definitions_in')
-    # One definition, two possible sources.
-    # - One from an assignment (if the loop is entered)
-    # - The other from the arguments (if loop is not entered)
-    self.assertEqual(
-        set(type(d[1]) for d in def_in), set((gast.arguments, gast.Assign)))
-
-    def_in = anno.getanno(while_body[1], 'definitions_in')
-    # If we've reached this line, the only reaching definition of x is the
-    # Assign node in previous line
-    self.assertEqual(set(type(d[1]) for d in def_in), set((gast.Assign,)))
-
-    def_in = anno.getanno(body[2], 'definitions_in')
-    # Same situation as while_body[0]
-    self.assertEqual(
-        set(type(d[1]) for d in def_in), set((gast.arguments, gast.Assign)))
-
-  def test_defined(self):
-
-    def f(x):
-      if x:
-        y = 2  # pylint: disable=unused-variable
-      return x
-
-    node, ctx = self._parse_and_analyze(f)
-    cfg.run_analyses(node, cfg.Defined(ctx))
-    body = node.body[0].body
-    # only x is for sure defined at the end
-    self._check_anno_matches(body[1], 'defined_in', 'x')
-    # at the end of the if body both x and y are defined
-    if_body = body[0].body
-    self._check_anno_matches(if_body[0], 'defined_out', ('x', 'y'))
-
-  def _get_live_annotated_fnbody(self, f):
-    node, ctx = self._parse_and_analyze(f)
-    cfg.run_analyses(node, cfg.Liveness(ctx))
-    body = node.body[0].body
-    return body
-
-  def test_live_straightline(self):
-
-    def f1(x):
-      a = g(x)  # pylint: disable=undefined-variable
-      b = h(a)  # pylint: disable=undefined-variable, unused-variable
-      return x
-
-    body = self._get_live_annotated_fnbody(f1)
-    self._check_anno_matches(body[1], 'live_in', ('a', 'h', 'x'))
-    self._check_anno_matches(body[2], 'live_in', ('x'))
-    self._check_anno_matches(body[0], 'live_in', ('g', 'h', 'x'))
-    self._check_anno_matches(body[2], 'live_out', ())
-
-  def test_live_stacked_conds_with_else(self):
-
-    def f2(x, a):  # pylint: disable=unused-argument
-      if a > 0:  # x should not be live
-        x = 0
-      if a > 1:
-        x = 1
-      else:
-        x = 2
-
-    body = self._get_live_annotated_fnbody(f2)
-    self._check_anno_matches(body[0], 'live_in', ('a'))
-    self._check_anno_matches(body[1], 'live_in', ('a'))
-
-  def test_live_stacked_conds(self):
-
-    def f3(x, a):
-      if a > 0:  # x and a should be live
-        x = 0
-      if a > 1:  # x and a should be live_in
-        x = 1
-      return x  # x should be live
-
-    body = self._get_live_annotated_fnbody(f3)
-    self._check_anno_matches(body[0], 'live_in', ('a', 'x'))
-    self._check_anno_matches(body[1], 'live_in', ('a', 'x'))
-    self._check_anno_matches(body[2], 'live_in', ('x'))
-
-  def test_live_possibly_unused_cond(self):
-
-    def f4(x, a):
-      if a > 0:  # x should be live
-        x = 0
-      x += 1
-
-    body = self._get_live_annotated_fnbody(f4)
-    self._check_anno_matches(body[0], 'live_in', ('x', 'a'))
-    self._check_anno_matches(body[1], 'live_in', ('x'))
-
-  def test_live_attribute_in_cond(self):
-
-    def f5(x, a):
-      if a > 0:  # x.y should be live
-        x.y = 0
-      return x.y
-
-    body = self._get_live_annotated_fnbody(f5)
-    self._check_anno_matches(body[0], 'live_in', ('x', 'x.y', 'a'))
-
-  def test_live_noop(self):
-
-    def f6(x):
-      return x  # should this cause x.* to be live?
-
-    body = self._get_live_annotated_fnbody(f6)
-    self._check_anno_matches(body[0], 'live_in', ('x'))
-
-  def test_live_loop(self):
-
-    def f7(x, n):
-      for i in range(n):
-        x += i
-      return x
-
-    body = self._get_live_annotated_fnbody(f7)
-    self._check_anno_matches(body[0], 'live_in', ('x', 'n', 'range'))
-    self._check_anno_matches(body[1], 'live_in', ('x'))
-
-  def test_live_context_manager(self):
-
-    def f8(x, f):
-      with f:
-        x += 1
-
-    body = self._get_live_annotated_fnbody(f8)
-    self._check_anno_matches(body[0], 'live_in', ('f', 'x'))
-
-  def test_node_equality(self):
-    node_a = gast.parse('y = x').body[0]
-    node_b = gast.parse('y = x').body[0]
-    self.assertNotEqual(node_a, node_b)
-
-  def test_nested_functions_defined(self):
-
-    def f(x):
-      y = x * 2
-
-      def g(z):
-        return z + y
-
-      return g(x)
-
-    node, ctx = self._parse_and_analyze(f)
-    cfg.run_analyses(node, cfg.Defined(ctx))
-
-    body = node.body[0].body
-    self.assertEqual(
-        anno.getanno(body[2], 'defined_in'),
-        frozenset(map(qual_names.QN, ('g', 'x', 'y'))))
-
-    # TODO(alexbw): CFG analysis doesn't currently cross FunctionDef boundaries.
-    # NOTE: 'z' is easy to find, but 'y' is  not identified as
-    # defined, because CFG analysis is applied with each function separately.
-    # fndef_body = body[1].body
-    # self.assertEqual(
-    #     anno.getanno(fndef_body[0], 'defined_in'),
-    #     frozenset(map(qual_names.QN, ('z', 'y'))))
-
-  def test_nested_functions_dont_leak_definitions(self):
-
-    def f(x):
-      print(x)
-
-      def g():
-        y = 2
-        return y
-
-      return g()  # y is not defined here
-
-    node, ctx = self._parse_and_analyze(f)
-    cfg.run_analyses(node, cfg.Defined(ctx))
-    body = node.body[0].body
-    self.assertEqual(
-        anno.getanno(body[2], 'defined_in'),
-        frozenset(map(qual_names.QN, ('x', 'g'))))
-
-  def test_loop_else(self):
-
-    # Disabling useless-else-on-loop error, because 'break' and 'continue'
-    # canonicalization are a separate analysis pass, and here we test
-    # the CFG analysis in isolation.
-    def for_orelse(x):
-      y = 0
-      for i in range(len(x)):
-        x += i
-      else:  # pylint: disable=useless-else-on-loop
-        y = 1
-      return x, y
-
-    def while_orelse(x, i):
-      y = 0
-      while x < 10:
-        x += i
-      else:  # pylint: disable=useless-else-on-loop
-        y = 1
-      return x, y
-
-    for f in (for_orelse, while_orelse):
-      node, ctx = self._parse_and_analyze(f)
-      cfg.run_analyses(node, cfg.ReachingDefinitions(ctx))
-      body = node.body[0].body
-      return_node = body[-1]
-      reaching_defs = anno.getanno(return_node, 'definitions_in')
-
-      # Y could be defined by Assign(Num(0)) or Assign(Num(1))
-      # X could be defined as an argument or an AugAssign.
-      y_defs = [node for var, node in reaching_defs if str(var) == 'y']
-      x_defs = [node for var, node in reaching_defs if str(var) == 'x']
-
-      self.assertEqual(set((gast.Assign,)), set(type(def_) for def_ in y_defs))
-      self.assertEqual(set((0, 1)), set(def_.value.n for def_ in y_defs))
-      self.assertEqual(len(y_defs), 2)
-      self.assertEqual(
-          set((gast.arguments, gast.AugAssign)),
-          set(type(def_) for def_ in x_defs))
-      self.assertEqual(len(x_defs), 2)
-
-
-if __name__ == '__main__':
-  test.main()
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/live_values.py b/tensorflow/contrib/autograph/pyct/static_analysis/live_values.py
index 9ccb98f..3280206 100644
--- a/tensorflow/contrib/autograph/pyct/static_analysis/live_values.py
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/live_values.py
@@ -16,7 +16,7 @@
 
 Live values are extracted from the known execution context.
 
-Requires activity analysis annotations.
+Requires activity and reaching definitions analyses.
 """
 
 from __future__ import absolute_import
@@ -45,14 +45,12 @@
   def visit_Name(self, node):
     self.generic_visit(node)
     if isinstance(node.ctx, gast.Load):
-      assert anno.hasanno(node, NodeAnno.IS_LOCAL), node
-      symbol_is_local = anno.getanno(node, NodeAnno.IS_LOCAL)
-      assert anno.hasanno(node, NodeAnno.IS_MODIFIED_SINCE_ENTRY), node
-      symbol_is_modified = anno.getanno(node, NodeAnno.IS_MODIFIED_SINCE_ENTRY)
-      assert anno.hasanno(node, NodeAnno.IS_PARAM), node
-      symbol_is_param = anno.getanno(node, NodeAnno.IS_PARAM)
+      defs = anno.getanno(node, anno.Static.DEFINITIONS, ())
 
-      if not symbol_is_local and not symbol_is_param:
+      is_defined = bool(defs)
+      has_single_def = len(defs) == 1
+
+      if not is_defined:
         if node.id in self.literals:
           anno.setanno(node, 'live_val', self.literals[node.id])
         elif node.id in self.entity_info.namespace:
@@ -79,11 +77,13 @@
         # TODO(mdan): Attempt to trace its value through the local chain.
         # TODO(mdan): Use type annotations as fallback.
 
-      if not symbol_is_modified:
-        if node.id in self.entity_info.arg_values:
-          obj = self.entity_info.arg_values[node.id]
-          anno.setanno(node, 'live_val', obj)
-          anno.setanno(node, 'fqn', (obj.__class__.__name__,))
+      if has_single_def:
+        def_, = defs
+        if def_.param_of is self.enclosing_entities[0]:
+          if node.id in self.entity_info.arg_values:
+            obj = self.entity_info.arg_values[node.id]
+            anno.setanno(node, 'live_val', obj)
+            anno.setanno(node, 'fqn', (obj.__class__.__name__,))
     return node
 
   def visit_Attribute(self, node):
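The rewritten visit_Name above replaces the IS_LOCAL/IS_PARAM/IS_MODIFIED_SINCE_ENTRY flags with reaching-definition queries: a name may be resolved to its argument value only when exactly one definition reaches it and that definition was created by the enclosing function's parameter list. A hedged, stand-alone rendering of that check (Definition here is a stub, not the real class):

class Definition(object):
  """Stub mirroring reaching_definitions.Definition's param_of attribute."""

  def __init__(self, param_of=None):
    self.param_of = param_of

def is_unmodified_param(defs, enclosing_fn):
  if len(defs) != 1:
    return False  # undefined, or reassigned on at least one path
  (single_def,) = defs
  return single_def.param_of is enclosing_fn

fn = object()  # stands in for the enclosing FunctionDef node
param_def = Definition(param_of=fn)
assert is_unmodified_param((param_def,), fn)
assert not is_unmodified_param((param_def, Definition()), fn)  # two defs reach
assert not is_unmodified_param((), fn)                         # undefined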
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/live_values_test.py b/tensorflow/contrib/autograph/pyct/static_analysis/live_values_test.py
index 38af792..fe30511 100644
--- a/tensorflow/contrib/autograph/pyct/static_analysis/live_values_test.py
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/live_values_test.py
@@ -21,11 +21,13 @@
 import six
 
 from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import cfg
 from tensorflow.contrib.autograph.pyct import parser
 from tensorflow.contrib.autograph.pyct import qual_names
 from tensorflow.contrib.autograph.pyct import transformer
 from tensorflow.contrib.autograph.pyct.static_analysis import activity
 from tensorflow.contrib.autograph.pyct.static_analysis import live_values
+from tensorflow.contrib.autograph.pyct.static_analysis import reaching_definitions
 from tensorflow.contrib.autograph.pyct.static_analysis import type_info
 from tensorflow.python.framework import constant_op
 from tensorflow.python.platform import test
@@ -48,7 +50,10 @@
         arg_types=arg_types,
         owner_type=None)
     node = qual_names.resolve(node)
+    graphs = cfg.build(node)
     node = activity.resolve(node, entity_info)
+    node = reaching_definitions.resolve(node, entity_info, graphs,
+                                        reaching_definitions.Definition)
     node = live_values.resolve(node, entity_info, literals)
     node = type_info.resolve(node, entity_info)
     node = live_values.resolve(node, entity_info, literals)
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/liveness.py b/tensorflow/contrib/autograph/pyct/static_analysis/liveness.py
new file mode 100644
index 0000000..bf29d86
--- /dev/null
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/liveness.py
@@ -0,0 +1,200 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Live variable analysis.
+
+This analysis attaches to each control flow statement the set of symbols that
+are live at its exit.
+
+Requires activity analysis.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import gast
+
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import cfg
+from tensorflow.contrib.autograph.pyct import transformer
+from tensorflow.contrib.autograph.pyct.static_analysis import annos
+
+
+class Analyzer(cfg.GraphVisitor):
+  """CFG visitor that performs liveness analysis at statement level."""
+
+  def __init__(self, graph):
+    super(Analyzer, self).__init__(graph)
+    # This allows communicating that nodes generate extra symbols,
+    # e.g. those that a function definition closes over.
+    self.extra_gen = {}
+
+  def init_state(self, _):
+    return set()
+
+  def visit_node(self, node):
+    prev_live_in = self.in_[node]
+
+    if anno.hasanno(node.ast_node, anno.Static.SCOPE):
+      node_scope = anno.getanno(node.ast_node, anno.Static.SCOPE)
+
+      gen = node_scope.used | self.extra_gen.get(node.ast_node, frozenset())
+      # TODO(mdan): verify whether composites' parents need to be added.
+      # E.g. if x.y is live whether x needs to be added. Theoretically the
+      # activity analysis should have both so that wouldn't be needed.
+      kill = node_scope.modified
+
+      live_out = set()
+      for n in node.next:
+        live_out |= self.in_[n]
+      live_in = gen | (live_out - kill)
+
+    else:
+      # Nodes that don't have a scope annotation are assumed not to touch any
+      # symbols.
+      # Such a Name node is a literal, e.g. False.
+      assert isinstance(node.ast_node,
+                        (gast.Name, gast.Continue, gast.Break)), type(
+                            node.ast_node)
+      live_in = prev_live_in
+      live_out = live_in
+
+    self.in_[node] = live_in
+    self.out[node] = live_out
+
+    # TODO(mdan): Move this to the superclass?
+    return prev_live_in != live_in
+
+
+class WholeTreeAnalyzer(transformer.Base):
+  """Runs liveness analysis on each of the functions defined in the AST.
+
+  If a function defines other local functions, those will have separate CFGs.
+  However, dataflow analysis needs to tie up these CFGs to properly emulate the
+  effect of closures. In the case of liveness, the parent function's live
+  variables must account for the variables that are live at the entry of each
+  subfunction. For example:
+
+    def foo():
+      # baz is live here
+      def bar():
+        print(baz)
+
+  This analyzer runs liveness analysis on each individual function, accounting
+  for the effect above.
+  """
+
+  def __init__(self, source_info, graphs):
+    super(WholeTreeAnalyzer, self).__init__(source_info)
+    self.graphs = graphs
+    self.current_analyzer = None
+    self.analyzers = {}
+
+  def visit_FunctionDef(self, node):
+    parent_analyzer = self.current_analyzer
+    subgraph = self.graphs[node]
+
+    # Postorder tree processing makes this a bit complicated:
+    #  1. construct an analyzer object and put it on the stack
+    #  2. recursively walk the subtree; this will initialize the analyzer's
+    #     in_ state properly (done in a block below)
+    #  3. run the final analysis
+    analyzer = Analyzer(subgraph)
+    self.current_analyzer = analyzer
+    node = self.generic_visit(node)
+    analyzer.visit_reverse()
+
+    if parent_analyzer is not None:
+      # Wire the state between the two subgraphs' analyzers.
+      child_in_state = analyzer.in_[subgraph.entry]
+      # Exception: symbols modified in the child function are local to it
+      body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
+      for qn in body_scope.modified:
+        # Note: a function modifying the symbol doesn't make that symbol
+        # live at the function's entry. In fact when that happens it is
+        # probably a case of undefined assignment, like this:
+        #
+        #   bar = 0
+        #   def foo():
+        #     print(bar)  # bar is undefined here!
+        #     bar = 1
+        #
+        # Hence we use discard and not remove below.
+        child_in_state.discard(qn)
+      parent_analyzer.extra_gen[node] = frozenset(child_in_state)
+
+    self.analyzers[node] = analyzer
+    self.current_analyzer = parent_analyzer
+    return node
+
+  def visit_nonlocal(self, node):
+    raise NotImplementedError()
+
+  def visit_global(self, node):
+    raise NotImplementedError()
+
+
+class Annotator(transformer.Base):
+  """AST visitor that annotates each control flow block with live symbols."""
+
+  # Note: additional nodes may be added as needed.
+
+  def __init__(self, source_info, cross_function_analyzer):
+    super(Annotator, self).__init__(source_info)
+    self.cross_function_analyzer = cross_function_analyzer
+    self.current_analyzer = None
+
+  def visit_FunctionDef(self, node):
+    parent_analyzer = self.current_analyzer
+    self.current_analyzer = self.cross_function_analyzer.analyzers[node]
+
+    node = self.generic_visit(node)
+    self.current_analyzer = parent_analyzer
+    return node
+
+  def _aggregate_successors_live_in(self, node):
+    successors = self.current_analyzer.graph.stmt_next[node]
+    node_live_out = set()
+    for s in successors:
+      node_live_out.update(self.current_analyzer.in_[s])
+    anno.setanno(node, anno.Static.LIVE_VARS_OUT, frozenset(node_live_out))
+    node = self.generic_visit(node)
+    return node
+
+  def visit_If(self, node):
+    return self._aggregate_successors_live_in(node)
+
+  def visit_For(self, node):
+    return self._aggregate_successors_live_in(node)
+
+  def visit_While(self, node):
+    return self._aggregate_successors_live_in(node)
+
+
+def resolve(node, source_info, graphs):
+  """Resolves the live symbols at the exit of control flow statements.
+
+  Args:
+    node: ast.AST
+    source_info: transformer.SourceInfo
+    graphs: Dict[ast.FunctionDef, cfg.Graph]
+  Returns:
+    ast.AST
+  """
+  cross_function_analyzer = WholeTreeAnalyzer(source_info, graphs)
+  node = cross_function_analyzer.visit(node)
+  visitor = Annotator(source_info, cross_function_analyzer)
+  node = visitor.visit(node)
+  return node
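Analyzer.visit_node above applies the standard backward transfer function live_in = gen | (live_out - kill). A minimal sketch of that recurrence, folded right-to-left over straight-line statements (the gen/kill pairs below are made up):

def liveness_backward(stmts):
  """stmts: list of (gen, kill) symbol-set pairs, in program order."""
  live = set()  # nothing is live after the last statement
  live_in = [None] * len(stmts)
  for i in reversed(range(len(stmts))):
    gen, kill = stmts[i]
    live = gen | (live - kill)  # the same transfer function as visit_node
    live_in[i] = set(live)
  return live_in

# a = g(x); b = h(a); return x
stmts = [({'g', 'x'}, {'a'}), ({'h', 'a'}, {'b'}), ({'x'}, set())]
assert liveness_backward(stmts) == [{'g', 'h', 'x'}, {'h', 'a', 'x'}, {'x'}]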
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/liveness_test.py b/tensorflow/contrib/autograph/pyct/static_analysis/liveness_test.py
new file mode 100644
index 0000000..d53adb2
--- /dev/null
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/liveness_test.py
@@ -0,0 +1,149 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for liveness module."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import cfg
+from tensorflow.contrib.autograph.pyct import parser
+from tensorflow.contrib.autograph.pyct import qual_names
+from tensorflow.contrib.autograph.pyct import transformer
+from tensorflow.contrib.autograph.pyct.static_analysis import activity
+from tensorflow.contrib.autograph.pyct.static_analysis import liveness
+from tensorflow.python.platform import test
+
+
+class LivenessTest(test.TestCase):
+
+  def _parse_and_analyze(self, test_fn):
+    node, source = parser.parse_entity(test_fn)
+    entity_info = transformer.EntityInfo(
+        source_code=source,
+        source_file=None,
+        namespace={},
+        arg_values=None,
+        arg_types=None,
+        owner_type=None)
+    node = qual_names.resolve(node)
+    node = activity.resolve(node, entity_info)
+    graphs = cfg.build(node)
+    liveness.resolve(node, entity_info, graphs)
+    return node
+
+  def assertHasLiveOut(self, node, expected):
+    live_out = anno.getanno(node, anno.Static.LIVE_VARS_OUT)
+    live_out_str = set(str(v) for v in live_out)
+    if not expected:
+      expected = ()
+    if not isinstance(expected, tuple):
+      expected = (expected,)
+    self.assertSetEqual(live_out_str, set(expected))
+
+  def test_stacked_if(self):
+
+    def test_fn(x, a):
+      if a > 0:
+        x = 0
+      if a > 1:
+        x = 1
+      return x
+
+    node = self._parse_and_analyze(test_fn)
+    fn_body = node.body[0].body
+
+    self.assertHasLiveOut(fn_body[0], ('a', 'x'))
+    self.assertHasLiveOut(fn_body[1], 'x')
+
+  def test_stacked_if_else(self):
+
+    def test_fn(x, a):
+      if a > 0:
+        x = 0
+      if a > 1:
+        x = 1
+      else:
+        x = 2
+      return x
+
+    node = self._parse_and_analyze(test_fn)
+    fn_body = node.body[0].body
+
+    self.assertHasLiveOut(fn_body[0], 'a')
+    self.assertHasLiveOut(fn_body[1], 'x')
+
+  def test_for_basic(self):
+
+    def test_fn(x, a):
+      for i in range(a):
+        x += i
+      return x
+
+    node = self._parse_and_analyze(test_fn)
+    fn_body = node.body[0].body
+
+    self.assertHasLiveOut(fn_body[0], 'x')
+
+  def test_attributes(self):
+
+    def test_fn(x, a):
+      if a > 0:
+        x.y = 0
+      return x.y
+
+    node = self._parse_and_analyze(test_fn)
+    fn_body = node.body[0].body
+
+    self.assertHasLiveOut(fn_body[0], ('x.y', 'x'))
+
+  def test_nested_functions(self):
+
+    def test_fn(a, b):
+      if b:
+        a = []
+
+      def foo():
+        return a
+
+      foo()
+
+    node = self._parse_and_analyze(test_fn)
+    fn_body = node.body[0].body
+
+    self.assertHasLiveOut(fn_body[0], 'a')
+
+  def test_nested_functions_isolation(self):
+
+    def test_fn(b):
+      if b:
+        a = 0  # pylint:disable=unused-variable
+
+      def child():
+        max(a)  # pylint:disable=used-before-assignment
+        a = 1
+        return a
+
+      child()
+
+    node = self._parse_and_analyze(test_fn)
+    fn_body = node.body[0].body
+
+    self.assertHasLiveOut(fn_body[0], 'max')
+
+
+if __name__ == '__main__':
+  test.main()
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/reaching_definitions.py b/tensorflow/contrib/autograph/pyct/static_analysis/reaching_definitions.py
new file mode 100644
index 0000000..9a84f12
--- /dev/null
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/reaching_definitions.py
@@ -0,0 +1,301 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Reaching definition analysis.
+
+This analysis attaches to each symbol a set of Definition objects, one for
+each distinct definition that may reach it. The Definition objects are
+mutable, and subsequent analyses may use them to record additional data such
+as static type and value information.
+The analysis also attaches to control flow statements the set of symbols
+defined at their entry.
+
+Requires activity analysis.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import gast
+
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import cfg
+from tensorflow.contrib.autograph.pyct import transformer
+from tensorflow.contrib.autograph.pyct.static_analysis import annos
+
+
+class Definition(object):
+  """Definition objects describe a unique definition of a variable.
+
+  Subclasses of this may be used by passing an appropriate factory function to
+  resolve.
+
+  Attributes:
+    param_of: Optional[ast.AST]
+  """
+
+  def __init__(self):
+    self.param_of = None
+
+  def __repr__(self):
+    return '%s[%d]' % (self.__class__.__name__, id(self))
+
+
+class _NodeState(object):
+  """Abstraction for the state of the CFG walk for reaching definition analysis.
+
+  This is a value type. Only implements the strictly necessary operators.
+
+  Attributes:
+    value: Dict[qual_names.QN, Set[Definition, ...]], the defined symbols and
+        their possible definitions
+  """
+
+  def __init__(self, init_from=None):
+    if init_from:
+      if isinstance(init_from, _NodeState):
+        self.value = {
+            s: set(other_infos) for s, other_infos in init_from.value.items()
+        }
+      elif isinstance(init_from, dict):
+        self.value = {s: set((init_from[s],)) for s in init_from}
+      else:
+        assert False, init_from
+    else:
+      self.value = {}
+
+  def __eq__(self, other):
+    if frozenset(self.value.keys()) != frozenset(other.value.keys()):
+      return False
+    ret = all(self.value[s] == other.value[s] for s in self.value)
+    return ret
+
+  def __ne__(self, other):
+    return not self.__eq__(other)
+
+  def __or__(self, other):
+    assert isinstance(other, _NodeState)
+    result = _NodeState(self)
+    for s, other_infos in other.value.items():
+      if s in result.value:
+        result.value[s].update(other_infos)
+      else:
+        result.value[s] = set(other_infos)
+    return result
+
+  def __sub__(self, other):
+    assert isinstance(other, set)
+    result = _NodeState(self)
+    for s in other:
+      result.value.pop(s, None)
+    return result
+
+  def __repr__(self):
+    return 'NodeState[%s]=%s' % (id(self), repr(self.value))
+
+
+class Analyzer(cfg.GraphVisitor):
+  """CFG visitor that determines reaching definitions at statement level."""
+
+  def __init__(self, graph, definition_factory):
+    self._definition_factory = definition_factory
+    super(Analyzer, self).__init__(graph)
+    # This allows communicating that nodes have extra reaching definitions,
+    # e.g. those that a function closes over.
+    self.extra_in = {}
+
+    self.gen_map = {}
+
+  def init_state(self, _):
+    return _NodeState()
+
+  def visit_node(self, node):
+    prev_defs_out = self.out[node]
+
+    defs_in = _NodeState(self.extra_in.get(node.ast_node, None))
+    for n in node.prev:
+      defs_in |= self.out[n]
+
+    if anno.hasanno(node.ast_node, anno.Static.SCOPE):
+      node_scope = anno.getanno(node.ast_node, anno.Static.SCOPE)
+      # The definition objects created by each node must be singletons because
+      # their ids are used in equality checks.
+      if node not in self.gen_map:
+        node_symbols = {}
+        for s in node_scope.modified:
+          def_ = self._definition_factory()
+          if s in node_scope.params:
+            def_.param_of = node_scope.params[s]
+          node_symbols[s] = def_
+        self.gen_map[node] = _NodeState(node_symbols)
+
+      gen = self.gen_map[node]
+      kill = node_scope.modified
+      defs_out = gen | (defs_in - kill)
+
+    else:
+      # Nodes that don't have a scope annotation are assumed not to touch any
+      # symbols.
+      # Such a Name node is a literal name, e.g. False.
+      # This can also happen if activity.py forgot to annotate the node with a
+      # scope object.
+      assert isinstance(
+          node.ast_node,
+          (gast.Name, gast.Break, gast.Continue, gast.Raise)), (node.ast_node,
+                                                                node)
+      defs_out = defs_in
+
+    self.in_[node] = defs_in
+    self.out[node] = defs_out
+
+    # TODO(mdan): Move this to the superclass?
+    return prev_defs_out != defs_out
+
+
+class TreeAnnotator(transformer.Base):
+  """AST visitor that annotates each symbol name with its reaching definitions.
+
+  Simultaneously, the visitor runs the dataflow analysis on each function node,
+  accounting for the effect of closures. For example:
+
+    def foo():
+      bar = 1
+      def baz():
+        # bar = 1 reaches here
+  """
+
+  def __init__(self, source_info, graphs, definition_factory):
+    super(TreeAnnotator, self).__init__(source_info)
+    self.definition_factory = definition_factory
+    self.graphs = graphs
+    self.current_analyzer = None
+    self.current_cfg_node = None
+
+  def visit_FunctionDef(self, node):
+    parent_analyzer = self.current_analyzer
+    subgraph = self.graphs[node]
+
+    # Preorder tree processing:
+    #  1. if this is a child function, the parent was already analyzed and it
+    #     has the proper state value for the subgraph's entry
+    #  2. analyze the current function body
+    #  3. recursively walk the subtree; child functions will be processed
+    analyzer = Analyzer(subgraph, self.definition_factory)
+    if parent_analyzer is not None:
+      # Wire the state between the two subgraphs' analyzers.
+      parent_out_state = parent_analyzer.out[parent_analyzer.graph.index[node]]
+      # Exception: symbols modified in the child function are local to it
+      body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
+      parent_out_state -= body_scope.modified
+      analyzer.extra_in[node.args] = parent_out_state
+
+    # Complete the analysis for the local function and annotate its body.
+    analyzer.visit_forward()
+
+    # Recursively process any remaining subfunctions.
+    self.current_analyzer = analyzer
+    # Note: not visiting name, decorator_list and returns because they don't
+    # apply to this analysis.
+    # TODO(mdan): Should we still process the function name?
+    node.args = self.visit(node.args)
+    node.body = self.visit_block(node.body)
+    self.current_analyzer = parent_analyzer
+
+    return node
+
+  def visit_nonlocal(self, node):
+    raise NotImplementedError()
+
+  def visit_global(self, node):
+    raise NotImplementedError()
+
+  def visit_Name(self, node):
+    if self.current_analyzer is None:
+      # Names may appear outside function defs - for example in class
+      # definitions.
+      return node
+
+    analyzer = self.current_analyzer
+    cfg_node = self.current_cfg_node
+
+    assert cfg_node is not None, 'name node outside of any statement?'
+
+    qn = anno.getanno(node, anno.Basic.QN)
+    if isinstance(node.ctx, gast.Load):
+      anno.setanno(node, anno.Static.DEFINITIONS,
+                   tuple(analyzer.in_[cfg_node].value.get(qn, ())))
+    else:
+      anno.setanno(node, anno.Static.DEFINITIONS,
+                   tuple(analyzer.out[cfg_node].value.get(qn, ())))
+
+    return node
+
+  def _aggregate_predecessors_defined_in(self, node):
+    preds = self.current_analyzer.graph.stmt_prev[node]
+    node_defined_in = set()
+    for p in preds:
+      node_defined_in |= set(self.current_analyzer.out[p].value.keys())
+    anno.setanno(node, anno.Static.DEFINED_VARS_IN, frozenset(node_defined_in))
+
+  def visit_If(self, node):
+    self._aggregate_predecessors_defined_in(node)
+    return self.generic_visit(node)
+
+  def visit_For(self, node):
+    self._aggregate_predecessors_defined_in(node)
+
+    # Manually accounting for the shortcoming described in
+    # cfg.AstToCfg.visit_For.
+    parent = self.current_cfg_node
+    self.current_cfg_node = self.current_analyzer.graph.index[node.iter]
+    node.target = self.visit(node.target)
+    self.current_cfg_node = parent
+
+    node.iter = self.visit(node.iter)
+    node.body = self.visit_block(node.body)
+    node.orelse = self.visit_block(node.orelse)
+
+    return node
+
+  def visit_While(self, node):
+    self._aggregate_predecessors_defined_in(node)
+    return self.generic_visit(node)
+
+  def visit(self, node):
+    parent = self.current_cfg_node
+
+    if (self.current_analyzer is not None and
+        node in self.current_analyzer.graph.index):
+      self.current_cfg_node = self.current_analyzer.graph.index[node]
+    node = super(TreeAnnotator, self).visit(node)
+
+    self.current_cfg_node = parent
+    return node
+
+
+def resolve(node, source_info, graphs, definition_factory):
+  """Resolves reaching definitions for each symbol.
+
+  Args:
+    node: ast.AST
+    source_info: transformer.SourceInfo
+    graphs: Dict[ast.FunctionDef, cfg.Graph]
+    definition_factory: Callable[[], Definition]
+  Returns:
+    ast.AST
+  """
+  visitor = TreeAnnotator(source_info, graphs, definition_factory)
+  node = visitor.visit(node)
+  return node
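The _NodeState operators above implement defs_out = gen | (defs_in - kill) over a Dict[symbol, Set[definition]], with branch joins taking the per-symbol union. A stand-alone rendering of that arithmetic (statement labels are illustrative):

def apply_statement(defs_in, modified, def_id):
  """One statement: kill all prior defs of modified symbols, gen fresh ones."""
  defs_out = {s: set(ds) for s, ds in defs_in.items()}
  for s in modified:
    defs_out.pop(s, None)   # kill: every prior definition of the symbol
  for s in modified:
    defs_out[s] = {def_id}  # gen: each statement creates one fresh definition
  return defs_out

state = {}
state = apply_statement(state, {'a'}, 'a=[]@line1')
state = apply_statement(state, {'a'}, 'a=[]@line3')  # kills the line-1 def
assert state == {'a': {'a=[]@line3'}}

# Joining branches unions the per-symbol definition sets (cf. __or__):
branch = apply_statement(state, {'a'}, 'a=[]@line5')
joined = {s: state.get(s, set()) | branch.get(s, set())
          for s in set(state) | set(branch)}
assert joined == {'a': {'a=[]@line3', 'a=[]@line5'}}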
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/reaching_definitions_test.py b/tensorflow/contrib/autograph/pyct/static_analysis/reaching_definitions_test.py
new file mode 100644
index 0000000..243fe80
--- /dev/null
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/reaching_definitions_test.py
@@ -0,0 +1,263 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for reaching_definitions module."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import cfg
+from tensorflow.contrib.autograph.pyct import parser
+from tensorflow.contrib.autograph.pyct import qual_names
+from tensorflow.contrib.autograph.pyct import transformer
+from tensorflow.contrib.autograph.pyct.static_analysis import activity
+from tensorflow.contrib.autograph.pyct.static_analysis import reaching_definitions
+from tensorflow.python.platform import test
+
+
+class DefinitionInfoTest(test.TestCase):
+
+  def _parse_and_analyze(self, test_fn):
+    node, source = parser.parse_entity(test_fn)
+    entity_info = transformer.EntityInfo(
+        source_code=source,
+        source_file=None,
+        namespace={},
+        arg_values=None,
+        arg_types=None,
+        owner_type=None)
+    node = qual_names.resolve(node)
+    node = activity.resolve(node, entity_info)
+    graphs = cfg.build(node)
+    node = reaching_definitions.resolve(node, entity_info, graphs,
+                                        reaching_definitions.Definition)
+    return node
+
+  def assertHasDefs(self, node, num):
+    defs = anno.getanno(node, anno.Static.DEFINITIONS)
+    self.assertEqual(len(defs), num)
+    for r in defs:
+      self.assertIsInstance(r, reaching_definitions.Definition)
+
+  def assertHasDefinedIn(self, node, expected):
+    defined_in = anno.getanno(node, anno.Static.DEFINED_VARS_IN)
+    defined_in_str = set(str(v) for v in defined_in)
+    if not expected:
+      expected = ()
+    if not isinstance(expected, tuple):
+      expected = (expected,)
+    self.assertSetEqual(defined_in_str, set(expected))
+
+  def assertSameDef(self, first, second):
+    self.assertHasDefs(first, 1)
+    self.assertHasDefs(second, 1)
+    self.assertIs(
+        anno.getanno(first, anno.Static.DEFINITIONS)[0],
+        anno.getanno(second, anno.Static.DEFINITIONS)[0])
+
+  def assertNotSameDef(self, first, second):
+    self.assertHasDefs(first, 1)
+    self.assertHasDefs(second, 1)
+    self.assertIsNot(
+        anno.getanno(first, anno.Static.DEFINITIONS)[0],
+        anno.getanno(second, anno.Static.DEFINITIONS)[0])
+
+  def test_conditional(self):
+
+    def test_fn(a, b):
+      a = []
+      if b:
+        a = []
+      return a
+
+    node = self._parse_and_analyze(test_fn)
+    fn_body = node.body[0].body
+
+    self.assertHasDefs(fn_body[0].targets[0], 1)
+    self.assertHasDefs(fn_body[1].test, 1)
+    self.assertHasDefs(fn_body[1].body[0].targets[0], 1)
+    self.assertHasDefs(fn_body[2].value, 2)
+
+    self.assertHasDefinedIn(fn_body[1], ('a', 'b'))
+
+  def test_while(self):
+
+    def test_fn(a):
+      max(a)
+      while True:
+        a = a
+        a = a
+      return a
+
+    node = self._parse_and_analyze(test_fn)
+    fn_body = node.body[0].body
+
+    self.assertHasDefs(fn_body[0].value.args[0], 1)
+    self.assertHasDefs(fn_body[1].body[0].targets[0], 1)
+    self.assertHasDefs(fn_body[1].body[1].targets[0], 1)
+    self.assertHasDefs(fn_body[1].body[1].value, 1)
+    # The loop does have an invariant test, but the CFG doesn't know that.
+    self.assertHasDefs(fn_body[1].body[0].value, 2)
+    self.assertHasDefs(fn_body[2].value, 2)
+
+  def test_while_else(self):
+
+    def test_fn(x, i):
+      y = 0
+      while x:
+        x += i
+        if i:
+          break
+      else:
+        y = 1
+      return x, y
+
+    node = self._parse_and_analyze(test_fn)
+    fn_body = node.body[0].body
+
+    self.assertHasDefs(fn_body[0].targets[0], 1)
+    self.assertHasDefs(fn_body[1].test, 2)
+    self.assertHasDefs(fn_body[1].body[0].target, 1)
+    self.assertHasDefs(fn_body[1].body[1].test, 1)
+    self.assertHasDefs(fn_body[1].orelse[0].targets[0], 1)
+    self.assertHasDefs(fn_body[2].value.elts[0], 2)
+    self.assertHasDefs(fn_body[2].value.elts[1], 2)
+
+  def test_for_else(self):
+
+    def test_fn(x, i):
+      y = 0
+      for i in x:
+        x += i
+        if i:
+          break
+        else:
+          continue
+      else:
+        y = 1
+      return x, y
+
+    node = self._parse_and_analyze(test_fn)
+    fn_body = node.body[0].body
+
+    self.assertHasDefs(fn_body[0].targets[0], 1)
+    self.assertHasDefs(fn_body[1].target, 1)
+    self.assertHasDefs(fn_body[1].body[0].target, 1)
+    self.assertHasDefs(fn_body[1].body[1].test, 1)
+    self.assertHasDefs(fn_body[1].orelse[0].targets[0], 1)
+    self.assertHasDefs(fn_body[2].value.elts[0], 2)
+    self.assertHasDefs(fn_body[2].value.elts[1], 2)
+
+  def test_nested_functions(self):
+
+    def test_fn(a, b):
+      a = []
+      if b:
+        a = []
+
+        def foo():
+          return a
+
+        foo()
+
+      return a
+
+    node = self._parse_and_analyze(test_fn)
+    fn_body = node.body[0].body
+    def_of_a_in_if = fn_body[1].body[0].targets[0]
+
+    self.assertHasDefs(fn_body[0].targets[0], 1)
+    self.assertHasDefs(fn_body[1].test, 1)
+    self.assertHasDefs(def_of_a_in_if, 1)
+    self.assertHasDefs(fn_body[2].value, 2)
+
+    inner_fn_body = fn_body[1].body[1].body
+    self.assertSameDef(inner_fn_body[0].value, def_of_a_in_if)
+
+  def test_nested_functions_isolation(self):
+
+    def test_fn(a):
+      a = 0
+
+      def child():
+        a = 1
+        return a
+
+      child()
+      return a
+
+    node = self._parse_and_analyze(test_fn)
+    fn_body = node.body[0].body
+
+    parent_return = fn_body[3]
+    child_return = fn_body[1].body[1]
+    # The assignment `a = 1` makes `a` local to `child`.
+    self.assertNotSameDef(parent_return.value, child_return.value)
+
+  def test_function_call_in_with(self):
+
+    def foo(_):
+      pass
+
+    def test_fn(a):
+      with foo(a):
+        return a
+
+    node = self._parse_and_analyze(test_fn)
+    fn_body = node.body[0].body
+
+    self.assertHasDefs(fn_body[0].items[0].context_expr.func, 0)
+    self.assertHasDefs(fn_body[0].items[0].context_expr.args[0], 1)
+
+  def test_mutation_subscript(self):
+
+    def test_fn(a):
+      l = []
+      l[0] = a
+      return l
+
+    node = self._parse_and_analyze(test_fn)
+    fn_body = node.body[0].body
+
+    creation = fn_body[0].targets[0]
+    mutation = fn_body[1].targets[0].value
+    use = fn_body[2].value
+    self.assertSameDef(creation, mutation)
+    self.assertSameDef(creation, use)
+
+  def test_replacement(self):
+
+    def foo(a):
+      return a
+
+    def test_fn(a):
+      a = foo(a)
+      return a
+
+    node = self._parse_and_analyze(test_fn)
+    fn_body = node.body[0].body
+
+    param = node.body[0].args.args[0]
+    source = fn_body[0].value.args[0]
+    target = fn_body[0].targets[0]
+    retval = fn_body[1].value
+    self.assertSameDef(param, source)
+    self.assertNotSameDef(source, target)
+    self.assertSameDef(target, retval)
+
+
+if __name__ == '__main__':
+  test.main()
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/type_info.py b/tensorflow/contrib/autograph/pyct/static_analysis/type_info.py
index a229c28..835d519 100644
--- a/tensorflow/contrib/autograph/pyct/static_analysis/type_info.py
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/type_info.py
@@ -43,9 +43,8 @@
 
 import gast
 
-from tensorflow.contrib.autograph import utils
 from tensorflow.contrib.autograph.pyct import anno
-from tensorflow.contrib.autograph.pyct import parser
+from tensorflow.contrib.autograph.pyct import ast_util
 from tensorflow.contrib.autograph.pyct import transformer
 from tensorflow.python.util import tf_inspect
 
@@ -166,7 +165,6 @@
         definition = self.scope.getval(qn)
         anno.copyanno(definition, node, 'type')
         anno.copyanno(definition, node, 'type_fqn')
-        anno.setanno(node, 'definition', definition)
 
         # TODO(mdan): Remove this when the directives module is in.
         anno.copyanno(definition, node, 'element_type')
@@ -198,52 +196,18 @@
   def visit_With(self, node):
     for item in node.items:
       if item.optional_vars is not None:
-        self.apply_to_single_assignments((item.optional_vars,),
-                                         item.context_expr,
-                                         self._process_variable_assignment)
+        ast_util.apply_to_single_assignments((item.optional_vars,),
+                                             item.context_expr,
+                                             self._process_variable_assignment)
     self.generic_visit(node)
     return node
 
   def visit_Assign(self, node):
     self.generic_visit(node)
-    self.apply_to_single_assignments(
-        node.targets, node.value, self._process_variable_assignment)
+    ast_util.apply_to_single_assignments(node.targets, node.value,
+                                         self._process_variable_assignment)
     return node
 
-  # TODO(mdan): Remove as soon as the new directives module is ready.
-  def visit_Call(self, node):
-    if anno.hasanno(node.func, 'live_val'):
-      # Symbols targeted by the "set_type" marker function are assigned the data
-      # type that it specified.
-      if anno.getanno(node.func, 'live_val') is utils.set_element_type:
-
-        if len(node.args) < 2 or len(node.args) > 3:
-          raise ValueError('"%s" must have either two or three parameters'
-                           % self.context.type_annotation_func)
-        if len(node.args) == 2:
-          target_arg, type_arg = node.args
-          shape_arg = parser.parse_expression('None')
-        else:
-          target_arg, type_arg, shape_arg = node.args
-        if not anno.hasanno(target_arg, anno.Basic.QN):
-          raise ValueError('the first argument of "%s" must by a symbol' %
-                           utils.set_element_type)
-        # TODO(mdan): This is vulnerable to symbol renaming.
-        element_type = type_arg
-        element_shape = shape_arg
-
-        target_symbol = anno.getanno(target_arg, anno.Basic.QN)
-        # Find the definition of this symbol and annotate it with the given
-        # data type. That in turn will cause future uses of the symbol
-        # to receive the same type annotation.
-        definition = self.scope.getval(target_symbol)
-        anno.setanno(node, 'element_type', element_type)
-        anno.setanno(node, 'element_shape', element_shape)
-        anno.setanno(definition, 'element_type', element_type)
-        anno.setanno(definition, 'element_shape', element_shape)
-        # TODO(mdan): Should we update references between definition and here?
-    return self.generic_visit(node)
-
 
 def resolve(node, context):
   return TypeInfoResolver(context).visit(node)
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/type_info_test.py b/tensorflow/contrib/autograph/pyct/static_analysis/type_info_test.py
index 32b1148..404311b 100644
--- a/tensorflow/contrib/autograph/pyct/static_analysis/type_info_test.py
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/type_info_test.py
@@ -19,11 +19,13 @@
 from __future__ import print_function
 
 from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import cfg
 from tensorflow.contrib.autograph.pyct import parser
 from tensorflow.contrib.autograph.pyct import qual_names
 from tensorflow.contrib.autograph.pyct import transformer
 from tensorflow.contrib.autograph.pyct.static_analysis import activity
 from tensorflow.contrib.autograph.pyct.static_analysis import live_values
+from tensorflow.contrib.autograph.pyct.static_analysis import reaching_definitions
 from tensorflow.contrib.autograph.pyct.static_analysis import type_info
 from tensorflow.python.client import session
 from tensorflow.python.platform import test
@@ -69,7 +71,10 @@
         arg_types=arg_types,
         owner_type=None)
     node = qual_names.resolve(node)
+    graphs = cfg.build(node)
     node = activity.resolve(node, entity_info)
+    node = reaching_definitions.resolve(node, entity_info, graphs,
+                                        reaching_definitions.Definition)
     node = live_values.resolve(node, entity_info, {})
     node = type_info.resolve(node, entity_info)
     node = live_values.resolve(node, entity_info, {})
diff --git a/tensorflow/contrib/autograph/pyct/templates.py b/tensorflow/contrib/autograph/pyct/templates.py
index 9c479eb..72d1d3b 100644
--- a/tensorflow/contrib/autograph/pyct/templates.py
+++ b/tensorflow/contrib/autograph/pyct/templates.py
@@ -26,6 +26,7 @@
 
 import gast
 
+from tensorflow.contrib.autograph.pyct import anno
 from tensorflow.contrib.autograph.pyct import ast_util
 from tensorflow.contrib.autograph.pyct import parser
 from tensorflow.contrib.autograph.pyct import qual_names
@@ -43,39 +44,65 @@
     """
     self.replacements = replacements
     self.in_replacements = False
+    self.preserved_annos = {
+        anno.Basic.ORIGIN,
+        anno.Basic.SKIP_PROCESSING,
+        anno.Static.ORIG_DEFINITIONS,
+    }
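+    # Annotations in this set survive the copy made when a placeholder is
+    # swapped for its replacement; copy_clean strips all others so stale
+    # analysis results are not carried over.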
+
+  def _prepare_replacement(self, replaced, key):
+    """Prepares a replacement AST that's safe to swap in for a node.
+
+    Args:
+      replaced: ast.AST, the node being replaced
+      key: Hashable, the key of the replacement AST
+    Returns:
+      List[ast.AST], the replacement AST nodes (a single replacement node is
+      wrapped in a list)
+    """
+    repl = self.replacements[key]
+
+    new_nodes = ast_util.copy_clean(repl, preserve_annos=self.preserved_annos)
+    if isinstance(new_nodes, gast.AST):
+      new_nodes = [new_nodes]
+
+    return new_nodes
 
   def visit_Expr(self, node):
-    if (isinstance(node.value, gast.Name) and
-        node.value.id in self.replacements):
-      return self.visit(node.value)
-    self.generic_visit(node)
-    return node
+    # When replacing a placeholder with an entire statement, the replacement
+    # must stand on its own and not be wrapped in an Expr.
+    new_value = self.visit(node.value)
+    if new_value is node.value:
+      return node
+    return new_value
 
   def visit_keyword(self, node):
-    if node.arg in self.replacements:
-      repl = self.replacements[node.arg]
-      if isinstance(repl, gast.keyword):
-        return repl
-      elif (isinstance(repl, (list, tuple)) and repl and
-            all(isinstance(r, gast.keyword) for r in repl)):
-        return repl
-      # TODO(mdan): We may allow replacing with a string as well.
-      # For example, if one wanted to replace foo with bar in foo=baz, then
-      # we could allow changing just node arg, so that we end up with bar=baz.
-      raise ValueError(
-          'a keyword argument may only be replaced by another keyword or a '
-          'non-empty list of keywords. Found: %s' % repl)
-    return self.generic_visit(node)
+    if node.arg not in self.replacements:
+      return self.generic_visit(node)
+
+    repl = self._prepare_replacement(node, node.arg)
+    if isinstance(repl, gast.keyword):
+      return repl
+    elif (repl and isinstance(repl, (list, tuple)) and
+          all(isinstance(r, gast.keyword) for r in repl)):
+      return repl
+    # TODO(mdan): We may allow replacing with a string as well.
+    # For example, if one wanted to replace foo with bar in foo=baz, then
+    # we could allow changing just node.arg, so that we end up with bar=baz.
+    raise ValueError(
+        'a keyword argument may only be replaced by another keyword or a '
+        'non-empty list of keywords. Found: %s' % repl)
 
   def visit_FunctionDef(self, node):
     node = self.generic_visit(node)
-    if node.name in self.replacements:
-      repl = self.replacements[node.name]
-      if not isinstance(repl, (gast.Name, ast.Name)):
-        raise ValueError(
-            'a function name can only be replaced by a Name node. Found: %s' %
-            repl)
-      node.name = repl.id
+    if node.name not in self.replacements:
+      return node
+
+    repl = self.replacements[node.name]
+    if not isinstance(repl, (gast.Name, ast.Name)):
+      raise ValueError(
+          'a function name can only be replaced by a Name node. Found: %s' %
+          repl)
+    node.name = repl.id
     return node
 
   def _check_has_context(self, node):
@@ -148,6 +175,7 @@
     node = self.generic_visit(node)
     if node.attr not in self.replacements:
       return node
+
     repl = self.replacements[node.attr]
     if not isinstance(repl, gast.Name):
       raise ValueError(
@@ -159,9 +187,7 @@
     if node.id not in self.replacements:
       return node
 
-    new_nodes = ast_util.copy_clean(self.replacements[node.id])
-    if isinstance(new_nodes, gast.AST):
-      new_nodes = [new_nodes]
+    new_nodes = self._prepare_replacement(node, node.id)
 
     # Preserve the target context.
     for n in new_nodes:
@@ -182,7 +208,7 @@
 
 
 def _convert_to_ast(n):
-  """Convert from a known data type to AST."""
+  """Converts from a known data type to AST."""
   if isinstance(n, str):
     # Note: the node will receive the ctx value from the template, see
     # ReplaceTransformer.visit_Name.
@@ -197,7 +223,7 @@
 
 
 def replace(template, **replacements):
-  """Replace placeholders in a Python template.
+  """Replaces placeholders in a Python template.
 
   AST Name and Tuple nodes always receive the context that is inferred from
   the template. However, when replacing more complex nodes (that can potentially
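
As a usage illustration, a minimal sketch; the placeholder names `target` and `source` and the substituted expressions are arbitrary, not part of this change:

    from tensorflow.contrib.autograph.pyct import parser
    from tensorflow.contrib.autograph.pyct import templates

    template = """
      target = source + 1
    """
    # Each placeholder Name is swapped for the given AST; the replacements
    # receive the Load/Store context implied by their position in the
    # template.
    nodes = templates.replace(
        template,
        target=parser.parse_expression('x'),
        source=parser.parse_expression('y'))
    # nodes is a list of statements equivalent to: x = y + 1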
diff --git a/tensorflow/contrib/autograph/pyct/templates_test.py b/tensorflow/contrib/autograph/pyct/templates_test.py
index a01f8bf..a8bbc5a 100644
--- a/tensorflow/contrib/autograph/pyct/templates_test.py
+++ b/tensorflow/contrib/autograph/pyct/templates_test.py
@@ -151,17 +151,13 @@
     self.assertEqual(node.func.id, 'bar')
     self.assertEqual(node.func.args[0].id, 'baz')
 
-  def replace_as_expression_restrictions(self):
+  def test_replace_as_expression_restrictions(self):
     template = """
       foo(a)
       bar(b)
     """
     with self.assertRaises(ValueError):
       templates.replace_as_expression(template)
-    with self.assertRaises(ValueError):
-      templates.replace('')
-    with self.assertRaises(ValueError):
-      templates.replace('a = b')
 
 
 if __name__ == '__main__':
diff --git a/tensorflow/contrib/autograph/pyct/transformer.py b/tensorflow/contrib/autograph/pyct/transformer.py
index 7655811..bbdfefc 100644
--- a/tensorflow/contrib/autograph/pyct/transformer.py
+++ b/tensorflow/contrib/autograph/pyct/transformer.py
@@ -59,6 +59,103 @@
     self.owner_type = owner_type
 
 
+class _StateStack(object):
+  """Typed stack abstraction.
+
+  This class provides syntactic sugar for a stack of objects of known
+  type. Attributes of the object at the top of the stack can be accessed
+  directly on this object, which makes for very terse syntax.
+
+  For example, this code:
+
+    stack = _StateStack(Foo)
+    stack.enter()
+    stack.bar
+
+  Is equivalent to:
+
+    stack = []
+    stack.append(Foo())
+    foo = stack[-1]
+    foo.bar
+
+  See _State for more on how this is used.
+
+  Attributes:
+    type: Any, the type of objects that this stack holds
+    level: int, the current stack depth
+    value: Any, the object at the top of the stack
+  """
+
+  def __init__(self, type_):
+    # Because we override __setattr__, we need to attach these attributes using
+    # the superclass' setattr.
+    object.__setattr__(self, 'type', type_)
+    object.__setattr__(self, '_stack', [])
+    self.enter()
+
+  def enter(self):
+    self._stack.append(self.type())
+
+  def exit(self):
+    return self._stack.pop()
+
+  @property
+  def level(self):
+    return len(self._stack)
+
+  @property
+  def value(self):
+    return self._stack[-1]
+
+  def __getattr__(self, key):
+    return getattr(self._stack[-1], key)
+
+  def __setattr__(self, key, value):
+    setattr(self._stack[-1], key, value)
+
+
+class _State(object):
+  """Supporting class for nested scope variable space for converter.Base.
+
+  This structure offers syntactic sugar over a dict of stacks of objects
+  of known type. These structures are useful to keep state during AST walks.
+  Multiple different scopes can be tracked in parallel. For example:
+
+    s = _State()
+
+    s[foo].enter()
+    s[bar].enter()  # this will not affect s[foo]
+
+  Element access has special semantics:
+    * keys are a data type
+    * element values are _StateStack(type=key) objects
+    * missing elements are automatically added, similarly to defaultdict
+
+  For example, the following block:
+
+    s = _State()
+    s[Foo]
+
+  Is equivalent to:
+
+    s = {}
+    if Foo not in s:
+      s[Foo] = Foo()
+    s[Foo]
+
+  See Base for how it's used.
+  """
+
+  def __init__(self):
+    self._value = {}
+
+  def __getitem__(self, key):
+    if key not in self._value:
+      self._value[key] = _StateStack(key)
+    return self._value[key]
+
+
 class Base(gast.NodeTransformer):
   """Base class for general-purpose code transformers transformers.
 
@@ -71,6 +168,27 @@
   (possibly nested) scopes, use enter/exit_local_scope and set/get_local.
   You must call enter/exit_local_scope manually, but the transformer detects
   when they are not properly paired.
+
+  The transformer allows keeping state across calls to visit_* methods, local
+  to arbitrary nodes and their descendants, using the self.state attribute.
+  Multiple independent scopes are allowed and automatically constructed.
+
+  For example, to keep track of the If node that encloses any Name node, one can
+  write:
+
+    class FooType(object):
+
+      def __init__(self):
+        self.foo_property = None
+
+    class DummyTransformer(Base):
+
+      def visit_If(self, node):
+        self.state[FooType].enter()
+        self.state[FooType].foo_property = node
+
+      def visit_Name(self, node):
+        self.state[FooType].foo_property  # will hold the innermost enclosing if
   """
 
   # TODO(mdan): Document all extra features.
@@ -92,6 +210,12 @@
     self._local_scope_state = []
     self.enter_local_scope()
 
+    # Allows scoping of local variables to keep state across calls to visit_*
+    # methods. Multiple scope hierarchies may exist and are keyed by tag. A
+    # scope is valid at one or more nodes and all their children. Scopes
+    # created in child nodes supersede their parent. Scopes are isolated from
+    # one another.
+    self.state = _State()
+
   @property
   def enclosing_entities(self):
     return tuple(self._enclosing_entities)
@@ -101,7 +225,9 @@
     return len(self._local_scope_state)
 
   def enter_local_scope(self, inherit=None):
-    """Marks entry into a new local scope.
+    """Deprecated. Use self.state instead.
+
+    Marks entry into a new local scope.
 
     Args:
       inherit: Optional enumerable of variable names to copy from the
@@ -116,7 +242,9 @@
     self._local_scope_state.append(scope_entered)
 
   def exit_local_scope(self, keep=None):
-    """Marks exit from the current local scope.
+    """Deprecated. Use self.state instead.
+
+    Marks exit from the current local scope.
 
     Args:
       keep: Optional enumerable of variable names to copy into the
@@ -133,9 +261,11 @@
     return scope_left
 
   def set_local(self, name, value):
+    """Deprecated. Use self.state instead."""
     self._local_scope_state[-1][name] = value
 
   def get_local(self, name, default=None):
+    """Deprecated. Use self.state instead."""
     return self._local_scope_state[-1].get(name, default)
 
   def debug_print(self, node):
@@ -216,7 +346,7 @@
         node_destination = new_destination
     return results
 
-  # TODO(mdan): Once we have error tracing, we may be able to just go to SSA.
+  # TODO(mdan): Remove.
   def apply_to_single_assignments(self, targets, values, apply_fn):
     """Applies a function to each individual assignment.
 
@@ -266,7 +396,8 @@
 
   def _get_source(self, node):
     try:
-      return compiler.ast_to_source(node)
+      source, _ = compiler.ast_to_source(node)
+      return source
     except AssertionError:
       return '<could not convert AST to source>'
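
To make the stack semantics concrete, a short sketch; Counter is a hypothetical holder type, and _StateStack is module-private, shown here for illustration only:

    class Counter(object):

      def __init__(self):
        self.count = 0

    stack = _StateStack(Counter)  # the constructor pushes one Counter
    stack.count = 1               # attribute writes go to the top instance
    stack.enter()                 # push a fresh Counter
    assert stack.count == 0
    stack.exit()                  # pop back to the first instance
    assert stack.count == 1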
 
diff --git a/tensorflow/contrib/autograph/pyct/transformer_test.py b/tensorflow/contrib/autograph/pyct/transformer_test.py
index baf0465..19b80b0 100644
--- a/tensorflow/contrib/autograph/pyct/transformer_test.py
+++ b/tensorflow/contrib/autograph/pyct/transformer_test.py
@@ -93,6 +93,83 @@
                       inner_function, lambda_node),
                      anno.getanno(lambda_expr, 'enclosing_entities'))
 
+  def assertSameAnno(self, first, second, key):
+    self.assertIs(anno.getanno(first, key), anno.getanno(second, key))
+
+  def assertDifferentAnno(self, first, second, key):
+    self.assertIsNot(anno.getanno(first, key), anno.getanno(second, key))
+
+  def test_state_tracking(self):
+
+    class LoopState(object):
+      pass
+
+    class CondState(object):
+      pass
+
+    class TestTransformer(transformer.Base):
+
+      def visit(self, node):
+        anno.setanno(node, 'loop_state', self.state[LoopState].value)
+        anno.setanno(node, 'cond_state', self.state[CondState].value)
+        return super(TestTransformer, self).visit(node)
+
+      def visit_While(self, node):
+        self.state[LoopState].enter()
+        node = self.generic_visit(node)
+        self.state[LoopState].exit()
+        return node
+
+      def visit_If(self, node):
+        self.state[CondState].enter()
+        node = self.generic_visit(node)
+        self.state[CondState].exit()
+        return node
+
+    tr = TestTransformer(self._simple_source_info())
+
+    def test_function(a):
+      a = 1
+      while a:
+        _ = 'a'
+        if a > 2:
+          _ = 'b'
+          while True:
+            raise '1'
+        if a > 3:
+          _ = 'c'
+          while True:
+            raise '1'
+
+    node, _ = parser.parse_entity(test_function)
+    node = tr.visit(node)
+
+    fn_body = node.body[0].body
+    outer_while_body = fn_body[1].body
+    self.assertSameAnno(fn_body[0], outer_while_body[0], 'cond_state')
+    self.assertDifferentAnno(fn_body[0], outer_while_body[0], 'loop_state')
+
+    first_if_body = outer_while_body[1].body
+    self.assertDifferentAnno(outer_while_body[0], first_if_body[0],
+                             'cond_state')
+    self.assertSameAnno(outer_while_body[0], first_if_body[0], 'loop_state')
+
+    first_inner_while_body = first_if_body[1].body
+    self.assertSameAnno(first_if_body[0], first_inner_while_body[0],
+                        'cond_state')
+    self.assertDifferentAnno(first_if_body[0], first_inner_while_body[0],
+                             'loop_state')
+
+    second_if_body = outer_while_body[2].body
+    self.assertDifferentAnno(first_if_body[0], second_if_body[0], 'cond_state')
+    self.assertSameAnno(first_if_body[0], second_if_body[0], 'loop_state')
+
+    second_inner_while_body = second_if_body[1].body
+    self.assertDifferentAnno(first_inner_while_body[0],
+                             second_inner_while_body[0], 'cond_state')
+    self.assertDifferentAnno(first_inner_while_body[0],
+                             second_inner_while_body[0], 'loop_state')
+
   def test_local_scope_info_stack(self):
 
     class TestTransformer(transformer.Base):
diff --git a/tensorflow/contrib/autograph/utils/BUILD b/tensorflow/contrib/autograph/utils/BUILD
index d82c17b..d2b399f 100644
--- a/tensorflow/contrib/autograph/utils/BUILD
+++ b/tensorflow/contrib/autograph/utils/BUILD
@@ -28,7 +28,6 @@
         "tensor_list.py",
         "testing.py",
         "type_check.py",
-        "type_hints.py",
     ],
     srcs_version = "PY2AND3",
     visibility = ["//tensorflow:__subpackages__"],
diff --git a/tensorflow/contrib/autograph/utils/__init__.py b/tensorflow/contrib/autograph/utils/__init__.py
index 817d412..57b5f74 100644
--- a/tensorflow/contrib/autograph/utils/__init__.py
+++ b/tensorflow/contrib/autograph/utils/__init__.py
@@ -30,4 +30,3 @@
 from tensorflow.contrib.autograph.utils.tensor_list import dynamic_list_append
 from tensorflow.contrib.autograph.utils.testing import fake_tf
 from tensorflow.contrib.autograph.utils.type_check import is_tensor
-from tensorflow.contrib.autograph.utils.type_hints import set_element_type
diff --git a/tensorflow/contrib/autograph/utils/builtins.py b/tensorflow/contrib/autograph/utils/builtins.py
index 998087e..71079cf 100644
--- a/tensorflow/contrib/autograph/utils/builtins.py
+++ b/tensorflow/contrib/autograph/utils/builtins.py
@@ -52,7 +52,7 @@
   """Implementation of len using dynamic dispatch."""
   if tensor_util.is_tensor(list_or_tensor):
     shape = list_or_tensor.shape
-    if not shape:
+    if not shape.ndims:
       raise ValueError(
           'len requires non-zero rank for tensor "%s"' % list_or_tensor)
     return array_ops.shape(list_or_tensor)[0]
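
The effect of the stricter rank check, sketched with the same helper the test below exercises:

    from tensorflow.contrib.autograph.utils import builtins
    from tensorflow.python.framework import constant_op

    matrix = constant_op.constant([[1, 2], [3, 4]])
    # Rank 2: staged as array_ops.shape(matrix)[0], which evaluates to 2.
    length = builtins.dynamic_builtin(len, matrix)

    scalar = constant_op.constant(1)
    # Rank 0: raises ValueError at graph construction time.
    builtins.dynamic_builtin(len, scalar)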
diff --git a/tensorflow/contrib/autograph/utils/builtins_test.py b/tensorflow/contrib/autograph/utils/builtins_test.py
index 0c23121..b4821f3 100644
--- a/tensorflow/contrib/autograph/utils/builtins_test.py
+++ b/tensorflow/contrib/autograph/utils/builtins_test.py
@@ -33,7 +33,8 @@
   def test_dynamic_len_tf_scalar(self):
     a = constant_op.constant(1)
 
-    with self.assertRaises(ValueError):
+    with self.assertRaisesRegexp(ValueError,
+                                 'len requires non-zero rank for tensor.*'):
       with self.test_session() as sess:
         sess.run(builtins.dynamic_builtin(len, a))
 
diff --git a/tensorflow/contrib/autograph/utils/type_hints.py b/tensorflow/contrib/autograph/utils/type_hints.py
deleted file mode 100644
index aeb9e54..0000000
--- a/tensorflow/contrib/autograph/utils/type_hints.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""No-op utilities that provide static type hints.
-
-These are used when the data type is not known at creation, for instance in the
-case of empty lists.
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-
-def set_element_type(entity, dtype, shape=None):
-  """Indicates that the entity is expected hold items of specified type.
-
-  This function is a no-op. Its presence merely marks the data type of its
-  argument. The staged TensorFlow ops will reflect and assert this data type.
-
-  Args:
-    entity: A Tensor or TensorArray.
-    dtype: TensorFlow dtype value to assert for entity.
-    shape: Optional shape to assert for entity.
-  Returns:
-    The value of entity, unchanged.
-  """
-  del dtype
-  del shape
-  return entity
diff --git a/tensorflow/contrib/bigtable/kernels/bigtable_lib.h b/tensorflow/contrib/bigtable/kernels/bigtable_lib.h
index 12d8256..a2a5df1 100644
--- a/tensorflow/contrib/bigtable/kernels/bigtable_lib.h
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_lib.h
@@ -58,7 +58,8 @@
   BigtableTableResource(BigtableClientResource* client, string table_name)
       : client_(client),
         table_name_(std::move(table_name)),
-        table_(client->get_client(), table_name_) {
+        table_(client->get_client(), table_name_,
+               google::cloud::bigtable::AlwaysRetryMutationPolicy()) {
     client_->Ref();
   }
 
diff --git a/tensorflow/contrib/boosted_trees/lib/utils/batch_features.cc b/tensorflow/contrib/boosted_trees/lib/utils/batch_features.cc
index 35b059f..4fab2b0 100644
--- a/tensorflow/contrib/boosted_trees/lib/utils/batch_features.cc
+++ b/tensorflow/contrib/boosted_trees/lib/utils/batch_features.cc
@@ -16,6 +16,7 @@
 #include "tensorflow/contrib/boosted_trees/lib/utils/batch_features.h"
 #include "tensorflow/contrib/boosted_trees/lib/utils/macros.h"
 #include "tensorflow/contrib/boosted_trees/lib/utils/tensor_utils.h"
+#include "tensorflow/core/lib/core/errors.h"
 
 namespace tensorflow {
 namespace boosted_trees {
@@ -96,9 +97,11 @@
             "Sparse float feature shape incompatible with batch size."));
     auto tensor_shape = TensorShape({shape_flat(0), shape_flat(1)});
     auto order_dims = sparse::SparseTensor::VarDimArray({0, 1});
-    sparse_float_feature_columns_.emplace_back(sparse_float_feature_indices,
-                                               sparse_float_feature_values,
-                                               tensor_shape, order_dims);
+    sparse::SparseTensor sparse_tensor;
+    TF_RETURN_IF_ERROR(sparse::SparseTensor::Create(
+        sparse_float_feature_indices, sparse_float_feature_values, tensor_shape,
+        order_dims, &sparse_tensor));
+    sparse_float_feature_columns_.push_back(std::move(sparse_tensor));
   }
 
   // Read sparse int features.
@@ -136,9 +139,11 @@
             "Sparse int feature shape incompatible with batch size."));
     auto tensor_shape = TensorShape({shape_flat(0), shape_flat(1)});
     auto order_dims = sparse::SparseTensor::VarDimArray({0, 1});
-    sparse_int_feature_columns_.emplace_back(sparse_int_feature_indices,
-                                             sparse_int_feature_values,
-                                             tensor_shape, order_dims);
+    sparse::SparseTensor sparse_tensor;
+    TF_RETURN_IF_ERROR(sparse::SparseTensor::Create(
+        sparse_int_feature_indices, sparse_int_feature_values, tensor_shape,
+        order_dims, &sparse_tensor));
+    sparse_int_feature_columns_.push_back(std::move(sparse_tensor));
   }
   return Status::OK();
 }
diff --git a/tensorflow/contrib/boosted_trees/lib/utils/examples_iterable_test.cc b/tensorflow/contrib/boosted_trees/lib/utils/examples_iterable_test.cc
index d8a6088..30c3743 100644
--- a/tensorflow/contrib/boosted_trees/lib/utils/examples_iterable_test.cc
+++ b/tensorflow/contrib/boosted_trees/lib/utils/examples_iterable_test.cc
@@ -43,27 +43,35 @@
       test::AsTensor<int64>({0, 0, 2, 0, 3, 0, 4, 0}, {4, 2});
   auto sparse_float_values1 = test::AsTensor<float>({-3.0f, 0.0f, 5.0f, 0.0f});
   auto sparse_float_shape1 = TensorShape({8, 1});
-  sparse::SparseTensor sparse_float_tensor1(
-      sparse_float_indices1, sparse_float_values1, sparse_float_shape1);
+  sparse::SparseTensor sparse_float_tensor1;
+  TF_ASSERT_OK(
+      sparse::SparseTensor::Create(sparse_float_indices1, sparse_float_values1,
+                                   sparse_float_shape1, &sparse_float_tensor1));
   auto sparse_float_indices2 = test::AsTensor<int64>(
       {0, 1, 1, 0, 2, 1, 3, 0, 4, 1, 5, 0, 5, 1, 7, 0}, {8, 2});
   auto sparse_float_values2 =
       test::AsTensor<float>({1.f, 4.0f, 3.f, 7.0f, 4.3f, 9.0f, 0.8f, -4.0f});
   auto sparse_float_shape2 = TensorShape({8, 2});
-  sparse::SparseTensor sparse_float_tensor2(
-      sparse_float_indices2, sparse_float_values2, sparse_float_shape2);
+  sparse::SparseTensor sparse_float_tensor2;
+  TF_ASSERT_OK(
+      sparse::SparseTensor::Create(sparse_float_indices2, sparse_float_values2,
+                                   sparse_float_shape2, &sparse_float_tensor2));
   auto sparse_int_indices1 =
       test::AsTensor<int64>({0, 0, 0, 1, 1, 0, 3, 0, 3, 1, 7, 0}, {6, 2});
   auto sparse_int_values1 = test::AsTensor<int64>({1, 8, 0, 2, 0, 5});
   auto sparse_int_shape1 = TensorShape({8, 2});
-  sparse::SparseTensor sparse_int_tensor1(
-      sparse_int_indices1, sparse_int_values1, sparse_int_shape1);
+  sparse::SparseTensor sparse_int_tensor1;
+  TF_ASSERT_OK(
+      sparse::SparseTensor::Create(sparse_int_indices1, sparse_int_values1,
+                                   sparse_int_shape1, &sparse_int_tensor1));
   auto sparse_int_indices2 =
       test::AsTensor<int64>({1, 0, 2, 0, 3, 0, 4, 0}, {4, 2});
   auto sparse_int_values2 = test::AsTensor<int64>({7, 13, 4, 0});
   auto sparse_int_shape2 = TensorShape({8, 1});
-  sparse::SparseTensor sparse_int_tensor2(
-      sparse_int_indices2, sparse_int_values2, sparse_int_shape2);
+  sparse::SparseTensor sparse_int_tensor2;
+  TF_ASSERT_OK(
+      sparse::SparseTensor::Create(sparse_int_indices2, sparse_int_values2,
+                                   sparse_int_shape2, &sparse_int_tensor2));
 
   auto validate_example_features = [](int64 example_idx,
                                       const Example& example) {
diff --git a/tensorflow/contrib/cmake/CMakeLists.txt b/tensorflow/contrib/cmake/CMakeLists.txt
index a0a5b0e..708618d 100644
--- a/tensorflow/contrib/cmake/CMakeLists.txt
+++ b/tensorflow/contrib/cmake/CMakeLists.txt
@@ -145,26 +145,41 @@
       # temporary fix for #18241
       add_definitions(-DEIGEN_DEFAULT_DENSE_INDEX_TYPE=std::int64_t)
   endif()
-  add_definitions(-DNOMINMAX -D_WIN32_WINNT=0x0A00 -DLANG_CXX11)
-  add_definitions(-DWIN32 -DOS_WIN -D_MBCS -DWIN32_LEAN_AND_MEAN -DNOGDI -DPLATFORM_WINDOWS)
+  add_definitions(-DNOMINMAX -D_WIN32_WINNT=0x0A00)
+  add_definitions(-DWIN32_LEAN_AND_MEAN -DNOGDI -DPLATFORM_WINDOWS)
   add_definitions(-DTENSORFLOW_USE_EIGEN_THREADPOOL -DEIGEN_HAS_C99_MATH)
   add_definitions(-DTF_COMPILE_LIBRARY)
-  add_definitions(/bigobj /nologo /EHsc /GF /MP /Gm-)
+  add_compile_options(/bigobj /GF /MP /Gm-)
   # Suppress warnings to reduce build log size.
-  add_definitions(/wd4267 /wd4244 /wd4800 /wd4503 /wd4554 /wd4996 /wd4348 /wd4018)
-  add_definitions(/wd4099 /wd4146 /wd4267 /wd4305 /wd4307)
-  add_definitions(/wd4715 /wd4722 /wd4723 /wd4838 /wd4309 /wd4334)
-  add_definitions(/wd4003 /wd4244 /wd4267 /wd4503 /wd4506 /wd4800 /wd4996)
+  add_compile_options(/wd4267 /wd4244 /wd4800 /wd4503 /wd4554 /wd4996 /wd4348 /wd4018)
+  add_compile_options(/wd4099 /wd4146 /wd4267 /wd4305 /wd4307)
+  add_compile_options(/wd4715 /wd4722 /wd4723 /wd4838 /wd4309 /wd4334)
+  add_compile_options(/wd4003 /wd4244 /wd4267 /wd4503 /wd4506 /wd4800 /wd4996)
   # Suppress linker warnings.
   set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /ignore:4049 /ignore:4197 /ignore:4217 /ignore:4221")
   set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} /ignore:4049 /ignore:4197 /ignore:4217 /ignore:4221")
   set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /ignore:4049 /ignore:4197 /ignore:4217 /ignore:4221")
-  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP")
   set(CMAKE_CXX_FLAGS_DEBUG "/D_DEBUG /MDd /Ob2")
   set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /D_ITERATOR_DEBUG_LEVEL=0")
   set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} /D_ITERATOR_DEBUG_LEVEL=0")
   set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} /D_ITERATOR_DEBUG_LEVEL=0")
 
+  set(compiler_flags
+    CMAKE_CXX_FLAGS
+    CMAKE_CXX_FLAGS_DEBUG
+    CMAKE_CXX_FLAGS_RELEASE
+    CMAKE_C_FLAGS
+    CMAKE_C_FLAGS_DEBUG
+    CMAKE_C_FLAGS_RELEASE
+  )
+  # Compile without C++ exception handling.
+  foreach(flag ${compiler_flags})
+    string(REPLACE "/EHsc" "/EHs-c-" ${flag} "${${flag}}")
+  endforeach()
+  add_definitions(/D_HAS_EXCEPTIONS=0)
+  # Suppress 'noexcept used with no exception handling mode specified' warning
+  add_compile_options(/wd4577)
+
   # Try to avoid flaky failures due to failed generation of generate.stamp files.
   set(CMAKE_SUPPRESS_REGENERATION ON)
 endif()
diff --git a/tensorflow/contrib/cmake/python_modules.txt b/tensorflow/contrib/cmake/python_modules.txt
index 40041d9..75e00f3 100644
--- a/tensorflow/contrib/cmake/python_modules.txt
+++ b/tensorflow/contrib/cmake/python_modules.txt
@@ -62,6 +62,8 @@
 tensorflow/python/summary
 tensorflow/python/summary/writer
 tensorflow/python/tools
+tensorflow/python/tools/api
+tensorflow/python/tools/api/generator
 tensorflow/python/training
 tensorflow/python/training/checkpointable
 tensorflow/python/user_ops
@@ -69,7 +71,6 @@
 tensorflow/python/util/protobuf
 tensorflow/tools
 tensorflow/tools/api
-tensorflow/tools/api/generator
 tensorflow/tools/graph_transforms
 tensorflow/contrib
 tensorflow/contrib/all_reduce
diff --git a/tensorflow/contrib/cmake/tf_python.cmake b/tensorflow/contrib/cmake/tf_python.cmake
index 8a9172b..32b185f 100755
--- a/tensorflow/contrib/cmake/tf_python.cmake
+++ b/tensorflow/contrib/cmake/tf_python.cmake
@@ -736,8 +736,8 @@
 # Generate API __init__.py files.
 ########################################################
 
-# Parse tensorflow/tools/api/generator/BUILD to get list of generated files.
-FILE(READ ${tensorflow_source_dir}/tensorflow/tools/api/generator/api_gen.bzl api_generator_BUILD_text)
+# Parse tensorflow/python/tools/api/generator/BUILD to get list of generated files.
+FILE(READ ${tensorflow_source_dir}/tensorflow/python/tools/api/generator/api_gen.bzl api_generator_BUILD_text)
 STRING(REGEX MATCH "# BEGIN GENERATED FILES.*# END GENERATED FILES" api_init_files_text ${api_generator_BUILD_text})
 string(REPLACE "# BEGIN GENERATED FILES" "" api_init_files_text ${api_init_files_text})
 string(REPLACE "# END GENERATED FILES" "" api_init_files_text ${api_init_files_text})
@@ -781,7 +781,7 @@
 
           # Run create_python_api.py to generate API init files.
           COMMAND ${CMAKE_COMMAND} -E env PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}/tf_python PATH=${PY_RUNTIME_ENV} ${PYTHON_EXECUTABLE}
-                  "${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/tools/api/generator/create_python_api.py"
+                  "${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/python/tools/api/generator/create_python_api.py"
                   "--root_init_template=${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/api_template.__init__.py"
                   "--apidir=${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow"
                   "--package=tensorflow.python"
@@ -803,7 +803,7 @@
 
           # Run create_python_api.py to generate API init files.
           COMMAND ${CMAKE_COMMAND} -E env PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}/tf_python ${PYTHON_EXECUTABLE}
-                  "${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/tools/api/generator/create_python_api.py"
+                  "${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/python/tools/api/generator/create_python_api.py"
                   "--root_init_template=${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/api_template.__init__.py"
                   "--apidir=${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow"
                   "--package=tensorflow.python"
@@ -824,8 +824,8 @@
 # Generate API __init__.py files for tf.estimator.
 ########################################################
 
-# Parse tensorflow/tools/api/generator/BUILD to get list of generated files.
-FILE(READ ${tensorflow_source_dir}/tensorflow/tools/api/generator/api_gen.bzl api_generator_BUILD_text)
+# Parse tensorflow/python/tools/api/generator/BUILD to get list of generated files.
+FILE(READ ${tensorflow_source_dir}/tensorflow/python/tools/api/generator/api_gen.bzl api_generator_BUILD_text)
 STRING(REGEX MATCH "# BEGIN GENERATED ESTIMATOR FILES.*# END GENERATED ESTIMATOR FILES" api_init_files_text ${api_generator_BUILD_text})
 string(REPLACE "# BEGIN GENERATED ESTIMATOR FILES" "" api_init_files_text ${api_init_files_text})
 string(REPLACE "# END GENERATED ESTIMATOR FILES" "" api_init_files_text ${api_init_files_text})
@@ -849,7 +849,7 @@
 
       # Run create_python_api.py to generate API init files.
       COMMAND ${CMAKE_COMMAND} -E env PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}/tf_python ${PYTHON_EXECUTABLE}
-              "${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/tools/api/generator/create_python_api.py"
+              "${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/python/tools/api/generator/create_python_api.py"
               "--apidir=${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/python/estimator/api"
               "--package=tensorflow.python.estimator"
               "--apiname=estimator"
diff --git a/tensorflow/contrib/cmake/tf_tests.cmake b/tensorflow/contrib/cmake/tf_tests.cmake
index eb9482d..b2330c4e 100644
--- a/tensorflow/contrib/cmake/tf_tests.cmake
+++ b/tensorflow/contrib/cmake/tf_tests.cmake
@@ -193,6 +193,7 @@
     # flaky test
     "${tensorflow_source_dir}/tensorflow/python/profiler/internal/run_metadata_test.py"
     "${tensorflow_source_dir}/tensorflow/python/profiler/model_analyzer_test.py"
+    "${tensorflow_source_dir}/tensorflow/python/data/kernel_tests/map_dataset_op_test.py"
     # Fails because uses data dependencies with bazel
     "${tensorflow_source_dir}/tensorflow/python/saved_model/saved_model_test.py"
     "${tensorflow_source_dir}/tensorflow/contrib/image/python/kernel_tests/sparse_image_warp_test.py"
@@ -216,7 +217,8 @@
     ${tensorflow_source_dir}/tensorflow/python/kernel_tests/duplicate_op_test.py
     ${tensorflow_source_dir}/tensorflow/python/kernel_tests/invalid_op_test.py
     ${tensorflow_source_dir}/tensorflow/python/kernel_tests/ackermann_test.py
-
+    # Tests too large to run.
+    ${tensorflow_source_dir}/tensorflow/python/kernel_tests/linalg/linear_operator_low_rank_update_test.py
   )
   if (WIN32)
     set(tf_test_src_py_exclude
diff --git a/tensorflow/contrib/copy_graph/python/util/copy_elements.py b/tensorflow/contrib/copy_graph/python/util/copy_elements.py
index a0dd388..5931c8a 100644
--- a/tensorflow/contrib/copy_graph/python/util/copy_elements.py
+++ b/tensorflow/contrib/copy_graph/python/util/copy_elements.py
@@ -18,7 +18,7 @@
 from one graph to another. The copied elements are initialized inside a
 user-specified scope in the other graph. There are separate functions to
 copy ops and variables.
-There is also a function to retrive the copied version of an op from the
+There is also a function to retrieve the copied version of an op from the
 first graph inside a scope in the second graph.
 
 @@copy_op_to_graph
@@ -77,7 +77,7 @@
       else:
         collections.append(scope + '/' + name)
 
-  #See if its trainable.
+  #See if it's trainable.
   trainable = (
       org_instance in org_instance.graph.get_collection(
           ops.GraphKeys.TRAINABLE_VARIABLES))
@@ -162,7 +162,7 @@
 
   if isinstance(org_instance, ops.Tensor):
 
-    #If its a Tensor, it is one of the outputs of the underlying
+    #If it's a Tensor, it is one of the outputs of the underlying
     #op. Therefore, copy the op itself and return the appropriate
     #output.
     op = org_instance.op
diff --git a/tensorflow/contrib/data/python/kernel_tests/BUILD b/tensorflow/contrib/data/python/kernel_tests/BUILD
index 9a454ef..1845732 100644
--- a/tensorflow/contrib/data/python/kernel_tests/BUILD
+++ b/tensorflow/contrib/data/python/kernel_tests/BUILD
@@ -380,6 +380,7 @@
         "//tensorflow/python:sparse_tensor",
         "//tensorflow/python/data/ops:dataset_ops",
         "//third_party/py/numpy",
+        "@absl_py//absl/testing:parameterized",
     ],
 )
 
diff --git a/tensorflow/contrib/data/python/kernel_tests/optimize_dataset_op_test.py b/tensorflow/contrib/data/python/kernel_tests/optimize_dataset_op_test.py
index 3bb9723..21eebcc 100644
--- a/tensorflow/contrib/data/python/kernel_tests/optimize_dataset_op_test.py
+++ b/tensorflow/contrib/data/python/kernel_tests/optimize_dataset_op_test.py
@@ -35,8 +35,6 @@
     with self.test_session() as sess:
       graph = graph_pb2.GraphDef().FromString(
           sess.run(dataset._as_serialized_graph()))
-      self.assertTrue(
-          all([node.op != "MapAndBatchDatasetV2" for node in graph.node]))
       self.assertAllEqual([x * x for x in range(10)], sess.run(get_next))
       with self.assertRaises(errors.OutOfRangeError):
         sess.run(get_next)
@@ -50,8 +48,6 @@
     with self.test_session() as sess:
       graph = graph_pb2.GraphDef().FromString(
           sess.run(dataset._as_serialized_graph()))
-      self.assertTrue(
-          all([node.op != "MapAndBatchDatasetV2" for node in graph.node]))
       self.assertAllEqual([x * x for x in range(10)], sess.run(get_next))
       with self.assertRaises(errors.OutOfRangeError):
         sess.run(get_next)
@@ -65,8 +61,6 @@
     with self.test_session() as sess:
       graph = graph_pb2.GraphDef().FromString(
           sess.run(dataset._as_serialized_graph()))
-      self.assertTrue(
-          any([node.op == "MapAndBatchDatasetV2" for node in graph.node]))
       self.assertAllEqual([x * x for x in range(10)], sess.run(get_next))
       with self.assertRaises(errors.OutOfRangeError):
         sess.run(get_next)
diff --git a/tensorflow/contrib/data/python/kernel_tests/slide_dataset_op_test.py b/tensorflow/contrib/data/python/kernel_tests/slide_dataset_op_test.py
index 5590a4b..8b2f846 100644
--- a/tensorflow/contrib/data/python/kernel_tests/slide_dataset_op_test.py
+++ b/tensorflow/contrib/data/python/kernel_tests/slide_dataset_op_test.py
@@ -17,6 +17,7 @@
 from __future__ import division
 from __future__ import print_function
 
+from absl.testing import parameterized
 import numpy as np
 
 from tensorflow.contrib.data.python.ops import sliding
@@ -29,28 +30,45 @@
 from tensorflow.python.platform import test
 
 
-class SlideDatasetTest(test.TestCase):
+class SlideDatasetTest(test.TestCase, parameterized.TestCase):
 
-  def testSlideDataset(self):
-    """Test an dataset that maps a TF function across its input elements."""
+  @parameterized.parameters(
+      (20, 14, 7, 1),
+      (20, 17, 9, 1),
+      (20, 14, 14, 1),
+      (20, 10, 14, 1),
+      (20, 14, 19, 1),
+      (20, 4, 1, 2),
+      (20, 2, 1, 6),
+      (20, 4, 7, 2),
+      (20, 2, 7, 6),
+      (1, 10, 4, 1),
+      (0, 10, 4, 1),
+  )
+  def testSlideDataset(self, count, window_size, window_shift, window_stride):
+    """Tests a dataset that slides a window its input elements."""
     components = (np.arange(7),
                   np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                   np.array(37.0) * np.arange(7))
 
-    count = array_ops.placeholder(dtypes.int64, shape=[])
-    window_size = array_ops.placeholder(dtypes.int64, shape=[])
-    stride = array_ops.placeholder(dtypes.int64, shape=[])
+    count_t = array_ops.placeholder(dtypes.int64, shape=[])
+    window_size_t = array_ops.placeholder(dtypes.int64, shape=[])
+    window_shift_t = array_ops.placeholder(dtypes.int64, shape=[])
+    window_stride_t = array_ops.placeholder(dtypes.int64, shape=[])
 
     def _map_fn(x, y, z):
       return math_ops.square(x), math_ops.square(y), math_ops.square(z)
 
     # The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
-    # RepeatDataset(count) -> _SlideDataset(window_size, stride).
-    iterator = (dataset_ops.Dataset.from_tensor_slices(components)
-                .map(_map_fn)
-                .repeat(count)
-                .apply(sliding.sliding_window_batch(window_size, stride))
-                .make_initializable_iterator())
+    # RepeatDataset(count) ->
+    # _SlideDataset(window_size, window_shift, window_stride).
+    iterator = (
+        dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
+        .repeat(count).apply(
+            sliding.sliding_window_batch(
+                window_size=window_size_t,
+                window_shift=window_shift_t,
+                window_stride=window_stride_t)).make_initializable_iterator())
     init_op = iterator.initializer
     get_next = iterator.get_next()
 
@@ -58,90 +76,126 @@
                      [t.shape.as_list() for t in get_next])
 
     with self.test_session() as sess:
-      # stride < window_size.
-      # Slide over a finite input, where the window_size divides the
-      # total number of elements.
-      sess.run(init_op, feed_dict={count: 20, window_size: 14, stride: 7})
-      # Same formula with convolution layer.
-      num_batches = (20 * 7 - 14) // 7 + 1
+      sess.run(
+          init_op,
+          feed_dict={
+              count_t: count,
+              window_size_t: window_size,
+              window_shift_t: window_shift,
+              window_stride_t: window_stride
+          })
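+      # E.g. count=20, window_size=14, window_shift=7, window_stride=1: the
+      # repeated input has 20 * 7 = 140 elements, so
+      # (140 - ((14 - 1) * 1 + 1)) // 7 + 1 = 19 full windows are produced.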
+      num_batches = (count * 7 - (
+          (window_size - 1) * window_stride + 1)) // window_shift + 1
       for i in range(num_batches):
         result = sess.run(get_next)
         for component, result_component in zip(components, result):
-          for j in range(14):
-            self.assertAllEqual(component[(i*7 + j) % 7]**2,
-                                result_component[j])
+          for j in range(window_size):
+            self.assertAllEqual(
+                component[(i * window_shift + j * window_stride) % 7]**2,
+                result_component[j])
       with self.assertRaises(errors.OutOfRangeError):
         sess.run(get_next)
-      # Slide over a finite input, where the window_size does not
-      # divide the total number of elements.
-      sess.run(init_op, feed_dict={count: 20, window_size: 17, stride: 9})
-      num_batches = (20 * 7 - 17) // 9 + 1
+
+  @parameterized.parameters(
+      (20, 14, 7, 1),
+      (20, 17, 9, 1),
+      (20, 14, 14, 1),
+      (20, 10, 14, 1),
+      (20, 14, 19, 1),
+      (20, 4, 1, 2),
+      (20, 2, 1, 6),
+      (20, 4, 7, 2),
+      (20, 2, 7, 6),
+      (1, 10, 4, 1),
+      (0, 10, 4, 1),
+  )
+  def testSlideDatasetDeprecated(self, count, window_size, stride,
+                                 window_stride):
+    """Tests a dataset that slides a window its input elements."""
+    components = (np.arange(7),
+                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
+                  np.array(37.0) * np.arange(7))
+
+    count_t = array_ops.placeholder(dtypes.int64, shape=[])
+    window_size_t = array_ops.placeholder(dtypes.int64, shape=[])
+    stride_t = array_ops.placeholder(dtypes.int64, shape=[])
+    window_stride_t = array_ops.placeholder(dtypes.int64, shape=[])
+
+    def _map_fn(x, y, z):
+      return math_ops.square(x), math_ops.square(y), math_ops.square(z)
+
+    # The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
+    # RepeatDataset(count) -> _SlideDataset(window_size, stride, window_stride).
+    iterator = (
+        dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
+        .repeat(count).apply(
+            sliding.sliding_window_batch(
+                window_size=window_size_t,
+                stride=stride_t,
+                window_stride=window_stride_t)).make_initializable_iterator())
+    init_op = iterator.initializer
+    get_next = iterator.get_next()
+
+    self.assertEqual([[None] + list(c.shape[1:]) for c in components],
+                     [t.shape.as_list() for t in get_next])
+
+    with self.test_session() as sess:
+      sess.run(
+          init_op,
+          feed_dict={
+              count_t: count,
+              window_size_t: window_size,
+              stride_t: stride,
+              window_stride_t: window_stride
+          })
+      num_batches = (count * 7 - (
+          (window_size - 1) * window_stride + 1)) // stride + 1
       for i in range(num_batches):
         result = sess.run(get_next)
         for component, result_component in zip(components, result):
-          for j in range(17):
-            self.assertAllEqual(component[(i*9 + j) % 7]**2,
-                                result_component[j])
+          for j in range(window_size):
+            self.assertAllEqual(
+                component[(i * stride + j * window_stride) % 7]**2,
+                result_component[j])
       with self.assertRaises(errors.OutOfRangeError):
         sess.run(get_next)
 
-      # stride == window_size.
-      sess.run(init_op, feed_dict={count: 20, window_size: 14, stride: 14})
-      num_batches = 20 * 7 // 14
-      for i in range(num_batches):
-        result = sess.run(get_next)
-        for component, result_component in zip(components, result):
-          for j in range(14):
-            self.assertAllEqual(component[(i*14 + j) % 7]**2,
-                                result_component[j])
-      with self.assertRaises(errors.OutOfRangeError):
-        sess.run(get_next)
+  @parameterized.parameters(
+      (14, 0, 3, 1),
+      (14, 3, 0, 1),
+      (14, 3, 3, 0),
+  )
+  def testSlideDatasetInvalid(self, count, window_size, window_shift,
+                              window_stride):
+    count_t = array_ops.placeholder(dtypes.int64, shape=[])
+    window_size_t = array_ops.placeholder(dtypes.int64, shape=[])
+    window_shift_t = array_ops.placeholder(dtypes.int64, shape=[])
+    window_stride_t = array_ops.placeholder(dtypes.int64, shape=[])
 
-      # stride > window_size.
-      sess.run(init_op, feed_dict={count: 20, window_size: 10, stride: 14})
-      num_batches = 20 * 7 // 14
-      for i in range(num_batches):
-        result = sess.run(get_next)
-        for component, result_component in zip(components, result):
-          for j in range(10):
-            self.assertAllEqual(component[(i*14 + j) % 7]**2,
-                                result_component[j])
-      with self.assertRaises(errors.OutOfRangeError):
-        sess.run(get_next)
-      # Drop the last batch which is smaller than window_size.
-      sess.run(init_op, feed_dict={count: 20, window_size: 14, stride: 19})
-      num_batches = (20 * 7 - 7) // 19  # = 19 * 7 // 19
-      for i in range(num_batches):
-        result = sess.run(get_next)
-        for component, result_component in zip(components, result):
-          for j in range(14):
-            self.assertAllEqual(component[(i*19 + j) % 7]**2,
-                                result_component[j])
-      with self.assertRaises(errors.OutOfRangeError):
-        sess.run(get_next)
+    iterator = (
+        dataset_ops.Dataset.range(10).map(lambda x: x).repeat(count_t).apply(
+            sliding.sliding_window_batch(
+                window_size=window_size_t,
+                window_shift=window_shift_t,
+                window_stride=window_stride_t)).make_initializable_iterator())
+    init_op = iterator.initializer
 
-      # Slide over a finite input, which is less than window_size,
-      # should fail straight away.
-      sess.run(init_op, feed_dict={count: 1, window_size: 10, stride: 4})
-      with self.assertRaises(errors.OutOfRangeError):
-        sess.run(get_next)
-
-      sess.run(init_op, feed_dict={count: 1, window_size: 10, stride: 8})
-      with self.assertRaises(errors.OutOfRangeError):
-        sess.run(get_next)
-
-      # Slide over an empty input should fail straight away.
-      sess.run(init_op, feed_dict={count: 0, window_size: 8, stride: 4})
-      with self.assertRaises(errors.OutOfRangeError):
-        sess.run(get_next)
-
-      # Empty window_size should be an initialization time error.
+    with self.test_session() as sess:
       with self.assertRaises(errors.InvalidArgumentError):
-        sess.run(init_op, feed_dict={count: 14, window_size: 0, stride: 0})
+        sess.run(
+            init_op,
+            feed_dict={
+                count_t: count,
+                window_size_t: window_size,
+                window_shift_t: window_shift,
+                window_stride_t: window_stride
+            })
 
-      # Invalid stride should be an initialization time error.
-      with self.assertRaises(errors.InvalidArgumentError):
-        sess.run(init_op, feed_dict={count: 14, window_size: 3, stride: 0})
+  def testSlideDatasetValueError(self):
+    with self.assertRaises(ValueError):
+      dataset_ops.Dataset.range(10).map(lambda x: x).apply(
+          sliding.sliding_window_batch(
+              window_size=1, stride=1, window_shift=1, window_stride=1))
 
   def assertSparseValuesEqual(self, a, b):
     self.assertAllEqual(a.indices, b.indices)
@@ -155,7 +209,8 @@
           indices=[[0]], values=(i * [1]), dense_shape=[1])
 
     iterator = dataset_ops.Dataset.range(10).map(_sparse).apply(
-        sliding.sliding_window_batch(5, 3)).make_initializable_iterator()
+        sliding.sliding_window_batch(
+            window_size=5, window_shift=3)).make_initializable_iterator()
     init_op = iterator.initializer
     get_next = iterator.get_next()
 
@@ -183,7 +238,8 @@
           dense_shape=[i])
 
     iterator = dataset_ops.Dataset.range(10).map(_sparse).apply(
-        sliding.sliding_window_batch(5, 3)).make_initializable_iterator()
+        sliding.sliding_window_batch(
+            window_size=5, window_shift=3)).make_initializable_iterator()
     init_op = iterator.initializer
     get_next = iterator.get_next()
 
@@ -213,11 +269,11 @@
       return sparse_tensor.SparseTensorValue(
           indices=[[0]], values=(i * [1]), dense_shape=[1])
 
-    iterator = (dataset_ops.Dataset.range(10)
-                .map(_sparse)
-                .apply(sliding.sliding_window_batch(4, 2))
-                .apply(sliding.sliding_window_batch(3, 1))
-                .make_initializable_iterator())
+    iterator = (
+        dataset_ops.Dataset.range(10).map(_sparse).apply(
+            sliding.sliding_window_batch(window_size=4, window_shift=2)).apply(
+                sliding.sliding_window_batch(window_size=3, window_shift=1))
+        .make_initializable_iterator())
     init_op = iterator.initializer
     get_next = iterator.get_next()
 
@@ -226,9 +282,9 @@
       # Slide: 1st batch.
       actual = sess.run(get_next)
       expected = sparse_tensor.SparseTensorValue(
-          indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0],
-                   [1, 0, 0], [1, 1, 0], [1, 2, 0], [1, 3, 0],
-                   [2, 0, 0], [2, 1, 0], [2, 2, 0], [2, 3, 0]],
+          indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 0, 0],
+                   [1, 1, 0], [1, 2, 0], [1, 3, 0], [2, 0, 0], [2, 1, 0],
+                   [2, 2, 0], [2, 3, 0]],
           values=[0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7],
           dense_shape=[3, 4, 1])
       self.assertTrue(sparse_tensor.is_sparse(actual))
@@ -236,9 +292,9 @@
       # Slide: 2nd batch.
       actual = sess.run(get_next)
       expected = sparse_tensor.SparseTensorValue(
-          indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0],
-                   [1, 0, 0], [1, 1, 0], [1, 2, 0], [1, 3, 0],
-                   [2, 0, 0], [2, 1, 0], [2, 2, 0], [2, 3, 0]],
+          indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 0, 0],
+                   [1, 1, 0], [1, 2, 0], [1, 3, 0], [2, 0, 0], [2, 1, 0],
+                   [2, 2, 0], [2, 3, 0]],
           values=[2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8, 9],
           dense_shape=[3, 4, 1])
       self.assertTrue(sparse_tensor.is_sparse(actual))
@@ -253,10 +309,11 @@
       yield [4.0, 5.0, 6.0]
       yield [7.0, 8.0, 9.0, 10.0]
 
-    iterator = (dataset_ops.Dataset.from_generator(generator, dtypes.float32,
-                                                   output_shapes=[None])
-                .apply(sliding.sliding_window_batch(3, 1))
-                .make_initializable_iterator())
+    iterator = (
+        dataset_ops.Dataset.from_generator(
+            generator, dtypes.float32, output_shapes=[None]).apply(
+                sliding.sliding_window_batch(window_size=3, window_shift=1))
+        .make_initializable_iterator())
     next_element = iterator.get_next()
 
     with self.test_session() as sess:
diff --git a/tensorflow/contrib/data/python/ops/sliding.py b/tensorflow/contrib/data/python/ops/sliding.py
index 3f3c5ca..e9dd745 100644
--- a/tensorflow/contrib/data/python/ops/sliding.py
+++ b/tensorflow/contrib/data/python/ops/sliding.py
@@ -23,25 +23,29 @@
 from tensorflow.python.framework import ops
 from tensorflow.python.framework import tensor_shape
 from tensorflow.python.ops import gen_dataset_ops
+from tensorflow.python.util import deprecation
 
 
 class _SlideDataset(dataset_ops.Dataset):
   """A `Dataset` that passes a sliding window over its input."""
 
-  def __init__(self, input_dataset, window_size, stride=1):
+  def __init__(self, input_dataset, window_size, window_shift, window_stride):
     """See `sliding_window_batch` for details."""
     super(_SlideDataset, self).__init__()
     self._input_dataset = input_dataset
     self._window_size = ops.convert_to_tensor(
-        window_size, dtype=dtypes.int64, name="window_size")
-    self._stride = ops.convert_to_tensor(
-        stride, dtype=dtypes.int64, name="stride")
+        window_size, dtype=dtypes.int64, name="window_stride")
+    self._window_stride = ops.convert_to_tensor(
+        window_stride, dtype=dtypes.int64, name="window_stride")
+    self._window_shift = ops.convert_to_tensor(
+        window_shift, dtype=dtypes.int64, name="window_shift")
 
   def _as_variant_tensor(self):
     return gen_dataset_ops.slide_dataset(
         self._input_dataset._as_variant_tensor(),  # pylint: disable=protected-access
         window_size=self._window_size,
-        stride=self._stride,
+        window_shift=self._window_shift,
+        window_stride=self._window_stride,
         **dataset_ops.flat_structure(self))
 
   @property
@@ -61,38 +65,63 @@
     return self._input_dataset.output_types
 
 
-def sliding_window_batch(window_size, stride=1):
-  """A sliding window with size of `window_size` and step of `stride`.
+@deprecation.deprecated_args(
+    None, "stride is deprecated, use window_shift instead", "stride")
+def sliding_window_batch(window_size,
+                         stride=None,
+                         window_shift=None,
+                         window_stride=1):
+  """A sliding window over a dataset.
 
-  This transformation passes a sliding window over this dataset. The
-  window size is `window_size` and step size is `stride`. If the left
-  elements cannot fill up the sliding window, this transformation will
-  drop the final smaller element. For example:
+  This transformation passes a sliding window over this dataset. The window size
+  is `window_size`, the stride of the input elements is `window_stride`, and the
+  shift between consecutive windows is `window_shift`. If the remaining elements
+  cannot fill up the sliding window, this transformation will drop the final
+  smaller window. For example:
 
   ```python
   # NOTE: The following examples use `{ ... }` to represent the
   # contents of a dataset.
   a = { [1], [2], [3], [4], [5], [6] }
 
-  a.apply(tf.contrib.data.sliding_window_batch(window_size=3, stride=2)) ==
-  {
-      [[1], [2], [3]],
-      [[3], [4], [5]],
-  }
+  a.apply(sliding_window_batch(window_size=3)) ==
+  { [[1], [2], [3]], [[2], [3], [4]], [[3], [4], [5]], [[4], [5], [6]] }
+
+  a.apply(sliding_window_batch(window_size=3, window_shift=2)) ==
+  { [[1], [2], [3]], [[3], [4], [5]] }
+
+  a.apply(sliding_window_batch(window_size=3, window_stride=2)) ==
+  { [[1], [3], [5]], [[2], [4], [6]] }
   ```
 
   Args:
     window_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
-      elements in the sliding window.
+      elements in the sliding window. It must be positive.
     stride: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
-      steps moving the sliding window forward for one iteration. The default
-      is `1`. It must be positive.
+      forward shift of the sliding window in each iteration. The default is `1`.
+      It must be positive. Deprecated alias for `window_shift`.
+    window_shift: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
+      forward shift of the sliding window in each iteration. The default is `1`.
+      It must be positive.
+    window_stride: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
+      stride of the input elements in the sliding window. The default is `1`.
+      It must be positive.
 
   Returns:
     A `Dataset` transformation function, which can be passed to
     @{tf.data.Dataset.apply}.
+
+  Raises:
+    ValueError: if invalid arguments are provided.
   """
+  if stride is None and window_shift is None:
+    window_shift = 1
+  elif stride is not None and window_shift is None:
+    window_shift = stride
+  elif stride is not None and window_shift is not None:
+    raise ValueError("Cannot specify both `stride` and `window_shift`")
+
   def _apply_fn(dataset):
-    return _SlideDataset(dataset, window_size, stride)
+    return _SlideDataset(dataset, window_size, window_shift, window_stride)
 
   return _apply_fn
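
A minimal usage sketch of the new keyword arguments (TF 1.x graph mode assumed; the expected window count follows the same formula the tests above use, `(N - ((window_size - 1) * window_stride + 1)) // window_shift + 1`):

```python
import tensorflow as tf
from tensorflow.contrib.data.python.ops import sliding

# Dataset { 0, 1, 2, 3, 4, 5 }; window_size=3, window_shift=2, window_stride=1
# gives (6 - 3) // 2 + 1 = 2 full windows; the trailing partial one is dropped.
dataset = tf.data.Dataset.range(6).apply(
    sliding.sliding_window_batch(window_size=3, window_shift=2))

next_element = dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
  print(sess.run(next_element))  # => [0 1 2]
  print(sess.run(next_element))  # => [2 3 4]
```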
diff --git a/tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py b/tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py
index b8f2a4b..296e66f 100644
--- a/tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py
+++ b/tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py
@@ -514,9 +514,8 @@
        Masked Autoencoder for Distribution Estimation. In _International
        Conference on Machine Learning_, 2015. https://arxiv.org/abs/1502.03509
   """
-
-  with ops.name_scope(name, "masked_autoregressive_default_template",
-                      values=[log_scale_min_clip, log_scale_max_clip]):
+  name = name or "masked_autoregressive_default_template"
+  with ops.name_scope(name, values=[log_scale_min_clip, log_scale_max_clip]):
     def _fn(x):
       """MADE parameterized via `masked_autoregressive_default_template`."""
       # TODO(b/67594795): Better support of dynamic shape.
@@ -552,8 +551,7 @@
                     else _clip_by_value_preserve_grad)
       log_scale = which_clip(log_scale, log_scale_min_clip, log_scale_max_clip)
       return shift, log_scale
-    return template_ops.make_template(
-        "masked_autoregressive_default_template", _fn)
+    return template_ops.make_template(name, _fn)
 
 
 @deprecation.deprecated(
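
The change above threads the caller-supplied `name` into both the name scope and `make_template`, so the template's variables live under that name rather than a fixed scope. A toy sketch of the naming behavior (the `_scale` body is hypothetical, not the MADE template itself):

```python
import tensorflow as tf

def _scale(x):
  # Variables created inside the template are scoped under the template name.
  w = tf.get_variable("w", shape=[], initializer=tf.ones_initializer())
  return w * x

scale = tf.make_template("my_made", _scale)
y = scale(tf.constant(3.0))  # creates the variable "my_made/w"
```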
diff --git a/tensorflow/contrib/eager/python/examples/gan/mnist.py b/tensorflow/contrib/eager/python/examples/gan/mnist.py
index cc9cf53..b332430 100644
--- a/tensorflow/contrib/eager/python/examples/gan/mnist.py
+++ b/tensorflow/contrib/eager/python/examples/gan/mnist.py
@@ -214,7 +214,7 @@
 
   total_generator_loss = 0.0
   total_discriminator_loss = 0.0
-  for (batch_index, images) in enumerate(tfe.Iterator(dataset)):
+  for (batch_index, images) in enumerate(dataset):
     with tf.device('/cpu:0'):
       tf.assign_add(step_counter, 1)
 
@@ -227,7 +227,10 @@
           maxval=1.,
           seed=batch_index)
 
-      with tf.GradientTape(persistent=True) as g:
+      # We can use two tapes or a single persistent tape.
+      # Using two tapes is memory efficient since intermediate tensors can be
+      # released between the two .gradient() calls below.
+      with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
         generated_images = generator(noise)
         tf.contrib.summary.image(
             'generated_images',
@@ -243,9 +246,10 @@
         generator_loss_val = generator_loss(discriminator_gen_outputs)
         total_generator_loss += generator_loss_val
 
-      generator_grad = g.gradient(generator_loss_val, generator.variables)
-      discriminator_grad = g.gradient(discriminator_loss_val,
-                                      discriminator.variables)
+      generator_grad = gen_tape.gradient(generator_loss_val,
+                                         generator.variables)
+      discriminator_grad = disc_tape.gradient(discriminator_loss_val,
+                                              discriminator.variables)
 
       generator_optimizer.apply_gradients(
           zip(generator_grad, generator.variables))
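
A stripped-down sketch of the two-tape pattern adopted above (toy losses and variables, TF 1.x eager mode assumed): each non-persistent tape only keeps the intermediates its own `.gradient()` call needs.

```python
import tensorflow as tf

tf.enable_eager_execution()

x = tf.constant(3.0)
w1 = tf.contrib.eager.Variable(2.0)
w2 = tf.contrib.eager.Variable(5.0)

# Two non-persistent tapes instead of one persistent tape: intermediates
# recorded for loss1 can be freed as soon as tape1.gradient() returns.
with tf.GradientTape() as tape1, tf.GradientTape() as tape2:
  loss1 = w1 * x
  loss2 = w2 * x * x

grad1 = tape1.gradient(loss1, [w1])
grad2 = tape2.gradient(loss2, [w2])
```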
diff --git a/tensorflow/contrib/eager/python/examples/generative_examples/image_captioning_with_attention.ipynb b/tensorflow/contrib/eager/python/examples/generative_examples/image_captioning_with_attention.ipynb
index 9c7004b..1a5a186 100644
--- a/tensorflow/contrib/eager/python/examples/generative_examples/image_captioning_with_attention.ipynb
+++ b/tensorflow/contrib/eager/python/examples/generative_examples/image_captioning_with_attention.ipynb
@@ -1154,7 +1154,7 @@
       },
       "cell_type": "code",
       "source": [
-        "image_url = 'https://tensorflow.org/images/imcap_prediction.png'\n",
+        "image_url = 'https://tensorflow.org/images/surf.jpg'\n",
         "image_extension = image_url[-4:]\n",
         "image_path = tf.keras.utils.get_file('image'+image_extension, \n",
         "                                     origin=image_url)\n",
diff --git a/tensorflow/contrib/eager/python/examples/resnet50/resnet50_test.py b/tensorflow/contrib/eager/python/examples/resnet50/resnet50_test.py
index b14ef1d..07d8788 100644
--- a/tensorflow/contrib/eager/python/examples/resnet50/resnet50_test.py
+++ b/tensorflow/contrib/eager/python/examples/resnet50/resnet50_test.py
@@ -29,6 +29,7 @@
 from tensorflow.contrib.eager.python.examples.resnet50 import resnet50
 from tensorflow.contrib.summary import summary_test_util
 from tensorflow.python.client import device_lib
+from tensorflow.python.eager import tape
 
 
 def device_and_data_format():
@@ -49,13 +50,21 @@
   return images, one_hot
 
 
-def compute_gradients(model, images, labels):
-  with tf.GradientTape() as tape:
+def compute_gradients(model, images, labels, num_replicas=1):
+  with tf.GradientTape() as grad_tape:
     logits = model(images, training=True)
     loss = tf.losses.softmax_cross_entropy(
         logits=logits, onehot_labels=labels)
     tf.contrib.summary.scalar(name='loss', tensor=loss)
-  return tape.gradient(loss, model.variables)
+    if num_replicas != 1:
+      loss /= num_replicas
+
+  # TODO(b/110991947): We can mistakenly trace the gradient call in a
+  # multi-threaded environment. Explicitly disable recording until
+  # this is fixed.
+  with tape.stop_recording():
+    grads = grad_tape.gradient(loss, model.variables)
+  return grads
 
 
 def apply_gradients(model, optimizer, gradients):
@@ -188,11 +197,14 @@
         return (32,)
     return (16, 32)
 
-  def _report(self, label, start, num_iters, device, batch_size, data_format):
+  def _report(self, label, start, num_iters, device, batch_size, data_format,
+              num_replicas=1):
     avg_time = (time.time() - start) / num_iters
     dev = tf.DeviceSpec.from_string(device).device_type.lower()
-    name = '%s_%s_batch_%d_%s' % (label, dev, batch_size, data_format)
-    extras = {'examples_per_sec': batch_size / avg_time}
+    replica_str = '' if num_replicas == 1 else 'replicas_%d_' % num_replicas
+    name = '%s_%s_batch_%d_%s%s' % (label, dev, batch_size,
+                                    replica_str, data_format)
+    extras = {'examples_per_sec': (num_replicas * batch_size) / avg_time}
     self.report_benchmark(
         iters=num_iters, wall_time=avg_time, name=name, extras=extras)
 
diff --git a/tensorflow/contrib/framework/python/ops/variables_test.py b/tensorflow/contrib/framework/python/ops/variables_test.py
index 7e0c7db..3c44630 100644
--- a/tensorflow/contrib/framework/python/ops/variables_test.py
+++ b/tensorflow/contrib/framework/python/ops/variables_test.py
@@ -106,8 +106,9 @@
   def testResourceVariable(self):
     a = variables_lib2.local_variable(0)
     b = variables_lib2.local_variable(0, use_resource=True)
-    self.assertEqual(type(a), variables_lib.Variable)
-    self.assertEqual(type(b), resource_variable_ops.ResourceVariable)
+    self.assertTrue(isinstance(a, variables_lib.Variable))
+    self.assertFalse(isinstance(a, resource_variable_ops.ResourceVariable))
+    self.assertTrue(isinstance(b, resource_variable_ops.ResourceVariable))
 
 
 class GlobalVariableTest(test.TestCase):
@@ -176,8 +177,9 @@
   def testResourceVariable(self):
     a = variables_lib2.global_variable(0)
     b = variables_lib2.global_variable(0, use_resource=True)
-    self.assertEqual(type(a), variables_lib.Variable)
-    self.assertEqual(type(b), resource_variable_ops.ResourceVariable)
+    self.assertTrue(isinstance(a, variables_lib.Variable))
+    self.assertFalse(isinstance(a, resource_variable_ops.ResourceVariable))
+    self.assertTrue(isinstance(b, resource_variable_ops.ResourceVariable))
 
 
 class GlobalStepTest(test.TestCase):
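
The switch from exact `type()` equality to `isinstance` makes these tests robust to subclasses (for instance, variables wrapped by custom getters). A tiny sketch under that assumption:

```python
import tensorflow as tf
from tensorflow.python.ops import resource_variable_ops

v = tf.get_variable("demo_v", shape=[], use_resource=True)
# A subclass of ResourceVariable would fail `type(v) == ResourceVariable`
# but still satisfy isinstance, which is all the tests need to assert.
assert isinstance(v, resource_variable_ops.ResourceVariable)
```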
diff --git a/tensorflow/contrib/learn/python/learn/estimators/run_config.py b/tensorflow/contrib/learn/python/learn/estimators/run_config.py
index 14ee2ba..7cb8761 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/run_config.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/run_config.py
@@ -240,6 +240,7 @@
                keep_checkpoint_max=5,
                keep_checkpoint_every_n_hours=10000,
                log_step_count_steps=100,
+               protocol=None,
                evaluation_master='',
                model_dir=None,
                session_config=None):
@@ -289,6 +290,8 @@
       session_config: a ConfigProto used to set session parameters, or None.
         Note - using this argument, it is easy to provide settings which break
         otherwise perfectly good models. Use with care.
+      protocol: An optional argument which specifies the protocol used when
+        starting the server. None means the default, which is grpc.
     """
     # Neither parent class calls super().__init__(), so here we have to
     # manually call their __init__() methods.
@@ -313,6 +316,7 @@
     self._save_summary_steps = save_summary_steps
     self._save_checkpoints_secs = save_checkpoints_secs
     self._log_step_count_steps = log_step_count_steps
+    self._protocol = protocol
     self._session_config = session_config
     if save_checkpoints_secs == RunConfig._USE_DEFAULT:
       if save_checkpoints_steps is None:
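
A minimal sketch of the new constructor argument (the docstring above only promises that `None` falls back to grpc; passing the value explicitly is shown here as an assumption):

```python
from tensorflow.contrib.learn.python.learn.estimators import run_config

# Equivalent to the default behavior: start the server over gRPC.
config = run_config.RunConfig(protocol='grpc')
```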
diff --git a/tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py b/tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py
index 5e7b422..e742447 100644
--- a/tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py
+++ b/tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py
@@ -625,11 +625,13 @@
     v = []
     attention_vec_size = attn_size  # Size of query vectors for attention.
     for a in xrange(num_heads):
-      k = variable_scope.get_variable("AttnW_%d" % a,
-                                      [1, 1, attn_size, attention_vec_size])
+      k = variable_scope.get_variable(
+          "AttnW_%d" % a, [1, 1, attn_size, attention_vec_size],
+          dtype=dtype)
       hidden_features.append(nn_ops.conv2d(hidden, k, [1, 1, 1, 1], "SAME"))
       v.append(
-          variable_scope.get_variable("AttnV_%d" % a, [attention_vec_size]))
+          variable_scope.get_variable(
+              "AttnV_%d" % a, [attention_vec_size], dtype=dtype))
 
     state = initial_state
 
@@ -647,11 +649,13 @@
         with variable_scope.variable_scope("Attention_%d" % a):
           y = Linear(query, attention_vec_size, True)(query)
           y = array_ops.reshape(y, [-1, 1, 1, attention_vec_size])
+          y = math_ops.cast(y, dtype)
           # Attention mask is a softmax of v^T * tanh(...).
           s = math_ops.reduce_sum(v[a] * math_ops.tanh(hidden_features[a] + y),
                                   [2, 3])
-          a = nn_ops.softmax(s)
+          a = nn_ops.softmax(math_ops.cast(s, dtype=dtypes.float32))
           # Now calculate the attention-weighted vector d.
+          a = math_ops.cast(a, dtype)
           d = math_ops.reduce_sum(
               array_ops.reshape(a, [-1, attn_length, 1, 1]) * hidden, [1, 2])
           ds.append(array_ops.reshape(d, [-1, attn_size]))
@@ -681,6 +685,7 @@
         raise ValueError("Could not infer input size from input: %s" % inp.name)
 
       inputs = [inp] + attns
+      inputs = [math_ops.cast(e, dtype) for e in inputs]
       x = Linear(inputs, input_size, True)(inputs)
       # Run the RNN.
       cell_output, state = cell(x, state)
@@ -693,6 +698,7 @@
         attns = attention(state)
 
       with variable_scope.variable_scope("AttnOutputProjection"):
+        cell_output = math_ops.cast(cell_output, dtype)
         inputs = [cell_output] + attns
         output = Linear(inputs, output_size, True)(inputs)
       if loop_function is not None:
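
The casts added above follow one pattern: compute the attention softmax in float32 for numerical stability, then return to the working dtype. A minimal sketch of that round trip:

```python
import tensorflow as tf

s = tf.constant([[1.0, 2.0, 3.0]], dtype=tf.float16)
# Softmax in float32, then cast back to the attention dtype, mirroring
# the attention_decoder changes above.
a = tf.cast(tf.nn.softmax(tf.cast(s, tf.float32)), s.dtype)
```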
diff --git a/tensorflow/contrib/lite/arena_planner.cc b/tensorflow/contrib/lite/arena_planner.cc
index 4257e75..16a0e71 100644
--- a/tensorflow/contrib/lite/arena_planner.cc
+++ b/tensorflow/contrib/lite/arena_planner.cc
@@ -36,12 +36,13 @@
 
 ArenaPlanner::ArenaPlanner(TfLiteContext* context,
                            std::unique_ptr<GraphInfo> graph_info,
-                           bool preserve_inputs)
+                           bool preserve_inputs, bool preserve_intermediates)
     : context_(context),
       graph_info_(std::move(graph_info)),
       arena_(kDefaultArenaAlignment),
       persistent_arena_(kDefaultArenaAlignment),
-      preserve_inputs_(preserve_inputs) {}
+      preserve_inputs_(preserve_inputs),
+      preserve_intermediates_(preserve_intermediates) {}
 ArenaPlanner::~ArenaPlanner() {}
 
 int64_t ArenaPlanner::BasePointer(TfLiteAllocationType type) {
@@ -164,13 +165,15 @@
 
     // Then update the ref-counts of the node's inputs, and if necessary queue
     // them for deallocation.
-    TfLiteIntArray* node_inputs = node.inputs;
-    for (int j = 0; j < node_inputs->size; ++j) {
-      int tensor_index = node_inputs->data[j];
-      if (tensor_index != kOptionalTensor) {
-        refcounts[tensor_index]--;
-        if (refcounts[tensor_index] == 0) {
-          TF_LITE_ENSURE_STATUS(deallocate(i, tensor_index));
+    if (!preserve_intermediates_) {
+      TfLiteIntArray* node_inputs = node.inputs;
+      for (int j = 0; j < node_inputs->size; ++j) {
+        int tensor_index = node_inputs->data[j];
+        if (tensor_index != kOptionalTensor) {
+          refcounts[tensor_index]--;
+          if (refcounts[tensor_index] == 0) {
+            TF_LITE_ENSURE_STATUS(deallocate(i, tensor_index));
+          }
         }
       }
     }
diff --git a/tensorflow/contrib/lite/arena_planner.h b/tensorflow/contrib/lite/arena_planner.h
index 1d84950..82c8667 100644
--- a/tensorflow/contrib/lite/arena_planner.h
+++ b/tensorflow/contrib/lite/arena_planner.h
@@ -47,7 +47,7 @@
   // graph will not share memory with any other tensor, effectively preserving
   // them until the end of inference.
   ArenaPlanner(TfLiteContext* context, std::unique_ptr<GraphInfo> graph_info,
-               bool preserve_inputs);
+               bool preserve_inputs, bool preserve_intermediates);
   ~ArenaPlanner() override;
   ArenaPlanner(const ArenaPlanner&) = delete;
   ArenaPlanner& operator=(const ArenaPlanner&) = delete;
@@ -104,7 +104,14 @@
   // declared as kTfLiteArenaRwPersistent.
   SimpleMemoryArena persistent_arena_;
 
+  // Ensure that the memory self-allocated for inputs is never reused by the
+  // allocator. This allows, for example, multiple runs without getting
+  // unpredictable results.
   bool preserve_inputs_;
+
+  // If true, then no overlapping of memory areas is done, meaning intermediate
+  // results can be queried after running (modulo running delegates).
+  bool preserve_intermediates_;
 };
 
 }  // namespace tflite
diff --git a/tensorflow/contrib/lite/arena_planner_test.cc b/tensorflow/contrib/lite/arena_planner_test.cc
index f5bd193..1adb426 100644
--- a/tensorflow/contrib/lite/arena_planner_test.cc
+++ b/tensorflow/contrib/lite/arena_planner_test.cc
@@ -156,7 +156,7 @@
     context_.ReportError = ReportError;
     planner_.reset(new ArenaPlanner(
         &context_, std::unique_ptr<GraphInfo>(new TestGraphInfo(graph)),
-        preserve_inputs));
+        preserve_inputs, /*preserve_intermediates=*/false));
     CHECK(planner_->ResetAllocations() == kTfLiteOk);
     CHECK(planner_->PlanAllocations() == kTfLiteOk);
   }
diff --git a/tensorflow/contrib/lite/build_def.bzl b/tensorflow/contrib/lite/build_def.bzl
index b735d08..bed8624 100644
--- a/tensorflow/contrib/lite/build_def.bzl
+++ b/tensorflow/contrib/lite/build_def.bzl
@@ -234,6 +234,8 @@
         "padv2",
         "prelu",
         "pow",
+        "reduce_max",
+        "reduce_prod",
         "relu",
         "relu1",
         "relu6",
diff --git a/tensorflow/contrib/lite/builtin_op_data.h b/tensorflow/contrib/lite/builtin_op_data.h
index a58dde9..a24aaad 100644
--- a/tensorflow/contrib/lite/builtin_op_data.h
+++ b/tensorflow/contrib/lite/builtin_op_data.h
@@ -268,9 +268,13 @@
 } TfLiteShapeParams;
 
 typedef struct {
+  // Parameters supported by version 1:
   float min;
   float max;
   int num_bits;
+
+  // Parameters supported by version 2:
+  bool narrow_range;
 } TfLiteFakeQuantParams;
 
 #ifdef __cplusplus
diff --git a/tensorflow/contrib/lite/builtin_ops.h b/tensorflow/contrib/lite/builtin_ops.h
index 6bde5d2..4c7b27c 100644
--- a/tensorflow/contrib/lite/builtin_ops.h
+++ b/tensorflow/contrib/lite/builtin_ops.h
@@ -106,6 +106,8 @@
   kTfLiteBuiltinPow = 78,
   kTfLiteBuiltinArgMin = 79,
   kTfLiteBuiltinFakeQuant = 80,
+  kTfLiteBuiltinReduceProd = 81,
+  kTfLiteBuiltinReduceMax = 82,
 } TfLiteBuiltinOperator;
 
 #ifdef __cplusplus
diff --git a/tensorflow/contrib/lite/delegates/eager/BUILD b/tensorflow/contrib/lite/delegates/eager/BUILD
new file mode 100644
index 0000000..270d83d
--- /dev/null
+++ b/tensorflow/contrib/lite/delegates/eager/BUILD
@@ -0,0 +1,66 @@
+#
+# This is a TF Lite delegate that is powered by TensorFlow's Eager execution.
+#
+package(default_visibility = [
+    "//visibility:public",
+])
+
+licenses(["notice"])  # Apache 2.0
+
+cc_library(
+    name = "buffer_map",
+    srcs = ["buffer_map.cc"],
+    hdrs = ["buffer_map.h"],
+    deps = [
+        ":util",
+        "//tensorflow/c:c_api_internal",
+        "//tensorflow/contrib/lite:framework",
+        "//tensorflow/contrib/lite:kernel_api",
+        "//tensorflow/core:framework",
+        "//tensorflow/core:protos_all_cc",
+    ],
+)
+
+cc_test(
+    name = "buffer_map_test",
+    size = "small",
+    srcs = ["buffer_map_test.cc"],
+    tags = [
+        "tflite_not_portable",
+    ],
+    deps = [
+        ":buffer_map",
+        "//tensorflow/contrib/lite:framework",
+        "//tensorflow/contrib/lite:util",
+        "//tensorflow/contrib/lite/testing:util",
+        "@com_google_googletest//:gtest",
+    ],
+)
+
+cc_library(
+    name = "util",
+    srcs = ["util.cc"],
+    hdrs = ["util.h"],
+    deps = [
+        "//tensorflow/c:c_api_internal",
+        "//tensorflow/contrib/lite:framework",
+        "//tensorflow/contrib/lite:kernel_api",
+        "//tensorflow/core:framework",
+        "//tensorflow/core:lib",
+    ],
+)
+
+cc_test(
+    name = "util_test",
+    size = "small",
+    srcs = ["util_test.cc"],
+    tags = [
+        "tflite_not_portable",
+    ],
+    deps = [
+        ":util",
+        "//tensorflow/contrib/lite/testing:util",
+        "//tensorflow/core:lib",
+        "@com_google_googletest//:gtest",
+    ],
+)
diff --git a/tensorflow/contrib/lite/delegates/eager/buffer_map.cc b/tensorflow/contrib/lite/delegates/eager/buffer_map.cc
new file mode 100644
index 0000000..e4a780b
--- /dev/null
+++ b/tensorflow/contrib/lite/delegates/eager/buffer_map.cc
@@ -0,0 +1,105 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/contrib/lite/delegates/eager/buffer_map.h"
+
+#include "tensorflow/c/c_api_internal.h"
+#include "tensorflow/contrib/lite/delegates/eager/util.h"
+#include "tensorflow/core/framework/allocation_description.pb.h"
+#include "tensorflow/core/framework/log_memory.h"
+
+namespace tflite {
+namespace {
+// A tensor buffer that is allocated, deallocated and populated by TF Lite.
+class TfLiteTensorBuffer : public tensorflow::TensorBuffer {
+ public:
+  explicit TfLiteTensorBuffer(const TfLiteTensor* tensor) {
+    len_ = tensor->bytes;
+    // TODO(ahentz): if we can guarantee that TF Lite allocates tensors with
+    // the same alignment as TensorFlow (EIGEN_MAX_ALIGN_BYTES), then we can
+    // potentially eliminate the copy below.
+    data_ =
+        tensorflow::cpu_allocator()->AllocateRaw(EIGEN_MAX_ALIGN_BYTES, len_);
+    if (data_ != nullptr) {
+      if (tensorflow::LogMemory::IsEnabled()) {
+        tensorflow::LogMemory::RecordRawAllocation(
+            "TfLiteTensorBuffer_New",
+            tensorflow::LogMemory::EXTERNAL_TENSOR_ALLOCATION_STEP_ID, len_,
+            data_, tensorflow::cpu_allocator());
+      }
+      std::memcpy(data_, tensor->data.raw, tensor->bytes);
+    }
+  }
+
+  ~TfLiteTensorBuffer() override {
+    if (tensorflow::LogMemory::IsEnabled() && data_ != nullptr) {
+      tensorflow::LogMemory::RecordRawDeallocation(
+          "TfLiteTensorBuffer_Delete",
+          tensorflow::LogMemory::EXTERNAL_TENSOR_ALLOCATION_STEP_ID, data_,
+          tensorflow::cpu_allocator(), false);
+    }
+    tensorflow::cpu_allocator()->DeallocateRaw(data_);
+  }
+
+  void* data() const override { return data_; }
+  size_t size() const override { return len_; }
+
+  TensorBuffer* root_buffer() override { return this; }
+  void FillAllocationDescription(
+      tensorflow::AllocationDescription* proto) const override {
+    tensorflow::int64 rb = size();
+    proto->set_requested_bytes(rb);
+    proto->set_allocator_name(tensorflow::cpu_allocator()->Name());
+  }
+
+  // Prevents input forwarding from mutating this buffer.
+  bool OwnsMemory() const override { return false; }
+
+ private:
+  void* data_;
+  size_t len_;
+};
+}  // namespace
+
+BufferMap::BufferMap() {}
+
+BufferMap::~BufferMap() {}
+
+bool BufferMap::HasTensor(int tensor_index) const {
+  return id_to_tensor_.count(tensor_index) != 0;
+}
+
+tensorflow::Tensor BufferMap::GetTensor(int tensor_index) const {
+  return id_to_tensor_.at(tensor_index);
+}
+
+void BufferMap::SetFromTfLite(int tensor_index, const TfLiteTensor* tensor) {
+  tensorflow::TensorShape shape;
+  int num_dims = tensor->dims->size;
+  for (int i = 0; i < num_dims; ++i) {
+    shape.AddDim(tensor->dims->data[i]);
+  }
+  auto* buf = new TfLiteTensorBuffer(tensor);
+  tensorflow::Tensor t = tensorflow::TensorCApi::MakeTensor(
+      GetTensorFlowDataType(tensor->type), shape, buf);
+  buf->Unref();
+
+  SetFromTensorFlow(tensor_index, std::move(t));
+}
+
+void BufferMap::SetFromTensorFlow(int tensor_index, tensorflow::Tensor tensor) {
+  id_to_tensor_[tensor_index] = std::move(tensor);
+}
+
+}  // namespace tflite
diff --git a/tensorflow/contrib/lite/delegates/eager/buffer_map.h b/tensorflow/contrib/lite/delegates/eager/buffer_map.h
new file mode 100644
index 0000000..922f67f
--- /dev/null
+++ b/tensorflow/contrib/lite/delegates/eager/buffer_map.h
@@ -0,0 +1,59 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_CONTRIB_LITE_DELEGATES_EAGER_BUFFER_MAP_H_
+#define TENSORFLOW_CONTRIB_LITE_DELEGATES_EAGER_BUFFER_MAP_H_
+
+#include <map>
+
+#include "tensorflow/contrib/lite/context.h"
+#include "tensorflow/core/framework/tensor.h"
+
+namespace tflite {
+
+// Maps a TF Lite tensor index into a TensorFlow tensor.
+//
+// The TF Lite interpreter assigns integer indices to each of its tensors, but
+// the Eager delegate deals in terms of TensorFlow tensors. This class maps
+// from indices to tensors and allows the creation of new tensors to be
+// associated with a given index.
+class BufferMap {
+ public:
+  BufferMap();
+  ~BufferMap();
+
+  // Returns true if the given 'tensor_index' has a corresponding
+  // tensorflow::Tensor.
+  bool HasTensor(int tensor_index) const;
+
+  // Returns the tensorflow::Tensor associated with the given 'tensor_index'.
+  // Precondition: HasTensor() is true.
+  tensorflow::Tensor GetTensor(int tensor_index) const;
+
+  // Associates the given tensorflow::Tensor with the given 'tensor_index'.
+  // Note that tensorflow Tensors share data buffers, so this method is only a
+  // shallow copy.
+  void SetFromTensorFlow(int tensor_index, tensorflow::Tensor tensor);
+
+  // Same as above but creates a new tensorflow::Tensor with a copy of the
+  // given TfLiteTensor's data.
+  void SetFromTfLite(int tensor_index, const TfLiteTensor* tensor);
+
+ private:
+  std::map<int, tensorflow::Tensor> id_to_tensor_;
+};
+
+}  // namespace tflite
+
+#endif  // TENSORFLOW_CONTRIB_LITE_DELEGATES_EAGER_BUFFER_MAP_H_
diff --git a/tensorflow/contrib/lite/delegates/eager/buffer_map_test.cc b/tensorflow/contrib/lite/delegates/eager/buffer_map_test.cc
new file mode 100644
index 0000000..c447eea
--- /dev/null
+++ b/tensorflow/contrib/lite/delegates/eager/buffer_map_test.cc
@@ -0,0 +1,172 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/contrib/lite/delegates/eager/buffer_map.h"
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include "tensorflow/contrib/lite/interpreter.h"
+#include "tensorflow/contrib/lite/testing/util.h"
+#include "tensorflow/contrib/lite/util.h"
+
+namespace tflite {
+namespace {
+
+using ::testing::ElementsAre;
+
+// A bit of RAII to simplify handling of TfLiteTensors in the tests.
+using UniqueTfLiteTensor =
+    std::unique_ptr<TfLiteTensor, std::function<void(TfLiteTensor*)>>;
+
+template <typename T>
+UniqueTfLiteTensor MakeLiteTensor(const std::vector<int>& shape,
+                                  const std::vector<T>& data) {
+  auto tensor = UniqueTfLiteTensor(new TfLiteTensor, [](TfLiteTensor* t) {
+    TfLiteTensorDataFree(t);
+    TfLiteIntArrayFree(t->dims);
+    delete t;
+  });
+  tensor->allocation_type = kTfLiteDynamic;
+  tensor->type = typeToTfLiteType<T>();
+  tensor->dims = ConvertVectorToTfLiteIntArray(shape);
+  tensor->data.raw = nullptr;
+  TfLiteTensorRealloc(data.size() * sizeof(T), tensor.get());
+  memcpy(tensor->data.raw, data.data(), data.size() * sizeof(T));
+  return tensor;
+}
+
+template <typename T>
+tensorflow::Tensor MakeTensor(const std::vector<int>& shape,
+                              const std::vector<T>& data) {
+  BufferMap buffer_map;  // BufferMap is the easiest way to build the tensor.
+  UniqueTfLiteTensor t1 = MakeLiteTensor<T>(shape, data);
+  buffer_map.SetFromTfLite(0, t1.get());
+  return buffer_map.GetTensor(0);
+}
+
+std::vector<int64> GetTensorShape(const tensorflow::Tensor& t) {
+  std::vector<int64> shape(t.dims());
+  for (int i = 0; i < t.dims(); ++i) {
+    shape[i] = t.dim_size(i);
+  }
+  return shape;
+}
+
+template <typename T>
+std::vector<T> GetTensorData(const tensorflow::Tensor& t) {
+  const T* data = t.flat<T>().data();
+  return std::vector<T>(data, data + t.NumElements());
+}
+
+TEST(BufferMapTest, EmptyBuffer) {
+  BufferMap buffer_map;
+  EXPECT_FALSE(buffer_map.HasTensor(0));
+}
+
+TEST(BufferMapTest, SetFromTfLite) {
+  BufferMap buffer_map;
+
+  UniqueTfLiteTensor t =
+      MakeLiteTensor<float>({1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0});
+  buffer_map.SetFromTfLite(0, t.get());
+  ASSERT_TRUE(buffer_map.HasTensor(0));
+
+  EXPECT_THAT(GetTensorData<float>(buffer_map.GetTensor(0)),
+              ElementsAre(0, 0, 0, 0.123f, 0, 0));
+
+  // Also check details of the tensor.
+  tensorflow::Tensor out_tensor = buffer_map.GetTensor(0);
+  ASSERT_EQ(out_tensor.dtype(), tensorflow::DT_FLOAT);
+  ASSERT_EQ(out_tensor.NumElements(), 6);
+  ASSERT_THAT(GetTensorShape(out_tensor), ElementsAre(1, 2, 1, 3));
+}
+
+TEST(BufferMapTest, SetFromTfLiteTwice) {
+  UniqueTfLiteTensor t1 =
+      MakeLiteTensor<float>({1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0});
+  UniqueTfLiteTensor t2 =
+      MakeLiteTensor<int>({1, 2, 4}, {0, 0, 0, 3, 0, 0, 1, 2});
+
+  BufferMap buffer_map;
+  buffer_map.SetFromTfLite(0, t1.get());
+  buffer_map.SetFromTfLite(0, t2.get());
+
+  EXPECT_THAT(GetTensorData<int>(buffer_map.GetTensor(0)),
+              ElementsAre(0, 0, 0, 3, 0, 0, 1, 2));
+}
+
+TEST(BufferMapTest, SetFromTensorFlow) {
+  tensorflow::Tensor t1 =
+      MakeTensor<float>({1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0});
+
+  BufferMap buffer_map;
+  buffer_map.SetFromTensorFlow(0, t1);
+
+  EXPECT_THAT(GetTensorData<float>(buffer_map.GetTensor(0)),
+              ElementsAre(0, 0, 0, 0.123f, 0, 0));
+
+  // Also check details of the tensor.
+  tensorflow::Tensor out_tensor = buffer_map.GetTensor(0);
+  ASSERT_EQ(out_tensor.dtype(), tensorflow::DT_FLOAT);
+  ASSERT_EQ(out_tensor.NumElements(), 6);
+  ASSERT_THAT(GetTensorShape(out_tensor), ElementsAre(1, 2, 1, 3));
+}
+
+TEST(BufferMapTest, SetFromTensorFlowTwice) {
+  tensorflow::Tensor t1 =
+      MakeTensor<float>({1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0});
+  tensorflow::Tensor t2 = MakeTensor<int>({1, 2, 4}, {0, 0, 0, 3, 0, 0, 1, 2});
+  BufferMap buffer_map;
+  buffer_map.SetFromTensorFlow(0, t1);
+  buffer_map.SetFromTensorFlow(0, t2);
+
+  EXPECT_THAT(GetTensorData<int>(buffer_map.GetTensor(0)),
+              ElementsAre(0, 0, 0, 3, 0, 0, 1, 2));
+}
+
+TEST(BufferMapTest, TfLiteOverwritesTensorFlow) {
+  tensorflow::Tensor t1 =
+      MakeTensor<float>({1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0});
+  UniqueTfLiteTensor t2 =
+      MakeLiteTensor<int>({1, 2, 4}, {0, 0, 0, 3, 0, 0, 1, 2});
+
+  BufferMap buffer_map;
+  buffer_map.SetFromTensorFlow(0, t1);
+  buffer_map.SetFromTfLite(0, t2.get());
+
+  EXPECT_THAT(GetTensorData<int>(buffer_map.GetTensor(0)),
+              ElementsAre(0, 0, 0, 3, 0, 0, 1, 2));
+}
+
+TEST(BufferMapTest, TensorFlowOverwritesTfLite) {
+  tensorflow::Tensor t1 =
+      MakeTensor<float>({1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0});
+  UniqueTfLiteTensor t2 =
+      MakeLiteTensor<int>({1, 2, 4}, {0, 0, 0, 3, 0, 0, 1, 2});
+  BufferMap buffer_map;
+  buffer_map.SetFromTfLite(0, t2.get());
+  buffer_map.SetFromTensorFlow(0, t1);
+
+  EXPECT_THAT(GetTensorData<float>(buffer_map.GetTensor(0)),
+              ElementsAre(0, 0, 0, 0.123f, 0, 0));
+}
+
+}  // namespace
+}  // namespace tflite
+
+int main(int argc, char** argv) {
+  ::tflite::LogToStderr();
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/tensorflow/contrib/lite/delegates/eager/util.cc b/tensorflow/contrib/lite/delegates/eager/util.cc
new file mode 100644
index 0000000..e1879bd
--- /dev/null
+++ b/tensorflow/contrib/lite/delegates/eager/util.cc
@@ -0,0 +1,70 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/contrib/lite/delegates/eager/util.h"
+
+namespace tflite {
+
+TfLiteStatus ConvertStatus(TfLiteContext* context,
+                           const tensorflow::Status& status) {
+  if (!status.ok()) {
+    context->ReportError(context, "%s", status.error_message().c_str());
+    return kTfLiteError;
+  }
+  return kTfLiteOk;
+}
+
+TfLiteStatus CopyShape(TfLiteContext* context, const tensorflow::Tensor& src,
+                       TfLiteTensor* tensor) {
+  int num_dims = src.dims();
+  TfLiteIntArray* shape = TfLiteIntArrayCreate(num_dims);
+  for (int j = 0; j < num_dims; ++j) {
+    // We need to cast from TensorFlow's int64 to TF Lite's int32. Let's
+    // make sure there's no overflow.
+    if (src.dim_size(j) >= std::numeric_limits<int>::max()) {
+      context->ReportError(context,
+                           "Dimension value in TensorFlow shape is larger than "
+                           "supported by TF Lite");
+      TfLiteIntArrayFree(shape);
+      return kTfLiteError;
+    }
+    shape->data[j] = static_cast<int>(src.dim_size(j));
+  }
+  return context->ResizeTensor(context, tensor, shape);
+}
+
+TF_DataType GetTensorFlowDataType(TfLiteType type) {
+  switch (type) {
+    case kTfLiteNoType:
+      return TF_FLOAT;
+    case kTfLiteFloat32:
+      return TF_FLOAT;
+    case kTfLiteInt16:
+      return TF_INT16;
+    case kTfLiteInt32:
+      return TF_INT32;
+    case kTfLiteUInt8:
+      return TF_UINT8;
+    case kTfLiteInt64:
+      return TF_INT64;
+    case kTfLiteComplex64:
+      return TF_COMPLEX64;
+    case kTfLiteString:
+      return TF_STRING;
+    case kTfLiteBool:
+      return TF_BOOL;
+  }
+}
+
+}  // namespace tflite
diff --git a/tensorflow/contrib/lite/delegates/eager/util.h b/tensorflow/contrib/lite/delegates/eager/util.h
new file mode 100644
index 0000000..12b33b9
--- /dev/null
+++ b/tensorflow/contrib/lite/delegates/eager/util.h
@@ -0,0 +1,40 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_CONTRIB_LITE_DELEGATES_EAGER_UTIL_H_
+#define TENSORFLOW_CONTRIB_LITE_DELEGATES_EAGER_UTIL_H_
+
+#include "tensorflow/c/c_api_internal.h"
+#include "tensorflow/contrib/lite/context.h"
+#include "tensorflow/core/framework/tensor.h"
+#include "tensorflow/core/lib/core/status.h"
+
+namespace tflite {
+
+// Converts a tensorflow::Status into a TfLiteStatus. If the original status
+// represented an error, reports it using the given 'context'.
+TfLiteStatus ConvertStatus(TfLiteContext* context,
+                           const tensorflow::Status& status);
+
+// Copies the shape of the given 'src' into a TF Lite 'tensor'. Logs an
+// error and returns kTfLiteError if the shape can't be converted.
+TfLiteStatus CopyShape(TfLiteContext* context, const tensorflow::Tensor& src,
+                       TfLiteTensor* tensor);
+
+// Returns the TF C API Data type that corresponds to the given TfLiteType.
+TF_DataType GetTensorFlowDataType(TfLiteType type);
+
+}  // namespace tflite
+
+#endif  // TENSORFLOW_CONTRIB_LITE_DELEGATES_EAGER_UTIL_H_
diff --git a/tensorflow/contrib/lite/delegates/eager/util_test.cc b/tensorflow/contrib/lite/delegates/eager/util_test.cc
new file mode 100644
index 0000000..53ed4db
--- /dev/null
+++ b/tensorflow/contrib/lite/delegates/eager/util_test.cc
@@ -0,0 +1,111 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/contrib/lite/delegates/eager/util.h"
+
+#include <cstdarg>
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include "tensorflow/contrib/lite/testing/util.h"
+
+namespace tflite {
+namespace {
+
+using tensorflow::DT_FLOAT;
+using tensorflow::Tensor;
+using ::testing::ElementsAre;
+
+struct TestContext : public TfLiteContext {
+  string error;
+  std::vector<int> new_size;
+};
+
+void ReportError(TfLiteContext* context, const char* format, ...) {
+  TestContext* c = static_cast<TestContext*>(context);
+  const size_t kBufferSize = 1024;
+  char temp_buffer[kBufferSize];
+
+  va_list args;
+  va_start(args, format);
+  vsnprintf(temp_buffer, kBufferSize, format, args);
+  va_end(args);
+
+  c->error = temp_buffer;
+}
+
+TfLiteStatus ResizeTensor(TfLiteContext* context, TfLiteTensor* tensor,
+                          TfLiteIntArray* new_size) {
+  TestContext* c = static_cast<TestContext*>(context);
+  c->new_size.clear();
+  for (int i = 0; i < new_size->size; ++i) {
+    c->new_size.push_back(new_size->data[i]);
+  }
+  TfLiteIntArrayFree(new_size);
+  return kTfLiteOk;
+}
+
+TEST(UtilTest, ConvertStatus) {
+  TestContext context;
+  context.ReportError = ReportError;
+
+  EXPECT_EQ(ConvertStatus(&context, tensorflow::errors::Internal("Some Error")),
+            kTfLiteError);
+  EXPECT_EQ(context.error, "Some Error");
+
+  context.error.clear();
+  EXPECT_EQ(ConvertStatus(&context, tensorflow::Status()), kTfLiteOk);
+  EXPECT_TRUE(context.error.empty());
+}
+
+TEST(UtilTest, CopyShape) {
+  TestContext context;
+  context.ReportError = ReportError;
+  context.ResizeTensor = ResizeTensor;
+
+  TfLiteTensor dst;
+
+  EXPECT_EQ(CopyShape(&context, Tensor(), &dst), kTfLiteOk);
+  EXPECT_THAT(context.new_size, ElementsAre(0));
+
+  EXPECT_EQ(CopyShape(&context, Tensor(DT_FLOAT, {1, 2}), &dst), kTfLiteOk);
+  EXPECT_THAT(context.new_size, ElementsAre(1, 2));
+
+  EXPECT_EQ(CopyShape(&context, Tensor(DT_FLOAT, {1LL << 44, 2}), &dst),
+            kTfLiteError);
+  EXPECT_EQ(context.error,
+            "Dimension value in TensorFlow shape is larger than supported by "
+            "TF Lite");
+}
+
+TEST(UtilTest, TypeConversions) {
+  EXPECT_EQ(TF_FLOAT, GetTensorFlowDataType(kTfLiteNoType));
+  EXPECT_EQ(TF_FLOAT, GetTensorFlowDataType(kTfLiteFloat32));
+  EXPECT_EQ(TF_INT16, GetTensorFlowDataType(kTfLiteInt16));
+  EXPECT_EQ(TF_INT32, GetTensorFlowDataType(kTfLiteInt32));
+  EXPECT_EQ(TF_UINT8, GetTensorFlowDataType(kTfLiteUInt8));
+  EXPECT_EQ(TF_INT64, GetTensorFlowDataType(kTfLiteInt64));
+  EXPECT_EQ(TF_COMPLEX64, GetTensorFlowDataType(kTfLiteComplex64));
+  EXPECT_EQ(TF_STRING, GetTensorFlowDataType(kTfLiteString));
+  EXPECT_EQ(TF_BOOL, GetTensorFlowDataType(kTfLiteBool));
+}
+
+}  // namespace
+}  // namespace tflite
+
+int main(int argc, char** argv) {
+  ::tflite::LogToStderr();
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/tensorflow/contrib/lite/examples/android/app/README.md b/tensorflow/contrib/lite/examples/android/app/README.md
index 3065a5f..8e12bd0 100644
--- a/tensorflow/contrib/lite/examples/android/app/README.md
+++ b/tensorflow/contrib/lite/examples/android/app/README.md
@@ -4,10 +4,10 @@
 
 1. Follow the [Bazel steps for the TF Demo App](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/android#bazel).
 
-2. Build the app with Bazel. The demo needs C++11:
+2. Build the app with Bazel. The demo needs C++11. The --fat_apk_cpu flag packages support for four hardware variants; replace it with --config=android_arm64 on a 64-bit device or --config=android_arm on a 32-bit device:
 
   ```shell
-  bazel build -c opt --cxxopt='--std=c++11' \
+  bazel build -c opt --cxxopt='--std=c++11' --fat_apk_cpu=x86,x86_64,arm64-v8a,armeabi-v7a \
     //tensorflow/contrib/lite/examples/android:tflite_demo
   ```
 
diff --git a/tensorflow/contrib/lite/examples/android/app/build.gradle b/tensorflow/contrib/lite/examples/android/app/build.gradle
index 1ffb9dd..eb7fd70 100644
--- a/tensorflow/contrib/lite/examples/android/app/build.gradle
+++ b/tensorflow/contrib/lite/examples/android/app/build.gradle
@@ -51,7 +51,7 @@
 
 dependencies {
     compile fileTree(dir: 'libs', include: ['*.jar'])
-    androidTestCompile('com.androidx.test.espresso:espresso-core:2.2.2', {
+    androidTestCompile('androidx.test.espresso:espresso-core:3.1.0-alpha3', {
         exclude group: 'com.android.support', module: 'support-annotations'
     })
     compile 'org.tensorflow:tensorflow-lite:0.0.0-nightly'
diff --git a/tensorflow/contrib/lite/g3doc/models.md b/tensorflow/contrib/lite/g3doc/models.md
index c1c8ef0..4e7d33a 100644
--- a/tensorflow/contrib/lite/g3doc/models.md
+++ b/tensorflow/contrib/lite/g3doc/models.md
@@ -39,22 +39,22 @@
 
 Model Name               | Paper_Model_Files                                                                                                                                         | Model_Size | Top-1 Accuracy | Top-5 Accuracy | TF Lite Performance
 ------------------------ | :-------------------------------------------------------------------------------------------------------------------------------------------------------: | ---------: | -------------: | -------------: | ------------------:
-Mobilenet_0.25_128_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_128_quant.tgz) | 0.5 Mb     | 39.9%          | 65.8%          | 3.7 ms
-Mobilenet_0.25_160_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_160_quant.tgz) | 0.5 Mb     | 43.5%          | 69.1%          | 5.5 ms
-Mobilenet_0.25_192_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_192_quant.tgz) | 0.5 Mb     | 45.8%          | 71.9%          | 7.9 ms
-Mobilenet_0.25_224_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_224_quant.tgz) | 0.5 Mb     | 48.2%          | 73.8%          | 10.4 ms
-Mobilenet_0.50_128_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.5_128_quant.tgz)  | 1.4 Mb     | 54.9%          | 78.9%          | 8.8 ms
-Mobilenet_0.50_160_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.5_160_quant.tgz)  | 1.4 Mb     | 57.7%          | 81.3%          | 13.0 ms
-Mobilenet_0.50_192_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.5_192_quant.tgz)  | 1.4 Mb     | 60.4%          | 83.2%          | 18.3 ms
-Mobilenet_0.50_224_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.5_224_quant.tgz)  | 1.4 Mb     | 62.2%          | 84.5%          | 24.7 ms
-Mobilenet_0.75_128_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.75_128_quant.tgz) | 2.6 Mb     | 59.8%          | 82.8%          | 16.2 ms
-Mobilenet_0.75_160_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.75_160_quant.tgz) | 2.6 Mb     | 63.9%          | 85.5%          | 24.3 ms
-Mobilenet_0.75_192_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.75_192_quant.tgz) | 2.6 Mb     | 66.2%          | 87.1%          | 33.8 ms
-Mobilenet_0.75_224_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.75_224_quant.tgz) | 2.6 Mb     | 67.9%          | 88.1%          | 45.4 ms
-Mobilenet_1.0_128_quant  | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_128_quant.tgz)  | 4.3 Mb     | 64.0%          | 85.5%          | 24.9 ms
-Mobilenet_1.0_160_quant  | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_160_quant.tgz)  | 4.3 Mb     | 67.3%          | 87.7%          | 37.4 ms
-Mobilenet_1.0_192_quant  | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_192_quant.tgz)  | 4.3 Mb     | 69.0%          | 88.9%          | 51.9 ms
-Mobilenet_1.0_224_quant  | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224_quant.tgz)  | 4.3 Mb     | 69.7%          | 89.5%          | 70.2 ms
+Mobilenet_0.25_128_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.25_128_quant.tgz) | 0.5 Mb     | 39.7%          | 65.8%          | 3.7 ms
+Mobilenet_0.25_160_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.25_160_quant.tgz) | 0.5 Mb     | 41.9%          | 69.1%          | 5.5 ms
+Mobilenet_0.25_192_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.25_192_quant.tgz) | 0.5 Mb     | 45.3%          | 71.9%          | 7.9 ms
+Mobilenet_0.25_224_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.25_224_quant.tgz) | 0.5 Mb     | 46.4%          | 73.8%          | 10.4 ms
+Mobilenet_0.50_128_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.5_128_quant.tgz)  | 1.4 Mb     | 54.1%          | 78.9%          | 8.8 ms
+Mobilenet_0.50_160_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.5_160_quant.tgz)  | 1.4 Mb     | 57.6%          | 81.3%          | 13.0 ms
+Mobilenet_0.50_192_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.5_192_quant.tgz)  | 1.4 Mb     | 59.1%          | 83.2%          | 18.3 ms
+Mobilenet_0.50_224_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.5_224_quant.tgz)  | 1.4 Mb     | 61.0%          | 84.5%          | 24.7 ms
+Mobilenet_0.75_128_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.75_128_quant.tgz) | 2.6 Mb     | 52.5%          | 82.8%          | 16.2 ms
+Mobilenet_0.75_160_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.75_160_quant.tgz) | 2.6 Mb     | 63.6%          | 85.5%          | 24.3 ms
+Mobilenet_0.75_192_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.75_192_quant.tgz) | 2.6 Mb     | 61.1%          | 87.1%          | 33.8 ms
+Mobilenet_0.75_224_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.75_224_quant.tgz) | 2.6 Mb     | 66.7%          | 88.1%          | 45.4 ms
+Mobilenet_1.0_128_quant  | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_1.0_128_quant.tgz)  | 4.3 Mb     | 62.7%          | 85.5%          | 24.9 ms
+Mobilenet_1.0_160_quant  | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_1.0_160_quant.tgz)  | 4.3 Mb     | 66.6%          | 87.7%          | 37.4 ms
+Mobilenet_1.0_192_quant  | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_1.0_192_quant.tgz)  | 4.3 Mb     | 69.2%          | 88.9%          | 51.9 ms
+Mobilenet_1.0_224_quant  | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_1.0_224_quant.tgz)  | 4.3 Mb     | 69.3%          | 89.5%          | 70.2 ms
 
 ## Other models
 
diff --git a/tensorflow/contrib/lite/interpreter.cc b/tensorflow/contrib/lite/interpreter.cc
index 0641a08..d103786 100644
--- a/tensorflow/contrib/lite/interpreter.cc
+++ b/tensorflow/contrib/lite/interpreter.cc
@@ -593,7 +593,7 @@
   if (!memory_planner_) {
     memory_planner_.reset(new ArenaPlanner(
         &context_, std::unique_ptr<GraphInfo>(new InterpreterInfo(this)),
-        /*preserve_inputs=*/true));
+        /*preserve_inputs=*/true, /*preserve_intermediates*/ false));
     memory_planner_->PlanAllocations();
   }
 
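The interpreter hunk above threads a new `preserve_intermediates` flag into the `ArenaPlanner` constructor, keeping the existing behavior by passing `false`. Below is a minimal sketch of the trade-off such a flag controls, using a toy planner (none of these names are TFLite APIs): when intermediates may be released after their last use, later tensors can reuse that arena space; when intermediates must stay alive, peak arena size grows.

```cpp
// Illustrative sketch only -- not the real ArenaPlanner.
#include <algorithm>
#include <cstdio>
#include <vector>

// Each op produces one tensor; last_use_op is the op that last reads it.
struct TensorInfo {
  int bytes;
  int last_use_op;
};

int PeakArenaBytes(const std::vector<TensorInfo>& tensors,
                   bool preserve_intermediates) {
  int live = 0, peak = 0;
  for (int op = 0; op < static_cast<int>(tensors.size()); ++op) {
    live += tensors[op].bytes;  // Output of op `op` is allocated here.
    peak = std::max(peak, live);
    if (!preserve_intermediates) {
      // Release every tensor whose last reader has now executed.
      for (const TensorInfo& t : tensors) {
        if (t.last_use_op == op) live -= t.bytes;
      }
    }
  }
  return peak;
}

int main() {
  const std::vector<TensorInfo> tensors = {{100, 1}, {200, 2}, {50, 3}, {80, 3}};
  std::printf("reuse: %d bytes, preserve: %d bytes\n",
              PeakArenaBytes(tensors, false),   // 300
              PeakArenaBytes(tensors, true));   // 430
}
```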
diff --git a/tensorflow/contrib/lite/java/demo/app/build.gradle b/tensorflow/contrib/lite/java/demo/app/build.gradle
index 49868c5..92f04c6 100644
--- a/tensorflow/contrib/lite/java/demo/app/build.gradle
+++ b/tensorflow/contrib/lite/java/demo/app/build.gradle
@@ -44,7 +44,7 @@
 
 dependencies {
     compile fileTree(dir: 'libs', include: ['*.jar'])
-    androidTestCompile('com.androidx.test.espresso:espresso-core:2.2.2', {
+    androidTestCompile('androidx.test.espresso:espresso-core:3.1.0-alpha3', {
         exclude group: 'com.android.support', module: 'support-annotations'
     })
     compile 'com.android.support:appcompat-v7:25.2.0'
diff --git a/tensorflow/contrib/lite/java/ovic/demo/app/build.gradle b/tensorflow/contrib/lite/java/ovic/demo/app/build.gradle
index 3f32d62..2a08608 100644
--- a/tensorflow/contrib/lite/java/ovic/demo/app/build.gradle
+++ b/tensorflow/contrib/lite/java/ovic/demo/app/build.gradle
@@ -43,7 +43,7 @@
 
 dependencies {
     compile fileTree(dir: 'libs', include: ['*.jar'])
-    androidTestCompile('com.androidx.test.espresso:espresso-core:2.2.2', {
+    androidTestCompile('androidx.test.espresso:espresso-core:3.1.0-alpha3', {
         exclude group: 'com.android.support', module: 'support-annotations'
     })
     compile 'com.android.support:appcompat-v7:25.2.0'
diff --git a/tensorflow/contrib/lite/kernels/BUILD b/tensorflow/contrib/lite/kernels/BUILD
index edce739..33594c1 100644
--- a/tensorflow/contrib/lite/kernels/BUILD
+++ b/tensorflow/contrib/lite/kernels/BUILD
@@ -984,7 +984,6 @@
         ":builtin_ops",
         "//tensorflow/contrib/lite:framework",
         "//tensorflow/contrib/lite/kernels:test_util",
-        "@com_google_absl//absl/memory",
         "@com_google_googletest//:gtest",
     ],
 )
diff --git a/tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc b/tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc
index 3425288..14a19ae 100644
--- a/tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc
+++ b/tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc
@@ -276,27 +276,33 @@
 TfLiteStatus CheckInputTensorDimensions(TfLiteContext* context,
                                         TfLiteNode* node, int n_input,
                                         int n_output, int n_cell) {
-  CheckLstmTensorDimensions(
-      context, node, n_input, n_output, n_cell, kFwInputToInputWeightsTensor,
-      kFwInputToForgetWeightsTensor, kFwInputToCellWeightsTensor,
-      kFwInputToOutputWeightsTensor, kFwRecurrentToInputWeightsTensor,
-      kFwRecurrentToForgetWeightsTensor, kFwRecurrentToCellWeightsTensor,
-      kFwRecurrentToOutputWeightsTensor, kFwCellToInputWeightsTensor,
-      kFwCellToForgetWeightsTensor, kFwCellToOutputWeightsTensor,
-      kFwInputGateBiasTensor, kFwForgetGateBiasTensor, kFwCellGateBiasTensor,
-      kFwOutputGateBiasTensor, kFwProjectionWeightsTensor,
-      kFwProjectionBiasTensor);
+  TF_LITE_ENSURE_OK(
+      context,
+      CheckLstmTensorDimensions(
+          context, node, n_input, n_output, n_cell,
+          kFwInputToInputWeightsTensor, kFwInputToForgetWeightsTensor,
+          kFwInputToCellWeightsTensor, kFwInputToOutputWeightsTensor,
+          kFwRecurrentToInputWeightsTensor, kFwRecurrentToForgetWeightsTensor,
+          kFwRecurrentToCellWeightsTensor, kFwRecurrentToOutputWeightsTensor,
+          kFwCellToInputWeightsTensor, kFwCellToForgetWeightsTensor,
+          kFwCellToOutputWeightsTensor, kFwInputGateBiasTensor,
+          kFwForgetGateBiasTensor, kFwCellGateBiasTensor,
+          kFwOutputGateBiasTensor, kFwProjectionWeightsTensor,
+          kFwProjectionBiasTensor));
 
-  CheckLstmTensorDimensions(
-      context, node, n_input, n_output, n_cell, kBwInputToInputWeightsTensor,
-      kBwInputToForgetWeightsTensor, kBwInputToCellWeightsTensor,
-      kBwInputToOutputWeightsTensor, kBwRecurrentToInputWeightsTensor,
-      kBwRecurrentToForgetWeightsTensor, kBwRecurrentToCellWeightsTensor,
-      kBwRecurrentToOutputWeightsTensor, kBwCellToInputWeightsTensor,
-      kBwCellToForgetWeightsTensor, kBwCellToOutputWeightsTensor,
-      kBwInputGateBiasTensor, kBwForgetGateBiasTensor, kBwCellGateBiasTensor,
-      kBwOutputGateBiasTensor, kBwProjectionWeightsTensor,
-      kBwProjectionBiasTensor);
+  TF_LITE_ENSURE_OK(
+      context,
+      CheckLstmTensorDimensions(
+          context, node, n_input, n_output, n_cell,
+          kBwInputToInputWeightsTensor, kBwInputToForgetWeightsTensor,
+          kBwInputToCellWeightsTensor, kBwInputToOutputWeightsTensor,
+          kBwRecurrentToInputWeightsTensor, kBwRecurrentToForgetWeightsTensor,
+          kBwRecurrentToCellWeightsTensor, kBwRecurrentToOutputWeightsTensor,
+          kBwCellToInputWeightsTensor, kBwCellToForgetWeightsTensor,
+          kBwCellToOutputWeightsTensor, kBwInputGateBiasTensor,
+          kBwForgetGateBiasTensor, kBwCellGateBiasTensor,
+          kBwOutputGateBiasTensor, kBwProjectionWeightsTensor,
+          kBwProjectionBiasTensor));
 
   // Check if Forward and Backward tensors match along required dimensions.
   return kTfLiteOk;
@@ -334,7 +340,9 @@
   const int n_fw_output = fw_recurrent_to_output_weights->dims->data[1];
 
   // Check that the input tensor dimensions match each other.
-  CheckInputTensorDimensions(context, node, n_input, n_fw_output, n_fw_cell);
+  TF_LITE_ENSURE_OK(
+      context, CheckInputTensorDimensions(context, node, n_input, n_fw_output,
+                                          n_fw_cell));
 
   // Get the pointer to output, state and scratch buffer tensors.
   TfLiteTensor* fw_output = GetOutput(context, node, kFwOutputTensor);
@@ -404,7 +412,9 @@
   const int n_bw_output = bw_recurrent_to_output_weights->dims->data[1];
 
   // Check that the input tensor dimensions match each other.
-  CheckInputTensorDimensions(context, node, n_input, n_bw_output, n_bw_cell);
+  TF_LITE_ENSURE_OK(
+      context, CheckInputTensorDimensions(context, node, n_input, n_bw_output,
+                                          n_bw_cell));
 
   // Get the pointer to output, output_state and cell_state buffer tensors.
   TfLiteTensor* bw_output = GetOutput(context, node, kBwOutputTensor);
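Both `CheckLstmTensorDimensions` and `CheckInputTensorDimensions` return a `TfLiteStatus`, and previously those return values were discarded, so a failed dimension check never aborted `Prepare`. The sketch below illustrates the propagation pattern with a stand-in `ENSURE_OK` macro; it is a toy under stated assumptions, not the actual `TF_LITE_ENSURE_OK` definition.

```cpp
#include <cstdio>

enum Status { kOk, kError };

// Stand-in for TF_LITE_ENSURE_OK: evaluate, and bail out on failure.
#define ENSURE_OK(expr)       \
  do {                        \
    Status s = (expr);        \
    if (s != kOk) return s;   \
  } while (0)

Status CheckDims(int n_input) { return n_input > 0 ? kOk : kError; }

Status PrepareIgnoring(int n_input) {
  CheckDims(n_input);  // Result silently dropped -- the old behavior.
  return kOk;
}

Status PreparePropagating(int n_input) {
  ENSURE_OK(CheckDims(n_input));  // Aborts on failure -- the new behavior.
  return kOk;
}

int main() {
  std::printf("ignoring: %d, propagating: %d\n", PrepareIgnoring(-1),
              PreparePropagating(-1));  // ignoring: 0, propagating: 1
}
```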
diff --git a/tensorflow/contrib/lite/kernels/embedding_lookup.cc b/tensorflow/contrib/lite/kernels/embedding_lookup.cc
index 0ba170a..f550339 100644
--- a/tensorflow/contrib/lite/kernels/embedding_lookup.cc
+++ b/tensorflow/contrib/lite/kernels/embedding_lookup.cc
@@ -112,8 +112,9 @@
       // TODO(alanchiao): refactor scalar multiply into separate function
       // for ease of adding a neon equivalent if ever necessary.
       for (int j = 0; j < col_size; j++) {
+        const int8_t* value_ptr = reinterpret_cast<int8_t*>(value->data.uint8);
         output->data.f[j + i * col_size] =
-            value->data.uint8[j + idx * col_size] * scaling_factor;
+            value_ptr[j + idx * col_size] * scaling_factor;
       }
     }
   }
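The hybrid lookup stores symmetrically quantized weights as signed `int8` codes inside a `uint8` buffer; reading them through `data.uint8` turns every negative code into a large positive byte. The test change below adds a negative weight (`-1.01`) so this sign bug is actually exercised. A standalone sketch with assumed example values:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  const float scaling_factor = 0.0159f;      // Hypothetical per-tensor scale.
  const int8_t code = -64;                   // Quantized code for about -1.02.
  const uint8_t raw = static_cast<uint8_t>(code);  // The byte in data.uint8.

  const float wrong = raw * scaling_factor;  // 192 * scale ~= +3.05: the bug.
  const float right = static_cast<int8_t>(raw) * scaling_factor;  // ~= -1.02.
  std::printf("wrong=%.2f right=%.2f\n", wrong, right);
}
```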
diff --git a/tensorflow/contrib/lite/kernels/embedding_lookup_test.cc b/tensorflow/contrib/lite/kernels/embedding_lookup_test.cc
index 04657fd..4a88d16 100644
--- a/tensorflow/contrib/lite/kernels/embedding_lookup_test.cc
+++ b/tensorflow/contrib/lite/kernels/embedding_lookup_test.cc
@@ -107,9 +107,9 @@
   HybridEmbeddingLookupOpModel m({3}, {3, 8});
   m.SetInput({1, 0, 2});
   m.SetWeight({
-      0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,  // Row 0
-      1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,  // Row 1
-      2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,  // Row 2
+      0.00, 0.01,  0.02, 0.03, 0.10, 0.11, 0.12, 0.13,  // Row 0
+      1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,  // Row 1
+      2.00, 2.01,  2.02, 2.03, 2.10, 2.11, 2.12, 2.13,  // Row 2
   });
 
   m.Invoke();
@@ -117,9 +117,9 @@
   EXPECT_THAT(m.GetOutput(),
               ElementsAreArray(ArrayFloatNear(
                   {
-                      1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,  // Row 1
-                      0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,  // Row 0
-                      2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,  // Row 2
+                      1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,  // Row 1
+                      0.00, 0.01,  0.02, 0.03, 0.10, 0.11, 0.12, 0.13,  // Row 0
+                      2.00, 2.01,  2.02, 2.03, 2.10, 2.11, 2.12, 2.13,  // Row 2
                   },
                   7.41e-03)));
 }
@@ -128,9 +128,9 @@
   HybridEmbeddingLookupOpModel m({3}, {3, 2, 4});
   m.SetInput({1, 0, 2});
   m.SetWeight({
-      0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,  // Row 0
-      1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,  // Row 1
-      2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,  // Row 2
+      0.00, 0.01,  0.02, 0.03, 0.10, 0.11, 0.12, 0.13,  // Row 0
+      1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,  // Row 1
+      2.00, 2.01,  2.02, 2.03, 2.10, 2.11, 2.12, 2.13,  // Row 2
   });
 
   m.Invoke();
@@ -138,9 +138,9 @@
   EXPECT_THAT(m.GetOutput(),
               ElementsAreArray(ArrayFloatNear(
                   {
-                      1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,  // Row 1
-                      0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,  // Row 0
-                      2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,  // Row 2
+                      1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,  // Row 1
+                      0.00, 0.01,  0.02, 0.03, 0.10, 0.11, 0.12, 0.13,  // Row 0
+                      2.00, 2.01,  2.02, 2.03, 2.10, 2.11, 2.12, 2.13,  // Row 2
                   },
                   7.41e-03)));
 }
@@ -149,9 +149,9 @@
   HybridEmbeddingLookupOpModel m({3}, {3, 2, 2, 2});
   m.SetInput({1, 0, 2});
   m.SetWeight({
-      0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,  // Row 0
-      1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,  // Row 1
-      2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,  // Row 2
+      0.00, 0.01,  0.02, 0.03, 0.10, 0.11, 0.12, 0.13,  // Row 0
+      1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,  // Row 1
+      2.00, 2.01,  2.02, 2.03, 2.10, 2.11, 2.12, 2.13,  // Row 2
   });
 
   m.Invoke();
@@ -159,9 +159,9 @@
   EXPECT_THAT(m.GetOutput(),
               ElementsAreArray(ArrayFloatNear(
                   {
-                      1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,  // Row 1
-                      0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,  // Row 0
-                      2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,  // Row 2
+                      1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,  // Row 1
+                      0.00, 0.01,  0.02, 0.03, 0.10, 0.11, 0.12, 0.13,  // Row 0
+                      2.00, 2.01,  2.02, 2.03, 2.10, 2.11, 2.12, 2.13,  // Row 2
                   },
                   7.41e-03)));
 }
diff --git a/tensorflow/contrib/lite/kernels/fake_quant.cc b/tensorflow/contrib/lite/kernels/fake_quant.cc
index f8927a0..0ef1a50 100644
--- a/tensorflow/contrib/lite/kernels/fake_quant.cc
+++ b/tensorflow/contrib/lite/kernels/fake_quant.cc
@@ -44,6 +44,17 @@
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
+  const auto* params =
+      reinterpret_cast<TfLiteFakeQuantParams*>(node->builtin_data);
+
+  if (params->narrow_range) {
+    context->ReportError(
+        context,
+        "narrow_range FakeQuant is not currently supported at runtime. "
+        "narrow_range is only meant to be applied to weights, not activations");
+    return kTfLiteError;
+  }
+
   OpContext op_context(context, node);
   TfLiteIntArray* output_dims = TfLiteIntArrayCopy(op_context.input->dims);
   op_context.output->type = op_context.input->type;
diff --git a/tensorflow/contrib/lite/kernels/fully_connected.cc b/tensorflow/contrib/lite/kernels/fully_connected.cc
index 6c9a845..3b203dd 100644
--- a/tensorflow/contrib/lite/kernels/fully_connected.cc
+++ b/tensorflow/contrib/lite/kernels/fully_connected.cc
@@ -152,10 +152,10 @@
     }
   }
 
-  // Resize output to the same as input (except the last dimension which is
-  // determined by the number of units).
-  TfLiteIntArray* output_size_array = TfLiteIntArrayCopy(input->dims);
-  output_size_array->data[input->dims->size - 1] = num_units;
+  // Resize output.
+  TfLiteIntArray* output_size_array = TfLiteIntArrayCreate(2);
+  output_size_array->data[0] = batch_size;
+  output_size_array->data[1] = num_units;
   TF_LITE_ENSURE_OK(context,
                     context->ResizeTensor(context, output, output_size_array));
   return kTfLiteOk;
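With this change `Prepare` always emits a rank-2 output of shape `{batch_size, num_units}` rather than copying the input's shape and patching its last dimension. `batch_size` here is the flattened input element count divided by the weights' input size; a worked example with the shapes from the updated 4-D test later in this diff:

```cpp
#include <cstdio>

int main() {
  // Shapes from the SimpleTest4DInput case below: input {4, 1, 5, 1},
  // weights with input_size 10 and 3 output units.
  const int input_dims[] = {4, 1, 5, 1};
  const int input_size = 10;
  const int num_units = 3;

  int num_elements = 1;
  for (int d : input_dims) num_elements *= d;        // 20 elements.
  const int batch_size = num_elements / input_size;  // 20 / 10 = 2.

  std::printf("output shape: {%d, %d}\n", batch_size, num_units);  // {2, 3}
}
```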
diff --git a/tensorflow/contrib/lite/kernels/fully_connected_test.cc b/tensorflow/contrib/lite/kernels/fully_connected_test.cc
index a6b6b2f..ec94905 100644
--- a/tensorflow/contrib/lite/kernels/fully_connected_test.cc
+++ b/tensorflow/contrib/lite/kernels/fully_connected_test.cc
@@ -207,7 +207,6 @@
   }
 
   std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
-  std::vector<int> GetOutputSize() { return GetTensorShape(output_); }
 };
 
 class QuantizedFullyConnectedOpModel : public BaseFullyConnectedOpModel {
@@ -299,7 +298,6 @@
 
   void SetInput(const std::vector<float>& f) { PopulateTensor(input_, f); }
   std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
-  std::vector<int> GetOutputSize() { return GetTensorShape(output_); }
 
   int input_size() { return input_size_; }
   int num_units() { return units_; }
@@ -374,7 +372,6 @@
 
   m.Invoke();
 
-  EXPECT_THAT(m.GetOutputSize(), ElementsAre(2, 3));
   EXPECT_THAT(m.GetOutput(), ElementsAre(24, 25, 26, 58, 59, 60));
 }
 
@@ -393,7 +390,6 @@
 
   m.Invoke();
 
-  EXPECT_THAT(m.GetOutputSize(), ElementsAre(2, 1));
   EXPECT_THAT(m.GetOutput(), ElementsAre(11, 9));
 }
 
@@ -580,10 +576,11 @@
 
 TEST_P(FloatFullyConnectedOpTest, SimpleTest4DInput) {
   // Note that it is not required that the first dimension be the number of
-  // batches. All we care is that the input size is the last dimension.
+  // batches. All we care about is that the input can be evenly distributed
+  // into batches. In this case, the element count must be a multiple of '2'.
   FloatFullyConnectedOpModel m(GetRegistration(),
                                /*units=*/3, /*batches=*/2,
-                               /*input=*/{TensorType_FLOAT32, {1, 2, 1, 10}});
+                               /*input=*/{TensorType_FLOAT32, {4, 1, 5, 1}});
   m.SetWeights({
       1, 2, 3, 4, 5, 6, 7, 8, 9, 10,  // u = 0
       1, 2, 3, 4, 5, 6, 7, 8, 9, 10,  // u = 1
@@ -598,7 +595,6 @@
 
   m.Invoke();
 
-  EXPECT_THAT(m.GetOutputSize(), ElementsAre(1, 2, 1, 3));
   EXPECT_THAT(m.GetOutput(), ElementsAreArray({
                                  24, 25, 26,  // first batch
                                  58, 59, 60,  // second batch
@@ -608,7 +604,7 @@
 TEST_P(QuantizedFullyConnectedOpTest, SimpleTest4dInputQuantized) {
   QuantizedFullyConnectedOpModel m(
       GetRegistration(), /*units=*/3, /*batches=*/2,
-      /*input=*/{TensorType_UINT8, {1, 2, 1, 10}, -63.5, 64},
+      /*input=*/{TensorType_UINT8, {4, 1, 5, 1}, -63.5, 64},
       /*output=*/{TensorType_UINT8, {}, -127, 128});
 
   // input_product_scale < output_scale was not true.
diff --git a/tensorflow/contrib/lite/kernels/internal/kernel_utils.cc b/tensorflow/contrib/lite/kernels/internal/kernel_utils.cc
index a0e382e..200f2f1 100644
--- a/tensorflow/contrib/lite/kernels/internal/kernel_utils.cc
+++ b/tensorflow/contrib/lite/kernels/internal/kernel_utils.cc
@@ -255,14 +255,6 @@
                            output_state_ptr);
 }
 
-// TODO(alanchiao): move this to tensor_utils.
-void VectorMultiply(const int8_t* vector, const int v_size, const float scale,
-                    float* result) {
-  for (int i = 0; i < v_size; ++i) {
-    *result++ = scale * *vector++;
-  }
-}
-
 void LstmStep(
     const float* input_ptr_batch, const int8_t* input_to_input_weights_ptr,
     float input_to_input_weights_scale,
@@ -415,8 +407,9 @@
   // For each batch and cell: update input gate.
   if (!use_cifg) {
     if (use_peephole && !is_cell_state_all_zeros) {
-      VectorMultiply(cell_to_input_weights_ptr, n_cell,
-                     cell_to_input_weights_scale, recovered_cell_weights);
+      tensor_utils::VectorScalarMultiply(cell_to_input_weights_ptr, n_cell,
+                                         cell_to_input_weights_scale,
+                                         recovered_cell_weights);
       tensor_utils::VectorBatchVectorCwiseProductAccumulate(
           recovered_cell_weights, n_cell, cell_state_ptr, n_batch,
           input_gate_scratch);
@@ -427,8 +420,9 @@
 
   // For each batch and cell: update forget gate.
   if (use_peephole && !is_cell_state_all_zeros) {
-    VectorMultiply(cell_to_forget_weights_ptr, n_cell,
-                   cell_to_forget_weights_scale, recovered_cell_weights);
+    tensor_utils::VectorScalarMultiply(cell_to_forget_weights_ptr, n_cell,
+                                       cell_to_forget_weights_scale,
+                                       recovered_cell_weights);
     tensor_utils::VectorBatchVectorCwiseProductAccumulate(
         recovered_cell_weights, n_cell, cell_state_ptr, n_batch,
         forget_gate_scratch);
@@ -459,8 +453,9 @@
       tensor_utils::IsZeroVector(cell_state_ptr, n_batch * n_cell);
   // For each batch and cell: update the output gate.
   if (use_peephole && !is_cell_state_all_zeros) {
-    VectorMultiply(cell_to_output_weights_ptr, n_cell,
-                   cell_to_output_weights_scale, recovered_cell_weights);
+    tensor_utils::VectorScalarMultiply(cell_to_output_weights_ptr, n_cell,
+                                       cell_to_output_weights_scale,
+                                       recovered_cell_weights);
     tensor_utils::VectorBatchVectorCwiseProductAccumulate(
         recovered_cell_weights, n_cell, cell_state_ptr, n_batch,
         output_gate_scratch);
diff --git a/tensorflow/contrib/lite/kernels/internal/optimized/neon_tensor_utils.cc b/tensorflow/contrib/lite/kernels/internal/optimized/neon_tensor_utils.cc
index 8c57c98..420bc68 100644
--- a/tensorflow/contrib/lite/kernels/internal/optimized/neon_tensor_utils.cc
+++ b/tensorflow/contrib/lite/kernels/internal/optimized/neon_tensor_utils.cc
@@ -342,6 +342,77 @@
   }
 }
 
+void NeonVectorScalarMultiply(const int8_t* vector, const int v_size,
+                              const float scale, float* result) {
+  // Here the assumption is that each buffer is 4-byte aligned.
+  const int kWeightsPerUint32 = 4;
+  TFLITE_CHECK_EQ((intptr_t)(&vector[0]) & (kWeightsPerUint32 - 1), 0);
+  // If v_size is not divisible by kWeightsPerNeonLane, we cannot use the main
+  // vectorized loop, and we need to process sequentially. postamble_start shows
+  // the start index where this should happen.
+  const int kWeightsPerNeonLane = 16;
+  const int postamble_start = v_size - (v_size & (kWeightsPerNeonLane - 1));
+
+  // Create a vector of 4 floats with the scale value.
+  const float32x4_t scale_f32x4 = vdupq_n_f32(scale);
+  int v = 0;
+  for (; v < postamble_start; v += kWeightsPerNeonLane) {
+    // Load int8 values, sixteen at a time.
+    const int8x16_t v_i8x16 = vld1q_s8(vector + v);
+    // Split it into two components of size eight.
+    const int8x8_t v0_i8x8 = vget_low_s8(v_i8x16);
+    const int8x8_t v1_i8x8 = vget_high_s8(v_i8x16);
+    // Convert both components to int16 first.
+    const int16x8_t v0_i16x8 = vmovl_s8(v0_i8x8);
+    const int16x8_t v1_i16x8 = vmovl_s8(v1_i8x8);
+    // Split each of them into two components each.
+    const int16x4_t v0_i16x4 = vget_low_s16(v0_i16x8);
+    const int16x4_t v1_i16x4 = vget_high_s16(v0_i16x8);
+    const int16x4_t v2_i16x4 = vget_low_s16(v1_i16x8);
+    const int16x4_t v3_i16x4 = vget_high_s16(v1_i16x8);
+    // Convert these to int32 and then to float.
+    float32x4_t v0_f32x4 = vcvtq_f32_s32(vmovl_s16(v0_i16x4));
+    float32x4_t v1_f32x4 = vcvtq_f32_s32(vmovl_s16(v1_i16x4));
+    float32x4_t v2_f32x4 = vcvtq_f32_s32(vmovl_s16(v2_i16x4));
+    float32x4_t v3_f32x4 = vcvtq_f32_s32(vmovl_s16(v3_i16x4));
+    // Vector multiply four floats at a time.
+    v0_f32x4 = vmulq_f32(v0_f32x4, scale_f32x4);
+    v1_f32x4 = vmulq_f32(v1_f32x4, scale_f32x4);
+    v2_f32x4 = vmulq_f32(v2_f32x4, scale_f32x4);
+    v3_f32x4 = vmulq_f32(v3_f32x4, scale_f32x4);
+    // Store the results.
+    vst1q_f32(result + v, v0_f32x4);
+    vst1q_f32(result + v + 4, v1_f32x4);
+    vst1q_f32(result + v + 8, v2_f32x4);
+    vst1q_f32(result + v + 12, v3_f32x4);
+  }
+
+  if (v_size - postamble_start >= (kWeightsPerNeonLane >> 1)) {
+    // Load eight int8 values, if there are at least eight remaining.
+    const int8x8_t v_i8x8 = vld1_s8(vector + v);
+    // Convert them to int16 first.
+    const int16x8_t v_i16x8 = vmovl_s8(v_i8x8);
+    // Split it into two components.
+    const int16x4_t v0_i16x4 = vget_low_s16(v_i16x8);
+    const int16x4_t v1_i16x4 = vget_high_s16(v_i16x8);
+    // Convert the components to floats.
+    float32x4_t v0_f32x4 = vcvtq_f32_s32(vmovl_s16(v0_i16x4));
+    float32x4_t v1_f32x4 = vcvtq_f32_s32(vmovl_s16(v1_i16x4));
+    // Vector multiply four floats at a time.
+    v0_f32x4 = vmulq_f32(v0_f32x4, scale_f32x4);
+    v1_f32x4 = vmulq_f32(v1_f32x4, scale_f32x4);
+    // Store the results.
+    vst1q_f32(result + v, v0_f32x4);
+    vst1q_f32(result + v + 4, v1_f32x4);
+    v += (kWeightsPerNeonLane >> 1);
+  }
+
+  // Postamble loop.
+  for (; v < v_size; v++) {
+    result[v] = scale * vector[v];
+  }
+}
+
 void NeonSymmetricQuantizeFloats(const float* values, const int size,
                                  int8_t* quantized_values, float* min,
                                  float* max, float* scaling_factor) {
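`NeonVectorScalarMultiply` splits the vector into three phases: a main loop over sixteen `int8` lanes at a time, at most one half-width (eight-lane) step, and a scalar postamble for whatever remains. The index arithmetic can be followed without any NEON; the sketch below traces it for `v_size = 29`, the size used by the new `VectorScalarMultiply` unit test later in this diff.

```cpp
#include <cstdio>

int main() {
  const int v_size = 29;
  const int kWeightsPerNeonLane = 16;

  // Largest multiple of 16 not exceeding v_size: the full-width loop
  // covers [0, 16).
  const int postamble_start = v_size - (v_size & (kWeightsPerNeonLane - 1));

  int v = postamble_start;  // 16
  // One eight-lane step fits because 29 - 16 >= 8.
  if (v_size - postamble_start >= (kWeightsPerNeonLane >> 1)) {
    v += kWeightsPerNeonLane >> 1;  // 24
  }

  // The last 29 - 24 = 5 elements go through the scalar postamble loop.
  std::printf("full: [0,%d)  half: [%d,%d)  scalar: [%d,%d)\n",
              postamble_start, postamble_start, v, v, v_size);
}
```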
diff --git a/tensorflow/contrib/lite/kernels/internal/optimized/neon_tensor_utils.h b/tensorflow/contrib/lite/kernels/internal/optimized/neon_tensor_utils.h
index 7a5a8fc..45c9f65 100644
--- a/tensorflow/contrib/lite/kernels/internal/optimized/neon_tensor_utils.h
+++ b/tensorflow/contrib/lite/kernels/internal/optimized/neon_tensor_utils.h
@@ -105,6 +105,10 @@
   return NEON_OR_PORTABLE(IsZeroVector, vector, v_size);
 }
 
+void VectorScalarMultiply(const int8_t* vector, int v_size, float scale,
+                          float* result) {
+  NEON_OR_PORTABLE(VectorScalarMultiply, vector, v_size, scale, result);
+}
 void ClipVector(const float* vector, int v_size, float abs_limit,
                 float* result) {
   NEON_OR_PORTABLE(ClipVector, vector, v_size, abs_limit, result);
diff --git a/tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h b/tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h
index c857fdf..2f73036 100644
--- a/tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h
+++ b/tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h
@@ -3318,6 +3318,19 @@
   }
 }
 
+inline void Sub(const int32* input1_data, const Dims<4>& input1_dims,
+                const int32* input2_data, const Dims<4>& input2_dims,
+                int32 output_activation_min, int32 output_activation_max,
+                int32* output_data, const Dims<4>& output_dims) {
+  gemmlowp::ScopedProfilingLabel label("Sub/int32");
+  const int flat_size = MatchingFlatSize(input1_dims, input2_dims, output_dims);
+  for (int i = 0; i < flat_size; ++i) {
+    output_data[i] = ActivationFunctionWithMinMax(
+        input1_data[i] - input2_data[i], output_activation_min,
+        output_activation_max);
+  }
+}
+
 // TODO(jiawen): We can implement BroadcastSub on buffers of arbitrary
 // dimensionality if the runtime code does a single loop over one dimension
 // that handles broadcasting as the base case. The code generator would then
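The new `int32` overload of `Sub` mirrors the float path: an elementwise difference clamped to the fused activation range. A self-contained sketch in which a plain min/max clamp stands in for `ActivationFunctionWithMinMax`:

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

void SubInt32(const int32_t* a, const int32_t* b, int flat_size,
              int32_t act_min, int32_t act_max, int32_t* out) {
  for (int i = 0; i < flat_size; ++i) {
    // The clamp stands in for ActivationFunctionWithMinMax.
    out[i] = std::min(std::max(a[i] - b[i], act_min), act_max);
  }
}

int main() {
  const int32_t a[] = {10, -5, 100};
  const int32_t b[] = {3, 5, -100};
  int32_t out[3];
  SubInt32(a, b, 3, /*act_min=*/-8, /*act_max=*/64, out);
  for (int32_t v : out) std::printf("%d ", static_cast<int>(v));  // 7 -8 64
  std::printf("\n");
}
```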
diff --git a/tensorflow/contrib/lite/kernels/internal/optimized/tensor_utils_impl.h b/tensorflow/contrib/lite/kernels/internal/optimized/tensor_utils_impl.h
index f146670..db7926d 100644
--- a/tensorflow/contrib/lite/kernels/internal/optimized/tensor_utils_impl.h
+++ b/tensorflow/contrib/lite/kernels/internal/optimized/tensor_utils_impl.h
@@ -124,6 +124,12 @@
 // Fill vector with 0.f.
 void PortableZeroVector(float* vector, int v_size);
 
+// Multiply all elements of a vector by a scalar.
+void PortableVectorScalarMultiply(const int8_t* vector, int v_size, float scale,
+                                  float* result);
+void NeonVectorScalarMultiply(const int8_t* vector, int v_size, float scale,
+                              float* result);
+
 // Limit a float input f between +abs_limit and -abs_limit.
 float PortableClip(float f, float abs_limit);
 
diff --git a/tensorflow/contrib/lite/kernels/internal/quantization_util.h b/tensorflow/contrib/lite/kernels/internal/quantization_util.h
index 525857a..9b3f182 100644
--- a/tensorflow/contrib/lite/kernels/internal/quantization_util.h
+++ b/tensorflow/contrib/lite/kernels/internal/quantization_util.h
@@ -28,8 +28,9 @@
 // Given the min and max values of a float array, return
 // reasonable quantization parameters to use for this array.
 template <typename T>
-QuantizationParams ChooseQuantizationParams(double rmin, double rmax) {
-  const T qmin = std::numeric_limits<T>::min();
+QuantizationParams ChooseQuantizationParams(double rmin, double rmax,
+                                            bool narrow_range) {
+  const T qmin = std::numeric_limits<T>::min() + (narrow_range ? 1 : 0);
   const T qmax = std::numeric_limits<T>::max();
   const double qmin_double = qmin;
   const double qmax_double = qmax;
@@ -97,6 +98,11 @@
   return quantization_params;
 }
 
+template <typename T>
+QuantizationParams ChooseQuantizationParams(double rmin, double rmax) {
+  return ChooseQuantizationParams<T>(rmin, rmax, false);
+}
+
 // Converts a floating-point number to an integer. For all inputs x where
 // static_cast<IntOut>(x) is legal according to the C++ standard, the result
 // is identical to that cast (i.e. the result is x with its fractional part
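With `narrow_range`, the lowest quantized code moves up by one (for `uint8`, the usable range becomes `[1, 255]` instead of `[0, 255]`), the convention used for weights so that the code grid is symmetric about the zero point; it is also why the `fake_quant.cc` change earlier in this diff rejects `narrow_range` for activations at runtime. A sketch of the resulting parameters under the affine scheme the surrounding function uses, `scale = (rmax - rmin) / (qmax - qmin)` with the zero point placed so that 0.0 is exactly representable:

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <limits>

void Choose(double rmin, double rmax, bool narrow_range) {
  const int qmin =
      std::numeric_limits<uint8_t>::min() + (narrow_range ? 1 : 0);
  const int qmax = std::numeric_limits<uint8_t>::max();
  const double scale = (rmax - rmin) / (qmax - qmin);
  const int zero_point = static_cast<int>(std::round(qmin - rmin / scale));
  std::printf("narrow=%d qmin=%d scale=%.6f zero_point=%d\n",
              static_cast<int>(narrow_range), qmin, scale, zero_point);
}

int main() {
  Choose(-1.0, 1.0, /*narrow_range=*/false);  // qmin=0, scale=2/255
  Choose(-1.0, 1.0, /*narrow_range=*/true);   // qmin=1, scale=2/254,
                                              // codes symmetric about 128
}
```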
diff --git a/tensorflow/contrib/lite/kernels/internal/reference/portable_tensor_utils.cc b/tensorflow/contrib/lite/kernels/internal/reference/portable_tensor_utils.cc
index ccf112c..7ead449 100644
--- a/tensorflow/contrib/lite/kernels/internal/reference/portable_tensor_utils.cc
+++ b/tensorflow/contrib/lite/kernels/internal/reference/portable_tensor_utils.cc
@@ -195,6 +195,13 @@
   memset(vector, 0, v_size * sizeof(float));
 }
 
+void PortableVectorScalarMultiply(const int8_t* vector, const int v_size,
+                                  const float scale, float* result) {
+  for (int v = 0; v < v_size; ++v) {
+    *result++ = scale * *vector++;
+  }
+}
+
 void PortableClipVector(const float* vector, int v_size, float abs_limit,
                         float* result) {
   for (int v = 0; v < v_size; v++) {
diff --git a/tensorflow/contrib/lite/kernels/internal/reference/portable_tensor_utils.h b/tensorflow/contrib/lite/kernels/internal/reference/portable_tensor_utils.h
index d2e1fec..d3a4fa8 100644
--- a/tensorflow/contrib/lite/kernels/internal/reference/portable_tensor_utils.h
+++ b/tensorflow/contrib/lite/kernels/internal/reference/portable_tensor_utils.h
@@ -96,6 +96,10 @@
 // Fill vector with 0.f.
 void PortableZeroVector(float* vector, int v_size);
 
+// Multiply all elements of a vector by a scalar.
+void PortableVectorScalarMultiply(const int8_t* vector, int v_size, float scale,
+                                  float* result);
+
 // Clip elements of a vector using an abs_limit value.
 void PortableClipVector(const float* vector, int v_size, float abs_limit,
                         float* result);
@@ -199,6 +203,12 @@
   PortableZeroVector(vector, v_size);
 }
 
+// Multiply all elements of a vector by a scalar.
+void VectorScalarMultiply(const int8_t* vector, int v_size, float scale,
+                          float* result) {
+  PortableVectorScalarMultiply(vector, v_size, scale, result);
+}
+
 void ClipVector(const float* vector, int v_size, float abs_limit,
                 float* result) {
   PortableClipVector(vector, v_size, abs_limit, result);
diff --git a/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h b/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
index 2d40f17..6fabb9c 100644
--- a/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
+++ b/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
@@ -1666,10 +1666,11 @@
   }
 }
 
-inline void Sub(const float* input1_data, const Dims<4>& input1_dims,
-                const float* input2_data, const Dims<4>& input2_dims,
-                float output_activation_min, float output_activation_max,
-                float* output_data, const Dims<4>& output_dims) {
+template <typename T>
+inline void Sub(const T* input1_data, const Dims<4>& input1_dims,
+                const T* input2_data, const Dims<4>& input2_dims,
+                T output_activation_min, T output_activation_max,
+                T* output_data, const Dims<4>& output_dims) {
   const int flat_size = MatchingFlatSize(input1_dims, input2_dims, output_dims);
   for (int i = 0; i < flat_size; ++i) {
     output_data[i] = ActivationFunctionWithMinMax(
@@ -3467,7 +3468,8 @@
                    const int* output_dims, const int input_num_dims,
                    const int output_num_dims, const int* axis,
                    const int num_axis, int* input_iter,
-                   Out reducer(Out current, const In in), Out* output_data) {
+                   Out reducer(const Out current, const In in),
+                   Out* output_data) {
   // Reset input iterator.
   TFLITE_DCHECK(input_num_dims > 0);
   for (int idx = 0; idx < input_num_dims; ++idx) {
@@ -3485,11 +3487,12 @@
   return true;
 }
 
-inline bool ResolveAxis(const int num_dims, const int* axis, const int num_axis,
-                        int* out_axis, int* out_num_axis) {
+inline bool ResolveAxis(const int num_dims, const int* axis,
+                        const int64_t num_axis, int* out_axis,
+                        int* out_num_axis) {
   *out_num_axis = 0;  // Just in case.
   // O(n^2) is fine since out_num_axis should be really small, mostly <= 4
-  for (int idx = 0; idx < num_axis; ++idx) {
+  for (int64_t idx = 0; idx < num_axis; ++idx) {
     // Handle negative index.
     int current = axis[idx] < 0 ? (axis[idx] + num_dims) : axis[idx];
     TFLITE_DCHECK(current >= 0 && current < num_dims);
@@ -3515,7 +3518,7 @@
                           const int output_num_dims, const int* axis,
                           const int num_axis, int* input_iter,
                           Out* output_data) {
-  auto reducer = [](Out current, const In in) -> Out {
+  auto reducer = [](const Out current, const In in) -> Out {
     const Out actual_in = static_cast<Out>(in);
     return current + actual_in;
   };
@@ -3524,6 +3527,24 @@
                          output_data);
 }
 
+template <typename T>
+inline bool InitTensorDataForReduce(const int* dims, const int num_dims,
+                                    const T init_value, T* data) {
+  size_t num_elements = 1;
+  for (int idx = 0; idx < num_dims; ++idx) {
+    size_t current = static_cast<size_t>(dims[idx]);
+    // Overflow prevention.
+    if (num_elements > std::numeric_limits<size_t>::max() / current) {
+      return false;
+    }
+    num_elements *= current;
+  }
+  for (size_t idx = 0; idx < num_elements; ++idx) {
+    data[idx] = init_value;
+  }
+  return true;
+}
+
 // Computes the sum of elements across dimensions given in axis.
 template <typename T>
 inline bool Sum(const T* input_data, const int* input_dims,
@@ -3532,17 +3553,9 @@
                 const int* axis, const int num_axis_dimensions, bool keep_dims,
                 int* temp_index, int* resolved_axis) {
   // Reset output data.
-  size_t num_outputs = 1;
-  for (int idx = 0; idx < output_num_dims; ++idx) {
-    size_t current = static_cast<size_t>(output_dims[idx]);
-    // Overflow prevention.
-    if (num_outputs > std::numeric_limits<size_t>::max() / current) {
-      return false;
-    }
-    num_outputs *= current;
-  }
-  for (size_t idx = 0; idx < num_outputs; ++idx) {
-    output_data[idx] = T();
+  if (!InitTensorDataForReduce(output_dims, output_num_dims, static_cast<T>(0),
+                               output_data)) {
+    return false;
   }
 
   // Resolve axis.
@@ -3557,6 +3570,61 @@
                              num_resolved_axis, temp_index, output_data);
 }
 
+// Computes the max of elements across dimensions given in axis.
+template <typename T>
+inline bool ReduceMax(const T* input_data, const int* input_dims,
+                      const int input_num_dims, T* output_data,
+                      const int* output_dims, const int output_num_dims,
+                      const int* axis, const int64_t num_axis_dimensions,
+                      bool keep_dims, int* temp_index, int* resolved_axis) {
+  T init_value = std::numeric_limits<T>::lowest();
+  // Reset output data.
+  if (!InitTensorDataForReduce(output_dims, output_num_dims, init_value,
+                               output_data)) {
+    return false;
+  }
+
+  // Resolve axis.
+  int num_resolved_axis = 0;
+  if (!ResolveAxis(input_num_dims, axis, num_axis_dimensions, resolved_axis,
+                   &num_resolved_axis)) {
+    return false;
+  }
+
+  auto reducer = [](const T current, const T in) -> T {
+    return (in > current) ? in : current;
+  };
+  return Reduce<T, T>(input_data, input_dims, output_dims, input_num_dims,
+                      output_num_dims, resolved_axis, num_resolved_axis,
+                      temp_index, reducer, output_data);
+}
+
+// Computes the prod of elements across dimensions given in axis.
+template <typename T>
+inline bool ReduceProd(const T* input_data, const int* input_dims,
+                       const int input_num_dims, T* output_data,
+                       const int* output_dims, const int output_num_dims,
+                       const int* axis, const int64_t num_axis_dimensions,
+                       bool keep_dims, int* temp_index, int* resolved_axis) {
+  // Reset output data.
+  if (!InitTensorDataForReduce(output_dims, output_num_dims, static_cast<T>(1),
+                               output_data)) {
+    return false;
+  }
+
+  // Resolve axis.
+  int num_resolved_axis = 0;
+  if (!ResolveAxis(input_num_dims, axis, num_axis_dimensions, resolved_axis,
+                   &num_resolved_axis)) {
+    return false;
+  }
+
+  auto reducer = [](const T current, const T in) -> T { return in * current; };
+  return Reduce<T, T>(input_data, input_dims, output_dims, input_num_dims,
+                      output_num_dims, resolved_axis, num_resolved_axis,
+                      temp_index, reducer, output_data);
+}
+
 // Computes the mean of elements across dimensions given in axis.
 // It does so in two stages: first it calculates the sum of elements along
 // the axis, then divides that sum by the number of elements along the axis.
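The reshuffled reduction code shares one skeleton across `Sum`, `ReduceProd`, and `ReduceMax`: `InitTensorDataForReduce` fills the output with an op-specific initial value (0, 1, or `numeric_limits::lowest()`), `ResolveAxis` normalizes negative and duplicate axes, and `Reduce` folds the input through an op-specific reducer. A simplified sketch that keeps only the init-value-plus-reducer structure, collapsing the axis machinery into a reduction over a flat buffer:

```cpp
#include <cstdio>
#include <limits>

template <typename T, typename Reducer>
T ReduceFlat(const T* data, int size, T init_value, Reducer reducer) {
  T acc = init_value;  // InitTensorDataForReduce seeds outputs with this.
  for (int i = 0; i < size; ++i) acc = reducer(acc, data[i]);
  return acc;
}

int main() {
  const float data[] = {2.0f, -3.0f, 4.0f};
  const auto sum = [](float cur, float in) { return cur + in; };
  const auto prod = [](float cur, float in) { return cur * in; };
  const auto max = [](float cur, float in) { return in > cur ? in : cur; };

  std::printf("sum=%g prod=%g max=%g\n",
              ReduceFlat(data, 3, 0.0f, sum),    // 3
              ReduceFlat(data, 3, 1.0f, prod),   // -24
              ReduceFlat(data, 3, std::numeric_limits<float>::lowest(), max));
}
```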
diff --git a/tensorflow/contrib/lite/kernels/internal/tensor_utils.h b/tensorflow/contrib/lite/kernels/internal/tensor_utils.h
index 5160e22..82f4503 100644
--- a/tensorflow/contrib/lite/kernels/internal/tensor_utils.h
+++ b/tensorflow/contrib/lite/kernels/internal/tensor_utils.h
@@ -124,6 +124,10 @@
 // Fill vector with 0.f.
 void ZeroVector(float* vector, int v_size);
 
+// Multiply all elements of a vector by a scalar.
+void VectorScalarMultiply(const int8_t* vector, int v_size, float scale,
+                          float* result);
+
 // Clip elements of a vector using an abs_limit value.
 void ClipVector(const float* vector, int v_size, float abs_limit,
                 float* result);
diff --git a/tensorflow/contrib/lite/kernels/internal/tensor_utils_test.cc b/tensorflow/contrib/lite/kernels/internal/tensor_utils_test.cc
index aa0d49a..372a6ef 100644
--- a/tensorflow/contrib/lite/kernels/internal/tensor_utils_test.cc
+++ b/tensorflow/contrib/lite/kernels/internal/tensor_utils_test.cc
@@ -32,6 +32,22 @@
                   {0.0, -0.5, 1.0, -1.5, 2.0, -2.0, 2.0, -2.0, 2.0, -2.0})));
 }
 
+TEST(uKernels, VectorScalarMultiply) {
+  constexpr int kVectorSize = 29;
+  static int8_t input[kVectorSize];
+  for (int i = 0; i < 29; ++i) {
+    input[i] = static_cast<int8_t>(i - 14);
+  }
+  const float scale = 0.1f;
+  std::vector<float> output(kVectorSize, 0.0f);
+  VectorScalarMultiply(input, kVectorSize, scale, output.data());
+  EXPECT_THAT(output,
+              ElementsAreArray(ArrayFloatNear(
+                  {-1.4, -1.3, -1.2, -1.1, -1.0, -0.9, -0.8, -0.7, -0.6, -0.5,
+                   -0.4, -0.3, -0.2, -0.1, 0,    0.1,  0.2,  0.3,  0.4,  0.5,
+                   0.6,  0.7,  0.8,  0.9,  1.0,  1.1,  1.2,  1.3,  1.4})));
+}
+
 TEST(uKernels, IsZeroTest) {
   constexpr int kVectorSize = 21;
   static float zeros[kVectorSize] = {0.0};
diff --git a/tensorflow/contrib/lite/kernels/lstm.cc b/tensorflow/contrib/lite/kernels/lstm.cc
index 3577ae6..4dfc891 100644
--- a/tensorflow/contrib/lite/kernels/lstm.cc
+++ b/tensorflow/contrib/lite/kernels/lstm.cc
@@ -306,7 +306,8 @@
   const int n_output = recurrent_to_output_weights->dims->data[1];
 
   // Check that the input tensor dimensions match each other.
-  CheckInputTensorDimensions(context, node, n_input, n_output, n_cell);
+  TF_LITE_ENSURE_OK(context, CheckInputTensorDimensions(context, node, n_input,
+                                                        n_output, n_cell));
 
   // Get the pointer to output, activation_state and cell_state tensors.
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
diff --git a/tensorflow/contrib/lite/kernels/lstm_test.cc b/tensorflow/contrib/lite/kernels/lstm_test.cc
index 0b7c561..0266f5f 100644
--- a/tensorflow/contrib/lite/kernels/lstm_test.cc
+++ b/tensorflow/contrib/lite/kernels/lstm_test.cc
@@ -13,6 +13,9 @@
 limitations under the License.
 ==============================================================================*/
 // Unit test for TFLite LSTM op.
+//
+// TODO(alanchiao): add unit test with invalid input dimensions for this and its
+// variants.
 
 #include <memory>
 #include <vector>
diff --git a/tensorflow/contrib/lite/kernels/reduce.cc b/tensorflow/contrib/lite/kernels/reduce.cc
index 31c331a..52e4084 100644
--- a/tensorflow/contrib/lite/kernels/reduce.cc
+++ b/tensorflow/contrib/lite/kernels/reduce.cc
@@ -315,6 +315,99 @@
   return kTfLiteOk;
 }
 
+template <KernelType kernel_type>
+TfLiteStatus EvalProd(TfLiteContext* context, TfLiteNode* node) {
+  OpContext op_context(context, node);
+  int64_t num_axis = NumElements(op_context.axis);
+  TfLiteTensor* temp_index = GetTemporary(context, node, /*index=*/0);
+  TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1);
+  // Resize the output tensor if the output tensor is dynamic.
+  if (IsDynamicTensor(op_context.output)) {
+    TF_LITE_ENSURE_OK(context,
+                      ResizeTempAxis(context, &op_context, resolved_axis));
+    TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
+  }
+
+#define TF_LITE_PROD(kernel_type, data_type)                        \
+  kernel_type::ReduceProd<>(                                        \
+      GetTensorData<data_type>(op_context.input),                   \
+      op_context.input->dims->data, op_context.input->dims->size,   \
+      GetTensorData<data_type>(op_context.output),                  \
+      op_context.output->dims->data, op_context.output->dims->size, \
+      GetTensorData<int>(op_context.axis), num_axis,                \
+      op_context.params->keep_dims, GetTensorData<int>(temp_index), \
+      GetTensorData<int>(resolved_axis))
+
+  if (kernel_type == kReference) {
+    switch (op_context.input->type) {
+      case kTfLiteFloat32:
+        TF_LITE_ENSURE(context, TF_LITE_PROD(reference_ops, float));
+        break;
+      case kTfLiteInt32:
+        TF_LITE_ENSURE(context, TF_LITE_PROD(reference_ops, int));
+        break;
+      case kTfLiteInt64:
+        TF_LITE_ENSURE(context, TF_LITE_PROD(reference_ops, int64_t));
+        break;
+      case kTfLiteUInt8:
+        // TODO(wangtz): uint8 reduce_prod is not yet supported.
+      default:
+        return kTfLiteError;
+    }
+  }
+#undef TF_LITE_PROD
+  return kTfLiteOk;
+}
+
+template <KernelType kernel_type>
+TfLiteStatus EvalMax(TfLiteContext* context, TfLiteNode* node) {
+  OpContext op_context(context, node);
+  int64_t num_axis = NumElements(op_context.axis);
+  TfLiteTensor* temp_index = GetTemporary(context, node, /*index=*/0);
+  TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1);
+  // Resize the output tensor if the output tensor is dynamic.
+  if (IsDynamicTensor(op_context.output)) {
+    TF_LITE_ENSURE_OK(context,
+                      ResizeTempAxis(context, &op_context, resolved_axis));
+    TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
+  }
+
+#define TF_LITE_MAX(kernel_type, data_type)                         \
+  kernel_type::ReduceMax<>(                                         \
+      GetTensorData<data_type>(op_context.input),                   \
+      op_context.input->dims->data, op_context.input->dims->size,   \
+      GetTensorData<data_type>(op_context.output),                  \
+      op_context.output->dims->data, op_context.output->dims->size, \
+      GetTensorData<int>(op_context.axis), num_axis,                \
+      op_context.params->keep_dims, GetTensorData<int>(temp_index), \
+      GetTensorData<int>(resolved_axis))
+
+  if (kernel_type == kReference) {
+    switch (op_context.input->type) {
+      case kTfLiteFloat32:
+        TF_LITE_ENSURE(context, TF_LITE_MAX(reference_ops, float));
+        break;
+      case kTfLiteInt32:
+        TF_LITE_ENSURE(context, TF_LITE_MAX(reference_ops, int));
+        break;
+      case kTfLiteInt64:
+        TF_LITE_ENSURE(context, TF_LITE_MAX(reference_ops, int64_t));
+        break;
+      case kTfLiteUInt8:
+        TF_LITE_ENSURE_EQ(context, op_context.input->params.scale,
+                          op_context.output->params.scale);
+        TF_LITE_ENSURE_EQ(context, op_context.input->params.zero_point,
+                          op_context.output->params.zero_point);
+        TF_LITE_ENSURE(context, TF_LITE_MAX(reference_ops, uint8_t));
+        break;
+      default:
+        return kTfLiteError;
+    }
+  }
+#undef TF_LITE_MAX
+  return kTfLiteOk;
+}
+
 }  // namespace reduce
 
 TfLiteRegistration* Register_MEAN_REF() {
@@ -331,9 +424,27 @@
   return &r;
 }
 
+TfLiteRegistration* Register_REDUCE_PROD_REF() {
+  static TfLiteRegistration r = {reduce::Init, reduce::Free,
+                                 reduce::PrepareSimple,
+                                 reduce::EvalProd<reduce::kReference>};
+  return &r;
+}
+
+TfLiteRegistration* Register_REDUCE_MAX_REF() {
+  static TfLiteRegistration r = {reduce::Init, reduce::Free,
+                                 reduce::PrepareSimple,
+                                 reduce::EvalMax<reduce::kReference>};
+  return &r;
+}
+
 // TODO(kanlig): add optimized implementation of Mean.
 TfLiteRegistration* Register_MEAN() { return Register_MEAN_REF(); }
 TfLiteRegistration* Register_SUM() { return Register_SUM_REF(); }
+TfLiteRegistration* Register_REDUCE_PROD() {
+  return Register_REDUCE_PROD_REF();
+}
+TfLiteRegistration* Register_REDUCE_MAX() { return Register_REDUCE_MAX_REF(); }
 
 }  // namespace builtin
 }  // namespace ops
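Both registrations reuse `reduce::Init`, `reduce::Free`, and `reduce::PrepareSimple`, differing only in the `Eval` function template instantiated with the kernel type. A toy version of this table-of-function-pointers pattern (a simplified `Registration` struct, not the real `TfLiteRegistration`):

```cpp
#include <cstdio>

struct Registration {
  void (*prepare)();
  void (*eval)();
};

void PrepareSimple() { std::printf("shared prepare\n"); }
void EvalProd() { std::printf("eval prod\n"); }
void EvalMax() { std::printf("eval max\n"); }

Registration* RegisterReduceProd() {
  static Registration r = {PrepareSimple, EvalProd};
  return &r;
}

Registration* RegisterReduceMax() {
  static Registration r = {PrepareSimple, EvalMax};
  return &r;
}

int main() {
  Registration* op = RegisterReduceMax();
  op->prepare();  // shared prepare
  op->eval();     // eval max
}
```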
diff --git a/tensorflow/contrib/lite/kernels/reduce_test.cc b/tensorflow/contrib/lite/kernels/reduce_test.cc
index 9e94682..7d28931 100644
--- a/tensorflow/contrib/lite/kernels/reduce_test.cc
+++ b/tensorflow/contrib/lite/kernels/reduce_test.cc
@@ -25,10 +25,10 @@
 
 class BaseOpModel : public SingleOpModel {
  public:
-  void SetAxis(std::initializer_list<int> data) { PopulateTensor(axis_, data); }
+  void SetAxis(const std::vector<int>& data) { PopulateTensor(axis_, data); }
 
   template <class T>
-  void SetInput(std::initializer_list<T> data) {
+  void SetInput(std::vector<T> data) {
     PopulateTensor(input_, data);
   }
 
@@ -110,14 +110,72 @@
   }
 };
 
+// Model for the test case where axis is a const tensor.
+class ProdOpConstModel : public BaseOpModel {
+ public:
+  ProdOpConstModel(const TensorData& input, const TensorData& output,
+                   std::initializer_list<int> axis_shape,
+                   std::initializer_list<int> axis, bool keep_dims) {
+    input_ = AddInput(input);
+    axis_ = AddConstInput(TensorType_INT32, axis, axis_shape);
+    output_ = AddOutput(output);
+    SetBuiltinOp(BuiltinOperator_REDUCE_PROD, BuiltinOptions_ReducerOptions,
+                 CreateReducerOptions(builder_, keep_dims).Union());
+    BuildInterpreter({GetShape(input_)});
+  }
+};
+
+// Model for the test case where axis is a dynamic tensor.
+class ProdOpDynamicModel : public BaseOpModel {
+ public:
+  ProdOpDynamicModel(const TensorData& input, const TensorData& output,
+                     const TensorData& axis, bool keep_dims) {
+    input_ = AddInput(input);
+    axis_ = AddInput(axis);
+    output_ = AddOutput(output);
+    SetBuiltinOp(BuiltinOperator_REDUCE_PROD, BuiltinOptions_ReducerOptions,
+                 CreateReducerOptions(builder_, keep_dims).Union());
+    BuildInterpreter({GetShape(input_)});
+  }
+};
+
+// Model for the test case where axis is a const tensor.
+class MaxOpConstModel : public BaseOpModel {
+ public:
+  MaxOpConstModel(const TensorData& input, const TensorData& output,
+                  std::initializer_list<int> axis_shape,
+                  std::initializer_list<int> axis, bool keep_dims) {
+    input_ = AddInput(input);
+    axis_ = AddConstInput(TensorType_INT32, axis, axis_shape);
+    output_ = AddOutput(output);
+    SetBuiltinOp(BuiltinOperator_REDUCE_MAX, BuiltinOptions_ReducerOptions,
+                 CreateReducerOptions(builder_, keep_dims).Union());
+    BuildInterpreter({GetShape(input_)});
+  }
+};
+
+// Model for the test case where axis is a dynamic tensor.
+class MaxOpDynamicModel : public BaseOpModel {
+ public:
+  MaxOpDynamicModel(const TensorData& input, const TensorData& output,
+                    const TensorData& axis, bool keep_dims) {
+    input_ = AddInput(input);
+    axis_ = AddInput(axis);
+    output_ = AddOutput(output);
+    SetBuiltinOp(BuiltinOperator_REDUCE_MAX, BuiltinOptions_ReducerOptions,
+                 CreateReducerOptions(builder_, keep_dims).Union());
+    BuildInterpreter({GetShape(input_)});
+  }
+};
+
 // For quantized ops, the error shouldn't exceed one quantization step
 float GetTolerance(int min, int max) { return (max - min) / 255.0; }
 
 // Tests for reduce_mean
 TEST(ConstFloatMeanOpTest, NotKeepDims) {
-  std::initializer_list<float> data = {
-      1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,  9.0,  10.0, 11.0, 12.0,
-      13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+  std::vector<float> data = {1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,
+                             9.0,  10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+                             17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
   MeanOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {2}},
                      {4}, {1, 0, -3, -3}, false);
   m.SetInput(data);
@@ -127,9 +185,9 @@
 }
 
 TEST(ConstFloatMeanOpTest, KeepDims) {
-  std::initializer_list<float> data = {
-      1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,  9.0,  10.0, 11.0, 12.0,
-      13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+  std::vector<float> data = {1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,
+                             9.0,  10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+                             17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
   MeanOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {3}},
                      {2}, {0, 2}, true);
   m.SetInput(data);
@@ -140,13 +198,13 @@
 }
 
 TEST(DynamicFloatMeanOpTest, NotKeepDims) {
-  std::initializer_list<float> data = {
-      1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,  9.0,  10.0, 11.0, 12.0,
-      13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+  std::vector<float> data = {1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,
+                             9.0,  10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+                             17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
   MeanOpDynamicModel m({TensorType_FLOAT32, {4, 3, 2}},
                        {TensorType_FLOAT32, {2}}, {TensorType_INT32, {4}},
                        false);
-  std::initializer_list<int> axis = {1, 0, -3, -3};
+  std::vector<int> axis = {1, 0, -3, -3};
   m.SetAxis(axis);
   m.SetInput(data);
   m.Invoke();
@@ -155,13 +213,13 @@
 }
 
 TEST(DynamicFloatMeanOpTest, KeepDims) {
-  std::initializer_list<float> data = {
-      1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,  9.0,  10.0, 11.0, 12.0,
-      13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+  std::vector<float> data = {1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,
+                             9.0,  10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+                             17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
   MeanOpDynamicModel m({TensorType_FLOAT32, {4, 3, 2}},
                        {TensorType_FLOAT32, {3}}, {TensorType_INT32, {2}},
                        true);
-  std::initializer_list<int> axis = {0, 2};
+  std::vector<int> axis = {0, 2};
   m.SetAxis(axis);
   m.SetInput(data);
   m.Invoke();
@@ -171,10 +229,10 @@
 }
 
 TEST(DynamicFloatMeanOpTest, Scale) {
-  std::initializer_list<float> data = {9.527};
+  std::vector<float> data = {9.527};
   MeanOpDynamicModel m({TensorType_FLOAT32, {1}}, {TensorType_FLOAT32, {1}},
                        {TensorType_INT32, {1}}, true);
-  std::initializer_list<int> axis = {0};
+  std::vector<int> axis = {0};
   m.SetAxis(axis);
   m.SetInput(data);
   m.Invoke();
@@ -185,7 +243,7 @@
 
 TEST(ConstUint8MeanOpTest, NotKeepDims) {
   float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
-  std::initializer_list<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
+  std::vector<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
   MeanOpConstModel m({TensorType_UINT8, {1, 3, 2}, -1.0, 1.0},
                      {TensorType_UINT8, {2}, -1.0, 1.0}, {1}, {1}, false);
   m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
@@ -197,7 +255,7 @@
 
 TEST(ConstUint8MeanOpTest, KeepDims) {
   float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
-  std::initializer_list<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
+  std::vector<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
   MeanOpConstModel m({TensorType_UINT8, {3, 2}, -1.0, 1.0},
                      {TensorType_UINT8, {3}, -1.0, 1.0}, {1}, {1}, true);
   m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
@@ -210,11 +268,11 @@
 
 TEST(DynamicUint8MeanOpTest, NotKeepDims) {
   float kQuantizedTolerance = GetTolerance(-5.0, 2.0);
-  std::initializer_list<float> data = {1.3, -4.8, -3.6, 0.24};
+  std::vector<float> data = {1.3, -4.8, -3.6, 0.24};
   MeanOpDynamicModel m({TensorType_UINT8, {2, 2}, -5.0, 2.0},
                        {TensorType_UINT8, {2}, -5.0, 2.0},
                        {TensorType_INT32, {1}}, false);
-  std::initializer_list<int> axis = {1};
+  std::vector<int> axis = {1};
   m.SetAxis(axis);
   m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
   m.Invoke();
@@ -226,11 +284,11 @@
 
 TEST(DynamicUint8MeanOpTest, KeepDims) {
   float kQuantizedTolerance = GetTolerance(-10.0, 12.0);
-  std::initializer_list<float> data = {11.14, -0.14, 7.423, 0.879};
+  std::vector<float> data = {11.14, -0.14, 7.423, 0.879};
   MeanOpDynamicModel m({TensorType_UINT8, {2, 2}, -10.0, 12.0},
                        {TensorType_UINT8, {2}, -10.0, 12.0},
                        {TensorType_INT32, {1}}, true);
-  std::initializer_list<int> axis = {0};
+  std::vector<int> axis = {0};
   m.SetAxis(axis);
   m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
   m.Invoke();
@@ -243,9 +301,9 @@
 // Tests for reduce_sum
 
 TEST(ConstFloatSumOpTest, NotKeepDims) {
-  std::initializer_list<float> data = {
-      1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,  9.0,  10.0, 11.0, 12.0,
-      13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+  std::vector<float> data = {1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,
+                             9.0,  10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+                             17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
   SumOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {2}},
                     {4}, {1, 0, -3, -3}, false);
   m.SetInput(data);
@@ -256,9 +314,9 @@
 }
 
 TEST(ConstFloatSumOpTest, KeepDims) {
-  std::initializer_list<float> data = {
-      1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,  9.0,  10.0, 11.0, 12.0,
-      13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+  std::vector<float> data = {1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,
+                             9.0,  10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+                             17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
   SumOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {3}},
                     {2}, {0, 2}, true);
   m.SetInput(data);
@@ -269,13 +327,13 @@
 }
 
 TEST(DynamicFloatSumOpTest, NotKeepDims) {
-  std::initializer_list<float> data = {
-      1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,  9.0,  10.0, 11.0, 12.0,
-      13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+  std::vector<float> data = {1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,
+                             9.0,  10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+                             17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
   SumOpDynamicModel m({TensorType_FLOAT32, {4, 3, 2}},
                       {TensorType_FLOAT32, {2}}, {TensorType_INT32, {4}},
                       false);
-  std::initializer_list<int> axis = {1, 0, -3, -3};
+  std::vector<int> axis = {1, 0, -3, -3};
   m.SetAxis(axis);
   m.SetInput(data);
   m.Invoke();
@@ -285,12 +343,12 @@
 }
 
 TEST(DynamicFloatSumOpTest, KeepDims) {
-  std::initializer_list<float> data = {
-      1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,  9.0,  10.0, 11.0, 12.0,
-      13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+  std::vector<float> data = {1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,
+                             9.0,  10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+                             17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
   SumOpDynamicModel m({TensorType_FLOAT32, {4, 3, 2}},
                       {TensorType_FLOAT32, {3}}, {TensorType_INT32, {2}}, true);
-  std::initializer_list<int> axis = {0, 2};
+  std::vector<int> axis = {0, 2};
   m.SetAxis(axis);
   m.SetInput(data);
   m.Invoke();
@@ -300,10 +358,10 @@
 }
 
 TEST(DynamicFloatSumOpTest, Scale) {
-  std::initializer_list<float> data = {9.527};
+  std::vector<float> data = {9.527};
   SumOpDynamicModel m({TensorType_FLOAT32, {1}}, {TensorType_FLOAT32, {1}},
                       {TensorType_INT32, {1}}, true);
-  std::initializer_list<int> axis = {0};
+  std::vector<int> axis = {0};
   m.SetAxis(axis);
   m.SetInput(data);
   m.Invoke();
@@ -313,7 +371,7 @@
 
 TEST(ConstUint8SumOpTest, NotKeepDims) {
   float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
-  std::initializer_list<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
+  std::vector<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
   SumOpConstModel m({TensorType_UINT8, {1, 3, 2}, -1.0, 1.0},
                     {TensorType_UINT8, {2}, -1.0, 1.0}, {1}, {1}, false);
   m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
@@ -326,7 +384,7 @@
 
 TEST(ConstUint8SumOpTest, KeepDims) {
   float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
-  std::initializer_list<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
+  std::vector<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
   SumOpConstModel m({TensorType_UINT8, {3, 2}, -1.0, 1.0},
                     {TensorType_UINT8, {3}, -1.0, 1.0}, {1}, {1}, true);
   m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
@@ -339,11 +397,11 @@
 
 TEST(DynamicUint8SumOpTest, NotKeepDims) {
   float kQuantizedTolerance = GetTolerance(-5.0, 2.0);
-  std::initializer_list<float> data = {1.3, -4.8, -3.6, 0.24};
+  std::vector<float> data = {1.3, -4.8, -3.6, 0.24};
   SumOpDynamicModel m({TensorType_UINT8, {2, 2}, -5.0, 2.0},
                       {TensorType_UINT8, {2}, -5.0, 2.0},
                       {TensorType_INT32, {1}}, false);
-  std::initializer_list<int> axis = {1};
+  std::vector<int> axis = {1};
   m.SetAxis(axis);
   m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
   m.Invoke();
@@ -355,11 +413,11 @@
 
 TEST(DynamicUint8SumOpTest, KeepDims) {
   float kQuantizedTolerance = GetTolerance(-10.0, 12.0);
-  std::initializer_list<float> data = {11.14, -0.14, 7.423, 0.879};
+  std::vector<float> data = {11.14, -0.14, 7.423, 0.879};
   SumOpDynamicModel m({TensorType_UINT8, {2, 2}, -10.0, 12.0},
                       {TensorType_UINT8, {2}, -10.0, 12.0},
                       {TensorType_INT32, {1}}, true);
-  std::initializer_list<int> axis = {0};
+  std::vector<int> axis = {0};
   m.SetAxis(axis);
   m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
   m.Invoke();
@@ -369,6 +427,209 @@
       ElementsAreArray(ArrayFloatNear({6.47059, 10.698}, kQuantizedTolerance)));
 }
 
+// Tests for reduce_prod
+
+TEST(ConstFloatProdOpTest, NotKeepDims) {
+  std::vector<float> data = {1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,
+                             9.0,  10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+                             17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+  ProdOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {2}},
+                     {4}, {1, 0, -3, -3}, false);
+  m.SetInput(data);
+  m.Invoke();
+  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
+  EXPECT_THAT(
+      m.GetOutput<float>(),
+      ElementsAreArray(ArrayFloatNear({3.162341376e+11, 1.9619905536e+12})));
+}
+
+TEST(ConstFloatProdOpTest, KeepDims) {
+  std::vector<float> data = {1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,
+                             9.0,  10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+                             17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+  ProdOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {3}},
+                     {2}, {0, 2}, true);
+  m.SetInput(data);
+  m.Invoke();
+  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3, 1}));
+  EXPECT_THAT(m.GetOutput<float>(),
+              ElementsAreArray(
+                  ArrayFloatNear({7.74592e+06, 1.197504e+08, 6.6889152e+08})));
+}
+
+TEST(DynamicFloatProdOpTest, NotKeepDims) {
+  std::vector<float> data = {1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,
+                             9.0,  10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+                             17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+  ProdOpDynamicModel m({TensorType_FLOAT32, {4, 3, 2}},
+                       {TensorType_FLOAT32, {2}}, {TensorType_INT32, {4}},
+                       false);
+  std::vector<int> axis = {1, 0, -3, -3};
+  m.SetAxis(axis);
+  m.SetInput(data);
+  m.Invoke();
+  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
+  EXPECT_THAT(
+      m.GetOutput<float>(),
+      ElementsAreArray(ArrayFloatNear({3.16234143225e+11, 1.9619905536e+12})));
+}
+
+TEST(DynamicFloatProdOpTest, KeepDims) {
+  std::vector<float> data = {1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,
+                             9.0,  10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+                             17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+  ProdOpDynamicModel m({TensorType_FLOAT32, {4, 3, 2}},
+                       {TensorType_FLOAT32, {3}}, {TensorType_INT32, {2}},
+                       true);
+  std::vector<int> axis = {0, 2};
+  m.SetAxis(axis);
+  m.SetInput(data);
+  m.Invoke();
+  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3, 1}));
+  EXPECT_THAT(m.GetOutput<float>(),
+              ElementsAreArray(
+                  ArrayFloatNear({7.74592e+06, 1.197504e+08, 6.6889152e+08})));
+}
+
+TEST(DynamicFloatProdOpTest, Scale) {
+  std::vector<float> data = {9.527};
+  ProdOpDynamicModel m({TensorType_FLOAT32, {1}}, {TensorType_FLOAT32, {1}},
+                       {TensorType_INT32, {1}}, true);
+  std::vector<int> axis = {0};
+  m.SetAxis(axis);
+  m.SetInput(data);
+  m.Invoke();
+  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1}));
+  EXPECT_THAT(m.GetOutput<float>(), ElementsAreArray(ArrayFloatNear({9.527})));
+}
+
+// Tests for reduce_max
+
+TEST(ConstFloatMaxOpTest, NotKeepDims) {
+  std::vector<float> data = {1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,
+                             9.0,  10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+                             17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+  MaxOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {2}},
+                    {4}, {1, 0, -3, -3}, false);
+  m.SetInput(data);
+  m.Invoke();
+  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
+  EXPECT_THAT(m.GetOutput<float>(), ElementsAreArray(ArrayFloatNear({23, 24})));
+}
+
+TEST(ConstFloatMaxOpTest, KeepDims) {
+  std::vector<float> data = {1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,
+                             9.0,  10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+                             17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+  MaxOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {3}},
+                    {2}, {0, 2}, true);
+  m.SetInput(data);
+  m.Invoke();
+  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3, 1}));
+  EXPECT_THAT(m.GetOutput<float>(),
+              ElementsAreArray(ArrayFloatNear({20, 22, 24})));
+}
+
+TEST(DynamicFloatMaxOpTest, NotKeepDims) {
+  std::vector<float> data = {1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,
+                             9.0,  10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+                             17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+  MaxOpDynamicModel m({TensorType_FLOAT32, {4, 3, 2}},
+                      {TensorType_FLOAT32, {2}}, {TensorType_INT32, {4}},
+                      false);
+  std::vector<int> axis = {1, 0, -3, -3};
+  m.SetAxis(axis);
+  m.SetInput(data);
+  m.Invoke();
+  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
+  EXPECT_THAT(m.GetOutput<float>(), ElementsAreArray(ArrayFloatNear({23, 24})));
+}
+
+TEST(DynamicFloatMaxOpTest, KeepDims) {
+  std::vector<float> data = {1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,
+                             9.0,  10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+                             17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+  MaxOpDynamicModel m({TensorType_FLOAT32, {4, 3, 2}},
+                      {TensorType_FLOAT32, {3}}, {TensorType_INT32, {2}}, true);
+  std::vector<int> axis = {0, 2};
+  m.SetAxis(axis);
+  m.SetInput(data);
+  m.Invoke();
+  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3, 1}));
+  EXPECT_THAT(m.GetOutput<float>(),
+              ElementsAreArray(ArrayFloatNear({20, 22, 24})));
+}
+
+TEST(DynamicFloatMaxOpTest, Scale) {
+  std::vector<float> data = {9.527};
+  MaxOpDynamicModel m({TensorType_FLOAT32, {1}}, {TensorType_FLOAT32, {1}},
+                      {TensorType_INT32, {1}}, true);
+  std::vector<int> axis = {0};
+  m.SetAxis(axis);
+  m.SetInput(data);
+  m.Invoke();
+  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1}));
+  EXPECT_THAT(m.GetOutput<float>(), ElementsAreArray(ArrayFloatNear({9.527})));
+}
+
+TEST(ConstUint8MaxOpTest, NotKeepDims) {
+  float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
+  std::vector<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
+  MaxOpConstModel m({TensorType_UINT8, {1, 3, 2}, -1.0, 1.0},
+                    {TensorType_UINT8, {2}, -1.0, 1.0}, {1}, {1}, false);
+  m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
+  m.Invoke();
+  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 2}));
+  EXPECT_THAT(m.GetDequantizedOutput(),
+              ElementsAreArray(
+                  ArrayFloatNear({0.501961, 0.603922}, kQuantizedTolerance)));
+}
+
+TEST(ConstUint8MaxOpTest, KeepDims) {
+  float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
+  std::vector<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
+  MaxOpConstModel m({TensorType_UINT8, {3, 2}, -1.0, 1.0},
+                    {TensorType_UINT8, {3}, -1.0, 1.0}, {1}, {1}, true);
+  m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
+  m.Invoke();
+  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 1}));
+  EXPECT_THAT(m.GetDequantizedOutput(),
+              ElementsAreArray(
+                  ArrayFloatNear({0.4, 0.4, 0.603922}, kQuantizedTolerance)));
+}
+
+TEST(DynamicUint8MaxOpTest, NotKeepDims) {
+  float kQuantizedTolerance = GetTolerance(-5.0, 2.0);
+  std::vector<float> data = {1.3, -4.8, -3.6, 0.24};
+  MaxOpDynamicModel m({TensorType_UINT8, {2, 2}, -5.0, 2.0},
+                      {TensorType_UINT8, {2}, -5.0, 2.0},
+                      {TensorType_INT32, {1}}, false);
+  std::vector<int> axis = {1};
+  m.SetAxis(axis);
+  m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
+  m.Invoke();
+  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
+  EXPECT_THAT(m.GetDequantizedOutput(),
+              ElementsAreArray(
+                  ArrayFloatNear({1.2902, 0.247059}, kQuantizedTolerance)));
+}
+
+TEST(DynamicUint8MaxOpTest, KeepDims) {
+  float kQuantizedTolerance = GetTolerance(-10.0, 12.0);
+  std::vector<float> data = {11.14, -0.14, 7.423, 0.879};
+  MaxOpDynamicModel m({TensorType_UINT8, {2, 2}, -10.0, 12.0},
+                      {TensorType_UINT8, {2}, -10.0, 12.0},
+                      {TensorType_INT32, {1}}, true);
+  std::vector<int> axis = {0};
+  m.SetAxis(axis);
+  m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
+  m.Invoke();
+  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 2}));
+  EXPECT_THAT(m.GetDequantizedOutput(),
+              ElementsAreArray(
+                  ArrayFloatNear({11.1294, 0.862745}, kQuantizedTolerance)));
+}
+
 }  // namespace
 }  // namespace tflite
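The prod expectations above follow directly from the reduce-axis semantics the tests exercise: negative axes wrap (so -3 on a rank-3 tensor is axis 0) and duplicates collapse. The following standalone sketch, our own illustration rather than the TFLite kernel, reproduces the ConstFloatProdOpTest.KeepDims values {7.74592e+06, 1.197504e+08, 6.6889152e+08}; called with axes {1, 0, -3, -3} it likewise yields the 3.16234143225e+11 figure from the dynamic test.

```cc
#include <cstdio>
#include <set>
#include <vector>

// Standalone sketch (not the TFLite kernel): reduce_prod over a dense
// row-major tensor. Negative axes wrap (a + rank) and duplicates collapse
// via the std::set, so {1, 0, -3, -3} on a rank-3 tensor reduces axes {0, 1}.
std::vector<float> ReduceProd(const std::vector<float>& data,
                              const std::vector<int>& dims,
                              const std::vector<int>& axes) {
  const int rank = static_cast<int>(dims.size());
  std::set<int> reduced;
  for (int a : axes) reduced.insert(a < 0 ? a + rank : a);

  // Output keeps the rank, with reduced dimensions collapsed to size 1
  // (the keep_dims=true layout; dropping the 1s gives keep_dims=false).
  std::vector<int> out_dims(dims);
  for (int a : reduced) out_dims[a] = 1;
  int out_size = 1;
  for (int d : out_dims) out_size *= d;
  std::vector<float> out(out_size, 1.0f);

  std::vector<int> idx(rank, 0);
  for (int flat = 0; flat < static_cast<int>(data.size()); ++flat) {
    int out_flat = 0;  // row-major offset with reduced coordinates zeroed
    for (int d = 0; d < rank; ++d) {
      out_flat = out_flat * out_dims[d] + (reduced.count(d) ? 0 : idx[d]);
    }
    out[out_flat] *= data[flat];
    // Advance the row-major multi-index.
    for (int d = rank - 1; d >= 0 && ++idx[d] == dims[d]; --d) idx[d] = 0;
  }
  return out;
}

int main() {
  std::vector<float> data(24);
  for (int i = 0; i < 24; ++i) data[i] = static_cast<float>(i + 1);
  // Matches ConstFloatProdOpTest.KeepDims:
  // 7.74592e+06 1.197504e+08 6.6889152e+08
  for (float v : ReduceProd(data, {4, 3, 2}, {0, 2})) std::printf("%g ", v);
  std::printf("\n");
}
```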
 
diff --git a/tensorflow/contrib/lite/kernels/register.cc b/tensorflow/contrib/lite/kernels/register.cc
index 1994e85..f0f2757 100644
--- a/tensorflow/contrib/lite/kernels/register.cc
+++ b/tensorflow/contrib/lite/kernels/register.cc
@@ -91,6 +91,8 @@
 TfLiteRegistration* Register_TILE();
 TfLiteRegistration* Register_NEG();
 TfLiteRegistration* Register_SUM();
+TfLiteRegistration* Register_REDUCE_PROD();
+TfLiteRegistration* Register_REDUCE_MAX();
 TfLiteRegistration* Register_SELECT();
 TfLiteRegistration* Register_SLICE();
 TfLiteRegistration* Register_SIN();
@@ -182,6 +184,8 @@
   AddBuiltin(BuiltinOperator_TRANSPOSE_CONV, Register_TRANSPOSE_CONV());
   AddBuiltin(BuiltinOperator_TILE, Register_TILE());
   AddBuiltin(BuiltinOperator_SUM, Register_SUM());
+  AddBuiltin(BuiltinOperator_REDUCE_PROD, Register_REDUCE_PROD());
+  AddBuiltin(BuiltinOperator_REDUCE_MAX, Register_REDUCE_MAX());
   AddBuiltin(BuiltinOperator_EXPAND_DIMS, Register_EXPAND_DIMS());
   AddBuiltin(BuiltinOperator_SPARSE_TO_DENSE, Register_SPARSE_TO_DENSE());
   AddBuiltin(BuiltinOperator_EQUAL, Register_EQUAL());
@@ -190,7 +194,7 @@
   AddBuiltin(BuiltinOperator_RSQRT, Register_RSQRT());
   AddBuiltin(BuiltinOperator_SHAPE, Register_SHAPE());
   AddBuiltin(BuiltinOperator_POW, Register_POW());
-  AddBuiltin(BuiltinOperator_FAKE_QUANT, Register_FAKE_QUANT());
+  AddBuiltin(BuiltinOperator_FAKE_QUANT, Register_FAKE_QUANT(), 1, 2);
 
   // TODO(andrewharp, ahentz): Move these somewhere more appropriate so that
   // custom ops aren't always included by default.
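Note the FAKE_QUANT registration now passes two extra integers. These read as a minimum and maximum supported operator version (our interpretation of the call; the resolver's declaration is not part of this diff), with version 2 of FAKE_QUANT being the one that understands narrow_range. A self-contained sketch of that registration pattern:

```cc
#include <map>
#include <utility>

// Illustrative sketch only (not the TFLite resolver): a registry keyed by
// (operator, version). AddBuiltin(op, reg, 1, 2) then means "this kernel
// serves versions 1 through 2", which is what the FAKE_QUANT line above
// needs once version 2 adds narrow_range.
struct Registration {};  // stand-in for TfLiteRegistration

class MiniResolver {
 public:
  void AddBuiltin(int op, Registration* reg, int min_version = 1,
                  int max_version = 1) {
    for (int v = min_version; v <= max_version; ++v) table_[{op, v}] = reg;
  }
  Registration* Find(int op, int version) const {
    auto it = table_.find({op, version});
    return it == table_.end() ? nullptr : it->second;
  }

 private:
  std::map<std::pair<int, int>, Registration*> table_;
};
```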
diff --git a/tensorflow/contrib/lite/kernels/sub.cc b/tensorflow/contrib/lite/kernels/sub.cc
index 1247525..541c85f 100644
--- a/tensorflow/contrib/lite/kernels/sub.cc
+++ b/tensorflow/contrib/lite/kernels/sub.cc
@@ -78,29 +78,44 @@
 }
 
 template <KernelType kernel_type>
-void EvalFloat(TfLiteContext* context, TfLiteNode* node,
-               TfLiteSubParams* params, const OpData* data,
-               const TfLiteTensor* input1, const TfLiteTensor* input2,
-               TfLiteTensor* output) {
-  float output_activation_min, output_activation_max;
-  CalculateActivationRange(params->activation, &output_activation_min,
-                           &output_activation_max);
-#define TF_LITE_SUB(type, opname)                                   \
-  type::opname(GetTensorData<float>(input1), GetTensorDims(input1), \
-               GetTensorData<float>(input2), GetTensorDims(input2), \
-               output_activation_min, output_activation_max,        \
-               GetTensorData<float>(output), GetTensorDims(output))
-  if (kernel_type == kReference) {
-    if (data->requires_broadcast) {
-      TF_LITE_SUB(reference_ops, BroadcastSub);
+void EvalSub(TfLiteContext* context, TfLiteNode* node, TfLiteSubParams* params,
+             const OpData* data, const TfLiteTensor* input1,
+             const TfLiteTensor* input2, TfLiteTensor* output) {
+#define TF_LITE_SUB(type, opname, data_type)                            \
+  data_type output_activation_min, output_activation_max;               \
+  CalculateActivationRange(params->activation, &output_activation_min,  \
+                           &output_activation_max);                     \
+  type::opname(GetTensorData<data_type>(input1), GetTensorDims(input1), \
+               GetTensorData<data_type>(input2), GetTensorDims(input2), \
+               output_activation_min, output_activation_max,            \
+               GetTensorData<data_type>(output), GetTensorDims(output))
+  if (output->type == kTfLiteInt32) {
+    if (kernel_type == kReference) {
+      if (data->requires_broadcast) {
+        TF_LITE_SUB(reference_ops, BroadcastSub, int32_t);
+      } else {
+        TF_LITE_SUB(reference_ops, Sub, int32_t);
+      }
     } else {
-      TF_LITE_SUB(reference_ops, Sub);
+      if (data->requires_broadcast) {
+        TF_LITE_SUB(optimized_ops, BroadcastSub, int32_t);
+      } else {
+        TF_LITE_SUB(optimized_ops, Sub, int32_t);
+      }
     }
-  } else {
-    if (data->requires_broadcast) {
-      TF_LITE_SUB(optimized_ops, BroadcastSub);
+  } else if (output->type == kTfLiteFloat32) {
+    if (kernel_type == kReference) {
+      if (data->requires_broadcast) {
+        TF_LITE_SUB(reference_ops, BroadcastSub, float);
+      } else {
+        TF_LITE_SUB(reference_ops, Sub, float);
+      }
     } else {
-      TF_LITE_SUB(optimized_ops, Sub);
+      if (data->requires_broadcast) {
+        TF_LITE_SUB(optimized_ops, BroadcastSub, float);
+      } else {
+        TF_LITE_SUB(optimized_ops, Sub, float);
+      }
     }
   }
 #undef TF_LITE_SUB
@@ -171,14 +186,15 @@
   const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
-  if (output->type == kTfLiteFloat32) {
-    EvalFloat<kernel_type>(context, node, params, data, input1, input2, output);
+  if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32) {
+    EvalSub<kernel_type>(context, node, params, data, input1, input2, output);
   } else if (output->type == kTfLiteUInt8) {
     EvalQuantized<kernel_type>(context, node, params, data, input1, input2,
                                output);
   } else {
     context->ReportError(
-        context, "output type %d is not supported, requires float|uint8 types.",
+        context,
+        "output type %d is not supported, requires float|uint8|int32 types.",
         output->type);
     return kTfLiteError;
   }
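The refactor above folds the old float-only path and the new int32 path into one EvalSub, with the element type threaded through the macro. The behavior for any supported type is the same: subtract elementwise, then clamp to the activation range. A standalone sketch of that contract (not the TFLite kernel; CalculateActivationRange and the real op signatures live in the code above):

```cc
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Standalone sketch of what EvalSub now covers for int32 as well as float:
// elementwise subtraction followed by a clamp to the activation range. With
// the RELU_N1_TO_1 range [-1, 1] this turns {-21, 0, 4, 3} into
// {-1, 0, 1, 1}, as the IntegerSubOpModel test below expects.
std::vector<int32_t> SubWithActivation(const std::vector<int32_t>& a,
                                       const std::vector<int32_t>& b,
                                       int32_t act_min, int32_t act_max) {
  std::vector<int32_t> out(a.size());
  for (std::size_t i = 0; i < a.size(); ++i) {
    out[i] = std::min(std::max(a[i] - b[i], act_min), act_max);
  }
  return out;
}

int main() {
  for (int32_t v : SubWithActivation({-20, 2, 7, 8}, {1, 2, 3, 5}, -1, 1)) {
    std::printf("%d ", v);  // prints: -1 0 1 1
  }
  std::printf("\n");
}
```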
diff --git a/tensorflow/contrib/lite/kernels/sub_test.cc b/tensorflow/contrib/lite/kernels/sub_test.cc
index ff07aee..5978c57 100644
--- a/tensorflow/contrib/lite/kernels/sub_test.cc
+++ b/tensorflow/contrib/lite/kernels/sub_test.cc
@@ -52,6 +52,13 @@
   std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
 };
 
+class IntegerSubOpModel : public BaseSubOpModel {
+ public:
+  using BaseSubOpModel::BaseSubOpModel;
+
+  std::vector<int32_t> GetOutput() { return ExtractVector<int32_t>(output_); }
+};
+
 class QuantizedSubOpModel : public BaseSubOpModel {
  public:
   using BaseSubOpModel::BaseSubOpModel;
@@ -125,6 +132,57 @@
   }
 }
 
+TEST(IntegerSubOpModel, NoActivation) {
+  IntegerSubOpModel m({TensorType_INT32, {1, 2, 2, 1}},
+                      {TensorType_INT32, {1, 2, 2, 1}}, {TensorType_INT32, {}},
+                      ActivationFunctionType_NONE);
+  m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8});
+  m.PopulateTensor<int32_t>(m.input2(), {1, 2, 3, 5});
+  m.Invoke();
+  EXPECT_THAT(m.GetOutput(), ElementsAreArray({-21, 0, 4, 3}));
+}
+
+TEST(IntegerSubOpModel, ActivationRELU_N1_TO_1) {
+  IntegerSubOpModel m({TensorType_INT32, {1, 2, 2, 1}},
+                      {TensorType_INT32, {1, 2, 2, 1}}, {TensorType_INT32, {}},
+                      ActivationFunctionType_RELU_N1_TO_1);
+  m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8});
+  m.PopulateTensor<int32_t>(m.input2(), {1, 2, 3, 5});
+  m.Invoke();
+  EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1, 0, 1, 1}));
+}
+
+TEST(IntegerSubOpModel, VariousInputShapes) {
+  std::vector<std::initializer_list<int>> test_shapes = {
+      {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
+  for (int i = 0; i < test_shapes.size(); ++i) {
+    IntegerSubOpModel m({TensorType_INT32, test_shapes[i]},
+                        {TensorType_INT32, test_shapes[i]},
+                        {TensorType_INT32, {}}, ActivationFunctionType_NONE);
+    m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8, 11, 20});
+    m.PopulateTensor<int32_t>(m.input2(), {1, 2, 3, 5, 11, 1});
+    m.Invoke();
+    EXPECT_THAT(m.GetOutput(), ElementsAreArray({-21, 0, 4, 3, 0, 19}))
+        << "With shape number " << i;
+  }
+}
+
+TEST(IntegerSubOpModel, WithBroadcast) {
+  std::vector<std::initializer_list<int>> test_shapes = {
+      {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
+  for (int i = 0; i < test_shapes.size(); ++i) {
+    IntegerSubOpModel m({TensorType_INT32, test_shapes[i]},
+                        {TensorType_INT32, {}},  // always a scalar
+                        {TensorType_INT32, {}}, ActivationFunctionType_NONE);
+    m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8, 11, 20});
+    m.PopulateTensor<int32_t>(m.input2(), {1});
+    m.Invoke();
+    EXPECT_THAT(m.GetOutput(), ElementsAreArray({-21, 1, 6, 7, 10, 19}))
+        << "With shape number " << i;
+  }
+}
+
 TEST(QuantizedSubOpModel, QuantizedTestsNoActivation) {
   float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
   std::vector<std::initializer_list<float>> inputs1 = {
diff --git a/tensorflow/contrib/lite/kernels/transpose_conv.cc b/tensorflow/contrib/lite/kernels/transpose_conv.cc
index 7182374..8b9deee 100644
--- a/tensorflow/contrib/lite/kernels/transpose_conv.cc
+++ b/tensorflow/contrib/lite/kernels/transpose_conv.cc
@@ -22,7 +22,6 @@
 
 #include "tensorflow/contrib/lite/builtin_op_data.h"
 #include "tensorflow/contrib/lite/context.h"
-#include "tensorflow/contrib/lite/kernels/eigen_support.h"
 #include "tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h"
 #include "tensorflow/contrib/lite/kernels/internal/tensor.h"
 #include "tensorflow/contrib/lite/kernels/kernel_util.h"
@@ -39,35 +38,9 @@
 constexpr int kDataInputTensor = 2;
 constexpr int kOutputTensor = 0;
 
-const int kTensorNotAllocated = -1;
-
-struct OpData {
-  // IDs are the arbitrary identifiers used by TF Lite to identify and access
-  // memory buffers.
-  int im2col_id = kTensorNotAllocated;
-
-  // im2col is the only temporary currently tracked, therefore always index 0.
-  // If more temporaries are added, they should be properly tracked.
-  int32_t im2col_index = 0;
-};
-
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  // This is a builtin op, so we don't use the contents in 'buffer', if any.
-  // Instead, we allocate a new object to use as scratch space for im2col, and
-  // to carry information from Prepare() to Eval().
-  auto* data = new OpData;
-  eigen_support::IncrementUsageCounter(context);
-  return data;
-}
-
-void Free(TfLiteContext* context, void* buffer) {
-  eigen_support::DecrementUsageCounter(context);
-  delete reinterpret_cast<OpData*>(buffer);
-}
-
-TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
-                                const TfLiteTensor* output_shape,
-                                TfLiteTensor* output) {
+TfLiteStatus ResizeOutputShape(TfLiteContext* context,
+                               const TfLiteTensor* output_shape,
+                               TfLiteTensor* output) {
   // Currently only support int32 for output shape.
   if (output_shape->type != kTfLiteInt32) {
     context->ReportError(context, "Output shape is %d, not int32.",
@@ -83,60 +56,15 @@
   return context->ResizeTensor(context, output, output_shape_array);
 }
 
-// Allocate temporary im2col tensor.
-static TfLiteStatus AllocateIm2colTensor(TfLiteContext* context,
-                                         TfLiteNode* node) {
-  OpData* data = reinterpret_cast<OpData*>(node->user_data);
-  if (data->im2col_id == kTensorNotAllocated) {
-    context->AddTensors(context, 1, &data->im2col_id);
-  }
-
-  TfLiteIntArrayFree(node->temporaries);
-  node->temporaries = TfLiteIntArrayCreate(1);
-  node->temporaries->data[data->im2col_index] = data->im2col_id;
-
-  return kTfLiteOk;
-}
-
-TfLiteStatus ResizeIm2ColTensor(TfLiteContext* context,
-                                const TfLiteTensor* output_shape,
-                                const TfLiteTensor* weights,
-                                const TfLiteTensor* input,
-                                TfLiteTensor* im2col) {
-  if (output_shape->type != kTfLiteInt32) {
-    context->ReportError(context, "im2col shape is %d, not int32.",
-                         output_shape->type);
-    return kTfLiteError;
-  }
-  TF_LITE_ENSURE_EQ(context, NumElements(output_shape), 4);
-  TfLiteIntArray* im2col_shape_array = TfLiteIntArrayCreate(4);
-  im2col_shape_array->data[0] = output_shape->data.i32[0];
-  im2col_shape_array->data[1] = output_shape->data.i32[1];
-  im2col_shape_array->data[2] = output_shape->data.i32[2];
-  const int input_depth = SizeOfDimension(input, 3);
-  const int filter_width = SizeOfDimension(weights, 1);
-  const int filter_height = SizeOfDimension(weights, 2);
-  im2col_shape_array->data[3] = input_depth * filter_height * filter_width;
-
-  im2col->type = input->type;
-  im2col->allocation_type = kTfLiteArenaRw;
-  return context->ResizeTensor(context, im2col, im2col_shape_array);
-}
-
 TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
-  TF_LITE_ENSURE_STATUS(AllocateIm2colTensor(context, node));
-
   const TfLiteTensor* output_shape =
       GetInput(context, node, kOutputShapeTensor);
   const TfLiteTensor* weights = GetInput(context, node, kWeightsTensor);
   const TfLiteTensor* input = GetInput(context, node, kDataInputTensor);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
-  OpData* user_data = reinterpret_cast<OpData*>(node->user_data);
-  TfLiteTensor* im2col =
-      &context->tensors[node->temporaries->data[user_data->im2col_index]];
 
   TF_LITE_ENSURE_EQ(context, NumDimensions(output_shape), 1);
   TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
@@ -153,15 +81,11 @@
   TF_LITE_ENSURE_EQ(context, SizeOfDimension(input, 3),
                     SizeOfDimension(weights, 3));
 
-  if (IsConstantTensor(output_shape)) {
-    TF_LITE_ENSURE_STATUS(ResizeOutputTensor(context, output_shape, output));
-    TF_LITE_ENSURE_STATUS(
-        ResizeIm2ColTensor(context, output_shape, weights, input, im2col));
-  } else {
-    // Defer resizing until Eval().
+  if (!IsConstantTensor(output_shape)) {
     SetTensorToDynamic(output);
+    return kTfLiteOk;
   }
-  return kTfLiteOk;
+  return ResizeOutputShape(context, output_shape, output);
 }
 
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
@@ -170,19 +94,13 @@
   const TfLiteTensor* weights = GetInput(context, node, kWeightsTensor);
   const TfLiteTensor* input = GetInput(context, node, kDataInputTensor);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
-  OpData* user_data = reinterpret_cast<OpData*>(node->user_data);
-  TfLiteTensor* im2col =
-      &context->tensors[node->temporaries->data[user_data->im2col_index]];
+
   const auto* params =
       reinterpret_cast<TfLiteTransposeConvParams*>(node->builtin_data);
 
   if (IsDynamicTensor(output)) {
     TF_LITE_ENSURE_OK(context,
-                      ResizeOutputTensor(context, output_shape, output));
-  }
-  if (IsDynamicTensor(im2col)) {
-    TF_LITE_ENSURE_OK(context, ResizeIm2ColTensor(context, output_shape,
-                                                  weights, input, im2col));
+                      ResizeOutputShape(context, output_shape, output));
   }
 
   // Get height and width of the output image.
@@ -201,12 +119,17 @@
   // Currently only support float32.
   switch (input->type) {
     case kTfLiteFloat32:
-      optimized_ops::TransposeConv(
+      reference_ops::TransposeConv(
           GetTensorData<float>(input), GetTensorDims(input),
           GetTensorData<float>(weights), GetTensorDims(weights), stride_width,
           stride_height, padding_size.width, padding_size.height,
           GetTensorData<float>(output), GetTensorDims(output),
-          GetTensorData<float>(im2col), GetTensorDims(im2col));
+          // Last two args specify im2col which reference_ops ignores.
+          // (Note this does not lead to a performance regression, as the
+          // previous optimized version was just a copy of the reference code.)
+          // TODO(b/110208176): Allocate im2col tensors and switch to
+          // optimized_ops.
+          GetTensorData<float>(output), GetTensorDims(output));
       break;
     default:
       context->ReportError(context, "Type %d, not currently supported.",
@@ -219,8 +142,8 @@
 }  // namespace transpose_conv
 
 TfLiteRegistration* Register_TRANSPOSE_CONV() {
-  static TfLiteRegistration r = {transpose_conv::Init, transpose_conv::Free,
-                                 transpose_conv::Prepare, transpose_conv::Eval};
+  static TfLiteRegistration r = {nullptr, nullptr, transpose_conv::Prepare,
+                                 transpose_conv::Eval};
   return &r;
 }
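Prepare and Eval now both funnel through the renamed ResizeOutputShape, whose middle lines are elided by this hunk. Below is a hedged reconstruction of the full helper: the int32 check and the final ResizeTensor call are verbatim from the visible lines, while the copy loop in between is our assumption about what the elided lines do.

```cc
// Hedged reconstruction of ResizeOutputShape (the hunk elides its middle).
TfLiteStatus ResizeOutputShapeSketch(TfLiteContext* context,
                                     const TfLiteTensor* output_shape,
                                     TfLiteTensor* output) {
  if (output_shape->type != kTfLiteInt32) {
    context->ReportError(context, "Output shape is %d, not int32.",
                         output_shape->type);
    return kTfLiteError;
  }
  // Copy the shape tensor's elements into a TfLiteIntArray; ResizeTensor
  // takes ownership of the array.
  const int rank = NumElements(output_shape);
  TfLiteIntArray* output_shape_array = TfLiteIntArrayCreate(rank);
  for (int i = 0; i < rank; ++i) {
    output_shape_array->data[i] = output_shape->data.i32[i];
  }
  return context->ResizeTensor(context, output, output_shape_array);
}
```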
 
diff --git a/tensorflow/contrib/lite/kernels/transpose_conv_test.cc b/tensorflow/contrib/lite/kernels/transpose_conv_test.cc
index c741df1..55df897 100644
--- a/tensorflow/contrib/lite/kernels/transpose_conv_test.cc
+++ b/tensorflow/contrib/lite/kernels/transpose_conv_test.cc
@@ -14,7 +14,6 @@
 ==============================================================================*/
 #include <cstdarg>
 #include <gtest/gtest.h>
-#include "absl/memory/memory.h"
 #include "tensorflow/contrib/lite/interpreter.h"
 #include "tensorflow/contrib/lite/kernels/register.h"
 #include "tensorflow/contrib/lite/kernels/test_util.h"
@@ -25,49 +24,9 @@
 
 using ::testing::ElementsAreArray;
 
-class ConstTransposeConvOpModel : public SingleOpModel {
-  // Just to be extra confusing, transpose_conv has an _input_ named
-  // "output_shape". This input sets the shape of the output tensor of the op.
-  // In this version of the test class, "output_shape" is a constant that must
-  // be specified in the constructor.
- public:
-  ConstTransposeConvOpModel(TfLiteRegistration* registration,
-                            std::initializer_list<int> input_shape,
-                            std::initializer_list<int> filter_shape,
-                            std::initializer_list<int> output_shape_data,
-                            Padding padding, int stride_w, int stride_h) {
-    output_shape_ = AddConstInput(TensorType_INT32, output_shape_data,
-                                  {static_cast<int>(output_shape_data.size())});
-    filter_ = AddInput(TensorType_FLOAT32);
-    input_ = AddInput(TensorType_FLOAT32);
-    output_ = AddOutput(TensorType_FLOAT32);
-    SetBuiltinOp(
-        BuiltinOperator_TRANSPOSE_CONV, BuiltinOptions_TransposeConvOptions,
-        CreateTransposeConvOptions(builder_, padding, stride_w, stride_h)
-            .Union());
-    resolver_ = absl::make_unique<SingleOpResolver>(
-        BuiltinOperator_TRANSPOSE_CONV, registration);
-    BuildInterpreter({{4}, filter_shape, input_shape});
-  }
-
-  int output_shape() { return output_shape_; }
-  int filter() { return filter_; }
-  int input() { return input_; }
-
-  std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
-  std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
-
- private:
-  int output_shape_;
-  int filter_;
-  int input_;
-  int output_;
-};
-
 class TransposeConvOpModel : public SingleOpModel {
  public:
-  TransposeConvOpModel(TfLiteRegistration* registration,
-                       std::initializer_list<int> input_shape,
+  TransposeConvOpModel(std::initializer_list<int> input_shape,
                        std::initializer_list<int> filter_shape, Padding padding,
                        int stride_w, int stride_h) {
     output_shape_ = AddInput(TensorType_INT32);
@@ -78,8 +37,6 @@
         BuiltinOperator_TRANSPOSE_CONV, BuiltinOptions_TransposeConvOptions,
         CreateTransposeConvOptions(builder_, padding, stride_w, stride_h)
             .Union());
-    resolver_ = absl::make_unique<SingleOpResolver>(
-        BuiltinOperator_TRANSPOSE_CONV, registration);
     BuildInterpreter({{4}, filter_shape, input_shape});
   }
 
@@ -97,15 +54,6 @@
   int output_;
 };
 
-const auto kKernelMap = new std::map<string, TfLiteRegistration*>({});
-
-class TransposeConvOpTest : public SingleOpTest {
- protected:
-  const std::map<string, TfLiteRegistration*>& GetKernelMap() override {
-    return *kKernelMap;
-  }
-};
-
 // Test case:
 // output = tf.nn.conv2d_backprop_input(
 //     tf.constant([ 1, 4, 4, 1 ]),
@@ -113,9 +61,8 @@
 //     tf.constant(np.arange(1, 17), shape=[ 1, 4, 4, 1 ], dtype=tf.float32),
 //     [1, 1, 1, 1 ],
 //     "SAME")
-TEST_P(TransposeConvOpTest, SimpleTest) {
-  TransposeConvOpModel m(GetRegistration(), {1, 4, 4, 1}, {1, 3, 3, 1},
-                         Padding_SAME, 1, 1);
+TEST(TransposeConvOpModelTest, SimpleTest) {
+  TransposeConvOpModel m({1, 4, 4, 1}, {1, 3, 3, 1}, Padding_SAME, 1, 1);
   m.PopulateTensor<int>(m.output_shape(), {1, 4, 4, 1});
   m.PopulateTensor<float>(m.filter(), {1, 2, 3, 4, 5, 6, 7, 8, 9});
   m.PopulateTensor<float>(
@@ -128,21 +75,6 @@
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
 }
 
-// Test case: Same as above, but with a const "output_shape"
-TEST_P(TransposeConvOpTest, ConstSimpleTest) {
-  ConstTransposeConvOpModel m(GetRegistration(), {1, 4, 4, 1}, {1, 4, 4, 1},
-                              {1, 3, 3, 1}, Padding_SAME, 1, 1);
-  m.PopulateTensor<float>(m.filter(), {1, 2, 3, 4, 5, 6, 7, 8, 9});
-  m.PopulateTensor<float>(
-      m.input(), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
-  m.Invoke();
-
-  EXPECT_THAT(m.GetOutput(),
-              ElementsAreArray({29, 62, 83, 75, 99, 192, 237, 198, 207, 372,
-                                417, 330, 263, 446, 485, 365}));
-  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
-}
-
 // Test case:
 // filter = tf.constant(np.arange(1, 19),
 //                      shape=[ 3, 3, 1, 2 ],
@@ -155,9 +87,8 @@
 //     "SAME")
 // And filter value is derived by:
 // filter = tf.reshape(tf.transpose(filter, perm=[3, 0, 1, 2]), shape=[18, 1])
-TEST_P(TransposeConvOpTest, TwoFiltersTest) {
-  TransposeConvOpModel m(GetRegistration(), {1, 4, 4, 2}, {1, 3, 3, 2},
-                         Padding_SAME, 1, 1);
+TEST(TransposeConvOpModelTest, TwoFiltersTest) {
+  TransposeConvOpModel m({1, 4, 4, 2}, {1, 3, 3, 2}, Padding_SAME, 1, 1);
   m.PopulateTensor<int>(m.output_shape(), {1, 4, 4, 1});
   m.PopulateTensor<float>(m.filter(), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                                        13, 14, 15, 16, 17, 18});
@@ -185,9 +116,8 @@
 //     "VALID")
 // And filter value is derived by:
 // filter = tf.reshape(tf.transpose(filter, perm=[3, 0, 1, 2]), shape=[1, 18])
-TEST_P(TransposeConvOpTest, PaddingValidTest) {
-  TransposeConvOpModel m(GetRegistration(), {1, 4, 4, 2}, {1, 3, 3, 2},
-                         Padding_VALID, 1, 1);
+TEST(TransposeConvOpModelTest, PaddingValidTest) {
+  TransposeConvOpModel m({1, 4, 4, 2}, {1, 3, 3, 2}, Padding_VALID, 1, 1);
   m.PopulateTensor<int>(m.output_shape(), {1, 6, 6, 1});
   m.PopulateTensor<float>(m.filter(), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                                        13, 14, 15, 16, 17, 18});
@@ -216,9 +146,8 @@
 //     tf.constant(np.arange(1, 5), shape=[ 1, 2, 2, 1 ], dtype=tf.float32),
 //     [1, 2, 2, 1 ],
 //     "VALID")
-TEST_P(TransposeConvOpTest, StrideValidTest) {
-  TransposeConvOpModel m(GetRegistration(), {1, 2, 2, 1}, {1, 3, 3, 1},
-                         Padding_VALID, 2, 2);
+TEST(TransposeConvOpModelTest, StrideValidTest) {
+  TransposeConvOpModel m({1, 2, 2, 1}, {1, 3, 3, 1}, Padding_VALID, 2, 2);
   m.PopulateTensor<int>(m.output_shape(), {1, 5, 5, 1});
   m.PopulateTensor<float>(m.filter(), {1, 2, 3, 4, 5, 6, 7, 8, 9});
   m.PopulateTensor<float>(m.input(), {1, 2, 3, 4});
@@ -241,9 +170,8 @@
 //     tf.constant(np.arange(1, 5), shape=[ 1, 2, 2, 1 ], dtype=tf.float32),
 //     [1, 2, 2, 1 ],
 //     "VALID")
-TEST_P(TransposeConvOpTest, MultiChannelTest) {
-  TransposeConvOpModel m(GetRegistration(), {1, 2, 2, 1}, {2, 3, 3, 1},
-                         Padding_VALID, 2, 2);
+TEST(TransposeConvOpModelTest, MultiChannelTest) {
+  TransposeConvOpModel m({1, 2, 2, 1}, {2, 3, 3, 1}, Padding_VALID, 2, 2);
   m.PopulateTensor<int>(m.output_shape(), {1, 5, 5, 2});
   m.PopulateTensor<float>(m.filter(), {1, 3, 5, 7, 9, 11, 13, 15, 17, 2, 4, 6,
                                        8, 10, 12, 14, 16, 18});
@@ -259,24 +187,6 @@
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 5, 5, 2}));
 }
 
-// Test case: Same as above, but with a const "output_shape"
-TEST_P(TransposeConvOpTest, ConstMultiChannelTest) {
-  ConstTransposeConvOpModel m(GetRegistration(), {1, 2, 2, 1}, {2, 3, 3, 1},
-                              {1, 5, 5, 2}, Padding_VALID, 2, 2);
-  m.PopulateTensor<float>(m.filter(), {1, 3, 5, 7, 9, 11, 13, 15, 17, 2, 4, 6,
-                                       8, 10, 12, 14, 16, 18});
-  m.PopulateTensor<float>(m.input(), {1, 2, 3, 4});
-  m.Invoke();
-
-  EXPECT_THAT(
-      m.GetOutput(),
-      ElementsAreArray({1,  2,  3,  4,  7,  10,  6,   8,  10, 12, 7,  8,  9,
-                        10, 25, 28, 18, 20, 22,  24,  16, 20, 24, 28, 62, 72,
-                        42, 48, 54, 60, 21, 24,  27,  30, 61, 68, 36, 40, 44,
-                        48, 39, 42, 45, 48, 103, 110, 60, 64, 68, 72}));
-  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 5, 5, 2}));
-}
-
 // Test case:
 // filter = tf.constant(np.random.randint(1, 10, size=9),
 //                      shape=[ 3, 3, 1, 1 ],
@@ -289,9 +199,8 @@
 //     "SAME")
 // And filter value is derived by:
 // filter = tf.reshape(tf.transpose(filter, perm=[3, 0, 1, 2]), shape=[-1])
-TEST_P(TransposeConvOpTest, AccuracyTest) {
-  TransposeConvOpModel m(GetRegistration(), {1, 1, 2, 1}, {1, 3, 3, 1},
-                         Padding_SAME, 3, 3);
+TEST(TransposeConvOpModelTest, AccuracyTest) {
+  TransposeConvOpModel m({1, 1, 2, 1}, {1, 3, 3, 1}, Padding_SAME, 3, 3);
   m.PopulateTensor<int>(m.output_shape(), {1, 3, 4, 1});
   m.PopulateTensor<float>(m.filter(), {9, 5, 6, 9, 8, 5, 3, 1, 4});
   m.PopulateTensor<float>(m.input(), {323, 521});
@@ -303,10 +212,6 @@
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3, 4, 1}));
 }
 
-INSTANTIATE_TEST_CASE_P(
-    TransposeConvOpTest, TransposeConvOpTest,
-    ::testing::ValuesIn(SingleOpTest::GetKernelTags(*kKernelMap)));
-
 }  // namespace
 }  // namespace tflite
 
diff --git a/tensorflow/contrib/lite/kernels/unidirectional_sequence_lstm.cc b/tensorflow/contrib/lite/kernels/unidirectional_sequence_lstm.cc
index 32daf2b..c48b470 100644
--- a/tensorflow/contrib/lite/kernels/unidirectional_sequence_lstm.cc
+++ b/tensorflow/contrib/lite/kernels/unidirectional_sequence_lstm.cc
@@ -274,7 +274,8 @@
   const int n_output = recurrent_to_output_weights->dims->data[1];
 
   // Check that input tensor dimensions matches with each other.
-  CheckInputTensorDimensions(context, node, n_input, n_output, n_cell);
+  TF_LITE_ENSURE_OK(context, CheckInputTensorDimensions(context, node, n_input,
+                                                        n_output, n_cell));
 
   // Get the pointer to output, output_state and cell_state buffer tensors.
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
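The point of this change is that CheckInputTensorDimensions returns a TfLiteStatus that was previously discarded, so a failed dimension check did not abort Prepare. Wrapping the call in TF_LITE_ENSURE_OK propagates the failure. The macro behaves roughly as below (paraphrased from the TFLite headers of this era; treat it as an approximation):

```cc
// Evaluate the status expression and early-return it from the enclosing
// function if it is not kTfLiteOk.
#define TF_LITE_ENSURE_OK(context, status) \
  do {                                     \
    if ((status) != kTfLiteOk) {           \
      return status;                       \
    }                                      \
  } while (0)
```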
diff --git a/tensorflow/contrib/lite/model.cc b/tensorflow/contrib/lite/model.cc
index 93b3df9..6c1ba36 100644
--- a/tensorflow/contrib/lite/model.cc
+++ b/tensorflow/contrib/lite/model.cc
@@ -616,6 +616,8 @@
       break;
     }
     case BuiltinOperator_MEAN:
+    case BuiltinOperator_REDUCE_MAX:
+    case BuiltinOperator_REDUCE_PROD:
     case BuiltinOperator_SUM: {
       auto* params = MallocPOD<TfLiteReducerParams>();
       if (auto* schema_params = op->builtin_options_as_ReducerOptions()) {
@@ -715,6 +717,7 @@
         params->min = schema_params->min();
         params->max = schema_params->max();
         params->num_bits = schema_params->num_bits();
+        params->narrow_range = schema_params->narrow_range();
       }
       *builtin_data = static_cast<void*>(params);
       break;
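MEAN, REDUCE_MAX, REDUCE_PROD and SUM now share a single parsing block via case fall-through. The body of that block is elided by the hunk; since ReducerOptions defines only a keep_dims field, the assumed shape of the full block is:

```cc
// Our reconstruction of the shared block, not a verbatim copy.
case BuiltinOperator_MEAN:
case BuiltinOperator_REDUCE_MAX:
case BuiltinOperator_REDUCE_PROD:
case BuiltinOperator_SUM: {
  auto* params = MallocPOD<TfLiteReducerParams>();
  if (auto* schema_params = op->builtin_options_as_ReducerOptions()) {
    params->keep_dims = schema_params->keep_dims();
  }
  *builtin_data = static_cast<void*>(params);
  break;
}
```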
diff --git a/tensorflow/contrib/lite/nnapi_delegate.cc b/tensorflow/contrib/lite/nnapi_delegate.cc
index cc66848..5950840 100644
--- a/tensorflow/contrib/lite/nnapi_delegate.cc
+++ b/tensorflow/contrib/lite/nnapi_delegate.cc
@@ -607,6 +607,8 @@
       case tflite::BuiltinOperator_EQUAL:
       case tflite::BuiltinOperator_NOT_EQUAL:
       case tflite::BuiltinOperator_SUM:
+      case tflite::BuiltinOperator_REDUCE_MAX:
+      case tflite::BuiltinOperator_REDUCE_PROD:
       case tflite::BuiltinOperator_SQRT:
       case tflite::BuiltinOperator_RSQRT:
       case tflite::BuiltinOperator_SHAPE:
diff --git a/tensorflow/contrib/lite/profiling/profile_summarizer.cc b/tensorflow/contrib/lite/profiling/profile_summarizer.cc
index c37a096..36e87b6 100644
--- a/tensorflow/contrib/lite/profiling/profile_summarizer.cc
+++ b/tensorflow/contrib/lite/profiling/profile_summarizer.cc
@@ -83,7 +83,7 @@
   OperatorDetails details;
   details.name = op_name;
   if (profiling_string) {
-    details.name += ":" + string(profiling_string);
+    details.name += ":" + std::string(profiling_string);
   }
   details.inputs = GetTensorNames(interpreter, inputs);
   details.outputs = GetTensorNames(interpreter, outputs);
diff --git a/tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.h b/tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.h
index febfd2d..556ec71 100644
--- a/tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.h
+++ b/tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.h
@@ -15,13 +15,13 @@
 #ifndef TENSORFLOW_CONTRIB_LITE_PYTHON_INTERPRETER_WRAPPER_INTERPRETER_WRAPPER_H_
 #define TENSORFLOW_CONTRIB_LITE_PYTHON_INTERPRETER_WRAPPER_INTERPRETER_WRAPPER_H_
 
+// Place `<locale>` before <Python.h> to avoid build failures on macOS.
+#include <locale>
 #include <memory>
 #include <string>
 #include <vector>
 
-// Place `<locale>` before <Python.h> to avoid build failures in macOS.
 #include <Python.h>
-#include <locale>
 
 // We forward declare TFLite classes here to avoid exposing them to SWIG.
 namespace tflite {
diff --git a/tensorflow/contrib/lite/schema/schema.fbs b/tensorflow/contrib/lite/schema/schema.fbs
index 17ea260..6c3189a 100644
--- a/tensorflow/contrib/lite/schema/schema.fbs
+++ b/tensorflow/contrib/lite/schema/schema.fbs
@@ -155,13 +155,15 @@
   EQUAL = 71,
   NOT_EQUAL = 72,
   LOG = 73,
-  SUM=74,
+  SUM = 74,
   SQRT = 75,
   RSQRT = 76,
   SHAPE = 77,
   POW = 78,
   ARG_MIN = 79,
   FAKE_QUANT = 80,
+  REDUCE_PROD = 81,
+  REDUCE_MAX = 82,
 }
 
 // Options for the builtin operators.
@@ -526,9 +528,13 @@
 }
 
 table FakeQuantOptions {
+  // Parameters supported by version 1:
   min:float;
   max:float;
   num_bits:int;
+
+  // Parameters supported by version 2:
+  narrow_range:bool;
 }
 
 // An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
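narrow_range mirrors the attribute of the same name on TensorFlow's FakeQuant ops: when set, the lowest quantized code is reserved, so an 8-bit range spans 254 steps instead of 255. A small self-contained illustration of the effect on the quantization scale (our own arithmetic, not code from this change):

```cc
#include <cstdio>

// Effect of narrow_range on the scale for num_bits = 8: with narrow_range
// the lowest code is reserved, so the divisor shrinks by one.
int main() {
  const float min = -1.0f, max = 1.0f;
  const int num_bits = 8;
  const int full = (1 << num_bits) - 1;  // 255 steps across [min, max]
  const int narrow = full - 1;           // 254 steps across [min, max]
  std::printf("full-range scale:   %g\n", (max - min) / full);
  std::printf("narrow-range scale: %g\n", (max - min) / narrow);
}
```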
diff --git a/tensorflow/contrib/lite/schema/schema_generated.h b/tensorflow/contrib/lite/schema/schema_generated.h
index 37489eb..8052404 100755
--- a/tensorflow/contrib/lite/schema/schema_generated.h
+++ b/tensorflow/contrib/lite/schema/schema_generated.h
@@ -351,11 +351,13 @@
   BuiltinOperator_POW = 78,
   BuiltinOperator_ARG_MIN = 79,
   BuiltinOperator_FAKE_QUANT = 80,
+  BuiltinOperator_REDUCE_PROD = 81,
+  BuiltinOperator_REDUCE_MAX = 82,
   BuiltinOperator_MIN = BuiltinOperator_ADD,
-  BuiltinOperator_MAX = BuiltinOperator_FAKE_QUANT
+  BuiltinOperator_MAX = BuiltinOperator_REDUCE_MAX
 };
 
-inline BuiltinOperator (&EnumValuesBuiltinOperator())[80] {
+inline BuiltinOperator (&EnumValuesBuiltinOperator())[82] {
   static BuiltinOperator values[] = {
     BuiltinOperator_ADD,
     BuiltinOperator_AVERAGE_POOL_2D,
@@ -436,7 +438,9 @@
     BuiltinOperator_SHAPE,
     BuiltinOperator_POW,
     BuiltinOperator_ARG_MIN,
-    BuiltinOperator_FAKE_QUANT
+    BuiltinOperator_FAKE_QUANT,
+    BuiltinOperator_REDUCE_PROD,
+    BuiltinOperator_REDUCE_MAX
   };
   return values;
 }
@@ -524,6 +528,8 @@
     "POW",
     "ARG_MIN",
     "FAKE_QUANT",
+    "REDUCE_PROD",
+    "REDUCE_MAX",
     nullptr
   };
   return names;
@@ -5213,10 +5219,12 @@
   float min;
   float max;
   int32_t num_bits;
+  bool narrow_range;
   FakeQuantOptionsT()
       : min(0.0f),
         max(0.0f),
-        num_bits(0) {
+        num_bits(0),
+        narrow_range(false) {
   }
 };
 
@@ -5225,7 +5233,8 @@
   enum {
     VT_MIN = 4,
     VT_MAX = 6,
-    VT_NUM_BITS = 8
+    VT_NUM_BITS = 8,
+    VT_NARROW_RANGE = 10
   };
   float min() const {
     return GetField<float>(VT_MIN, 0.0f);
@@ -5236,11 +5245,15 @@
   int32_t num_bits() const {
     return GetField<int32_t>(VT_NUM_BITS, 0);
   }
+  bool narrow_range() const {
+    return GetField<uint8_t>(VT_NARROW_RANGE, 0) != 0;
+  }
   bool Verify(flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyField<float>(verifier, VT_MIN) &&
            VerifyField<float>(verifier, VT_MAX) &&
            VerifyField<int32_t>(verifier, VT_NUM_BITS) &&
+           VerifyField<uint8_t>(verifier, VT_NARROW_RANGE) &&
            verifier.EndTable();
   }
   FakeQuantOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
@@ -5260,6 +5273,9 @@
   void add_num_bits(int32_t num_bits) {
     fbb_.AddElement<int32_t>(FakeQuantOptions::VT_NUM_BITS, num_bits, 0);
   }
+  void add_narrow_range(bool narrow_range) {
+    fbb_.AddElement<uint8_t>(FakeQuantOptions::VT_NARROW_RANGE, static_cast<uint8_t>(narrow_range), 0);
+  }
   explicit FakeQuantOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
@@ -5276,11 +5292,13 @@
     flatbuffers::FlatBufferBuilder &_fbb,
     float min = 0.0f,
     float max = 0.0f,
-    int32_t num_bits = 0) {
+    int32_t num_bits = 0,
+    bool narrow_range = false) {
   FakeQuantOptionsBuilder builder_(_fbb);
   builder_.add_num_bits(num_bits);
   builder_.add_max(max);
   builder_.add_min(min);
+  builder_.add_narrow_range(narrow_range);
   return builder_.Finish();
 }
 
@@ -7896,6 +7914,7 @@
   { auto _e = min(); _o->min = _e; };
   { auto _e = max(); _o->max = _e; };
   { auto _e = num_bits(); _o->num_bits = _e; };
+  { auto _e = narrow_range(); _o->narrow_range = _e; };
 }
 
 inline flatbuffers::Offset<FakeQuantOptions> FakeQuantOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
@@ -7909,11 +7928,13 @@
   auto _min = _o->min;
   auto _max = _o->max;
   auto _num_bits = _o->num_bits;
+  auto _narrow_range = _o->narrow_range;
   return tflite::CreateFakeQuantOptions(
       _fbb,
       _min,
       _max,
-      _num_bits);
+      _num_bits,
+      _narrow_range);
 }
 
 inline OperatorCodeT *OperatorCode::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
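For flatbuffer producers, the regenerated builder API is used as below. This is a minimal sketch against the CreateFakeQuantOptions overload shown above; the include path is assumed from this repository's layout.

```cc
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/contrib/lite/schema/schema_generated.h"

// Build a FakeQuantOptions table with the new version-2 field set, using
// the CreateFakeQuantOptions overload shown above.
flatbuffers::Offset<tflite::FakeQuantOptions> BuildOptions(
    flatbuffers::FlatBufferBuilder& fbb) {
  return tflite::CreateFakeQuantOptions(fbb, /*min=*/-1.0f, /*max=*/1.0f,
                                        /*num_bits=*/8,
                                        /*narrow_range=*/true);
}
```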
diff --git a/tensorflow/contrib/lite/testing/generate_examples.py b/tensorflow/contrib/lite/testing/generate_examples.py
index 1093bd2..32d04c0 100644
--- a/tensorflow/contrib/lite/testing/generate_examples.py
+++ b/tensorflow/contrib/lite/testing/generate_examples.py
@@ -879,16 +879,24 @@
 
 def make_mean_tests(zip_path):
   """Make a set of tests to do mean."""
-
   return make_reduce_tests(tf.reduce_mean)(zip_path)
 
 
 def make_sum_tests(zip_path):
   """Make a set of tests to do sum."""
-
   return make_reduce_tests(tf.reduce_sum)(zip_path)
 
 
+def make_reduce_prod_tests(zip_path):
+  """Make a set of tests to do prod."""
+  return make_reduce_tests(tf.reduce_prod)(zip_path)
+
+
+def make_reduce_max_tests(zip_path):
+  """Make a set of tests to do max."""
+  return make_reduce_tests(tf.reduce_max)(zip_path)
+
+
 def make_exp_tests(zip_path):
   """Make a set of tests to do exp."""
 
diff --git a/tensorflow/contrib/lite/testing/generated_examples_zip_test.cc b/tensorflow/contrib/lite/testing/generated_examples_zip_test.cc
index 58f6bb5..ba36017 100644
--- a/tensorflow/contrib/lite/testing/generated_examples_zip_test.cc
+++ b/tensorflow/contrib/lite/testing/generated_examples_zip_test.cc
@@ -54,7 +54,6 @@
 // TODO(ahentz): make sure we clean this list up frequently.
 std::map<string, string> kBrokenTests = {
     {R"(^\/div.*int32)", "68808744"},
-    {R"(^\/sub.*int32)", "68808744"},
 
     // Pad and PadV2 only supports 4D tensors.
     {R"(^\/pad.*,input_shape=\[.,.\],paddings=\[\[.,.\],\[.,.\]\])",
diff --git a/tensorflow/contrib/lite/toco/BUILD b/tensorflow/contrib/lite/toco/BUILD
index 209dce5..bbce93f 100644
--- a/tensorflow/contrib/lite/toco/BUILD
+++ b/tensorflow/contrib/lite/toco/BUILD
@@ -212,7 +212,7 @@
         "graph_transformations/quantization_util.h",
         "graph_transformations/quantize.cc",
         "graph_transformations/quantize_weights.cc",
-        "graph_transformations/read_fake_quant_min_max.cc",
+        "graph_transformations/read_array_minmax_and_narrow_range_from_fake_quant.cc",
         "graph_transformations/remove_final_dequantize_op.cc",
         "graph_transformations/remove_tensorflow_assert.cc",
         "graph_transformations/remove_tensorflow_identity.cc",
@@ -245,10 +245,11 @@
         "graph_transformations/resolve_constant_strided_slice.cc",
         "graph_transformations/resolve_constant_transpose.cc",
         "graph_transformations/resolve_constant_unary.cc",
-        "graph_transformations/resolve_mean_attributes.cc",
+        "graph_transformations/resolve_fake_quant_args_from_vars.cc",
         "graph_transformations/resolve_multiply_by_zero.cc",
         "graph_transformations/resolve_pad_attributes.cc",
         "graph_transformations/resolve_padv2_attributes.cc",
+        "graph_transformations/resolve_reduce_attributes.cc",
         "graph_transformations/resolve_reorder_axes.cc",
         "graph_transformations/resolve_reshape_attributes.cc",
         "graph_transformations/resolve_slice_attributes.cc",
diff --git a/tensorflow/contrib/lite/toco/export_tensorflow.cc b/tensorflow/contrib/lite/toco/export_tensorflow.cc
index a08cdbf..17375d1 100644
--- a/tensorflow/contrib/lite/toco/export_tensorflow.cc
+++ b/tensorflow/contrib/lite/toco/export_tensorflow.cc
@@ -884,6 +884,9 @@
   if (src_op.num_bits) {
     (*fakequant_op->mutable_attr())["num_bits"].set_i(src_op.num_bits);
   }
+  if (src_op.narrow_range) {
+    (*fakequant_op->mutable_attr())["narrow_range"].set_b(src_op.narrow_range);
+  }
 }
 
 void ConvertMaxPoolOperator(const MaxPoolOperator& src_op,
@@ -1620,10 +1623,11 @@
   CreateSliceInput(src_op.inputs[2], src_op.size, tensorflow_graph);
 }
 
-void ConvertMeanOperator(const Model& model, const MeanOperator& src_op,
-                         GraphDef* tensorflow_graph) {
+template <typename T>
+void ConvertReduceOperator(const Model& model, const T& src_op,
+                           GraphDef* tensorflow_graph, const string& op_name) {
   tensorflow::NodeDef* new_op = tensorflow_graph->add_node();
-  new_op->set_op("Mean");
+  new_op->set_op(op_name);
   new_op->set_name(src_op.outputs[0]);
   CHECK_EQ(src_op.inputs.size(), 2);
   *new_op->add_input() = src_op.inputs[0];
@@ -1958,8 +1962,20 @@
         model, static_cast<const StridedSliceOperator&>(src_op),
         tensorflow_graph);
   } else if (src_op.type == OperatorType::kMean) {
-    ConvertMeanOperator(model, static_cast<const MeanOperator&>(src_op),
-                        tensorflow_graph);
+    ConvertReduceOperator(model, static_cast<const MeanOperator&>(src_op),
+                          tensorflow_graph, "Mean");
+  } else if (src_op.type == OperatorType::kSum) {
+    ConvertReduceOperator(model,
+                          static_cast<const TensorFlowSumOperator&>(src_op),
+                          tensorflow_graph, "Sum");
+  } else if (src_op.type == OperatorType::kReduceProd) {
+    ConvertReduceOperator(model,
+                          static_cast<const TensorFlowProdOperator&>(src_op),
+                          tensorflow_graph, "Prod");
+  } else if (src_op.type == OperatorType::kReduceMax) {
+    ConvertReduceOperator(model,
+                          static_cast<const TensorFlowMaxOperator&>(src_op),
+                          tensorflow_graph, "Max");
   } else if (src_op.type == OperatorType::kSub) {
     ConvertSubOperator(model, static_cast<const SubOperator&>(src_op),
                        tensorflow_graph);
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/dequantize.cc b/tensorflow/contrib/lite/toco/graph_transformations/dequantize.cc
index 2c7ffe4..1688586 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/dequantize.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/dequantize.cc
@@ -159,6 +159,7 @@
   new_array.GetOrCreateMinMax() = array->GetMinMax();
   fakequant_op->minmax.reset(new MinMax);
   *fakequant_op->minmax = array->GetMinMax();
+  fakequant_op->narrow_range = array->narrow_range;
   if (must_insert_fakequant_before) {
     for (const auto& op : model->operators) {
       for (string& output : op->outputs) {
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/graph_transformations.h b/tensorflow/contrib/lite/toco/graph_transformations/graph_transformations.h
index 8cd1298..8db7df5 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/graph_transformations.h
+++ b/tensorflow/contrib/lite/toco/graph_transformations/graph_transformations.h
@@ -159,7 +159,7 @@
 DECLARE_GRAPH_TRANSFORMATION(ResolveConstantUnaryOperator)
 DECLARE_GRAPH_TRANSFORMATION(CreateIm2colArrays)
 DECLARE_GRAPH_TRANSFORMATION(DropIm2colArrays)
-DECLARE_GRAPH_TRANSFORMATION(ReadFakeQuantMinMax)
+DECLARE_GRAPH_TRANSFORMATION(ReadArrayMinmaxAndNarrowRangeFromFakeQuant)
 DECLARE_GRAPH_TRANSFORMATION(ReorderElementwiseUnary)
 DECLARE_GRAPH_TRANSFORMATION(ReorderReshapeTranspose)
 DECLARE_GRAPH_TRANSFORMATION(ResolveReorderAxes)
@@ -180,7 +180,7 @@
 DECLARE_GRAPH_TRANSFORMATION(ResolvePadV2Attributes)
 DECLARE_GRAPH_TRANSFORMATION(ResolveStridedSliceAttributes)
 DECLARE_GRAPH_TRANSFORMATION(ResolveSliceAttributes)
-DECLARE_GRAPH_TRANSFORMATION(ResolveMeanAttributes)
+DECLARE_GRAPH_TRANSFORMATION(ResolveReduceAttributes)
 DECLARE_GRAPH_TRANSFORMATION(ResolveTransposeAttributes)
 DECLARE_GRAPH_TRANSFORMATION(ResolveConstantRandomUniform)
 DECLARE_GRAPH_TRANSFORMATION(ResolveConstantRange)
@@ -194,6 +194,7 @@
 DECLARE_GRAPH_TRANSFORMATION(Dequantize)
 DECLARE_GRAPH_TRANSFORMATION(UnpartitionEmbeddingLookup)
 DECLARE_GRAPH_TRANSFORMATION(ShuffleFCWeights)
+DECLARE_GRAPH_TRANSFORMATION(ResolveFakeQuantArgsFromVars)
 
 class PropagateDefaultMinMax : public GraphTransformation {
  public:
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/make_initial_dequantize_operator.cc b/tensorflow/contrib/lite/toco/graph_transformations/make_initial_dequantize_operator.cc
index 45d9f73..f684de0 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/make_initial_dequantize_operator.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/make_initial_dequantize_operator.cc
@@ -85,15 +85,8 @@
   dequantized_input_minmax = input_minmax;
   auto& input_qparams = input_array.GetOrCreateQuantizationParams();
   input_array.data_type = input_array.final_data_type;
-  if (input_array.data_type == ArrayDataType::kUint8) {
-    GetQuantizationParamsFromMinMax<ArrayDataType::kUint8>(input_minmax,
-                                                           &input_qparams);
-  } else if (input_array.data_type == ArrayDataType::kInt16) {
-    GetQuantizationParamsFromMinMax<ArrayDataType::kInt16>(input_minmax,
-                                                           &input_qparams);
-  } else {
-    LOG(FATAL) << "unhandled data type";
-  }
+  ChooseQuantizationParamsForArrayAndQuantizedDataType(
+      input_array, input_array.data_type, &input_qparams);
 
   transformation->AddMessageF(
       "Created %s"
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/propagate_fake_quant_num_bits.cc b/tensorflow/contrib/lite/toco/graph_transformations/propagate_fake_quant_num_bits.cc
index 53fc87d..3ad6b0e 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/propagate_fake_quant_num_bits.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/propagate_fake_quant_num_bits.cc
@@ -66,24 +66,10 @@
         "Rescaling min/max from %g,%g (%s) to %g,%g (%s)", array_minmax.min,
         array_minmax.max, ArrayDataTypeName(array->data_type), min, max,
         ArrayDataTypeName(new_data_type));
-
     array_minmax.min = min;
     array_minmax.max = max;
-    switch (new_data_type) {
-      case ArrayDataType::kUint8:
-        GetQuantizationParamsFromMinMax<ArrayDataType::kUint8>(
-            array_minmax, array->quantization_params.get());
-        break;
-      case ArrayDataType::kInt16:
-        GetQuantizationParamsFromMinMax<ArrayDataType::kInt16>(
-            array_minmax, array->quantization_params.get());
-        break;
-      default:
-        CHECK(false) << "Unsupported quantized data type: "
-                     << ArrayDataTypeName(new_data_type);
-        return false;
-    }
-
+    ChooseQuantizationParamsForArrayAndQuantizedDataType(
+        *array, new_data_type, array->quantization_params.get());
     // Directly change the type as the array was already quantized.
     array->data_type = new_data_type;
     changed = true;
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc b/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc
index 4f95c57..f422e3a 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc
@@ -524,10 +524,12 @@
   switch (op.type) {
     case OperatorType::kMin:  //  Reduction Min
       return static_cast<const TensorFlowMinOperator&>(op).keep_dims;
-    case OperatorType::kMax:  //  Reduction Max
+    case OperatorType::kReduceMax:  //  Reduction Max
       return static_cast<const TensorFlowMaxOperator&>(op).keep_dims;
     case OperatorType::kSum:
       return static_cast<const TensorFlowSumOperator&>(op).keep_dims;
+    case OperatorType::kReduceProd:
+      return static_cast<const TensorFlowProdOperator&>(op).keep_dims;
     case OperatorType::kMean:
       return static_cast<const MeanOperator&>(op).keep_dims;
     default:
@@ -1606,8 +1608,9 @@
       ProcessL2PoolOperator(model, static_cast<L2PoolOperator*>(op));
       break;
     case OperatorType::kMin:  //  Reduction Min
-    case OperatorType::kMax:  //  Reduction Max
+    case OperatorType::kReduceMax:  //  Reduction Max
     case OperatorType::kSum:
+    case OperatorType::kReduceProd:
     case OperatorType::kMean:
       ProcessTensorFlowReductionOperator(model, op);
       break;
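
For context on the `keep_dims` flags being wired up above: `keep_dims` decides whether each reduced axis is retained with size 1 or dropped from the output shape. A minimal standalone sketch of the shape rule (not toco code):

```
#include <set>
#include <vector>

// Sketch: output shape of a reduction over `axes`.
std::vector<int> ReducedShape(const std::vector<int>& input_shape,
                              const std::set<int>& axes, bool keep_dims) {
  std::vector<int> output_shape;
  for (int i = 0; i < static_cast<int>(input_shape.size()); ++i) {
    if (axes.count(i) == 0) {
      output_shape.push_back(input_shape[i]);  // axis not reduced
    } else if (keep_dims) {
      output_shape.push_back(1);  // reduced axis kept with size 1
    }  // else: the reduced axis is dropped entirely
  }
  return output_shape;
}
// E.g. [2, 3, 4] reduced over axis 1 gives [2, 1, 4] with keep_dims=true
// and [2, 4] with keep_dims=false.
```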
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/quantization_util.cc b/tensorflow/contrib/lite/toco/graph_transformations/quantization_util.cc
index d74cad9..4473339 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/quantization_util.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/quantization_util.cc
@@ -74,46 +74,54 @@
   }
 }
 
-void GetQuantizationParams(ArrayDataType data_type, const MinMax& minmax,
-                           QuantizationParams* quantization_params) {
-  switch (data_type) {
+template <ArrayDataType A>
+void ChooseQuantizationParamsForArrayAndQuantizedDataType(
+    const Array& array, QuantizationParams* quantization_params) {
+  *quantization_params = ::tflite::ChooseQuantizationParams<DataType<A>>(
+      array.minmax->min, array.minmax->max, array.narrow_range);
+}
+
+void ChooseQuantizationParamsForArrayAndQuantizedDataType(
+    const Array& array, ArrayDataType quantized_data_type,
+    QuantizationParams* quantization_params) {
+  switch (quantized_data_type) {
     case ArrayDataType::kInt8:
-      GetQuantizationParamsFromMinMax<ArrayDataType::kInt8>(
-          minmax, quantization_params);
+      ChooseQuantizationParamsForArrayAndQuantizedDataType<
+          ArrayDataType::kInt8>(array, quantization_params);
       break;
     case ArrayDataType::kUint8:
-      GetQuantizationParamsFromMinMax<ArrayDataType::kUint8>(
-          minmax, quantization_params);
+      ChooseQuantizationParamsForArrayAndQuantizedDataType<
+          ArrayDataType::kUint8>(array, quantization_params);
       break;
     case ArrayDataType::kInt16:
-      GetQuantizationParamsFromMinMax<ArrayDataType::kInt16>(
-          minmax, quantization_params);
+      ChooseQuantizationParamsForArrayAndQuantizedDataType<
+          ArrayDataType::kInt16>(array, quantization_params);
       break;
     case ArrayDataType::kUint16:
-      GetQuantizationParamsFromMinMax<ArrayDataType::kUint16>(
-          minmax, quantization_params);
+      ChooseQuantizationParamsForArrayAndQuantizedDataType<
+          ArrayDataType::kUint16>(array, quantization_params);
       break;
     case ArrayDataType::kInt32:
-      GetQuantizationParamsFromMinMax<ArrayDataType::kInt32>(
-          minmax, quantization_params);
+      ChooseQuantizationParamsForArrayAndQuantizedDataType<
+          ArrayDataType::kInt32>(array, quantization_params);
       break;
     case ArrayDataType::kUint32:
-      GetQuantizationParamsFromMinMax<ArrayDataType::kUint32>(
-          minmax, quantization_params);
+      ChooseQuantizationParamsForArrayAndQuantizedDataType<
+          ArrayDataType::kUint32>(array, quantization_params);
       break;
     case ArrayDataType::kInt64:
-      GetQuantizationParamsFromMinMax<ArrayDataType::kInt64>(
-          minmax, quantization_params);
+      ChooseQuantizationParamsForArrayAndQuantizedDataType<
+          ArrayDataType::kInt64>(array, quantization_params);
       break;
     case ArrayDataType::kUint64:
-      GetQuantizationParamsFromMinMax<ArrayDataType::kUint64>(
-          minmax, quantization_params);
+      ChooseQuantizationParamsForArrayAndQuantizedDataType<
+          ArrayDataType::kUint64>(array, quantization_params);
       break;
     case ArrayDataType::kFloat:
     case ArrayDataType::kNone:
     default:
       LOG(FATAL) << "Unhandled final quantization type "
-                 << static_cast<int>(data_type);
+                 << static_cast<int>(quantized_data_type);
   }
 }
 
@@ -121,8 +129,8 @@
 
 template <ArrayDataType A>
 std::unique_ptr<GenericBuffer> QuantizeBuffer(
-    const GenericBuffer& buffer,
-    const QuantizationParams& quantization_params) {
+    const Array& array, const QuantizationParams& quantization_params) {
+  const GenericBuffer& buffer = *array.buffer;
   const auto inverse_scale = 1. / quantization_params.scale;
   CHECK(buffer.type == ArrayDataType::kFloat);
   const auto& float_buffer =
@@ -140,8 +148,15 @@
     } else {
       scaled_val = quantization_params.zero_point + inverse_scale * src_val;
     }
-    quantized_buffer->data[i] =
-        tflite::SafeCast<DataType<A>>(std::round(scaled_val));
+    auto integer_val = tflite::SafeCast<DataType<A>>(std::round(scaled_val));
+    // In addition to its effect on the choice of quantization params upstream
+    // of here, narrow_range also means nudging the min quantized value by +1,
+    // so e.g. uint8 values get constrained to [1, 255].
+    if (integer_val == std::numeric_limits<DataType<A>>::min() &&
+        array.narrow_range) {
+      integer_val++;
+    }
+    quantized_buffer->data[i] = integer_val;
   }
   return std::unique_ptr<GenericBuffer>(quantized_buffer);
 }
@@ -155,7 +170,7 @@
   CHECK(!array.quantization_params);
   array.GetOrCreateQuantizationParams() = quantization_params;
   if (array.buffer) {
-    array.buffer = QuantizeBuffer<A>(*array.buffer, quantization_params);
+    array.buffer = QuantizeBuffer<A>(array, quantization_params);
   }
   array.data_type = A;
   array.final_data_type = A;
@@ -210,8 +225,8 @@
     } else {
       // Work around cases where we are asking for this prior to the Quantize
       // transformation having added the quantization_params.
-      GetQuantizationParams(quantized_data_type, *array.minmax,
-                            &quantization_params);
+      ChooseQuantizationParamsForArrayAndQuantizedDataType(
+          array, quantized_data_type, &quantization_params);
       transformation->AddMessageF(
           "No quantization params - infering from data type %s with minmax "
           "%g,%g as zero_point=%g, scale=%g",
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/quantization_util.h b/tensorflow/contrib/lite/toco/graph_transformations/quantization_util.h
index 79a2ce7..cf093c6 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/quantization_util.h
+++ b/tensorflow/contrib/lite/toco/graph_transformations/quantization_util.h
@@ -38,21 +38,11 @@
 ArrayDataType GetQuantizedDataType(const Array& array,
                                    ArrayDataType default_type);
 
-// Returns the quantization params for the array with the given data type and
-// minmax.
-void GetQuantizationParams(ArrayDataType data_type, const MinMax& minmax,
-                           QuantizationParams* quantization_params);
-
-// Returns the quantization params for the data type and minmax values.
-template <ArrayDataType A>
-void GetQuantizationParamsFromMinMax(const MinMax& minmax,
-                                     QuantizationParams* quantization_params) {
-  using Integer = DataType<A>;
-  const double rmin = minmax.min;
-  const double rmax = minmax.max;
-  *quantization_params =
-      ::tflite::ChooseQuantizationParams<Integer>(rmin, rmax);
-}
+// Chooses the quantization params for a given array and a given target
+// quantized data type (which may not be the array's current data type).
+void ChooseQuantizationParamsForArrayAndQuantizedDataType(
+    const Array& array, ArrayDataType quantized_data_type,
+    QuantizationParams* quantization_params);
 
 // Quantizes an array by setting its data type and (if constant) quantizing
 // all values in the array.
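
A short usage fragment for the renamed entry point, using toco's own types; it assumes `array.minmax` is already populated, since the function reads min, max, and `narrow_range` from the array:

```
// Fragment: choose uint8 quantization params for an annotated array.
QuantizationParams qparams;
ChooseQuantizationParamsForArrayAndQuantizedDataType(
    array, ArrayDataType::kUint8, &qparams);
// qparams defines the affine map: real = scale * (quantized - zero_point).
```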
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/quantize.cc b/tensorflow/contrib/lite/toco/graph_transformations/quantize.cc
index 58885b4..5be2757 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/quantize.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/quantize.cc
@@ -212,13 +212,15 @@
   if (op.type == OperatorType::kLstmCell) {
     if (input_index == LstmCellOperator::PREV_STATE_INPUT) {
       *quantized_data_type = ArrayDataType::kInt16;
-      GetQuantizationParams(*quantized_data_type, minmax, quantization_params);
+      ChooseQuantizationParamsForArrayAndQuantizedDataType(
+          array, *quantized_data_type, quantization_params);
       return true;
     }
   }
 
   *quantized_data_type = GetQuantizedDataType(array, ArrayDataType::kUint8);
-  GetQuantizationParams(*quantized_data_type, minmax, quantization_params);
+  ChooseQuantizationParamsForArrayAndQuantizedDataType(
+      array, *quantized_data_type, quantization_params);
   transformation->AddMessageF(
       "For input array %s with min=%g, max=%g, chose to quantize as %s (f=%s) "
       "with zero_point=%d, scale=%g",
@@ -358,12 +360,14 @@
     if (output_index == LstmCellOperator::STATE_OUTPUT ||
         output_index == LstmCellOperator::ACTIV_TEMP) {
       *quantized_data_type = ArrayDataType::kInt16;
-      GetQuantizationParams(*quantized_data_type, minmax, quantization_params);
+      ChooseQuantizationParamsForArrayAndQuantizedDataType(
+          array, *quantized_data_type, quantization_params);
       return true;
     }
   }
   *quantized_data_type = GetQuantizedDataType(array, ArrayDataType::kUint8);
-  GetQuantizationParams(*quantized_data_type, minmax, quantization_params);
+  ChooseQuantizationParamsForArrayAndQuantizedDataType(
+      array, *quantized_data_type, quantization_params);
   transformation->AddMessageF(
       "For output array %s with min=%g, max=%g"
       ", chose to quantize as %s with zero_point=%d"
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/quantize_weights.cc b/tensorflow/contrib/lite/toco/graph_transformations/quantize_weights.cc
index 88ea094..7a8515f 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/quantize_weights.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/quantize_weights.cc
@@ -36,10 +36,8 @@
   const std::vector<float>& float_vals =
       array.GetBuffer<ArrayDataType::kFloat>().data;
   auto minmax = std::minmax_element(float_vals.begin(), float_vals.end());
-  MinMax toco_minmax;
-  toco_minmax.min = *minmax.first;
-  toco_minmax.max = *minmax.second;
-  GetQuantizationParams(ArrayDataType::kUint8, toco_minmax, params);
+  *params = tflite::ChooseQuantizationParams<uint8>(
+      *minmax.first, *minmax.second, array.narrow_range);
 }
 
 }  // namespace
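
For intuition about what `tflite::ChooseQuantizationParams<uint8>` returns here, a simplified sketch of the standard affine scheme; the real implementation also nudges `zero_point` to an exact integer and handles degenerate ranges, so this shows only the core arithmetic:

```
#include <algorithm>
#include <cmath>

struct SimpleQParams { double scale; int zero_point; };

// Sketch of the affine uint8 scheme: real = scale * (quantized - zero_point).
SimpleQParams ChooseUint8Params(double rmin, double rmax, bool narrow_range) {
  rmin = std::min(rmin, 0.0);  // the represented range must contain 0
  rmax = std::max(rmax, 0.0);
  const int qmin = narrow_range ? 1 : 0;  // narrow_range shrinks the grid
  const int qmax = 255;
  const double scale = (rmax - rmin) / (qmax - qmin);
  const int zero_point = static_cast<int>(std::round(qmin - rmin / scale));
  return {scale, std::min(qmax, std::max(qmin, zero_point))};
}
// E.g. rmin=-1, rmax=1, narrow_range=false:
//   scale = 2/255 ~= 0.00784, zero_point = 128.
```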
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/read_array_minmax_and_narrow_range_from_fake_quant.cc b/tensorflow/contrib/lite/toco/graph_transformations/read_array_minmax_and_narrow_range_from_fake_quant.cc
new file mode 100644
index 0000000..5b41c49
--- /dev/null
+++ b/tensorflow/contrib/lite/toco/graph_transformations/read_array_minmax_and_narrow_range_from_fake_quant.cc
@@ -0,0 +1,78 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include <algorithm>
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "tensorflow/contrib/lite/toco/graph_transformations/graph_transformations.h"
+#include "tensorflow/contrib/lite/toco/model.h"
+#include "tensorflow/contrib/lite/toco/tooling_util.h"
+#include "tensorflow/core/platform/logging.h"
+
+namespace toco {
+
+namespace {
+
+bool ApplyAttrsToArray(GraphTransformation* transformation, Model* model,
+                       const FakeQuantOperator& fq_op,
+                       const string& array_name) {
+  bool changed = false;
+  auto& annotated_array = model->GetArray(array_name);
+  if (!annotated_array.minmax) {
+    const MinMax& minmax = *fq_op.minmax;
+    annotated_array.GetOrCreateMinMax() = minmax;
+    transformation->AddMessageF(
+        "Read min/max annotation for array %s: min=%g, max=%g", array_name,
+        minmax.min, minmax.max);
+    changed = true;
+  }
+  if (fq_op.narrow_range && !annotated_array.narrow_range) {
+    annotated_array.narrow_range = true;
+    transformation->AddMessageF("Read narrow_range annotation for array %s",
+                                array_name);
+    changed = true;
+  }
+  return changed;
+}
+
+}  // end namespace
+
+bool ReadArrayMinmaxAndNarrowRangeFromFakeQuant::Run(Model* model,
+                                                     std::size_t op_index) {
+  const auto fakequant_it = model->operators.begin() + op_index;
+  auto* fakequant_base_op = fakequant_it->get();
+  if (fakequant_base_op->type != OperatorType::kFakeQuant) {
+    return false;
+  }
+  auto* fq_op = static_cast<FakeQuantOperator*>(fakequant_base_op);
+
+  if (!fq_op->minmax) {
+    // The minmax must be resolved first by ResolveFakeQuantArgsFromVars.
+    return false;
+  }
+
+  // At this point, this FakeQuantOperator should have a MinMax
+  // attached to it, and should only have 1 input (it should not have
+  // 2nd and 3rd input arrays giving min and max anymore).
+  CHECK(fq_op->minmax);
+  CHECK_EQ(1, fq_op->inputs.size());
+
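+  // Note that || short-circuits: if annotating the input changes the model,
+  // the output is annotated on a later pass, since graph transformations
+  // are re-run until they reach a fixed point.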
+  return ApplyAttrsToArray(this, model, *fq_op, fq_op->inputs[0]) ||
+         ApplyAttrsToArray(this, model, *fq_op, fq_op->outputs[0]);
+}
+
+}  // namespace toco
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/read_fake_quant_min_max.cc b/tensorflow/contrib/lite/toco/graph_transformations/read_fake_quant_min_max.cc
deleted file mode 100644
index bdcca5b..0000000
--- a/tensorflow/contrib/lite/toco/graph_transformations/read_fake_quant_min_max.cc
+++ /dev/null
@@ -1,112 +0,0 @@
-/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-#include <algorithm>
-#include <memory>
-#include <string>
-#include <unordered_map>
-#include <vector>
-
-#include "tensorflow/contrib/lite/toco/graph_transformations/graph_transformations.h"
-#include "tensorflow/contrib/lite/toco/model.h"
-#include "tensorflow/contrib/lite/toco/tooling_util.h"
-#include "tensorflow/core/platform/logging.h"
-
-namespace toco {
-
-namespace {
-
-bool ApplyMinMaxToArray(GraphTransformation* transformation, Model* model,
-                        const MinMax& minmax, const string& array_name) {
-  auto& annotated_array = model->GetArray(array_name);
-  if (annotated_array.minmax) {
-    return false;
-  }
-  annotated_array.GetOrCreateMinMax() = minmax;
-  transformation->AddMessageF(
-      "Read min/max annotation for array %s: min=%g, max=%g", array_name,
-      minmax.min, minmax.max);
-  return true;
-}
-
-}  // end namespace
-
-bool ReadFakeQuantMinMax::Run(Model* model, std::size_t op_index) {
-  const auto fakequant_it = model->operators.begin() + op_index;
-  auto* fakequant_base_op = fakequant_it->get();
-  if (fakequant_base_op->type != OperatorType::kFakeQuant) {
-    return false;
-  }
-  auto* fakequant_op = static_cast<FakeQuantOperator*>(fakequant_base_op);
-
-  bool changed = false;
-
-  if (!fakequant_op->minmax) {
-    CHECK_EQ(fakequant_op->inputs.size(), 3);
-    // We need to yield until the min and max parameters have been
-    // resolved to constant arrays.
-    for (int i = 1; i <= 2; i++) {
-      if (!IsConstantParameterArray(*model, fakequant_op->inputs[1])) {
-        return false;
-      }
-    }
-
-    // Obtain the final min/max values
-    const auto& min_array = model->GetArray(fakequant_op->inputs[1]);
-    const auto& max_array = model->GetArray(fakequant_op->inputs[2]);
-    CHECK_EQ(RequiredBufferSizeForShape(min_array.shape()), 1);
-    CHECK_EQ(RequiredBufferSizeForShape(max_array.shape()), 1);
-    fakequant_op->minmax.reset(new MinMax);
-    MinMax& minmax = *fakequant_op->minmax;
-    minmax.min = min_array.GetBuffer<ArrayDataType::kFloat>().data[0];
-    minmax.max = max_array.GetBuffer<ArrayDataType::kFloat>().data[0];
-    // We always want [min, max] to contain 0.
-    if (minmax.min > 0 || minmax.max < 0) {
-      LOG(ERROR) << "For " << LogName(*fakequant_op) << " the MinMax range "
-                 << "[" << minmax.min << ", " << minmax.max
-                 << "] does not contain 0. "
-                 << "Proceeding by tweaking it to contain 0, which will result "
-                    "in poor accuracy.";
-    }
-    minmax.min = std::min(minmax.min, 0.);
-    minmax.max = std::max(minmax.max, 0.);
-
-    // We won't use the input arrays that provided these min and max
-    // values, anymore. Delete them unless they are used by something
-    // else.
-    for (int i = 1; i <= 2; i++) {
-      if (CountOpsWithInput(*model, fakequant_op->inputs[i]) == 1) {
-        model->EraseArray(fakequant_op->inputs[i]);
-      }
-    }
-    fakequant_op->inputs.resize(1);
-    changed = true;
-  }
-
-  // At this point, this FakeQuantOperator should have a MinMax
-  // attached to it, and should only have 1 input (it should not have
-  // 2nd and 3rd input arrays giving min and max anymore).
-  CHECK(fakequant_op->minmax);
-  CHECK_EQ(1, fakequant_op->inputs.size());
-
-  const MinMax& minmax = *fakequant_op->minmax;
-
-  // Record the MinMax info on the input and output arrays
-  changed |= ApplyMinMaxToArray(this, model, minmax, fakequant_op->inputs[0]);
-  changed |= ApplyMinMaxToArray(this, model, minmax, fakequant_op->outputs[0]);
-
-  return changed;
-}
-
-}  // namespace toco
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_fake_quant.cc b/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_fake_quant.cc
index efb7bb2..058f314 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_fake_quant.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_fake_quant.cc
@@ -25,6 +25,37 @@
 
 namespace toco {
 
+template <ArrayDataType A>
+void GetBoundsForQuantizedDataType(double* min, double* max) {
+  using limits = std::numeric_limits<DataType<A>>;
+  *min = limits::min();
+  *max = limits::max();
+}
+
+void GetBoundsForQuantizedDataType(ArrayDataType quantized_data_type,
+                                   double* min, double* max) {
+  switch (quantized_data_type) {
+    case ArrayDataType::kUint8:
+      return GetBoundsForQuantizedDataType<ArrayDataType::kUint8>(min, max);
+    case ArrayDataType::kInt8:
+      return GetBoundsForQuantizedDataType<ArrayDataType::kInt8>(min, max);
+    case ArrayDataType::kUint16:
+      return GetBoundsForQuantizedDataType<ArrayDataType::kUint16>(min, max);
+    case ArrayDataType::kInt16:
+      return GetBoundsForQuantizedDataType<ArrayDataType::kInt16>(min, max);
+    case ArrayDataType::kUint32:
+      return GetBoundsForQuantizedDataType<ArrayDataType::kUint32>(min, max);
+    case ArrayDataType::kInt32:
+      return GetBoundsForQuantizedDataType<ArrayDataType::kInt32>(min, max);
+    case ArrayDataType::kUint64:
+      return GetBoundsForQuantizedDataType<ArrayDataType::kUint64>(min, max);
+    case ArrayDataType::kInt64:
+      return GetBoundsForQuantizedDataType<ArrayDataType::kInt64>(min, max);
+    default:
+      LOG(FATAL) << "unhandled quantized data type";
+  }
+}
+
 bool ResolveConstantFakeQuant::Run(Model* model, std::size_t op_index) {
   const auto fakequant_it = model->operators.begin() + op_index;
   const auto* fakequant_base_op = fakequant_it->get();
@@ -76,14 +107,21 @@
   const int size = input_buffer.data.size();
   output_buffer.data.resize(size);
   QuantizationParams qparams;
-  GetQuantizationParamsFromMinMax<ArrayDataType::kUint8>(*fakequant_op->minmax,
-                                                         &qparams);
+  ChooseQuantizationParamsForArrayAndQuantizedDataType(
+      output_array, quantized_data_type, &qparams);
+  double quantized_min, quantized_max;
+  GetBoundsForQuantizedDataType(quantized_data_type, &quantized_min,
+                                &quantized_max);
+  if (fakequant_op->narrow_range) {
+    quantized_min++;
+  }
+
   for (int i = 0; i < size; i++) {
     const double src_val = input_buffer.data[i];
     const double unclamped_quantized_val =
         std::round(qparams.zero_point + src_val / qparams.scale);
-    const double quantized_val =
-        std::min(255., std::max(0., unclamped_quantized_val));
+    const double quantized_val = std::min(
+        quantized_max, std::max(quantized_min, unclamped_quantized_val));
     const double dst_val = qparams.scale * (quantized_val - qparams.zero_point);
     output_buffer.data[i] = dst_val;
   }
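
A runnable numeric walk through the new round/clamp/dequantize sequence, with values chosen for illustration:

```
#include <algorithm>
#include <cmath>
#include <cstdio>

// Sketch: one value through the math above, for uint8 with scale=0.1,
// zero_point=10, narrow_range=true.
int main() {
  const double scale = 0.1, zero_point = 10;
  double quantized_min = 0, quantized_max = 255;
  const bool narrow_range = true;
  if (narrow_range) quantized_min++;  // clamp range becomes [1, 255]
  const double src_val = -1.5;
  const double unclamped = std::round(zero_point + src_val / scale);  // -5
  const double quantized_val =
      std::min(quantized_max, std::max(quantized_min, unclamped));    // 1
  const double dst_val = scale * (quantized_val - zero_point);        // -0.9
  std::printf("%g -> %g\n", src_val, dst_val);  // old code would clamp to 0,
  return 0;                                     // giving -1 instead of -0.9
}
```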
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_unary.cc b/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_unary.cc
index f89ef85..51099cf 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_unary.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_unary.cc
@@ -58,7 +58,7 @@
     case OperatorType::kSquare:
     case OperatorType::kSum:
     case OperatorType::kMin:  //  Reduction Min
-    case OperatorType::kMax:  //  Reduction Max
+    case OperatorType::kReduceMax:  //  Reduction Max
     case OperatorType::kReshape:
     case OperatorType::kRelu6:
     case OperatorType::kRelu1:
@@ -207,7 +207,7 @@
       min = std::min(min, (*input_float_data)[i]);
     }
     output_float_data[0] = min;
-  } else if (unary_op->type == OperatorType::kMax) {
+  } else if (unary_op->type == OperatorType::kReduceMax) {
     // At the moment only full reduction across all dimensions is supported.
     // TODO(starka): Output should not be padded.
     for (int i = 0; i < output_dims_count; i++) {
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/resolve_fake_quant_args_from_vars.cc b/tensorflow/contrib/lite/toco/graph_transformations/resolve_fake_quant_args_from_vars.cc
new file mode 100644
index 0000000..0dda1fd
--- /dev/null
+++ b/tensorflow/contrib/lite/toco/graph_transformations/resolve_fake_quant_args_from_vars.cc
@@ -0,0 +1,80 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include <algorithm>
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "tensorflow/contrib/lite/toco/graph_transformations/graph_transformations.h"
+#include "tensorflow/contrib/lite/toco/model.h"
+#include "tensorflow/contrib/lite/toco/tooling_util.h"
+#include "tensorflow/core/platform/logging.h"
+
+namespace toco {
+
+bool ResolveFakeQuantArgsFromVars::Run(Model* model, std::size_t op_index) {
+  const auto fakequant_it = model->operators.begin() + op_index;
+  auto* fakequant_base_op = fakequant_it->get();
+  if (fakequant_base_op->type != OperatorType::kFakeQuant) {
+    return false;
+  }
+  auto* fakequant_op = static_cast<FakeQuantOperator*>(fakequant_base_op);
+
+  if (fakequant_op->minmax) {
+    // Already resolved.
+    return false;
+  }
+
+  CHECK_EQ(fakequant_op->inputs.size(), 3);
+  // We need to yield until the min and max parameters have been
+  // resolved to constant arrays.
+  for (int i = 1; i <= 2; i++) {
+    if (!IsConstantParameterArray(*model, fakequant_op->inputs[i])) {
+      return false;
+    }
+  }
+
+  // Obtain the final min/max values
+  const auto& min_array = model->GetArray(fakequant_op->inputs[1]);
+  const auto& max_array = model->GetArray(fakequant_op->inputs[2]);
+  CHECK_EQ(RequiredBufferSizeForShape(min_array.shape()), 1);
+  CHECK_EQ(RequiredBufferSizeForShape(max_array.shape()), 1);
+  fakequant_op->minmax.reset(new MinMax);
+  MinMax& minmax = *fakequant_op->minmax;
+  minmax.min = min_array.GetBuffer<ArrayDataType::kFloat>().data[0];
+  minmax.max = max_array.GetBuffer<ArrayDataType::kFloat>().data[0];
+  // We always want [min, max] to contain 0: quantization maps real 0 to the
+  // integer zero_point, so real 0 must lie inside the represented range.
+  if (minmax.min > 0 || minmax.max < 0) {
+    LOG(ERROR) << "For " << LogName(*fakequant_op) << " the MinMax range "
+               << "[" << minmax.min << ", " << minmax.max
+               << "] does not contain 0. "
+               << "Proceeding by tweaking it to contain 0, which will result "
+                  "in poor accuracy.";
+  }
+  minmax.min = std::min(minmax.min, 0.);
+  minmax.max = std::max(minmax.max, 0.);
+
+  // We won't use the input arrays that provided these min and max
+  // values anymore. Delete them unless they are used by something
+  // else.
+  for (int i = 1; i <= 2; i++) {
+    DeleteArrayIfUsedOnce(fakequant_op->inputs[i], model);
+  }
+  fakequant_op->inputs.resize(1);
+  return true;
+}
+
+}  // namespace toco
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/resolve_mean_attributes.cc b/tensorflow/contrib/lite/toco/graph_transformations/resolve_reduce_attributes.cc
similarity index 64%
rename from tensorflow/contrib/lite/toco/graph_transformations/resolve_mean_attributes.cc
rename to tensorflow/contrib/lite/toco/graph_transformations/resolve_reduce_attributes.cc
index 013b50a..5f8a06b 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/resolve_mean_attributes.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/resolve_reduce_attributes.cc
@@ -24,11 +24,8 @@
 
 namespace toco {
 
-bool ResolveMeanAttributes::Run(Model* model, std::size_t op_index) {
-  auto* mean_op = model->operators[op_index].get();
-  if (mean_op->type != OperatorType::kMean) return false;
-  auto* op = static_cast<MeanOperator*>(mean_op);
-
+template <typename T>
+bool ResolveAttributes(Model* model, T* op) {
   if (!op->axis.empty()) {
     // Attributes already resolved
     return false;
@@ -36,10 +33,26 @@
   if (op->inputs.size() != 2) return false;
   if (!IsConstantParameterArray(*model, op->inputs[1])) return false;
 
-  const auto& indices_array = model->GetArray(op->inputs[1]);
+  const Array& indices_array = model->GetArray(op->inputs[1]);
   if (!indices_array.has_shape()) return false;
   op->axis = indices_array.GetBuffer<ArrayDataType::kInt32>().data;
   return true;
 }
 
+bool ResolveReduceAttributes::Run(Model* model, std::size_t op_index) {
+  Operator* op = model->operators[op_index].get();
+  switch (op->type) {
+    case OperatorType::kMean:
+      return ResolveAttributes(model, static_cast<MeanOperator*>(op));
+    case OperatorType::kSum:
+      return ResolveAttributes(model, static_cast<TensorFlowSumOperator*>(op));
+    case OperatorType::kReduceProd:
+      return ResolveAttributes(model, static_cast<TensorFlowProdOperator*>(op));
+    case OperatorType::kReduceMax:
+      return ResolveAttributes(model, static_cast<TensorFlowMaxOperator*>(op));
+    default:
+      return false;
+  }
+}
+
 }  // namespace toco
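
The single template body works for all four cases because `MeanOperator`, `TensorFlowSumOperator`, `TensorFlowProdOperator`, and `TensorFlowMaxOperator` all expose the same `inputs` and `axis` members; the switch merely picks the concrete type. A minimal standalone sketch of the idiom, with hypothetical types:

```
#include <string>
#include <vector>

// Hypothetical operator structs sharing the members the template relies on
// (compile-time duck typing).
struct SumOp  { std::vector<std::string> inputs; std::vector<int> axis; };
struct ProdOp { std::vector<std::string> inputs; std::vector<int> axis; };

template <typename T>
bool AxisAlreadyResolved(const T& op) { return !op.axis.empty(); }
```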
diff --git a/tensorflow/contrib/lite/toco/import_tensorflow.cc b/tensorflow/contrib/lite/toco/import_tensorflow.cc
index bc439a2..2ffab49 100644
--- a/tensorflow/contrib/lite/toco/import_tensorflow.cc
+++ b/tensorflow/contrib/lite/toco/import_tensorflow.cc
@@ -755,6 +755,9 @@
   op->outputs.push_back(node.name());
   // tf.fake_quant_with_min_max_args num_bits defaults to 8.
   op->num_bits = HasAttr(node, "num_bits") ? GetIntAttr(node, "num_bits") : 8;
+  if (HasAttr(node, "narrow_range")) {
+    op->narrow_range = GetBoolAttr(node, "narrow_range");
+  }
   model->operators.emplace_back(op);
   return tensorflow::Status::OK();
 }
@@ -774,6 +777,9 @@
   }
   op->outputs.push_back(node.name());
   op->num_bits = HasAttr(node, "num_bits") ? GetIntAttr(node, "num_bits") : 8;
+  if (HasAttr(node, "narrow_range")) {
+    op->narrow_range = GetBoolAttr(node, "narrow_range");
+  }
   model->operators.emplace_back(op);
   return tensorflow::Status::OK();
 }
@@ -799,22 +805,6 @@
   return tensorflow::Status::OK();
 }
 
-tensorflow::Status ConvertSumOperator(
-    const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
-    Model* model) {
-  CHECK_EQ(node.op(), "Sum");
-  TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
-  auto* op = new TensorFlowSumOperator;
-  op->inputs.push_back(node.input(0));
-  op->inputs.push_back(node.input(1));
-  op->outputs.push_back(node.name());
-  model->operators.emplace_back(op);
-  if (HasAttr(node, "keep_dims")) {
-    op->keep_dims = GetBoolAttr(node, "keep_dims");
-  }
-  return tensorflow::Status::OK();
-}
-
 tensorflow::Status ConvertSplitOperator(
     const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
     Model* model) {
@@ -1052,22 +1042,6 @@
   return ConvertSimpleOperator<Op>(node, tf_import_flags, model);
 }
 
-tensorflow::Status ConvertMaxOperator(
-    const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
-    Model* model) {
-  CHECK_EQ(node.op(), "Max");
-  TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
-  auto* op = new TensorFlowMaxOperator;
-  op->inputs.push_back(node.input(0));
-  op->inputs.push_back(node.input(1));
-  op->outputs.push_back(node.name());
-  model->operators.emplace_back(op);
-  if (HasAttr(node, "keep_dims")) {
-    op->keep_dims = GetBoolAttr(node, "keep_dims");
-  }
-  return tensorflow::Status::OK();
-}
-
 tensorflow::Status ConvertMinOperator(
     const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
     Model* model) {
@@ -1406,12 +1380,12 @@
   return tensorflow::Status::OK();
 }
 
-tensorflow::Status ConvertMeanOperator(
+template <typename T>
+tensorflow::Status ConvertReduceOperator(
     const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
     Model* model) {
-  CHECK_EQ(node.op(), "Mean");
   TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
-  auto* op = new MeanOperator;
+  auto* op = new T;
   op->inputs.push_back(node.input(0));
   op->inputs.push_back(node.input(1));
   op->outputs.push_back(node.name());
@@ -1887,10 +1861,10 @@
       {"Log", ConvertSimpleOperator<LogOperator, 1>},
       {"LogSoftmax", ConvertSimpleOperator<LogSoftmaxOperator, 1>},
       {"MatMul", ConvertMatMulOperator},
-      {"Max", ConvertMaxOperator},
+      {"Max", ConvertReduceOperator<TensorFlowMaxOperator>},
       {"MaxPool", ConvertMaxPoolOperator},
       {"Maximum", ConvertSimpleOperator<TensorFlowMaximumOperator, 2>},
-      {"Mean", ConvertMeanOperator},
+      {"Mean", ConvertReduceOperator<MeanOperator>},
       {"Merge", ConvertSimpleOperator<TensorFlowMergeOperator, 2>},
       {"Min", ConvertMinOperator},
       {"Minimum", ConvertSimpleOperator<TensorFlowMinimumOperator, 2>},
@@ -1906,6 +1880,7 @@
       {"Placeholder", ConvertPlaceholderOperator},
       {"PlaceholderWithDefault", ConvertIdentityOperator},
       {"Pow", ConvertSimpleOperator<PowOperator, 2>},
+      {"Prod", ConvertReduceOperator<TensorFlowProdOperator>},
       {"RandomUniform", ConvertRandomUniform},
       {"Range", ConvertRangeOperator},
       {"Rank", ConvertSimpleOperator<RankOperator, 1>},
@@ -1932,7 +1907,7 @@
       {"StopGradient", ConvertIdentityOperator},
       {"StridedSlice", ConvertStridedSliceOperator},
       {"Sub", ConvertSimpleOperator<SubOperator, 2>},
-      {"Sum", ConvertSumOperator},
+      {"Sum", ConvertReduceOperator<TensorFlowSumOperator>},
       {"Svdf", ConvertSvdfOperator},
       {"Switch", ConvertSwitchOperator},
       {"Tanh", ConvertSimpleOperator<TanhOperator, 1>},
diff --git a/tensorflow/contrib/lite/toco/model.h b/tensorflow/contrib/lite/toco/model.h
index 8660464..37f4188 100644
--- a/tensorflow/contrib/lite/toco/model.h
+++ b/tensorflow/contrib/lite/toco/model.h
@@ -85,6 +85,7 @@
   kBatchToSpaceND,
   kPad,
   kPadV2,
+  kReduceProd,  // Reduction product
   kStridedSlice,
   kSlice,
   kSqueeze,
@@ -106,10 +107,10 @@
   kIdentity,
   kLess,
   kLessEqual,
-  kMax,      //  Reduction Max
-  kMaximum,  //  Element-wise Maximum
-  kMin,      //  Reduction Min
-  kMinimum,  //  Element-wise Minimum
+  kReduceMax,  //  Reduction Max
+  kMaximum,    //  Element-wise Maximum
+  kMin,        //  Reduction Min
+  kMinimum,    //  Element-wise Minimum
   kMatMul,
   kMerge,
   kNeg,
@@ -791,6 +792,7 @@
   FakeQuantOperator() : Operator(OperatorType::kFakeQuant) {}
   std::unique_ptr<MinMax> minmax;
   int num_bits = 8;
+  bool narrow_range = false;
 };
 
 // Element-wise division operator.
@@ -1228,6 +1230,19 @@
 // TensorFlow equivalent: Sum
 struct TensorFlowSumOperator : Operator {
   TensorFlowSumOperator() : Operator(OperatorType::kSum) {}
+  std::vector<int> axis;
+  bool keep_dims = false;
+};
+
+// Prod reduction: computes the product of all entries across the axes.
+//
+// Inputs:
+//   inputs[0]: required: the input array
+//
+// TensorFlow equivalent: Prod
+struct TensorFlowProdOperator : Operator {
+  TensorFlowProdOperator() : Operator(OperatorType::kReduceProd) {}
+  std::vector<int> axis;
   bool keep_dims = false;
 };
 
@@ -1387,16 +1402,15 @@
   TensorFlowNotEqualOperator() : Operator(OperatorType::kNotEqual) {}
 };
 
-// Global max reduction: computes the max of all of entries in the input array.
-// Thus the output is "0-dimensional": it consists of a single scalar value.
+// Max reduction: computes the max of all entries across the axes.
 //
 // Inputs:
 //   inputs[0]: required: the input array
 //
-// TensorFlow equivalent: Max --- except that we only support the special case
-// of global reduction across all dimensions.
+// TensorFlow equivalent: Max
 struct TensorFlowMaxOperator : Operator {
-  TensorFlowMaxOperator() : Operator(OperatorType::kMax) {}
+  TensorFlowMaxOperator() : Operator(OperatorType::kReduceMax) {}
+  std::vector<int> axis;
   bool keep_dims = false;
 };
 
@@ -1854,6 +1868,40 @@
   // If this is non-null, then these quantization parameters are to be used
   // to assign a meaning as real numbers to the elements of this array.
   std::unique_ptr<QuantizationParams> quantization_params;
+  // narrow_range is a detail of how toco handles FakeQuant operators that
+  // carry the narrow_range attribute; see
+  // https://www.tensorflow.org/api_docs/python/tf/fake_quant_with_min_max_vars
+  //
+  // For more context about what that is useful for, see the big comment in
+  // graph_transformations/ensure_uint8_weights_safe_for_fast_int8_kernels.cc
+  //
+  // The narrow_range flag applies only to quantized arrays, and changes
+  // their quantization in the following way when it is set to 'true':
+  // 1. The computation of {zero_point, scale} from {min, max} needs to be
+  //    amended so that the real min value will get quantized to
+  //    (min_quantized_value + 1) instead of just (min_quantized_value).
+  //    E.g. for uint8 quantization, the real min value should get quantized to
+  //    the uint8 value 1, not 0.
+  // 2. Quantized values should get clamped to the interval
+  //    [min_quantized_value + 1, max_quantized_value]. Equivalently, the
+  //    min_quantized_value should get nudged to (min_quantized_value + 1).
+  // The reason why 1. does not imply 2. is that real values may not belong to
+  // the stated [min, max] interval. Concretely, weights recorded at the last
+  // learning step may not fall in the [min, max] interval recorded over
+  // previous learning steps, as the values evolve across learning steps.
+  //
+  // Rationale why this is directly a field on Array:
+  // - This can't be just a field on FakeQuantOperator, because
+  //   FakeQuantOperators are gone (DropFakeQuant) before we get to using that
+  //   information (Quantize). We need a place to store that bit in the interim.
+  // - This can't be in QuantizationParams because we need to record this
+  //   ahead of quantization, and QuantizationParams are only created during
+  //   quantization.
+  // - This could be in MinMax, but that would be an abuse of what MinMax is
+  //   about, and would break existing code that assumes that a MinMax is just
+  //   a min and a max. Unlike MinMax which is agnostic as to the quantized
+  //   data type, narrow_range refers to values in the quantized data type.
+  bool narrow_range = false;
 
  private:
   std::unique_ptr<Shape> array_shape;
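
A small runnable illustration of points 1 and 2 from the comment above, for uint8 over the real range [-1, 1]:

```
#include <cstdio>

// Sketch: how narrow_range changes the uint8 quantization grid.
int main() {
  const double rmin = -1.0, rmax = 1.0;
  const double scale_wide = (rmax - rmin) / (255 - 0);    // ~0.00784
  const double scale_narrow = (rmax - rmin) / (255 - 1);  // ~0.00787
  std::printf("wide:   q in [0, 255], scale=%g\n", scale_wide);
  // With narrow_range, the real min maps to q=1 (point 1) and quantized
  // values below 1 are clamped up to 1 (point 2).
  std::printf("narrow: q in [1, 255], scale=%g\n", scale_narrow);
  return 0;
}
```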
diff --git a/tensorflow/contrib/lite/toco/tflite/operator.cc b/tensorflow/contrib/lite/toco/tflite/operator.cc
index 8377ba6..68d1358 100644
--- a/tensorflow/contrib/lite/toco/tflite/operator.cc
+++ b/tensorflow/contrib/lite/toco/tflite/operator.cc
@@ -290,8 +290,8 @@
   flatbuffers::Offset<TfLiteOptions> WriteOptions(
       const TocoOperator& op,
       flatbuffers::FlatBufferBuilder* builder) const override {
-    return ::tflite::CreateFakeQuantOptions(*builder, op.minmax->min,
-                                            op.minmax->max, op.num_bits);
+    return ::tflite::CreateFakeQuantOptions(
+        *builder, op.minmax->min, op.minmax->max, op.num_bits, op.narrow_range);
   }
   void ReadOptions(const TfLiteOptions& options,
                    TocoOperator* op) const override {
@@ -300,9 +300,13 @@
     minmax->max = options.max();
     op->minmax.reset(minmax);
     op->num_bits = options.num_bits();
+    op->narrow_range = options.narrow_range();
   }
 
-  int GetVersion(const Operator& op) const override { return 1; }
+  int GetVersion(const Operator& op) const override {
+    const auto& fq_op = static_cast<const FakeQuantOperator&>(op);
+    return fq_op.narrow_range ? 2 : 1;
+  }
 };
 
 class FullyConnected
@@ -763,6 +767,44 @@
   int GetVersion(const Operator& op) const override { return 1; }
 };
 
+class ReduceMax
+    : public BuiltinOperator<TensorFlowMaxOperator, ::tflite::ReducerOptions,
+                             ::tflite::BuiltinOptions_ReducerOptions> {
+ public:
+  using BuiltinOperator::BuiltinOperator;
+  flatbuffers::Offset<TfLiteOptions> WriteOptions(
+      const TocoOperator& op,
+      flatbuffers::FlatBufferBuilder* builder) const override {
+    return ::tflite::CreateReducerOptions(*builder, op.keep_dims);
+  }
+
+  void ReadOptions(const TfLiteOptions& options,
+                   TocoOperator* op) const override {
+    op->keep_dims = options.keep_dims();
+  }
+
+  int GetVersion(const Operator& op) const override { return 1; }
+};
+
+class ReduceProd
+    : public BuiltinOperator<TensorFlowProdOperator, ::tflite::ReducerOptions,
+                             ::tflite::BuiltinOptions_ReducerOptions> {
+ public:
+  using BuiltinOperator::BuiltinOperator;
+  flatbuffers::Offset<TfLiteOptions> WriteOptions(
+      const TocoOperator& op,
+      flatbuffers::FlatBufferBuilder* builder) const override {
+    return ::tflite::CreateReducerOptions(*builder, op.keep_dims);
+  }
+
+  void ReadOptions(const TfLiteOptions& options,
+                   TocoOperator* op) const override {
+    op->keep_dims = options.keep_dims();
+  }
+
+  int GetVersion(const Operator& op) const override { return 1; }
+};
+
 class ResizeBilinear
     : public BuiltinOperator<ResizeBilinearOperator,
                              ::tflite::ResizeBilinearOptions,
@@ -1179,6 +1221,10 @@
   ops.emplace_back(
       new Mean(::tflite::BuiltinOperator_MEAN, OperatorType::kMean));
   ops.emplace_back(new Sum(::tflite::BuiltinOperator_SUM, OperatorType::kSum));
+  ops.emplace_back(new ReduceProd(::tflite::BuiltinOperator_REDUCE_PROD,
+                                  OperatorType::kReduceProd));
+  ops.emplace_back(new ReduceMax(::tflite::BuiltinOperator_REDUCE_MAX,
+                                 OperatorType::kReduceMax));
   ops.emplace_back(new ResizeBilinear(::tflite::BuiltinOperator_RESIZE_BILINEAR,
                                       OperatorType::kResizeBilinear));
   ops.emplace_back(
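
The `GetVersion` override for FakeQuant above follows the usual TFLite flatbuffer convention: emit the lowest operator version whose readers understand the options actually serialized. A standalone restatement of that gating:

```
// Sketch: only require version 2 when the new option changes behavior, so
// models that don't use narrow_range remain readable by older runtimes.
int FakeQuantVersionFor(bool narrow_range) {
  return narrow_range ? 2 : 1;
}
```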
diff --git a/tensorflow/contrib/lite/toco/toco_tooling.cc b/tensorflow/contrib/lite/toco/toco_tooling.cc
index 3ca3633..7a0d960 100644
--- a/tensorflow/contrib/lite/toco/toco_tooling.cc
+++ b/tensorflow/contrib/lite/toco/toco_tooling.cc
@@ -105,14 +105,15 @@
   transformations->Add(new IdentifyRelu1);
   transformations->Add(new IdentifyPRelu);
   transformations->Add(new RemoveTrivialBinaryOperator);
-  transformations->Add(new ReadFakeQuantMinMax);
+  transformations->Add(new ResolveFakeQuantArgsFromVars);
+  transformations->Add(new ReadArrayMinmaxAndNarrowRangeFromFakeQuant);
   transformations->Add(new ResolveSpaceToBatchNDAttributes);
   transformations->Add(new ResolveBatchToSpaceNDAttributes);
   transformations->Add(new ResolvePadAttributes);
   transformations->Add(new ResolvePadV2Attributes);
   transformations->Add(new ResolveStridedSliceAttributes);
   transformations->Add(new ResolveSliceAttributes);
-  transformations->Add(new ResolveMeanAttributes);
+  transformations->Add(new ResolveReduceAttributes);
   transformations->Add(new ResolveConstantShapeOrRank);
   transformations->Add(new MakeInitialDequantizeOperator);
   transformations->Add(new UnpartitionEmbeddingLookup);
diff --git a/tensorflow/contrib/lite/toco/tooling_util.cc b/tensorflow/contrib/lite/toco/tooling_util.cc
index 4ec74e3..45cd10e 100644
--- a/tensorflow/contrib/lite/toco/tooling_util.cc
+++ b/tensorflow/contrib/lite/toco/tooling_util.cc
@@ -350,7 +350,7 @@
     HANDLE_OPERATORTYPENAME_CASE(Less)
     HANDLE_OPERATORTYPENAME_CASE(LessEqual)
     HANDLE_OPERATORTYPENAME_CASE(MatMul)
-    HANDLE_OPERATORTYPENAME_CASE(Max)      //  Reduction Max
+    HANDLE_OPERATORTYPENAME_CASE(ReduceMax)  //  Reduction Max
     HANDLE_OPERATORTYPENAME_CASE(Maximum)  //  Element-wise Maximum
     HANDLE_OPERATORTYPENAME_CASE(Merge)
     HANDLE_OPERATORTYPENAME_CASE(Min)      //  Reduction Min
@@ -385,6 +385,7 @@
     HANDLE_OPERATORTYPENAME_CASE(SpaceToBatchND)
     HANDLE_OPERATORTYPENAME_CASE(BatchToSpaceND)
     HANDLE_OPERATORTYPENAME_CASE(Mean)
+    HANDLE_OPERATORTYPENAME_CASE(ReduceProd)
     HANDLE_OPERATORTYPENAME_CASE(Svdf)
     HANDLE_OPERATORTYPENAME_CASE(ArgMax)
     HANDLE_OPERATORTYPENAME_CASE(ArgMin)
diff --git a/tensorflow/contrib/lite/tools/benchmark/README.md b/tensorflow/contrib/lite/tools/benchmark/README.md
index 9376930..f1e257a 100644
--- a/tensorflow/contrib/lite/tools/benchmark/README.md
+++ b/tensorflow/contrib/lite/tools/benchmark/README.md
@@ -115,7 +115,7 @@
 can use the following command:
 
 ```
-adb shell tasket f0 /data/local/tmp/benchmark_model \
+adb shell taskset f0 /data/local/tmp/benchmark_model \
   --graph=/data/local/tmp/mobilenet_quant_v1_224.tflite \
   --input_layer="input" \
   --input_layer_shape="1,224,224,3" \
diff --git a/tensorflow/contrib/lite/tools/benchmark/benchmark_params.h b/tensorflow/contrib/lite/tools/benchmark/benchmark_params.h
index 33448dd..c98f47b 100644
--- a/tensorflow/contrib/lite/tools/benchmark/benchmark_params.h
+++ b/tensorflow/contrib/lite/tools/benchmark/benchmark_params.h
@@ -31,6 +31,8 @@
 class BenchmarkParam {
  protected:
   enum class ParamType { TYPE_INT32, TYPE_FLOAT, TYPE_BOOL, TYPE_STRING };
+  template <typename T>
+  static ParamType GetValueType();
 
  public:
   template <typename T>
@@ -49,8 +51,6 @@
 
  private:
   static void AssertHasSameType(ParamType a, ParamType b);
-  template <typename T>
-  static ParamType GetValueType();
 
   const ParamType type_;
 };
diff --git a/tensorflow/contrib/lite/util.h b/tensorflow/contrib/lite/util.h
index 89d9b4f..3c48011 100644
--- a/tensorflow/contrib/lite/util.h
+++ b/tensorflow/contrib/lite/util.h
@@ -26,12 +26,17 @@
 
 namespace tflite {
 
-// Converts a `std::vector` to a `TfLiteIntArray`.
+// Converts a `std::vector` to a `TfLiteIntArray`. The caller takes ownership
+// of the returned pointer.
 TfLiteIntArray* ConvertVectorToTfLiteIntArray(const std::vector<int>& input);
 
+// Converts an array (of the given size) to a `TfLiteIntArray`. The caller
+// takes ownership of the returned pointer, and must make sure 'dims' has at
+// least 'rank' elements.
 TfLiteIntArray* ConvertArrayToTfLiteIntArray(const int rank, const int* dims);
 
 // Checks whether a `TfLiteIntArray` and an int array have matching elements.
+// The caller must guarantee that 'b' has at least 'b_size' elements.
 bool EqualArrayAndTfLiteIntArray(const TfLiteIntArray* a, const int b_size,
                                  const int* b);
 
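
A usage sketch of the ownership contract documented above; `TfLiteIntArrayFree` is the matching deallocator declared in `tensorflow/contrib/lite/context.h`:

```
#include <vector>

#include "tensorflow/contrib/lite/context.h"  // TfLiteIntArray, TfLiteIntArrayFree
#include "tensorflow/contrib/lite/util.h"

void Example() {
  std::vector<int> dims = {1, 224, 224, 3};
  TfLiteIntArray* array = tflite::ConvertVectorToTfLiteIntArray(dims);
  // ... use `array` ...
  TfLiteIntArrayFree(array);  // the caller owns the result and must free it
}
```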
diff --git a/tensorflow/contrib/proto/python/kernel_tests/BUILD b/tensorflow/contrib/proto/python/kernel_tests/BUILD
index 3f53ef1..3c6fde2 100644
--- a/tensorflow/contrib/proto/python/kernel_tests/BUILD
+++ b/tensorflow/contrib/proto/python/kernel_tests/BUILD
@@ -10,33 +10,12 @@
 load("//tensorflow/core:platform/default/build_config.bzl", "tf_proto_library")
 
 tf_py_test(
-    name = "decode_proto_fail_test",
-    size = "small",
-    srcs = ["decode_proto_fail_test.py"],
-    additional_deps = [
-        ":py_test_deps",
-        "//third_party/py/numpy",
-        "//tensorflow/contrib/proto:proto",
-        "//tensorflow/contrib/proto/python/ops:decode_proto_op_py",
-    ],
-    data = if_static(
-        [],
-        otherwise = [":libtestexample.so"],
-    ),
-    tags = [
-        "no_pip",  # TODO(b/78026780)
-        "no_windows",  # TODO(b/78028010)
-    ],
-)
-
-tf_py_test(
     name = "decode_proto_op_test",
     size = "small",
     srcs = ["decode_proto_op_test.py"],
     additional_deps = [
+        ":decode_proto_op_test_base",
         ":py_test_deps",
-        "@absl_py//absl/testing:parameterized",
-        "//third_party/py/numpy",
         "//tensorflow/contrib/proto:proto",
         "//tensorflow/contrib/proto/python/ops:decode_proto_op_py",
     ],
@@ -55,9 +34,8 @@
     size = "small",
     srcs = ["encode_proto_op_test.py"],
     additional_deps = [
+        ":encode_proto_op_test_base",
         ":py_test_deps",
-        "@absl_py//absl/testing:parameterized",
-        "//third_party/py/numpy",
         "//tensorflow/contrib/proto:proto",
         "//tensorflow/contrib/proto/python/ops:decode_proto_op_py",
         "//tensorflow/contrib/proto/python/ops:encode_proto_op_py",
@@ -73,8 +51,9 @@
 )
 
 py_library(
-    name = "test_base",
-    srcs = ["test_base.py"],
+    name = "proto_op_test_base",
+    testonly = 1,
+    srcs = ["proto_op_test_base.py"],
     deps = [
         ":test_example_proto_py",
         "//tensorflow/python:client_testlib",
@@ -82,13 +61,31 @@
 )
 
 py_library(
-    name = "py_test_deps",
+    name = "decode_proto_op_test_base",
+    testonly = 1,
+    srcs = ["decode_proto_op_test_base.py"],
     deps = [
-        ":test_base",
+        ":proto_op_test_base",
         ":test_example_proto_py",
+        "//third_party/py/numpy",
+        "@absl_py//absl/testing:parameterized",
     ],
 )
 
+py_library(
+    name = "encode_proto_op_test_base",
+    testonly = 1,
+    srcs = ["encode_proto_op_test_base.py"],
+    deps = [
+        ":proto_op_test_base",
+        ":test_example_proto_py",
+        "//third_party/py/numpy",
+        "@absl_py//absl/testing:parameterized",
+    ],
+)
+
+py_library(name = "py_test_deps")
+
 tf_proto_library(
     name = "test_example_proto",
     srcs = ["test_example.proto"],
diff --git a/tensorflow/contrib/proto/python/kernel_tests/decode_proto_fail_test.py b/tensorflow/contrib/proto/python/kernel_tests/decode_proto_fail_test.py
deleted file mode 100644
index 3b98286..0000000
--- a/tensorflow/contrib/proto/python/kernel_tests/decode_proto_fail_test.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# =============================================================================
-# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# =============================================================================
-
-# Python3 preparedness imports.
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import numpy as np
-
-from tensorflow.contrib.proto.python.kernel_tests import test_base
-from tensorflow.contrib.proto.python.ops import decode_proto_op
-from tensorflow.python.framework import dtypes
-from tensorflow.python.framework import errors
-from tensorflow.python.platform import test
-
-
-class DecodeProtoFailTest(test_base.ProtoOpTestBase):
-  """Test failure cases for DecodeToProto."""
-
-  def _TestCorruptProtobuf(self, sanitize):
-    """Test failure cases for DecodeToProto."""
-
-    # The goal here is to check the error reporting.
-    # Testing against a variety of corrupt protobufs is
-    # done by fuzzing.
-    corrupt_proto = 'This is not a binary protobuf'
-
-    # Numpy silently truncates the strings if you don't specify dtype=object.
-    batch = np.array(corrupt_proto, dtype=object)
-    msg_type = 'tensorflow.contrib.proto.TestCase'
-    field_names = ['sizes']
-    field_types = [dtypes.int32]
-
-    with self.test_session() as sess:
-      ctensor, vtensor = decode_proto_op.decode_proto(
-          batch,
-          message_type=msg_type,
-          field_names=field_names,
-          output_types=field_types,
-          sanitize=sanitize)
-      with self.assertRaisesRegexp(errors.DataLossError,
-                                   'Unable to parse binary protobuf'
-                                   '|Failed to consume entire buffer'):
-        _ = sess.run([ctensor] + vtensor)
-
-  def testCorrupt(self):
-    self._TestCorruptProtobuf(sanitize=False)
-
-  def testSanitizerCorrupt(self):
-    self._TestCorruptProtobuf(sanitize=True)
-
-
-if __name__ == '__main__':
-  test.main()
diff --git a/tensorflow/contrib/proto/python/kernel_tests/decode_proto_op_test.py b/tensorflow/contrib/proto/python/kernel_tests/decode_proto_op_test.py
index 2a07794..934035e 100644
--- a/tensorflow/contrib/proto/python/kernel_tests/decode_proto_op_test.py
+++ b/tensorflow/contrib/proto/python/kernel_tests/decode_proto_op_test.py
@@ -13,273 +13,22 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =============================================================================
-"""Table-driven test for decode_proto op.
+"""Tests for decode_proto op."""
 
-This test is run once with each of the *.TestCase.pbtxt files
-in the test directory.
-"""
 # Python3 preparedness imports.
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
-from absl.testing import parameterized
-import numpy as np
-
-
-from google.protobuf import text_format
-
-from tensorflow.contrib.proto.python.kernel_tests import test_base
-from tensorflow.contrib.proto.python.kernel_tests import test_example_pb2
+from tensorflow.contrib.proto.python.kernel_tests import decode_proto_op_test_base as test_base
 from tensorflow.contrib.proto.python.ops import decode_proto_op
-from tensorflow.python.framework import dtypes
 from tensorflow.python.platform import test
 
 
-class DecodeProtoOpTest(test_base.ProtoOpTestBase, parameterized.TestCase):
+class DecodeProtoOpTest(test_base.DecodeProtoOpTestBase):
 
-  def _compareValues(self, fd, vs, evs):
-    """Compare lists/arrays of field values."""
-
-    if len(vs) != len(evs):
-      self.fail('Field %s decoded %d outputs, expected %d' %
-                (fd.name, len(vs), len(evs)))
-    for i, ev in enumerate(evs):
-      # Special case fuzzy match for float32. TensorFlow seems to mess with
-      # MAX_FLT slightly and the test doesn't work otherwise.
-      # TODO(nix): ask on TF list about why MAX_FLT doesn't pass through.
-      if fd.cpp_type == fd.CPPTYPE_FLOAT:
-        # Numpy isclose() is better than assertIsClose() which uses an absolute
-        # value comparison.
-        self.assertTrue(
-            np.isclose(vs[i], ev), 'expected %r, actual %r' % (ev, vs[i]))
-      elif fd.cpp_type == fd.CPPTYPE_STRING:
-        # In Python3 string tensor values will be represented as bytes, so we
-        # reencode the proto values to match that.
-        self.assertEqual(vs[i], ev.encode('ascii'))
-      else:
-        # Doubles and other types pass through unscathed.
-        self.assertEqual(vs[i], ev)
-
-  def _compareRepeatedPrimitiveValue(self, batch_shape, sizes, fields,
-                                     field_dict):
-    """Compare protos of type RepeatedPrimitiveValue.
-
-    Args:
-      batch_shape: the shape of the input tensor of serialized messages.
-      sizes: int matrix of repeat counts returned by decode_proto
-      fields: list of test_example_pb2.FieldSpec (types and expected values)
-      field_dict: map from field names to decoded numpy tensors of values
-    """
-
-    # Check that expected values match.
-    for field in fields:
-      values = field_dict[field.name]
-      self.assertEqual(dtypes.as_dtype(values.dtype), field.dtype)
-
-      fd = field.expected.DESCRIPTOR.fields_by_name[field.name]
-
-      # Values has the same shape as the input plus an extra
-      # dimension for repeats.
-      self.assertEqual(list(values.shape)[:-1], batch_shape)
-
-      # Nested messages are represented as TF strings, requiring
-      # some special handling.
-      if field.name == 'message_value':
-        vs = []
-        for buf in values.flat:
-          msg = test_example_pb2.PrimitiveValue()
-          msg.ParseFromString(buf)
-          vs.append(msg)
-        evs = getattr(field.expected, field.name)
-        if len(vs) != len(evs):
-          self.fail('Field %s decoded %d outputs, expected %d' %
-                    (fd.name, len(vs), len(evs)))
-        for v, ev in zip(vs, evs):
-          self.assertEqual(v, ev)
-        continue
-
-      # This can be a little confusing. For testing we are using
-      # RepeatedPrimitiveValue in two ways: it's the proto that we
-      # decode for testing, and it's used in the expected value as a
-      # union type. The two cases are slightly different: this is the
-      # second case.
-      # We may be fetching the uint64_value from the test proto, but
-      # in the expected proto we store it in the int64_value field
-      # because TensorFlow doesn't support unsigned int64.
-      tf_type_to_primitive_value_field = {
-          dtypes.float32:
-              'float_value',
-          dtypes.float64:
-              'double_value',
-          dtypes.int32:
-              'int32_value',
-          dtypes.uint8:
-              'uint8_value',
-          dtypes.int8:
-              'int8_value',
-          dtypes.string:
-              'string_value',
-          dtypes.int64:
-              'int64_value',
-          dtypes.bool:
-              'bool_value',
-          # Unhandled TensorFlow types:
-          # DT_INT16 DT_COMPLEX64 DT_QINT8 DT_QUINT8 DT_QINT32
-          # DT_BFLOAT16 DT_QINT16 DT_QUINT16 DT_UINT16
-      }
-      tf_field_name = tf_type_to_primitive_value_field.get(field.dtype)
-      if tf_field_name is None:
-        self.fail('Unhandled tensorflow type %d' % field.dtype)
-
-      self._compareValues(fd, values.flat,
-                          getattr(field.expected, tf_field_name))
-
-  def _runDecodeProtoTests(self, fields, case_sizes, batch_shape, batch,
-                           message_type, message_format, sanitize,
-                           force_disordered=False):
-    """Run decode tests on a batch of messages.
-
-    Args:
-      fields: list of test_example_pb2.FieldSpec (types and expected values)
-      case_sizes: expected sizes array
-      batch_shape: the shape of the input tensor of serialized messages
-      batch: list of serialized messages
-      message_type: descriptor name for messages
-      message_format: format of messages, 'text' or 'binary'
-      sanitize: whether to sanitize binary protobuf inputs
-      force_disordered: whether to force fields encoded out of order.
-    """
-
-    if force_disordered:
-      # Exercise code path that handles out-of-order fields by prepending extra
-      # fields with tag numbers higher than any real field. Note that this won't
-      # work with sanitization because that forces reserialization using a
-      # trusted decoder and encoder.
-      assert not sanitize
-      extra_fields = test_example_pb2.ExtraFields()
-      extra_fields.string_value = 'IGNORE ME'
-      extra_fields.bool_value = False
-      extra_msg = extra_fields.SerializeToString()
-      batch = [extra_msg + msg for msg in batch]
-
-    # Numpy silently truncates the strings if you don't specify dtype=object.
-    batch = np.array(batch, dtype=object)
-    batch = np.reshape(batch, batch_shape)
-
-    field_names = [f.name for f in fields]
-    output_types = [f.dtype for f in fields]
-
-    with self.test_session() as sess:
-      sizes, vtensor = decode_proto_op.decode_proto(
-          batch,
-          message_type=message_type,
-          field_names=field_names,
-          output_types=output_types,
-          message_format=message_format,
-          sanitize=sanitize)
-
-      vlist = sess.run([sizes] + vtensor)
-      sizes = vlist[0]
-      # Values is a list of tensors, one for each field.
-      value_tensors = vlist[1:]
-
-      # Check that the repeat sizes are correct.
-      self.assertTrue(
-          np.all(np.array(sizes.shape) == batch_shape + [len(field_names)]))
-
-      # Check that the decoded sizes match the expected sizes.
-      self.assertEqual(len(sizes.flat), len(case_sizes))
-      self.assertTrue(
-          np.all(sizes.flat == np.array(
-              case_sizes, dtype=np.int32)))
-
-      field_dict = dict(zip(field_names, value_tensors))
-
-      self._compareRepeatedPrimitiveValue(batch_shape, sizes, fields,
-                                          field_dict)
-
-  @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
-  def testBinary(self, case):
-    batch = [primitive.SerializeToString() for primitive in case.primitive]
-    self._runDecodeProtoTests(
-        case.field,
-        case.sizes,
-        list(case.shape),
-        batch,
-        'tensorflow.contrib.proto.RepeatedPrimitiveValue',
-        'binary',
-        sanitize=False)
-
-  @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
-  def testBinaryDisordered(self, case):
-    batch = [primitive.SerializeToString() for primitive in case.primitive]
-    self._runDecodeProtoTests(
-        case.field,
-        case.sizes,
-        list(case.shape),
-        batch,
-        'tensorflow.contrib.proto.RepeatedPrimitiveValue',
-        'binary',
-        sanitize=False,
-        force_disordered=True)
-
-  @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
-  def testPacked(self, case):
-    # Now try with the packed serialization.
-    # We test the packed representations by loading the same test cases
-    # using PackedPrimitiveValue instead of RepeatedPrimitiveValue.
-    # To do this we rely on the text format being the same for packed and
-    # unpacked fields, and reparse the test message using the packed version
-    # of the proto.
-    packed_batch = [
-        # Note: float_format='.17g' is necessary to ensure preservation of
-        # doubles and floats in text format.
-        text_format.Parse(
-            text_format.MessageToString(
-                primitive, float_format='.17g'),
-            test_example_pb2.PackedPrimitiveValue()).SerializeToString()
-        for primitive in case.primitive
-    ]
-
-    self._runDecodeProtoTests(
-        case.field,
-        case.sizes,
-        list(case.shape),
-        packed_batch,
-        'tensorflow.contrib.proto.PackedPrimitiveValue',
-        'binary',
-        sanitize=False)
-
-  @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
-  def testText(self, case):
-    # Note: float_format='.17g' is necessary to ensure preservation of
-    # doubles and floats in text format.
-    text_batch = [
-        text_format.MessageToString(
-            primitive, float_format='.17g') for primitive in case.primitive
-    ]
-
-    self._runDecodeProtoTests(
-        case.field,
-        case.sizes,
-        list(case.shape),
-        text_batch,
-        'tensorflow.contrib.proto.RepeatedPrimitiveValue',
-        'text',
-        sanitize=False)
-
-  @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
-  def testSanitizerGood(self, case):
-    batch = [primitive.SerializeToString() for primitive in case.primitive]
-    self._runDecodeProtoTests(
-        case.field,
-        case.sizes,
-        list(case.shape),
-        batch,
-        'tensorflow.contrib.proto.RepeatedPrimitiveValue',
-        'binary',
-        sanitize=True)
+  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
+    super(DecodeProtoOpTest, self).__init__(decode_proto_op, methodName)
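+    # The op module is injected rather than imported by the base class so
+    # that, presumably, the same suite in decode_proto_op_test_base can be
+    # reused with other modules exposing the same decode_proto entry point.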
 
 
 if __name__ == '__main__':
diff --git a/tensorflow/contrib/proto/python/kernel_tests/decode_proto_op_test_base.py b/tensorflow/contrib/proto/python/kernel_tests/decode_proto_op_test_base.py
new file mode 100644
index 0000000..5f7f510
--- /dev/null
+++ b/tensorflow/contrib/proto/python/kernel_tests/decode_proto_op_test_base.py
@@ -0,0 +1,310 @@
+# =============================================================================
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+"""Tests for decode_proto op."""
+
+# Python3 preparedness imports.
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from absl.testing import parameterized
+import numpy as np
+
+
+from google.protobuf import text_format
+
+from tensorflow.contrib.proto.python.kernel_tests import proto_op_test_base as test_base
+from tensorflow.contrib.proto.python.kernel_tests import test_example_pb2
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors
+
+
+class DecodeProtoOpTestBase(test_base.ProtoOpTestBase, parameterized.TestCase):
+  """Base class for testing proto decoding ops."""
+
+  def __init__(self, decode_module, methodName='runTest'):  # pylint: disable=invalid-name
+    """DecodeProtoOpTestBase initializer.
+
+    Args:
+      decode_module: a module containing the `decode_proto_op` method
+      methodName: the name of the test method (same as for test.TestCase)
+    """
+
+    super(DecodeProtoOpTestBase, self).__init__(methodName)
+    self._decode_module = decode_module
+
+  def _compareValues(self, fd, vs, evs):
+    """Compare lists/arrays of field values."""
+
+    if len(vs) != len(evs):
+      self.fail('Field %s decoded %d outputs, expected %d' %
+                (fd.name, len(vs), len(evs)))
+    for i, ev in enumerate(evs):
+      # Special case fuzzy match for float32. TensorFlow seems to mess with
+      # MAX_FLT slightly and the test doesn't work otherwise.
+      # TODO(nix): ask on TF list about why MAX_FLT doesn't pass through.
+      if fd.cpp_type == fd.CPPTYPE_FLOAT:
+        # Numpy isclose() is better than assertAlmostEqual(), which uses an
+        # absolute-value comparison.
+        self.assertTrue(
+            np.isclose(vs[i], ev), 'expected %r, actual %r' % (ev, vs[i]))
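+        # A minimal sketch of why (values are illustrative): with
+        #   a = np.float32(3.402823466e+38)  # MAX_FLT rounded to float32
+        #   b = 3.402823466e+38              # the same literal as a double
+        # abs(a - b) is on the order of 1e29, far above any fixed absolute
+        # tolerance, yet np.isclose(a, b) is True because its tolerance
+        # scales with magnitude.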
+      elif fd.cpp_type == fd.CPPTYPE_STRING:
+        # In Python3 string tensor values will be represented as bytes, so we
+        # reencode the proto values to match that.
+        self.assertEqual(vs[i], ev.encode('ascii'))
+      else:
+        # Doubles and other types pass through unscathed.
+        self.assertEqual(vs[i], ev)
+
+  def _compareProtos(self, batch_shape, sizes, fields, field_dict):
+    """Compare protos of type TestValue.
+
+    Args:
+      batch_shape: the shape of the input tensor of serialized messages.
+      sizes: int matrix of repeat counts returned by decode_proto
+      fields: list of test_example_pb2.FieldSpec (types and expected values)
+      field_dict: map from field names to decoded numpy tensors of values
+    """
+
+    # Check that expected values match.
+    for field in fields:
+      values = field_dict[field.name]
+      self.assertEqual(dtypes.as_dtype(values.dtype), field.dtype)
+
+      fd = field.value.DESCRIPTOR.fields_by_name[field.name]
+
+      # Values has the same shape as the input plus an extra
+      # dimension for repeats.
+      self.assertEqual(list(values.shape)[:-1], batch_shape)
+
+      # Nested messages are represented as TF strings, requiring
+      # some special handling.
+      if field.name == 'message_value':
+        vs = []
+        for buf in values.flat:
+          msg = test_example_pb2.PrimitiveValue()
+          msg.ParseFromString(buf)
+          vs.append(msg)
+        evs = getattr(field.value, field.name)
+        if len(vs) != len(evs):
+          self.fail('Field %s decoded %d outputs, expected %d' %
+                    (fd.name, len(vs), len(evs)))
+        for v, ev in zip(vs, evs):
+          self.assertEqual(v, ev)
+        continue
+
+      # This can be a little confusing. For testing we are using TestValue in
+      # two ways: it's the proto that we decode for testing, and it's used in
+      # the expected value as a union type.
+      #
+      # The two cases are slightly different: this is the second case. We may be
+      # fetching the uint64_value from the test proto, but in the expected proto
+      # we store it in the int64_value field because TensorFlow doesn't support
+      # unsigned int64.
+      tf_type_to_primitive_value_field = {
+          dtypes.float32:
+              'float_value',
+          dtypes.float64:
+              'double_value',
+          dtypes.int32:
+              'int32_value',
+          dtypes.uint8:
+              'uint8_value',
+          dtypes.int8:
+              'int8_value',
+          dtypes.string:
+              'string_value',
+          dtypes.int64:
+              'int64_value',
+          dtypes.bool:
+              'bool_value',
+          # Unhandled TensorFlow types:
+          # DT_INT16 DT_COMPLEX64 DT_QINT8 DT_QUINT8 DT_QINT32
+          # DT_BFLOAT16 DT_QINT16 DT_QUINT16 DT_UINT16
+      }
+      tf_field_name = tf_type_to_primitive_value_field.get(field.dtype)
+      if tf_field_name is None:
+        self.fail('Unhandled tensorflow type %d' % field.dtype)
+
+      self._compareValues(fd, values.flat,
+                          getattr(field.value, tf_field_name))
+
+  def _runDecodeProtoTests(self, fields, case_sizes, batch_shape, batch,
+                           message_type, message_format, sanitize,
+                           force_disordered=False):
+    """Run decode tests on a batch of messages.
+
+    Args:
+      fields: list of test_example_pb2.FieldSpec (types and expected values)
+      case_sizes: expected sizes array
+      batch_shape: the shape of the input tensor of serialized messages
+      batch: list of serialized messages
+      message_type: descriptor name for messages
+      message_format: format of messages, 'text' or 'binary'
+      sanitize: whether to sanitize binary protobuf inputs
+      force_disordered: whether to force fields encoded out of order.
+    """
+
+    if force_disordered:
+      # Exercise code path that handles out-of-order fields by prepending extra
+      # fields with tag numbers higher than any real field. Note that this won't
+      # work with sanitization because that forces reserialization using a
+      # trusted decoder and encoder.
+      assert not sanitize
+      extra_fields = test_example_pb2.ExtraFields()
+      extra_fields.string_value = 'IGNORE ME'
+      extra_fields.bool_value = False
+      extra_msg = extra_fields.SerializeToString()
+      batch = [extra_msg + msg for msg in batch]
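+      # This relies on the protobuf wire format being a flat sequence of
+      # (tag, value) pairs: prepending the serialized ExtraFields message
+      # puts its higher-numbered tags first, so the real fields' tags are
+      # no longer in ascending order when the kernel parses the buffer.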
+
+    # Numpy silently truncates the strings if you don't specify dtype=object.
+    batch = np.array(batch, dtype=object)
+    batch = np.reshape(batch, batch_shape)
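+    # A sketch of the failure mode avoided (hypothetical values): with a
+    # fixed-width string dtype, assignment clips to the inferred width,
+    #   a = np.array([b'ab', b'cd'])  # dtype='|S2'
+    #   a[0] = b'abcdef'              # silently stored as b'ab'
+    # whereas dtype=object keeps each serialized message intact.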
+
+    field_names = [f.name for f in fields]
+    output_types = [f.dtype for f in fields]
+
+    with self.test_session() as sess:
+      sizes, vtensor = self._decode_module.decode_proto(
+          batch,
+          message_type=message_type,
+          field_names=field_names,
+          output_types=output_types,
+          message_format=message_format,
+          sanitize=sanitize)
+
+      vlist = sess.run([sizes] + vtensor)
+      sizes = vlist[0]
+      # Values is a list of tensors, one for each field.
+      value_tensors = vlist[1:]
+
+      # Check that the repeat sizes are correct.
+      self.assertTrue(
+          np.all(np.array(sizes.shape) == batch_shape + [len(field_names)]))
+
+      # Check that the decoded sizes match the expected sizes.
+      self.assertEqual(len(sizes.flat), len(case_sizes))
+      self.assertTrue(
+          np.all(sizes.flat == np.array(
+              case_sizes, dtype=np.int32)))
+
+      field_dict = dict(zip(field_names, value_tensors))
+
+      self._compareProtos(batch_shape, sizes, fields, field_dict)
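+      # Shape convention exercised above: for an input of shape batch_shape
+      # and N requested fields, `sizes` has shape batch_shape + [N] (the
+      # per-message repeat counts) and each value tensor has shape
+      # batch_shape + [max_repeat_count] for its field.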
+
+  @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
+  def testBinary(self, case):
+    batch = [value.SerializeToString() for value in case.values]
+    self._runDecodeProtoTests(
+        case.fields,
+        case.sizes,
+        list(case.shapes),
+        batch,
+        'tensorflow.contrib.proto.TestValue',
+        'binary',
+        sanitize=False)
+
+  @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
+  def testBinaryDisordered(self, case):
+    batch = [value.SerializeToString() for value in case.values]
+    self._runDecodeProtoTests(
+        case.fields,
+        case.sizes,
+        list(case.shapes),
+        batch,
+        'tensorflow.contrib.proto.TestValue',
+        'binary',
+        sanitize=False,
+        force_disordered=True)
+
+  @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
+  def testPacked(self, case):
+    # Now try with the packed serialization.
+    #
+    # We test the packed representations by loading the same test case using
+    # PackedTestValue instead of TestValue. To do this we rely on the text
+    # format being the same for packed and unpacked fields, and reparse the
+    # test message using the packed version of the proto.
+    packed_batch = [
+        # Note: float_format='.17g' is necessary to ensure preservation of
+        # doubles and floats in text format.
+        text_format.Parse(
+            text_format.MessageToString(
+                value, float_format='.17g'),
+            test_example_pb2.PackedTestValue()).SerializeToString()
+        for value in case.values
+    ]
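+    # The text-format round trip is what switches wire encodings: both
+    # message types print identically as text, so parsing that text into
+    # PackedTestValue re-serializes the same logical values using packed
+    # (single length-delimited) repeated fields instead of one tag per
+    # element.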
+
+    self._runDecodeProtoTests(
+        case.fields,
+        case.sizes,
+        list(case.shapes),
+        packed_batch,
+        'tensorflow.contrib.proto.PackedTestValue',
+        'binary',
+        sanitize=False)
+
+  @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
+  def testText(self, case):
+    # Note: float_format='.17g' is necessary to ensure preservation of
+    # doubles and floats in text format.
+    text_batch = [
+        text_format.MessageToString(
+            value, float_format='.17g') for value in case.values
+    ]
+
+    self._runDecodeProtoTests(
+        case.fields,
+        case.sizes,
+        list(case.shapes),
+        text_batch,
+        'tensorflow.contrib.proto.TestValue',
+        'text',
+        sanitize=False)
+
+  @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
+  def testSanitizerGood(self, case):
+    batch = [value.SerializeToString() for value in case.values]
+    self._runDecodeProtoTests(
+        case.fields,
+        case.sizes,
+        list(case.shapes),
+        batch,
+        'tensorflow.contrib.proto.TestValue',
+        'binary',
+        sanitize=True)
+
+  @parameterized.parameters(False, True)
+  def testCorruptProtobuf(self, sanitize):
+    corrupt_proto = 'This is not a binary protobuf'
+
+    # Numpy silently truncates the strings if you don't specify dtype=object.
+    batch = np.array(corrupt_proto, dtype=object)
+    msg_type = 'tensorflow.contrib.proto.TestCase'
+    field_names = ['sizes']
+    field_types = [dtypes.int32]
+
+    with self.test_session() as sess:
+      ctensor, vtensor = self._decode_module.decode_proto(
+          batch,
+          message_type=msg_type,
+          field_names=field_names,
+          output_types=field_types,
+          sanitize=sanitize)
+      with self.assertRaisesRegexp(errors.DataLossError,
+                                   'Unable to parse binary protobuf'
+                                   '|Failed to consume entire buffer'):
+        _ = sess.run([ctensor] + vtensor)
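+      # Both parameterizations must fail loudly; the two alternatives in
+      # the regexp presumably correspond to the unsanitized and sanitized
+      # parsing paths, which reject the corrupt buffer at different points.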
diff --git a/tensorflow/contrib/proto/python/kernel_tests/encode_proto_op_test.py b/tensorflow/contrib/proto/python/kernel_tests/encode_proto_op_test.py
index fb33660..fc5cd25 100644
--- a/tensorflow/contrib/proto/python/kernel_tests/encode_proto_op_test.py
+++ b/tensorflow/contrib/proto/python/kernel_tests/encode_proto_op_test.py
@@ -13,164 +13,24 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =============================================================================
-"""Table-driven test for encode_proto op.
+"""Tests for encode_proto op."""
 
-This test is run once with each of the *.TestCase.pbtxt files
-in the test directory.
-
-It tests that encode_proto is a lossless inverse of decode_proto
-(for the specified fields).
-"""
 # Python3 readiness boilerplate
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
-from absl.testing import parameterized
-import numpy as np
-
-from google.protobuf import text_format
-
-from tensorflow.contrib.proto.python.kernel_tests import test_base
-from tensorflow.contrib.proto.python.kernel_tests import test_example_pb2
+from tensorflow.contrib.proto.python.kernel_tests import encode_proto_op_test_base as test_base
 from tensorflow.contrib.proto.python.ops import decode_proto_op
 from tensorflow.contrib.proto.python.ops import encode_proto_op
-from tensorflow.python.framework import dtypes
-from tensorflow.python.ops import array_ops
-from tensorflow.python.platform import flags
 from tensorflow.python.platform import test
 
-FLAGS = flags.FLAGS
 
-flags.DEFINE_string('message_text_file', None,
-                    'A file containing a text serialized TestCase protobuf.')
+class EncodeProtoOpTest(test_base.EncodeProtoOpTestBase):
 
-
-class EncodeProtoOpTest(test_base.ProtoOpTestBase, parameterized.TestCase):
-
-  def testBadInputs(self):
-    # Invalid field name
-    with self.test_session():
-      with self.assertRaisesOpError('Unknown field: non_existent_field'):
-        encode_proto_op.encode_proto(
-            sizes=[[1]],
-            values=[np.array([[0.0]], dtype=np.int32)],
-            message_type='tensorflow.contrib.proto.RepeatedPrimitiveValue',
-            field_names=['non_existent_field']).eval()
-
-    # Incorrect types.
-    with self.test_session():
-      with self.assertRaisesOpError(
-          'Incompatible type for field double_value.'):
-        encode_proto_op.encode_proto(
-            sizes=[[1]],
-            values=[np.array([[0.0]], dtype=np.int32)],
-            message_type='tensorflow.contrib.proto.RepeatedPrimitiveValue',
-            field_names=['double_value']).eval()
-
-    # Incorrect shapes of sizes.
-    with self.test_session():
-      with self.assertRaisesOpError(
-          r'sizes should be batch_size \+ \[len\(field_names\)\]'):
-        sizes = array_ops.placeholder(dtypes.int32)
-        values = array_ops.placeholder(dtypes.float64)
-        encode_proto_op.encode_proto(
-            sizes=sizes,
-            values=[values],
-            message_type='tensorflow.contrib.proto.RepeatedPrimitiveValue',
-            field_names=['double_value']).eval(feed_dict={
-                sizes: [[[0, 0]]],
-                values: [[0.0]]
-            })
-
-    # Inconsistent shapes of values.
-    with self.test_session():
-      with self.assertRaisesOpError(
-          'Values must match up to the last dimension'):
-        sizes = array_ops.placeholder(dtypes.int32)
-        values1 = array_ops.placeholder(dtypes.float64)
-        values2 = array_ops.placeholder(dtypes.int32)
-        (encode_proto_op.encode_proto(
-            sizes=[[1, 1]],
-            values=[values1, values2],
-            message_type='tensorflow.contrib.proto.RepeatedPrimitiveValue',
-            field_names=['double_value', 'int32_value']).eval(feed_dict={
-                values1: [[0.0]],
-                values2: [[0], [0]]
-            }))
-
-  def _testRoundtrip(self, in_bufs, message_type, fields):
-
-    field_names = [f.name for f in fields]
-    out_types = [f.dtype for f in fields]
-
-    with self.test_session() as sess:
-      sizes, field_tensors = decode_proto_op.decode_proto(
-          in_bufs,
-          message_type=message_type,
-          field_names=field_names,
-          output_types=out_types)
-
-      out_tensors = encode_proto_op.encode_proto(
-          sizes,
-          field_tensors,
-          message_type=message_type,
-          field_names=field_names)
-
-      out_bufs, = sess.run([out_tensors])
-
-      # Check that the re-encoded tensor has the same shape.
-      self.assertEqual(in_bufs.shape, out_bufs.shape)
-
-      # Compare the input and output.
-      for in_buf, out_buf in zip(in_bufs.flat, out_bufs.flat):
-        in_obj = test_example_pb2.RepeatedPrimitiveValue()
-        in_obj.ParseFromString(in_buf)
-
-        out_obj = test_example_pb2.RepeatedPrimitiveValue()
-        out_obj.ParseFromString(out_buf)
-
-        # Check that the deserialized objects are identical.
-        self.assertEqual(in_obj, out_obj)
-
-        # Check that the input and output serialized messages are identical.
-        # If we fail here, there is a difference in the serialized
-        # representation but the new serialization still parses. This could
-        # be harmless (a change in map ordering?) or it could be bad (e.g.
-        # loss of packing in the encoding).
-        self.assertEqual(in_buf, out_buf)
-
-  @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
-  def testRoundtrip(self, case):
-    in_bufs = [primitive.SerializeToString() for primitive in case.primitive]
-
-    # np.array silently truncates strings if you don't specify dtype=object.
-    in_bufs = np.reshape(np.array(in_bufs, dtype=object), list(case.shape))
-    return self._testRoundtrip(
-        in_bufs, 'tensorflow.contrib.proto.RepeatedPrimitiveValue', case.field)
-
-  @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
-  def testRoundtripPacked(self, case):
-    # Now try with the packed serialization.
-    # We test the packed representations by loading the same test cases
-    # using PackedPrimitiveValue instead of RepeatedPrimitiveValue.
-    # To do this we rely on the text format being the same for packed and
-    # unpacked fields, and reparse the test message using the packed version
-    # of the proto.
-    in_bufs = [
-        # Note: float_format='.17g' is necessary to ensure preservation of
-        # doubles and floats in text format.
-        text_format.Parse(
-            text_format.MessageToString(
-                primitive, float_format='.17g'),
-            test_example_pb2.PackedPrimitiveValue()).SerializeToString()
-        for primitive in case.primitive
-    ]
-
-    # np.array silently truncates strings if you don't specify dtype=object.
-    in_bufs = np.reshape(np.array(in_bufs, dtype=object), list(case.shape))
-    return self._testRoundtrip(
-        in_bufs, 'tensorflow.contrib.proto.PackedPrimitiveValue', case.field)
+  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
+    super(EncodeProtoOpTest, self).__init__(decode_proto_op, encode_proto_op,
+                                            methodName)
 
 
 if __name__ == '__main__':
diff --git a/tensorflow/contrib/proto/python/kernel_tests/encode_proto_op_test_base.py b/tensorflow/contrib/proto/python/kernel_tests/encode_proto_op_test_base.py
new file mode 100644
index 0000000..07dfb92
--- /dev/null
+++ b/tensorflow/contrib/proto/python/kernel_tests/encode_proto_op_test_base.py
@@ -0,0 +1,177 @@
+# =============================================================================
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+"""Table-driven test for encode_proto op.
+
+This test is run once for each of the test cases provided by
+ProtoOpTestBase.named_parameters().
+
+It tests that encode_proto is a lossless inverse of decode_proto
+(for the specified fields).
+"""
+# Python3 readiness boilerplate
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from absl.testing import parameterized
+import numpy as np
+
+from google.protobuf import text_format
+
+from tensorflow.contrib.proto.python.kernel_tests import proto_op_test_base as test_base
+from tensorflow.contrib.proto.python.kernel_tests import test_example_pb2
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+
+
+class EncodeProtoOpTestBase(test_base.ProtoOpTestBase, parameterized.TestCase):
+  """Base class for testing proto encoding ops."""
+
+  def __init__(self, decode_module, encode_module, methodName='runTest'):  # pylint: disable=invalid-name
+    """EncodeProtoOpTestBase initializer.
+
+    Args:
+      decode_module: a module containing the `decode_proto_op` method
+      encode_module: a module containing the `encode_proto_op` method
+      methodName: the name of the test method (same as for test.TestCase)
+    """
+
+    super(EncodeProtoOpTestBase, self).__init__(methodName)
+    self._decode_module = decode_module
+    self._encode_module = encode_module
+
+  def testBadInputs(self):
+    # Invalid field name
+    with self.test_session():
+      with self.assertRaisesOpError('Unknown field: non_existent_field'):
+        self._encode_module.encode_proto(
+            sizes=[[1]],
+            values=[np.array([[0.0]], dtype=np.int32)],
+            message_type='tensorflow.contrib.proto.TestValue',
+            field_names=['non_existent_field']).eval()
+
+    # Incorrect types.
+    with self.test_session():
+      with self.assertRaisesOpError(
+          'Incompatible type for field double_value.'):
+        self._encode_module.encode_proto(
+            sizes=[[1]],
+            values=[np.array([[0.0]], dtype=np.int32)],
+            message_type='tensorflow.contrib.proto.TestValue',
+            field_names=['double_value']).eval()
+
+    # Incorrect shapes of sizes.
+    with self.test_session():
+      with self.assertRaisesOpError(
+          r'sizes should be batch_size \+ \[len\(field_names\)\]'):
+        sizes = array_ops.placeholder(dtypes.int32)
+        values = array_ops.placeholder(dtypes.float64)
+        self._encode_module.encode_proto(
+            sizes=sizes,
+            values=[values],
+            message_type='tensorflow.contrib.proto.TestValue',
+            field_names=['double_value']).eval(feed_dict={
+                sizes: [[[0, 0]]],
+                values: [[0.0]]
+            })
+
+    # Inconsistent shapes of values.
+    with self.test_session():
+      with self.assertRaisesOpError(
+          'Values must match up to the last dimension'):
+        sizes = array_ops.placeholder(dtypes.int32)
+        values1 = array_ops.placeholder(dtypes.float64)
+        values2 = array_ops.placeholder(dtypes.int32)
+        (self._encode_module.encode_proto(
+            sizes=[[1, 1]],
+            values=[values1, values2],
+            message_type='tensorflow.contrib.proto.TestValue',
+            field_names=['double_value', 'int32_value']).eval(feed_dict={
+                values1: [[0.0]],
+                values2: [[0], [0]]
+            }))
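+    # The contract probed above: `sizes` must have shape
+    # batch_size + [len(field_names)] (exactly the shape decode_proto
+    # emits), and all value tensors must agree on every dimension except
+    # the last, which holds the repeats.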
+
+  def _testRoundtrip(self, in_bufs, message_type, fields):
+
+    field_names = [f.name for f in fields]
+    out_types = [f.dtype for f in fields]
+
+    with self.test_session() as sess:
+      sizes, field_tensors = self._decode_module.decode_proto(
+          in_bufs,
+          message_type=message_type,
+          field_names=field_names,
+          output_types=out_types)
+
+      out_tensors = self._encode_module.encode_proto(
+          sizes,
+          field_tensors,
+          message_type=message_type,
+          field_names=field_names)
+
+      out_bufs, = sess.run([out_tensors])
+
+      # Check that the re-encoded tensor has the same shape.
+      self.assertEqual(in_bufs.shape, out_bufs.shape)
+
+      # Compare the input and output.
+      for in_buf, out_buf in zip(in_bufs.flat, out_bufs.flat):
+        in_obj = test_example_pb2.TestValue()
+        in_obj.ParseFromString(in_buf)
+
+        out_obj = test_example_pb2.TestValue()
+        out_obj.ParseFromString(out_buf)
+
+        # Check that the deserialized objects are identical.
+        self.assertEqual(in_obj, out_obj)
+
+        # Check that the input and output serialized messages are identical.
+        # If we fail here, there is a difference in the serialized
+        # representation but the new serialization still parses. This could
+        # be harmless (a change in map ordering?) or it could be bad (e.g.
+        # loss of packing in the encoding).
+        self.assertEqual(in_buf, out_buf)
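+      # Net effect: this asserts encode_proto(decode_proto(buf)) == buf
+      # byte for byte, a stronger property than the semantic equality of
+      # the reparsed messages checked just above.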
+
+  @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
+  def testRoundtrip(self, case):
+    in_bufs = [value.SerializeToString() for value in case.values]
+
+    # np.array silently truncates strings if you don't specify dtype=object.
+    in_bufs = np.reshape(np.array(in_bufs, dtype=object), list(case.shapes))
+    return self._testRoundtrip(
+        in_bufs, 'tensorflow.contrib.proto.TestValue', case.fields)
+
+  @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
+  def testRoundtripPacked(self, case):
+    # Now try with the packed serialization.
+    # We test the packed representations by loading the same test cases using
+    # PackedTestValue instead of TestValue. To do this we rely on the text
+    # format being the same for packed and unpacked fields, and reparse the test
+    # message using the packed version of the proto.
+    in_bufs = [
+        # Note: float_format='.17g' is necessary to ensure preservation of
+        # doubles and floats in text format.
+        text_format.Parse(
+            text_format.MessageToString(
+                value, float_format='.17g'),
+            test_example_pb2.PackedTestValue()).SerializeToString()
+        for value in case.values
+    ]
+
+    # np.array silently truncates strings if you don't specify dtype=object.
+    in_bufs = np.reshape(np.array(in_bufs, dtype=object), list(case.shapes))
+    return self._testRoundtrip(
+        in_bufs, 'tensorflow.contrib.proto.PackedTestValue', case.fields)
diff --git a/tensorflow/contrib/proto/python/kernel_tests/proto_op_test_base.py b/tensorflow/contrib/proto/python/kernel_tests/proto_op_test_base.py
new file mode 100644
index 0000000..cbc7b3d
--- /dev/null
+++ b/tensorflow/contrib/proto/python/kernel_tests/proto_op_test_base.py
@@ -0,0 +1,407 @@
+# =============================================================================
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+"""Test case base for testing proto operations."""
+
+# Python3 preparedness imports.
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import ctypes as ct
+import os
+
+from tensorflow.contrib.proto.python.kernel_tests import test_example_pb2
+from tensorflow.core.framework import types_pb2
+from tensorflow.python.platform import test
+
+
+class ProtoOpTestBase(test.TestCase):
+  """Base class for testing proto decoding and encoding ops."""
+
+  def __init__(self, methodName="runTest"):  # pylint: disable=invalid-name
+    super(ProtoOpTestBase, self).__init__(methodName)
+    lib = os.path.join(os.path.dirname(__file__), "libtestexample.so")
+    if os.path.isfile(lib):
+      ct.cdll.LoadLibrary(lib)
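+      # Loading the shared library, when it was built, links the compiled
+      # C++ descriptors for the test messages into the process so the
+      # decode/encode kernels can resolve types such as
+      # 'tensorflow.contrib.proto.TestValue' by fully-qualified name.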
+
+  @staticmethod
+  def named_parameters():
+    return (
+        ("defaults", ProtoOpTestBase.defaults_test_case()),
+        ("minmax", ProtoOpTestBase.minmax_test_case()),
+        ("nested", ProtoOpTestBase.nested_test_case()),
+        ("optional", ProtoOpTestBase.optional_test_case()),
+        ("promote_unsigned", ProtoOpTestBase.promote_unsigned_test_case()),
+        ("ragged", ProtoOpTestBase.ragged_test_case()),
+        ("shaped_batch", ProtoOpTestBase.shaped_batch_test_case()),
+        ("simple", ProtoOpTestBase.simple_test_case()),
+    )
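+  # Each (name, TestCase) pair above feeds parameterized.named_parameters
+  # in the op test bases, producing one named variant of every case-driven
+  # test method per entry here.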
+
+  @staticmethod
+  def defaults_test_case():
+    test_case = test_example_pb2.TestCase()
+    test_case.values.add()  # No fields specified, so we get all defaults.
+    test_case.shapes.append(1)
+    test_case.sizes.append(0)
+    field = test_case.fields.add()
+    field.name = "double_value_with_default"
+    field.dtype = types_pb2.DT_DOUBLE
+    field.value.double_value.append(1.0)
+    test_case.sizes.append(0)
+    field = test_case.fields.add()
+    field.name = "float_value_with_default"
+    field.dtype = types_pb2.DT_FLOAT
+    field.value.float_value.append(2.0)
+    test_case.sizes.append(0)
+    field = test_case.fields.add()
+    field.name = "int64_value_with_default"
+    field.dtype = types_pb2.DT_INT64
+    field.value.int64_value.append(3)
+    test_case.sizes.append(0)
+    field = test_case.fields.add()
+    field.name = "sfixed64_value_with_default"
+    field.dtype = types_pb2.DT_INT64
+    field.value.int64_value.append(11)
+    test_case.sizes.append(0)
+    field = test_case.fields.add()
+    field.name = "sint64_value_with_default"
+    field.dtype = types_pb2.DT_INT64
+    field.value.int64_value.append(13)
+    test_case.sizes.append(0)
+    field = test_case.fields.add()
+    field.name = "uint64_value_with_default"
+    field.dtype = types_pb2.DT_INT64
+    field.value.int64_value.append(4)
+    test_case.sizes.append(0)
+    field = test_case.fields.add()
+    field.name = "fixed64_value_with_default"
+    field.dtype = types_pb2.DT_INT64
+    field.value.int64_value.append(6)
+    test_case.sizes.append(0)
+    field = test_case.fields.add()
+    field.name = "int32_value_with_default"
+    field.dtype = types_pb2.DT_INT32
+    field.value.int32_value.append(5)
+    test_case.sizes.append(0)
+    field = test_case.fields.add()
+    field.name = "sfixed32_value_with_default"
+    field.dtype = types_pb2.DT_INT32
+    field.value.int32_value.append(10)
+    test_case.sizes.append(0)
+    field = test_case.fields.add()
+    field.name = "sint32_value_with_default"
+    field.dtype = types_pb2.DT_INT32
+    field.value.int32_value.append(12)
+    test_case.sizes.append(0)
+    field = test_case.fields.add()
+    field.name = "uint32_value_with_default"
+    field.dtype = types_pb2.DT_INT32
+    field.value.int32_value.append(9)
+    test_case.sizes.append(0)
+    field = test_case.fields.add()
+    field.name = "fixed32_value_with_default"
+    field.dtype = types_pb2.DT_INT32
+    field.value.int32_value.append(7)
+    test_case.sizes.append(0)
+    field = test_case.fields.add()
+    field.name = "bool_value_with_default"
+    field.dtype = types_pb2.DT_BOOL
+    field.value.bool_value.append(True)
+    test_case.sizes.append(0)
+    field = test_case.fields.add()
+    field.name = "string_value_with_default"
+    field.dtype = types_pb2.DT_STRING
+    field.value.string_value.append("a")
+    test_case.sizes.append(0)
+    field = test_case.fields.add()
+    field.name = "bytes_value_with_default"
+    field.dtype = types_pb2.DT_STRING
+    field.value.string_value.append("a longer default string")
+    return test_case
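+  # Every size above is 0: nothing is set on the wire, so the expected
+  # outputs are the explicit proto2 defaults declared for the
+  # *_with_default fields, which decode_proto is expected to materialize.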
+
+  @staticmethod
+  def minmax_test_case():
+    test_case = test_example_pb2.TestCase()
+    value = test_case.values.add()
+    value.double_value.append(-1.7976931348623158e+308)
+    value.double_value.append(2.2250738585072014e-308)
+    value.double_value.append(1.7976931348623158e+308)
+    value.float_value.append(-3.402823466e+38)
+    value.float_value.append(1.175494351e-38)
+    value.float_value.append(3.402823466e+38)
+    value.int64_value.append(-9223372036854775808)
+    value.int64_value.append(9223372036854775807)
+    value.sfixed64_value.append(-9223372036854775808)
+    value.sfixed64_value.append(9223372036854775807)
+    value.sint64_value.append(-9223372036854775808)
+    value.sint64_value.append(9223372036854775807)
+    value.uint64_value.append(0)
+    value.uint64_value.append(18446744073709551615)
+    value.fixed64_value.append(0)
+    value.fixed64_value.append(18446744073709551615)
+    value.int32_value.append(-2147483648)
+    value.int32_value.append(2147483647)
+    value.sfixed32_value.append(-2147483648)
+    value.sfixed32_value.append(2147483647)
+    value.sint32_value.append(-2147483648)
+    value.sint32_value.append(2147483647)
+    value.uint32_value.append(0)
+    value.uint32_value.append(4294967295)
+    value.fixed32_value.append(0)
+    value.fixed32_value.append(4294967295)
+    value.bool_value.append(False)
+    value.bool_value.append(True)
+    value.string_value.append("")
+    value.string_value.append("I refer to the infinite.")
+    test_case.shapes.append(1)
+    test_case.sizes.append(3)
+    field = test_case.fields.add()
+    field.name = "double_value"
+    field.dtype = types_pb2.DT_DOUBLE
+    field.value.double_value.append(-1.7976931348623158e+308)
+    field.value.double_value.append(2.2250738585072014e-308)
+    field.value.double_value.append(1.7976931348623158e+308)
+    test_case.sizes.append(3)
+    field = test_case.fields.add()
+    field.name = "float_value"
+    field.dtype = types_pb2.DT_FLOAT
+    field.value.float_value.append(-3.402823466e+38)
+    field.value.float_value.append(1.175494351e-38)
+    field.value.float_value.append(3.402823466e+38)
+    test_case.sizes.append(2)
+    field = test_case.fields.add()
+    field.name = "int64_value"
+    field.dtype = types_pb2.DT_INT64
+    field.value.int64_value.append(-9223372036854775808)
+    field.value.int64_value.append(9223372036854775807)
+    test_case.sizes.append(2)
+    field = test_case.fields.add()
+    field.name = "sfixed64_value"
+    field.dtype = types_pb2.DT_INT64
+    field.value.int64_value.append(-9223372036854775808)
+    field.value.int64_value.append(9223372036854775807)
+    test_case.sizes.append(2)
+    field = test_case.fields.add()
+    field.name = "sint64_value"
+    field.dtype = types_pb2.DT_INT64
+    field.value.int64_value.append(-9223372036854775808)
+    field.value.int64_value.append(9223372036854775807)
+    test_case.sizes.append(2)
+    field = test_case.fields.add()
+    field.name = "uint64_value"
+    field.dtype = types_pb2.DT_INT64
+    field.value.int64_value.append(0)
+    field.value.int64_value.append(-1)
+    test_case.sizes.append(2)
+    field = test_case.fields.add()
+    field.name = "fixed64_value"
+    field.dtype = types_pb2.DT_INT64
+    field.value.int64_value.append(0)
+    field.value.int64_value.append(-1)
+    test_case.sizes.append(2)
+    field = test_case.fields.add()
+    field.name = "int32_value"
+    field.dtype = types_pb2.DT_INT32
+    field.value.int32_value.append(-2147483648)
+    field.value.int32_value.append(2147483647)
+    test_case.sizes.append(2)
+    field = test_case.fields.add()
+    field.name = "sfixed32_value"
+    field.dtype = types_pb2.DT_INT32
+    field.value.int32_value.append(-2147483648)
+    field.value.int32_value.append(2147483647)
+    test_case.sizes.append(2)
+    field = test_case.fields.add()
+    field.name = "sint32_value"
+    field.dtype = types_pb2.DT_INT32
+    field.value.int32_value.append(-2147483648)
+    field.value.int32_value.append(2147483647)
+    test_case.sizes.append(2)
+    field = test_case.fields.add()
+    field.name = "uint32_value"
+    field.dtype = types_pb2.DT_INT32
+    field.value.int32_value.append(0)
+    field.value.int32_value.append(-1)
+    test_case.sizes.append(2)
+    field = test_case.fields.add()
+    field.name = "fixed32_value"
+    field.dtype = types_pb2.DT_INT32
+    field.value.int32_value.append(0)
+    field.value.int32_value.append(-1)
+    test_case.sizes.append(2)
+    field = test_case.fields.add()
+    field.name = "bool_value"
+    field.dtype = types_pb2.DT_BOOL
+    field.value.bool_value.append(False)
+    field.value.bool_value.append(True)
+    test_case.sizes.append(2)
+    field = test_case.fields.add()
+    field.name = "string_value"
+    field.dtype = types_pb2.DT_STRING
+    field.value.string_value.append("")
+    field.value.string_value.append("I refer to the infinite.")
+    return test_case
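+  # Note the unsigned extremes: 2**64 - 1 and 2**32 - 1 appear as -1 in
+  # the expected int64/int32 values because TensorFlow has no unsigned
+  # 64/32-bit dtypes and the bits are reinterpreted as two's complement.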
+
+  @staticmethod
+  def nested_test_case():
+    test_case = test_example_pb2.TestCase()
+    value = test_case.values.add()
+    message_value = value.message_value.add()
+    message_value.double_value = 23.5
+    test_case.shapes.append(1)
+    test_case.sizes.append(1)
+    field = test_case.fields.add()
+    field.name = "message_value"
+    field.dtype = types_pb2.DT_STRING
+    message_value = field.value.message_value.add()
+    message_value.double_value = 23.5
+    return test_case
+
+  @staticmethod
+  def optional_test_case():
+    test_case = test_example_pb2.TestCase()
+    value = test_case.values.add()
+    value.bool_value.append(True)
+    test_case.shapes.append(1)
+    test_case.sizes.append(1)
+    field = test_case.fields.add()
+    field.name = "bool_value"
+    field.dtype = types_pb2.DT_BOOL
+    field.value.bool_value.append(True)
+    test_case.sizes.append(0)
+    field = test_case.fields.add()
+    field.name = "double_value"
+    field.dtype = types_pb2.DT_DOUBLE
+    field.value.double_value.append(0.0)
+    return test_case
+
+  @staticmethod
+  def promote_unsigned_test_case():
+    test_case = test_example_pb2.TestCase()
+    value = test_case.values.add()
+    value.fixed32_value.append(4294967295)
+    value.uint32_value.append(4294967295)
+    test_case.shapes.append(1)
+    test_case.sizes.append(1)
+    field = test_case.fields.add()
+    field.name = "fixed32_value"
+    field.dtype = types_pb2.DT_INT64
+    field.value.int64_value.append(4294967295)
+    test_case.sizes.append(1)
+    field = test_case.fields.add()
+    field.name = "uint32_value"
+    field.dtype = types_pb2.DT_INT64
+    field.value.int64_value.append(4294967295)
+    # Comes from an explicitly-specified default.
+    test_case.sizes.append(0)
+    field = test_case.fields.add()
+    field.name = "uint32_value_with_default"
+    field.dtype = types_pb2.DT_INT64
+    field.value.int64_value.append(9)
+    return test_case
+
+  @staticmethod
+  def ragged_test_case():
+    test_case = test_example_pb2.TestCase()
+    value = test_case.values.add()
+    value.double_value.append(23.5)
+    value.double_value.append(123.0)
+    value.bool_value.append(True)
+    value = test_case.values.add()
+    value.double_value.append(3.1)
+    value.bool_value.append(False)
+    test_case.shapes.append(2)
+    test_case.sizes.append(2)
+    test_case.sizes.append(1)
+    test_case.sizes.append(1)
+    test_case.sizes.append(1)
+    field = test_case.fields.add()
+    field.name = "double_value"
+    field.dtype = types_pb2.DT_DOUBLE
+    field.value.double_value.append(23.5)
+    field.value.double_value.append(123.0)
+    field.value.double_value.append(3.1)
+    field.value.double_value.append(0.0)
+    field = test_case.fields.add()
+    field.name = "bool_value"
+    field.dtype = types_pb2.DT_BOOL
+    field.value.bool_value.append(True)
+    field.value.bool_value.append(False)
+    return test_case
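+  # The ragged case: message 0 holds two doubles and message 1 holds one,
+  # so the dense double output is [[23.5, 123.0], [3.1, 0.0]] with the
+  # short row padded by the default 0.0, while sizes records the true
+  # per-message counts.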
+
+  @staticmethod
+  def shaped_batch_test_case():
+    test_case = test_example_pb2.TestCase()
+    value = test_case.values.add()
+    value.double_value.append(23.5)
+    value.bool_value.append(True)
+    value = test_case.values.add()
+    value.double_value.append(44.0)
+    value.bool_value.append(False)
+    value = test_case.values.add()
+    value.double_value.append(3.14159)
+    value.bool_value.append(True)
+    value = test_case.values.add()
+    value.double_value.append(1.414)
+    value.bool_value.append(True)
+    value = test_case.values.add()
+    value.double_value.append(-32.2)
+    value.bool_value.append(False)
+    value = test_case.values.add()
+    value.double_value.append(0.0001)
+    value.bool_value.append(True)
+    test_case.shapes.append(3)
+    test_case.shapes.append(2)
+    for _ in range(12):
+      test_case.sizes.append(1)
+    field = test_case.fields.add()
+    field.name = "double_value"
+    field.dtype = types_pb2.DT_DOUBLE
+    field.value.double_value.append(23.5)
+    field.value.double_value.append(44.0)
+    field.value.double_value.append(3.14159)
+    field.value.double_value.append(1.414)
+    field.value.double_value.append(-32.2)
+    field.value.double_value.append(0.0001)
+    field = test_case.fields.add()
+    field.name = "bool_value"
+    field.dtype = types_pb2.DT_BOOL
+    field.value.bool_value.append(True)
+    field.value.bool_value.append(False)
+    field.value.bool_value.append(True)
+    field.value.bool_value.append(True)
+    field.value.bool_value.append(False)
+    field.value.bool_value.append(True)
+    return test_case
+
+  @staticmethod
+  def simple_test_case():
+    test_case = test_example_pb2.TestCase()
+    value = test_case.values.add()
+    value.double_value.append(23.5)
+    value.bool_value.append(True)
+    test_case.shapes.append(1)
+    test_case.sizes.append(1)
+    field = test_case.fields.add()
+    field.name = "double_value"
+    field.dtype = types_pb2.DT_DOUBLE
+    field.value.double_value.append(23.5)
+    test_case.sizes.append(1)
+    field = test_case.fields.add()
+    field.name = "bool_value"
+    field.dtype = types_pb2.DT_BOOL
+    field.value.bool_value.append(True)
+    return test_case
diff --git a/tensorflow/contrib/proto/python/kernel_tests/test_base.py b/tensorflow/contrib/proto/python/kernel_tests/test_base.py
deleted file mode 100644
index 1fc8c16..0000000
--- a/tensorflow/contrib/proto/python/kernel_tests/test_base.py
+++ /dev/null
@@ -1,407 +0,0 @@
-# =============================================================================
-# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# =============================================================================
-"""Test case base for testing proto operations."""
-
-# Python3 preparedness imports.
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import ctypes as ct
-import os
-
-from tensorflow.contrib.proto.python.kernel_tests import test_example_pb2
-from tensorflow.core.framework import types_pb2
-from tensorflow.python.platform import test
-
-
-class ProtoOpTestBase(test.TestCase):
-  """Base class for testing proto decoding and encoding ops."""
-
-  def __init__(self, methodName="runTest"):  # pylint: disable=invalid-name
-    super(ProtoOpTestBase, self).__init__(methodName)
-    lib = os.path.join(os.path.dirname(__file__), "libtestexample.so")
-    if os.path.isfile(lib):
-      ct.cdll.LoadLibrary(lib)
-
-  @staticmethod
-  def named_parameters():
-    return (
-        ("defaults", ProtoOpTestBase.defaults_test_case()),
-        ("minmax", ProtoOpTestBase.minmax_test_case()),
-        ("nested", ProtoOpTestBase.nested_test_case()),
-        ("optional", ProtoOpTestBase.optional_test_case()),
-        ("promote_unsigned", ProtoOpTestBase.promote_unsigned_test_case()),
-        ("ragged", ProtoOpTestBase.ragged_test_case()),
-        ("shaped_batch", ProtoOpTestBase.shaped_batch_test_case()),
-        ("simple", ProtoOpTestBase.simple_test_case()),
-    )
-
-  @staticmethod
-  def defaults_test_case():
-    test_case = test_example_pb2.TestCase()
-    test_case.primitive.add()  # No fields specified, so we get all defaults.
-    test_case.shape.append(1)
-    test_case.sizes.append(0)
-    field = test_case.field.add()
-    field.name = "double_default"
-    field.dtype = types_pb2.DT_DOUBLE
-    field.expected.double_value.append(1.0)
-    test_case.sizes.append(0)
-    field = test_case.field.add()
-    field.name = "float_default"
-    field.dtype = types_pb2.DT_FLOAT
-    field.expected.float_value.append(2.0)
-    test_case.sizes.append(0)
-    field = test_case.field.add()
-    field.name = "int64_default"
-    field.dtype = types_pb2.DT_INT64
-    field.expected.int64_value.append(3)
-    test_case.sizes.append(0)
-    field = test_case.field.add()
-    field.name = "sfixed64_default"
-    field.dtype = types_pb2.DT_INT64
-    field.expected.int64_value.append(11)
-    test_case.sizes.append(0)
-    field = test_case.field.add()
-    field.name = "sint64_default"
-    field.dtype = types_pb2.DT_INT64
-    field.expected.int64_value.append(13)
-    test_case.sizes.append(0)
-    field = test_case.field.add()
-    field.name = "uint64_default"
-    field.dtype = types_pb2.DT_INT64
-    field.expected.int64_value.append(4)
-    test_case.sizes.append(0)
-    field = test_case.field.add()
-    field.name = "fixed64_default"
-    field.dtype = types_pb2.DT_INT64
-    field.expected.int64_value.append(6)
-    test_case.sizes.append(0)
-    field = test_case.field.add()
-    field.name = "int32_default"
-    field.dtype = types_pb2.DT_INT32
-    field.expected.int32_value.append(5)
-    test_case.sizes.append(0)
-    field = test_case.field.add()
-    field.name = "sfixed32_default"
-    field.dtype = types_pb2.DT_INT32
-    field.expected.int32_value.append(10)
-    test_case.sizes.append(0)
-    field = test_case.field.add()
-    field.name = "sint32_default"
-    field.dtype = types_pb2.DT_INT32
-    field.expected.int32_value.append(12)
-    test_case.sizes.append(0)
-    field = test_case.field.add()
-    field.name = "uint32_default"
-    field.dtype = types_pb2.DT_INT32
-    field.expected.int32_value.append(-1)
-    test_case.sizes.append(0)
-    field = test_case.field.add()
-    field.name = "fixed32_default"
-    field.dtype = types_pb2.DT_INT32
-    field.expected.int32_value.append(7)
-    test_case.sizes.append(0)
-    field = test_case.field.add()
-    field.name = "bool_default"
-    field.dtype = types_pb2.DT_BOOL
-    field.expected.bool_value.append(True)
-    test_case.sizes.append(0)
-    field = test_case.field.add()
-    field.name = "string_default"
-    field.dtype = types_pb2.DT_STRING
-    field.expected.string_value.append("a")
-    test_case.sizes.append(0)
-    field = test_case.field.add()
-    field.name = "bytes_default"
-    field.dtype = types_pb2.DT_STRING
-    field.expected.string_value.append("a longer default string")
-    return test_case
-
-  @staticmethod
-  def minmax_test_case():
-    test_case = test_example_pb2.TestCase()
-    primitive = test_case.primitive.add()
-    primitive.double_value.append(-1.7976931348623158e+308)
-    primitive.double_value.append(2.2250738585072014e-308)
-    primitive.double_value.append(1.7976931348623158e+308)
-    primitive.float_value.append(-3.402823466e+38)
-    primitive.float_value.append(1.175494351e-38)
-    primitive.float_value.append(3.402823466e+38)
-    primitive.int64_value.append(-9223372036854775808)
-    primitive.int64_value.append(9223372036854775807)
-    primitive.sfixed64_value.append(-9223372036854775808)
-    primitive.sfixed64_value.append(9223372036854775807)
-    primitive.sint64_value.append(-9223372036854775808)
-    primitive.sint64_value.append(9223372036854775807)
-    primitive.uint64_value.append(0)
-    primitive.uint64_value.append(18446744073709551615)
-    primitive.fixed64_value.append(0)
-    primitive.fixed64_value.append(18446744073709551615)
-    primitive.int32_value.append(-2147483648)
-    primitive.int32_value.append(2147483647)
-    primitive.sfixed32_value.append(-2147483648)
-    primitive.sfixed32_value.append(2147483647)
-    primitive.sint32_value.append(-2147483648)
-    primitive.sint32_value.append(2147483647)
-    primitive.uint32_value.append(0)
-    primitive.uint32_value.append(4294967295)
-    primitive.fixed32_value.append(0)
-    primitive.fixed32_value.append(4294967295)
-    primitive.bool_value.append(False)
-    primitive.bool_value.append(True)
-    primitive.string_value.append("")
-    primitive.string_value.append("I refer to the infinite.")
-    test_case.shape.append(1)
-    test_case.sizes.append(3)
-    field = test_case.field.add()
-    field.name = "double_value"
-    field.dtype = types_pb2.DT_DOUBLE
-    field.expected.double_value.append(-1.7976931348623158e+308)
-    field.expected.double_value.append(2.2250738585072014e-308)
-    field.expected.double_value.append(1.7976931348623158e+308)
-    test_case.sizes.append(3)
-    field = test_case.field.add()
-    field.name = "float_value"
-    field.dtype = types_pb2.DT_FLOAT
-    field.expected.float_value.append(-3.402823466e+38)
-    field.expected.float_value.append(1.175494351e-38)
-    field.expected.float_value.append(3.402823466e+38)
-    test_case.sizes.append(2)
-    field = test_case.field.add()
-    field.name = "int64_value"
-    field.dtype = types_pb2.DT_INT64
-    field.expected.int64_value.append(-9223372036854775808)
-    field.expected.int64_value.append(9223372036854775807)
-    test_case.sizes.append(2)
-    field = test_case.field.add()
-    field.name = "sfixed64_value"
-    field.dtype = types_pb2.DT_INT64
-    field.expected.int64_value.append(-9223372036854775808)
-    field.expected.int64_value.append(9223372036854775807)
-    test_case.sizes.append(2)
-    field = test_case.field.add()
-    field.name = "sint64_value"
-    field.dtype = types_pb2.DT_INT64
-    field.expected.int64_value.append(-9223372036854775808)
-    field.expected.int64_value.append(9223372036854775807)
-    test_case.sizes.append(2)
-    field = test_case.field.add()
-    field.name = "uint64_value"
-    field.dtype = types_pb2.DT_INT64
-    field.expected.int64_value.append(0)
-    field.expected.int64_value.append(-1)
-    test_case.sizes.append(2)
-    field = test_case.field.add()
-    field.name = "fixed64_value"
-    field.dtype = types_pb2.DT_INT64
-    field.expected.int64_value.append(0)
-    field.expected.int64_value.append(-1)
-    test_case.sizes.append(2)
-    field = test_case.field.add()
-    field.name = "int32_value"
-    field.dtype = types_pb2.DT_INT32
-    field.expected.int32_value.append(-2147483648)
-    field.expected.int32_value.append(2147483647)
-    test_case.sizes.append(2)
-    field = test_case.field.add()
-    field.name = "sfixed32_value"
-    field.dtype = types_pb2.DT_INT32
-    field.expected.int32_value.append(-2147483648)
-    field.expected.int32_value.append(2147483647)
-    test_case.sizes.append(2)
-    field = test_case.field.add()
-    field.name = "sint32_value"
-    field.dtype = types_pb2.DT_INT32
-    field.expected.int32_value.append(-2147483648)
-    field.expected.int32_value.append(2147483647)
-    test_case.sizes.append(2)
-    field = test_case.field.add()
-    field.name = "uint32_value"
-    field.dtype = types_pb2.DT_INT32
-    field.expected.int32_value.append(0)
-    field.expected.int32_value.append(-1)
-    test_case.sizes.append(2)
-    field = test_case.field.add()
-    field.name = "fixed32_value"
-    field.dtype = types_pb2.DT_INT32
-    field.expected.int32_value.append(0)
-    field.expected.int32_value.append(-1)
-    test_case.sizes.append(2)
-    field = test_case.field.add()
-    field.name = "bool_value"
-    field.dtype = types_pb2.DT_BOOL
-    field.expected.bool_value.append(False)
-    field.expected.bool_value.append(True)
-    test_case.sizes.append(2)
-    field = test_case.field.add()
-    field.name = "string_value"
-    field.dtype = types_pb2.DT_STRING
-    field.expected.string_value.append("")
-    field.expected.string_value.append("I refer to the infinite.")
-    return test_case
-
-  @staticmethod
-  def nested_test_case():
-    test_case = test_example_pb2.TestCase()
-    primitive = test_case.primitive.add()
-    message_value = primitive.message_value.add()
-    message_value.double_value = 23.5
-    test_case.shape.append(1)
-    test_case.sizes.append(1)
-    field = test_case.field.add()
-    field.name = "message_value"
-    field.dtype = types_pb2.DT_STRING
-    message_value = field.expected.message_value.add()
-    message_value.double_value = 23.5
-    return test_case
-
-  @staticmethod
-  def optional_test_case():
-    test_case = test_example_pb2.TestCase()
-    primitive = test_case.primitive.add()
-    primitive.bool_value.append(True)
-    test_case.shape.append(1)
-    test_case.sizes.append(1)
-    field = test_case.field.add()
-    field.name = "bool_value"
-    field.dtype = types_pb2.DT_BOOL
-    field.expected.bool_value.append(True)
-    test_case.sizes.append(0)
-    field = test_case.field.add()
-    field.name = "double_value"
-    field.dtype = types_pb2.DT_DOUBLE
-    field.expected.double_value.append(0.0)
-    return test_case
-
-  @staticmethod
-  def promote_unsigned_test_case():
-    test_case = test_example_pb2.TestCase()
-    primitive = test_case.primitive.add()
-    primitive.fixed32_value.append(4294967295)
-    primitive.uint32_value.append(4294967295)
-    test_case.shape.append(1)
-    test_case.sizes.append(1)
-    field = test_case.field.add()
-    field.name = "fixed32_value"
-    field.dtype = types_pb2.DT_INT64
-    field.expected.int64_value.append(4294967295)
-    test_case.sizes.append(1)
-    field = test_case.field.add()
-    field.name = "uint32_value"
-    field.dtype = types_pb2.DT_INT64
-    field.expected.int64_value.append(4294967295)
-    # Comes from an explicitly-specified default
-    test_case.sizes.append(0)
-    field = test_case.field.add()
-    field.name = "uint32_default"
-    field.dtype = types_pb2.DT_INT64
-    field.expected.int64_value.append(4294967295)
-    return test_case
-
-  @staticmethod
-  def ragged_test_case():
-    test_case = test_example_pb2.TestCase()
-    primitive = test_case.primitive.add()
-    primitive.double_value.append(23.5)
-    primitive.double_value.append(123.0)
-    primitive.bool_value.append(True)
-    primitive = test_case.primitive.add()
-    primitive.double_value.append(3.1)
-    primitive.bool_value.append(False)
-    test_case.shape.append(2)
-    test_case.sizes.append(2)
-    test_case.sizes.append(1)
-    test_case.sizes.append(1)
-    test_case.sizes.append(1)
-    field = test_case.field.add()
-    field.name = "double_value"
-    field.dtype = types_pb2.DT_DOUBLE
-    field.expected.double_value.append(23.5)
-    field.expected.double_value.append(123.0)
-    field.expected.double_value.append(3.1)
-    field.expected.double_value.append(0.0)
-    field = test_case.field.add()
-    field.name = "bool_value"
-    field.dtype = types_pb2.DT_BOOL
-    field.expected.bool_value.append(True)
-    field.expected.bool_value.append(False)
-    return test_case
-
-  @staticmethod
-  def shaped_batch_test_case():
-    test_case = test_example_pb2.TestCase()
-    primitive = test_case.primitive.add()
-    primitive.double_value.append(23.5)
-    primitive.bool_value.append(True)
-    primitive = test_case.primitive.add()
-    primitive.double_value.append(44.0)
-    primitive.bool_value.append(False)
-    primitive = test_case.primitive.add()
-    primitive.double_value.append(3.14159)
-    primitive.bool_value.append(True)
-    primitive = test_case.primitive.add()
-    primitive.double_value.append(1.414)
-    primitive.bool_value.append(True)
-    primitive = test_case.primitive.add()
-    primitive.double_value.append(-32.2)
-    primitive.bool_value.append(False)
-    primitive = test_case.primitive.add()
-    primitive.double_value.append(0.0001)
-    primitive.bool_value.append(True)
-    test_case.shape.append(3)
-    test_case.shape.append(2)
-    for _ in range(12):
-      test_case.sizes.append(1)
-    field = test_case.field.add()
-    field.name = "double_value"
-    field.dtype = types_pb2.DT_DOUBLE
-    field.expected.double_value.append(23.5)
-    field.expected.double_value.append(44.0)
-    field.expected.double_value.append(3.14159)
-    field.expected.double_value.append(1.414)
-    field.expected.double_value.append(-32.2)
-    field.expected.double_value.append(0.0001)
-    field = test_case.field.add()
-    field.name = "bool_value"
-    field.dtype = types_pb2.DT_BOOL
-    field.expected.bool_value.append(True)
-    field.expected.bool_value.append(False)
-    field.expected.bool_value.append(True)
-    field.expected.bool_value.append(True)
-    field.expected.bool_value.append(False)
-    field.expected.bool_value.append(True)
-    return test_case
-
-  @staticmethod
-  def simple_test_case():
-    test_case = test_example_pb2.TestCase()
-    primitive = test_case.primitive.add()
-    primitive.double_value.append(23.5)
-    primitive.bool_value.append(True)
-    test_case.shape.append(1)
-    test_case.sizes.append(1)
-    field = test_case.field.add()
-    field.name = "double_value"
-    field.dtype = types_pb2.DT_DOUBLE
-    field.expected.double_value.append(23.5)
-    test_case.sizes.append(1)
-    field = test_case.field.add()
-    field.name = "bool_value"
-    field.dtype = types_pb2.DT_BOOL
-    field.expected.bool_value.append(True)
-    return test_case
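
For orientation, this is roughly how the removed named_parameters() helper is consumed: each (name, TestCase) tuple becomes one generated test. A minimal sketch assuming absl's parameterized framework; the class, method, and assertion are illustrative and not part of this patch. The one invariant it checks is visible in the builders above: one expected size per field per batch element.

import numpy as np
from absl.testing import parameterized

class DecodeProtoOpTest(ProtoOpTestBase, parameterized.TestCase):

  @parameterized.named_parameters(*ProtoOpTestBase.named_parameters())
  def testSizesMatchFields(self, case):
    # E.g. shaped_batch: shape [3, 2] with 2 fields -> 12 sizes.
    self.assertEqual(len(case.sizes), len(case.field) * np.prod(case.shape))
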
diff --git a/tensorflow/contrib/proto/python/kernel_tests/test_example.proto b/tensorflow/contrib/proto/python/kernel_tests/test_example.proto
index a2c88e3..674d881 100644
--- a/tensorflow/contrib/proto/python/kernel_tests/test_example.proto
+++ b/tensorflow/contrib/proto/python/kernel_tests/test_example.proto
@@ -1,6 +1,4 @@
 // Test description and protos to work with it.
-//
-// Many of the protos in this file are for unit tests that haven't been written yet.
 
 syntax = "proto2";
 
@@ -8,32 +6,103 @@
 
 package tensorflow.contrib.proto;
 
-// A TestCase holds a proto and a bunch of assertions
-// about how it should decode.
+// A TestCase holds a proto and assertions about how it should decode.
 message TestCase {
-  // A batch of primitives to be serialized and decoded.
-  repeated RepeatedPrimitiveValue primitive = 1;
-  // The shape of the batch.
-  repeated int32 shape = 2;
+  // Batches of primitive values.
+  repeated TestValue values = 1;
+  // The batch shapes.
+  repeated int32 shapes = 2;
   // Expected sizes for each field.
   repeated int32 sizes = 3;
   // Expected values for each field.
-  repeated FieldSpec field = 4;
+  repeated FieldSpec fields = 4;
 };
 
 // FieldSpec describes the expected output for a single field.
 message FieldSpec {
   optional string name = 1;
   optional tensorflow.DataType dtype = 2;
-  optional RepeatedPrimitiveValue expected = 3;
+  optional TestValue value = 3;
 };
 
+// NOTE: This definition must be kept in sync with PackedTestValue.
 message TestValue {
-  optional PrimitiveValue primitive_value = 1;
-  optional EnumValue enum_value = 2;
-  optional MessageValue message_value = 3;
-  optional RepeatedMessageValue repeated_message_value = 4;
-  optional RepeatedPrimitiveValue repeated_primitive_value = 6;
+  repeated double double_value = 1;
+  repeated float float_value = 2;
+  repeated int64 int64_value = 3;
+  repeated uint64 uint64_value = 4;
+  repeated int32 int32_value = 5;
+  repeated fixed64 fixed64_value = 6;
+  repeated fixed32 fixed32_value = 7;
+  repeated bool bool_value = 8;
+  repeated string string_value = 9;
+  repeated bytes bytes_value = 12;
+  repeated uint32 uint32_value = 13;
+  repeated sfixed32 sfixed32_value = 15;
+  repeated sfixed64 sfixed64_value = 16;
+  repeated sint32 sint32_value = 17;
+  repeated sint64 sint64_value = 18;
+  repeated PrimitiveValue message_value = 19;
+
+  // Optional fields with explicitly-specified defaults.
+  optional double double_value_with_default = 20 [default = 1.0];
+  optional float float_value_with_default = 21 [default = 2.0];
+  optional int64 int64_value_with_default = 22 [default = 3];
+  optional uint64 uint64_value_with_default = 23 [default = 4];
+  optional int32 int32_value_with_default = 24 [default = 5];
+  optional fixed64 fixed64_value_with_default = 25 [default = 6];
+  optional fixed32 fixed32_value_with_default = 26 [default = 7];
+  optional bool bool_value_with_default = 27 [default = true];
+  optional string string_value_with_default = 28 [default = "a"];
+  optional bytes bytes_value_with_default = 29
+      [default = "a longer default string"];
+  optional uint32 uint32_value_with_default = 30 [default = 9];
+  optional sfixed32 sfixed32_value_with_default = 31 [default = 10];
+  optional sfixed64 sfixed64_value_with_default = 32 [default = 11];
+  optional sint32 sint32_value_with_default = 33 [default = 12];
+  optional sint64 sint64_value_with_default = 34 [default = 13];
+}
+
+// A PackedTestValue looks exactly the same as a TestValue in the text format,
+// but the binary serialization is different. We test the packed representations
+// by loading the same test cases using this definition instead of TestValue.
+//
+// NOTE: This definition must be kept in sync with TestValue in every way except
+// the packed=true declaration.
+message PackedTestValue {
+  repeated double double_value = 1 [packed = true];
+  repeated float float_value = 2 [packed = true];
+  repeated int64 int64_value = 3 [packed = true];
+  repeated uint64 uint64_value = 4 [packed = true];
+  repeated int32 int32_value = 5 [packed = true];
+  repeated fixed64 fixed64_value = 6 [packed = true];
+  repeated fixed32 fixed32_value = 7 [packed = true];
+  repeated bool bool_value = 8 [packed = true];
+  repeated string string_value = 9;
+  repeated bytes bytes_value = 12;
+  repeated uint32 uint32_value = 13 [packed = true];
+  repeated sfixed32 sfixed32_value = 15 [packed = true];
+  repeated sfixed64 sfixed64_value = 16 [packed = true];
+  repeated sint32 sint32_value = 17 [packed = true];
+  repeated sint64 sint64_value = 18 [packed = true];
+  repeated PrimitiveValue message_value = 19;
+
+  optional double double_value_with_default = 20 [default = 1.0];
+  optional float float_value_with_default = 21 [default = 2.0];
+  optional int64 int64_value_with_default = 22 [default = 3];
+  optional uint64 uint64_value_with_default = 23 [default = 4];
+  optional int32 int32_value_with_default = 24 [default = 5];
+  optional fixed64 fixed64_value_with_default = 25 [default = 6];
+  optional fixed32 fixed32_value_with_default = 26 [default = 7];
+  optional bool bool_value_with_default = 27 [default = true];
+  optional string string_value_with_default = 28 [default = "a"];
+  optional bytes bytes_value_with_default = 29
+      [default = "a longer default string"];
+  optional uint32 uint32_value_with_default = 30 [default = 9];
+  optional sfixed32 sfixed32_value_with_default = 31 [default = 10];
+  optional sfixed64 sfixed64_value_with_default = 32 [default = 11];
+  optional sint32 sint32_value_with_default = 33 [default = 12];
+  optional sint64 sint64_value_with_default = 34 [default = 13];
 }
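
The packed/unpacked distinction above is purely a wire-format one; both message definitions parse the same values. A self-contained sketch of the two encodings for a repeated varint field (field number 3, as in int64_value; the helper is hand-rolled for illustration, not a protobuf API):

def varint(n):
  # Base-128 varint encoding of a non-negative integer.
  out = bytearray()
  while True:
    out.append((n & 0x7F) | (0x80 if n > 0x7F else 0))
    n >>= 7
    if not n:
      return bytes(out)

def unpacked(field, values):
  tag = varint(field << 3 | 0)  # wire type 0: one tag per element
  return b"".join(tag + varint(v) for v in values)

def packed(field, values):
  payload = b"".join(varint(v) for v in values)
  return varint(field << 3 | 2) + varint(len(payload)) + payload

print(unpacked(3, [1, 2, 300]).hex(" "))  # 18 01 18 02 18 ac 02
print(packed(3, [1, 2, 300]).hex(" "))    # 1a 04 01 02 ac 02

Conformant parsers accept either form for a packable field, which is why the same text-format test cases can be loaded through both definitions.
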
 
 message PrimitiveValue {
@@ -54,83 +123,16 @@
   optional sint64 sint64_value = 18;
 }
 
-// NOTE: This definition must be kept in sync with PackedPrimitiveValue.
-message RepeatedPrimitiveValue {
-  repeated double double_value = 1;
-  repeated float float_value = 2;
-  repeated int64 int64_value = 3;
-  repeated uint64 uint64_value = 4;
-  repeated int32 int32_value = 5;
-  repeated fixed64 fixed64_value = 6;
-  repeated fixed32 fixed32_value = 7;
-  repeated bool bool_value = 8;
-  repeated string string_value = 9;
-  repeated bytes bytes_value = 12;
-  repeated uint32 uint32_value = 13;
-  repeated sfixed32 sfixed32_value = 15;
-  repeated sfixed64 sfixed64_value = 16;
-  repeated sint32 sint32_value = 17;
-  repeated sint64 sint64_value = 18;
-  repeated PrimitiveValue message_value = 19;
-
-  // Optional fields with explicitly-specified defaults.
-  optional double double_default = 20 [default = 1.0];
-  optional float float_default = 21 [default = 2.0];
-  optional int64 int64_default = 22 [default = 3];
-  optional uint64 uint64_default = 23 [default = 4];
-  optional int32 int32_default = 24 [default = 5];
-  optional fixed64 fixed64_default = 25 [default = 6];
-  optional fixed32 fixed32_default = 26 [default = 7];
-  optional bool bool_default = 27 [default = true];
-  optional string string_default = 28 [default = "a"];
-  optional bytes bytes_default = 29 [default = "a longer default string"];
-  optional uint32 uint32_default = 30 [default = 4294967295];
-  optional sfixed32 sfixed32_default = 31 [default = 10];
-  optional sfixed64 sfixed64_default = 32 [default = 11];
-  optional sint32 sint32_default = 33 [default = 12];
-  optional sint64 sint64_default = 34 [default = 13];
+// Message containing fields with field numbers higher than any field above.
+// An instance of this message is prepended to each binary message in the test
+// to exercise the code path that handles fields encoded out of order of field
+// number.
+message ExtraFields {
+  optional string string_value = 1776;
+  optional bool bool_value = 1777;
 }
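
On the prepending trick: the protobuf wire format imposes no ordering on field numbers, so bytes for field 1777 may legally precede bytes for a low-numbered field. A tiny wire-level illustration (tag bytes precomputed as varint(field_number << 3 | wire_type); both values assumed true):

extra = b"\x88\x6f\x01"  # tag for field 1777 (wire type 0) + value 1
low = b"\x40\x01"        # tag for field 8 (wire type 0) + value 1
sample = extra + low     # legal message; decoders must not assume order
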
 
-// A PackedPrimitiveValue looks exactly the same as a RepeatedPrimitiveValue
-// in the text format, but the binary serializion is different.
-// We test the packed representations by loading the same test cases
-// using this definition instead of RepeatedPrimitiveValue.
-// NOTE: This definition must be kept in sync with RepeatedPrimitiveValue
-// in every way except the packed=true declaration.
-message PackedPrimitiveValue {
-  repeated double double_value = 1 [packed = true];
-  repeated float float_value = 2 [packed = true];
-  repeated int64 int64_value = 3 [packed = true];
-  repeated uint64 uint64_value = 4 [packed = true];
-  repeated int32 int32_value = 5 [packed = true];
-  repeated fixed64 fixed64_value = 6 [packed = true];
-  repeated fixed32 fixed32_value = 7 [packed = true];
-  repeated bool bool_value = 8 [packed = true];
-  repeated string string_value = 9;
-  repeated bytes bytes_value = 12;
-  repeated uint32 uint32_value = 13 [packed = true];
-  repeated sfixed32 sfixed32_value = 15 [packed = true];
-  repeated sfixed64 sfixed64_value = 16 [packed = true];
-  repeated sint32 sint32_value = 17 [packed = true];
-  repeated sint64 sint64_value = 18 [packed = true];
-  repeated PrimitiveValue message_value = 19;
-
-  optional double double_default = 20 [default = 1.0];
-  optional float float_default = 21 [default = 2.0];
-  optional int64 int64_default = 22 [default = 3];
-  optional uint64 uint64_default = 23 [default = 4];
-  optional int32 int32_default = 24 [default = 5];
-  optional fixed64 fixed64_default = 25 [default = 6];
-  optional fixed32 fixed32_default = 26 [default = 7];
-  optional bool bool_default = 27 [default = true];
-  optional string string_default = 28 [default = "a"];
-  optional bytes bytes_default = 29 [default = "a longer default string"];
-  optional uint32 uint32_default = 30 [default = 4294967295];
-  optional sfixed32 sfixed32_default = 31 [default = 10];
-  optional sfixed64 sfixed64_default = 32 [default = 11];
-  optional sint32 sint32_default = 33 [default = 12];
-  optional sint64 sint64_default = 34 [default = 13];
-}
+// The messages below are for yet-to-be-written tests.
 
 message EnumValue {
   enum Color {
@@ -171,12 +173,3 @@
 
   repeated NestedMessageValue message_values = 11;
 }
-
-// Message containing fields with field numbers higher than any field above. An
-// instance of this message is prepended to each binary message in the test to
-// exercise the code path that handles fields encoded out of order of field
-// number.
-message ExtraFields {
-  optional string string_value = 1776;
-  optional bool bool_value = 1777;
-}
diff --git a/tensorflow/contrib/rpc/python/kernel_tests/BUILD b/tensorflow/contrib/rpc/python/kernel_tests/BUILD
index 2311c15..cb0b89a 100644
--- a/tensorflow/contrib/rpc/python/kernel_tests/BUILD
+++ b/tensorflow/contrib/rpc/python/kernel_tests/BUILD
@@ -1,5 +1,3 @@
-# TODO(b/76425722): Port everything in here to OS (currently excluded).
-
 package(default_visibility = ["//visibility:public"])
 
 licenses(["notice"])  # Apache 2.0
@@ -17,7 +15,6 @@
     srcs = ["test_example.proto"],
     has_services = 1,
     cc_api_version = 2,
-    protodeps = ["//tensorflow/core:protos_all"],
 )
 
 py_library(
diff --git a/tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_base.py b/tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_base.py
index 27273d1..1c23c28 100644
--- a/tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_base.py
+++ b/tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_base.py
@@ -51,23 +51,23 @@
   def testScalarHostPortRpc(self):
     with self.test_session() as sess:
       request_tensors = (
-          test_example_pb2.TestCase(shape=[1, 2, 3]).SerializeToString())
+          test_example_pb2.TestCase(values=[1, 2, 3]).SerializeToString())
       response_tensors = self.rpc(
-          method=self.get_method_name('IncrementTestShapes'),
+          method=self.get_method_name('Increment'),
           address=self._address,
           request=request_tensors)
       self.assertEqual(response_tensors.shape, ())
       response_values = sess.run(response_tensors)
     response_message = test_example_pb2.TestCase()
     self.assertTrue(response_message.ParseFromString(response_values))
-    self.assertAllEqual([2, 3, 4], response_message.shape)
+    self.assertAllEqual([2, 3, 4], response_message.values)
 
   def testScalarHostPortTryRpc(self):
     with self.test_session() as sess:
       request_tensors = (
-          test_example_pb2.TestCase(shape=[1, 2, 3]).SerializeToString())
+          test_example_pb2.TestCase(values=[1, 2, 3]).SerializeToString())
       response_tensors, status_code, status_message = self.try_rpc(
-          method=self.get_method_name('IncrementTestShapes'),
+          method=self.get_method_name('Increment'),
           address=self._address,
           request=request_tensors)
       self.assertEqual(status_code.shape, ())
@@ -77,7 +77,7 @@
           sess.run((response_tensors, status_code, status_message)))
     response_message = test_example_pb2.TestCase()
     self.assertTrue(response_message.ParseFromString(response_values))
-    self.assertAllEqual([2, 3, 4], response_message.shape)
+    self.assertAllEqual([2, 3, 4], response_message.values)
     # For the base Rpc op, don't expect to get error status back.
     self.assertEqual(errors.OK, status_code_values)
     self.assertEqual(b'', status_message_values)
@@ -86,7 +86,7 @@
     with self.test_session() as sess:
       request_tensors = []
       response_tensors = self.rpc(
-          method=self.get_method_name('IncrementTestShapes'),
+          method=self.get_method_name('Increment'),
           address=self._address,
           request=request_tensors)
       self.assertAllEqual(response_tensors.shape, [0])
@@ -95,7 +95,7 @@
 
   def testInvalidMethod(self):
     for method in [
-        '/InvalidService.IncrementTestShapes',
+        '/InvalidService.Increment',
         self.get_method_name('InvalidMethodName')
     ]:
       with self.test_session() as sess:
@@ -115,12 +115,12 @@
       with self.assertRaises(errors.UnavailableError):
         sess.run(
             self.rpc(
-                method=self.get_method_name('IncrementTestShapes'),
+                method=self.get_method_name('Increment'),
                 address=address,
                 request=''))
       _, status_code_value, status_message_value = sess.run(
           self.try_rpc(
-              method=self.get_method_name('IncrementTestShapes'),
+              method=self.get_method_name('Increment'),
               address=address,
               request=''))
       self.assertEqual(errors.UNAVAILABLE, status_code_value)
@@ -182,10 +182,10 @@
     with self.test_session() as sess:
       request_tensors = [
           test_example_pb2.TestCase(
-              shape=[i, i + 1, i + 2]).SerializeToString() for i in range(20)
+              values=[i, i + 1, i + 2]).SerializeToString() for i in range(20)
       ]
       response_tensors = self.rpc(
-          method=self.get_method_name('IncrementTestShapes'),
+          method=self.get_method_name('Increment'),
           address=self._address,
           request=request_tensors)
       self.assertEqual(response_tensors.shape, (20,))
@@ -194,17 +194,17 @@
     for i in range(20):
       response_message = test_example_pb2.TestCase()
       self.assertTrue(response_message.ParseFromString(response_values[i]))
-      self.assertAllEqual([i + 1, i + 2, i + 3], response_message.shape)
+      self.assertAllEqual([i + 1, i + 2, i + 3], response_message.values)
 
   def testVecHostPortManyParallelRpcs(self):
     with self.test_session() as sess:
       request_tensors = [
           test_example_pb2.TestCase(
-              shape=[i, i + 1, i + 2]).SerializeToString() for i in range(20)
+              values=[i, i + 1, i + 2]).SerializeToString() for i in range(20)
       ]
       many_response_tensors = [
           self.rpc(
-              method=self.get_method_name('IncrementTestShapes'),
+              method=self.get_method_name('Increment'),
               address=self._address,
               request=request_tensors) for _ in range(10)
       ]
@@ -216,25 +216,25 @@
       for i in range(20):
         response_message = test_example_pb2.TestCase()
         self.assertTrue(response_message.ParseFromString(response_values[i]))
-        self.assertAllEqual([i + 1, i + 2, i + 3], response_message.shape)
+        self.assertAllEqual([i + 1, i + 2, i + 3], response_message.values)
 
   def testVecHostPortRpcUsingEncodeAndDecodeProto(self):
     with self.test_session() as sess:
       request_tensors = encode_proto_op.encode_proto(
           message_type='tensorflow.contrib.rpc.TestCase',
-          field_names=['shape'],
+          field_names=['values'],
           sizes=[[3]] * 20,
           values=[
               [[i, i + 1, i + 2] for i in range(20)],
           ])
       response_tensor_strings = self.rpc(
-          method=self.get_method_name('IncrementTestShapes'),
+          method=self.get_method_name('Increment'),
           address=self._address,
           request=request_tensors)
       _, (response_shape,) = decode_proto_op.decode_proto(
           bytes=response_tensor_strings,
           message_type='tensorflow.contrib.rpc.TestCase',
-          field_names=['shape'],
+          field_names=['values'],
           output_types=[dtypes.int32])
       response_shape_values = sess.run(response_shape)
     self.assertAllEqual([[i + 1, i + 2, i + 3]
@@ -285,9 +285,9 @@
       addresses = flatten([[
           self._address, 'unix:/tmp/this_unix_socket_doesnt_exist_97820348!!@'
       ] for _ in range(10)])
-      request = test_example_pb2.TestCase(shape=[0, 1, 2]).SerializeToString()
+      request = test_example_pb2.TestCase(values=[0, 1, 2]).SerializeToString()
       response_tensors, status_code, _ = self.try_rpc(
-          method=self.get_method_name('IncrementTestShapes'),
+          method=self.get_method_name('Increment'),
           address=addresses,
           request=request)
       response_tensors_values, status_code_values = sess.run((response_tensors,
@@ -303,9 +303,9 @@
     flatten = lambda x: list(itertools.chain.from_iterable(x))
     with self.test_session() as sess:
       methods = flatten(
-          [[self.get_method_name('IncrementTestShapes'), 'InvalidMethodName']
+          [[self.get_method_name('Increment'), 'InvalidMethodName']
            for _ in range(10)])
-      request = test_example_pb2.TestCase(shape=[0, 1, 2]).SerializeToString()
+      request = test_example_pb2.TestCase(values=[0, 1, 2]).SerializeToString()
       response_tensors, status_code, _ = self.try_rpc(
           method=methods, address=self._address, request=request)
       response_tensors_values, status_code_values = sess.run((response_tensors,
@@ -325,10 +325,10 @@
       ] for _ in range(10)])
       requests = [
           test_example_pb2.TestCase(
-              shape=[i, i + 1, i + 2]).SerializeToString() for i in range(20)
+              values=[i, i + 1, i + 2]).SerializeToString() for i in range(20)
       ]
       response_tensors, status_code, _ = self.try_rpc(
-          method=self.get_method_name('IncrementTestShapes'),
+          method=self.get_method_name('Increment'),
           address=addresses,
           request=requests)
       response_tensors_values, status_code_values = sess.run((response_tensors,
@@ -343,4 +343,4 @@
           response_message = test_example_pb2.TestCase()
           self.assertTrue(
               response_message.ParseFromString(response_tensors_values[i]))
-          self.assertAllEqual([i + 1, i + 2, i + 3], response_message.shape)
+          self.assertAllEqual([i + 1, i + 2, i + 3], response_message.values)
diff --git a/tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_servicer.py b/tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_servicer.py
index 7cbd636..265254a 100644
--- a/tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_servicer.py
+++ b/tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_servicer.py
@@ -30,8 +30,8 @@
 class RpcOpTestServicer(test_example_pb2_grpc.TestCaseServiceServicer):
   """Test servicer for RpcOp tests."""
 
-  def IncrementTestShapes(self, request, context):
-    """Increment the entries in the shape attribute of request.
+  def Increment(self, request, context):
+    """Increment the entries in the `values` attribute of request.
 
     Args:
       request: input TestCase.
@@ -40,8 +40,8 @@
     Returns:
       output TestCase.
     """
-    for i in range(len(request.shape)):
-      request.shape[i] += 1
+    for i in range(len(request.values)):
+      request.values[i] += 1
     return request
 
   def AlwaysFailWithInvalidArgument(self, request, context):
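
For readers unfamiliar with the generated glue: the renamed Increment handler is exposed through the service registration that test_example_pb2_grpc generates for TestCaseService. A hedged bootstrap sketch (server side only; the registration name follows grpc's standard add_<Service>Servicer_to_server convention):

import grpc
from concurrent import futures

server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
test_example_pb2_grpc.add_TestCaseServiceServicer_to_server(
    RpcOpTestServicer(), server)
port = server.add_insecure_port("[::]:0")  # any free port
server.start()
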
diff --git a/tensorflow/contrib/rpc/python/kernel_tests/test_example.proto b/tensorflow/contrib/rpc/python/kernel_tests/test_example.proto
index 96f4550..8141466 100644
--- a/tensorflow/contrib/rpc/python/kernel_tests/test_example.proto
+++ b/tensorflow/contrib/rpc/python/kernel_tests/test_example.proto
@@ -1,29 +1,17 @@
 // Test description and protos to work with it.
-//
-// Many of the protos in this file are for unit tests that haven't been written yet.
 
 syntax = "proto2";
 
-import "tensorflow/core/framework/types.proto";
-
 package tensorflow.contrib.rpc;
 
-// A TestCase holds a proto and a bunch of assertions
-// about how it should decode.
+// A TestCase holds a sequence of values.
 message TestCase {
-  // A batch of primitives to be serialized and decoded.
-  repeated RepeatedPrimitiveValue primitive = 1;
-  // The shape of the batch.
-  repeated int32 shape = 2;
-  // Expected sizes for each field.
-  repeated int32 sizes = 3;
-  // Expected values for each field.
-  repeated FieldSpec field = 4;
+  repeated int32 values = 1;
 };
 
 service TestCaseService {
-  // Copy input, and increment each entry in 'shape' by 1.
-  rpc IncrementTestShapes(TestCase) returns (TestCase) {
+  // Copy input, and increment each entry in 'values' by 1.
+  rpc Increment(TestCase) returns (TestCase) {
   }
 
   // Sleep forever.
@@ -42,130 +30,3 @@
   rpc SometimesFailWithInvalidArgument(TestCase) returns (TestCase) {
   }
 };
-
-// FieldSpec describes the expected output for a single field.
-message FieldSpec {
-  optional string name = 1;
-  optional tensorflow.DataType dtype = 2;
-  optional RepeatedPrimitiveValue expected = 3;
-};
-
-message TestValue {
-  optional PrimitiveValue primitive_value = 1;
-  optional EnumValue enum_value = 2;
-  optional MessageValue message_value = 3;
-  optional RepeatedMessageValue repeated_message_value = 4;
-  optional RepeatedPrimitiveValue repeated_primitive_value = 6;
-}
-
-message PrimitiveValue {
-  optional double double_value = 1;
-  optional float float_value = 2;
-  optional int64 int64_value = 3;
-  optional uint64 uint64_value = 4;
-  optional int32 int32_value = 5;
-  optional fixed64 fixed64_value = 6;
-  optional fixed32 fixed32_value = 7;
-  optional bool bool_value = 8;
-  optional string string_value = 9;
-  optional bytes bytes_value = 12;
-  optional uint32 uint32_value = 13;
-  optional sfixed32 sfixed32_value = 15;
-  optional sfixed64 sfixed64_value = 16;
-  optional sint32 sint32_value = 17;
-  optional sint64 sint64_value = 18;
-}
-
-// NOTE: This definition must be kept in sync with PackedPrimitiveValue.
-message RepeatedPrimitiveValue {
-  repeated double double_value = 1;
-  repeated float float_value = 2;
-  repeated int64 int64_value = 3;
-  repeated uint64 uint64_value = 4;
-  repeated int32 int32_value = 5;
-  repeated fixed64 fixed64_value = 6;
-  repeated fixed32 fixed32_value = 7;
-  repeated bool bool_value = 8;
-  repeated string string_value = 9;
-  repeated bytes bytes_value = 12;
-  repeated uint32 uint32_value = 13;
-  repeated sfixed32 sfixed32_value = 15;
-  repeated sfixed64 sfixed64_value = 16;
-  repeated sint32 sint32_value = 17;
-  repeated sint64 sint64_value = 18;
-  repeated PrimitiveValue message_value = 19;
-}
-
-// A PackedPrimitiveValue looks exactly the same as a RepeatedPrimitiveValue
-// in the text format, but the binary serializion is different.
-// We test the packed representations by loading the same test cases
-// using this definition instead of RepeatedPrimitiveValue.
-// NOTE: This definition must be kept in sync with RepeatedPrimitiveValue
-// in every way except the packed=true declaration.
-message PackedPrimitiveValue {
-  repeated double double_value = 1 [packed = true];
-  repeated float float_value = 2 [packed = true];
-  repeated int64 int64_value = 3 [packed = true];
-  repeated uint64 uint64_value = 4 [packed = true];
-  repeated int32 int32_value = 5 [packed = true];
-  repeated fixed64 fixed64_value = 6 [packed = true];
-  repeated fixed32 fixed32_value = 7 [packed = true];
-  repeated bool bool_value = 8 [packed = true];
-  repeated string string_value = 9;
-  repeated bytes bytes_value = 12;
-  repeated uint32 uint32_value = 13 [packed = true];
-  repeated sfixed32 sfixed32_value = 15 [packed = true];
-  repeated sfixed64 sfixed64_value = 16 [packed = true];
-  repeated sint32 sint32_value = 17 [packed = true];
-  repeated sint64 sint64_value = 18 [packed = true];
-  repeated PrimitiveValue message_value = 19;
-}
-
-message EnumValue {
-  enum Color {
-    RED = 0;
-    ORANGE = 1;
-    YELLOW = 2;
-    GREEN = 3;
-    BLUE = 4;
-    INDIGO = 5;
-    VIOLET = 6;
-  };
-  optional Color enum_value = 14;
-  repeated Color repeated_enum_value = 15;
-}
-
-
-message InnerMessageValue {
-  optional float float_value = 2;
-  repeated bytes bytes_values = 8;
-}
-
-message MiddleMessageValue {
-  repeated int32 int32_values = 5;
-  optional InnerMessageValue message_value = 11;
-  optional uint32 uint32_value = 13;
-}
-
-message MessageValue {
-  optional double double_value = 1;
-  optional MiddleMessageValue message_value = 11;
-}
-
-message RepeatedMessageValue {
-  message NestedMessageValue {
-    optional float float_value = 2;
-    repeated bytes bytes_values = 8;
-  }
-
-  repeated NestedMessageValue message_values = 11;
-}
-
-// Message containing fields with field numbers higher than any field above. An
-// instance of this message is prepended to each binary message in the test to
-// exercise the code path that handles fields encoded out of order of field
-// number.
-message ExtraFields {
-  optional string string_value = 1776;
-  optional bool bool_value = 1777;
-}
diff --git a/tensorflow/contrib/seq2seq/python/kernel_tests/beam_search_decoder_test.py b/tensorflow/contrib/seq2seq/python/kernel_tests/beam_search_decoder_test.py
index 1783286..4073b39 100644
--- a/tensorflow/contrib/seq2seq/python/kernel_tests/beam_search_decoder_test.py
+++ b/tensorflow/contrib/seq2seq/python/kernel_tests/beam_search_decoder_test.py
@@ -132,6 +132,48 @@
   def test_gather_tree_from_array_2d(self):
     self._test_gather_tree_from_array(depth_ndims=2)
 
+  def test_gather_tree_from_array_complex_trajectory(self):
+    # Max. time = 7, batch = 1, beam = 5.
+    array = np.expand_dims(np.array(
+        [[[25, 12, 114, 89, 97]],
+         [[9, 91, 64, 11, 162]],
+         [[34, 34, 34, 34, 34]],
+         [[2, 4, 2, 2, 4]],
+         [[2, 3, 6, 2, 2]],
+         [[2, 2, 2, 3, 2]],
+         [[2, 2, 2, 2, 2]]]), -1)
+    parent_ids = np.array(
+        [[[0, 0, 0, 0, 0]],
+         [[0, 0, 0, 0, 0]],
+         [[0, 1, 2, 3, 4]],
+         [[0, 0, 1, 2, 1]],
+         [[0, 1, 1, 2, 3]],
+         [[0, 1, 3, 1, 2]],
+         [[0, 1, 2, 3, 4]]])
+    expected_array = np.expand_dims(np.array(
+        [[[25, 25, 25, 25, 25]],
+         [[9, 9, 91, 9, 9]],
+         [[34, 34, 34, 34, 34]],
+         [[2, 4, 2, 4, 4]],
+         [[2, 3, 6, 3, 6]],
+         [[2, 2, 2, 3, 2]],
+         [[2, 2, 2, 2, 2]]]), -1)
+    sequence_length = [[4, 6, 4, 7, 6]]
+
+    array = ops.convert_to_tensor(
+        array, dtype=dtypes.float32)
+    parent_ids = ops.convert_to_tensor(
+        parent_ids, dtype=dtypes.int32)
+    expected_array = ops.convert_to_tensor(
+        expected_array, dtype=dtypes.float32)
+
+    sorted_array = beam_search_decoder.gather_tree_from_array(
+        array, parent_ids, sequence_length)
+
+    with self.test_session() as sess:
+      sorted_array, expected_array = sess.run([sorted_array, expected_array])
+      self.assertAllEqual(expected_array, sorted_array)
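
The expected_array above is what backtracking each beam through parent_ids produces. A simplified NumPy re-implementation of that backtracking (single shared maximum length, no end_token filling, so only an approximation of the real gather_tree op):

import numpy as np

def backtrack(step_ids, parent_ids, max_len):
  # step_ids, parent_ids: integer arrays of shape [max_time, batch, beam].
  out = np.copy(step_ids)
  _, batch, beam = step_ids.shape
  for b in range(batch):
    for k in range(beam):
      parent = k
      for t in range(max_len - 1, -1, -1):
        out[t, b, k] = step_ids[t, b, parent]
        parent = parent_ids[t, b, parent]
  return out

Steps past a beam's own sequence_length are then overwritten with the original beam, which is what the where() call in the library change below does.
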
+
 
 class TestArrayShapeChecks(test.TestCase):
 
diff --git a/tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py b/tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py
index c7fbeea..f17dbb0 100644
--- a/tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py
+++ b/tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py
@@ -145,24 +145,20 @@
       array_ops.expand_dims(math_ops.range(beam_width), 0), 0)
   beam_ids = array_ops.tile(beam_ids, [max_time, batch_size, 1])
 
-  mask = array_ops.sequence_mask(
-      sequence_length, maxlen=max_time, dtype=dtypes.int32)
-  mask = array_ops.transpose(mask, perm=[2, 0, 1])
-
-  # Use beam_width + 1 to mark the end of beam.
-  masked_beam_ids = (beam_ids * mask) + (1 - mask) * (beam_width + 1)
-
   max_sequence_lengths = math_ops.to_int32(
       math_ops.reduce_max(sequence_length, axis=1))
   sorted_beam_ids = beam_search_ops.gather_tree(
-      step_ids=masked_beam_ids,
+      step_ids=beam_ids,
       parent_ids=parent_ids,
       max_sequence_lengths=max_sequence_lengths,
       end_token=beam_width + 1)
 
   # For out of range steps, simply copy the same beam.
+  in_bound_steps = array_ops.transpose(
+      array_ops.sequence_mask(sequence_length, maxlen=max_time),
+      perm=[2, 0, 1])
   sorted_beam_ids = array_ops.where(
-      math_ops.cast(mask, dtypes.bool), x=sorted_beam_ids, y=beam_ids)
+      in_bound_steps, x=sorted_beam_ids, y=beam_ids)
 
   # Generate indices for gather_nd.
   time_ind = array_ops.tile(array_ops.reshape(
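
An equivalent NumPy phrasing of the new in_bound_steps masking (assuming sequence_length is [batch, beam] and the id arrays are [max_time, batch, beam]):

import numpy as np

def keep_in_bound(sorted_beam_ids, beam_ids, sequence_length):
  max_time = beam_ids.shape[0]
  # Same as transpose(sequence_mask(sequence_length, max_time), [2, 0, 1]).
  in_bound = np.arange(max_time)[:, None, None] < sequence_length[None, :, :]
  return np.where(in_bound, sorted_beam_ids, beam_ids)
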
diff --git a/tensorflow/contrib/tensorrt/BUILD b/tensorflow/contrib/tensorrt/BUILD
index adda0b7..cb2daa7 100644
--- a/tensorflow/contrib/tensorrt/BUILD
+++ b/tensorflow/contrib/tensorrt/BUILD
@@ -11,7 +11,7 @@
 
 load(
     "//tensorflow:tensorflow.bzl",
-    "py_test",
+    "cuda_py_test",
     "tf_cc_test",
     "tf_copts",
     "tf_cuda_library",
@@ -32,10 +32,7 @@
     name = "tensorrt_test_cc",
     size = "small",
     srcs = ["tensorrt_test.cc"],
-    tags = [
-        "manual",
-        "notap",
-    ],
+    tags = ["no_windows"],
     deps = [
         "//tensorflow/core:lib",
         "//tensorflow/core:test",
@@ -185,6 +182,9 @@
     name = "wrap_conversion",
     srcs = ["trt_conversion.i"],
     copts = tf_copts(),
+    swig_includes = [
+        "//tensorflow/python:platform/base.i",
+    ],
     deps = [
         ":trt_conversion",
         ":trt_engine_op_kernel",
@@ -275,6 +275,7 @@
     name = "segment_test",
     size = "small",
     srcs = ["segment/segment_test.cc"],
+    tags = ["no_windows"],
     deps = [
         ":segment",
         "//tensorflow/c:c_api",
@@ -310,10 +311,6 @@
     name = "trt_plugin_factory_test",
     size = "small",
     srcs = ["plugin/trt_plugin_factory_test.cc"],
-    tags = [
-        "manual",
-        "notap",
-    ],
     deps = [
         ":trt_plugins",
         "//tensorflow/core:lib",
@@ -325,23 +322,24 @@
     ]),
 )
 
-py_test(
+cuda_py_test(
     name = "tf_trt_integration_test",
     srcs = ["test/tf_trt_integration_test.py"],
-    main = "test/tf_trt_integration_test.py",
-    srcs_version = "PY2AND3",
-    tags = [
-        "manual",
-        "notap",
-    ],
-    deps = [
+    additional_deps = [
         ":init_py",
         "//tensorflow/python:client_testlib",
         "//tensorflow/python:framework_test_lib",
     ],
+    main = "test/tf_trt_integration_test.py",
+    tags = [
+        "no_windows",
+        "nomac",
+    ],
 )
 
 cc_library(
     name = "utils",
+    srcs = ["convert/utils.cc"],
     hdrs = ["convert/utils.h"],
+    copts = tf_copts(),
 )
diff --git a/tensorflow/contrib/tensorrt/convert/convert_graph.cc b/tensorflow/contrib/tensorrt/convert/convert_graph.cc
index 5bfc7f9..089b03d 100644
--- a/tensorflow/contrib/tensorrt/convert/convert_graph.cc
+++ b/tensorflow/contrib/tensorrt/convert/convert_graph.cc
@@ -86,27 +86,48 @@
   // TODO(jie): Segmentation shouldn't associated with op name.
   //            Split it into a registration for each kernel.
   static const std::set<string> candidate_ops = {
-      "Identity",
-      "Snapshot",
-      "Const",
-      "Conv2D",
-      "MaxPool",
-      "BiasAdd",
-      "Relu",
-      "Add",
-      "Mul",
-      "Sub",
-      "Rsqrt",
-      "Pad",
-      "Mean",
-      "AvgPool",
-      "ConcatV2",
-      "DepthwiseConv2dNative",
-      "FusedBatchNorm",
-      "FusedBatchNormV2",
-      // TODO(ben,jie): ...
+    "Identity",
+    "Snapshot",
+    "Const",
+    "Conv2D",
+    "MaxPool",
+    "BiasAdd",
+    "Relu",
+    "Add",
+    "Mul",
+    "Sub",
+    "Rsqrt",
+    "Pad",
+    "Mean",
+    "AvgPool",
+    "ConcatV2",
+    "DepthwiseConv2dNative",
+    "FusedBatchNorm",
+    "FusedBatchNormV2",
+    "Div",
+    "RealDiv",
+    "Rsqrt",
+    "Reciprocal",
+    "Exp",
+    "Log",
+    "Sqrt",
+    "Abs",
+    "Neg",
+#if NV_TENSORRT_MAJOR > 3
+    "MatMul",
+    "BatchMatMul",
+    "Softmax",
+    "Minimum",
+    "Maximum",
+    "TopKV2",
+    "Sum",
+    "Prod",
+    "Max",
+    "Min",
+#endif
+    // TODO(ben,jie): ...
   };
-  // LINT.ThenChange(//tensorflow/contrib/tensorrt/convert/convert_nodes.h)
+  // LINT.ThenChange(//tensorflow/contrib/tensorrt/convert/convert_nodes.cc)
   return (candidate_ops.count(node->type_string()) ||
           PluginFactoryTensorRT::GetInstance()->IsPlugin(node->type_string()));
 }
@@ -248,6 +269,7 @@
     const std::vector<tensorflow::Node*>& reverse_topo_order,
     EngineInfo* info) {
   std::vector<int> subgraph_node_ids;
+  std::set<int> added_const_node_ids;  // Used to prevent double insertion.
   std::set<string> segment_devices;
   int input_port = 0;
   int output_port = 0;
@@ -257,6 +279,7 @@
   // edge, thus there must not be any duplicates since source nodes of
   // input/output edges must be in different split of the graph.
   // TODO(aaroey): consider using node id and port instead.
+    // TODO(aaroey): use topo order instead of reversing the reverse topo order.
   std::unordered_map<string, int> created_edges;
   for (auto it = reverse_topo_order.rbegin(); it != reverse_topo_order.rend();
        ++it) {
@@ -275,8 +298,7 @@
                 << " neither have requested device nor assigned device";
       }
     }
-    int node_id = node->id();
-    subgraph_node_ids.push_back(node_id);
+    const int node_id = node->id();
     for (const auto edge : node->in_edges()) {
       auto input_node = edge->src();
       if (segment_nodes.count(input_node->name()) == 0) {
@@ -286,7 +308,10 @@
         // won't be removed from the graph. If it doesn't have any edges, TF
         // will prune it out.
         if (input_node->type_string() == "Const") {
-          subgraph_node_ids.push_back(input_node->id());
+          if (added_const_node_ids.count(input_node->id()) == 0) {
+            added_const_node_ids.insert(input_node->id());
+            subgraph_node_ids.push_back(input_node->id());
+          }
         } else if (!edge->IsControlEdge() && !input_node->IsSource()) {
           string s(input_node->name());
           StrAppend(&s, ":", edge->src_output());
@@ -304,6 +329,9 @@
         }
       }
     }
+    // We need to add possible const input nodes before adding this node in
+    // order to keep the topological order.
+    subgraph_node_ids.push_back(node_id);
     for (const auto edge : node->out_edges()) {
       auto output_node = edge->dst();
       if (segment_nodes.count(output_node->name()) == 0 &&
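
Distilled to Python (helper names are placeholders), the fixed traversal records each const input once and always ahead of its consumer, keeping the collected ids topological:

def collect_subgraph_ids(topo_ordered_ids, in_segment, is_const, input_ids_of):
  subgraph_node_ids = []
  added_const_ids = set()  # guards against double insertion of shared consts
  for node_id in topo_ordered_ids:
    for src_id in input_ids_of(node_id):
      if in_segment(src_id) or not is_const(src_id):
        continue
      if src_id not in added_const_ids:
        added_const_ids.add(src_id)
        subgraph_node_ids.append(src_id)  # const lands before its consumer
    subgraph_node_ids.append(node_id)
  return subgraph_node_ids
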
@@ -596,7 +624,9 @@
         edge->src()->output_type(edge->src_output()));
     VLOG(1) << " input " << nout.node << ":" << nout.index
             << " dtype=" << tensorflow::DataTypeString(nout.data_type);
-    node_builder.Input({nout});
+    // nvcc complains that Input(<brace-enclosed initializer list>) is
+    // ambiguous, so do not use Input({nout}).
+    node_builder.Input(nout);
     TF_RETURN_IF_ERROR(node_builder.Attr("T", node->output_type(0))
                            .Attr("index", i)
                            .Finalize(&nd));
@@ -801,7 +831,9 @@
     // The allocator is used to build the engine. The build and the built engine
     // will be destroyed after we get the serialized engine string, so it's fine
     // to use unique_ptr here.
-    std::unique_ptr<nvinfer1::IGpuAllocator> alloc;
+    // TODO(aaroey): nvinfer1::IGpuAllocator doesn't have a virtual destructor
+    // and destroying the unique_ptr will result in a segfault; fix it.
+    std::unique_ptr<TRTDeviceAllocator> alloc;
     auto device_alloc = GetDeviceAndAllocator(params, engine);
     int cuda_device_id = 0;
     if (device_alloc.first >= 0) {
diff --git a/tensorflow/contrib/tensorrt/convert/convert_nodes.cc b/tensorflow/contrib/tensorrt/convert/convert_nodes.cc
index 146b9c7..65fef27 100644
--- a/tensorflow/contrib/tensorrt/convert/convert_nodes.cc
+++ b/tensorflow/contrib/tensorrt/convert/convert_nodes.cc
@@ -49,9 +49,29 @@
 #if GOOGLE_TENSORRT
 #include "tensorrt/include/NvInfer.h"
 
-//  Check if the types are equal. Cast to int first so that failure log message
-//  would work!
-#define CHECK_EQ_TYPE(val1, val2) CHECK_EQ((int)val1, (int)val2)
+// Check if the types are equal. Cast to int first so that the failure log
+// message would work!
+#define TFTRT_CHECK_EQ_TYPE(val1, val2) CHECK_EQ((int)val1, (int)val2)
+
+#define TFTRT_INTERNAL_ERROR_AT_NODE(node)                               \
+  do {                                                                   \
+    return tensorflow::errors::Internal(                                 \
+        "TFTRT::", __FUNCTION__, "failed to add TRT layer, at: ", node); \
+  } while (0)
+
+#define TFTRT_RETURN_ERROR_IF_FALSE(status, node) \
+  do {                                            \
+    if (status == false) {                        \
+      TFTRT_INTERNAL_ERROR_AT_NODE(node);         \
+    }                                             \
+  } while (0)
+
+#define TFTRT_RETURN_ERROR_IF_NULLPTR(ptr, node) \
+  do {                                           \
+    if (ptr == nullptr) {                        \
+      TFTRT_INTERNAL_ERROR_AT_NODE(node);        \
+    }                                            \
+  } while (0)
 
 namespace tensorflow {
 namespace tensorrt {
@@ -75,13 +95,110 @@
     case tensorflow::DataType::DT_HALF:
       *trt_dtype = nvinfer1::DataType::kHALF;
       break;
+#if NV_TENSORRT_MAJOR > 3
+    case tensorflow::DataType::DT_INT32:
+      *trt_dtype = nvinfer1::DataType::kINT32;
+      break;
+#endif
     default:
       return tensorflow::errors::InvalidArgument(
-          "Unsupported data type " + tensorflow::DataTypeString(tf_dtype));
+          "Unsupported data type ", tensorflow::DataTypeString(tf_dtype));
   }
   return tensorflow::Status::OK();
 }
 
+// Returns whether or not the broadcast is feasible.
+bool TensorRTGetBroadcastShape(const nvinfer1::Dims& operand_l,
+                               const bool operand_l_is_tensor,
+                               const nvinfer1::Dims& operand_r,
+                               const bool operand_r_is_tensor,
+                               nvinfer1::Dims* operand_l_new_shape,
+                               nvinfer1::Dims* operand_r_new_shape) {
+  // ***************************************************************************
+  // The TensorRT elementwise op supports broadcast but requires both tensors
+  // to be of identical rank.
+  //
+  // We consider case of:
+  //   1. operand_l to be a Tensor & operand_r to be a Const;
+  //   2. operand_l to be a Tensor & operand_r to be a Tensor;
+  // note: const op const (constant folding) should fall back to TensorFlow
+  //
+  // broadcast scheme:
+  //       T:  1 3 5    (tensor would not have batch dimension)
+  //       W:  1 1 3 1  (weight would have all explicit dimensions)
+  // i. fill in explicit dimensions
+  //    -> T: -1 1 3 5  (we put a -1 for batch dimension)
+  //    -> W:  1 1 3 1
+  // ii. compare broadcast feasibility
+  //
+  // We cannot support the following, since TensorRT does not allow manipulation
+  // of the batch dimension; we could not generate an output with the proper shape:
+  //    T: 3 5 1
+  //    W: 1 1 1  1 3 5 1
+  // -> T: 1 1 1 -1 3 5 1
+  // -> W: 1 1 1  1 3 5 1
+  // ***************************************************************************
+  const int max_nb_dims = nvinfer1::Dims::MAX_DIMS + 1;
+  const size_t element_size = sizeof(operand_l.d[0]);
+
+  // fill in dimensions
+  int l_s[max_nb_dims];
+  std::fill(l_s, l_s + max_nb_dims, 1);
+  int l_d = operand_l_is_tensor ? operand_l.nbDims + 1 : operand_l.nbDims;
+  int r_s[max_nb_dims];
+  std::fill(r_s, r_s + max_nb_dims, 1);
+  int r_d = operand_r_is_tensor ? operand_r.nbDims + 1 : operand_r.nbDims;
+
+  int max_d = std::max(l_d, r_d);
+  std::memcpy(l_s + max_d - operand_l.nbDims, operand_l.d,
+              operand_l.nbDims * element_size);
+  std::memcpy(r_s + max_d - operand_r.nbDims, operand_r.d,
+              operand_r.nbDims * element_size);
+
+  // set -1 for batch dimension, since batch size is not supposed to be
+  // broadcasted
+  if (operand_l_is_tensor) {
+    if (max_d != l_d) {  // if broadcast beyond batch dimension, fail
+      return false;
+    }
+    l_s[0] = -1;
+  }
+  if (operand_r_is_tensor) {
+    if (max_d != r_d) {  // if broadcast beyond batch dimension, fail
+      return false;
+    }
+    r_s[0] = -1;
+  }
+
+  // compare broadcast feasibility
+  for (int i = max_d - 1; i >= 0; i--) {
+    if ((l_s[i] != r_s[i]) && (l_s[i] != 1) && (r_s[i] != 1)) {
+      return false;
+    }
+  }
+
+  // output new TensorRT Dimension (stripping the batch dimension)
+  operand_l_new_shape->nbDims = max_d - 1;
+  std::memcpy(operand_l_new_shape->d, l_s + 1, (max_d - 1) * element_size);
+  operand_r_new_shape->nbDims = max_d - 1;
+  std::memcpy(operand_r_new_shape->d, r_s + 1, (max_d - 1) * element_size);
+
+  return true;
+}
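
The same rule in a few lines of Python (a sketch of the logic above, not a TensorRT API): right-align both shapes, reserve the leading slot of any tensor operand for the batch dimension, then apply the usual match-or-1 test to the trailing dims:

def trt_broadcast_shapes(l_shape, l_is_tensor, r_shape, r_is_tensor):
  # Tensor shapes exclude the batch dim; stand it back in as -1.
  l = ([-1] if l_is_tensor else []) + list(l_shape)
  r = ([-1] if r_is_tensor else []) + list(r_shape)
  max_d = max(len(l), len(r))
  # A tensor must not be padded past its batch dimension.
  if (l_is_tensor and len(l) != max_d) or (r_is_tensor and len(r) != max_d):
    return None
  l = [1] * (max_d - len(l)) + l
  r = [1] * (max_d - len(r)) + r
  for a, b in zip(l[1:], r[1:]):
    if a != b and 1 not in (a, b):
      return None
  return l[1:], r[1:]  # new shapes, batch dimension stripped

# trt_broadcast_shapes([1, 3, 5], True, [1, 1, 3, 1], False)
# -> ([1, 3, 5], [1, 3, 1]), matching the T/W example in the comment.
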
+
+inline bool DimsEqual(const nvinfer1::Dims& dim_l,
+                      const nvinfer1::Dims& dim_r) {
+  if (dim_l.nbDims != dim_r.nbDims) {
+    return false;
+  }
+  for (int i = 0; i < dim_l.nbDims; i++) {
+    if (dim_l.d[i] != dim_r.d[i]) {
+      return false;
+    }
+  }
+  return true;
+}
+
 inline nvinfer1::Dims GetTensorShape(const tensorflow::Tensor& tensor) {
   nvinfer1::Dims dims;
   dims.nbDims = tensor.dims();
@@ -91,7 +208,7 @@
   return dims;
 }
 
-inline int64_t GetShapeSize(nvinfer1::Dims shape) {
+inline int64_t GetShapeSize(const nvinfer1::Dims& shape) {
   // Returns total number of elements in shape
   int64_t count = 1;
   for (int d = 0; d < shape.nbDims; ++d) {
@@ -104,7 +221,7 @@
     const nvinfer1::DimsHW& stride, const nvinfer1::DimsHW& kernel,
     const std::vector<int64_t>& input_dims) {
   std::vector<std::pair<int, int>> padding(input_dims.size());
-  CHECK_EQ((size_t)stride.nbDims, input_dims.size());  // TODO(jie): N+C? NC+?
+  CHECK_EQ(stride.nbDims, input_dims.size());  // TODO(jie): N+C? NC+?
 
   for (size_t i = 0; i < input_dims.size(); ++i) {
     // Formula to calculate the padding
@@ -134,6 +251,7 @@
   return op_name_a.substr(0, last_scope_separator);
 }
 
+// Class to convert TF weight to TRT weight.
 class TRT_ShapedWeights {
  public:
   TRT_ShapedWeights(tensorflow::DataType type, const void* values,
@@ -145,12 +263,14 @@
   explicit TRT_ShapedWeights(tensorflow::DataType type)
       : shape_(), type_(type), values_(nullptr), empty_weight_flag_(true) {}
 
+  // TODO(aaroey): use rvalue reference.
   TRT_ShapedWeights(const TRT_ShapedWeights& rhs)
       : shape_(rhs.shape_),
         type_(rhs.type_),
         values_(rhs.values_),
         empty_weight_flag_(rhs.empty_weight_flag_) {}
 
+  // TODO(aaroey): use GetShapeSize() instead.
   int64_t count() const {
     int64_t c = 1;
     for (int i = 0; i < shape_.nbDims; i++) c *= shape_.d[i];
@@ -168,6 +288,7 @@
 
   const void* GetValues() const { return values_; }
 
+  // TODO(aaroey): get rid of this method.
   void SetValues(const void* values) { values_ = values; }
 
   size_t size_bytes() const {
@@ -178,10 +299,12 @@
   // Default converter
   operator nvinfer1::Weights() const { return GetWeightsForTRT(); }
 
+  // TODO(aaroey): make these private.
   nvinfer1::Dims shape_;
   tensorflow::DataType type_;
 
  private:
+  // TODO(aaroey): this should not be const as it's always from TRTWeightStore.
   const void* values_;
   bool empty_weight_flag_;
 };
@@ -192,6 +315,7 @@
       : tensor_(tensor), weights_(DT_FLOAT), variant_(TRT_NODE_TENSOR) {}
   explicit TRT_TensorOrWeights(const TRT_ShapedWeights& weights)
       : tensor_(nullptr), weights_(weights), variant_(TRT_NODE_WEIGHTS) {}
+  // TODO(aaroey): use rvalue reference.
   TRT_TensorOrWeights(const TRT_TensorOrWeights& rhs)
       : tensor_(rhs.tensor_), weights_(rhs.weights_), variant_(rhs.variant_) {}
   ~TRT_TensorOrWeights() {}
@@ -200,19 +324,19 @@
   bool is_weights() const { return variant_ == TRT_NODE_WEIGHTS; }
 
   nvinfer1::ITensor* tensor() {
-    CHECK_EQ(is_tensor(), true);
+    CHECK(is_tensor());
     return tensor_;
   }
   const nvinfer1::ITensor* tensor() const {
-    CHECK_EQ(is_tensor(), true);
+    CHECK(is_tensor());
     return tensor_;
   }
   TRT_ShapedWeights& weights() {
-    CHECK_EQ(is_weights(), true);
+    CHECK(is_weights());
     return weights_;
   }
   const TRT_ShapedWeights& weights() const {
-    CHECK_EQ(is_weights(), true);
+    CHECK(is_weights());
     return weights_;
   }
   nvinfer1::Dims shape() const {
@@ -236,21 +360,25 @@
       attrs_.insert({attr.first, &attr.second});
     }
   }
-  bool count(string key) const { return attrs_.count(key); }
-  tensorflow::AttrValue const* at(string key) const {
+
+  bool count(const string& key) const { return attrs_.count(key); }
+
+  tensorflow::AttrValue const* at(const string& key) const {
     if (!attrs_.count(key)) {
       LOG(FATAL) << "Attribute not found: " << key;
     }
     return attrs_.at(key);
   }
+
   template <typename T>
   T get(const string& key) const;
+
   template <typename T>
   T get(const string& key, const T& default_value) const {
     return attrs_.count(key) ? this->get<T>(key) : default_value;
   }
 
-  std::vector<string> GetAllAttrKey() {
+  std::vector<string> GetAllAttrKeys() const {
     std::vector<string> attr_list;
     for (const auto& attr_item : attrs_) {
       attr_list.emplace_back(attr_item.first);
@@ -285,15 +413,6 @@
   auto attr = this->at(key)->list().s();
   return std::vector<string>(attr.begin(), attr.end());
 }
-template <>
-nvinfer1::Dims TFAttrs::get<nvinfer1::Dims>(const string& key) const {
-  auto values = this->get<std::vector<int>>(key);
-  nvinfer1::Dims dims;
-  dims.nbDims = values.size();
-  std::copy(values.begin(), values.end(), dims.d);
-  // Note: No dimension type information is included
-  return dims;
-}
 
 template <>
 nvinfer1::DataType TFAttrs::get<nvinfer1::DataType>(const string& key) const {
@@ -319,10 +438,11 @@
 }
 
 // TODO(jie): reorder4 & reorder2 should be merged?
+// TODO(aaroey): fix the order of parameters.
 template <typename T>
-void Reorder4(nvinfer1::DimsNCHW shape, const T* idata,
-              nvinfer1::DimsNCHW istrides, T* odata,
-              nvinfer1::DimsNCHW ostrides) {
+void Reorder4(const nvinfer1::DimsNCHW& shape, const T* idata,
+              const nvinfer1::DimsNCHW& istrides, T* odata,
+              const nvinfer1::DimsNCHW& ostrides) {
   for (int n = 0; n < shape.n(); ++n) {
     for (int c = 0; c < shape.c(); ++c) {
       for (int h = 0; h < shape.h(); ++h) {
@@ -337,12 +457,13 @@
 }
 
 template <typename T>
-void Reorder2(nvinfer1::DimsHW shape, const T* idata, nvinfer1::DimsHW istrides,
-              T* odata, nvinfer1::DimsHW ostrides) {
+void Reorder2(const nvinfer1::DimsHW& shape, const T* idata,
+              const nvinfer1::DimsHW& istrides, T* odata,
+              const nvinfer1::DimsHW& ostrides) {
   for (int h = 0; h < shape.h(); ++h) {
     for (int w = 0; w < shape.w(); ++w) {
       odata[h * ostrides.h() + w * ostrides.w()] =
-          idata[h * ostrides.h() + w * ostrides.w()];
+          idata[h * istrides.h() + w * istrides.w()];
     }
   }
 }
@@ -350,16 +471,17 @@
 // TODO(jie): fallback to tensorflow!!
 void ReorderCKtoKC(const TRT_ShapedWeights& iweights,
                    TRT_ShapedWeights* oweights) {
-  int c = iweights.shape_.d[0];
-  int k = iweights.shape_.d[1];
+  const int c = iweights.shape_.d[0];
+  const int k = iweights.shape_.d[1];
   oweights->shape_.d[0] = k;
   oweights->shape_.d[1] = c;
-  nvinfer1::DimsHW istrides = {1, k};
-  nvinfer1::DimsHW ostrides = {c, 1};
+  const nvinfer1::DimsHW istrides = {1, k};
+  const nvinfer1::DimsHW ostrides = {c, 1};
   switch (iweights.type_) {
     case tensorflow::DataType::DT_FLOAT: {
       Reorder2({k, c}, static_cast<float const*>(iweights.GetValues()),
                istrides,
+               // TODO(aaroey): get rid of all the const_cast like this.
                static_cast<float*>(const_cast<void*>(oweights->GetValues())),
                ostrides);
       break;
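// A hedged, self-contained sketch (names illustrative, not this file's API)
// of the stride-driven transpose that ReorderCKtoKC performs above: iterate
// the KC output shape {k, c}, reading the CK source with strides {1, k} and
// writing the destination with strides {c, 1}.
#include <cassert>
#include <vector>

template <typename T>
void Reorder2Sketch(int h_count, int w_count, const T* idata, int ih, int iw,
                    T* odata, int oh, int ow) {
  for (int h = 0; h < h_count; ++h)
    for (int w = 0; w < w_count; ++w)
      odata[h * oh + w * ow] = idata[h * ih + w * iw];
}

int main() {
  const int c = 2, k = 3;
  // CK layout, row-major: rows are input channels, columns output channels.
  std::vector<int> ck = {0, 1, 2,
                         3, 4, 5};
  std::vector<int> kc(c * k);
  // Output shape {k, c}; istrides {1, k}, ostrides {c, 1} as in the code.
  Reorder2Sketch(k, c, ck.data(), /*ih=*/1, /*iw=*/k,
                 kc.data(), /*oh=*/c, /*ow=*/1);
  assert((kc == std::vector<int>{0, 3, 1, 4, 2, 5}));  // the transpose
}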
@@ -382,21 +504,24 @@
                        TRT_ShapedWeights* oweights, int num_groups) {
   CHECK_EQ(iweights.type_, oweights->type_);
   CHECK_EQ(iweights.size_bytes(), oweights->size_bytes());
-  int r = iweights.shape_.d[0];
-  int s = iweights.shape_.d[1];
-  // TRT requires GKcRS, while TF depthwise has RSCK
-  //   where c=1, C=G
+  // K indexes over output channels, C over input channels, and R and S over
+  // the height and width of the convolution kernel.
+  const int r = iweights.shape_.d[0];
+  const int s = iweights.shape_.d[1];
+  // TRT requires GKcRS, while TF depthwise has RSCK where c=1, C=G
   VLOG(2) << "num_groups: " << num_groups;
-  int c = iweights.shape_.d[2] / num_groups;
+  const int c = iweights.shape_.d[2] / num_groups;
   VLOG(2) << "c" << iweights.shape_.d[2] << " then " << c;
-  int k = iweights.shape_.d[3] * num_groups;
+  const int k = iweights.shape_.d[3] * num_groups;
   VLOG(2) << "k" << iweights.shape_.d[3] << " then " << k;
+  VLOG(2) << "r" << iweights.shape_.d[0] << " then " << r;
+  VLOG(2) << "s" << iweights.shape_.d[1] << " then " << s;
   oweights->shape_.d[0] = k / num_groups;
   oweights->shape_.d[1] = c * num_groups;
   oweights->shape_.d[2] = r;
   oweights->shape_.d[3] = s;
-  nvinfer1::DimsNCHW istrides = {1, k, s * k * c, c * k};
-  nvinfer1::DimsNCHW ostrides = {c * r * s, r * s, s, 1};
+  const nvinfer1::DimsNCHW istrides = {1, k, s * k * c, c * k};
+  const nvinfer1::DimsNCHW ostrides = {c * r * s, r * s, s, 1};
   switch (iweights.type_) {
     case tensorflow::DataType::DT_FLOAT: {
       Reorder4({k, c, r, s}, static_cast<float const*>(iweights.GetValues()),
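// A hedged standalone sketch of the stride derivation in ReorderRSCKToKCRS
// (shown for num_groups == 1; not part of the patch): an RSCK element
// (ri, si, ci, ki) lives at ri*S*C*K + si*C*K + ci*K + ki, so iterating the
// output in KCRS order reads the input with strides {1, K, S*K*C, C*K} and
// writes with strides {C*R*S, R*S, S, 1}.
#include <cassert>
#include <vector>

int main() {
  const int R = 2, S = 2, C = 2, K = 2;
  std::vector<int> rsck(R * S * C * K), kcrs(R * S * C * K);
  for (int i = 0; i < (int)rsck.size(); ++i) rsck[i] = i;
  const int istr[4] = {1, K, S * K * C, C * K};  // n=k, c=c, h=r, w=s
  const int ostr[4] = {C * R * S, R * S, S, 1};
  for (int n = 0; n < K; ++n)
    for (int c = 0; c < C; ++c)
      for (int h = 0; h < R; ++h)
        for (int w = 0; w < S; ++w)
          kcrs[n * ostr[0] + c * ostr[1] + h * ostr[2] + w * ostr[3]] =
              rsck[n * istr[0] + c * istr[1] + h * istr[2] + w * istr[3]];
  // Spot-check: RSCK element (r=1, s=0, c=1, k=0) at index 10 must land at
  // KCRS index k*CRS + c*RS + r*S + s = 6.
  assert(kcrs[0 * 8 + 1 * 4 + 1 * 2 + 0] == rsck[1 * 8 + 0 * 4 + 1 * 2 + 0]);
}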
@@ -428,11 +553,14 @@
                                      std::vector<TRT_TensorOrWeights>*)>;
 
 class Converter {
+  // TODO(aaroey): fix the order of members.
   std::unordered_map<string, TRT_TensorOrWeights> trt_tensors_;
   std::unordered_map<string, OpConverter> op_registry_;
   OpConverter plugin_converter_;
   nvinfer1::INetworkDefinition* trt_network_;
   std::list<std::vector<uint8_t>> temp_bufs_;
+  // TODO(aaroey): inline the definition of TRTWeightStore here, and add APIs
+  // to operate on the stored weights instead of accessing them directly.
   TRTWeightStore* weight_store_;
   bool fp16_;
   void register_op_converters();
@@ -440,7 +568,7 @@
                                 std::vector<TRT_TensorOrWeights>* inputs) {
     for (auto const& input_name : node_def.input()) {
       /*************************************************************************
-       * TODO(jie) handle case 1) here
+       * TODO(jie): handle case 1) here.
        * Normalizes the inputs and extracts associated metadata:
        * 1) Inputs can contain a colon followed by a suffix of characters.
        *    That suffix may be a single number (e.g. inputName:1) or several
@@ -454,6 +582,7 @@
       if (input_name[0] == '^') continue;
       string name = input_name;
       auto first = name.find_first_of(':');
+      // TODO(aaroey): why remove the colon but not the zero? A bug?
       if (first != string::npos && first + 2 == name.size() &&
           name[first + 1] == '0')
         name.erase(first);
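// A hedged sketch of the ":0" normalization in get_inputs above (the helper
// name NormalizeInputName is hypothetical): a trailing ":0" output suffix is
// stripped, while ":1", ":2", ... are kept as-is.
#include <cassert>
#include <string>

std::string NormalizeInputName(std::string name) {
  const auto first = name.find_first_of(':');
  // Only a ":0" suffix (colon plus a single '0' at the very end) is erased.
  if (first != std::string::npos && first + 2 == name.size() &&
      name[first + 1] == '0') {
    name.erase(first);  // erases from the colon to the end of the string
  }
  return name;
}

int main() {
  assert(NormalizeInputName("conv1:0") == "conv1");
  assert(NormalizeInputName("conv1:1") == "conv1:1");  // kept, per the TODO
  assert(NormalizeInputName("conv1") == "conv1");
}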
@@ -462,12 +591,13 @@
       if (trt_tensors_.count(name)) {
         inputs->push_back(trt_tensors_.at(name));
       } else {
-        string str("Node ");
-        StrAppend(&str, node_def.name(), " should have an input named '", name,
+        // TODO(aaroey): this should not happen, make it a CHECK.
+        // TODO(aaroey): use StrCat for pattern like this.
+        string msg("Node ");
+        StrAppend(&msg, node_def.name(), " should have an input named '", name,
                   "' but it is not available");
-        LOG(WARNING) << "input: " << name << " not available for node at "
-                     << node_def.name();
-        return tensorflow::errors::InvalidArgument(str);
+        LOG(ERROR) << msg;
+        return tensorflow::errors::InvalidArgument(msg);
       }
     }
     return tensorflow::Status::OK();
@@ -488,6 +618,7 @@
     weights.SetValues(weight_store_->store_.back().data());
     return weights;
   }
+  // TODO(aaroey): fix the naming of all these methods.
   bool isFP16() { return fp16_; }
   TRT_ShapedWeights get_temp_weights_like(const TRT_ShapedWeights& weights) {
     return this->get_temp_weights(weights.type_, weights.shape_);
@@ -496,9 +627,10 @@
   tensorflow::Status convert_node(const tensorflow::NodeDef& node_def) {
     std::vector<TRT_TensorOrWeights> inputs;
     TF_RETURN_IF_ERROR(this->get_inputs(node_def, &inputs));
-    string op = node_def.op();
+    const string& op = node_def.op();
     std::vector<TRT_TensorOrWeights> outputs;
     if (PluginFactoryTensorRT::GetInstance()->IsPlugin(op)) {
+      // TODO(aaroey): plugin_converter_ is not set, fix it.
       TF_RETURN_IF_ERROR(plugin_converter_(*this, node_def, inputs, &outputs));
     } else {
       if (!op_registry_.count(op)) {
@@ -509,7 +641,7 @@
       TF_RETURN_IF_ERROR(op_converter(*this, node_def, inputs, &outputs));
     }
     for (size_t i = 0; i < outputs.size(); ++i) {
-      TRT_TensorOrWeights output = outputs.at(i);
+      TRT_TensorOrWeights& output = outputs[i];
       // TODO(jie): tf protobuf seems to be omitting the :0 suffix
       string output_name = node_def.name();
       if (i != 0) output_name = StrCat(output_name, ":", i);
@@ -527,26 +659,29 @@
 
   nvinfer1::INetworkDefinition* network() { return trt_network_; }
 
-  TRT_TensorOrWeights get_tensor(string name) {
+  TRT_TensorOrWeights get_tensor(const string& name) {
     if (!trt_tensors_.count(name)) {
       return TRT_TensorOrWeights(nullptr);
     }
     return trt_tensors_.at(name);
   }
 
-  bool insert_input_tensor(string name, nvinfer1::ITensor* tensor) {
+  bool insert_input_tensor(const string& name, nvinfer1::ITensor* tensor) {
     return trt_tensors_.insert({name, TRT_TensorOrWeights(tensor)}).second;
   }
 
   nvinfer1::ITensor* TransposeTensor(nvinfer1::ITensor* input_tensor,
-                                     std::vector<int> order) {
-    auto dims = input_tensor->getDimensions();
+                                     const std::vector<int>& order) {
+    const auto dims = input_tensor->getDimensions();
 
     // TODO(jie): change the return to status and properly exit
     if (order.size() - 1 != size_t(dims.nbDims))
       LOG(ERROR) << "Dimension does not match, fail gracefully";
 
     nvinfer1::IShuffleLayer* layer = this->network()->addShuffle(*input_tensor);
+    if (layer == nullptr) {
+      return nullptr;
+    }
     nvinfer1::Permutation permutation;
     for (int32_t i = 0; i < dims.nbDims; ++i) {
       permutation.order[i] = order[i + 1] - 1;
@@ -577,13 +712,14 @@
   }
   return weights;
 }
+
 // ****************************************************************************
 // Constant folding functions
 // TODO(jie): once optimizer kicks in, we should have done constant folding
 // there.
-//*****************************************************************************/
+// *****************************************************************************
 struct LambdaFactory {
-  enum class OP_CATEGORY : int { RSQRT = 0, NEG, ADD, MUL, SUB };
+  enum class OP_CATEGORY : int { RSQRT = 0, NEG, ADD, MUL, SUB, RECIP };
   OP_CATEGORY op;
 
   template <typename T>
@@ -595,6 +731,8 @@
       }
       case OP_CATEGORY::NEG:
         return [](T t) -> T { return -t; };
+      case OP_CATEGORY::RECIP:
+        return [](T t) -> T { return 1.0 / t; };
       default:
         VLOG(2) << "Not supported op for unary: " << static_cast<int>(op);
         return nullptr;
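// A hedged, self-contained sketch of the LambdaFactory pattern above:
// map an op-category enum to a unary functor (MakeUnary is a hypothetical
// stand-in for LambdaFactory::unary<T>()).
#include <cmath>
#include <functional>
#include <iostream>

enum class OpCategory { RSQRT, NEG, RECIP };

std::function<float(float)> MakeUnary(OpCategory op) {
  switch (op) {
    case OpCategory::RSQRT:
      return [](float t) { return 1.0f / std::sqrt(t); };
    case OpCategory::NEG:
      return [](float t) { return -t; };
    case OpCategory::RECIP:
      return [](float t) { return 1.0f / t; };
  }
  return nullptr;  // unsupported category: empty functor, as in the code
}

int main() {
  std::cout << MakeUnary(OpCategory::RSQRT)(4.0f) << "\n";  // prints 0.5
}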
@@ -628,7 +766,6 @@
           VLOG(2) << "LAMBDA VAL : " << val;
           return l + val;
         };
-      // Return [val](T l)-> T {return l+val;};
       case OP_CATEGORY::SUB:
         return [val](T l) -> T {
           VLOG(2) << "LAMBDA VAL : " << val;
@@ -688,11 +825,13 @@
     }
     case OP_CATEGORY::NEG:
       return [](Eigen::half t) -> Eigen::half { return -t; };
+    // TODO(aaroey): can we support RECIP?
     default:
       VLOG(2) << "Not supported op for unary: " << static_cast<int>(op);
       return nullptr;
   }
 }
+
 tensorflow::Status UnaryCompute(const TRT_ShapedWeights& iweights,
                                 TRT_ShapedWeights* oweights,
                                 LambdaFactory unary_op) {
@@ -738,6 +877,7 @@
       if (iweights_l.count() != iweights_r.count()) {
         // We only supports broadcast of RankZero
         if (iweights_l.count() == 1) {
+          // TODO(aaroey): remove log statements like this.
           VLOG(2) << "I bet it is not working!" << (*inp_l);
           std::transform(inp_r, inp_r + iweights_r.count(), oup,
                          binary_op.broadcast_l<float>(*inp_l));
@@ -790,117 +930,21 @@
   return tensorflow::Status::OK();
 }
 
-tensorflow::Status ConstantFoldUnary(
-    Converter& ctx, const tensorflow::NodeDef& node_def,
-    const std::vector<TRT_TensorOrWeights>& inputs,
-    std::vector<TRT_TensorOrWeights>* outputs) {
-  TRT_ShapedWeights weights_input = inputs.at(0).weights();
-
-  // Allocate output weights
-  TRT_ShapedWeights weights_output = ctx.get_temp_weights_like(weights_input);
-
-  // FIXME assume type matches input weights
-  // Get trt type & shape
-  // Maybe this part has to be moved into the block of rsqrt later
-  // Check type consistency
-  CHECK_EQ(weights_input.type_,
-           TFAttrs(node_def).get<tensorflow::DataType>("T"));
-
-  LambdaFactory unary_op;
-  if (node_def.op() == "Rsqrt") {
-    // Compute rsqrt
-    unary_op.op = LambdaFactory::OP_CATEGORY::RSQRT;
-    auto ret = UnaryCompute(weights_input, &weights_output, unary_op);
-    // Pass the output
-    if (ret == tensorflow::Status::OK()) {
-      outputs->push_back(TRT_TensorOrWeights(weights_output));
-    }
-    return ret;
-  } else {
-    return tensorflow::errors::Unimplemented("Binary op not supported: " +
-                                             node_def.op());
-  }
-}
-
-// TODO(jie,ben) broadcast is needed yet not implemented
-// Let's get the simple stuff working first. Maybe we should fall back to TF
-//   approach for constant folding
-tensorflow::Status ConstantFoldBinary(
-    Converter& ctx, const tensorflow::NodeDef& node_def,
-    const std::vector<TRT_TensorOrWeights>& inputs,
-    std::vector<TRT_TensorOrWeights>* outputs) {
-  TRT_ShapedWeights weights_input_l = inputs.at(0).weights();
-  TRT_ShapedWeights weights_input_r = inputs.at(1).weights();
-
-  // Check type consistency
-  CHECK_EQ(weights_input_l.type_, weights_input_r.type_);
-
-  if (weights_input_l.shape_.nbDims != weights_input_r.shape_.nbDims)
-    return tensorflow::errors::Unimplemented(
-        "Binary op implicit broadcast not supported: " + node_def.op());
-
-  // TODO(jie): constant fold should really fall back to TF.
-  int num_dims = weights_input_l.shape_.nbDims;
-  nvinfer1::Dims output_shape;
-  output_shape.nbDims = num_dims;
-  VLOG(2) << "nb_dims: " << num_dims
-          << ", the other: " << weights_input_r.shape_.nbDims;
-  for (int i = 0; i < num_dims; i++) {
-    if (weights_input_l.shape_.d[i] == weights_input_r.shape_.d[i]) {
-      output_shape.d[i] = weights_input_l.shape_.d[i];
-    } else if (weights_input_l.shape_.d[i] == 1 ||
-               weights_input_r.shape_.d[i] == 1) {
-      output_shape.d[i] =
-          std::max(weights_input_l.shape_.d[i], weights_input_r.shape_.d[i]);
-    } else {
-      return tensorflow::errors::Unimplemented(
-          "Binary op with incompatible shape at, " + node_def.op());
-    }
-    VLOG(2) << "left: " << weights_input_l.shape_.d[i]
-            << "right: " << weights_input_r.shape_.d[i]
-            << "output: " << output_shape.d[i];
-  }
-
-  // FIXME assume type matches input weights
-  // Get trt type & shape
-  TFAttrs attrs(node_def);
-  // Maybe this part has to be moved into the block of rsqrt later
-  tensorflow::DataType dtype = attrs.get<tensorflow::DataType>("T");
-
-  // Allocate output weights
-  TRT_ShapedWeights weights_output = ctx.get_temp_weights(dtype, output_shape);
-
-  LambdaFactory binary_op;
-  if (node_def.op() == "Sub") {
-    binary_op.op = LambdaFactory::OP_CATEGORY::SUB;
-  } else if (node_def.op() == "Mul") {
-    binary_op.op = LambdaFactory::OP_CATEGORY::MUL;
-  } else if (node_def.op() == "Add") {
-    binary_op.op = LambdaFactory::OP_CATEGORY::ADD;
-  } else {
-    return tensorflow::errors::Unimplemented("Binary op not supported: " +
-                                             node_def.op());
-  }
-  auto ret = BinaryCompute(weights_input_l, weights_input_r, &weights_output,
-                           binary_op);
-
-  // Pass the output
-  if (ret == tensorflow::Status::OK()) {
-    outputs->push_back(TRT_TensorOrWeights(weights_output));
-  }
-
-  return ret;
-}
-
 // TODO(jie): broadcast is needed yet not implemented.
 // Only implemented channel wise for the time being
 tensorflow::Status BinaryTensorOpWeight(
     Converter& ctx, const tensorflow::NodeDef& node_def,
     const nvinfer1::ITensor* tensor, TRT_ShapedWeights weights,
-    std::vector<TRT_TensorOrWeights>* outputs) {
-  // FIXME assume type matches input weights
-  // Get trt type & shape
-  // Maybe this part has to be moved into the block of rsqrt later
+    bool swapped_inputs, std::vector<TRT_TensorOrWeights>* outputs) {
+  // tensor is the left operand while weights is the right operand; when
+  // swapped_inputs is set to true, the two are swapped.
+  // TODO(aaroey): use a set.
+  if (node_def.op() != "Sub" && node_def.op() != "Add" &&
+      node_def.op() != "Mul" && node_def.op() != "Div" &&
+      node_def.op() != "RealDiv") {
+    return tensorflow::errors::Unimplemented(
+        "op not supported: " + node_def.op() + ", at: " + node_def.name());
+  }
 
   // Check type consistency
   nvinfer1::DataType ttype;
@@ -910,6 +954,12 @@
   auto dims_w = weights.shape_;
   auto dims_t = tensor->getDimensions();
 
+  // TODO(jie): addScale checks for input tensor dimension
+  if (dims_t.nbDims != 3) {
+    return tensorflow::errors::InvalidArgument(
+        "addScale requires tensor with rank 3, " + node_def.name());
+  }
+
   // default to element-wise
   auto scale_mode = nvinfer1::ScaleMode::kELEMENTWISE;
 
@@ -980,6 +1030,7 @@
       permutation[dims_t.nbDims] = 1;
       tensor = ctx.TransposeTensor(const_cast<nvinfer1::ITensor*>(tensor),
                                    permutation);
+      TFTRT_RETURN_ERROR_IF_NULLPTR(tensor, node_def.name());
     } else {
       return tensorflow::errors::InvalidArgument(
           "Transpose cannot be applied, " + node_def.name());
@@ -997,11 +1048,35 @@
 
   // Maybe I should do a switch
   if (node_def.op() == "Sub") {
-    TRT_ShapedWeights neg_weights = ctx.get_temp_weights_like(weights);
-    LambdaFactory unary_op;
-    unary_op.op = LambdaFactory::OP_CATEGORY::NEG;
-    TF_RETURN_IF_ERROR(UnaryCompute(weights, &neg_weights, unary_op));
-    shift_weights = neg_weights;
+    if (swapped_inputs) {
+      shift_weights = weights;
+      nvinfer1::IUnaryLayer* layer =
+          ctx.network()->addUnary(*const_cast<nvinfer1::ITensor*>(tensor),
+                                  nvinfer1::UnaryOperation::kNEG);
+      TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
+      tensor = layer->getOutput(0);
+    } else {
+      TRT_ShapedWeights neg_weights = ctx.get_temp_weights_like(weights);
+      LambdaFactory unary_op;
+      unary_op.op = LambdaFactory::OP_CATEGORY::NEG;
+      TF_RETURN_IF_ERROR(UnaryCompute(weights, &neg_weights, unary_op));
+      shift_weights = neg_weights;
+    }
+  } else if (node_def.op() == "Div" || node_def.op() == "RealDiv") {
+    if (swapped_inputs) {
+      scale_weights = weights;
+      nvinfer1::IUnaryLayer* layer =
+          ctx.network()->addUnary(*const_cast<nvinfer1::ITensor*>(tensor),
+                                  nvinfer1::UnaryOperation::kRECIP);
+      TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
+      tensor = layer->getOutput(0);
+    } else {
+      TRT_ShapedWeights recip_weights = ctx.get_temp_weights_like(weights);
+      LambdaFactory unary_op;
+      unary_op.op = LambdaFactory::OP_CATEGORY::RECIP;
+      TF_RETURN_IF_ERROR(UnaryCompute(weights, &recip_weights, unary_op));
+      scale_weights = recip_weights;
+    }
   } else if (node_def.op() == "Mul") {
     scale_weights = weights;
   } else if (node_def.op() == "Add") {
@@ -1014,11 +1089,13 @@
   nvinfer1::IScaleLayer* layer = ctx.network()->addScale(
       *const_cast<nvinfer1::ITensor*>(tensor), scale_mode, shift_weights,
       scale_weights, power_weights);
+  TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
 
   nvinfer1::ITensor* output_tensor = layer->getOutput(0);
   // transpose back dimension
   if (permutation_flag) {
     output_tensor = ctx.TransposeTensor(output_tensor, permutation);
+    TFTRT_RETURN_ERROR_IF_NULLPTR(output_tensor, node_def.name());
   }
 
   // Pass the output
@@ -1042,20 +1119,31 @@
   if (data_format == "NHWC") {
     tensor = ctx.TransposeTensor(const_cast<nvinfer1::ITensor*>(tensor),
                                  {0, 3, 1, 2});
+    TFTRT_RETURN_ERROR_IF_NULLPTR(tensor, node_def.name());
     h_index = 1;
     w_index = 2;
     // TODO(jie): transpose it
   }
 
   // tensor after transpose (NCHW)
-  auto tensor_dim = tensor->getDimensions();
+  const auto tensor_dim = tensor->getDimensions();
 
   int num_groups = group;
-  if (num_groups == 0)  // depthwise convolution
-    num_groups = tensor_dim.d[0];
+  if (num_groups == 0) num_groups = tensor_dim.d[0];  // depthwise convolution
   VLOG(2) << "groups count: " << num_groups;
 
   TRT_ShapedWeights weights_rsck = inputs.at(1).weights();
+
+  VLOG(2) << "weight shape: " << weights_rsck.shape_.nbDims;
+  for (int i = 0; i < weights_rsck.shape_.nbDims; i++) {
+    VLOG(2) << weights_rsck.shape_.d[i];
+  }
+
+  if (weights_rsck.shape_.nbDims != 4) {
+    return tensorflow::errors::Internal(
+        "Conv2D expects kernel of dimension 4, at: " + node_def.name());
+  }
+
   if (ctx.isFP16()) {
     weights_rsck = ConvertFP32ToFP16(ctx, inputs.at(1).weights());
   }
@@ -1063,18 +1151,22 @@
   TRT_ShapedWeights weights = ctx.get_temp_weights_like(weights_rsck);
   ReorderRSCKToKCRS(weights_rsck, &weights, num_groups);
   TRT_ShapedWeights biases(weights.type_);
-  int noutput = weights.shape_.d[0] * num_groups;
+  const int noutput = weights.shape_.d[0] * num_groups;
   nvinfer1::DimsHW kernel_size;
   kernel_size.h() = weights.shape_.d[2];
   kernel_size.w() = weights.shape_.d[3];
+  VLOG(2) << "RSCK: ";
+  for (int i = 0; i < 4; i++) {
+    VLOG(2) << "     " << weights.shape_.d[i];
+  }
   VLOG(2) << "kernel size: " << kernel_size.h() << ", " << kernel_size.w();
 
   // TODO(jie): stride. (NHWC/NCHW)
-  auto tf_stride = attrs.get<std::vector<int>>("strides");
+  const auto tf_stride = attrs.get<std::vector<int>>("strides");
   VLOG(2) << "h_INDEX" << h_index << ", w_index " << w_index;
   VLOG(2) << "stride!!!: " << tf_stride[0] << tf_stride[1] << tf_stride[2]
           << tf_stride[3];
-  nvinfer1::DimsHW stride(tf_stride[h_index], tf_stride[w_index]);
+  const nvinfer1::DimsHW stride(tf_stride[h_index], tf_stride[w_index]);
 
   std::vector<std::pair<int, int>> padding;
   // TODO(jie): padding.
@@ -1102,6 +1194,7 @@
         *const_cast<nvinfer1::ITensor*>(tensor),
         nvinfer1::DimsHW(padding[0].first, padding[1].first),
         nvinfer1::DimsHW(padding[0].second, padding[1].second));
+    TFTRT_RETURN_ERROR_IF_NULLPTR(pad_layer, node_def.name());
     padding = {{0, 0}, {0, 0}};
     tensor = pad_layer->getOutput(0);
     auto dim_after = tensor->getDimensions();
@@ -1112,6 +1205,7 @@
   nvinfer1::IConvolutionLayer* layer =
       ctx.network()->addConvolution(*const_cast<nvinfer1::ITensor*>(tensor),
                                     noutput, kernel_size, weights, biases);
+  TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
 
   layer->setStride(stride);
   layer->setPadding({padding[0].first, padding[1].first});
@@ -1126,6 +1220,7 @@
   if (data_format == "NHWC") {
     // TODO(jie): transpose it back!
     output_tensor = ctx.TransposeTensor(output_tensor, {0, 2, 3, 1});
+    TFTRT_RETURN_ERROR_IF_NULLPTR(output_tensor, node_def.name());
   } else {
     VLOG(2) << "NCHW !!!!";
   }
@@ -1147,35 +1242,91 @@
                                            node_def.name());
 }
 
+// Helper function that converts the input into a tensor with the shape
+// specified by dims.
+bool PrepareTensorForShape(Converter& ctx, const TRT_TensorOrWeights& input,
+                           const nvinfer1::Dims& dims,
+                           const nvinfer1::ITensor** tensor) {
+  if (input.is_tensor()) {
+    if (DimsEqual(input.shape(), dims)) {
+      *tensor = input.tensor();
+    } else {
+      nvinfer1::IShuffleLayer* layer = ctx.network()->addShuffle(
+          *const_cast<nvinfer1::ITensor*>(input.tensor()));
+      if (layer != nullptr) {
+        layer->setReshapeDimensions(dims);
+        *tensor = layer->getOutput(0);
+      } else {
+        return false;
+      }
+    }
+  } else {
+#if NV_TENSORRT_MAJOR > 3
+    nvinfer1::IConstantLayer* layer =
+        ctx.network()->addConstant(dims, input.weights());
+    if (layer != nullptr) {
+      *tensor = layer->getOutput(0);
+    } else {
+      return false;
+    }
+#else
+    return false;
+#endif
+  }
+  return true;
+}
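// A hedged, self-contained sketch of numpy-style broadcast alignment, which
// is my reading of what TensorRTGetBroadcastShape (used below) produces --
// an assumption, not its real implementation. It also ignores the
// tensor-vs-weights batch-dimension handling the real helper performs:
// right-align both shapes, pad the shorter with 1s, and require each pair of
// dims to match or contain a 1.
#include <algorithm>
#include <cassert>
#include <vector>

bool GetBroadcastShapes(std::vector<int> l, std::vector<int> r,
                        std::vector<int>* out_l, std::vector<int>* out_r) {
  const size_t rank = std::max(l.size(), r.size());
  out_l->assign(rank, 1);
  out_r->assign(rank, 1);
  std::copy(l.rbegin(), l.rend(), out_l->rbegin());  // right-align
  std::copy(r.rbegin(), r.rend(), out_r->rbegin());
  for (size_t i = 0; i < rank; ++i) {
    if ((*out_l)[i] != (*out_r)[i] && (*out_l)[i] != 1 && (*out_r)[i] != 1)
      return false;  // incompatible pair; caller reports InvalidArgument
  }
  return true;
}

int main() {
  std::vector<int> l, r;
  assert(GetBroadcastShapes({8, 1, 5}, {4, 5}, &l, &r));
  assert((l == std::vector<int>{8, 1, 5}) && (r == std::vector<int>{1, 4, 5}));
}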
+
 tensorflow::Status BinaryTensorOpTensor(
     Converter& ctx, const tensorflow::NodeDef& node_def,
-    const nvinfer1::ITensor* tensor_l, const nvinfer1::ITensor* tensor_r,
+    const TRT_TensorOrWeights& operand_l, const TRT_TensorOrWeights& operand_r,
     std::vector<TRT_TensorOrWeights>* outputs) {
   static const std::unordered_map<string, nvinfer1::ElementWiseOperation> ops{
       {"Add", nvinfer1::ElementWiseOperation::kSUM},
       {"Mul", nvinfer1::ElementWiseOperation::kPROD},
       {"Sub", nvinfer1::ElementWiseOperation::kSUB},
       {"Div", nvinfer1::ElementWiseOperation::kDIV},
+      {"RealDiv", nvinfer1::ElementWiseOperation::kDIV},
+      {"Minimum", nvinfer1::ElementWiseOperation::kMIN},
+      {"Maximum", nvinfer1::ElementWiseOperation::kMAX},
   };
 
-  // FIXME assume type matches input weights
+  const nvinfer1::ITensor* tensor_l;
+  const nvinfer1::ITensor* tensor_r;
+
+  nvinfer1::Dims dim_l;
+  nvinfer1::Dims dim_r;
+
+  if (!TensorRTGetBroadcastShape(operand_l.shape(), operand_l.is_tensor(),
+                                 operand_r.shape(), operand_r.is_tensor(),
+                                 &dim_l, &dim_r)) {
+    return tensorflow::errors::InvalidArgument(
+        "Binary op broadcast scheme not supported by TensorRT op: " +
+        node_def.op() + ", at: " + node_def.name());
+  }
+
+  TFTRT_RETURN_ERROR_IF_FALSE(
+      PrepareTensorForShape(ctx, operand_l, dim_l, &tensor_l), node_def.name());
+  TFTRT_RETURN_ERROR_IF_FALSE(
+      PrepareTensorForShape(ctx, operand_r, dim_r, &tensor_r), node_def.name());
+
   // get trt type & shape
   TFAttrs attrs(node_def);
   // maybe this part has to be moved into the block of rsqrt later
   nvinfer1::DataType dtype = attrs.get<nvinfer1::DataType>("T");
 
   // check type consistency
-  CHECK_EQ_TYPE(tensor_l->getType(), dtype);
-  CHECK_EQ_TYPE(tensor_r->getType(), dtype);
+  TFTRT_CHECK_EQ_TYPE(tensor_l->getType(), dtype);
+  TFTRT_CHECK_EQ_TYPE(tensor_r->getType(), dtype);
   auto op_pair = ops.find(node_def.op());
-  if (op_pair == ops.end())
+  if (op_pair == ops.end()) {
     return tensorflow::errors::Unimplemented(
-        "binary op: " + node_def.op() +
-        " not supported at: " + node_def.name());
+        "binary op: ", node_def.op(), " not supported at: ", node_def.name());
+  }
 
   nvinfer1::IElementWiseLayer* layer = ctx.network()->addElementWise(
+      // TODO(aaroey): will tensor_l/tensor_r get modified?
       *const_cast<nvinfer1::ITensor*>(tensor_l),
       *const_cast<nvinfer1::ITensor*>(tensor_r), op_pair->second);
+  TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
 
   nvinfer1::ITensor* output_tensor = layer->getOutput(0);
 
@@ -1202,7 +1353,7 @@
   // passing attributes
   // TODO(jie): support more general attribute
   TFAttrs attrs(node_def);
-  auto attr_key_vector = attrs.GetAllAttrKey();
+  auto attr_key_vector = attrs.GetAllAttrKeys();
   for (auto attr_key : attr_key_vector) {
     // TODO(jie): support only list of float for toy example here.
     auto data = attrs.get<std::vector<float>>(attr_key);
@@ -1223,29 +1374,6 @@
   return tensorflow::Status::OK();
 }
 
-tensorflow::Status ConvertPlaceholder(
-    Converter& ctx, const tensorflow::NodeDef& node_def,
-    const std::vector<TRT_TensorOrWeights>& inputs,
-    std::vector<TRT_TensorOrWeights>* outputs) {
-  VLOG(2) << "Placeholder should have been replace already";
-  return tensorflow::errors::Unimplemented("cannot convert Placeholder op");
-  // OK this make sense since we are supposed to replace it with input
-  TFAttrs attrs(node_def);
-  nvinfer1::DataType dtype = attrs.get<nvinfer1::DataType>("dtype");
-  nvinfer1::Dims dims = attrs.get<nvinfer1::Dims>("shape");
-
-  dims.nbDims--;
-  for (int i = 0; i < dims.nbDims; i++) dims.d[i] = dims.d[i + 1];
-
-  nvinfer1::ITensor* output =
-      ctx.network()->addInput(node_def.name().c_str(), dtype, dims);
-  if (!output) {
-    return tensorflow::errors::InvalidArgument("Failed to create Input layer");
-  }
-  outputs->push_back(TRT_TensorOrWeights(output));
-  return tensorflow::Status::OK();
-}
-
 tensorflow::Status ConvertConv2D(Converter& ctx,
                                  const tensorflow::NodeDef& node_def,
                                  const std::vector<TRT_TensorOrWeights>& inputs,
@@ -1271,65 +1399,64 @@
 
   int h_index = 2;
   int w_index = 3;
-  auto data_format = attrs.get<string>("data_format");
+  const auto data_format = attrs.get<string>("data_format");
   if (data_format == "NHWC") {
     h_index = 1;
     w_index = 2;
     tensor = ctx.TransposeTensor(const_cast<nvinfer1::ITensor*>(tensor),
                                  {0, 3, 1, 2});
-  } else {
-    VLOG(2) << "NCHW !!!!";
+    TFTRT_RETURN_ERROR_IF_NULLPTR(tensor, node_def.name());
   }
+
   nvinfer1::PoolingType type;
-  // TODO(jie): support other pooling type
-  if (node_def.op() == "MaxPool")
+  if (node_def.op() == "MaxPool") {
     type = nvinfer1::PoolingType::kMAX;
-  else if (node_def.op() == "AvgPool")
+  } else if (node_def.op() == "AvgPool") {
     type = nvinfer1::PoolingType::kAVERAGE;
-  else
-    return tensorflow::errors::Unimplemented("Only supports Max pool");
+  } else {
+    return tensorflow::errors::Unimplemented("Unsupported pool type: ",
+                                             node_def.op());
+  }
 
-  // TODO(jie): NCHW
-  auto tf_stride = attrs.get<std::vector<int>>("strides");
-  nvinfer1::DimsHW stride(tf_stride[h_index], tf_stride[w_index]);
+  const auto tf_stride = attrs.get<std::vector<int>>("strides");
+  const nvinfer1::DimsHW stride(tf_stride[h_index], tf_stride[w_index]);
 
-  auto tf_kernel = attrs.get<std::vector<int>>("ksize");
-  nvinfer1::DimsHW ksize(tf_kernel[h_index], tf_kernel[w_index]);
+  const auto tf_kernel = attrs.get<std::vector<int>>("ksize");
+  const nvinfer1::DimsHW ksize(tf_kernel[h_index], tf_kernel[w_index]);
 
   auto tensor_dim = tensor->getDimensions();
   std::vector<std::pair<int, int>> padding;
-  // TODO(jie): padding.
-  if (attrs.get<string>("padding") == "SAME") {
+  const string padding_type = attrs.get<string>("padding");
+  if (padding_type == "SAME") {
     // This is NCHW tensor with no batch dimension.
     //  1 -> h
     //  2 -> w
     padding = CreateSamePadding(
         stride, ksize,
         {static_cast<int>(tensor_dim.d[1]), static_cast<int>(tensor_dim.d[2])});
-  } else if (attrs.get<string>("padding") == "VALID") {
-    // No padding for valid padding here
-    VLOG(2) << "No padding added for VALID padding in pool" << node_def.name();
+  } else if (padding_type == "VALID") {
     padding = {{0, 0}, {0, 0}};
   } else {
-    return tensorflow::errors::Unimplemented(
-        "Current MaxPool cannot support padding other than SAME");
+    return tensorflow::errors::Unimplemented("Unsupported padding type: ",
+                                             padding_type);
   }
 
   if (padding[0].first != padding[0].second ||
       padding[1].first != padding[1].second) {
-    // TODO(jie): handle asymmetric padding
     VLOG(2) << "Padding!!!: " << padding[0].first << padding[0].second
             << padding[1].first << padding[1].second;
     auto pad_layer = ctx.network()->addPadding(
         *const_cast<nvinfer1::ITensor*>(tensor),
         nvinfer1::DimsHW(padding[0].first, padding[1].first),
         nvinfer1::DimsHW(padding[0].second, padding[1].second));
+    TFTRT_RETURN_ERROR_IF_NULLPTR(pad_layer, node_def.name());
     padding = {{0, 0}, {0, 0}};
     tensor = pad_layer->getOutput(0);
   }
 
   nvinfer1::IPoolingLayer* layer = ctx.network()->addPooling(
       *const_cast<nvinfer1::ITensor*>(tensor), type, ksize);
+  TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
 
   layer->setStride(stride);
   layer->setPadding({padding[0].first, padding[1].first});
@@ -1337,10 +1464,8 @@
   nvinfer1::ITensor* output_tensor = layer->getOutput(0);
 
   if (data_format == "NHWC") {
-    // TODO(jie): transpose it back!
     output_tensor = ctx.TransposeTensor(output_tensor, {0, 2, 3, 1});
-  } else {
-    VLOG(2) << "NCHW !!!!";
+    TFTRT_RETURN_ERROR_IF_NULLPTR(output_tensor, node_def.name());
   }
   outputs->push_back(TRT_TensorOrWeights(output_tensor));
   return tensorflow::Status::OK();
@@ -1353,6 +1478,7 @@
   const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
   nvinfer1::IActivationLayer* layer = ctx.network()->addActivation(
       *const_cast<nvinfer1::ITensor*>(tensor), nvinfer1::ActivationType::kRELU);
+  TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
   nvinfer1::ITensor* output_tensor = layer->getOutput(0);
   outputs->push_back(TRT_TensorOrWeights(output_tensor));
   return tensorflow::Status::OK();
@@ -1363,40 +1489,61 @@
                                 const std::vector<TRT_TensorOrWeights>& inputs,
                                 std::vector<TRT_TensorOrWeights>* outputs) {
   if (inputs.size() != 2 || !inputs.at(0).is_tensor() ||
-      !inputs.at(1).is_weights())
+      !inputs.at(1).is_weights()) {
     return tensorflow::errors::Unimplemented(
-        "Only supports tensor op weight for now, at " + node_def.name());
-  // Implement tensor binaryOp weight [channel wise] for now;
-  const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
+        "ConvertScale only supports tensor<op>weight: ", node_def.name());
+  }
 
+  const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
   TRT_ShapedWeights weights = inputs.at(1).weights();
   if (ctx.isFP16()) {
     weights = ConvertFP32ToFP16(ctx, inputs.at(1).weights());
   }
 
   TRT_ShapedWeights empty_weights(weights.type_);
-
   TFAttrs attrs(node_def);
 
-  // Transpose NHWC
-  auto data_format = attrs.get<string>("data_format");
+  const auto data_format = attrs.get<string>("data_format");
+  int channel_index;
+  const auto dims = tensor->getDimensions();
   if (data_format == "NHWC") {
-    tensor = ctx.TransposeTensor(const_cast<nvinfer1::ITensor*>(tensor),
-                                 {0, 3, 1, 2});
-    // TODO(jie): transpose it
+    //  1). NHWC is really N+C
+    channel_index = dims.nbDims - 1;  // batch dimension is implicit here!
   } else {
-    VLOG(2) << "NCHW !!!!";
+    //  2). NCHW is really N+CHW
+    channel_index = dims.nbDims - 3;  // batch dimension is implicit here!
   }
 
-  auto dims = tensor->getDimensions();
-  VLOG(2) << "tensor dimensions: " << dims.nbDims;
-  for (int i = 0; i < dims.nbDims; i++) {
-    VLOG(2) << "i: " << dims.d[i];
+  nvinfer1::Permutation permutation;
+  for (int32_t i = 0; i < dims.nbDims; ++i) {
+    permutation.order[i] = i;
   }
-  dims = weights.shape_;
-  VLOG(2) << "tensor dimensions: " << dims.nbDims;
-  for (int i = 0; i < dims.nbDims; i++) {
-    VLOG(2) << "i: " << dims.d[i];
+
+  if (channel_index >= 0) {
+    permutation.order[0] = channel_index;
+    permutation.order[channel_index] = 0;
+  } else {
+    return tensorflow::errors::Unimplemented(
+        "TFTRT::BiasAdd cannot apply on batch dimension, at ", node_def.name());
+  }
+
+  // TensorRT addScale requires the input to be of rank 3, so we need to
+  // apply a transpose as well as a reshape.
+  if (channel_index != 0 || dims.nbDims != 3) {
+    nvinfer1::IShuffleLayer* shuffle_layer =
+        ctx.network()->addShuffle(*const_cast<nvinfer1::ITensor*>(tensor));
+    TFTRT_RETURN_ERROR_IF_NULLPTR(shuffle_layer, node_def.name());
+    nvinfer1::Dims reshape_dims;
+    reshape_dims.nbDims = 3;
+    reshape_dims.d[0] = 0;                          // 0 means copy from input
+    reshape_dims.d[1] = dims.nbDims >= 2 ? 0 : 1;   // 0 means copy from input
+    reshape_dims.d[2] = dims.nbDims >= 3 ? -1 : 1;  // -1 means infer the rest
+    if (channel_index != 0) {
+      // Maybe we do not need this check; kept out of concern for TRT
+      // optimization.
+      shuffle_layer->setFirstTranspose(permutation);
+    }
+    shuffle_layer->setReshapeDimensions(reshape_dims);
+    tensor = shuffle_layer->getOutput(0);
   }
 
   nvinfer1::ScaleMode mode = nvinfer1::ScaleMode::kCHANNEL;
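// A hedged standalone sketch of the TRT IShuffleLayer reshape-dim
// conventions used in the block above: 0 copies the corresponding input
// dimension, -1 infers one dimension from the remaining element count
// (ResolveReshape is a hypothetical helper; it assumes a 0 only appears
// where the input actually has that dimension).
#include <cassert>
#include <vector>

std::vector<int> ResolveReshape(const std::vector<int>& input,
                                std::vector<int> dims) {
  int known = 1, infer = -1;
  for (size_t i = 0; i < dims.size(); ++i) {
    if (dims[i] == 0) dims[i] = input[i];  // 0: copy from the input
    if (dims[i] == -1) {
      infer = (int)i;
      continue;
    }
    known *= dims[i];
  }
  int total = 1;
  for (int d : input) total *= d;
  if (infer >= 0) dims[infer] = total / known;  // -1: infer from the rest
  return dims;
}

int main() {
  // A CHW tensor reshaped with {0, 0, -1} keeps C and H and flattens the
  // remainder into the last dimension.
  assert((ResolveReshape({2, 4, 8}, {0, 0, -1}) == std::vector<int>{2, 4, 8}));
  // A rank-1 tensor {6} with {0, 1, -1} becomes {6, 1, 1}, as built above.
  assert((ResolveReshape({6}, {0, 1, -1}) == std::vector<int>{6, 1, 1}));
}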
@@ -1407,14 +1554,26 @@
   nvinfer1::IScaleLayer* layer =
       ctx.network()->addScale(*const_cast<nvinfer1::ITensor*>(tensor), mode,
                               weights, empty_weights, empty_weights);
+  TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
 
   nvinfer1::ITensor* output_tensor = layer->getOutput(0);
-  if (data_format == "NHWC") {
-    // TODO(jie): transpose it back!
-    output_tensor = ctx.TransposeTensor(output_tensor, {0, 2, 3, 1});
-  } else {
-    VLOG(2) << "NCHW !!!!";
+
+  // restore transpose & reshape
+  if (channel_index != 0 || dims.nbDims != 3) {
+    nvinfer1::IShuffleLayer* shuffle_layer = ctx.network()->addShuffle(
+        *const_cast<nvinfer1::ITensor*>(output_tensor));
+    TFTRT_RETURN_ERROR_IF_NULLPTR(shuffle_layer, node_def.name());
+    nvinfer1::Dims reshape_dims = dims;
+    int tmp = reshape_dims.d[channel_index];
+    reshape_dims.d[channel_index] = reshape_dims.d[0];
+    reshape_dims.d[0] = tmp;
+    shuffle_layer->setReshapeDimensions(reshape_dims);
+    if (channel_index != 0) {
+      shuffle_layer->setSecondTranspose(permutation);
+    }
+    output_tensor = shuffle_layer->getOutput(0);
   }
+
   outputs->push_back(TRT_TensorOrWeights(output_tensor));
   return tensorflow::Status::OK();
 }
@@ -1431,11 +1590,13 @@
 
   // Create shaped weights as output
   tensorflow::Tensor tensor;
-  if (!tensor.FromProto(weights_tensor))
-    return tensorflow::errors::Internal("Cannot parse weight tensor proto: " +
+  if (!tensor.FromProto(weights_tensor)) {
+    return tensorflow::errors::Internal("Cannot parse weight tensor proto: ",
                                         node_def.name());
+  }
 
   TRT_ShapedWeights weights(dtype);
+  // TODO(aaroey): we should choose the array using dtype and shape.
   if (!weights_tensor.float_val().empty()) {
     VLOG(2) << "SCALAR!!!" << node_def.name();
     nvinfer1::Dims scalar_shape;
@@ -1443,22 +1604,16 @@
       VLOG(2) << "dimensions: " << tensor.dims();
       VLOG(2) << "size: " << weights_tensor.float_val_size();
       scalar_shape = GetTensorShape(tensor);
+      VLOG(2) << "details: ";
       for (int i = 0; i < scalar_shape.nbDims; i++)
         VLOG(2) << scalar_shape.d[i];
-      if (GetShapeSize(scalar_shape) != weights_tensor.float_val_size()) {
-        if (weights_tensor.float_val_size() == 1 ||
-            scalar_shape.d[0] == weights_tensor.float_val_size()) {
-          scalar_shape.nbDims = 1;
-          // no dimension provided. flatten it
-          scalar_shape.d[0] = weights_tensor.float_val_size();
-          scalar_shape.type[0] = nvinfer1::DimensionType::kSPATIAL;
-        } else {
-          LOG(WARNING) << "Broadcast on weights only supports kCHANNEL and"
-                       << " kUNIFORM, at: " << node_def.name();
-          string err_str("Broadcast method is not supported for '");
-          StrAppend(&err_str, node_def.name(), "' of type ", node_def.op());
-          return tensorflow::errors::InvalidArgument(err_str);
-        }
+      if (GetShapeSize(scalar_shape) != weights_tensor.float_val_size() &&
+          weights_tensor.float_val_size() != 1) {
+        LOG(ERROR) << "Broadcast on weights only supports kCHANNEL and"
+                   << " kUNIFORM, at: " << node_def.name();
+        string err_str("Broadcast method is not supported for '");
+        StrAppend(&err_str, node_def.name(), "' of type ", node_def.op());
+        return tensorflow::errors::InvalidArgument(err_str);
       }
     } else {
       VLOG(2) << "Dimensions: " << tensor.dims();
@@ -1468,39 +1623,42 @@
       scalar_shape.type[0] = nvinfer1::DimensionType::kSPATIAL;
       for (int i = 1; i < nvinfer1::Dims::MAX_DIMS; i++) {
         scalar_shape.d[i] = 0;
-        scalar_shape.type[i] = nvinfer1::DimensionType::kSPATIAL;
       }
     }
+    // TODO(aaroey): use GetShapeSize().
     size_t len_data = tensorflow::DataTypeSize(dtype);
     for (int i = 0; i < scalar_shape.nbDims; i++) len_data *= scalar_shape.d[i];
     ctx.weight_store()->store_.push_back(std::vector<uint8_t>(len_data));
     void* dst = static_cast<void*>(&(ctx.weight_store()->store_.back()[0]));
-    std::vector<float> tensor_data(
-        weights_tensor.float_val().begin(),
-        weights_tensor.float_val()
-            .end());  //  make a local copy first to flatten
-    memcpy(dst, tensor_data.data(), len_data);  // store into weight store
+    if (weights_tensor.float_val_size() == 1) {
+      std::fill_n((float*)dst, GetShapeSize(scalar_shape),
+                  *weights_tensor.float_val().begin());
+    } else {
+      // TODO(aaroey): get rid of this copy, as RepeatedField is always
+      // contiguous; the local copy is only needed to flatten a source that
+      // is not contiguous.
+      std::vector<float> tensor_data(weights_tensor.float_val().begin(),
+                                     weights_tensor.float_val().end());
+      memcpy(dst, tensor_data.data(), len_data);  // store into weight store
+    }
+    VLOG(2) << "create shape details: ";
+    for (int i = 0; i < scalar_shape.nbDims; i++) VLOG(2) << scalar_shape.d[i];
     weights = TRT_ShapedWeights(dtype, dst, scalar_shape);
   } else if (!weights_tensor.int_val().empty()) {
+    // TODO(aaroey): this is very similar to the above code for float, merge
+    // them.
     VLOG(2) << "int!!!" << node_def.name();
     nvinfer1::Dims scalar_shape;
     if (tensor.dims() > 0) {
       VLOG(2) << "dimensions: " << tensor.dims();
       scalar_shape = GetTensorShape(tensor);
-      if (GetShapeSize(scalar_shape) != weights_tensor.int_val_size()) {
-        if (weights_tensor.int_val_size() == 1 ||
-            scalar_shape.d[0] == weights_tensor.int_val_size()) {
-          scalar_shape.nbDims = 1;
-          // no dimension provided. flatten it
-          scalar_shape.d[0] = weights_tensor.int_val_size();
-          scalar_shape.type[0] = nvinfer1::DimensionType::kSPATIAL;
-        } else {
-          LOG(WARNING) << "Broadcast on weights only supports kCHANNEL and"
-                       << " kUNIFORM, at: " << node_def.name();
-          string err_str("Broadcast method is not supported for '");
-          StrAppend(&err_str, node_def.name(), "' of type ", node_def.op());
-          return tensorflow::errors::InvalidArgument(err_str);
-        }
+      if (GetShapeSize(scalar_shape) != weights_tensor.int_val_size() &&
+          weights_tensor.int_val_size() != 1) {
+        LOG(WARNING) << "Broadcast on weights only supports kCHANNEL and"
+                     << " kUNIFORM, at: " << node_def.name();
+        string err_str("Broadcast method is not supported for '");
+        StrAppend(&err_str, node_def.name(), "' of type ", node_def.op());
+        return tensorflow::errors::InvalidArgument(err_str);
       }
     } else {
       VLOG(2) << "dimensions: " << tensor.dims();
@@ -1513,23 +1671,30 @@
         scalar_shape.type[i] = nvinfer1::DimensionType::kSPATIAL;
       }
     }
-    //  we should not have converted //if (ctx.isFP16()) {
+    // We should not have converted to FP16 here.
     size_t len_data = tensorflow::DataTypeSize(dtype);
     for (int i = 0; i < scalar_shape.nbDims; i++) len_data *= scalar_shape.d[i];
     size_t len_tensor = weights_tensor.int_val_size() * sizeof(int32);
     len_data = std::max(len_data, len_tensor);
     ctx.weight_store()->store_.push_back(std::vector<uint8_t>(len_data));
     void* dst = static_cast<void*>(&(ctx.weight_store()->store_.back()[0]));
-    std::vector<int32> tensor_data(
-        weights_tensor.int_val().begin(),
-        weights_tensor.int_val().end());  //  make a local copy first to flatten
-                                          //  doesn't have to be contigous
-    memcpy(dst, tensor_data.data(), len_tensor);  // store into weight store
+    if (weights_tensor.int_val_size() == 1) {
+      std::fill_n((int*)dst, GetShapeSize(scalar_shape),
+                  *weights_tensor.int_val().begin());
+    } else {
+      // TODO(aaroey): get rid of this copy, as RepeatedField is always
+      // contiguous; the local copy is only needed to flatten a source that
+      // is not contiguous.
+      std::vector<int32> tensor_data(weights_tensor.int_val().begin(),
+                                     weights_tensor.int_val().end());
+      memcpy(dst, tensor_data.data(), len_tensor);  // store into weight store
+    }
     weights = TRT_ShapedWeights(dtype, dst, scalar_shape);
   } else if (!weights_tensor.tensor_content().empty()) {
-    //  obsolete method.
-    //  After optimization path, we do not see weights in this format.
-    //  fp16 conversion technically should be needed here.
+    // Obsolete method: after the optimization path, we do not see weights in
+    // this format.
+    // TODO(aaroey): why?
+    // FP16 conversion technically should be needed here.
     VLOG(2) << "TENSOR!!!" << node_def.name();
     const auto& content = weights_tensor.tensor_content();
 
@@ -1543,8 +1708,8 @@
           content, static_cast<char*>(const_cast<void*>(weights.GetValues())));
     }
   } else {
-    return tensorflow::errors::Unimplemented(
-        "Not supported constant type, at " + node_def.name());
+    return tensorflow::errors::Unimplemented("Not supported constant type, at ",
+                                             node_def.name());
   }
   // Pass the output
   outputs->push_back(TRT_TensorOrWeights(weights));
@@ -1563,96 +1728,144 @@
                                  const tensorflow::NodeDef& node_def,
                                  const std::vector<TRT_TensorOrWeights>& inputs,
                                  std::vector<TRT_TensorOrWeights>* outputs) {
-  if (inputs.size() != 2)
+  if (inputs.size() != 2) {
     return tensorflow::errors::FailedPrecondition(
-        "Binary ops require two tensor input, at " + node_def.name());
+        "Binary ops require two tensor input, at ", node_def.name());
+  }
 
-  if (inputs.at(0).is_weights() && inputs.at(1).is_weights())
-    return ConstantFoldBinary(ctx, node_def, inputs, outputs);
+  // Constant folding should have been done by TensorFlow.
 
-  if (inputs.at(0).is_tensor() && inputs.at(1).is_weights())
-    return BinaryTensorOpWeight(ctx, node_def, inputs.at(0).tensor(),
-                                inputs.at(1).weights(), outputs);
+  if (inputs.at(0).is_weights() && inputs.at(1).is_weights()) {
+    return tensorflow::errors::Unimplemented(
+        "Constant folding is falled back to TensorFlow, binary op received "
+        "both input as constant at: ",
+        node_def.name());
+  }
 
-  if (inputs.at(0).is_weights() && inputs.at(1).is_tensor())
-    return BinaryTensorOpWeight(ctx, node_def, inputs.at(1).tensor(),
-                                inputs.at(0).weights(), outputs);
-
-  if (inputs.at(0).is_tensor() && inputs.at(1).is_tensor())
-    return BinaryTensorOpTensor(ctx, node_def, inputs.at(0).tensor(),
-                                inputs.at(1).tensor(), outputs);
-
-  return tensorflow::errors::Unknown("Binary op input error, at " +
-                                     node_def.name());
+  // Try to convert into a Scale layer first (for better performance).
+  // Since the scale layer supports only a restricted broadcast policy and
+  // set of op types, we allow failure and fall back to the elementwise op
+  // (BinaryTensorOpTensor).
+  Status status = tensorflow::Status::OK();
+  if (inputs.at(0).is_tensor() && inputs.at(1).is_weights()) {
+    status = BinaryTensorOpWeight(ctx, node_def, inputs.at(0).tensor(),
+                                  inputs.at(1).weights(), false, outputs);
+  } else if (inputs.at(0).is_weights() && inputs.at(1).is_tensor()) {
+    status = BinaryTensorOpWeight(ctx, node_def, inputs.at(1).tensor(),
+                                  inputs.at(0).weights(), true, outputs);
+#if NV_TENSORRT_MAJOR == 3
+  } else {
+#else
+  }
+  if ((inputs.at(0).is_tensor() && inputs.at(1).is_tensor()) || !status.ok()) {
+#endif
+    status = BinaryTensorOpTensor(ctx, node_def, inputs.at(0), inputs.at(1),
+                                  outputs);
+  }
+  return status;
 }
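// A hedged, self-contained sketch (names hypothetical) of the dispatch in
// ConvertBinary above: under NV_TENSORRT_MAJOR == 3 the `#else` arm is
// compiled out and tensor<->tensor pairs take the plain `else` branch; under
// newer TRT, a failed scale attempt is retried through the elementwise path.
// Weights<->weights pairs are rejected before this point, as in the code.
#include <iostream>
#include <string>

std::string ConvertBinarySketch(bool l_is_tensor, bool r_is_tensor,
                                bool scale_ok) {
  const bool tried_scale = (l_is_tensor != r_is_tensor);  // one side weights
  // TRT > 3 behavior: tensor<->tensor pairs, or a failed scale attempt,
  // go through BinaryTensorOpTensor (elementwise).
  if ((l_is_tensor && r_is_tensor) || !(tried_scale && scale_ok))
    return "elementwise";
  return "scale";
}

int main() {
  std::cout << ConvertBinarySketch(true, false, true) << "\n";   // scale
  std::cout << ConvertBinarySketch(true, false, false) << "\n";  // elementwise
  std::cout << ConvertBinarySketch(true, true, false) << "\n";   // elementwise
}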
 
 tensorflow::Status ConvertUnary(Converter& ctx,
                                 const tensorflow::NodeDef& node_def,
                                 const std::vector<TRT_TensorOrWeights>& inputs,
                                 std::vector<TRT_TensorOrWeights>* outputs) {
-  if (inputs.size() != 1)
+  static const std::unordered_map<string, nvinfer1::UnaryOperation> ops{
+      {"Neg", nvinfer1::UnaryOperation::kNEG},
+      {"Exp", nvinfer1::UnaryOperation::kEXP},
+      {"Log", nvinfer1::UnaryOperation::kLOG},
+      {"Sqrt", nvinfer1::UnaryOperation::kSQRT},
+      {"Abs", nvinfer1::UnaryOperation::kABS},
+      {"Reciprocal", nvinfer1::UnaryOperation::kRECIP},
+  };
+
+  if (inputs.size() != 1) {
     return tensorflow::errors::FailedPrecondition(
-        "Unary ops require single tensor input, at " + node_def.name());
+        "Unary ops require single tensor input, at ", node_def.name());
+  }
 
-  if (inputs.at(0).is_weights())
-    return ConstantFoldUnary(ctx, node_def, inputs, outputs);
-  else if (inputs.at(0).is_tensor())
+#if NV_TENSORRT_MAJOR == 3
+  if (inputs.at(0).is_weights()) {
     return tensorflow::errors::Unimplemented(
-        "Unary op for tensor not supported, at " + node_def.name());
+        "Constant folding for unary op is not supported", node_def.name());
+  }
+#endif
 
-  return tensorflow::errors::Unknown("Binary op input error, at " +
-                                     node_def.name());
+  // TODO(jie): check type
+  const nvinfer1::ITensor* tensor;
+  TFTRT_RETURN_ERROR_IF_FALSE(
+      PrepareTensorForShape(ctx, inputs.at(0), inputs.at(0).shape(), &tensor),
+      node_def.name());
+
+  nvinfer1::IUnaryLayer* layer;
+  if (node_def.op() == "Rsqrt") {
+    layer = ctx.network()->addUnary(*const_cast<nvinfer1::ITensor*>(tensor),
+                                    nvinfer1::UnaryOperation::kSQRT);
+    TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
+    tensor = layer->getOutput(0);
+    layer = ctx.network()->addUnary(*const_cast<nvinfer1::ITensor*>(tensor),
+                                    nvinfer1::UnaryOperation::kRECIP);
+  } else if (ops.count(node_def.op()) != 0) {
+    layer = ctx.network()->addUnary(*const_cast<nvinfer1::ITensor*>(tensor),
+                                    ops.at(node_def.op()));
+  } else {
+    return tensorflow::errors::InvalidArgument(
+        "Binary op: ", node_def.op(), " not supported, at ", node_def.name());
+  }
+
+  TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
+  nvinfer1::ITensor* output_tensor = layer->getOutput(0);
+  outputs->push_back(TRT_TensorOrWeights(output_tensor));
+  return tensorflow::Status::OK();
 }
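// A hedged numeric check (not part of the patch) of the Rsqrt special case
// above, which composes two TRT unaries, kSQRT then kRECIP, because there is
// no native rsqrt operation:
#include <cassert>
#include <cmath>

int main() {
  const float x = 16.0f;
  const float rsqrt = 1.0f / std::sqrt(x);  // recip(sqrt(x))
  assert(rsqrt == 0.25f);                   // exact for this power of two
}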
 
-tensorflow::Status ConvertReduce(Converter& ctx,
-                                 const tensorflow::NodeDef& node_def,
-                                 const std::vector<TRT_TensorOrWeights>& inputs,
-                                 std::vector<TRT_TensorOrWeights>* outputs) {
+#if NV_TENSORRT_MAJOR == 3
+tensorflow::Status ConvertReducePool(
+    Converter& ctx, const tensorflow::NodeDef& node_def,
+    const std::vector<TRT_TensorOrWeights>& inputs,
+    std::vector<TRT_TensorOrWeights>* outputs) {
   if (inputs.size() != 2 || !inputs.at(0).is_tensor() ||
-      !inputs.at(1).is_weights())
+      !inputs.at(1).is_weights()) {
     return tensorflow::errors::InvalidArgument(
-        "Input expects tensor and weights, at" + node_def.name());
+        "Input expects tensor and weights, at", node_def.name());
+  }
 
   // Implement tensor binaryOp weight [channel wise] for now;
   const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
-  auto dims = tensor->getDimensions();
+  const auto dims = tensor->getDimensions();
   // Restore implicit batch dimension
-  int nb_dims = dims.nbDims + 1;
+  const int nb_dims = dims.nbDims + 1;
 
   TRT_ShapedWeights index_list = inputs.at(1).weights();
-
   TFAttrs attrs(node_def);
-  // TODO(jie): handle data type.
-  // Index type here is done through TF type, so I can leverage their
-  // EnumToDataType for my cast
   auto index_type = attrs.get<tensorflow::DataType>("Tidx");
 
   // Only expect to handle INT32 as attributes for now
-  if (index_type != tensorflow::DataType::DT_INT32)
+  if (index_type != tensorflow::DataType::DT_INT32) {
     return tensorflow::errors::Unimplemented("Tidx supports only DT_INT32");
-  auto index_list_data =
+  }
+  const auto index_list_data =
       static_cast<int*>(const_cast<void*>(index_list.GetValues()));
 
-  // Hack warning: have to fall back to pool layer since reduce is not in public
-  // TRT yet.
-  if (nb_dims != 4)
+  if (nb_dims != 4) {
     return tensorflow::errors::InvalidArgument(
-        "TRT only support reduce on 4 dimensional tensors, at" +
+        "TRT only support reduce on 4 dimensional tensors, at",
         node_def.name());
-  if (index_list.count() > 2)
+  }
+  if (index_list.count() > 2) {
     return tensorflow::errors::InvalidArgument(
-        "TRT cannot support reduce on more than 2 dimensions, at" +
+        "TRT cannot support reduce on more than 2 dimensions, at",
         node_def.name());
+  }
 
   std::set<int> idx_set;
   // We cannot operate on Channel. permutation flag used to transpose tensor
   int permuted_index = -1;
   for (int i = 0; i < index_list.count(); i++) {
-    if (index_list_data[i] == 0)
-      return tensorflow::errors::InvalidArgument("TRT cannot reduce at 0, at" +
+    if (index_list_data[i] == 0) {
+      return tensorflow::errors::InvalidArgument("TRT cannot reduce at 0, at",
                                                  node_def.name());
+    }
     if (index_list_data[i] == 1) permuted_index = 1;
-
     idx_set.emplace(index_list_data[i]);
   }
 
@@ -1673,6 +1886,7 @@
     // Apply permutation before extracting dimension for pool_kernel
     tensor = ctx.TransposeTensor(const_cast<nvinfer1::ITensor*>(tensor),
                                  permutation_order);
+    TFTRT_RETURN_ERROR_IF_NULLPTR(tensor, node_def.name());
   }
 
   // Apply permutation before extracting dimension for pool_kernel
@@ -1685,34 +1899,104 @@
     nvinfer1::IPoolingLayer* layer =
         ctx.network()->addPooling(*const_cast<nvinfer1::ITensor*>(tensor),
                                   nvinfer1::PoolingType::kAVERAGE, pool_kernel);
+    TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
     output_tensor = layer->getOutput(0);
   } else {
-    return tensorflow::errors::Unimplemented(
-        "Op not supported " + node_def.op() + " , at " + node_def.name());
+    return tensorflow::errors::Unimplemented("Op not supported ", node_def.op(),
+                                             " , at ", node_def.name());
   }
   if (permuted_index != -1) {
     // Apply permutation before extracting dimension for pool_kernel
     output_tensor = ctx.TransposeTensor(
         const_cast<nvinfer1::ITensor*>(output_tensor), permutation_order);
+    TFTRT_RETURN_ERROR_IF_NULLPTR(output_tensor, node_def.name());
   }
   outputs->push_back(TRT_TensorOrWeights(output_tensor));
   return tensorflow::Status::OK();
 }
+#elif NV_TENSORRT_MAJOR > 3
+tensorflow::Status ConvertReduce(Converter& ctx,
+                                 const tensorflow::NodeDef& node_def,
+                                 const std::vector<TRT_TensorOrWeights>& inputs,
+                                 std::vector<TRT_TensorOrWeights>* outputs) {
+  if (inputs.size() != 2 || !inputs.at(0).is_tensor() ||
+      !inputs.at(1).is_weights()) {
+    return tensorflow::errors::InvalidArgument(
+        "Input expects tensor and weights, at", node_def.name());
+  }
+
+  const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
+  TRT_ShapedWeights index_list = inputs.at(1).weights();
+
+  TFAttrs attrs(node_def);
+  auto index_type = attrs.get<tensorflow::DataType>("Tidx");
+
+  // Only expect to handle INT32 as attributes for now
+  if (index_type != tensorflow::DataType::DT_INT32) {
+    return tensorflow::errors::Unimplemented("Tidx supports only DT_INT32");
+  }
+
+  const auto keep_dims = attrs.get<bool>("keep_dims");
+  auto index_list_data =
+      static_cast<int*>(const_cast<void*>(index_list.GetValues()));
+
+  int axes = 0;
+  if (index_list.count() == 0) {
+    return tensorflow::errors::InvalidArgument(
+        "TRT cannot support reduce on all (batch) dimensions, at",
+        node_def.name());
+  } else {
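+    // TRT's addReduce() takes a bitmask over the non-batch dimensions, so TF
+    // axis k maps to bit (k - 1); e.g. reducing TF axes {1, 3} gives 0b101.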
+    for (int i = 0; i < index_list.count(); i++) {
+      if (index_list_data[i] == 0) {
+        return tensorflow::errors::InvalidArgument(
+            "TRT cannot reduce at batch dimension, at", node_def.name());
+      }
+      axes |= (1 << (index_list_data[i] - 1));
+    }
+  }
+
+  nvinfer1::ReduceOperation reduce_operation;
+  if (node_def.op() == "Sum") {
+    reduce_operation = nvinfer1::ReduceOperation::kSUM;
+  } else if (node_def.op() == "Prod") {
+    reduce_operation = nvinfer1::ReduceOperation::kPROD;
+  } else if (node_def.op() == "Max") {
+    reduce_operation = nvinfer1::ReduceOperation::kMAX;
+  } else if (node_def.op() == "Min") {
+    reduce_operation = nvinfer1::ReduceOperation::kMIN;
+  } else if (node_def.op() == "Mean") {
+    reduce_operation = nvinfer1::ReduceOperation::kAVG;
+  } else {
+    return tensorflow::errors::Unimplemented("Op not supported ", node_def.op(),
+                                             " , at ", node_def.name());
+  }
+
+  nvinfer1::ILayer* layer =
+      ctx.network()->addReduce(*const_cast<nvinfer1::ITensor*>(tensor),
+                               reduce_operation, axes, keep_dims);
+  TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
+
+  outputs->push_back(TRT_TensorOrWeights(layer->getOutput(0)));
+  return tensorflow::Status::OK();
+}
+#endif
 
 tensorflow::Status ConvertPad(Converter& ctx,
                               const tensorflow::NodeDef& node_def,
                               const std::vector<TRT_TensorOrWeights>& inputs,
                               std::vector<TRT_TensorOrWeights>* outputs) {
+  // TODO(aaroey): make a routine for this check and reuse it.
   if (inputs.size() != 2 || !inputs.at(0).is_tensor() ||
-      !inputs.at(1).is_weights())
+      !inputs.at(1).is_weights()) {
     return tensorflow::errors::InvalidArgument(
-        "Input expects tensor and weights, at" + node_def.name());
+        "Input expects tensor and weights, at", node_def.name());
+  }
 
   // Implement tensor binaryOp weight [channel wise] for now;
   const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
-  auto dims = tensor->getDimensions();
+  const auto dims = tensor->getDimensions();
   // Restore implicit batch dimension
-  int nb_dims = dims.nbDims + 1;
+  const int nb_dims = dims.nbDims + 1;
 
   TRT_ShapedWeights pads = inputs.at(1).weights();
 
@@ -1722,21 +2006,24 @@
   auto padding_type = attrs.get<tensorflow::DataType>("Tpaddings");
   // TODO(jie): handle data type conversion for TRT?
 
-  if (pads.shape_.d[0] != nb_dims || pads.shape_.d[1] != 2)
+  if (pads.shape_.d[0] != nb_dims || pads.shape_.d[1] != 2) {
     return tensorflow::errors::InvalidArgument(
-        "Pad only supports explicit padding on 4 dimensional tensor, at " +
+        "Pad only supports explicit padding on 4 dimensional tensor, at ",
         node_def.name());
+  }
 
   // Only expect to handle INT32 as attributes for now
-  if (padding_type != tensorflow::DataType::DT_INT32)
+  if (padding_type != tensorflow::DataType::DT_INT32) {
     return tensorflow::errors::Unimplemented(
         "Tpaddings supports only DT_INT32");
+  }
   auto pad_data = static_cast<int*>(const_cast<void*>(pads.GetValues()));
 
   std::vector<int32_t> pad_index;
   for (int i = 0; i < nb_dims; i++) {
-    if (pad_data[2 * i] != 0 || pad_data[2 * i + 1] != 0)
+    if (pad_data[2 * i] != 0 || pad_data[2 * i + 1] != 0) {
       pad_index.push_back(i);
+    }
   }
 
   // No padding at all, we should exit
@@ -1746,20 +2033,23 @@
   }
 
   // Only supports padding on at most 2 axes; see GIE-2579.
-  if (pad_index.size() > 2)
+  if (pad_index.size() > 2) {
     return tensorflow::errors::InvalidArgument(
         "Padding layer does not support padding on > 2");
+  }
 
   // Padding on batch dimension is not supported
-  if (pad_index[0] == 0)
+  if (pad_index[0] == 0) {
     return tensorflow::errors::InvalidArgument(
         "Padding layer does not support padding on batch dimension");
+  }
 
   // Not doing the legit thing here: ignoring padding on dims 1 and 3.
   // TODO(jie): implement pad as uff parser
-  if (pad_index.size() == 2 && pad_index[0] == 0 && pad_index[1] == 3)
+  if (pad_index.size() == 2 && pad_index[0] == 0 && pad_index[1] == 3) {
     return tensorflow::errors::Unimplemented(
         "Padding layer does not support padding on dimension 1 and 3 yet");
+  }
 
   bool legit_pad = true;
   nvinfer1::DimsHW pre_padding(0, 0);
@@ -1770,6 +2060,7 @@
     legit_pad = false;
     tensor = ctx.TransposeTensor(const_cast<nvinfer1::ITensor*>(tensor),
                                  {0, 3, 2, 1});
+    TFTRT_RETURN_ERROR_IF_NULLPTR(tensor, node_def.name());
     permuted_pad_index[0] = 3;
   }
 
@@ -1786,11 +2077,14 @@
 
   nvinfer1::IPaddingLayer* layer = ctx.network()->addPadding(
       *const_cast<nvinfer1::ITensor*>(tensor), pre_padding, post_padding);
+  TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
   nvinfer1::ITensor* output_tensor = layer->getOutput(0);
 
-  if (!legit_pad)
+  if (!legit_pad) {
     output_tensor = ctx.TransposeTensor(
         const_cast<nvinfer1::ITensor*>(output_tensor), {0, 3, 2, 1});
+    TFTRT_RETURN_ERROR_IF_NULLPTR(output_tensor, node_def.name());
+  }
 
   outputs->push_back(TRT_TensorOrWeights(output_tensor));
   return tensorflow::Status::OK();
@@ -1803,9 +2097,10 @@
   // not including the last input (axis) here
   int input_size = static_cast<int>(inputs.size()) - 1;
 
-  if (!inputs.at(0).is_tensor())
+  if (!inputs.at(0).is_tensor()) {
     return tensorflow::errors::InvalidArgument(
-        "Concat in TRT support only Tensor input, at " + node_def.name());
+        "Concat in TRT support only Tensor input, at ", node_def.name());
+  }
 
   // We are retrieving the axis
   TRT_ShapedWeights axis = inputs.at(input_size).weights();
@@ -1816,8 +2111,8 @@
   // TODO(jie): handle data type
   // Only expect to handle INT32 as index attributes for now
   if (index_type != tensorflow::DataType::DT_INT32)
-    return tensorflow::errors::Unimplemented(
-        "Tidx supports only DT_INT32, at " + node_def.name());
+    return tensorflow::errors::Unimplemented("Tidx supports only DT_INT32, at ",
+                                             node_def.name());
 
   int index = *(static_cast<int*>(const_cast<void*>(axis.GetValues())));
 
@@ -1825,23 +2120,29 @@
 
   auto dim = inputs.at(0).tensor()->getDimensions();
   // dimension check
-  if (index > dim.nbDims + 1)
+  if (index > dim.nbDims + 1) {
     return tensorflow::errors::InvalidArgument(
-        "Concatenate on axis out of dimension range, at " + node_def.name());
-
-  if (index == 0)
+        "Concatenate on axis out of dimension range, at ", node_def.name());
+  }
+  if (index == 0) {
     return tensorflow::errors::InvalidArgument(
-        "Concatenate on batch dimension not supported, at " + node_def.name());
+        "Concatenate on batch dimension not supported, at ", node_def.name());
+  }
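+  // Normalize a negative TF axis; the TF rank is dim.nbDims + 1 since TRT
+  // dims exclude the implicit batch dimension.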
+  if (index < 0) {
+    index = dim.nbDims + index + 1;
+  }
 
+#if NV_TENSORRT_MAJOR == 3
   // In case we need a permutation.
   std::vector<int> permutation_order(dim.nbDims + 1);
 
   for (int i = 0; i < dim.nbDims + 1; i++) permutation_order[i] = i;
 
   if (index != 1) {
-    permutation_order[1] = index - 1;
-    permutation_order[index - 1] = 1;
+    permutation_order[1] = index;
+    permutation_order[index] = 1;
   }
+#endif
 
   std::vector<nvinfer1::ITensor const*> inputs_vec;
   // Shape check (all input tensors should have the same shape).
@@ -1849,24 +2150,28 @@
   for (int i = 0; i < input_size; i++) {
     auto tensor_i = inputs.at(i).tensor();
     auto dim_i = tensor_i->getDimensions();
-    if (dim_i.nbDims != dim.nbDims)
+    if (dim_i.nbDims != dim.nbDims) {
       return tensorflow::errors::InvalidArgument(
-          "Concatenate receives inputs with inconsistent dimensions, at " +
+          "Concatenate receives inputs with inconsistent dimensions, at ",
           node_def.name());
-
+    }
     for (int j = 0; j < dim.nbDims; j++) {
       // check dimension consistency on non-concatenate axis
-      if (j != index - 1 && dim_i.d[j] != dim.d[j])
+      if (j != index - 1 && dim_i.d[j] != dim.d[j]) {
         return tensorflow::errors::InvalidArgument(
-            "Concatenate receives inputs with inconsistent shape, at" +
+            "Concatenate receives inputs with inconsistent shape, at",
             node_def.name());
+      }
     }
 
-    // TRT does concatenation only on channel!
-    if (index != 1)
+#if NV_TENSORRT_MAJOR == 3
+    // TRT3 does concatenation only on the channel dimension!
+    if (index != 1) {
       tensor_i = ctx.TransposeTensor(const_cast<nvinfer1::ITensor*>(tensor_i),
                                      permutation_order);
-
+      TFTRT_RETURN_ERROR_IF_NULLPTR(tensor_i, node_def.name());
+    }
+#endif
     inputs_vec.push_back(tensor_i);
   }
 
@@ -1874,11 +2179,18 @@
   nvinfer1::IConcatenationLayer* layer = ctx.network()->addConcatenation(
       const_cast<nvinfer1::ITensor* const*>(inputs_vec.data()),
       inputs_vec.size());
+  TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
+#if NV_TENSORRT_MAJOR > 3
+  layer->setAxis(index - 1);
+#endif
   nvinfer1::ITensor* output_tensor = layer->getOutput(0);
 
+#if NV_TENSORRT_MAJOR == 3
   if (index != 1) {
     output_tensor = ctx.TransposeTensor(output_tensor, permutation_order);
+    TFTRT_RETURN_ERROR_IF_NULLPTR(output_tensor, node_def.name());
   }
+#endif
   outputs->push_back(TRT_TensorOrWeights(output_tensor));
   return tensorflow::Status::OK();
 }
@@ -1997,112 +2309,243 @@
                               combined_offset_weights.GetWeightsForTRT(),
                               combined_scale_weights.GetWeightsForTRT(),
                               dummy_power_weights.GetWeightsForTRT());
+  TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
   nvinfer1::ITensor* output_tensor = layer->getOutput(0);
   outputs->push_back(TRT_TensorOrWeights(output_tensor));
   return tensorflow::Status::OK();
 }
 
-tensorflow::Status ConvertMatMul(Converter& ctx,
-                                 const tensorflow::NodeDef& node_def,
-                                 const std::vector<TRT_TensorOrWeights>& inputs,
-                                 std::vector<TRT_TensorOrWeights>* outputs) {
-  const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
+#if NV_TENSORRT_MAJOR > 3
+tensorflow::Status ConvertMatMulHelper(
+    Converter& ctx, TRT_TensorOrWeights tensor_input,
+    TRT_ShapedWeights weights_raw, bool transpose_weight, string node_name,
+    std::vector<TRT_TensorOrWeights>* outputs) {
+  nvinfer1::ITensor* output_tensor;
+  if (!tensor_input.is_tensor()) {
+    return tensorflow::errors::InvalidArgument("Input 0 expects tensor");
+  }
+  const nvinfer1::ITensor* tensor = tensor_input.tensor();
 
-  // TODO(jie): transpose!
-  TFAttrs attrs(node_def);
-
-  TRT_ShapedWeights weights_ck = inputs.at(1).weights();
-  TRT_ShapedWeights weights = ctx.get_temp_weights_like(weights_ck);
-  ReorderCKtoKC(weights_ck, &weights);
+  TRT_ShapedWeights weights(weights_raw.type_);
+  if (transpose_weight) {
+    weights = weights_raw;
+  } else {
+    TRT_ShapedWeights weights_ck = weights_raw;
+    weights = ctx.get_temp_weights_like(weights_ck);
+    ReorderCKtoKC(weights_raw, &weights);
+  }
   TRT_ShapedWeights biases(weights.type_);
 
   int noutput = weights.shape_.d[0];
 
+  auto input_dim = tensor->getDimensions();
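+  // TRT FullyConnected expects a 3D (CHW) non-batch input, so pad any missing
+  // trailing dimensions with 1 before reshaping.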
+  while (input_dim.nbDims != 3) {
+    input_dim.d[input_dim.nbDims++] = 1;
+  }
+  TFTRT_RETURN_ERROR_IF_FALSE(
+      PrepareTensorForShape(ctx, tensor_input, input_dim, &tensor), node_name);
+
   nvinfer1::IFullyConnectedLayer* layer = ctx.network()->addFullyConnected(
       *const_cast<nvinfer1::ITensor*>(tensor), noutput, weights, biases);
+  TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_name);
+  output_tensor = layer->getOutput(0);
 
-  nvinfer1::ITensor* output_tensor = layer->getOutput(0);
+  const nvinfer1::ITensor* temp_tensor;
+  auto output_dim = output_tensor->getDimensions();
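+  // Squeeze the FC output back to a single non-batch dimension; the padded
+  // trailing 1x1 dims are dropped by this reshape.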
+  output_dim.nbDims = 1;
+  TFTRT_RETURN_ERROR_IF_FALSE(
+      PrepareTensorForShape(ctx, TRT_TensorOrWeights(output_tensor), output_dim,
+                            &temp_tensor),
+      node_name);
+  output_tensor = const_cast<nvinfer1::ITensor*>(temp_tensor);
   outputs->push_back(TRT_TensorOrWeights(output_tensor));
   return tensorflow::Status::OK();
 }
 
-tensorflow::Status ConvertReshape(
+// Inputs are both two-dimensional (tensorflow::ops::MatMul).
+tensorflow::Status ConvertMatMul(Converter& ctx,
+                                 const tensorflow::NodeDef& node_def,
+                                 const std::vector<TRT_TensorOrWeights>& inputs,
+                                 std::vector<TRT_TensorOrWeights>* outputs) {
+  if (!inputs.at(0).is_tensor()) {
+    return tensorflow::errors::InvalidArgument("Input 0 expects tensor, at" +
+                                               node_def.name());
+  }
+
+  TFAttrs attrs(node_def);
+  // TODO(jie): INT32 should be converted?
+  tensorflow::DataType tf_dtype = attrs.get<tensorflow::DataType>("T");
+  if (tf_dtype != tensorflow::DataType::DT_FLOAT &&
+      tf_dtype != tensorflow::DataType::DT_HALF) {
+    return tensorflow::errors::Unimplemented(
+        "data type is not supported, for node " + node_def.name() + " got " +
+        tensorflow::DataTypeString(tf_dtype));
+  }
+  bool transpose_a = attrs.get<bool>("transpose_a");
+  bool transpose_b = attrs.get<bool>("transpose_b");
+
+  // FullyConnected:
+  if (transpose_a) {
+    return tensorflow::errors::Internal(
+        "Transpose_a is not supported for TensorRT FullyConnected (op: " +
+        node_def.op() + "), at: " + node_def.name());
+  }
+  if (inputs.at(1).is_tensor()) {
+    return tensorflow::errors::Internal(
+        "Operand 1 must be constant for TensorRT FullyConnected (op: " +
+        node_def.op() + "), at: " + node_def.name());
+  }
+  return ConvertMatMulHelper(ctx, inputs.at(0), inputs.at(1).weights(),
+                             transpose_b, node_def.name(), outputs);
+}
+
+tensorflow::Status ConvertBatchMatMul(
     Converter& ctx, const tensorflow::NodeDef& node_def,
     const std::vector<TRT_TensorOrWeights>& inputs,
     std::vector<TRT_TensorOrWeights>* outputs) {
-  if (inputs.size() != 2 || !inputs.at(0).is_tensor() ||
-      !inputs.at(1).is_weights())
-    return tensorflow::errors::InvalidArgument(
-        "Input expects tensor and weights, at" + node_def.name());
-
-  // implement tensor binaryOp weight [channel wise] for now;
-  const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
-  auto dims = tensor->getDimensions();
-  // restore implicit batch dimension
-
-  TRT_ShapedWeights shape = inputs.at(1).weights();
-
   TFAttrs attrs(node_def);
 
-  auto padding_type = attrs.get<tensorflow::DataType>("Tshape");
-
-  if (shape.shape_.nbDims != 1)
-    return tensorflow::errors::InvalidArgument(
-        "reshape new shape is not 1 dimensional, at " + node_def.name());
-
-  // Only expect to handle INT32 as attributes for now
-  if (padding_type != tensorflow::DataType::DT_INT32)
+  // TODO(jie): INT32 should be converted?
+  tensorflow::DataType tf_dtype = attrs.get<tensorflow::DataType>("T");
+  if (tf_dtype != tensorflow::DataType::DT_FLOAT &&
+      tf_dtype != tensorflow::DataType::DT_HALF) {
     return tensorflow::errors::Unimplemented(
-        "reshape new shape supports only DT_INT32, at " + node_def.name());
-
-  auto shape_data = static_cast<int*>(const_cast<void*>(shape.GetValues()));
-
-  if (shape_data[0] != -1)
-    return tensorflow::errors::InvalidArgument(
-        "reshape new shape first dimension is not -1, at " + node_def.name());
-
-  auto shape_num_dims = shape.shape_.d[0];
-  VLOG(2) << "shape dimensions: " << shape_num_dims;
-  int volume_w = 1;
-  for (int i = 1; i < shape.shape_.d[0]; i++) volume_w *= shape_data[i];
-
-  int volume_t = 1;
-  for (int i = 0; i < dims.nbDims; i++) volume_t *= dims.d[i];
-
-  VLOG(2) << "volume: " << volume_t << " volume weights: " << volume_w;
-  if (volume_w != volume_t)
-    return tensorflow::errors::InvalidArgument(
-        "volume does not agree between tensor and new shape, at " +
-        node_def.name());
-
-  nvinfer1::IShuffleLayer* layer =
-      ctx.network()->addShuffle(*const_cast<nvinfer1::ITensor*>(tensor));
-
-  nvinfer1::Dims reshape_dims;
-  VLOG(2) << "new dimension: " << shape_num_dims - 1;
-  reshape_dims.nbDims = shape_num_dims - 1;
-  for (int32_t i = 0; i < reshape_dims.nbDims; ++i) {
-    reshape_dims.d[i] = shape_data[i + 1];
+        "data type is not supported, for node " + node_def.name() + " got " +
+        tensorflow::DataTypeString(tf_dtype));
   }
-  layer->setReshapeDimensions(reshape_dims);
-  VLOG(2) << "new dimension: " << shape_num_dims - 1;
 
+  bool transpose_a = attrs.get<bool>("adj_x");
+  bool transpose_b = attrs.get<bool>("adj_y");
+
+  auto dims = inputs.at(0).shape();
+  if (dims.nbDims == 1) {  // NC * CK is only supported through fully connected
+    if (!transpose_a && inputs.at(0).is_tensor() &&
+        inputs.at(1).is_weights()) {
+      return ConvertMatMulHelper(ctx, inputs.at(0), inputs.at(1).weights(),
+                                 transpose_b, node_def.name(), outputs);
+    } else {
+      return tensorflow::errors::InvalidArgument(
+          "Invalid configuration for MatMul, at: " + node_def.name());
+    }
+  }
+
+  const nvinfer1::ITensor* tensor_l;
+  const nvinfer1::ITensor* tensor_r;
+  auto dims_l = inputs.at(0).shape();
+  auto dims_r = inputs.at(1).shape();
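+  // A weights operand carries an explicit leading batch dimension; require it
+  // to be 1 (broadcast across the batch) and squeeze it out below.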
+  if (inputs.at(0).is_weights()) {
+    if (inputs.at(0).shape().d[0] != 1) {
+      return tensorflow::errors::InvalidArgument(
+          "Input 0 as weight assumes broadcast across batch for MatMul, at: " +
+          node_def.name());
+    } else {
+      for (int i = 0; i < dims_l.nbDims - 1; i++) {
+        dims_l.d[i] = dims_l.d[i + 1];
+      }
+      dims_l.nbDims--;
+    }
+  }
+  if (inputs.at(1).is_weights()) {
+    if (inputs.at(1).shape().d[0] != 1) {
+      return tensorflow::errors::InvalidArgument(
+          "Input 1 as weight assumes broadcast across batch for MatMul, at: " +
+          node_def.name());
+    } else {
+      for (int i = 0; i < dims_r.nbDims - 1; i++) {
+        dims_r.d[i] = dims_r.d[i + 1];
+      }
+      dims_r.nbDims--;
+    }
+  }
+
+  TFTRT_RETURN_ERROR_IF_FALSE(
+      PrepareTensorForShape(ctx, inputs.at(0), dims_l, &tensor_l),
+      node_def.name());
+  TFTRT_RETURN_ERROR_IF_FALSE(
+      PrepareTensorForShape(ctx, inputs.at(1), dims_r, &tensor_r),
+      node_def.name());
+
+  nvinfer1::IMatrixMultiplyLayer* layer = ctx.network()->addMatrixMultiply(
+      *const_cast<nvinfer1::ITensor*>(tensor_l), transpose_a,
+      *const_cast<nvinfer1::ITensor*>(tensor_r), transpose_b);
+  TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
   nvinfer1::ITensor* output_tensor = layer->getOutput(0);
-  auto dims_output = output_tensor->getDimensions();
-  VLOG(2) << "output tensor dimension:" << dims_output.nbDims;
   outputs->push_back(TRT_TensorOrWeights(output_tensor));
   return tensorflow::Status::OK();
 }
+#endif
+
+#if NV_TENSORRT_MAJOR > 3
+tensorflow::Status ConvertSoftmax(
+    Converter& ctx, const tensorflow::NodeDef& node_def,
+    const std::vector<TRT_TensorOrWeights>& inputs,
+    std::vector<TRT_TensorOrWeights>* outputs) {
+  const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
+
+  int nbDims = tensor->getDimensions().nbDims;
+  if (nbDims == 0) {
+    return tensorflow::errors::InvalidArgument(
+        "TensorRT Softmax cannot apply on batch dimension, at" +
+        node_def.name());
+  }
+  nvinfer1::ISoftMaxLayer* layer =
+      ctx.network()->addSoftMax(*const_cast<nvinfer1::ITensor*>(tensor));
+  TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
+  // TensorFlow's Softmax applies along the last dimension.
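+  // setAxes() takes a bitmask over the non-batch dimensions, so the last
+  // dimension corresponds to bit (nbDims - 1).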
+  layer->setAxes(1 << (nbDims - 1));
+
+  nvinfer1::ITensor* output_tensor = layer->getOutput(0);
+  outputs->push_back(TRT_TensorOrWeights(output_tensor));
+  return tensorflow::Status::OK();
+}
+#endif
+
+#if NV_TENSORRT_MAJOR > 3
+tensorflow::Status ConvertTopK(Converter& ctx,
+                               const tensorflow::NodeDef& node_def,
+                               const std::vector<TRT_TensorOrWeights>& inputs,
+                               std::vector<TRT_TensorOrWeights>* outputs) {
+  const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
+
+  int nbDims = tensor->getDimensions().nbDims;
+  if (nbDims == 0) {
+    return tensorflow::errors::InvalidArgument(
+        "TensorRT TopK cannot apply on batch dimension, at" + node_def.name());
+  }
+
+  TRT_ShapedWeights k_w = inputs.at(1).weights();
+  int k = *(static_cast<int*>(const_cast<void*>(k_w.GetValues())));
+
+  nvinfer1::TopKOperation op;
+  uint32_t reducedAxes = 0;
+  if (node_def.op() == "TopKV2") {
+    op = nvinfer1::TopKOperation::kMAX;
+    reducedAxes |= 1 << (nbDims - 1);
+  } else {
+    return tensorflow::errors::Unimplemented(
+        "Operation: " + node_def.op() +
+        " not implemented, at: " + node_def.name());
+  }
+
+  nvinfer1::ITopKLayer* layer = ctx.network()->addTopK(
+      *const_cast<nvinfer1::ITensor*>(tensor), op, k, reducedAxes);
+  TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
+
+  nvinfer1::ITensor* output_value_tensor = layer->getOutput(0);
+  nvinfer1::ITensor* output_indices_tensor = layer->getOutput(1);
+  outputs->push_back(TRT_TensorOrWeights(output_value_tensor));
+  outputs->push_back(TRT_TensorOrWeights(output_indices_tensor));
+  return tensorflow::Status::OK();
+}
+#endif
 
 void Converter::register_op_converters() {
   // vgg_16 slim implementation
-  op_registry_["Placeholder"] = ConvertPlaceholder;
   op_registry_["Conv2D"] = ConvertConv2D;
   op_registry_["DepthwiseConv2dNative"] = ConvertConv2DDepthwise;
   op_registry_["Relu"] = ConvertActivation;
   op_registry_["MaxPool"] = ConvertPool;
   op_registry_["AvgPool"] = ConvertPool;
-  // This could be really handled as ConvertBinary
   op_registry_["BiasAdd"] = ConvertScale;
   op_registry_["Const"] = ConvertConst;
   // TODO(ben,jie): this is a temp hack.
@@ -2113,18 +2556,38 @@
   op_registry_["Add"] = ConvertBinary;
   op_registry_["Mul"] = ConvertBinary;
   op_registry_["Sub"] = ConvertBinary;
-  op_registry_["Rsqrt"] = ConvertUnary;
-  op_registry_["Mean"] = ConvertReduce;
   op_registry_["Pad"] = ConvertPad;
-  // TODO(ben,jie): Add more ops
 
   op_registry_["ConcatV2"] = ConvertConcat;
-  op_registry_["MatMul"] = ConvertMatMul;
-  op_registry_["Reshape"] = ConvertReshape;
   op_registry_["FusedBatchNorm"] = ConvertFusedBatchNorm;
   op_registry_["FusedBatchNormV2"] = ConvertFusedBatchNorm;
 
-  plugin_converter_ = ConvertPlugin;
+  op_registry_["Div"] = ConvertBinary;
+  op_registry_["RealDiv"] = ConvertBinary;
+
+  op_registry_["Rsqrt"] = ConvertUnary;
+  op_registry_["Reciprocal"] = ConvertUnary;
+  op_registry_["Exp"] = ConvertUnary;
+  op_registry_["Log"] = ConvertUnary;
+  op_registry_["Sqrt"] = ConvertUnary;
+  op_registry_["Abs"] = ConvertUnary;
+  op_registry_["Neg"] = ConvertUnary;
+#if NV_TENSORRT_MAJOR == 3
+  op_registry_["Mean"] = ConvertReducePool;
+#endif
+#if NV_TENSORRT_MAJOR > 3
+  op_registry_["Sum"] = ConvertReduce;
+  op_registry_["Prod"] = ConvertReduce;
+  op_registry_["Max"] = ConvertReduce;
+  op_registry_["Min"] = ConvertReduce;
+  op_registry_["Mean"] = ConvertReduce;
+  op_registry_["Maximum"] = ConvertBinary;
+  op_registry_["Minimum"] = ConvertBinary;
+  op_registry_["Softmax"] = ConvertSoftmax;
+  op_registry_["MatMul"] = ConvertMatMul;
+  op_registry_["BatchMatMul"] = ConvertBatchMatMul;
+  op_registry_["TopKV2"] = ConvertTopK;
+#endif
 }
 
 }  // namespace
diff --git a/tensorflow/contrib/tensorrt/convert/trt_optimization_pass.cc b/tensorflow/contrib/tensorrt/convert/trt_optimization_pass.cc
index ec9dbfa..044c736 100644
--- a/tensorflow/contrib/tensorrt/convert/trt_optimization_pass.cc
+++ b/tensorflow/contrib/tensorrt/convert/trt_optimization_pass.cc
@@ -17,6 +17,7 @@
 #include "tensorflow/core/grappler/clusters/cluster.h"
 #include "tensorflow/core/grappler/grappler_item.h"
 #include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
+#include "tensorflow/core/lib/strings/numbers.h"
 #include "tensorflow/core/lib/strings/str_util.h"
 #include "tensorflow/core/lib/strings/strcat.h"
 #include "tensorflow/core/platform/logging.h"
@@ -232,8 +233,25 @@
   tensorflow::grappler::GraphProperties static_graph_properties(item);
   TF_RETURN_IF_ERROR(static_graph_properties.InferStatically(true));
   tensorflow::tensorrt::convert::ConversionParams cp;
+
+  std::vector<string> nodes_to_preserve;
+  for (const auto& n : item.NodesToPreserve()) {
+    auto tokens = str_util::Split(n, ":");
+    string s = tokens.at(0);
+    for (int i = 1; i < tokens.size() - 1; ++i) {
+      StrAppend(&s, ":", tokens.at(i));
+    }
+    int dummy_port = -1;
+    // If the last token is not an integer, it must be part of the name.
+    // Otherwise it is the port number.
+    if (tokens.size() > 1 &&
+        !strings::safe_strto32(tokens.back(), &dummy_port)) {
+      StrAppend(&s, ":", tokens.back());
+    }
+    nodes_to_preserve.push_back(s);
+  }
   cp.input_graph_def = &item.graph;
-  cp.output_names = &item.fetch;
+  cp.output_names = &nodes_to_preserve;
   cp.max_batch_size = maximum_batch_size_;
   cp.max_workspace_size_bytes = maximum_workspace_size_;
   cp.output_graph_def = optimized_graph;
diff --git a/tensorflow/contrib/tensorrt/convert/utils.cc b/tensorflow/contrib/tensorrt/convert/utils.cc
new file mode 100644
index 0000000..24591cf
--- /dev/null
+++ b/tensorflow/contrib/tensorrt/convert/utils.cc
@@ -0,0 +1,35 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/contrib/tensorrt/convert/utils.h"
+
+namespace tensorflow {
+namespace tensorrt {
+
+bool IsGoogleTensorRTEnabled() {
+  // TODO(laigd): consider also checking if tensorrt shared libraries are
+  // accessible. We can then direct users to this function to make sure they can
+  // safely write code that uses tensorrt conditionally. E.g. if it does not
+// check for tensorrt, and a user mistakenly uses tensorrt, they will just
+  // crash and burn.
+#ifdef GOOGLE_TENSORRT
+  return true;
+#else
+  return false;
+#endif
+}
+
+}  // namespace tensorrt
+}  // namespace tensorflow
diff --git a/tensorflow/contrib/tensorrt/convert/utils.h b/tensorflow/contrib/tensorrt/convert/utils.h
index f601c06..8b5f4d6 100644
--- a/tensorflow/contrib/tensorrt/convert/utils.h
+++ b/tensorflow/contrib/tensorrt/convert/utils.h
@@ -31,6 +31,8 @@
 template <typename T>
 using TrtUniquePtrType = std::unique_ptr<T, TrtDestroyer<T>>;
 
+bool IsGoogleTensorRTEnabled();
+
 }  // namespace tensorrt
 }  // namespace tensorflow
 
diff --git a/tensorflow/contrib/tensorrt/kernels/trt_engine_op.cc b/tensorflow/contrib/tensorrt/kernels/trt_engine_op.cc
index 8a17eb0..04d072f 100644
--- a/tensorflow/contrib/tensorrt/kernels/trt_engine_op.cc
+++ b/tensorflow/contrib/tensorrt/kernels/trt_engine_op.cc
@@ -316,6 +316,11 @@
         ctx->SetStatus(tensorflow::errors::InvalidArgument(
             "INT8 inputs are not supported!"));
         return;
+#if NV_TENSORRT_MAJOR > 3
+      case nvinfer1::DataType::kINT32:
+        buffers[binding_index] = (void*)(input_tensor.flat<int32>().data());
+        break;
+#endif
       default:
         LOG(ERROR) << "Unknown TRT data type: " << int(dtype);
         ctx->SetStatus(tensorflow::errors::InvalidArgument(
@@ -368,6 +373,12 @@
         ctx->SetStatus(tensorflow::errors::InvalidArgument(
             "INT8 outputs are not supported!"));
         return;
+#if NV_TENSORRT_MAJOR > 3
+      case nvinfer1::DataType::kINT32:
+        buffers[binding_index] =
+            reinterpret_cast<void*>(output_tensor->flat<int32>().data());
+        break;
+#endif
       default:
         LOG(ERROR) << "Unknown TRT data type: " << static_cast<int>(dtype);
         ctx->SetStatus(tensorflow::errors::InvalidArgument(
diff --git a/tensorflow/contrib/tensorrt/ops/trt_engine_op.cc b/tensorflow/contrib/tensorrt/ops/trt_engine_op.cc
index 383635f..e0c7b62 100644
--- a/tensorflow/contrib/tensorrt/ops/trt_engine_op.cc
+++ b/tensorflow/contrib/tensorrt/ops/trt_engine_op.cc
@@ -42,8 +42,14 @@
     .Attr("precision_mode: {'FP32', 'FP16', 'INT8', 'INT8CALIB'}")
     .Attr("calibration_data: string = ''")
     .Input("in_tensor: InT")
-    .Output("out_tensor: OutT")
-    .SetShapeFn(shape_inference::TRTEngineOpShapeInference);
+    .Output("out_tensor: OutT");
+// TODO(jie): TF requires a concrete output shape for concrete input shapes.
+// This is tricky for the batch dimension, since we cannot ensure which input
+// carries the correct batch dimension (at the current stage of the
+// implementation we require all input tensors to carry the same batch size,
+// but this could change in the future). Hence we disable the shape inference
+// function as a workaround.
+// .SetShapeFn(shape_inference::TRTEngineOpShapeInference);
 
 }  // namespace tensorflow
 
diff --git a/tensorflow/contrib/tensorrt/python/__init__.py b/tensorflow/contrib/tensorrt/python/__init__.py
index 0b2321b..fe4fa16 100644
--- a/tensorflow/contrib/tensorrt/python/__init__.py
+++ b/tensorflow/contrib/tensorrt/python/__init__.py
@@ -22,4 +22,5 @@
 from tensorflow.contrib.tensorrt.python.ops import trt_engine_op
 from tensorflow.contrib.tensorrt.python.trt_convert import calib_graph_to_infer_graph
 from tensorflow.contrib.tensorrt.python.trt_convert import create_inference_graph
+from tensorflow.contrib.tensorrt.python.trt_convert import is_tensorrt_enabled
 # pylint: enable=unused-import,line-too-long
diff --git a/tensorflow/contrib/tensorrt/python/trt_convert.py b/tensorflow/contrib/tensorrt/python/trt_convert.py
index 79f512d..2b67931 100644
--- a/tensorflow/contrib/tensorrt/python/trt_convert.py
+++ b/tensorflow/contrib/tensorrt/python/trt_convert.py
@@ -23,6 +23,7 @@
 from tensorflow.contrib.tensorrt.wrap_conversion import calib_convert
 from tensorflow.contrib.tensorrt.wrap_conversion import get_linked_tensorrt_version
 from tensorflow.contrib.tensorrt.wrap_conversion import get_loaded_tensorrt_version
+from tensorflow.contrib.tensorrt.wrap_conversion import is_tensorrt_enabled
 from tensorflow.contrib.tensorrt.wrap_conversion import trt_convert
 from tensorflow.core.framework import graph_pb2
 from tensorflow.core.protobuf import rewriter_config_pb2
diff --git a/tensorflow/contrib/tensorrt/resources/trt_allocator.h b/tensorflow/contrib/tensorrt/resources/trt_allocator.h
index c5d2cec..97ac82c 100644
--- a/tensorflow/contrib/tensorrt/resources/trt_allocator.h
+++ b/tensorflow/contrib/tensorrt/resources/trt_allocator.h
@@ -51,6 +51,9 @@
   // Allocator implementation wrapping TF device allocators.
  public:
   TRTDeviceAllocator(tensorflow::Allocator* allocator);
+
+  // TODO(aaroey): base class doesn't have a virtual destructor, work with
+  // Nvidia to fix it.
   virtual ~TRTDeviceAllocator() {
     VLOG(1) << "Destroying allocator attached to " << allocator_->Name();
   }
diff --git a/tensorflow/contrib/tensorrt/shape_fn/trt_shfn.cc b/tensorflow/contrib/tensorrt/shape_fn/trt_shfn.cc
index 227ac12..f30dba5 100644
--- a/tensorflow/contrib/tensorrt/shape_fn/trt_shfn.cc
+++ b/tensorflow/contrib/tensorrt/shape_fn/trt_shfn.cc
@@ -28,36 +28,50 @@
 namespace tensorflow {
 namespace shape_inference {
 
-tensorflow::Status TRTEngineOpShapeInference(InferenceContext* context) {
-  std::vector<tensorflow::TensorShape> shapes;
-  for (int i = 0; i < context->num_outputs(); ++i) {
-    context->set_output(i, context->UnknownShape());
+tensorflow::Status TRTEngineOpShapeInference(InferenceContext* c) {
+  for (int i = 0; i < c->num_outputs(); ++i) {
+    c->set_output(i, c->UnknownShape());
   }
-  auto status = context->GetAttr("input_shapes", &shapes);
-  // it is ok to not to have shapes
-  if (!status.ok()) return Status::OK();
-  if ((int)shapes.size() != context->num_inputs()) return Status::OK();
-  bool different_input = false;
-  for (int i = 0; i < context->num_inputs(); ++i) {
-    if (shapes.at(i) != context->input_tensor(i)->shape())
-      different_input = true;
+
+  // Check the sanity of the input shapes.
+  std::vector<tensorflow::TensorShape> input_shapes;
+  TF_RETURN_IF_ERROR(c->GetAttr("input_shapes", &input_shapes));
+  if (input_shapes.size() != c->num_inputs()) {
+    return tensorflow::errors::InvalidArgument(
+        "The actual number of inputs doesn't match the number of input "
+        "shapes set in the attr: ",
+        c->num_inputs(), " vs ", input_shapes.size());
   }
-  if (different_input) return Status::OK();
-  shapes.resize(0);
-  status = context->GetAttr("output_shapes", &shapes);
-  if (!status.ok()) return Status::OK();
-  if ((int)shapes.size() != context->num_outputs()) return Status::OK();
-  std::vector<ShapeHandle> shape_handles(shapes.size());
-  for (size_t i = 0; i < shapes.size(); ++i) {
-    status =
-        context->MakeShapeFromTensorShape(shapes.at(i), &shape_handles.at(i));
-    if (!status.ok()) return Status::OK();
+  bool input_match = true;
+  for (int i = 0; i < c->num_inputs(); ++i) {
+    ShapeHandle handle;
+    TF_RETURN_IF_ERROR(
+        c->MakeShapeFromTensorShape(input_shapes.at(i), &handle));
+    ShapeHandle merged;
+    if (!c->Merge(c->input(i), handle, &merged).ok()) {
+      // The input shape doesn't match what was set in the attr; that's fine.
+      input_match = false;
+    }
   }
-  for (int i = 0; i < context->num_outputs(); ++i) {
-    context->set_output(i, shape_handles.at(i));
+
+  // Check the sanity of the output shapes.
+  std::vector<tensorflow::TensorShape> output_shapes;
+  TF_RETURN_IF_ERROR(c->GetAttr("output_shapes", &output_shapes));
+  if (output_shapes.size() != c->num_outputs()) {
+    return tensorflow::errors::InvalidArgument(
+        "The actual number of outputs doesn't match the number of output "
+        "shapes set in the attr: ",
+        c->num_outputs(), " vs ", output_shapes.size());
+  }
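+  // Only propagate the recorded output shapes when every actual input shape
+  // merged cleanly with its recorded counterpart above.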
+  for (size_t i = 0; i < output_shapes.size(); ++i) {
+    ShapeHandle handle;
+    TF_RETURN_IF_ERROR(
+        c->MakeShapeFromTensorShape(output_shapes.at(i), &handle));
+    if (input_match) c->set_output(i, handle);
   }
   return Status::OK();
 }
+
 }  // namespace shape_inference
 }  // namespace tensorflow
 
diff --git a/tensorflow/contrib/tensorrt/test/tf_trt_integration_test.py b/tensorflow/contrib/tensorrt/test/tf_trt_integration_test.py
index d9c41f9..7c3ef49 100644
--- a/tensorflow/contrib/tensorrt/test/tf_trt_integration_test.py
+++ b/tensorflow/contrib/tensorrt/test/tf_trt_integration_test.py
@@ -290,7 +290,12 @@
   def testIdempotence(self):
     # Test that applying the tensorrt optimizer or offline conversion tools
     # multiple times to the same graph will result in the same graph.
-    # TODO(aaroey): implement this.
+    #
+    # TODO(aaroey): currently the conversion is not deterministic. This is
+    # mainly because during tensorflow::ConvertGraphDefToGraph() the graph uses
+    # EdgeSet, which keeps a map keyed by Edge*, so the order of the
+    # input/output edges of a node is nondeterministic, and thus so is the
+    # order in which the segmenter contracts edges. Need to evaluate whether we
+    # should fix this.
     pass
 
 
@@ -342,6 +347,7 @@
 
 
 if __name__ == "__main__":
-  for index, t in enumerate(GetTests()):
-    setattr(TfTrtIntegrationTest, "testTfTRT_" + str(index), t)
+  if trt.is_tensorrt_enabled():
+    for index, t in enumerate(GetTests()):
+      setattr(TfTrtIntegrationTest, "testTfTRT_" + str(index), t)
   test.main()
diff --git a/tensorflow/contrib/tensorrt/trt_conversion.i b/tensorflow/contrib/tensorrt/trt_conversion.i
index d6628cd..422740f 100644
--- a/tensorflow/contrib/tensorrt/trt_conversion.i
+++ b/tensorflow/contrib/tensorrt/trt_conversion.i
@@ -100,6 +100,7 @@
 #include "tensorflow/core/lib/core/status.h"
 #include "tensorflow/core/util/stat_summarizer.h"
 #include "tensorflow/contrib/tensorrt/convert/convert_graph.h"
+#include "tensorflow/contrib/tensorrt/convert/utils.h"
 %}
 
 %ignoreall
@@ -108,6 +109,7 @@
 %unignore calib_convert;
 %unignore get_linked_tensorrt_version;
 %unignore get_loaded_tensorrt_version;
+%unignore is_tensorrt_enabled;
 
 %{
 
@@ -140,7 +142,7 @@
     return std::pair<string, string>{out_status, ""};
   }
 
-  if(precision_mode < 0 || precision_mode > 2){
+  if (precision_mode < 0 || precision_mode > 2) {
     out_status = "InvalidArgument;Invalid precision_mode";
     return std::pair<string, string>{out_status, ""};
   }
@@ -232,7 +234,8 @@
 #endif  // GOOGLE_CUDA && GOOGLE_TENSORRT
   return s;
 }
-version_struct get_loaded_tensorrt_version(){
+
+version_struct get_loaded_tensorrt_version() {
   // Return the version from the loaded library.
   version_struct s;
 #if GOOGLE_CUDA && GOOGLE_TENSORRT
@@ -244,6 +247,10 @@
   return s;
 }
 
+bool is_tensorrt_enabled() {
+  return tensorflow::tensorrt::IsGoogleTensorRTEnabled();
+}
+
 %}
 
 std::pair<string, string> calib_convert(string graph_def_string, bool is_dyn_op);
@@ -258,5 +265,6 @@
                                       std::vector<int> cached_engine_batches);
 version_struct get_linked_tensorrt_version();
 version_struct get_loaded_tensorrt_version();
+bool is_tensorrt_enabled();
 
 %unignoreall
diff --git a/tensorflow/contrib/tpu/BUILD b/tensorflow/contrib/tpu/BUILD
index 0d1c7fc..ef6c752 100644
--- a/tensorflow/contrib/tpu/BUILD
+++ b/tensorflow/contrib/tpu/BUILD
@@ -16,7 +16,6 @@
         "//cloud/vmm/testing/tests/tpu:__subpackages__",
         "//learning/brain:__subpackages__",
         "//tensorflow:__subpackages__",
-        "//third_party/cloud_tpu:__subpackages__",
     ],
 )
 
@@ -175,12 +174,16 @@
         "//tensorflow/contrib/tpu/proto:compilation_result_proto_py",
         "//tensorflow/core:protos_all_py",
         "//tensorflow/python:array_ops",
+        "//tensorflow/python:dtypes",
         "//tensorflow/python:framework_ops",
+        "//tensorflow/python:linalg_ops",
         "//tensorflow/python:math_ops",
         "//tensorflow/python:platform",
+        "//tensorflow/python:random_ops",
         "//tensorflow/python:session",
         "//tensorflow/python:tensor_spec",
         "//tensorflow/python:variable_scope",
+        "//tensorflow/python/data/ops:dataset_ops",
         "//tensorflow/python/estimator:model_fn",
         "//tensorflow/python/keras:backend",
         "//tensorflow/python/keras:engine",
diff --git a/tensorflow/contrib/tpu/__init__.py b/tensorflow/contrib/tpu/__init__.py
index d623386..d5484e9 100644
--- a/tensorflow/contrib/tpu/__init__.py
+++ b/tensorflow/contrib/tpu/__init__.py
@@ -42,10 +42,10 @@
 
 @@TPUEstimator
 @@TPUEstimatorSpec
+@@export_estimator_savedmodel
 @@RunConfig
 @@InputPipelineConfig
 @@TPUConfig
-
 @@bfloat16_scope
 """
 
diff --git a/tensorflow/contrib/tpu/python/tpu/keras_support.py b/tensorflow/contrib/tpu/python/tpu/keras_support.py
index 6e9c607..8292c92 100644
--- a/tensorflow/contrib/tpu/python/tpu/keras_support.py
+++ b/tensorflow/contrib/tpu/python/tpu/keras_support.py
@@ -45,6 +45,7 @@
 from __future__ import division
 from __future__ import print_function
 
+import abc
 import collections
 import contextlib
 import re
@@ -63,8 +64,11 @@
 from tensorflow.contrib.tpu.python.tpu import tpu_optimizer
 from tensorflow.core.protobuf import config_pb2
 from tensorflow.python.client import session as tf_session
+from tensorflow.python.data.ops import dataset_ops
 from tensorflow.python.estimator import model_fn as model_fn_lib
+from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import ops
+from tensorflow.python.framework import tensor_shape
 from tensorflow.python.framework import tensor_spec
 from tensorflow.python.keras import backend as K
 from tensorflow.python.keras import models
@@ -72,7 +76,9 @@
 from tensorflow.python.keras.engine import base_layer
 from tensorflow.python.keras.layers import embeddings
 from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gen_linalg_ops
 from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import random_ops
 from tensorflow.python.ops import variable_scope
 from tensorflow.python.platform import tf_logging as logging
 
@@ -199,7 +205,6 @@
       caller_obj = caller_frame.f_locals.get('self')
       if (caller_obj is not None and
           isinstance(caller_obj, base_layer.Layer) and name is not None):
-        logging.info('Intercepted name_scope: %s', caller_obj)
         return variable_scope.variable_scope(
             name, default_name, values, reuse=variable_scope.AUTO_REUSE)
 
@@ -208,8 +213,51 @@
     self._default_placeholder = array_ops.placeholder
     self._default_name_scope = ops.name_scope
     self._default_make_variable = base_layer.make_variable
+    self._default_random_normal = random_ops.random_normal
+    self._default_qr = gen_linalg_ops.qr
 
     array_ops.placeholder = _placeholder
+
+    # Replace random_ops.random_normal with a dummy function because
+    # `random_normal` isn't yet implemented on the TPU. Since these initial
+    # values are overwritten by the CPU values, this is okay.
+    def random_normal(shape,
+                      mean=0.0,
+                      stddev=1.0,
+                      dtype=dtypes.float32,
+                      seed=None,
+                      name=None):
+      del mean
+      del stddev
+      del seed
+      return array_ops.zeros(shape, dtype=dtype, name=name)
+
+    random_ops.random_normal = random_normal
+
+    # Replace gen_linalg_ops.qr because QR decomposition is not yet implemented.
+    # TODO(saeta): Remove qr override once we confirm the qr implementation is
+    # ok.
+    # pylint: disable=redefined-builtin
+    def qr(input, full_matrices=False, name=None):
+      """Dummy implementation of qr decomposition."""
+      del full_matrices  # TODO(saeta): Properly handle the full matrix case.
+      input_shape = input.shape
+      if len(input_shape) < 2:
+        raise ValueError('Invalid shape passed to qr: %s' % input_shape)
+      p = min(input_shape[-1], input_shape[-2])
+      if len(input_shape) == 2:
+        q = array_ops.zeros((p, p), name=name)
+        r = array_ops.zeros(input_shape, name=name)
+        return (r, q)
+      elif len(input_shape) == 3:
+        n = input_shape[0]
+        q = array_ops.zeros((n, p, p), name=name)
+        r = array_ops.zeros(input_shape, name=name)
+        return (r, q)
+      else:
+        raise ValueError('Invalid shape passed to qr: %s' % input_shape)
+    gen_linalg_ops.qr = qr
+
     ops.name_scope = _name_scope
     base_layer.make_variable = variable_scope.get_variable
     logging.info('Overriding default placeholder.')
@@ -219,6 +267,331 @@
     array_ops.placeholder = self._default_placeholder
     ops.name_scope = self._default_name_scope
     base_layer.make_variable = self._default_make_variable
+    random_ops.random_normal = self._default_random_normal
+    gen_linalg_ops.qr = self._default_qr
+
+
+class SizedInfeed(collections.namedtuple('SizedInfeed',
+                                         ['sharded_infeed_tensors',
+                                          'infeed_ops'])):
+  """Represents an instantiation of the infeed ops for a concrete input shape.
+
+  sharded_infeed_tensors: A data structure of Tensors used to represent the
+    placeholder tensors that must be fed when using feed_dicts.
+
+  infeed_ops: The set of ops that will be run to drive infeed for a single
+    step.
+  """
+  pass
+
+
+class TPUInfeedInstance(object):
+  """TPUInfeedInstance represents the logic to manage feeding in a single step.
+
+  See the comments on the `TPUInfeedManager` for a description of how infeed
+  is managed.
+  """
+
+  @abc.abstractmethod
+  def make_input_specs(self, input_tensors):
+    """Constructs the infeed_specs for the given Infeed instance.
+
+    Args:
+      input_tensors: The inputs to the model.
+
+    Returns:
+      A list of `tensor_spec.TensorSpec`s describing the infeed tensors for
+      these inputs.
+    """
+    pass
+
+  def make_feed_dict(self, tpu_model_op):
+    """Constructs a feed_dict for this instance, given the tpu_model_op.
+
+    Args:
+      tpu_model_op: A `TPUModelOp` representing the TPU Model for this
+        instance's input spec.
+
+    Returns:
+      A dictionary to use as the feed_dict of a `session.run` call.
+    """
+    pass
+
+
+class TPUInfeedManager(object):
+  """TPUInfeedManager manages the data infeeding of data to a TPU computation.
+
+  Because there are multiple data sources (e.g. in-memory NumPy arrays,
+  `tf.data.Dataset`s), we abstract the different logic behind a single
+  interface: the `TPUInfeedManager`.
+
+  (1) A `TPUFunction` is called with a set of inputs. Based on the inputs,
+  `TPUFunction` retrieves the corresponding `TPUInfeedManager` (or constructs a
+  new one if required).
+
+  (2) The `TPUFunction` calls `make_infeed_instance` on the `TPUInfeedManager`
+  which returns a `TPUInfeedInstance`.
+
+  (3) The `TPUFunction` checks in the shape cache for a pre-compiled instance of
+  the model based on the returned `input_specs` from `TPUInfeedInstance`.
+
+  (4) [Optional.] If the model has not already been instantiated for the given
+  input spec, the `TPUFunction` compiles the model for the input spec (using the
+  `TPUInfeedManager`).
+
+  (5) The `TPUInfeedInstance` constructs the session.run's feed_dict given the
+  compiled model instance corresponding to its shape.
+  """
+
+  @abc.abstractmethod
+  def make_infeed_instance(self, inputs):
+    """Given a single step's input, construct a `TPUInfeedInstance`.
+
+    Args:
+      inputs: The inputs to a given step.
+
+    Returns:
+      A subclass of `TPUInfeedInstance`.
+    """
+    pass
+
+  @abc.abstractmethod
+  def build_infeed_from_input_specs(self, input_specs, execution_mode):
+    """For a given input specification (size, type), construct the infeed ops.
+
+    This is called only once for a given input specification and builds the
+    graph ops. It does not have a pointer to the actual infeed data.
+
+    Args:
+      input_specs: TODO(saeta): Document me!
+      execution_mode: TODO(saeta): Document me!
+
+    Returns:
+      A `SizedInfeed` instance.
+    """
+    pass
+
+
+class TPUNumpyInfeedManager(TPUInfeedManager):
+  """TPU Infeed manager for Numpy inputs."""
+
+  class NumpyInfeedInstance(TPUInfeedInstance):
+    """Infeed instance for Numpy inputs."""
+
+    def __init__(self, sharded_inputs):
+      self._sharded_inputs = sharded_inputs
+
+    def make_input_specs(self, input_tensors):
+      # Compute an input specification (used to generate infeed enqueue and
+      # dequeue operations).  We use the shape from our input array and the
+      # dtype from our model.  A user may pass in a float64 for a float32
+      # input: for model compatibility we still must generate a float32 infeed.
+      input_specs = []
+      # We use the shape and dtype from the first shard to compute the input
+      # metadata (`input_specs`); all replicas have the same type and shape.
+      for tensor, ary in zip(input_tensors, self._sharded_inputs[0]):
+        input_specs.append(
+            tensor_spec.TensorSpec(ary.shape, tensor.dtype,
+                                   _valid_name(tensor.name)))
+
+      return input_specs
+
+    def make_feed_dict(self, tpu_model_op):
+      infeed_dict = {}
+      for infeed_tensors, inputs in zip(tpu_model_op.infeed_tensors,
+                                        self._sharded_inputs):
+        for tensor, value in zip(infeed_tensors, inputs):
+          infeed_dict[tensor] = value
+      return infeed_dict
+
+  def __init__(self, distribution_strategy):
+    self._strategy = distribution_strategy
+
+  def _split_tensors(self, inputs):
+    """Split input data across shards.
+
+    Each input is sliced along the batch axis.
+
+    Args:
+      inputs: List of Numpy arrays to run on the TPU.
+
+    Returns:
+      List of lists containing the input to feed to each TPU shard.
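+
+      For example, a batch of 128 rows split across 8 towers yields 8 shards
+      of 16 rows each.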
+    """
+    if self._strategy.num_towers == 1:
+      return [inputs]
+
+    batch_size = inputs[0].shape[0]
+    assert batch_size % self._strategy.num_towers == 0, (
+        'batch_size must be divisible by strategy.num_towers (%s vs %s)' %
+        (batch_size, self._strategy.num_towers))
+    shard_size = batch_size // self._strategy.num_towers
+    input_list = []
+    for index in range(self._strategy.num_towers):
+      shard_inputs = [
+          x[index * shard_size:(index + 1) * shard_size] for x in inputs
+      ]
+      input_list.append(shard_inputs)
+    return input_list
+
+  def make_infeed_instance(self, inputs):
+    sharded_inputs = self._split_tensors(inputs)
+    return self.NumpyInfeedInstance(sharded_inputs)
+
+  def build_infeed_from_input_specs(self, input_specs, execution_mode):
+    infeed_op = []
+    shard_infeed_tensors = []
+
+    for shard_id in range(self._strategy.num_towers):
+      with ops.device('/device:TPU:%d' % shard_id):
+        infeed_tensors = []
+        for spec in input_specs:
+          # Construct placeholders for each of the inputs.
+          infeed_tensors.append(
+              array_ops.placeholder(
+                  dtype=spec.dtype,
+                  shape=spec.shape,
+                  name='infeed-enqueue-%s-%d' % (spec.name, shard_id)))
+        shard_infeed_tensors.append(infeed_tensors)
+
+        infeed_op.append(
+            tpu_ops.infeed_enqueue_tuple(
+                infeed_tensors, [spec.shape for spec in input_specs],
+                name='infeed-enqueue-%s-%d' % (execution_mode, shard_id)))
+    return SizedInfeed(infeed_ops=infeed_op,
+                       sharded_infeed_tensors=shard_infeed_tensors)
+
+
+class TPUDatasetInfeedManager(TPUInfeedManager):
+  """Manages infeed for a `tf.data.Dataset` into a TPU computation.
+  """
+
+  class DatasetInfeedInstance(TPUInfeedInstance):
+    """An instance of the TPU infeed."""
+
+    def __init__(self, input_specs):
+      self._input_specs = input_specs
+
+    def make_input_specs(self, input_tensors):
+      # TODO(saeta): Do error checking here!
+      return self._input_specs
+
+    def make_feed_dict(self, tpu_model_op):
+      # TODO(saeta): Verify tpu_model_op is as expected!
+      return {}
+
+  def __init__(self, dataset, distribution_strategy, tpu_session):
+    """Constructs a TPUDatasetInfeedManager.
+
+    Must be called within a `KerasTPUModel.tpu_session` context!
+
+    Args:
+      dataset: A `tf.data.Dataset` to infeed.
+      distribution_strategy: The `TPUDistributionStrategy` used to configure the
+        Keras TPU model.
+      tpu_session: The `tf.Session` object used for running the TPU model.
+    """
+    self._verify_dataset_shape(dataset)
+    self._dataset = dataset
+    self._strategy = distribution_strategy
+    dummy_x_shape = dataset.output_shapes[0].as_list()
+    dummy_x_shape[0] *= distribution_strategy.num_towers
+    dummy_y_shape = dataset.output_shapes[1].as_list()
+    dummy_y_shape[0] *= distribution_strategy.num_towers
+    self._iterator = dataset.make_initializable_iterator()
+    tpu_session.run(self._iterator.initializer)
+
+    self._get_next_ops = []
+    ctrl_deps = []
+    for i in range(distribution_strategy.num_towers):
+      with ops.control_dependencies(ctrl_deps):  # Ensure deterministic order.
+        # TODO(saeta): Ensure correct placement!
+        get_next_op = self._iterator.get_next()
+        self._get_next_ops.append(get_next_op)
+        ctrl_deps.extend(get_next_op)
+
+    # Use dummy numpy inputs for the rest of Keras' shape checking. We
+    # intercept them when building the model.
+    self._dummy_x = np.zeros(dummy_x_shape,
+                             dtype=dataset.output_types[0].as_numpy_dtype)
+    self._dummy_y = np.zeros(dummy_y_shape,
+                             dtype=dataset.output_types[1].as_numpy_dtype)
+
+    input_specs = []
+    if isinstance(self._iterator.output_shapes, tuple):
+      assert isinstance(self._iterator.output_types, tuple)
+      assert len(self._iterator.output_shapes) == len(
+          self._iterator.output_types)
+      for i in range(len(self._iterator.output_shapes)):
+        spec = tensor_spec.TensorSpec(self._iterator.output_shapes[i],
+                                      self._iterator.output_types[i])
+        input_specs.append(spec)
+    elif isinstance(self._iterator.output_shapes, tensor_shape.TensorShape):
+      spec = tensor_spec.TensorSpec(self._iterator.output_shapes,
+                                    self._iterator.output_types)
+      input_specs.append(spec)
+
+    self._infeed_instance = self.DatasetInfeedInstance(input_specs)
+
+  def _verify_dataset_shape(self, dataset):
+    """Verifies a dataset is of an appropriate shape for TPUs."""
+    if not isinstance(dataset, dataset_ops.Dataset):
+      raise ValueError('The function passed as the `x` parameter did not '
+                       'return a `tf.data.Dataset`.')
+    if not isinstance(dataset.output_classes, tuple):
+      raise ValueError('The dataset must return a tuple of tf.Tensors, '
+                       'instead it returns: %s' % dataset.output_classes)
+    if len(dataset.output_classes) != 2:
+      raise ValueError(
+          'The dataset must return a 2-element tuple, got '
+          '%s output classes instead.' % (dataset.output_classes,))
+    for i, cls in enumerate(dataset.output_classes):
+      if cls != ops.Tensor:
+        raise ValueError('The dataset returned a non-Tensor type (%s) at '
+                         'index %d.' % (cls, i))
+    for i, shape in enumerate(dataset.output_shapes):
+      if not shape:
+        raise ValueError('The dataset returns a scalar tensor in '
+                         'tuple index %d. Did you forget to batch? '
+                         '(Output shapes: %s).' % (i,
+                                                   dataset.output_shapes))
+      for j, dim in enumerate(shape):
+        if dim.value is None:
+          if j == 0:
+            hint = (' Hint: did you use `ds.batch(BATCH_SIZE, '
+                    'drop_remainder=True)`?')
+          else:
+            hint = ''
+          raise ValueError(
+              'The Keras-TPU integration for `tf.data` '
+              'currently requires static shapes. The provided '
+              'dataset only has a partially defined shape. '
+              '(Dimension %d of output tensor %d is not statically known '
+              'for output shapes: %s.%s)' % (i, j, dataset.output_shapes, hint))
+
+  @property
+  def dummy_x(self):
+    return self._dummy_x
+
+  @property
+  def dummy_y(self):
+    return self._dummy_y
+
+  def make_infeed_instance(self, inputs):
+    # TODO(saeta): Verify inputs is as expected.
+    return self._infeed_instance
+
+  def build_infeed_from_input_specs(self, input_specs, execution_mode):
+    shard_infeed_tensors = self._get_next_ops
+    assert len(shard_infeed_tensors) == self._strategy.num_towers
+    infeed_ops = []
+    for shard_id in range(self._strategy.num_towers):
+      with ops.device('/device:TPU:%d' % shard_id):
+        infeed_ops.append(
+            tpu_ops.infeed_enqueue_tuple(
+                shard_infeed_tensors[shard_id],
+                [spec.shape for spec in input_specs],
+                name='infeed-enqueue-%s-%d' % (execution_mode, shard_id)))
+    return SizedInfeed(infeed_ops=infeed_ops,
+                       sharded_infeed_tensors=shard_infeed_tensors)
 
 
 class TPUFunction(object):
@@ -246,7 +619,7 @@
     if not isinstance(self.model.optimizer, keras_optimizers.TFOptimizer):
       self._optimizer_config = self.model.optimizer.get_config()
 
-  def _specialize_model(self, input_specs):
+  def _specialize_model(self, input_specs, infeed_manager):
     """Specialize `self.model` (a Keras model) for the given input shapes."""
     # Re-create our input and output layers inside our subgraph.  They will be
     # attached to the true computation when we clone our model in `tpu_fn`.
@@ -272,8 +645,8 @@
           name='infeed-%s' % self.execution_mode)
 
       assert len(infeed_tensors) == len(infeed_layers), (
-          'Infeed inputs did not match model: %s vs %s', (infeed_layers,
-                                                          infeed_tensors))
+          'Infeed inputs did not match model: %s vs %s' % (infeed_layers,
+                                                           infeed_tensors))
 
       tpu_targets = []
       tpu_input_map = {}
@@ -287,7 +660,9 @@
 
       # Clone our CPU model, running within the TPU device context.
       with TPURewriteContext(tpu_input_map):
-        self._cloned_model = models.clone_model(self.model)
+        # TODO(power): Replicate variables.
+        with ops.device('/device:TPU:0'):
+          self._cloned_model = models.clone_model(self.model)
 
       # Create a copy of the optimizer for this graph.
       if isinstance(self.model.optimizer, keras_optimizers.TFOptimizer):
@@ -360,26 +735,12 @@
 
     # Generate CPU side operations to enqueue features/labels and dequeue
     # outputs from the model call.
-    infeed_op = []
+    sized_infeed = infeed_manager.build_infeed_from_input_specs(
+        input_specs, self.execution_mode)
+    # Build output ops.
     outfeed_op = []
-    shard_infeed_tensors = []
-
     for shard_id in range(self._strategy.num_towers):
       with ops.device('/device:TPU:%d' % shard_id):
-        infeed_tensors = []
-        for spec in input_specs:
-          infeed_tensors.append(
-              array_ops.placeholder(
-                  dtype=spec.dtype,
-                  shape=spec.shape,
-                  name='infeed-enqueue-%s-%d' % (spec.name, shard_id)))
-        shard_infeed_tensors.append(infeed_tensors)
-
-        infeed_op.append(
-            tpu_ops.infeed_enqueue_tuple(
-                infeed_tensors, [spec.shape for spec in input_specs],
-                name='infeed-enqueue-%s-%d' % (self.execution_mode, shard_id)))
-
         outfeed_op.extend(
             tpu_ops.outfeed_dequeue_tuple(
                 dtypes=[spec.dtype for spec in self._outfeed_spec],
@@ -389,8 +750,8 @@
     return TPUModelOp(
         compile_op,
         execute_op,
-        infeed_tensors=shard_infeed_tensors,
-        infeed_op=infeed_op,
+        infeed_tensors=sized_infeed.sharded_infeed_tensors,
+        infeed_op=sized_infeed.infeed_ops,
         outfeed_op=outfeed_op)
 
   def _test_model_compiles(self, tpu_model_ops):
@@ -409,36 +770,17 @@
     logging.info('Finished compiling. Time elapsed: %s secs',
                  end_time - start_time)
 
-  def _split_tensors(self, inputs):
-    """Split input data across shards.
-
-    Each input is sliced along the batch axis.
-
-    Args:
-      inputs: List of Numpy arrays to run on the TPU.
-
-    Returns:
-      List of lists containing the input to feed to each TPU shard.
-    """
-    if self._strategy.num_towers == 1:
-      return [inputs]
-
-    batch_size = inputs[0].shape[0]
-    assert batch_size % self._strategy.num_towers == 0, (
-        'batch_size must be divisible by strategy.num_towers (%s vs %s)' %
-        (batch_size, self._strategy.num_towers))
-    shard_size = batch_size // self._strategy.num_towers
-    input_list = []
-    for index in range(self._strategy.num_towers):
-      shard_inputs = [
-          x[index * shard_size:(index + 1) * shard_size] for x in inputs
-      ]
-      input_list.append(shard_inputs)
-    return input_list
-
   def __call__(self, inputs):
     assert isinstance(inputs, list)
 
+    infeed_manager = None
+    for x, mgr in self.model._numpy_to_infeed_manager_list:
+      if inputs[0] is x:
+        infeed_manager = mgr
+        break
+    if infeed_manager is None:
+      infeed_manager = TPUNumpyInfeedManager(self.model._strategy)
+
     # Strip sample weight from inputs
     if (self.execution_mode == model_fn_lib.ModeKeys.TRAIN or
         self.execution_mode == model_fn_lib.ModeKeys.EVAL):
@@ -447,21 +789,9 @@
     else:
       input_tensors = self.model._feed_inputs
 
-    shard_inputs = self._split_tensors(inputs)
+    infeed_instance = infeed_manager.make_infeed_instance(inputs)
     del inputs  # To avoid accident usage.
-
-    # Compute an input specification (used to generate infeed enqueue and
-    # dequeue operations).  We use the shape from our input array and the
-    # dtype from our model.  A user may pass in a float64 for a float32
-    # input: for model compatibility we still must generate a float32 infeed.
-    input_specs = []
-
-    # We use the shape and dtype from the first shard to compute the input
-    # metadata (`input_specs`); all replicas have the same type and shape.
-    for tensor, ary in zip(input_tensors, shard_inputs[0]):
-      input_specs.append(
-          tensor_spec.TensorSpec(ary.shape, tensor.dtype,
-                                 _valid_name(tensor.name)))
+    input_specs = infeed_instance.make_input_specs(input_tensors)
 
     # XLA requires every operation in the graph has a fixed shape.  To
     # handle varying batch sizes we recompile a new sub-graph for each
@@ -472,7 +802,8 @@
       with self.model.tpu_session():
         logging.info('New input shapes; (re-)compiling: mode=%s, %s',
                      self.execution_mode, input_specs)
-        new_tpu_model_ops = self._specialize_model(input_specs)
+        new_tpu_model_ops = self._specialize_model(input_specs,
+                                                   infeed_manager)
         self._compilation_cache[shape_key] = new_tpu_model_ops
         self._test_model_compiles(new_tpu_model_ops)
 
@@ -480,11 +811,7 @@
     self.model._initialize_weights(self._cloned_model)
     tpu_model_ops = self._compilation_cache[shape_key]
 
-    infeed_dict = {}
-    for infeed_tensors, inputs in zip(tpu_model_ops.infeed_tensors,
-                                      shard_inputs):
-      for tensor, value in zip(infeed_tensors, inputs):
-        infeed_dict[tensor] = value
+    infeed_dict = infeed_instance.make_feed_dict(tpu_model_ops)
 
     with self.model.tpu_session() as session:
       _, _, outfeed_outputs = session.run([
@@ -518,6 +845,11 @@
         name=cpu_model.name,
     )
 
+    # Create a mapping from numpy arrays to infeed managers.
+    # Note: uses a list of tuples instead of a map because numpy arrays are
+    # not hashable.
+    self._numpy_to_infeed_manager_list = []
+
     self.predict_function = None
     self.test_function = None
     self.train_function = None
@@ -529,14 +861,16 @@
     self._tpu_weights_initialized = False
     self._graph = ops.Graph()
 
-    cluster_resolver = tpu_cluster_resolver.TPUClusterResolver(
+    self._cluster_resolver = tpu_cluster_resolver.TPUClusterResolver(
         tpu_name_or_address)
-    cluster_spec = cluster_resolver.cluster_spec()
+    master = self._cluster_resolver.master()
+    cluster_spec = self._cluster_resolver.cluster_spec()
     self._session = tf_session.Session(
         graph=self._graph,
-        target=cluster_resolver.master(),
+        target=master,
         config=config_pb2.ConfigProto(isolate_session_state=True))
 
+    # TODO(saeta): Confirm the lines below work in ClusterSpec propagation env.
     if cluster_spec:
       self._session.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
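
For reference, the resolver wiring above is the standard way to point a session at a TPU worker; a hedged sketch using only the calls visible in this hunk (the TPU address is a placeholder):

    resolver = tpu_cluster_resolver.TPUClusterResolver('grpc://10.0.0.2:8470')
    session = tf_session.Session(
        target=resolver.master(),
        config=config_pb2.ConfigProto(isolate_session_state=True))
    cluster_spec = resolver.cluster_spec()  # may be empty outside a cluster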
 
@@ -588,6 +922,92 @@
                               sample_weight_mode, weighted_metrics,
                               target_tensors, **kwargs)
 
+  def fit(self,
+          x=None,
+          y=None,
+          batch_size=None,
+          epochs=1,
+          verbose=1,
+          callbacks=None,
+          validation_split=0.,
+          validation_data=None,
+          shuffle=True,
+          class_weight=None,
+          sample_weight=None,
+          initial_epoch=0,
+          steps_per_epoch=None,
+          validation_steps=None,
+          **kwargs):
+    assert not self._numpy_to_infeed_manager_list  # Ensure empty.
+
+    infeed_managers = []  # Managers to clean up at the end of the fit call.
+    if isinstance(x, dataset_ops.Dataset):
+      # TODO(b/111413240): Support taking a tf.data.Dataset directly.
+      raise ValueError(
+          'Taking a Dataset directly is not yet supported. Please '
+          'wrap your dataset construction code in a function and '
+          'pass that to fit instead. For examples, see: '
+          'https://github.com/tensorflow/tpu/tree/master/models/experimental'
+          '/keras')
+    if callable(x):
+      with self.tpu_session() as sess:
+        dataset = x()
+        if steps_per_epoch is None:
+          raise ValueError('When using tf.data as input to a model, you '
+                           'should specify the steps_per_epoch argument.')
+        if y is not None:
+          raise ValueError('When using tf.data as input to a model, y must be '
+                           'None')
+        infeed_manager = TPUDatasetInfeedManager(dataset, self._strategy, sess)
+        # Use dummy numpy inputs for the rest of Keras' shape checking. We
+        # intercept them when building the model.
+        x = infeed_manager.dummy_x
+        y = infeed_manager.dummy_y
+        infeed_managers.append((x, infeed_manager))
+
+    if isinstance(validation_data, dataset_ops.Dataset):
+      # TODO(b/111413240): Support taking a tf.data.Dataset directly.
+      raise ValueError(
+          'Taking a Dataset directly is not yet supported. Please '
+          'wrap your dataset construction code in a function and '
+          'pass that to fit instead. For examples, see: '
+          'https://github.com/tensorflow/tpu/tree/master/models/experimental'
+          '/keras')
+    if callable(validation_data):
+      with self.tpu_session() as sess:
+        dataset = validation_data()
+        if validation_steps is None:
+          raise ValueError('When using tf.data as validation for a model, you '
+                           'should specify the validation_steps argument.')
+        infeed_manager = TPUDatasetInfeedManager(dataset, self._strategy, sess)
+        # Use dummy numpy inputs for the rest of Keras' shape checking. We
+        # intercept them when building the model.
+        val_x = infeed_manager.dummy_x
+        val_y = infeed_manager.dummy_y
+        infeed_managers.append((val_x, infeed_manager))
+        validation_data = (val_x, val_y)
+
+    self._numpy_to_infeed_manager_list = infeed_managers
+    try:
+      return super(KerasTPUModel, self).fit(
+          x,
+          y,
+          batch_size,
+          epochs,
+          verbose,
+          callbacks,
+          validation_split,
+          validation_data,
+          shuffle,
+          class_weight,
+          sample_weight,
+          initial_epoch,
+          steps_per_epoch,
+          validation_steps,
+          **kwargs)
+    finally:
+      self._numpy_to_infeed_manager_list = []
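
Putting the pieces together: the new `fit` path expects `x` to be a zero-argument function that builds the dataset (with `steps_per_epoch` set and `y` left as `None`), and accepts a callable `validation_data` the same way. A usage sketch, assuming `tpu_model` is an already-constructed `KerasTPUModel` and `make_dataset`/`make_val_dataset` are functions like the one sketched earlier:

    tpu_model.fit(
        make_dataset,         # called inside a TPU session to build the Dataset
        steps_per_epoch=100,  # required when feeding from tf.data
        epochs=2,
        validation_data=make_val_dataset,
        validation_steps=10)  # required when validating from tf.data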
+
   def _make_train_function(self):
     if not self.train_function:
       self.train_function = TPUFunction(
@@ -669,10 +1089,10 @@
       K.set_session(default_session)
 
   def shutdown(self):
-    logging.info('Shutting down TPU session.')
-    with self.tpu_session() as session:
-      session.run(tpu.shutdown_system())
-
+    # TODO(b/111364423): Actually shut down the system.
+    logging.info('Skipping shutting down TPU system.')
+    # with self.tpu_session() as session:
+    #   session.run(tpu.shutdown_system())
     self._session.close()
 
 
diff --git a/tensorflow/contrib/tpu/python/tpu/tpu.py b/tensorflow/contrib/tpu/python/tpu/tpu.py
index 6a64893..7216626 100644
--- a/tensorflow/contrib/tpu/python/tpu/tpu.py
+++ b/tensorflow/contrib/tpu/python/tpu/tpu.py
@@ -151,6 +151,41 @@
     self._name = name
     self._unsupported_ops = []
     self._pivot = pivot
+    self._replicated_vars = {}
+
+  def get_replicated_var_handle(self, var):
+    """Returns a variable handle for replicated TPU variable 'var'.
+
+    This is a method used by an experimental replicated variable
+    implementation and is not intended as a public API.
+
+    Args:
+      var: The replicated TPU variable.
+
+    Returns:
+      The handle of the TPU replicated input node.
+    """
+    handle = self._replicated_vars.get(var)
+    if handle is not None:
+      return handle
+
+    # Builds a TPUReplicatedInput node for the variable, if one does not already
+    # exist. The TPUReplicatedInput node must belong to the enclosing
+    # control-flow scope of the TPUReplicateContext.
+    # TODO(phawkins): consider changing the contract of the TPU encapsulation
+    # so the TPUReplicatedInput nodes go inside the TPUReplicateContext scope
+    # instead.
+
+    # pylint: disable=protected-access
+    graph = ops.get_default_graph()
+    saved_context = graph._get_control_flow_context()
+    graph._set_control_flow_context(self.outer_context)
+    handle = tpu_ops.tpu_replicated_input(
+        [v.handle for v in var._vars], name=var.name + "/handle")
+    graph._set_control_flow_context(saved_context)
+    # pylint: enable=protected-access
+    self._replicated_vars[var] = handle
+    return handle
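
The method above memoizes one TPUReplicatedInput node per variable, so repeated lookups reuse the handle instead of growing the graph. A small illustration of that contract (`ctx` and `var` are hypothetical objects):

    handle_a = ctx.get_replicated_var_handle(var)
    handle_b = ctx.get_replicated_var_handle(var)
    assert handle_a is handle_b  # second call hits self._replicated_vars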
 
   def report_unsupported_operations(self):
     if self._unsupported_ops:
diff --git a/tensorflow/contrib/tpu/python/tpu/tpu_estimator.py b/tensorflow/contrib/tpu/python/tpu/tpu_estimator.py
index 74157a6..718ea63 100644
--- a/tensorflow/contrib/tpu/python/tpu/tpu_estimator.py
+++ b/tensorflow/contrib/tpu/python/tpu/tpu_estimator.py
@@ -3249,3 +3249,47 @@
   else:
     # Now params is Python dict.
     params[key] = value
+
+
+def export_estimator_savedmodel(estimator,
+                                export_dir_base,
+                                serving_input_receiver_fn,
+                                assets_extra=None,
+                                as_text=False,
+                                checkpoint_path=None,
+                                strip_default_attrs=False):
+  """Export `Estimator` trained model for TPU inference.
+
+  Args:
+    estimator: `Estimator` with which model has been trained.
+    export_dir_base: A string containing a directory in which to create
+      timestamped subdirectories containing exported SavedModels.
+    serving_input_receiver_fn: A function that takes no argument and
+      returns a `ServingInputReceiver` or `TensorServingInputReceiver`.
+    assets_extra: A dict specifying how to populate the assets.extra directory
+      within the exported SavedModel, or `None` if no extra assets are needed.
+    as_text: Whether to write the SavedModel proto in text format.
+    checkpoint_path: The checkpoint path to export.  If `None` (the default),
+      the most recent checkpoint found within the model directory is chosen.
+    strip_default_attrs: Boolean. If `True`, default-valued attributes will be
+      removed from the NodeDefs.
+
+  Returns:
+    The string path to the exported directory.
+  """
+  # `TPUEstimator` requires `tpu_config.RunConfig`, so we cannot use
+  # `estimator.config`.
+  config = tpu_config.RunConfig(model_dir=estimator.model_dir)
+  est = TPUEstimator(
+      estimator._model_fn,  # pylint: disable=protected-access
+      config=config,
+      params=estimator.params,
+      use_tpu=True,
+      train_batch_size=2048,  # Does not matter.
+      eval_batch_size=2048,  # Does not matter.
+  )
+  return est.export_savedmodel(export_dir_base, serving_input_receiver_fn,
+                               assets_extra,
+                               as_text,
+                               checkpoint_path,
+                               strip_default_attrs)
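
A hedged usage sketch for the new helper; the feature spec and export path are illustrative, and `build_raw_serving_input_receiver_fn` is the stock Estimator utility for placeholder-fed serving graphs:

    import tensorflow as tf

    features = {'x': tf.placeholder(tf.float32, shape=[None, 8], name='x')}
    serving_input_receiver_fn = (
        tf.estimator.export.build_raw_serving_input_receiver_fn(features))
    export_dir = export_estimator_savedmodel(
        trained_estimator,      # a previously trained tf.estimator.Estimator
        '/tmp/tpu_savedmodel',  # timestamped subdirectories go under here
        serving_input_receiver_fn)
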
diff --git a/tensorflow/core/BUILD b/tensorflow/core/BUILD
index dbe87a6..514713b 100644
--- a/tensorflow/core/BUILD
+++ b/tensorflow/core/BUILD
@@ -846,6 +846,7 @@
         "util/sparse/sparse_tensor.h",
         "util/stat_summarizer.h",
         "util/stat_summarizer_options.h",
+        "util/status_util.h",
         "util/stream_executor_util.h",
         "util/strided_slice_op.h",
         "util/tensor_format.h",
@@ -2464,6 +2465,7 @@
             "framework/resource_handle.cc",
             "util/memmapped_file_system.*",
             "util/memmapped_file_system_writer.*",
+            "util/stats_calculator.*",
             "util/version_info.cc",
         ],
     ) + select({
@@ -2490,6 +2492,7 @@
         ":protos_all_proto_text",
         ":error_codes_proto_text",
         ":protos_all_cc",
+        ":stats_calculator_portable",
         ":version_lib",
         "//tensorflow/core/platform/default/build_config:platformlib",
         "//tensorflow/core/kernels:bounds_check",
diff --git a/tensorflow/core/api_def/base_api/api_def_SlideDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_SlideDataset.pbtxt
index c80ee77..ddde3ee 100644
--- a/tensorflow/core/api_def/base_api/api_def_SlideDataset.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_SlideDataset.pbtxt
@@ -8,11 +8,18 @@
 END
   }
   in_arg {
-    name: "stride"
+    name: "window_shift"
     description: <<END
 A scalar representing the steps moving the sliding window
 forward in one iteration. It must be positive.
 END
   }
+  in_arg {
+    name: "window_stride"
+    description: <<END
+A scalar representing the stride of the input elements of the sliding window.
+It must be positive.
+END
+  }
   summary: "Creates a dataset that passes a sliding window over `input_dataset`."
 }
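
To make the two window parameters concrete, here is a plain-Python rendering of the documented semantics (an illustration of the arithmetic only, not the kernel):

    def sliding_windows(elements, window_size, window_shift, window_stride):
      # Each window takes `window_size` elements spaced `window_stride` apart;
      # consecutive windows start `window_shift` input elements apart.
      start = 0
      while start + (window_size - 1) * window_stride < len(elements):
        yield [elements[start + k * window_stride] for k in range(window_size)]
        start += window_shift

    # sliding_windows(range(8), 3, 2, 1) -> [0,1,2], [2,3,4], [4,5,6]
    # sliding_windows(range(8), 3, 2, 2) -> [0,2,4], [2,4,6]
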
diff --git a/tensorflow/core/common_runtime/direct_session.cc b/tensorflow/core/common_runtime/direct_session.cc
index f903faf..1732553 100644
--- a/tensorflow/core/common_runtime/direct_session.cc
+++ b/tensorflow/core/common_runtime/direct_session.cc
@@ -1188,12 +1188,11 @@
         delete kernel;
       }
     };
-    params.node_outputs_cb = node_outputs_callback_;
 
     optimizer.Optimize(lib, options_.env, device, &iter->second,
                        /*shape_map=*/nullptr);
 
-    // EXPERIMENTAL: tfdbg inserts debug nodes in the graph.
+    // TensorFlow Debugger (tfdbg) inserts debug nodes in the graph.
     const DebugOptions& debug_options =
         options.callable_options.run_options().debug_options();
     if (!debug_options.debug_tensor_watch_opts().empty()) {
diff --git a/tensorflow/core/common_runtime/eager/context.cc b/tensorflow/core/common_runtime/eager/context.cc
index 70208fb..aaca633 100644
--- a/tensorflow/core/common_runtime/eager/context.cc
+++ b/tensorflow/core/common_runtime/eager/context.cc
@@ -34,8 +34,16 @@
           local_device_manager_.get(), opts.env, TF_GRAPH_DEF_VERSION,
           &func_lib_def_, {}, thread_pool_.get())),
       log_device_placement_(opts.config.log_device_placement()),
-      async_default_(async) {
+      async_default_(async),
+      env_(opts.env) {
   InitDeviceMapAndAsync();
+  if (opts.config.inter_op_parallelism_threads() > 0) {
+    runner_ = [this](std::function<void()> closure) {
+      this->thread_pool_->Schedule(closure);
+    };
+  } else {
+    runner_ = [](std::function<void()> closure) { closure(); };
+  }
 }
 
 #ifndef __ANDROID__
@@ -57,6 +65,7 @@
       log_device_placement_(opts.config.log_device_placement()),
       async_default_(async),
       remote_device_manager_(std::move(remote_device_manager)),
+      env_(opts.env),
       server_(std::move(server)),
       remote_eager_workers_(std::move(remote_eager_workers)),
       remote_contexts_(remote_contexts) {
@@ -109,6 +118,9 @@
 void EagerContext::ClearCaches() {
   mutex_lock ml(cache_mu_);
   gtl::STLDeleteValues(&kernel_cache_);
+  pflr_.reset(new ProcessFunctionLibraryRuntime(
+      local_device_manager_.get(), env_, TF_GRAPH_DEF_VERSION, &func_lib_def_,
+      {}, thread_pool_.get()));
 }
 
 void EagerContext::SetThreadLocalDevicePlacementPolicy(
diff --git a/tensorflow/core/common_runtime/eager/context.h b/tensorflow/core/common_runtime/eager/context.h
index 864f514..6825c39 100644
--- a/tensorflow/core/common_runtime/eager/context.h
+++ b/tensorflow/core/common_runtime/eager/context.h
@@ -105,6 +105,8 @@
 
   EagerExecutor* Executor() { return &executor_; }
 
+  std::function<void(std::function<void()>)>* runner() { return &runner_; }
+
   // Sets whether this thread should run in synchronous or asynchronous mode.
   Status SetAsyncForThread(bool async);
 
@@ -209,10 +211,9 @@
 
   std::unique_ptr<thread::ThreadPool> thread_pool_;
 
-  // One FunctionLibraryRuntime per device.
-  // func_libs[i] is the FunctionLibraryRuntime corresponding to
-  // session->devices[i].
-  const std::unique_ptr<ProcessFunctionLibraryRuntime> pflr_;
+  std::unique_ptr<ProcessFunctionLibraryRuntime> pflr_;
+
+  std::function<void(std::function<void()>)> runner_;
 
   mutex cache_mu_;
   std::unordered_map<Fprint128, KernelAndDevice*, Fprint128Hasher> kernel_cache_
@@ -235,6 +236,8 @@
 
   const std::unique_ptr<DeviceMgr> remote_device_manager_;
 
+  tensorflow::Env* const env_;
+
   // The server_ is not const since we release it when the context is destroyed.
   // Therefore the server_ object is not marked as const (even though it should
   // be).
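
The runner plumbing above is the inter-op threading choice expressed as a closure: schedule on the context's pool when `inter_op_parallelism_threads > 0`, otherwise run inline on the caller. A compact Python stand-in for the C++ selection logic:

    from concurrent.futures import ThreadPoolExecutor

    def make_runner(inter_op_parallelism_threads):
      if inter_op_parallelism_threads > 0:
        pool = ThreadPoolExecutor(inter_op_parallelism_threads)
        return lambda closure: pool.submit(closure)  # asynchronous dispatch
      return lambda closure: closure()  # synchronous; matches default_runner_
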
diff --git a/tensorflow/core/common_runtime/eager/execute.cc b/tensorflow/core/common_runtime/eager/execute.cc
index 7a2b477..5ea814e 100644
--- a/tensorflow/core/common_runtime/eager/execute.cc
+++ b/tensorflow/core/common_runtime/eager/execute.cc
@@ -512,7 +512,8 @@
     // See WARNING comment in Execute (before kernel->Run) - would be nice to
     // rework to avoid this subtlety.
     tf_shared_lock l(*ctx->FunctionsMu());
-    status = KernelAndDevice::Init(ndef, ctx->func_lib(device), kernel);
+    status = KernelAndDevice::Init(ndef, ctx->func_lib(device), ctx->runner(),
+                                   kernel);
     if (!status.ok()) {
       delete kernel;
       return status;
diff --git a/tensorflow/core/common_runtime/eager/kernel_and_device.cc b/tensorflow/core/common_runtime/eager/kernel_and_device.cc
index b410ea1..dae5d19 100644
--- a/tensorflow/core/common_runtime/eager/kernel_and_device.cc
+++ b/tensorflow/core/common_runtime/eager/kernel_and_device.cc
@@ -41,17 +41,22 @@
   out->device_ = device;
   out->kernel_.reset(k);
   out->flib_ = nullptr;
+  out->runner_ = nullptr;
+  out->default_runner_ = [](std::function<void()> f) { f(); };
   return s;
 }
 
 // static
 Status KernelAndDevice::Init(const NodeDef& ndef, FunctionLibraryRuntime* flib,
+                             std::function<void(std::function<void()>)>* runner,
                              KernelAndDevice* out) {
   OpKernel* k = nullptr;
   Status s = flib->CreateKernel(ndef, &k);
   out->device_ = flib->device();
   out->kernel_.reset(k);
   out->flib_ = flib;
+  out->runner_ = runner;
+  out->default_runner_ = [](std::function<void()> f) { f(); };
   return s;
 }
 
@@ -83,10 +88,11 @@
   if (stats != nullptr) {
     params.track_allocations = true;
   }
-  // TODO(apassos): use a thread pool.
-  std::function<void(std::function<void()>)> runner =
-      [](std::function<void()> f) { f(); };
-  params.runner = &runner;
+  if (runner_ == nullptr) {
+    params.runner = &default_runner_;
+  } else {
+    params.runner = runner_;
+  }
 
   ScopedStepContainer step_container(0, [this](const string& name) {
     device_->resource_manager()->Cleanup(name).IgnoreError();
diff --git a/tensorflow/core/common_runtime/eager/kernel_and_device.h b/tensorflow/core/common_runtime/eager/kernel_and_device.h
index c41a097..c0b676b 100644
--- a/tensorflow/core/common_runtime/eager/kernel_and_device.h
+++ b/tensorflow/core/common_runtime/eager/kernel_and_device.h
@@ -57,6 +57,7 @@
   // the FunctionLibraryRuntime is pushed on to the caller (see locking in
   // c_api.cc).
   static Status Init(const NodeDef& ndef, FunctionLibraryRuntime* flib,
+                     std::function<void(std::function<void()>)>* runner,
                      KernelAndDevice* out);
   // TODO(ashankar): Remove this
   static Status InitOp(Device* device, const NodeDef& ndef,
@@ -88,6 +89,8 @@
   checkpoint::TensorSliceReaderCacheWrapper slice_reader_cache_;
   Rendezvous* rendez_;
   DataTypeVector output_dtypes_;
+  std::function<void(std::function<void()>)>* runner_;
+  std::function<void(std::function<void()>)> default_runner_;
 };
 
 }  // namespace tensorflow
diff --git a/tensorflow/core/common_runtime/eager/kernel_and_device_test.cc b/tensorflow/core/common_runtime/eager/kernel_and_device_test.cc
index b4349e1..6abe98f 100644
--- a/tensorflow/core/common_runtime/eager/kernel_and_device_test.cc
+++ b/tensorflow/core/common_runtime/eager/kernel_and_device_test.cc
@@ -107,8 +107,8 @@
   KernelAndDevice k(nullptr);
   tensorflow::testing::StartTiming();
   for (int i = 0; i < iters; ++i) {
-    TF_CHECK_OK(
-        KernelAndDevice::Init(ndef, env.function_library_runtime(), &k));
+    TF_CHECK_OK(KernelAndDevice::Init(ndef, env.function_library_runtime(),
+                                      nullptr, &k));
   }
 }
 BENCHMARK(BM_KernelAndDeviceInit);
@@ -128,8 +128,8 @@
                    .BuildNodeDef());
   TestEnv env;
   KernelAndDevice kernel(nullptr);
-  TF_CHECK_OK(
-      KernelAndDevice::Init(ndef, env.function_library_runtime(), &kernel));
+  TF_CHECK_OK(KernelAndDevice::Init(ndef, env.function_library_runtime(),
+                                    nullptr, &kernel));
   tensorflow::testing::StartTiming();
   for (int i = 0; i < iters; ++i) {
     TF_CHECK_OK(kernel.Run(&inputs, &outputs, nullptr));
diff --git a/tensorflow/core/common_runtime/eager/tensor_handle.cc b/tensorflow/core/common_runtime/eager/tensor_handle.cc
index f9b9abc..85b0b79 100644
--- a/tensorflow/core/common_runtime/eager/tensor_handle.cc
+++ b/tensorflow/core/common_runtime/eager/tensor_handle.cc
@@ -109,6 +109,19 @@
   return Status::OK();
 }
 
+Status TensorHandle::Shape(tensorflow::TensorShape* shape) {
+  if (IsRemote()) {
+    TF_RETURN_IF_ERROR(WaitForNode(remote_shape_node_id_, false));
+    CHECK(remote_shape_ != nullptr);
+    *shape = *(remote_shape_.get());
+  } else {
+    TF_RETURN_IF_ERROR(WaitReady());
+    DCHECK(IsReady());
+    *shape = tensor_.shape();
+  }
+  return Status::OK();
+}
+
 Status TensorHandle::NumDims(int* num_dims) {
   if (IsRemote()) {
     TF_RETURN_IF_ERROR(WaitForNode(remote_shape_node_id_, false));
diff --git a/tensorflow/core/common_runtime/eager/tensor_handle.h b/tensorflow/core/common_runtime/eager/tensor_handle.h
index 46bc94f..1bc9c65 100644
--- a/tensorflow/core/common_runtime/eager/tensor_handle.h
+++ b/tensorflow/core/common_runtime/eager/tensor_handle.h
@@ -109,6 +109,8 @@
                          tensorflow::Device** device,
                          tensorflow::Device** op_device);
 
+  Status Shape(tensorflow::TensorShape* shape);
+
   Status NumDims(int* num_dims);
   Status Dim(int dim_index, int64* dim);
 
@@ -138,6 +140,12 @@
     remote_shape_ = std::move(remote_shape);
   }
 
+  bool OnHostCPU() {
+    mutex_lock ml(ctx_mutex_);
+    return device_ == nullptr ||
+           (ctx_ == nullptr || ctx_->HostCPU() == device_);
+  }
+
  private:
   // If the contents of the Tensor pointed to by this handle are yet to be
   // computed by an EagerNode, this function will block till that computation is
diff --git a/tensorflow/core/common_runtime/executor.cc b/tensorflow/core/common_runtime/executor.cc
index f7f2cdc..8096139 100644
--- a/tensorflow/core/common_runtime/executor.cc
+++ b/tensorflow/core/common_runtime/executor.cc
@@ -1966,17 +1966,9 @@
     device_context = device_context_map_[node->id()];
   }
 
-  // Experimental: debugger (tfdb) access to intermediate node completion.
-  if (item.num_outputs == 0 && impl_->params_.node_outputs_cb != nullptr) {
-    // If the node has no output, invoke the callback with output slot set to
-    // -1, signifying that this is a no-output node.
-    s.Update(impl_->params_.node_outputs_cb(item.node->name(), -1, nullptr,
-                                            false, ctx));
-  }
-
   for (int i = 0; i < item.num_outputs; ++i) {
     const TensorValue val = ctx->release_output(i);
-    if (*ctx->is_output_dead() || val.tensor == nullptr) {
+    if (val.tensor == nullptr) {
       // Unless it's a Switch or a Recv, the node must produce a
       // tensor value at i-th output.
       if (!IsSwitch(node) && !IsRecv(node)) {
@@ -2018,13 +2010,6 @@
             LogMemory::RecordTensorOutput(ctx->op_kernel().name(),
                                           ctx->step_id(), i, to_log);
           }
-
-          // Experimental: debugger (tfdb) access to intermediate node
-          // outputs.
-          if (impl_->params_.node_outputs_cb != nullptr) {
-            s.Update(impl_->params_.node_outputs_cb(item.node->name(), i,
-                                                    out->ref, true, ctx));
-          }
         } else {
           // NOTE that std::move is used here, so val.tensor goes to
           // uninitialized state (val.tensor->IsInitialized return false).
@@ -2036,12 +2021,6 @@
             LogMemory::RecordTensorOutput(ctx->op_kernel().name(),
                                           ctx->step_id(), i, *out->val);
           }
-
-          // Experimental: debugger access to intermediate node outputs.
-          if (impl_->params_.node_outputs_cb != nullptr) {
-            s.Update(impl_->params_.node_outputs_cb(
-                item.node->name(), i, out->val.get(), false, ctx));
-          }
         }
       } else {
         s.Update(errors::Internal("Output ", i, " of type ",
diff --git a/tensorflow/core/common_runtime/executor.h b/tensorflow/core/common_runtime/executor.h
index e5d7b7c..cd01b43 100644
--- a/tensorflow/core/common_runtime/executor.h
+++ b/tensorflow/core/common_runtime/executor.h
@@ -103,7 +103,6 @@
                                  const Tensor* tensor, const bool is_ref,
                                  OpKernelContext* ctx)>
         NodeOutputsCallback;
-    NodeOutputsCallback node_outputs_cb = nullptr;
   };
   typedef std::function<void(const Status&)> DoneCallback;
   virtual void RunAsync(const Args& args, DoneCallback done) = 0;
@@ -139,8 +138,6 @@
   // when the executor is deleted.
   std::function<Status(const NodeDef&, OpKernel**)> create_kernel;
   std::function<void(OpKernel*)> delete_kernel;
-
-  Executor::Args::NodeOutputsCallback node_outputs_cb;
 };
 ::tensorflow::Status NewLocalExecutor(const LocalExecutorParams& params,
                                       std::unique_ptr<const Graph> graph,
diff --git a/tensorflow/core/common_runtime/placer.cc b/tensorflow/core/common_runtime/placer.cc
index 1f0773d..6781c87 100644
--- a/tensorflow/core/common_runtime/placer.cc
+++ b/tensorflow/core/common_runtime/placer.cc
@@ -30,6 +30,7 @@
 #include "tensorflow/core/lib/core/errors.h"
 #include "tensorflow/core/lib/core/stringpiece.h"
 #include "tensorflow/core/lib/strings/str_util.h"
+#include "tensorflow/core/util/status_util.h"
 
 namespace tensorflow {
 
@@ -822,10 +823,10 @@
     std::vector<Device*>* devices;
     Status status = colocation_graph.GetDevicesForNode(node, &devices);
     if (!status.ok()) {
-      return AttachDef(
-          errors::InvalidArgument("Cannot assign a device for operation '",
-                                  node->name(), "': ", status.error_message()),
-          *node);
+      return AttachDef(errors::InvalidArgument(
+                           "Cannot assign a device for operation ",
+                           RichNodeName(node), ": ", status.error_message()),
+                       *node);
     }
 
     // Returns the first device in sorted devices list so we will always
@@ -869,10 +870,10 @@
     std::vector<Device*>* devices;
     Status status = colocation_graph.GetDevicesForNode(node, &devices);
     if (!status.ok()) {
-      return AttachDef(
-          errors::InvalidArgument("Cannot assign a device for operation '",
-                                  node->name(), "': ", status.error_message()),
-          *node);
+      return AttachDef(errors::InvalidArgument(
+                           "Cannot assign a device for operation ",
+                           RichNodeName(node), ": ", status.error_message()),
+                       *node);
     }
 
     int assigned_device = -1;
@@ -938,4 +939,22 @@
   }
 }
 
+bool Placer::ClientHandlesErrorFormatting() const {
+  return options_ != nullptr &&
+         options_->config.experimental().client_handles_error_formatting();
+}
+
+// Returns the node name in single quotes. If the client handles formatted
+// errors, appends a formatting tag that the client will expand into, for
+// example, " (defined at filename:123)".
+string Placer::RichNodeName(const Node* node) const {
+  string quoted_name = strings::StrCat("'", node->name(), "'");
+  if (ClientHandlesErrorFormatting()) {
+    string file_and_line = error_format_tag(*node, "${file}:${line}");
+    return strings::StrCat(quoted_name, " (defined at ", file_and_line, ")");
+  } else {
+    return quoted_name;
+  }
+}
+
 }  // namespace tensorflow
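
The `^^node:<name>:${file}:${line}^^` tags emitted by `RichNodeName` (exercised by the placer tests below) are intended to be rewritten by the client into human-readable locations. A hedged sketch of such a client-side rewrite; the tag grammar is inferred from the tests, and the name-to-location table is illustrative:

    import re

    _TAG_RE = re.compile(r'\^\^node:([^:]+):\$\{file\}:\$\{line\}\^\^')

    def expand_error_tags(message, node_locations):
      # node_locations maps a node name to the (file, line) recorded by the
      # client when the graph was built.
      def _sub(match):
        filename, line = node_locations.get(match.group(1), ('<unknown>', 0))
        return '%s:%d' % (filename, line)
      return _TAG_RE.sub(_sub, message)

    # expand_error_tags("... 'in' (defined at ^^node:in:${file}:${line}^^)",
    #                   {'in': ('model.py', 42)})
    # -> "... 'in' (defined at model.py:42)"
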
diff --git a/tensorflow/core/common_runtime/placer.h b/tensorflow/core/common_runtime/placer.h
index 75dce7c..fce8726 100644
--- a/tensorflow/core/common_runtime/placer.h
+++ b/tensorflow/core/common_runtime/placer.h
@@ -87,6 +87,8 @@
   // placement if the SessionOptions entry in 'options_' requests it.
   void AssignAndLog(int assigned_device, Node* node) const;
   void LogDeviceAssignment(const Node* node) const;
+  bool ClientHandlesErrorFormatting() const;
+  string RichNodeName(const Node* node) const;
 
   Graph* const graph_;              // Not owned.
   const DeviceSet* const devices_;  // Not owned.
diff --git a/tensorflow/core/common_runtime/placer_test.cc b/tensorflow/core/common_runtime/placer_test.cc
index 07a7724..cede899 100644
--- a/tensorflow/core/common_runtime/placer_test.cc
+++ b/tensorflow/core/common_runtime/placer_test.cc
@@ -1142,6 +1142,50 @@
   EXPECT_TRUE(str_util::StrContains(s.error_message(), "/device:fakegpu:11"));
 }
 
+// Test that the "Cannot assign a device" error message contains a format tag
+// when requested.
+TEST_F(PlacerTest, TestNonexistentGpuNoAllowSoftPlacementFormatTag) {
+  Graph g(OpRegistry::Global());
+  {  // Scope for temporary variables used to construct g.
+    GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
+    ops::SourceOp("TestDevice",
+                  b.opts().WithName("in").WithDevice("/device:fakegpu:11"));
+    TF_EXPECT_OK(BuildGraph(b, &g));
+  }
+
+  SessionOptions options;
+  options.config.mutable_experimental()->set_client_handles_error_formatting(
+      true);
+  Status s = Place(&g, &options);
+  EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
+  EXPECT_TRUE(
+      str_util::StrContains(s.error_message(),
+                            "Cannot assign a device for operation 'in'"
+                            " (defined at ^^node:in:${file}:${line}^^)"));
+}
+
+// Test that the "Cannot assign a device" error message does not contain a
+// format tag when it is not requested.
+TEST_F(PlacerTest, TestNonexistentGpuNoAllowSoftPlacementNoFormatTag) {
+  Graph g(OpRegistry::Global());
+  {  // Scope for temporary variables used to construct g.
+    GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
+    ops::SourceOp("TestDevice",
+                  b.opts().WithName("in").WithDevice("/device:fakegpu:11"));
+    TF_EXPECT_OK(BuildGraph(b, &g));
+  }
+
+  SessionOptions options;
+  options.config.mutable_experimental()->set_client_handles_error_formatting(
+      false);
+  Status s = Place(&g, &options);
+  EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
+  EXPECT_TRUE(str_util::StrContains(
+      s.error_message(), "Cannot assign a device for operation 'in'"));
+  EXPECT_FALSE(str_util::StrContains(
+      s.error_message(), "'in' (defined at ^^node:in:${file}:${line}^^)"));
+}
+
 // Test that placement fails when a node requests an explicit device that is not
 // supported by the registered kernels if allow_soft_placement is not set.
 TEST_F(PlacerTest, TestUnsupportedDeviceNoAllowSoftPlacement) {
diff --git a/tensorflow/core/debug/BUILD b/tensorflow/core/debug/BUILD
index 36e9b34..591c22b 100644
--- a/tensorflow/core/debug/BUILD
+++ b/tensorflow/core/debug/BUILD
@@ -82,25 +82,6 @@
 )
 
 tf_cuda_library(
-    name = "debug_gateway_internal",
-    srcs = ["debug_gateway.cc"],
-    hdrs = ["debug_gateway.h"],
-    copts = tf_copts(),
-    linkstatic = 1,
-    deps = [
-        ":debug",
-        "//tensorflow/core:core_cpu_internal",
-        "//tensorflow/core:direct_session_internal",
-        "//tensorflow/core:framework",
-        "//tensorflow/core:lib",
-        "//tensorflow/core:lib_internal",
-        "//tensorflow/core:proto_text",
-        "//tensorflow/core:protos_all_cc",
-    ],
-    alwayslink = 1,
-)
-
-tf_cuda_library(
     name = "debugger_state_impl",
     srcs = ["debugger_state_impl.cc"],
     hdrs = ["debugger_state_impl.h"],
@@ -187,42 +168,6 @@
     ],
 )
 
-# TODO(cais): Fix flakiness on GPU and change this back to a tf_cc_test_gpu.
-#   See b/34081273.
-tf_cc_test(
-    name = "debug_gateway_test",
-    size = "small",
-    srcs = ["debug_gateway_test.cc"],
-    args = ["--heap_check=local"],
-    linkstatic = tf_kernel_tests_linkstatic(),
-    tags = [
-        "no_cuda_on_cpu_tap",
-        "no_gpu",
-    ],
-    deps = [
-        ":debug",
-        ":debug_gateway_internal",
-        ":debug_graph_utils",
-        "//tensorflow/cc:cc_ops",
-        "//tensorflow/core:all_kernels",
-        "//tensorflow/core:core_cpu",
-        "//tensorflow/core:core_cpu_internal",
-        "//tensorflow/core:direct_session",
-        "//tensorflow/core:direct_session_internal",
-        "//tensorflow/core:framework",
-        "//tensorflow/core:framework_internal",
-        "//tensorflow/core:gpu_runtime",
-        "//tensorflow/core:lib",
-        "//tensorflow/core:lib_internal",
-        "//tensorflow/core:protos_all_cc",
-        "//tensorflow/core:test",
-        "//tensorflow/core:test_main",
-        "//tensorflow/core:testlib",
-        "//tensorflow/core/kernels:debug_ops",
-        "//tensorflow/core/kernels:ops_util",
-    ],
-)
-
 tf_cc_test(
     name = "debug_io_utils_test",
     size = "small",
diff --git a/tensorflow/core/debug/debug_gateway.cc b/tensorflow/core/debug/debug_gateway.cc
deleted file mode 100644
index 2e1aabd..0000000
--- a/tensorflow/core/debug/debug_gateway.cc
+++ /dev/null
@@ -1,122 +0,0 @@
-/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/core/debug/debug_gateway.h"
-
-#include <utility>
-
-#include "tensorflow/core/common_runtime/device_factory.h"
-#include "tensorflow/core/common_runtime/session_factory.h"
-#include "tensorflow/core/framework/tensor.h"
-
-namespace tensorflow {
-
-DebugGateway::DebugGateway(DirectSession* session) : session_(session) {
-  session_->node_outputs_callback_ =
-      [this](const string& node_name, const int output_slot,
-             const Tensor* tensor, const bool is_ref, OpKernelContext* ctx) {
-        if (comp_cb_ != nullptr && output_slot <= 0) {
-          // The node completion callback is invoked once for a node regardless
-          // of whether the node has zero, one or more outputs.
-          // The output_slot can be negative (-1, or kControlSlot) if
-          // node_outputs_callback_ is invoked for a node with no output. If
-          // that is the case, notify the callback that the node in question has
-          // no output.
-          comp_cb_(node_name, output_slot == 0);
-        }
-
-        // Copy tensor values (e.g., from GPU to host) only if the
-        // value callback is not nullptr.
-        if (val_cb_ != nullptr && output_slot >= 0) {
-          CopyTensor(node_name, output_slot, tensor, ctx,
-                     [this, node_name, output_slot,
-                      is_ref](const Tensor* copied_tensor) {
-                       val_cb_(node_name, output_slot, *copied_tensor, is_ref);
-                     });
-        }
-
-        return Status::OK();
-      };
-}
-
-DebugGateway::~DebugGateway() {
-  if (session_ != nullptr) {
-    session_->node_outputs_callback_ = nullptr;
-  }
-}
-
-void DebugGateway::SetNodeCompletionCallback(NodeCompletionCallback callback) {
-  comp_cb_ = std::move(callback);
-}
-
-void DebugGateway::SetNodeValueCallback(NodeValueCallback callback) {
-  val_cb_ = std::move(callback);
-}
-
-void DebugGateway::CopyTensor(const string& node_name, const int output_slot,
-                              const Tensor* src_tensor, OpKernelContext* ctx,
-                              CopyDoneCallback copy_done_cb) {
-  Device* device = static_cast<Device*>(ctx->device());
-
-  // Determine if the tensor is initialized properly.
-  // The second part of the check is necessary because in some cases, a
-  // tensor can pass the IsInitialized() check, but the dtype is not set,
-  // e.g., tf.FIFOQueue.
-  if (src_tensor->IsInitialized() && DataTypeSize(src_tensor->dtype()) > 0) {
-    // Tensor is initialized.
-
-    string tensor_tag = strings::StrCat(node_name, ":", output_slot);
-
-    // Create copied tensor on host
-    Allocator* cpu_allocator = tensorflow::cpu_allocator();
-    Tensor cpu_tensor(cpu_allocator, src_tensor->dtype(), src_tensor->shape());
-
-    // Determine if the tensor is on device (GPU) or host (CPU).
-    // The second part of the check is necessary because even an OpKernel
-    // on GPU may have output tensors allocated on CPU.
-    if ((device->name().find("GPU:") != string::npos ||
-         device->name().find("SYCL:") != string::npos) &&
-        !ctx->output_alloc_attr(output_slot).on_host()) {
-      // GPU tensors: Copy it to host (CPU).
-      DeviceContext* device_ctxt = ctx->op_device_context();
-
-      // Copy device (e.g., GPU) tensor to host and when done, invoke the
-      // callback.
-      device_ctxt->CopyDeviceTensorToCPU(
-          src_tensor, "TensorCopy", device, &cpu_tensor,
-          [node_name, cpu_tensor, copy_done_cb](const Status& s) {
-            if (s.ok()) {
-              copy_done_cb(&cpu_tensor);
-            } else {
-              LOG(ERROR) << "Copying of device Tensor " << node_name
-                         << " to CPU for debugging failed.";
-            }
-          });
-    } else {
-      // For CPU tensors, copy the source tensor and own the copy, because the
-    // value callback may outlive the lifetime of the tensor and the tensor
-    // may share the underlying buffer with other tensors.
-      cpu_tensor.UnsafeCopyFromInternal(*src_tensor, src_tensor->dtype(),
-                                        src_tensor->shape());
-
-      copy_done_cb(&cpu_tensor);
-    }
-  } else {
-    // Tensor is not initialized: No need to copy.
-    copy_done_cb(src_tensor);
-  }
-}
-
-}  // namespace tensorflow
diff --git a/tensorflow/core/debug/debug_gateway.h b/tensorflow/core/debug/debug_gateway.h
deleted file mode 100644
index bf5b6e0..0000000
--- a/tensorflow/core/debug/debug_gateway.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef TENSORFLOW_DEBUG_DEBUG_SESSION_H_
-#define TENSORFLOW_DEBUG_DEBUG_SESSION_H_
-
-#include <unordered_map>
-
-#include "tensorflow/core/common_runtime/direct_session.h"
-#include "tensorflow/core/common_runtime/executor.h"
-
-namespace tensorflow {
-
-// Experimental. tfdb (TensorFlow Debugger): Gateway to intermediate node
-// outputs during Session Run calls. Currently limited to DirectSession.
-class DebugGateway {
- public:
-  DebugGateway(DirectSession* session);
-  virtual ~DebugGateway();
-
-  // Callback for node completion. This callback is invoked only once for
-  // a node regardless of whether it has one or more outputs. The value(s) of
-  // the output tensor(s) are not necessarily available when this callback is
-  // invoked. They may need to be asynchronously copied from device (e.g.,
-  // GPU) to host, hence the need for the NodeValueCallback below.
-  //
-  // Args:
-  //   node_name: Name of the node that has just completed execution
-  //   any_output: Whether the node has any output(s)
-  typedef std::function<void(const string& node_name, const bool any_output)>
-      NodeCompletionCallback;
-  void SetNodeCompletionCallback(NodeCompletionCallback callback);
-
-  // Callback for node value. This is invoked when the value of a node's
-  // output tensor is available on the host, possibly after copying from
-  // a device (e.g., GPU).
-  //
-  // Args:
-  //   node_name: Name of the node of which the output has become available
-  //   output_slot: Output slot number of the output Tensor
-  //   tensor_value: Reference to the tensor value
-  //   is_ref: Whether the output of the reference type
-  typedef std::function<void(const string& node_name, const int output_slot,
-                             const Tensor& tensor_value, const bool is_ref)>
-      NodeValueCallback;
-  void SetNodeValueCallback(NodeValueCallback callback);
-
-  // TODO(cais): Add whitelists for ops/tensors (e.g., {"A:0", "B:0"})
-  // for node completion callback (whitelist_comp_) and node value callback
-  // (whitelist_val_). If whitelist_comp_ is non-empty, the gateway will
-  // invoke the NodeCompletionCallback only for the nodes specified in the
-  // whitelist. And so forth for whitelist_val_.
-
- private:
-  DirectSession* session_;
-  // TODO(cais): DebugGateway currently supports only DirectSession. Add
-  // support for GrpcSession.
-
-  NodeCompletionCallback comp_cb_ = nullptr;
-  NodeValueCallback val_cb_ = nullptr;
-
-  typedef std::function<void(const Tensor* dst_tensor)> CopyDoneCallback;
-
-  void CopyTensor(const string& node_name, const int output_slot,
-                  const Tensor* src_tensor, OpKernelContext* ctx,
-                  CopyDoneCallback copy_done_cb);
-};
-
-}  // end namespace tensorflow
-
-#endif  // TENSORFLOW_DEBUG_DEBUG_SESSION_H_
diff --git a/tensorflow/core/debug/debug_gateway_test.cc b/tensorflow/core/debug/debug_gateway_test.cc
deleted file mode 100644
index b1bbd3f..0000000
--- a/tensorflow/core/debug/debug_gateway_test.cc
+++ /dev/null
@@ -1,1011 +0,0 @@
-/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/core/debug/debug_gateway.h"
-
-#include <algorithm>
-#include <cstdlib>
-#include <memory>
-#include <unordered_map>
-
-#include "tensorflow/core/debug/debug_graph_utils.h"
-#include "tensorflow/core/framework/tensor_testutil.h"
-#include "tensorflow/core/graph/testlib.h"
-#include "tensorflow/core/lib/core/notification.h"
-#include "tensorflow/core/lib/core/status_test_util.h"
-#include "tensorflow/core/lib/core/threadpool.h"
-#include "tensorflow/core/protobuf/rewriter_config.pb.h"
-
-namespace tensorflow {
-namespace {
-
-std::unique_ptr<DirectSession> CreateSession() {
-  SessionOptions options;
-  // Turn off graph optimizer so we can observe intermediate node states.
-  options.config.mutable_graph_options()
-      ->mutable_optimizer_options()
-      ->set_opt_level(OptimizerOptions_Level_L0);
-  options.config.mutable_graph_options()
-      ->mutable_rewrite_options()
-      ->set_constant_folding(RewriterConfig::OFF);
-  options.config.mutable_graph_options()
-      ->mutable_rewrite_options()
-      ->set_dependency_optimization(RewriterConfig::OFF);
-
-  return std::unique_ptr<DirectSession>(
-      dynamic_cast<DirectSession*>(NewSession(options)));
-}
-
-class SessionDebugMinusAXTest : public ::testing::Test {
- public:
-  void Initialize(std::initializer_list<float> a_values) {
-    Graph graph(OpRegistry::Global());
-
-#if GOOGLE_CUDA
-    const string kDeviceName = "/job:localhost/replica:0/task:0/device:GPU:0";
-#elif defined(TENSORFLOW_USE_SYCL)
-    const string kDeviceName = "/job:localhost/replica:0/task:0/device:SYCL:0";
-#else
-    const string kDeviceName = "/job:localhost/replica:0/task:0/device:CPU:0";
-#endif
-
-    Tensor a_tensor(DT_FLOAT, TensorShape({2, 2}));
-    test::FillValues<float>(&a_tensor, a_values);
-    Node* a = test::graph::Constant(&graph, a_tensor);
-    a->set_assigned_device_name(kDeviceName);
-    a_ = a->name();
-
-    Tensor x_tensor(DT_FLOAT, TensorShape({2, 1}));
-    test::FillValues<float>(&x_tensor, {1, 1});
-    Node* x = test::graph::Constant(&graph, x_tensor);
-    x->set_assigned_device_name(kDeviceName);
-    x_ = x->name();
-
-    // y = A * x
-    Node* y = test::graph::Matmul(&graph, a, x, false, false);
-    y->set_assigned_device_name(kDeviceName);
-    y_ = y->name();
-
-    Node* y_neg = test::graph::Unary(&graph, "Neg", y);
-    y_neg_ = y_neg->name();
-    y_neg->set_assigned_device_name(kDeviceName);
-
-    test::graph::ToGraphDef(&graph, &def_);
-  }
-
-  string a_;
-  string x_;
-  string y_;
-  string y_neg_;
-  GraphDef def_;
-};
-
-TEST_F(SessionDebugMinusAXTest, RunSimpleNetwork) {
-  Initialize({3, 2, -1, 0});
-  auto session = CreateSession();
-  ASSERT_TRUE(session != nullptr);
-
-  DebugGateway debug_gateway(session.get());
-
-  // Supply completion and value callbacks
-  mutex mu;
-  // Completed nodes with and without outputs
-  std::vector<string> completed_nodes_w_outputs;
-  std::vector<string> completed_nodes_wo_outputs;
-
-  Notification callbacks_done;
-  debug_gateway.SetNodeCompletionCallback(
-      [&mu, &completed_nodes_w_outputs, &completed_nodes_wo_outputs](
-          const string& node_name, const bool any_output) {
-        mutex_lock l(mu);
-        if (any_output) {
-          completed_nodes_w_outputs.push_back(node_name);
-        } else {
-          completed_nodes_wo_outputs.push_back(node_name);
-        }
-      });
-
-  std::vector<bool> tensors_initialized;
-  std::unordered_map<string, Tensor> tensor_vals;
-  // output_slot values recorded in value callbacks
-  std::vector<int> output_slots_val;
-  // is_ref values recorded in value callbacks
-  std::vector<bool> is_refs_val;
-
-  debug_gateway.SetNodeValueCallback(
-      [this, &mu, &tensors_initialized, &tensor_vals, &output_slots_val,
-       &is_refs_val,
-       &callbacks_done](const string& node_name, const int output_slot,
-                        const Tensor& tensor_value, const bool is_ref) {
-        mutex_lock l(mu);
-        tensors_initialized.push_back(tensor_value.IsInitialized());
-        tensor_vals.insert(std::make_pair(node_name, tensor_value));
-        output_slots_val.push_back(output_slot);
-        is_refs_val.push_back(is_ref);
-
-        // Set the notification once we have the value from the target node.
-        if (node_name == y_neg_ && !callbacks_done.HasBeenNotified()) {
-          callbacks_done.Notify();
-        }
-      });
-
-  TF_ASSERT_OK(session->Create(def_));
-
-  std::vector<std::pair<string, Tensor>> inputs;
-
-  // Request two targets: one fetch output and one non-fetched output.
-  std::vector<string> output_names = {y_ + ":0"};
-  std::vector<string> target_nodes = {y_neg_};
-  std::vector<Tensor> outputs;
-  Status s = session->Run(inputs, output_names, target_nodes, &outputs);
-  TF_ASSERT_OK(s);
-
-  // Wait for callbacks to complete.
-  callbacks_done.WaitForNotification();
-
-  ASSERT_EQ(1, outputs.size());
-  // The first output should be initialized and have the correct
-  // output.
-  auto mat = outputs[0].matrix<float>();
-  ASSERT_TRUE(outputs[0].IsInitialized());
-  EXPECT_FLOAT_EQ(5.0, mat(0, 0));
-
-  // Verify the calling history of the completion callback
-  // The following verifies each node with output(s) invoked the callback
-  // exactly once.
-  ASSERT_GE(completed_nodes_w_outputs.size(), 4);  // There may be added nodes.
-
-  ASSERT_EQ(1, std::count(completed_nodes_w_outputs.begin(),
-                          completed_nodes_w_outputs.end(), a_));
-  ASSERT_EQ(1, std::count(completed_nodes_w_outputs.begin(),
-                          completed_nodes_w_outputs.end(), x_));
-  ASSERT_EQ(1, std::count(completed_nodes_w_outputs.begin(),
-                          completed_nodes_w_outputs.end(), y_));
-  ASSERT_EQ(1, std::count(completed_nodes_w_outputs.begin(),
-                          completed_nodes_w_outputs.end(), y_neg_));
-
-  // Apart from nodes with outputs, there are also no-output (control) nodes.
-  // They ought to be captured by the DebugGateway through
-  // NodeOutputsCallback as well.
-  ASSERT_GT(completed_nodes_wo_outputs.size(), 0);
-
-  // The DebugGateway should have captured the _SOURCE node.
-  ASSERT_LE(1, std::count(completed_nodes_wo_outputs.begin(),
-                          completed_nodes_wo_outputs.end(), "_SOURCE"));
-
-  // Verify the calling history of the value callback
-  ASSERT_EQ(completed_nodes_w_outputs.size(), tensors_initialized.size());
-
-  // In this graph, there is no uninitialized node value.
-  ASSERT_EQ(
-      tensors_initialized.end(),
-      std::find(tensors_initialized.begin(), tensors_initialized.end(), false));
-
-  ASSERT_EQ(completed_nodes_w_outputs.size(), tensor_vals.size());
-  ASSERT_EQ(completed_nodes_w_outputs.size(), output_slots_val.size());
-  ASSERT_EQ(completed_nodes_w_outputs.size(), is_refs_val.size());
-
-  // Verify the intermediate tensor values captured through the value callback
-  auto mat_a = tensor_vals[a_].matrix<float>();
-  ASSERT_EQ(3.0, mat_a(0, 0));
-  ASSERT_EQ(2.0, mat_a(0, 1));
-  ASSERT_EQ(-1.0, mat_a(1, 0));
-  ASSERT_EQ(0.0, mat_a(1, 1));
-
-  auto mat_x = tensor_vals[x_].matrix<float>();
-  ASSERT_EQ(1.0, mat_x(0, 0));
-  ASSERT_EQ(1.0, mat_x(1, 0));
-
-  auto mat_y = tensor_vals[y_].matrix<float>();
-  ASSERT_EQ(5.0, mat_y(0, 0));
-  ASSERT_EQ(-1.0, mat_y(1, 0));
-
-  auto mat_y_neg = tensor_vals[y_neg_].matrix<float>();
-  ASSERT_EQ(-5.0, mat_y_neg(0, 0));
-  ASSERT_EQ(1.0, mat_y_neg(1, 0));
-
-  // In this graph, all outputs are on the first slot
-  ASSERT_EQ(output_slots_val.size(),
-            std::count_if(output_slots_val.begin(), output_slots_val.end(),
-                          [](int slot) { return slot == 0; }));
-
-  // In this graph, there is no ref-type tensor.
-  ASSERT_EQ(is_refs_val.end(),
-            std::find(is_refs_val.begin(), is_refs_val.end(), true));
-}
-
-TEST_F(SessionDebugMinusAXTest, RunSimpleNetworkWithTwoDebugNodesInserted) {
-  // Tensor contains one count of NaN
-  Initialize({3, std::numeric_limits<float>::quiet_NaN(), -1, 0});
-  auto session = CreateSession();
-  ASSERT_TRUE(session != nullptr);
-
-  DebugGateway debug_gateway(session.get());
-
-  // Create debug tensor watch options with two debug ops:
-  // DebugIdentity and DebugNanCount
-  RunOptions run_opts;
-  run_opts.set_output_partition_graphs(true);
-
-  const string debug_identity = "DebugIdentity";
-  const string debug_nan_count = "DebugNanCount";
-  DebugTensorWatch* tensor_watch_opts =
-      run_opts.mutable_debug_options()->add_debug_tensor_watch_opts();
-  tensor_watch_opts->set_node_name(y_);
-  tensor_watch_opts->set_output_slot(0);
-  tensor_watch_opts->add_debug_ops(debug_identity);
-  tensor_watch_opts->add_debug_ops(debug_nan_count);
-
-  // Expected name of the inserted debug node
-  string debug_identity_node_name = DebugNodeInserter::GetDebugNodeName(
-      strings::StrCat(y_, ":", 0), 0, debug_identity);
-  string debug_nan_count_node_name = DebugNodeInserter::GetDebugNodeName(
-      strings::StrCat(y_, ":", 0), 1, debug_nan_count);
-
-  // Supply completion and value callbacks
-  mutex mu;
-  // Names of the debug nodes that have completed execution
-  std::vector<string> completed_debug_nodes;
-
-  Notification callbacks_done;
-  debug_gateway.SetNodeCompletionCallback(
-      [&mu, &debug_identity_node_name, &debug_nan_count_node_name,
-       &completed_debug_nodes](const string& node_name, const bool any_output) {
-        mutex_lock l(mu);
-        if (any_output && (node_name == debug_identity_node_name ||
-                           node_name == debug_nan_count_node_name)) {
-          completed_debug_nodes.push_back(node_name);
-        }
-      });
-
-  std::vector<Tensor> watched_tensor_vals;
-  std::vector<Tensor> debug_identity_tensor_vals;
-  std::vector<Tensor> debug_nan_count_tensor_vals;
-
-  debug_gateway.SetNodeValueCallback(
-      [this, &mu, &debug_identity_node_name, &debug_nan_count_node_name,
-       &watched_tensor_vals, &debug_identity_tensor_vals,
-       &debug_nan_count_tensor_vals,
-       &callbacks_done](const string& node_name, const int output_slot,
-                        const Tensor& tensor_value, const bool is_ref) {
-        mutex_lock l(mu);
-        if (node_name == y_) {
-          watched_tensor_vals.push_back(tensor_value);
-        } else if (node_name == debug_identity_node_name && output_slot == 0) {
-          // output_slot == 0 carries the debug signal. Same below.
-          debug_identity_tensor_vals.push_back(tensor_value);
-        } else if (node_name == debug_nan_count_node_name && output_slot == 0) {
-          debug_nan_count_tensor_vals.push_back(tensor_value);
-        }
-
-        // Set the notification once we have the value from the target node.
-        if (node_name == y_neg_ && !callbacks_done.HasBeenNotified()) {
-          callbacks_done.Notify();
-        }
-      });
-
-  TF_ASSERT_OK(session->Create(def_));
-
-  std::vector<std::pair<string, Tensor>> inputs;
-
-  // Request two targets: one fetch output and one non-fetched output.
-  std::vector<string> output_names = {y_ + ":0"};
-  std::vector<string> target_nodes = {y_neg_};
-  std::vector<Tensor> outputs;
-
-  RunMetadata run_metadata;
-  Status s = session->Run(run_opts, inputs, output_names, target_nodes,
-                          &outputs, &run_metadata);
-  TF_ASSERT_OK(s);
-
-// Verify the correct number of partition graphs (GraphDefs) emitted
-// through RunMetadata, given whether GPU is involved.
-#if GOOGLE_CUDA
-  ASSERT_EQ(2, run_metadata.partition_graphs().size());
-#elif defined(TENSORFLOW_USE_SYCL)
-  ASSERT_EQ(2, run_metadata.partition_graphs().size());
-#else
-  ASSERT_EQ(1, run_metadata.partition_graphs().size());
-#endif
-
-  // Wait for callbacks to complete.
-  callbacks_done.WaitForNotification();
-
-  // Verify that each of the two debug nodes has completed exactly once.
-  ASSERT_EQ(2, completed_debug_nodes.size());
-  ASSERT_EQ(
-      1, std::count(completed_debug_nodes.begin(), completed_debug_nodes.end(),
-                    debug_identity_node_name));
-  ASSERT_EQ(
-      1, std::count(completed_debug_nodes.begin(), completed_debug_nodes.end(),
-                    debug_nan_count_node_name));
-
-  // Verify that the tensor values from the watched node and the identity
-  // debug node are received and they are equal (owing to the debug op being
-  // "DebugIdentity")
-  ASSERT_EQ(1, watched_tensor_vals.size());
-  ASSERT_EQ(1, debug_identity_tensor_vals.size());
-  auto mat_y = watched_tensor_vals[0].matrix<float>();
-  auto mat_identity = debug_identity_tensor_vals[0].matrix<float>();
-  // ASSERT_EQ doesn't work for nan == nan
-  ASSERT_TRUE(std::isnan(mat_y(0, 0)));
-  ASSERT_TRUE(std::isnan(mat_identity(0, 0)));
-  ASSERT_EQ(-1, mat_identity(1, 0));
-
-  // Verify that the output from the NaN-count debug node indicates exactly
-  // one NaN.
-  ASSERT_EQ(1, debug_nan_count_tensor_vals.size());
-  ASSERT_EQ(1, debug_nan_count_tensor_vals[0].scalar<int64>()());
-}
-
-#if !defined(GOOGLE_CUDA) && !defined(TENSORFLOW_USE_SYCL)
-// TODO(cais): Reinstate the following test for concurrent debugged runs on
-//   a GPU once the root cause of the ~0.5% flakiness has been addressed.
-//   (b/34081273)
-TEST_F(SessionDebugMinusAXTest,
-       RunSimpleNetworkConcurrentlyWithDifferentDebugTensorWatches) {
-  // Test concurrent Run() calls on a graph with different debug watches.
-
-  Initialize({3, 2, -1, 0});
-  auto session = CreateSession();
-  ASSERT_TRUE(session != nullptr);
-  TF_ASSERT_OK(session->Create(def_));
-
-  // Number of concurrent Run() calls to launch.
-  const int kConcurrentRuns = 3;
-  thread::ThreadPool* tp =
-      new thread::ThreadPool(Env::Default(), "test", kConcurrentRuns);
-
-  std::vector<string> output_names = {y_ + ":0"};
-  std::vector<string> target_nodes = {y_neg_};
-
-  mutex mu;
-  DebugGateway debug_gateway(session.get());
-  std::unordered_map<string, Tensor> debug_identity_tensor_vals;
-
-  const string debug_identity = "DebugIdentity";
-
-  const string a_debug_identity_node_name = DebugNodeInserter::GetDebugNodeName(
-      strings::StrCat(a_, ":", 0), 0, debug_identity);
-  const string x_debug_identity_node_name = DebugNodeInserter::GetDebugNodeName(
-      strings::StrCat(x_, ":", 0), 0, debug_identity);
-  const string y_debug_identity_node_name = DebugNodeInserter::GetDebugNodeName(
-      strings::StrCat(y_, ":", 0), 0, debug_identity);
-
-  Notification callbacks_done;
-  volatile int val_callback_count = 0;
-
-  debug_gateway.SetNodeValueCallback(
-      [this, &mu, &val_callback_count, &a_debug_identity_node_name,
-       &x_debug_identity_node_name, &y_debug_identity_node_name,
-       &debug_identity_tensor_vals, &callbacks_done,
-       &kConcurrentRuns](const string& node_name, const int output_slot,
-                         const Tensor& tensor_value, const bool is_ref) {
-        mutex_lock l(mu);
-
-        if (node_name == a_debug_identity_node_name && output_slot == 0) {
-          debug_identity_tensor_vals["a"] = tensor_value;
-          val_callback_count++;
-        } else if (node_name == x_debug_identity_node_name &&
-                   output_slot == 0) {
-          // output_slot == 0 carries the debug signal.
-          debug_identity_tensor_vals["x"] = tensor_value;
-          val_callback_count++;
-        } else if (node_name == y_debug_identity_node_name &&
-                   output_slot == 0) {
-          debug_identity_tensor_vals["y"] = tensor_value;
-          val_callback_count++;
-        }
-
-        // Set the notification once we have the value from the callbacks from
-        // all the concurrent Run() calls.
-        if (val_callback_count == kConcurrentRuns &&
-            !callbacks_done.HasBeenNotified()) {
-          callbacks_done.Notify();
-        }
-      });
-
-  int run_counter = 0;
-  mutex run_lock;
-
-  // Function to be executed concurrently.
-  auto fn = [this, &run_lock, &run_counter, &session, output_names,
-             target_nodes, &debug_identity]() {
-    // Create unique debug tensor watch options for each of the concurrent
-    // run calls.
-    RunOptions run_opts;
-    run_opts.set_output_partition_graphs(true);
-
-    DebugTensorWatch* tensor_watch_opts =
-        run_opts.mutable_debug_options()->add_debug_tensor_watch_opts();
-    tensor_watch_opts->set_output_slot(0);
-    tensor_watch_opts->add_debug_ops(debug_identity);
-
-    {
-      // Let the concurrent runs watch different tensors.
-
-      mutex_lock l(run_lock);
-
-      if (run_counter == 0) {
-        // Let the 1st concurrent run watch a.
-        tensor_watch_opts->set_node_name(a_);
-      } else if (run_counter == 1) {
-        // Let the 2nd concurrent run watch x.
-        tensor_watch_opts->set_node_name(x_);
-      } else if (run_counter == 2) {
-        // Let the 3rd concurrent run watch y.
-        tensor_watch_opts->set_node_name(y_);
-      }
-
-      run_counter++;
-    }
-
-    // Run the graph.
-    RunMetadata run_metadata;
-    std::vector<std::pair<string, Tensor>> inputs;
-    std::vector<Tensor> outputs;
-    Status s = session->Run(run_opts, inputs, output_names, target_nodes,
-                            &outputs, &run_metadata);
-    TF_ASSERT_OK(s);
-
-    ASSERT_EQ(1, run_metadata.partition_graphs().size());
-
-    ASSERT_EQ(1, outputs.size());
-    ASSERT_TRUE(outputs[0].IsInitialized());
-    ASSERT_EQ(TensorShape({2, 1}), outputs[0].shape());
-    auto mat = outputs[0].matrix<float>();
-    EXPECT_FLOAT_EQ(5.0, mat(0, 0));
-    EXPECT_FLOAT_EQ(-1.0, mat(1, 0));
-  };
-
-  for (int i = 0; i < kConcurrentRuns; ++i) {
-    tp->Schedule(fn);
-  }
-
-  // Wait for the debug callbacks to finish.
-  callbacks_done.WaitForNotification();
-
-  // Wait for the concurrent functions with Run() calls to finish.
-  delete tp;
-
-  {
-    mutex_lock l(mu);
-
-    ASSERT_EQ(kConcurrentRuns, val_callback_count);
-    ASSERT_EQ(kConcurrentRuns, debug_identity_tensor_vals.size());
-
-    ASSERT_EQ(TensorShape({2, 2}), debug_identity_tensor_vals["a"].shape());
-    auto a_mat_identity = debug_identity_tensor_vals["a"].matrix<float>();
-    ASSERT_EQ(3.0, a_mat_identity(0, 0));
-    ASSERT_EQ(2.0, a_mat_identity(0, 1));
-    ASSERT_EQ(-1.0, a_mat_identity(1, 0));
-    ASSERT_EQ(0.0, a_mat_identity(1, 1));
-
-    ASSERT_EQ(TensorShape({2, 1}), debug_identity_tensor_vals["x"].shape());
-    auto x_mat_identity = debug_identity_tensor_vals["x"].matrix<float>();
-    ASSERT_EQ(1.0, x_mat_identity(0, 0));
-    ASSERT_EQ(1.0, x_mat_identity(1, 0));
-
-    ASSERT_EQ(TensorShape({2, 1}), debug_identity_tensor_vals["y"].shape());
-    auto y_mat_identity = debug_identity_tensor_vals["y"].matrix<float>();
-    ASSERT_EQ(5.0, y_mat_identity(0, 0));
-    ASSERT_EQ(-1.0, y_mat_identity(1, 0));
-  }
-}
-#endif
-
-class SessionDebugOutputSlotWithoutOutgoingEdgeTest : public ::testing::Test {
- public:
-  void Initialize() {
-    Graph graph(OpRegistry::Global());
-
-#if GOOGLE_CUDA
-    const string kDeviceName = "/job:localhost/replica:0/task:0/device:GPU:0";
-#elif defined(TENSORFLOW_USE_SYCL)
-    const string kDeviceName = "/job:localhost/replica:0/task:0/device:SYCL:0";
-#else
-    const string kDeviceName = "/job:localhost/replica:0/task:0/device:CPU:0";
-#endif
-
-    Tensor a_tensor(DT_FLOAT, TensorShape({1, 1}));
-    test::FillValues<float>(&a_tensor, {42.0});
-    Node* a = test::graph::Constant(&graph, a_tensor);
-    a->set_assigned_device_name(kDeviceName);
-
-    Node* c = test::graph::Constant(&graph, a_tensor);
-    c->set_assigned_device_name(kDeviceName);
-    c_ = c->name();
-
-    // Node c will be executed only because of the control edge from c to y.
-    // Its output slot (slot 0) does not have an outgoing edge. This test
-    // verifies that the debugger can watch that slot properly.
-    Node* y = test::graph::NoOp(&graph, {c});
-    y->set_assigned_device_name(kDeviceName);
-    y_ = y->name();
-
-    test::graph::ToGraphDef(&graph, &def_);
-  }
-
-  string c_;
-  string y_;
-  GraphDef def_;
-};
-
-TEST_F(SessionDebugOutputSlotWithoutOutgoingEdgeTest,
-       WatchSlotWithoutOutgoingEdge) {
-  Initialize();
-  auto session = CreateSession();
-  ASSERT_TRUE(session != nullptr);
-
-  DebugGateway debug_gateway(session.get());
-
-  // Supply completion and value callbacks
-  mutex mu;
-
-  string debug_identity_node_name = DebugNodeInserter::GetDebugNodeName(
-      strings::StrCat(c_, ":", 0), 0, "DebugIdentity");
-
-  Notification callbacks_done;
-
-  std::vector<Tensor> debug_identity_tensor_vals;
-  debug_gateway.SetNodeValueCallback(
-      [this, &mu, &callbacks_done, &debug_identity_node_name,
-       &debug_identity_tensor_vals](
-          const string& node_name, const int output_slot,
-          const Tensor& tensor_value, const bool is_ref) {
-        mutex_lock l(mu);
-
-        if (node_name == debug_identity_node_name && output_slot == 0) {
-          debug_identity_tensor_vals.push_back(tensor_value);
-
-          if (!callbacks_done.HasBeenNotified()) {
-            callbacks_done.Notify();
-          }
-        }
-      });
-
-  // Add DebugIdentity watch on c:0, which does not have an outgoing edge.
-  RunOptions run_opts;
-  run_opts.set_output_partition_graphs(true);
-
-  DebugTensorWatch* tensor_watch_opts =
-      run_opts.mutable_debug_options()->add_debug_tensor_watch_opts();
-  tensor_watch_opts->set_node_name(c_);
-  tensor_watch_opts->set_output_slot(0);
-  tensor_watch_opts->add_debug_ops("DebugIdentity");
-
-  TF_ASSERT_OK(session->Create(def_));
-
-  // Invoke Session::Run() on y.
-  std::vector<std::pair<string, Tensor>> inputs;
-  std::vector<string> output_names;
-  std::vector<string> target_nodes = {y_};
-  std::vector<Tensor> outputs;
-
-  RunMetadata run_metadata;
-  Status s = session->Run(run_opts, inputs, output_names, target_nodes,
-                          &outputs, &run_metadata);
-  TF_ASSERT_OK(s);
-
-  // Wait for callbacks to complete.
-  callbacks_done.WaitForNotification();
-
-  // Assert that DebugIdentity node watching the control edge has been run.
-  ASSERT_EQ(1, debug_identity_tensor_vals.size());
-  auto mat_identity = debug_identity_tensor_vals[0].matrix<float>();
-  ASSERT_EQ(42.0, mat_identity(0, 0));
-}
-
-class SessionDebugVariableTest : public ::testing::Test {
- public:
-  void Initialize() {
-    Graph graph(OpRegistry::Global());
-
-#if GOOGLE_CUDA
-    const string kDeviceName = "/job:localhost/replica:0/task:0/device:GPU:0";
-#elif defined(TENSORFLOW_USE_SYCL)
-    const string kDeviceName = "/job:localhost/replica:0/task:0/device:SYCL:0";
-#else
-    const string kDeviceName = "/job:localhost/replica:0/task:0/device:CPU:0";
-#endif
-
-    // Define variable node.
-    var_node_name_ = "var";
-    Node* var =
-        test::graph::Var(&graph, DT_FLOAT, TensorShape({3}), var_node_name_);
-    var->set_assigned_device_name(kDeviceName);
-
-    // Define the initial value and the initial-value node.
-    Tensor nan_nan_seven(DT_FLOAT, TensorShape({3}));
-    nan_nan_seven.flat<float>()(0) = std::numeric_limits<float>::quiet_NaN();
-    nan_nan_seven.flat<float>()(1) = std::numeric_limits<float>::quiet_NaN();
-    nan_nan_seven.flat<float>()(2) = 7.0;
-
-    init_val_node_name_ = "init_val";
-    Node* init_val =
-        test::graph::Constant(&graph, nan_nan_seven, init_val_node_name_);
-    init_val->set_assigned_device_name(kDeviceName);
-
-    // Define node for variable value initialization
-    Node* init = test::graph::Assign(&graph, var, init_val);
-    init->set_assigned_device_name(kDeviceName);
-    init_node_name_ = init->name();
-
-    // Define new value node
-    Tensor nan_eight_eight(DT_FLOAT, TensorShape({3}));
-    nan_eight_eight.flat<float>()(0) = std::numeric_limits<float>::quiet_NaN();
-    nan_eight_eight.flat<float>()(1) = 8.0;
-    nan_eight_eight.flat<float>()(2) = 8.0;
-
-    Node* new_val = test::graph::Constant(&graph, nan_eight_eight);
-    new_val->set_assigned_device_name(kDeviceName);
-    new_val_node_name_ = new_val->name();
-
-    // Define node for assigning new value
-    Node* assign = test::graph::Assign(&graph, var, new_val);
-    assign->set_assigned_device_name(kDeviceName);
-    assign_node_name_ = assign->name();
-
-    test::graph::ToGraphDef(&graph, &def_);
-  }
-
-  string var_node_name_;
-  string init_val_node_name_;
-  string init_node_name_;
-  string new_val_node_name_;
-  string assign_node_name_;
-  GraphDef def_;
-};
-
-TEST_F(SessionDebugVariableTest, WatchUninitializedVariableWithDebugOps) {
-  Initialize();
-  auto session = CreateSession();
-  ASSERT_TRUE(session != nullptr);
-
-  DebugGateway debug_gateway(session.get());
-
-  TF_ASSERT_OK(session->Create(def_));
-
-  // Set up DebugTensorWatch for an uninitialized tensor (in node var).
-  RunOptions run_opts;
-  const string debug_identity = "DebugIdentity";
-  DebugTensorWatch* tensor_watch_opts =
-      run_opts.mutable_debug_options()->add_debug_tensor_watch_opts();
-  tensor_watch_opts->set_node_name(var_node_name_);
-  tensor_watch_opts->set_output_slot(0);
-  tensor_watch_opts->add_debug_ops(debug_identity);
-
-  // Expected name of the inserted debug node
-  string debug_identity_node_name = DebugNodeInserter::GetDebugNodeName(
-      strings::StrCat(var_node_name_, ":", 0), 0, debug_identity);
-
-  // Supply completion and value callbacks
-  mutex mu;
-  // Names of the debug nodes that have completed execution
-  std::vector<string> completed_debug_nodes;
-
-  Notification callbacks_done;
-  debug_gateway.SetNodeCompletionCallback(
-      [this, &mu, &debug_identity_node_name, &completed_debug_nodes,
-       &callbacks_done](const string& node_name, const bool any_output) {
-        mutex_lock l(mu);
-        if (any_output && (node_name == debug_identity_node_name)) {
-          completed_debug_nodes.push_back(node_name);
-        }
-      });
-
-  std::vector<Tensor> debug_identity_tensor_vals;
-
-  debug_gateway.SetNodeValueCallback(
-      [this, &mu, &debug_identity_node_name, &debug_identity_tensor_vals,
-       &callbacks_done](const string& node_name, const int output_slot,
-                        const Tensor& tensor_value, const bool is_ref) {
-        mutex_lock l(mu);
-        if (node_name == debug_identity_node_name && output_slot == 0) {
-          // output_slot == 0 carries the debug signal. Same below.
-          debug_identity_tensor_vals.push_back(tensor_value);
-        }
-
-        // Set the notification once we have the value from the target node.
-        if (node_name == init_node_name_ && !callbacks_done.HasBeenNotified()) {
-          callbacks_done.Notify();
-        }
-      });
-
-  // First run the initialization op
-  std::vector<std::pair<string, Tensor>> inputs_init;
-  std::vector<Tensor> outputs_init;
-
-  RunMetadata run_metadata;
-  Status s = session->Run(run_opts, inputs_init, {init_node_name_}, {},
-                          &outputs_init, &run_metadata);
-  TF_ASSERT_OK(s);
-
-  callbacks_done.WaitForNotification();
-
-  ASSERT_EQ(1, completed_debug_nodes.size());
-  ASSERT_EQ(
-      1, std::count(completed_debug_nodes.begin(), completed_debug_nodes.end(),
-                    debug_identity_node_name));
-
-  // Assert the output reflects the uninitialized nature of var's tensor.
-  ASSERT_EQ(1, debug_identity_tensor_vals.size());
-  ASSERT_FALSE(debug_identity_tensor_vals[0].IsInitialized());
-  ASSERT_EQ(DT_FLOAT, debug_identity_tensor_vals[0].dtype());
-  ASSERT_EQ(TensorShape({3}), debug_identity_tensor_vals[0].shape());
-}
-
-TEST_F(SessionDebugVariableTest, VariableAssignWithDebugOps) {
-  // The variable's initial value contains two NaNs
-  Initialize();
-  auto session = CreateSession();
-  ASSERT_TRUE(session != nullptr);
-
-  DebugGateway debug_gateway(session.get());
-
-  TF_ASSERT_OK(session->Create(def_));
-
-  // First run the initialization op
-  std::vector<std::pair<string, Tensor>> inputs_init;
-  std::vector<Tensor> outputs_init;
-  Status s = session->Run(inputs_init, {init_node_name_}, {}, &outputs_init);
-  TF_ASSERT_OK(s);
-
-  // Create debug tensor watch options with two ref-type debug ops:
-  // DebugIdentity and DebugNanCount
-  RunOptions run_opts;
-  run_opts.set_output_partition_graphs(true);
-  const string debug_identity = "DebugIdentity";
-  const string debug_nan_count = "DebugNanCount";
-  DebugTensorWatch* tensor_watch_opts =
-      run_opts.mutable_debug_options()->add_debug_tensor_watch_opts();
-  tensor_watch_opts->set_node_name(var_node_name_);
-  tensor_watch_opts->set_output_slot(0);
-  tensor_watch_opts->add_debug_ops(debug_identity);
-  tensor_watch_opts->add_debug_ops(debug_nan_count);
-
-  char tempdir_template[] = "/tmp/tfdbg_XXXXXX";
-  string temp_dir(mkdtemp(tempdir_template));
-  tensor_watch_opts->add_debug_urls(strings::StrCat("file://", temp_dir));
-
-  // Expected name of the inserted debug node
-  string debug_identity_node_name = DebugNodeInserter::GetDebugNodeName(
-      strings::StrCat(var_node_name_, ":", 0), 0, debug_identity);
-  string debug_nan_count_node_name = DebugNodeInserter::GetDebugNodeName(
-      strings::StrCat(var_node_name_, ":", 0), 1, debug_nan_count);
-
-  // Supply completion and value callbacks
-  mutex mu;
-  // Names of the debug nodes that have completed execution
-  std::vector<string> completed_debug_nodes;
-
-  Notification callbacks_done;
-  debug_gateway.SetNodeCompletionCallback(
-      [this, &mu, &debug_identity_node_name, &debug_nan_count_node_name,
-       &completed_debug_nodes,
-       &callbacks_done](const string& node_name, const bool any_output) {
-        mutex_lock l(mu);
-        if (any_output && (node_name == debug_identity_node_name ||
-                           node_name == debug_nan_count_node_name)) {
-          completed_debug_nodes.push_back(node_name);
-        }
-      });
-
-  std::vector<Tensor> debug_identity_tensor_vals;
-  std::vector<Tensor> debug_nan_count_tensor_vals;
-
-  debug_gateway.SetNodeValueCallback(
-      [this, &mu, &debug_identity_node_name, &debug_nan_count_node_name,
-       &debug_identity_tensor_vals, &debug_nan_count_tensor_vals,
-       &callbacks_done](const string& node_name, const int output_slot,
-                        const Tensor& tensor_value, const bool is_ref) {
-        mutex_lock l(mu);
-        if (node_name == debug_identity_node_name && output_slot == 0) {
-          // output_slot == 0 carries the debug signal. Same below.
-          debug_identity_tensor_vals.push_back(tensor_value);
-        } else if (node_name == debug_nan_count_node_name && output_slot == 0) {
-          debug_nan_count_tensor_vals.push_back(tensor_value);
-        }
-
-        // Set the notification once we have the value from the target node.
-        if (node_name == assign_node_name_ &&
-            !callbacks_done.HasBeenNotified()) {
-          callbacks_done.Notify();
-        }
-      });
-
-  // Request the assign node both as a fetched output and as a run target.
-  std::vector<std::pair<string, Tensor>> inputs;
-  std::vector<string> output_names = {assign_node_name_ + ":0"};
-  std::vector<string> target_nodes = {assign_node_name_};
-  std::vector<Tensor> outputs;
-
-  // Run with RunOptions that has tensor watches
-  RunMetadata run_metadata;
-  s = session->Run(run_opts, inputs, output_names, target_nodes, &outputs,
-                   &run_metadata);
-  TF_ASSERT_OK(s);
-
-#if GOOGLE_CUDA
-  ASSERT_EQ(2, run_metadata.partition_graphs().size());
-#elif defined(TENSORFLOW_USE_SYCL)
-  ASSERT_EQ(2, run_metadata.partition_graphs().size());
-#else
-  ASSERT_EQ(1, run_metadata.partition_graphs().size());
-#endif
-
-  // Wait for callbacks to complete.
-  callbacks_done.WaitForNotification();
-
-  // Verify that the update has happened properly.
-  ASSERT_EQ(1, outputs.size());
-  ASSERT_TRUE(std::isnan(outputs[0].vec<float>()(0)));
-  ASSERT_EQ(8.0, outputs[0].vec<float>()(1));  // Expect new value
-  ASSERT_EQ(8.0, outputs[0].vec<float>()(2));  // Expect new value
-
-  // Verify that each of the two debug nodes has completed exactly once.
-  ASSERT_EQ(2, completed_debug_nodes.size());
-  ASSERT_EQ(
-      1, std::count(completed_debug_nodes.begin(), completed_debug_nodes.end(),
-                    debug_identity_node_name));
-  ASSERT_EQ(
-      1, std::count(completed_debug_nodes.begin(), completed_debug_nodes.end(),
-                    debug_nan_count_node_name));
-
-  // Verify that the values from the ref identity node reflect the value
-  // before the new assign.
-  ASSERT_EQ(1, debug_identity_tensor_vals.size());
-
-  auto vec_identity = debug_identity_tensor_vals[0].vec<float>();
-  ASSERT_TRUE(std::isnan(vec_identity(0)));
-  ASSERT_TRUE(std::isnan(vec_identity(1)));
-  ASSERT_EQ(7.0, vec_identity(2));
-
-  // Verify that the output from the NaN-count debug node indicates exactly
-  // two NaNs, i.e., reflecting the value before the new assign.
-  ASSERT_EQ(1, debug_nan_count_tensor_vals.size());
-  ASSERT_EQ(2, debug_nan_count_tensor_vals[0].scalar<int64>()());
-}
-
-#if defined(GOOGLE_CUDA) || defined(TENSORFLOW_USE_SYCL)
-class SessionDebugGPUSwitchTest : public ::testing::Test {
- public:
-  void Initialize() {
-    Graph graph(OpRegistry::Global());
-
-#ifdef GOOGLE_CUDA
-    const string kDeviceName = "/job:localhost/replica:0/task:0/device:GPU:0";
-#elif TENSORFLOW_USE_SYCL
-    const string kDeviceName = "/job:localhost/replica:0/task:0/device:SYCL:0";
-#endif
-
-    Tensor vb(DT_BOOL, TensorShape({}));
-    vb.scalar<bool>()() = true;
-    Tensor vi(DT_INT64, TensorShape({}));
-    vi.scalar<int64>()() = 42;
-    // So vi is expected to be forwarded to the second output port of sw.
-
-    Node* pred = test::graph::Constant(&graph, vb);
-    pred->set_assigned_device_name(kDeviceName);
-    pred_node_name_ = pred->name();
-
-    Node* value = test::graph::Constant(&graph, vi);
-    value->set_assigned_device_name(kDeviceName);
-    value_node_name_ = value->name();
-
-    Node* sw = test::graph::Switch(&graph, value, pred);
-    sw->set_assigned_device_name(kDeviceName);
-    sw_node_name_ = sw->name();
-
-    Node* z = test::graph::Identity(&graph, sw, 1);
-    z->set_assigned_device_name(kDeviceName);
-    z_node_name_ = z->name();
-
-    test::graph::ToGraphDef(&graph, &def_);
-  }
-
-  string pred_node_name_;
-  string value_node_name_;
-  string sw_node_name_;
-  string z_node_name_;
-  GraphDef def_;
-};
-
-// Test for debug-watching tensors marked as HOST_MEMORY on GPU.
-TEST_F(SessionDebugGPUSwitchTest, RunSwitchWithHostMemoryDebugOp) {
-  Initialize();
-  auto session = CreateSession();
-  ASSERT_TRUE(session != nullptr);
-
-  DebugGateway debug_gateway(session.get());
-
-  RunOptions run_opts;
-  run_opts.set_output_partition_graphs(true);
-  // This is the name of the boolean tensor fed as pred to the Switch node.
-  // On GPU, this edge is HOST_MEMORY.
-  const string watched_tensor = strings::StrCat(pred_node_name_, "/_1");
-
-  const string debug_identity = "DebugIdentity";
-  DebugTensorWatch* tensor_watch_opts =
-      run_opts.mutable_debug_options()->add_debug_tensor_watch_opts();
-  tensor_watch_opts->set_node_name(watched_tensor);
-  tensor_watch_opts->set_output_slot(0);
-  tensor_watch_opts->add_debug_ops(debug_identity);
-
-  // Expected name of the inserted debug node
-  string debug_identity_node_name = DebugNodeInserter::GetDebugNodeName(
-      strings::StrCat(watched_tensor, ":", 0), 0, debug_identity);
-
-  // Supply completion and value callbacks
-  mutex mu;
-  // Completed nodes with and without outputs
-  std::vector<string> completed_nodes_w_outputs;
-  std::vector<string> completed_nodes_wo_outputs;
-
-  Notification callbacks_done;
-  debug_gateway.SetNodeCompletionCallback(
-      [&mu, &completed_nodes_w_outputs, &completed_nodes_wo_outputs](
-          const string& node_name, const bool any_output) {
-        mutex_lock l(mu);
-        if (any_output) {
-          completed_nodes_w_outputs.push_back(node_name);
-        } else {
-          completed_nodes_wo_outputs.push_back(node_name);
-        }
-      });
-
-  std::vector<Tensor> debug_identity_tensor_vals;
-
-  debug_gateway.SetNodeValueCallback(
-      [this, &mu, &debug_identity_node_name, &debug_identity_tensor_vals,
-       &callbacks_done](const string& node_name, const int output_slot,
-                        const Tensor& tensor_value, const bool is_ref) {
-        mutex_lock l(mu);
-        if (node_name == debug_identity_node_name && output_slot == 0) {
-          debug_identity_tensor_vals.push_back(tensor_value);
-        }
-
-        // Set the notification once we have the value from the target node.
-        if (node_name == z_node_name_ && !callbacks_done.HasBeenNotified()) {
-          callbacks_done.Notify();
-        }
-      });
-
-  TF_ASSERT_OK(session->Create(def_));
-
-  std::vector<std::pair<string, Tensor>> inputs;
-
-  // Request the z node both as a fetched output and as a run target.
-  std::vector<string> output_names = {z_node_name_ + ":0"};
-  std::vector<string> target_nodes = {z_node_name_};
-  std::vector<Tensor> outputs;
-
-  RunMetadata run_metadata;
-  Status s = session->Run(run_opts, inputs, output_names, target_nodes,
-                          &outputs, &run_metadata);
-  TF_ASSERT_OK(s);
-
-  ASSERT_EQ(2, run_metadata.partition_graphs().size());
-
-  // Wait for callbacks to complete.
-  callbacks_done.WaitForNotification();
-
-  ASSERT_EQ(1, debug_identity_tensor_vals.size());
-  ASSERT_TRUE(debug_identity_tensor_vals[0].scalar<bool>()());
-}
-#endif  // defined(GOOGLE_CUDA) || defined(TENSORFLOW_USE_SYCL)
-
-}  // end namespace
-}  // end namespace tensorflow
diff --git a/tensorflow/core/distributed_runtime/graph_mgr.cc b/tensorflow/core/distributed_runtime/graph_mgr.cc
index e2f13df..6c14603 100644
--- a/tensorflow/core/distributed_runtime/graph_mgr.cc
+++ b/tensorflow/core/distributed_runtime/graph_mgr.cc
@@ -261,7 +261,7 @@
     optimizer.Optimize(lib, worker_env_->env, params.device, &subgraph,
                        /*shape_map=*/nullptr);
 
-    // EXPERIMENTAL: tfdbg inserts debug nodes (i.e., probes) to the graph.
+    // TensorFlow Debugger (tfdbg) inserts debug nodes in the graph.
     if (!debug_options.debug_tensor_watch_opts().empty()) {
       TF_RETURN_IF_ERROR(DecorateAndPublishGraphForDebug(
           debug_options, subgraph.get(), params.device));
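
For context, these watches reach graph_mgr through RunOptions.debug_options,
which callers populate before Session::Run(); the (now deleted) DebugGateway
tests above show the full pattern. A minimal sketch, with an illustrative
node name:

    RunOptions run_opts;
    DebugTensorWatch* watch =
        run_opts.mutable_debug_options()->add_debug_tensor_watch_opts();
    watch->set_node_name("some_node");      // hypothetical node to watch
    watch->set_output_slot(0);              // watch the node's output slot 0
    watch->add_debug_ops("DebugIdentity");  // debug probe inserted by tfdbg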
diff --git a/tensorflow/core/framework/op_kernel.h b/tensorflow/core/framework/op_kernel.h
index 6c4c3a2..d9fe42f 100644
--- a/tensorflow/core/framework/op_kernel.h
+++ b/tensorflow/core/framework/op_kernel.h
@@ -1044,7 +1044,6 @@
   // For control flow.
   FrameAndIter frame_iter() const { return params_->frame_iter; }
   bool is_input_dead() const { return params_->is_input_dead; }
-  bool* is_output_dead() { return &is_output_dead_; }
 
   // May be used, e.g., to get GPU handles, etc.
   // TODO(tucker): Add example usage.
@@ -1143,8 +1142,6 @@
   // Constructed only if <params->record_tensor_accesses>.
   ManualConstructor<UniqueTensorReferences> referenced_tensors_ GUARDED_BY(mu_);
 
-  bool is_output_dead_ = false;
-
   // The following data members are only used when allocation tracking is
   // enabled.
   mutable mutex stats_mu_;
diff --git a/tensorflow/core/kernels/ctc_loss_op.cc b/tensorflow/core/kernels/ctc_loss_op.cc
index b38d838..fb375ee 100644
--- a/tensorflow/core/kernels/ctc_loss_op.cc
+++ b/tensorflow/core/kernels/ctc_loss_op.cc
@@ -100,8 +100,10 @@
 
     TensorShape labels_shape({batch_size, max_label_len});
     std::vector<int64> order{0, 1};
-    sparse::SparseTensor labels_sp(*labels_indices, *labels_values,
-                                   labels_shape, order);
+    sparse::SparseTensor labels_sp;
+    OP_REQUIRES_OK(
+        ctx, sparse::SparseTensor::Create(*labels_indices, *labels_values,
+                                          labels_shape, order, &labels_sp));
 
     Status labels_sp_valid = labels_sp.IndicesValid();
     OP_REQUIRES(ctx, labels_sp_valid.ok(),
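
The recurring pattern in this change replaces the CHECK-failing
sparse::SparseTensor constructor with the Status-returning
SparseTensor::Create() factory, so malformed inputs become recoverable
errors rather than process crashes. A minimal standalone sketch of the new
pattern (the helper name below is hypothetical):

    #include <numeric>
    #include <vector>

    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/framework/tensor_shape.h"
    #include "tensorflow/core/lib/core/errors.h"
    #include "tensorflow/core/util/sparse/sparse_tensor.h"

    namespace tensorflow {

    Status BuildSparseTensor(const Tensor& indices, const Tensor& values,
                             const TensorShape& shape,
                             sparse::SparseTensor* result) {
      // Assume row-major dimension order, as the kernels in this change do.
      std::vector<int64> order(shape.dims());
      std::iota(order.begin(), order.end(), 0);
      // Create() validates its arguments and reports problems via Status
      // instead of crashing inside the constructor.
      TF_RETURN_IF_ERROR(
          sparse::SparseTensor::Create(indices, values, shape, order, result));
      // Callers that need valid indices still check them explicitly.
      return result->IndicesValid();
    }

    }  // namespace tensorflow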
diff --git a/tensorflow/core/kernels/data/optimize_dataset_op.cc b/tensorflow/core/kernels/data/optimize_dataset_op.cc
index 81be691..276f5f8 100644
--- a/tensorflow/core/kernels/data/optimize_dataset_op.cc
+++ b/tensorflow/core/kernels/data/optimize_dataset_op.cc
@@ -53,23 +53,30 @@
     OP_REQUIRES_OK(
         ctx, ParseVectorArgument<string>(ctx, "optimizations", &optimizations));
     Dataset* dataset =
-        new Dataset(ctx, optimizations, output_types_, output_shapes_);
-    OP_REQUIRES_OK(ctx, dataset->Optimize(ctx, input));
+        new Dataset(ctx, input, optimizations, output_types_, output_shapes_);
+    OP_REQUIRES_OK(ctx, dataset->Optimize(ctx));
     *output = dataset;
   }
 
  private:
   class Dataset : public GraphDatasetBase {
    public:
-    Dataset(OpKernelContext* ctx, const std::vector<string>& optimizations,
+    Dataset(OpKernelContext* ctx, const DatasetBase* input,
+            const std::vector<string>& optimizations,
             const DataTypeVector& output_types,
             const std::vector<PartialTensorShape>& output_shapes)
         : GraphDatasetBase(ctx),
+          input_(input),
           optimizations_(optimizations),
           output_types_(output_types),
-          output_shapes_(output_shapes) {}
+          output_shapes_(output_shapes) {
+      input_->Ref();
+    }
 
-    ~Dataset() override { input_->Unref(); }
+    ~Dataset() override {
+      input_->Unref();
+      optimized_input_->Unref();
+    }
 
     std::unique_ptr<IteratorBase> MakeIteratorInternal(
         const string& prefix) const override {
@@ -77,15 +84,17 @@
           new Iterator({this, strings::StrCat(prefix, "::Optimize")}));
     }
 
-    Status Optimize(OpKernelContext* ctx, const DatasetBase* input) {
+    Status Optimize(OpKernelContext* ctx) {
       GraphDefBuilder b;
       DatasetGraphDefBuilder db(&b);
       Node* input_node = nullptr;
-      TF_RETURN_IF_ERROR(db.AddParentDataset(ctx, input, &input_node));
+      TF_RETURN_IF_ERROR(db.AddParentDataset(ctx, input_, &input_node));
       string output_node = input_node->name();
       GraphDef graph_def;
       TF_RETURN_IF_ERROR(b.ToGraphDef(&graph_def));
+      VLOG(3) << "Before optimization: " << graph_def.DebugString();
       TF_RETURN_IF_ERROR(ApplyOptimizations(ctx, &graph_def, &output_node));
+      VLOG(3) << "After optimization: " << graph_def.DebugString();
       flib_def_.reset(new FunctionLibraryDefinition(OpRegistry::Global(),
                                                     graph_def.library()));
       Graph graph(OpRegistry::Global());
@@ -94,8 +103,9 @@
       GraphRunner graph_runner(ctx->function_library()->device());
       TF_RETURN_IF_ERROR(graph_runner.Run(&graph, ctx->function_library(), {},
                                           {output_node}, &outputs));
-      TF_RETURN_IF_ERROR(GetDatasetFromVariantTensor(outputs[0], &input_));
-      input_->Ref();
+      TF_RETURN_IF_ERROR(
+          GetDatasetFromVariantTensor(outputs[0], &optimized_input_));
+      optimized_input_->Ref();
       return Status::OK();
     }
 
@@ -127,7 +137,8 @@
           : DatasetIterator<Dataset>(params) {}
 
       Status Initialize(IteratorContext* ctx) override {
-        return dataset()->input_->MakeIterator(ctx, prefix(), &input_impl_);
+        return dataset()->optimized_input_->MakeIterator(ctx, prefix(),
+                                                         &input_impl_);
       }
 
       Status GetNextInternal(IteratorContext* ctx,
@@ -199,6 +210,12 @@
       tensorflow::grappler::VirtualCluster cluster(device_map);
 
       // Run optimizer.
+      if (VLOG_IS_ON(2)) {
+        LOG(INFO) << "Performing the following optimizations:";
+        for (const string& optimization : optimizations_) {
+          LOG(INFO) << "  " << optimization;
+        }
+      }
       TF_RETURN_IF_ERROR(tensorflow::grappler::RunMetaOptimizer(
           *grappler_item, rewriter_config, ctx->device(), &cluster, graph_def));
 
@@ -213,8 +230,9 @@
       return Status::OK();
     }
 
-    DatasetBase* input_;
+    DatasetBase* optimized_input_;
     std::shared_ptr<FunctionLibraryDefinition> flib_def_;
+    const DatasetBase* input_;
     const std::vector<string> optimizations_;
     const DataTypeVector output_types_;
     const std::vector<PartialTensorShape> output_shapes_;
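
The ownership fix above is worth spelling out: the dataset now takes a
reference on its input in the constructor and releases both the input and
the optimized dataset in the destructor, so every raw DatasetBase* it stores
stays alive for the dataset's whole lifetime. A minimal sketch of that
Ref()/Unref() pairing (HeldDataset is a hypothetical holder, not a class
from this change):

    // Pins a DatasetBase for the holder's lifetime.
    class HeldDataset {
     public:
      explicit HeldDataset(const DatasetBase* input) : input_(input) {
        input_->Ref();  // take a reference before storing the raw pointer
      }
      ~HeldDataset() { input_->Unref(); }  // release exactly once

     private:
      const DatasetBase* const input_;
    };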
diff --git a/tensorflow/core/kernels/data/slide_dataset_op.cc b/tensorflow/core/kernels/data/slide_dataset_op.cc
index 07cc91f..5765c61 100644
--- a/tensorflow/core/kernels/data/slide_dataset_op.cc
+++ b/tensorflow/core/kernels/data/slide_dataset_op.cc
@@ -12,6 +12,10 @@
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
+
+#include <deque>
+#include <vector>
+
 #include "tensorflow/core/framework/partial_tensor_shape.h"
 #include "tensorflow/core/framework/tensor.h"
 #include "tensorflow/core/kernels/data/dataset.h"
@@ -33,36 +37,40 @@
   void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
                    DatasetBase** output) override {
     int64 window_size = 0;
-    int64 stride = 0;
     OP_REQUIRES_OK(
         ctx, ParseScalarArgument<int64>(ctx, "window_size", &window_size));
-    OP_REQUIRES_OK(ctx, ParseScalarArgument<int64>(ctx, "stride", &stride));
     OP_REQUIRES(
         ctx, window_size > 0,
         errors::InvalidArgument("Window size must be greater than zero."));
-    OP_REQUIRES(ctx, stride > 0,
-                errors::InvalidArgument("Stride must be greater than zero."));
-    if (stride == window_size) {
-      LOG(WARNING) << "stride: " << stride
+    int64 window_shift = 0;
+    OP_REQUIRES_OK(
+        ctx, ParseScalarArgument<int64>(ctx, "window_shift", &window_shift));
+    OP_REQUIRES(
+        ctx, window_shift > 0,
+        errors::InvalidArgument("Window shift must be greater than zero."));
+    int64 window_stride = 0;
+    OP_REQUIRES_OK(
+        ctx, ParseScalarArgument<int64>(ctx, "window_stride", &window_stride));
+    OP_REQUIRES(
+        ctx, window_stride > 0,
+        errors::InvalidArgument("window_stride must be greater than zero."));
+    if (window_size == window_shift && window_stride == 1) {
+      LOG(WARNING) << "window_shift: " << window_shift
                    << " is equal to window_size: " << window_size
-                   << ", to use `batch` instead.";
-    } else if (stride > window_size) {
-      LOG(WARNING) << "stride: " << stride
-                   << " is greater than window_size: " << window_size
-                   << ", you will lose some data.";
+                   << " and window_stride is 1, use `batch` instead.";
     }
-
-    *output = new Dataset(ctx, window_size, stride, input);
+    *output = new Dataset(ctx, window_size, window_shift, window_stride, input);
   }
 
  private:
   class Dataset : public GraphDatasetBase {
    public:
-    Dataset(OpKernelContext* ctx, int64 window_size, int64 stride,
-            const DatasetBase* input)
+    Dataset(OpKernelContext* ctx, int64 window_size, int64 window_shift,
+            int64 window_stride, const DatasetBase* input)
         : GraphDatasetBase(ctx),
           window_size_(window_size),
-          stride_(stride),
+          window_shift_(window_shift),
+          window_stride_(window_stride),
           input_(input) {
       input_->Ref();
 
@@ -91,8 +99,8 @@
     }
 
     string DebugString() const override {
-      return strings::StrCat("SlideDatasetOp(", window_size_, ", ", stride_,
-                             ")::Dataset");
+      return strings::StrCat("SlideDatasetOp(", window_size_, ", ",
+                             window_shift_, ", ", window_stride_, ")::Dataset");
     }
 
    protected:
@@ -101,16 +109,18 @@
       Node* input_graph_node = nullptr;
       TF_RETURN_IF_ERROR(b->AddParentDataset(ctx, input_, &input_graph_node));
       Node* window_size = nullptr;
-      Node* stride = nullptr;
+      Node* window_shift = nullptr;
+      Node* window_stride = nullptr;
       TF_RETURN_IF_ERROR(b->AddScalar(window_size_, &window_size));
-      TF_RETURN_IF_ERROR(b->AddScalar(stride_, &stride));
-      TF_RETURN_IF_ERROR(
-          b->AddDataset(this, {input_graph_node, window_size, stride}, output));
+      TF_RETURN_IF_ERROR(b->AddScalar(window_shift_, &window_shift));
+      TF_RETURN_IF_ERROR(b->AddScalar(window_stride_, &window_stride));
+      TF_RETURN_IF_ERROR(b->AddDataset(
+          this, {input_graph_node, window_size, window_shift, window_stride},
+          output));
       return Status::OK();
     }
 
    private:
-
     class Iterator : public DatasetIterator<Dataset> {
      public:
       explicit Iterator(const Params& params)
@@ -124,7 +134,8 @@
                              std::vector<Tensor>* out_tensors,
                              bool* end_of_sequence) override {
         const int64 window_size = dataset()->window_size_;
-        const int64 stride = dataset()->stride_;
+        const int64 window_shift = dataset()->window_shift_;
+        const int64 window_stride = dataset()->window_stride_;
         std::vector<std::vector<Tensor>> batch_elements;
         {
           mutex_lock l(mu_);
@@ -133,55 +144,51 @@
             return Status::OK();
           }
           batch_elements.reserve(window_size);
-          // Use cache if stride < window_size.
-          if (stride < window_size) {
-            const bool first_call = cache_.empty();
-            if (first_call) {
-              cache_.reserve(window_size);
-            } else {
-              // Reuse cache in the previous iteration.
-              cache_.swap(batch_elements);
-            }
-          }
-          // Fill up with new elements.
+
+          // Fill up buffer.
+          size_t target_size = TargetBufferSize(window_size, window_stride);
           *end_of_sequence = false;
-          for (size_t i = batch_elements.size(); i < window_size && !*end_of_sequence;
-              ++i) {
-            std::vector<Tensor> batch_element_tuple;
-            TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx, &batch_element_tuple,
-                                                    end_of_sequence));
+          for (size_t i = buffer_.size(); i < target_size && !*end_of_sequence;
+               ++i) {
+            std::vector<Tensor> element;
+            TF_RETURN_IF_ERROR(
+                input_impl_->GetNext(ctx, &element, end_of_sequence));
             if (!*end_of_sequence) {
-              batch_elements.push_back(std::move(batch_element_tuple));
+              buffer_.push_back(std::move(element));
             } else {
               input_impl_.reset();
             }
           }
-          // Drop the final smaller blocks.
-          if (batch_elements.size() < window_size) {
+
+          // Drop the final smaller batch.
+          if (buffer_.size() < target_size) {
             DCHECK(*end_of_sequence);
             return Status::OK();
           }
 
-          if (stride < window_size) {
-            // Cache the data used for the next iteration.
-            for (size_t i = stride; i < window_size; ++i) {
-              cache_.emplace_back(batch_elements[i]);
-            }
-          } else if (stride > window_size) {
-            // Drop the data before the next iteration.
-            std::vector<Tensor> batch_element_tuple;
-            for (size_t i = window_size; i < stride && !*end_of_sequence; ++i) {
-              TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx, &batch_element_tuple,
-                                                      end_of_sequence));
-              if (*end_of_sequence) {
+          for (size_t i = 0; i < window_size; ++i) {
+            batch_elements.emplace_back(buffer_[window_stride * i]);
+          }
+
+          // Drop the data before the next iteration.
+          if (window_shift >= buffer_.size()) {
+            for (size_t i = buffer_.size(); i < window_shift; ++i) {
+              bool end_of_input;
+              std::vector<Tensor> element;
+              TF_RETURN_IF_ERROR(
+                  input_impl_->GetNext(ctx, &element, &end_of_input));
+              if (end_of_input) {
                 input_impl_.reset();
+                break;
               }
             }
+            buffer_.clear();
+          } else {
+            buffer_.erase(buffer_.begin(), buffer_.begin() + window_shift);
           }
         }
 
         // Construct output tensors.
-        // Those codes below are copied from batch_dataset_op.cc.
         const size_t num_tuple_components = batch_elements[0].size();
         const int64 num_batch_elements = batch_elements.size();
         for (size_t component_index = 0; component_index < num_tuple_components;
@@ -223,15 +230,15 @@
         } else {
           TF_RETURN_IF_ERROR(SaveParent(writer, input_impl_));
         }
-        // Save cache.
-        TF_RETURN_IF_ERROR(
-            writer->WriteScalar(strings::StrCat("cache_size"), cache_.size()));
-        for (int64 i = 0; i < cache_.size(); i++) {
+        // Save buffer.
+        TF_RETURN_IF_ERROR(writer->WriteScalar(strings::StrCat("buffer_size"),
+                                               buffer_.size()));
+        for (int64 i = 0; i < buffer_.size(); i++) {
           TF_RETURN_IF_ERROR(writer->WriteScalar(
-              strings::StrCat("cache[", i, "]_size"), cache_[i].size()));
-          for (int64 j = 0; j < cache_[i].size(); j++) {
+              strings::StrCat("buffer[", i, "]_size"), buffer_[i].size()));
+          for (int64 j = 0; j < buffer_[i].size(); j++) {
             TF_RETURN_IF_ERROR(writer->WriteTensor(
-                strings::StrCat("cache[", i, "][", j, "]"), cache_[i][j]));
+                strings::StrCat("buffer[", i, "][", j, "]"), buffer_[i][j]));
           }
         }
         return Status::OK();
@@ -245,32 +252,37 @@
         } else {
           input_impl_.reset();
         }
-        // Restore cache.
-        int64 cache_size;
+        // Restore buffer.
+        int64 buffer_size;
         TF_RETURN_IF_ERROR(
-            reader->ReadScalar(strings::StrCat("cache_size"), &cache_size));
-        cache_.resize(cache_size);
-        for (int64 i = 0; i < cache_size; i++) {
+            reader->ReadScalar(strings::StrCat("buffer_size"), &buffer_size));
+        buffer_.resize(buffer_size);
+        for (int64 i = 0; i < buffer_size; i++) {
           int64 vector_size;
           TF_RETURN_IF_ERROR(reader->ReadScalar(
-              strings::StrCat("cache[", i, "]_size"), &vector_size));
-          cache_[i].resize(vector_size);
+              strings::StrCat("buffer[", i, "]_size"), &vector_size));
+          buffer_[i].resize(vector_size);
           for (int64 j = 0; j < vector_size; j++) {
             TF_RETURN_IF_ERROR(reader->ReadTensor(
-                strings::StrCat("cache[", i, "][", j, "]"), &cache_[i][j]));
+                strings::StrCat("buffer[", i, "][", j, "]"), &buffer_[i][j]));
           }
         }
         return Status::OK();
       }
 
      private:
+      size_t TargetBufferSize(int64 window_size, int64 window_stride) {
+        return (window_size - 1) * window_stride + 1;
+      }
+
       mutex mu_;
-      std::vector<std::vector<Tensor>> cache_ GUARDED_BY(mu_);
+      std::deque<std::vector<Tensor>> buffer_ GUARDED_BY(mu_);
       std::unique_ptr<IteratorBase> input_impl_ GUARDED_BY(mu_);
     };
 
     const int64 window_size_;
-    const int64 stride_;
+    const int64 window_shift_;
+    const int64 window_stride_;
     const DatasetBase* const input_;
     std::vector<PartialTensorShape> output_shapes_;
   };
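
A worked example of the new windowing parameters may help: with
window_size = 3 and window_stride = 2, TargetBufferSize() is
(3 - 1) * 2 + 1 = 5, the window takes the buffered elements at indices
0, 2 and 4, and window_shift then decides how many elements are dropped
before the next window. A standalone sketch of that arithmetic (not code
from this change):

    #include <cstdio>

    int main() {
      const int window_size = 3, window_shift = 1, window_stride = 2;
      // Matches TargetBufferSize() above: the last picked index is
      // (window_size - 1) * window_stride, so the buffer needs one more.
      const int target = (window_size - 1) * window_stride + 1;  // == 5
      std::printf("buffer %d elements; window picks indices:", target);
      for (int i = 0; i < window_size; ++i) {
        std::printf(" %d", window_stride * i);  // prints: 0 2 4
      }
      std::printf("; then drop %d element(s)\n", window_shift);
      return 0;
    }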
diff --git a/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc b/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc
index 2604822..b5dff48 100644
--- a/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc
+++ b/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc
@@ -252,10 +252,12 @@
       previous_batch_index = next_batch_index;
     }
     gtl::InlinedVector<int64, 8> std_order(dense_shape->NumElements(), 0);
-    sparse::SparseTensor sparse_tensor(
-        *indices, *values, TensorShape(dense_shape->vec<int64>()), std_order);
-
-    *output = new Dataset<T>(ctx, sparse_tensor);
+    sparse::SparseTensor tensor;
+    OP_REQUIRES_OK(
+        ctx, sparse::SparseTensor::Create(
+                 *indices, *values, TensorShape(dense_shape->vec<int64>()),
+                 std_order, &tensor));
+    *output = new Dataset<T>(ctx, std::move(tensor));
   }
 
  private:
diff --git a/tensorflow/core/kernels/deserialize_sparse_string_op.cc b/tensorflow/core/kernels/deserialize_sparse_string_op.cc
index 6fb07c1..2c13f24 100644
--- a/tensorflow/core/kernels/deserialize_sparse_string_op.cc
+++ b/tensorflow/core/kernels/deserialize_sparse_string_op.cc
@@ -165,7 +165,10 @@
     std::vector<SparseTensor> tensors;
     tensors.reserve(num_sparse_tensors);
     for (int i = 0; i < num_sparse_tensors; ++i) {
-      tensors.emplace_back(indices[i], values[i], shape, std_order);
+      SparseTensor tensor;
+      OP_REQUIRES_OK(context, SparseTensor::Create(indices[i], values[i], shape,
+                                                   std_order, &tensor));
+      tensors.push_back(std::move(tensor));
     }
 
     gtl::optional<SparseTensor> maybe_output;
diff --git a/tensorflow/core/kernels/edit_distance_op.cc b/tensorflow/core/kernels/edit_distance_op.cc
index 20d857c..4aecdc9 100644
--- a/tensorflow/core/kernels/edit_distance_op.cc
+++ b/tensorflow/core/kernels/edit_distance_op.cc
@@ -133,10 +133,15 @@
     std::vector<int64> sorted_order(truth_st_shape.dims());
     std::iota(sorted_order.begin(), sorted_order.end(), 0);
 
-    sparse::SparseTensor hypothesis(*hypothesis_indices, *hypothesis_values,
-                                    hypothesis_st_shape, sorted_order);
-    sparse::SparseTensor truth(*truth_indices, *truth_values, truth_st_shape,
-                               sorted_order);
+    sparse::SparseTensor hypothesis;
+    OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create(
+                            *hypothesis_indices, *hypothesis_values,
+                            hypothesis_st_shape, sorted_order, &hypothesis));
+
+    sparse::SparseTensor truth;
+    OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create(
+                            *truth_indices, *truth_values, truth_st_shape,
+                            sorted_order, &truth));
 
     // Group dims 0, 1, ..., RANK - 1.  The very last dim is assumed
     // to store the variable length sequences.
diff --git a/tensorflow/core/kernels/reshape_util.cc b/tensorflow/core/kernels/reshape_util.cc
index ac301f3..50fdc17 100644
--- a/tensorflow/core/kernels/reshape_util.cc
+++ b/tensorflow/core/kernels/reshape_util.cc
@@ -28,7 +28,6 @@
 #include "tensorflow/core/framework/tensor_util.h"
 #include "tensorflow/core/framework/types.h"
 #include "tensorflow/core/lib/gtl/inlined_vector.h"
-#include "tensorflow/core/util/sparse/sparse_tensor.h"
 
 namespace tensorflow {
 
diff --git a/tensorflow/core/kernels/resource_variable_ops.cc b/tensorflow/core/kernels/resource_variable_ops.cc
index af921e4..c5292e1 100644
--- a/tensorflow/core/kernels/resource_variable_ops.cc
+++ b/tensorflow/core/kernels/resource_variable_ops.cc
@@ -174,25 +174,20 @@
 
 #endif  // GOOGLE_CUDA
 
-class DestroyResourceOp : public OpKernel {
- public:
-  explicit DestroyResourceOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
-    OP_REQUIRES_OK(ctx,
-                   ctx->GetAttr("ignore_lookup_error", &ignore_lookup_error_));
-  }
+DestroyResourceOp::DestroyResourceOp(OpKernelConstruction* ctx)
+    : OpKernel(ctx) {
+  OP_REQUIRES_OK(ctx,
+                 ctx->GetAttr("ignore_lookup_error", &ignore_lookup_error_));
+}
 
-  void Compute(OpKernelContext* ctx) override {
-    const ResourceHandle& p = HandleFromInput(ctx, 0);
-    Status status = DeleteResource(ctx, p);
-    if (ignore_lookup_error_ && errors::IsNotFound(status)) {
-      return;
-    }
-    OP_REQUIRES_OK(ctx, status);
+void DestroyResourceOp::Compute(OpKernelContext* ctx) {
+  const ResourceHandle& p = HandleFromInput(ctx, 0);
+  Status status = DeleteResource(ctx, p);
+  if (ignore_lookup_error_ && errors::IsNotFound(status)) {
+    return;
   }
-
- private:
-  bool ignore_lookup_error_;
-};
+  OP_REQUIRES_OK(ctx, status);
+}
 
 REGISTER_KERNEL_BUILDER(Name("DestroyResourceOp").Device(DEVICE_CPU),
                         DestroyResourceOp);
diff --git a/tensorflow/core/kernels/resource_variable_ops.h b/tensorflow/core/kernels/resource_variable_ops.h
index 8cae5d2..9b60106 100644
--- a/tensorflow/core/kernels/resource_variable_ops.h
+++ b/tensorflow/core/kernels/resource_variable_ops.h
@@ -28,6 +28,15 @@
   DataType dtype_;
 };
 
+class DestroyResourceOp : public OpKernel {
+ public:
+  explicit DestroyResourceOp(OpKernelConstruction* ctx);
+  void Compute(OpKernelContext* ctx) override;
+
+ private:
+  bool ignore_lookup_error_;
+};
+
 }  // namespace tensorflow
 
 #endif  // TENSORFLOW_CORE_KERNELS_RESOURCE_VARIABLE_OPS_H_
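
Moving DestroyResourceOp's declaration into this header presumably lets
other translation units reference the class. A hedged sketch of what that
enables, mirroring the CPU registration visible in the .cc file above (the
GPU registration here is illustrative, not part of this change):

    // In some other .cc file that includes resource_variable_ops.h:
    REGISTER_KERNEL_BUILDER(Name("DestroyResourceOp").Device(DEVICE_GPU),
                            DestroyResourceOp);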
diff --git a/tensorflow/core/kernels/sdca_internal.cc b/tensorflow/core/kernels/sdca_internal.cc
index 3e16ba8..1c071d3 100644
--- a/tensorflow/core/kernels/sdca_internal.cc
+++ b/tensorflow/core/kernels/sdca_internal.cc
@@ -18,6 +18,7 @@
 #include "tensorflow/core/kernels/sdca_internal.h"
 
 #include <limits>
+#include <numeric>
 #include <random>
 
 #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
diff --git a/tensorflow/core/kernels/sdca_internal.h b/tensorflow/core/kernels/sdca_internal.h
index 897c488..1eff4b1 100644
--- a/tensorflow/core/kernels/sdca_internal.h
+++ b/tensorflow/core/kernels/sdca_internal.h
@@ -43,8 +43,6 @@
 #include "tensorflow/core/lib/random/distribution_sampler.h"
 #include "tensorflow/core/lib/strings/stringprintf.h"
 #include "tensorflow/core/util/guarded_philox_random.h"
-#include "tensorflow/core/util/sparse/group_iterator.h"
-#include "tensorflow/core/util/sparse/sparse_tensor.h"
 #include "tensorflow/core/util/work_sharder.h"
 
 namespace tensorflow {
diff --git a/tensorflow/core/kernels/sendrecv_ops.cc b/tensorflow/core/kernels/sendrecv_ops.cc
index 2f87057..6521dcf 100644
--- a/tensorflow/core/kernels/sendrecv_ops.cc
+++ b/tensorflow/core/kernels/sendrecv_ops.cc
@@ -160,7 +160,6 @@
           if (!is_dead) {
             ctx->set_output(0, val);
           }
-          *ctx->is_output_dead() = is_dead;
         }
         done();
       },
diff --git a/tensorflow/core/kernels/serialize_sparse_op.cc b/tensorflow/core/kernels/serialize_sparse_op.cc
index 852cef2..577e327 100644
--- a/tensorflow/core/kernels/serialize_sparse_op.cc
+++ b/tensorflow/core/kernels/serialize_sparse_op.cc
@@ -190,8 +190,10 @@
     TensorShape tensor_input_shape(input_shape->vec<int64>());
     gtl::InlinedVector<int64, 8> std_order(rank);
     std::iota(std_order.begin(), std_order.end(), 0);
-    SparseTensor input_st(*input_indices, *input_values, tensor_input_shape,
-                          std_order);
+    SparseTensor input_st;
+    OP_REQUIRES_OK(context, SparseTensor::Create(*input_indices, *input_values,
+                                                 tensor_input_shape, std_order,
+                                                 &input_st));
 
     auto input_shape_t = input_shape->vec<int64>();
     const int64 N = input_shape_t(0);
diff --git a/tensorflow/core/kernels/set_kernels.cc b/tensorflow/core/kernels/set_kernels.cc
index e836c76..f893d4e 100644
--- a/tensorflow/core/kernels/set_kernels.cc
+++ b/tensorflow/core/kernels/set_kernels.cc
@@ -63,9 +63,9 @@
 
 // Build `SparseTensor` from indices, values, and shape in inputs
 // [base_index, base_index + 3), and validate its rank and indices.
-sparse::SparseTensor SparseTensorFromContext(OpKernelContext* ctx,
-                                             const int32 base_index,
-                                             bool validate_indices) {
+Status SparseTensorFromContext(OpKernelContext* ctx, const int32 base_index,
+                               bool validate_indices,
+                               sparse::SparseTensor* tensor) {
   // Assume row-major order.
   const TensorShape shape =
       TensorShape(ctx->input(base_index + 2).vec<int64>());
@@ -73,13 +73,8 @@
   std::vector<int64> order(shape.dims());
   std::iota(order.begin(), order.end(), 0);
 
-  const sparse::SparseTensor st(ctx->input(base_index),
-                                ctx->input(base_index + 1), shape, order);
-  if (validate_indices) {
-    Status s = st.IndicesValid();
-    if (!s.ok()) ctx->SetStatus(s);
-  }
-  return st;
+  return sparse::SparseTensor::Create(
+      ctx->input(base_index), ctx->input(base_index + 1), shape, order, tensor);
 }
 
 // TODO(ptucker): CheckGroup is just a sanity check on the result of
@@ -253,11 +248,13 @@
 
 template <typename T>
 void SetSizeOp<T>::Compute(OpKernelContext* ctx) {
-  const sparse::SparseTensor set_st =
-      SparseTensorFromContext(ctx, 0, validate_indices_);
+  sparse::SparseTensor set_st;
+  OP_REQUIRES_OK(ctx,
+                 SparseTensorFromContext(ctx, 0, validate_indices_, &set_st));
+  OP_REQUIRES_OK(ctx, set_st.IndicesValid());
 
-  // Output shape is same as input except for last dimension, which reduces to
-  // the set size of values along that dimension.
+  // Output shape is same as input except for last dimension, which reduces
+  // to the set size of values along that dimension.
   ShapeArray output_shape;
   OP_REQUIRES_OK(ctx, GroupShape(set_st.shape(), &output_shape));
   const auto output_strides = Strides(output_shape);
@@ -484,8 +481,10 @@
 template <typename T>
 void SetOperationOp<T>::ComputeDenseToSparse(OpKernelContext* ctx) const {
   const Tensor& set1_t = ctx->input(0);
-  const sparse::SparseTensor set2_st =
-      SparseTensorFromContext(ctx, 1, validate_indices_);
+  sparse::SparseTensor set2_st;
+  OP_REQUIRES_OK(ctx,
+                 SparseTensorFromContext(ctx, 1, validate_indices_, &set2_st));
+  OP_REQUIRES_OK(ctx, set2_st.IndicesValid());
   // The following should stay in sync with `_dense_to_sparse_shape` shape
   // assertions in python/ops/set_ops.py, and `SetShapeFn` for
   // `DenseToSparseSetOperation` in ops/set_ops.cc.
@@ -597,10 +596,16 @@
 // with the same first n-1 dimensions in set1 and set2.
 template <typename T>
 void SetOperationOp<T>::ComputeSparseToSparse(OpKernelContext* ctx) const {
-  const sparse::SparseTensor set1_st =
-      SparseTensorFromContext(ctx, 0, validate_indices_);
-  const sparse::SparseTensor set2_st =
-      SparseTensorFromContext(ctx, 3, validate_indices_);
+  sparse::SparseTensor set1_st;
+  OP_REQUIRES_OK(ctx,
+                 SparseTensorFromContext(ctx, 0, validate_indices_, &set1_st));
+  OP_REQUIRES_OK(ctx, set1_st.IndicesValid());
+
+  sparse::SparseTensor set2_st;
+  OP_REQUIRES_OK(ctx,
+                 SparseTensorFromContext(ctx, 3, validate_indices_, &set2_st));
+  OP_REQUIRES_OK(ctx, set2_st.IndicesValid());
+
   // The following should stay in sync with `_sparse_to_sparse_shape` shape
   // assertions in python/ops/set_ops.py, and `SetShapeFn` for
   // `SparseToSparseSetOperation` in ops/set_ops.cc.
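For orientation, every call site in this file now follows the same construct-then-validate idiom. A minimal sketch of the pattern, assuming a kernel context `ctx` and the member `validate_indices_` as in the kernels above:

sparse::SparseTensor st;
// Construction failures (wrong index dtype, non-matrix indices, rank
// mismatch) now surface as a Status instead of a fatal CHECK.
OP_REQUIRES_OK(ctx, SparseTensorFromContext(ctx, /*base_index=*/0,
                                            validate_indices_, &st));
// Index validation is explicit at the call site instead of hidden inside
// the helper.
OP_REQUIRES_OK(ctx, st.IndicesValid());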
diff --git a/tensorflow/core/kernels/sparse_concat_op.cc b/tensorflow/core/kernels/sparse_concat_op.cc
index f813794..3b2a0cb 100644
--- a/tensorflow/core/kernels/sparse_concat_op.cc
+++ b/tensorflow/core/kernels/sparse_concat_op.cc
@@ -124,9 +124,12 @@
     std::vector<sparse::SparseTensor> sp_inputs;
     for (int i = 0; i < N; ++i) {
       const TensorShape current_shape(shapes[i].vec<int64>());
-      sp_inputs.emplace_back(tensor::DeepCopy(inds[i]),
-                             tensor::DeepCopy(vals[i]), current_shape,
-                             std_order);
+      sparse::SparseTensor tensor;
+      OP_REQUIRES_OK(context,
+                     sparse::SparseTensor::Create(
+                         tensor::DeepCopy(inds[i]), tensor::DeepCopy(vals[i]),
+                         current_shape, std_order, &tensor));
+      sp_inputs.push_back(std::move(tensor));
       sp_inputs[i].Reorder<T>(concat_order);
     }
 
diff --git a/tensorflow/core/kernels/sparse_reduce_op.cc b/tensorflow/core/kernels/sparse_reduce_op.cc
index 9e60791..a465564 100644
--- a/tensorflow/core/kernels/sparse_reduce_op.cc
+++ b/tensorflow/core/kernels/sparse_reduce_op.cc
@@ -172,8 +172,10 @@
     // making deep copies here.  Remove this if/when we change Reorder()'s
     // semantics.
     const auto shape_vec = shape_t->vec<int64>();
-    SparseTensor sp(tensor::DeepCopy(*indices_t), tensor::DeepCopy(*values_t),
-                    TensorShape(shape_vec));
+    SparseTensor sp;
+    OP_REQUIRES_OK(ctx, SparseTensor::Create(
+        tensor::DeepCopy(*indices_t), tensor::DeepCopy(*values_t),
+        TensorShape(shape_vec), &sp));
     ReduceDetails reduction = SparseTensorReduceHelper(
         sp, reduction_axes_t->flat<int32>(), keep_dims_);
 
@@ -260,8 +262,10 @@
 
     OP_REQUIRES_OK(ctx, ValidateInputs(shape_t, reduction_axes_t));
 
-    SparseTensor sp(tensor::DeepCopy(*indices_t), tensor::DeepCopy(*values_t),
-                    TensorShape(shape_t->vec<int64>()));
+    SparseTensor sp;
+    OP_REQUIRES_OK(ctx, SparseTensor::Create(
+        tensor::DeepCopy(*indices_t), tensor::DeepCopy(*values_t),
+        TensorShape(shape_t->vec<int64>()), &sp));
     ReduceDetails reduction = SparseTensorReduceHelper(
         sp, reduction_axes_t->flat<int32>(), keep_dims_);
 
diff --git a/tensorflow/core/kernels/sparse_reorder_op.cc b/tensorflow/core/kernels/sparse_reorder_op.cc
index d1373fe..6f90658 100644
--- a/tensorflow/core/kernels/sparse_reorder_op.cc
+++ b/tensorflow/core/kernels/sparse_reorder_op.cc
@@ -60,16 +60,21 @@
     std::iota(std_order.begin(), std_order.end(), 0);
 
     // Check if the sparse tensor is already ordered correctly
-    sparse::SparseTensor input_sp(input_ind, input_val, input_shape, std_order);
+    sparse::SparseTensor input_sp;
+    OP_REQUIRES_OK(
+        context, sparse::SparseTensor::Create(input_ind, input_val, input_shape,
+                                              std_order, &input_sp));
 
     if (input_sp.IndicesValid().ok()) {
       context->set_output(0, input_sp.indices());
       context->set_output(1, input_sp.values());
     } else {
       // Deep-copy the input Tensors, then reorder in-place
-      sparse::SparseTensor reordered_sp(tensor::DeepCopy(input_ind),
-                                        tensor::DeepCopy(input_val),
-                                        input_shape);
+      sparse::SparseTensor reordered_sp;
+      OP_REQUIRES_OK(context,
+                     sparse::SparseTensor::Create(tensor::DeepCopy(input_ind),
+                                                  tensor::DeepCopy(input_val),
+                                                  input_shape, &reordered_sp));
       reordered_sp.Reorder<T>(std_order);
       context->set_output(0, reordered_sp.indices());
       context->set_output(1, reordered_sp.values());
diff --git a/tensorflow/core/kernels/sparse_slice_grad_op.cc b/tensorflow/core/kernels/sparse_slice_grad_op.cc
index 90a39ed..f92b641 100644
--- a/tensorflow/core/kernels/sparse_slice_grad_op.cc
+++ b/tensorflow/core/kernels/sparse_slice_grad_op.cc
@@ -18,7 +18,6 @@
 #include "tensorflow/core/framework/tensor.h"
 #include "tensorflow/core/framework/tensor_util.h"
 #include "tensorflow/core/framework/types.h"
-#include "tensorflow/core/util/sparse/sparse_tensor.h"
 
 namespace tensorflow {
 
diff --git a/tensorflow/core/kernels/sparse_slice_op.cc b/tensorflow/core/kernels/sparse_slice_op.cc
index 10dc208..6aaf4fd 100644
--- a/tensorflow/core/kernels/sparse_slice_op.cc
+++ b/tensorflow/core/kernels/sparse_slice_op.cc
@@ -66,8 +66,11 @@
                     "Expected size to be a vector of length ", input_dims,
                     " but got length ", input_size.NumElements()));
 
-    sparse::SparseTensor sparse_tensor(input_indices, input_values,
-                                       TensorShape(input_shape.vec<int64>()));
+    sparse::SparseTensor sparse_tensor;
+    OP_REQUIRES_OK(context,
+                   sparse::SparseTensor::Create(
+                       input_indices, input_values,
+                       TensorShape(input_shape.vec<int64>()), &sparse_tensor));
 
     const gtl::ArraySlice<int64> start(input_start.flat<int64>().data(),
                                        input_dims);
diff --git a/tensorflow/core/kernels/sparse_softmax_op.cc b/tensorflow/core/kernels/sparse_softmax_op.cc
index 444a5f6..dc3119b 100644
--- a/tensorflow/core/kernels/sparse_softmax_op.cc
+++ b/tensorflow/core/kernels/sparse_softmax_op.cc
@@ -69,8 +69,11 @@
 
     const int nnz = static_cast<int>(indices_t->dim_size(0));
     const int rank = static_cast<int>(indices_t->dim_size(1));
-    SparseTensor st(tensor::DeepCopy(*indices_t), tensor::DeepCopy(*values_t),
-                    TensorShape(shape_t->flat<int64>()));
+    SparseTensor st;
+    OP_REQUIRES_OK(
+        context, SparseTensor::Create(
+                     tensor::DeepCopy(*indices_t), tensor::DeepCopy(*values_t),
+                     TensorShape(shape_t->flat<int64>()), &st));
 
     Tensor *output_values = nullptr;
     OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape({nnz}),
diff --git a/tensorflow/core/kernels/sparse_split_op.cc b/tensorflow/core/kernels/sparse_split_op.cc
index 67dcf05..3d02be4 100644
--- a/tensorflow/core/kernels/sparse_split_op.cc
+++ b/tensorflow/core/kernels/sparse_split_op.cc
@@ -63,10 +63,16 @@
                                 input_shape.vec<int64>()(split_dim), "), got ",
                                 num_split_));
 
-    sparse::SparseTensor sparse_tensor(input_indices, input_values,
-                                       TensorShape(input_shape.vec<int64>()));
-    const std::vector<sparse::SparseTensor> outputs =
-        sparse::SparseTensor::Split<T>(sparse_tensor, split_dim, num_split_);
+    sparse::SparseTensor sparse_tensor;
+    OP_REQUIRES_OK(context,
+                   sparse::SparseTensor::Create(
+                       input_indices, input_values,
+                       TensorShape(input_shape.vec<int64>()), &sparse_tensor));
+
+    std::vector<sparse::SparseTensor> outputs;
+    OP_REQUIRES_OK(context,
+                   sparse::SparseTensor::Split<T>(sparse_tensor, split_dim,
+                                                  num_split_, &outputs));
 
     for (int slice_index = 0; slice_index < num_split_; ++slice_index) {
       context->set_output(slice_index, outputs[slice_index].indices());
diff --git a/tensorflow/core/kernels/sparse_tensors_map_ops.cc b/tensorflow/core/kernels/sparse_tensors_map_ops.cc
index 2aadd92..74fa3a1 100644
--- a/tensorflow/core/kernels/sparse_tensors_map_ops.cc
+++ b/tensorflow/core/kernels/sparse_tensors_map_ops.cc
@@ -93,8 +93,9 @@
         const Tensor* ix = sp_iter->second.indices.AccessTensor(ctx);
         const Tensor* values = sp_iter->second.values.AccessTensor(ctx);
         const auto& shape = sp_iter->second.shape;
-        sparse_tensors->emplace_back(*ix, *values, shape);
-
+        SparseTensor tensor;
+        TF_RETURN_IF_ERROR(SparseTensor::Create(*ix, *values, shape, &tensor));
+        sparse_tensors->push_back(std::move(tensor));
         sp_tensors_.erase(sp_iter);
       }
     }
@@ -195,7 +196,9 @@
                    TensorShapeUtils::MakeShape(input_shape->vec<int64>().data(),
                                                input_shape->NumElements(),
                                                &input_shape_object));
-    SparseTensor st(*input_indices, *input_values, input_shape_object);
+    SparseTensor st;
+    OP_REQUIRES_OK(context, SparseTensor::Create(*input_indices, *input_values,
+                                                 input_shape_object, &st));
     int64 handle;
     OP_REQUIRES_OK(context, map->AddSparseTensor(context, st, &handle));
 
@@ -253,8 +256,10 @@
     TensorShape tensor_input_shape(input_shape->vec<int64>());
     gtl::InlinedVector<int64, 8> std_order(rank);
     std::iota(std_order.begin(), std_order.end(), 0);
-    SparseTensor input_st(*input_indices, *input_values, tensor_input_shape,
-                          std_order);
+    SparseTensor input_st;
+    OP_REQUIRES_OK(context, SparseTensor::Create(*input_indices, *input_values,
+                                                 tensor_input_shape, std_order,
+                                                 &input_st));
 
     auto input_shape_t = input_shape->vec<int64>();
     const int64 N = input_shape_t(0);
@@ -300,7 +305,10 @@
         output_values_t(i) = values(i);
       }
 
-      SparseTensor st_i(output_indices, output_values, output_shape);
+      SparseTensor st_i;
+      OP_REQUIRES_OK(context,
+                     SparseTensor::Create(output_indices, output_values,
+                                          output_shape, &st_i));
       int64 handle;
       OP_REQUIRES_OK(context, map->AddSparseTensor(context, st_i, &handle));
       sparse_handles_t(b) = handle;
@@ -311,7 +319,9 @@
     if (visited.size() < N) {
       Tensor empty_indices(DT_INT64, {0, rank - 1});
       Tensor empty_values(DataTypeToEnum<T>::value, {0});
-      SparseTensor empty_st(empty_indices, empty_values, output_shape);
+      SparseTensor empty_st;
+      OP_REQUIRES_OK(context, SparseTensor::Create(empty_indices, empty_values,
+                                                   output_shape, &empty_st));
 
       for (int64 b = 0; b < N; ++b) {
         // We skipped this batch entry.
@@ -466,13 +476,15 @@
     std::vector<SparseTensor> tensors_to_concat;
     tensors_to_concat.reserve(N);
     for (int i = 0; i < N; ++i) {
-      tensors_to_concat.emplace_back(std::move(indices_to_concat[i]),
-                                     std::move(values_to_concat[i]),
-                                     preconcat_shape, std_order);
+      SparseTensor tensor;
+      OP_REQUIRES_OK(context,
+                     SparseTensor::Create(std::move(indices_to_concat[i]),
+                                          std::move(values_to_concat[i]),
+                                          preconcat_shape, std_order, &tensor));
+      tensors_to_concat.push_back(std::move(tensor));
     }
 
-    SparseTensor output(SparseTensor::Concat<T>(tensors_to_concat));
-
+    auto output = SparseTensor::Concat<T>(tensors_to_concat);
     Tensor final_output_shape(DT_INT64, TensorShape({output.dims()}));
 
     std::copy_n(output.shape().data(), output.dims(),
diff --git a/tensorflow/core/kernels/sparse_to_dense_op.cc b/tensorflow/core/kernels/sparse_to_dense_op.cc
index ba3da21..f79a4d0 100644
--- a/tensorflow/core/kernels/sparse_to_dense_op.cc
+++ b/tensorflow/core/kernels/sparse_to_dense_op.cc
@@ -119,8 +119,10 @@
     // Assume SparseTensor is lexicographically sorted.
     gtl::InlinedVector<int64, 8> order(output->shape().dims());
     std::iota(order.begin(), order.end(), 0);
-    sparse::SparseTensor st(indices_shaped, sparse_values_b, output->shape(),
-                            order);
+    sparse::SparseTensor st;
+    OP_REQUIRES_OK(c,
+                   sparse::SparseTensor::Create(indices_shaped, sparse_values_b,
+                                                output->shape(), order, &st));
 
     if (validate_indices_) {
       OP_REQUIRES_OK(c, st.IndicesValid());
diff --git a/tensorflow/core/ops/compat/ops_history.v1.pbtxt b/tensorflow/core/ops/compat/ops_history.v1.pbtxt
index 6cdd03e..d94fa2c 100644
--- a/tensorflow/core/ops/compat/ops_history.v1.pbtxt
+++ b/tensorflow/core/ops/compat/ops_history.v1.pbtxt
@@ -7681,66 +7681,6 @@
   }
 }
 op {
-  name: "AvgPool"
-  input_arg {
-    name: "value"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "output"
-    type_attr: "T"
-  }
-  attr {
-    name: "ksize"
-    type: "list(int)"
-    has_minimum: true
-    minimum: 4
-  }
-  attr {
-    name: "strides"
-    type: "list(int)"
-    has_minimum: true
-    minimum: 4
-  }
-  attr {
-    name: "padding"
-    type: "string"
-    allowed_values {
-      list {
-        s: "SAME"
-        s: "VALID"
-      }
-    }
-  }
-  attr {
-    name: "data_format"
-    type: "string"
-    default_value {
-      s: "NHWC"
-    }
-    allowed_values {
-      list {
-        s: "NHWC"
-        s: "NCHW"
-        s: "HWNC"
-        s: "HWCN"
-      }
-    }
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_HALF
-        type: DT_BFLOAT16
-        type: DT_FLOAT
-        type: DT_DOUBLE
-      }
-    }
-  }
-}
-op {
   name: "AvgPool3D"
   input_arg {
     name: "input"
@@ -8430,70 +8370,6 @@
   }
 }
 op {
-  name: "AvgPoolGrad"
-  input_arg {
-    name: "orig_input_shape"
-    type: DT_INT32
-  }
-  input_arg {
-    name: "grad"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "output"
-    type_attr: "T"
-  }
-  attr {
-    name: "ksize"
-    type: "list(int)"
-    has_minimum: true
-    minimum: 4
-  }
-  attr {
-    name: "strides"
-    type: "list(int)"
-    has_minimum: true
-    minimum: 4
-  }
-  attr {
-    name: "padding"
-    type: "string"
-    allowed_values {
-      list {
-        s: "SAME"
-        s: "VALID"
-      }
-    }
-  }
-  attr {
-    name: "data_format"
-    type: "string"
-    default_value {
-      s: "NHWC"
-    }
-    allowed_values {
-      list {
-        s: "NHWC"
-        s: "NCHW"
-        s: "HWNC"
-        s: "HWCN"
-      }
-    }
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_HALF
-        type: DT_BFLOAT16
-        type: DT_FLOAT
-        type: DT_DOUBLE
-      }
-    }
-  }
-}
-op {
   name: "Barrier"
   output_arg {
     name: "handle"
@@ -10555,61 +10431,6 @@
   }
 }
 op {
-  name: "BiasAdd"
-  input_arg {
-    name: "value"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "bias"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "output"
-    type_attr: "T"
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_INT64
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-        type: DT_BFLOAT16
-        type: DT_UINT16
-        type: DT_COMPLEX128
-        type: DT_HALF
-        type: DT_UINT32
-        type: DT_UINT64
-      }
-    }
-  }
-  attr {
-    name: "data_format"
-    type: "string"
-    default_value {
-      s: "NHWC"
-    }
-    allowed_values {
-      list {
-        s: "NHWC"
-        s: "NCHW"
-        s: "HWNC"
-        s: "HWCN"
-      }
-    }
-  }
-}
-op {
   name: "BiasAddGrad"
   input_arg {
     name: "out_backprop"
@@ -10802,57 +10623,6 @@
   }
 }
 op {
-  name: "BiasAddGrad"
-  input_arg {
-    name: "out_backprop"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "output"
-    type_attr: "T"
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_INT64
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-        type: DT_BFLOAT16
-        type: DT_UINT16
-        type: DT_COMPLEX128
-        type: DT_HALF
-        type: DT_UINT32
-        type: DT_UINT64
-      }
-    }
-  }
-  attr {
-    name: "data_format"
-    type: "string"
-    default_value {
-      s: "NHWC"
-    }
-    allowed_values {
-      list {
-        s: "NHWC"
-        s: "NCHW"
-        s: "HWNC"
-        s: "HWCN"
-      }
-    }
-  }
-}
-op {
   name: "BiasAddV1"
   input_arg {
     name: "value"
@@ -13457,81 +13227,6 @@
   }
 }
 op {
-  name: "Conv2D"
-  input_arg {
-    name: "input"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "filter"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "output"
-    type_attr: "T"
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_HALF
-        type: DT_BFLOAT16
-        type: DT_FLOAT
-        type: DT_DOUBLE
-      }
-    }
-  }
-  attr {
-    name: "strides"
-    type: "list(int)"
-  }
-  attr {
-    name: "use_cudnn_on_gpu"
-    type: "bool"
-    default_value {
-      b: true
-    }
-  }
-  attr {
-    name: "padding"
-    type: "string"
-    allowed_values {
-      list {
-        s: "SAME"
-        s: "VALID"
-      }
-    }
-  }
-  attr {
-    name: "data_format"
-    type: "string"
-    default_value {
-      s: "NHWC"
-    }
-    allowed_values {
-      list {
-        s: "NHWC"
-        s: "NCHW"
-        s: "HWNC"
-        s: "HWCN"
-      }
-    }
-  }
-  attr {
-    name: "dilations"
-    type: "list(int)"
-    default_value {
-      list {
-        i: 1
-        i: 1
-        i: 1
-        i: 1
-      }
-    }
-  }
-}
-op {
   name: "Conv2DBackpropFilter"
   input_arg {
     name: "input"
@@ -13748,85 +13443,6 @@
   }
 }
 op {
-  name: "Conv2DBackpropFilter"
-  input_arg {
-    name: "input"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "filter_sizes"
-    type: DT_INT32
-  }
-  input_arg {
-    name: "out_backprop"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "output"
-    type_attr: "T"
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_HALF
-        type: DT_BFLOAT16
-        type: DT_FLOAT
-        type: DT_DOUBLE
-      }
-    }
-  }
-  attr {
-    name: "strides"
-    type: "list(int)"
-  }
-  attr {
-    name: "use_cudnn_on_gpu"
-    type: "bool"
-    default_value {
-      b: true
-    }
-  }
-  attr {
-    name: "padding"
-    type: "string"
-    allowed_values {
-      list {
-        s: "SAME"
-        s: "VALID"
-      }
-    }
-  }
-  attr {
-    name: "data_format"
-    type: "string"
-    default_value {
-      s: "NHWC"
-    }
-    allowed_values {
-      list {
-        s: "NHWC"
-        s: "NCHW"
-        s: "HWNC"
-        s: "HWCN"
-      }
-    }
-  }
-  attr {
-    name: "dilations"
-    type: "list(int)"
-    default_value {
-      list {
-        i: 1
-        i: 1
-        i: 1
-        i: 1
-      }
-    }
-  }
-}
-op {
   name: "Conv2DBackpropInput"
   input_arg {
     name: "input_sizes"
@@ -14043,85 +13659,6 @@
   }
 }
 op {
-  name: "Conv2DBackpropInput"
-  input_arg {
-    name: "input_sizes"
-    type: DT_INT32
-  }
-  input_arg {
-    name: "filter"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "out_backprop"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "output"
-    type_attr: "T"
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_HALF
-        type: DT_BFLOAT16
-        type: DT_FLOAT
-        type: DT_DOUBLE
-      }
-    }
-  }
-  attr {
-    name: "strides"
-    type: "list(int)"
-  }
-  attr {
-    name: "use_cudnn_on_gpu"
-    type: "bool"
-    default_value {
-      b: true
-    }
-  }
-  attr {
-    name: "padding"
-    type: "string"
-    allowed_values {
-      list {
-        s: "SAME"
-        s: "VALID"
-      }
-    }
-  }
-  attr {
-    name: "data_format"
-    type: "string"
-    default_value {
-      s: "NHWC"
-    }
-    allowed_values {
-      list {
-        s: "NHWC"
-        s: "NCHW"
-        s: "HWNC"
-        s: "HWCN"
-      }
-    }
-  }
-  attr {
-    name: "dilations"
-    type: "list(int)"
-    default_value {
-      list {
-        i: 1
-        i: 1
-        i: 1
-        i: 1
-      }
-    }
-  }
-}
-op {
   name: "Conv3D"
   input_arg {
     name: "input"
@@ -18852,74 +18389,6 @@
   }
 }
 op {
-  name: "DepthwiseConv2dNative"
-  input_arg {
-    name: "input"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "filter"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "output"
-    type_attr: "T"
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_HALF
-        type: DT_BFLOAT16
-        type: DT_FLOAT
-        type: DT_DOUBLE
-      }
-    }
-  }
-  attr {
-    name: "strides"
-    type: "list(int)"
-  }
-  attr {
-    name: "padding"
-    type: "string"
-    allowed_values {
-      list {
-        s: "SAME"
-        s: "VALID"
-      }
-    }
-  }
-  attr {
-    name: "data_format"
-    type: "string"
-    default_value {
-      s: "NHWC"
-    }
-    allowed_values {
-      list {
-        s: "NHWC"
-        s: "NCHW"
-        s: "HWNC"
-        s: "HWCN"
-      }
-    }
-  }
-  attr {
-    name: "dilations"
-    type: "list(int)"
-    default_value {
-      list {
-        i: 1
-        i: 1
-        i: 1
-        i: 1
-      }
-    }
-  }
-}
-op {
   name: "DepthwiseConv2dNativeBackpropFilter"
   input_arg {
     name: "input"
@@ -19158,78 +18627,6 @@
   }
 }
 op {
-  name: "DepthwiseConv2dNativeBackpropFilter"
-  input_arg {
-    name: "input"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "filter_sizes"
-    type: DT_INT32
-  }
-  input_arg {
-    name: "out_backprop"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "output"
-    type_attr: "T"
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_HALF
-        type: DT_BFLOAT16
-        type: DT_FLOAT
-        type: DT_DOUBLE
-      }
-    }
-  }
-  attr {
-    name: "strides"
-    type: "list(int)"
-  }
-  attr {
-    name: "padding"
-    type: "string"
-    allowed_values {
-      list {
-        s: "SAME"
-        s: "VALID"
-      }
-    }
-  }
-  attr {
-    name: "data_format"
-    type: "string"
-    default_value {
-      s: "NHWC"
-    }
-    allowed_values {
-      list {
-        s: "NHWC"
-        s: "NCHW"
-        s: "HWNC"
-        s: "HWCN"
-      }
-    }
-  }
-  attr {
-    name: "dilations"
-    type: "list(int)"
-    default_value {
-      list {
-        i: 1
-        i: 1
-        i: 1
-        i: 1
-      }
-    }
-  }
-}
-op {
   name: "DepthwiseConv2dNativeBackpropInput"
   input_arg {
     name: "input_sizes"
@@ -19468,78 +18865,6 @@
   }
 }
 op {
-  name: "DepthwiseConv2dNativeBackpropInput"
-  input_arg {
-    name: "input_sizes"
-    type: DT_INT32
-  }
-  input_arg {
-    name: "filter"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "out_backprop"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "output"
-    type_attr: "T"
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_HALF
-        type: DT_BFLOAT16
-        type: DT_FLOAT
-        type: DT_DOUBLE
-      }
-    }
-  }
-  attr {
-    name: "strides"
-    type: "list(int)"
-  }
-  attr {
-    name: "padding"
-    type: "string"
-    allowed_values {
-      list {
-        s: "SAME"
-        s: "VALID"
-      }
-    }
-  }
-  attr {
-    name: "data_format"
-    type: "string"
-    default_value {
-      s: "NHWC"
-    }
-    allowed_values {
-      list {
-        s: "NHWC"
-        s: "NCHW"
-        s: "HWNC"
-        s: "HWCN"
-      }
-    }
-  }
-  attr {
-    name: "dilations"
-    type: "list(int)"
-    default_value {
-      list {
-        i: 1
-        i: 1
-        i: 1
-        i: 1
-      }
-    }
-  }
-}
-op {
   name: "Dequantize"
   input_arg {
     name: "input"
@@ -32234,85 +31559,6 @@
   }
 }
 op {
-  name: "MaxPoolGrad"
-  input_arg {
-    name: "orig_input"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "orig_output"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "grad"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "output"
-    type_attr: "T"
-  }
-  attr {
-    name: "ksize"
-    type: "list(int)"
-    has_minimum: true
-    minimum: 4
-  }
-  attr {
-    name: "strides"
-    type: "list(int)"
-    has_minimum: true
-    minimum: 4
-  }
-  attr {
-    name: "padding"
-    type: "string"
-    allowed_values {
-      list {
-        s: "SAME"
-        s: "VALID"
-      }
-    }
-  }
-  attr {
-    name: "data_format"
-    type: "string"
-    default_value {
-      s: "NHWC"
-    }
-    allowed_values {
-      list {
-        s: "NHWC"
-        s: "NCHW"
-        s: "HWNC"
-        s: "HWCN"
-      }
-    }
-  }
-  attr {
-    name: "T"
-    type: "type"
-    default_value {
-      type: DT_FLOAT
-    }
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_INT64
-        type: DT_BFLOAT16
-        type: DT_UINT16
-        type: DT_HALF
-        type: DT_UINT32
-        type: DT_UINT64
-      }
-    }
-  }
-}
-op {
   name: "MaxPoolGradGrad"
   input_arg {
     name: "orig_input"
@@ -32605,82 +31851,6 @@
   }
 }
 op {
-  name: "MaxPoolGradGrad"
-  input_arg {
-    name: "orig_input"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "orig_output"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "grad"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "output"
-    type_attr: "T"
-  }
-  attr {
-    name: "ksize"
-    type: "list(int)"
-    has_minimum: true
-    minimum: 4
-  }
-  attr {
-    name: "strides"
-    type: "list(int)"
-    has_minimum: true
-    minimum: 4
-  }
-  attr {
-    name: "padding"
-    type: "string"
-    allowed_values {
-      list {
-        s: "SAME"
-        s: "VALID"
-      }
-    }
-  }
-  attr {
-    name: "data_format"
-    type: "string"
-    default_value {
-      s: "NHWC"
-    }
-    allowed_values {
-      list {
-        s: "NHWC"
-        s: "NCHW"
-        s: "HWNC"
-        s: "HWCN"
-      }
-    }
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_INT64
-        type: DT_BFLOAT16
-        type: DT_UINT16
-        type: DT_HALF
-        type: DT_UINT32
-        type: DT_UINT64
-      }
-    }
-  }
-}
-op {
   name: "MaxPoolGradGradV2"
   input_arg {
     name: "orig_input"
@@ -32957,78 +32127,6 @@
   }
 }
 op {
-  name: "MaxPoolGradGradV2"
-  input_arg {
-    name: "orig_input"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "orig_output"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "grad"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "ksize"
-    type: DT_INT32
-  }
-  input_arg {
-    name: "strides"
-    type: DT_INT32
-  }
-  output_arg {
-    name: "output"
-    type_attr: "T"
-  }
-  attr {
-    name: "padding"
-    type: "string"
-    allowed_values {
-      list {
-        s: "SAME"
-        s: "VALID"
-      }
-    }
-  }
-  attr {
-    name: "data_format"
-    type: "string"
-    default_value {
-      s: "NHWC"
-    }
-    allowed_values {
-      list {
-        s: "NHWC"
-        s: "NCHW"
-        s: "HWNC"
-        s: "HWCN"
-      }
-    }
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_INT64
-        type: DT_BFLOAT16
-        type: DT_UINT16
-        type: DT_HALF
-        type: DT_UINT32
-        type: DT_UINT64
-      }
-    }
-  }
-}
-op {
   name: "MaxPoolGradGradWithArgmax"
   input_arg {
     name: "input"
@@ -33597,81 +32695,6 @@
   }
 }
 op {
-  name: "MaxPoolGradV2"
-  input_arg {
-    name: "orig_input"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "orig_output"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "grad"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "ksize"
-    type: DT_INT32
-  }
-  input_arg {
-    name: "strides"
-    type: DT_INT32
-  }
-  output_arg {
-    name: "output"
-    type_attr: "T"
-  }
-  attr {
-    name: "padding"
-    type: "string"
-    allowed_values {
-      list {
-        s: "SAME"
-        s: "VALID"
-      }
-    }
-  }
-  attr {
-    name: "data_format"
-    type: "string"
-    default_value {
-      s: "NHWC"
-    }
-    allowed_values {
-      list {
-        s: "NHWC"
-        s: "NCHW"
-        s: "HWNC"
-        s: "HWCN"
-      }
-    }
-  }
-  attr {
-    name: "T"
-    type: "type"
-    default_value {
-      type: DT_FLOAT
-    }
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_INT64
-        type: DT_BFLOAT16
-        type: DT_UINT16
-        type: DT_HALF
-        type: DT_UINT32
-        type: DT_UINT64
-      }
-    }
-  }
-}
-op {
   name: "MaxPoolGradWithArgmax"
   input_arg {
     name: "input"
@@ -59148,7 +58171,11 @@
     type: DT_INT64
   }
   input_arg {
-    name: "stride"
+    name: "window_shift"
+    type: DT_INT64
+  }
+  input_arg {
+    name: "window_stride"
     type: DT_INT64
   }
   output_arg {
diff --git a/tensorflow/core/ops/dataset_ops.cc b/tensorflow/core/ops/dataset_ops.cc
index c8bc111..8c83a09 100644
--- a/tensorflow/core/ops/dataset_ops.cc
+++ b/tensorflow/core/ops/dataset_ops.cc
@@ -404,19 +404,20 @@
       return shape_inference::ScalarShape(c);
     });
 
-// TODO(mrry): move SlideDataset to contrib in the future.
 REGISTER_OP("SlideDataset")
     .Input("input_dataset: variant")
     .Input("window_size: int64")
-    .Input("stride: int64")
+    .Input("window_shift: int64")
+    .Input("window_stride: int64")
     .Output("handle: variant")
     .Attr("output_types: list(type) >= 1")
     .Attr("output_shapes: list(shape) >= 1")
     .SetShapeFn([](shape_inference::InferenceContext* c) {
       shape_inference::ShapeHandle unused;
-      // window_size and stride should be scalars.
+      // window_size, window_shift, and window_stride should be scalars.
       TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
       TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
+      TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused));
       return shape_inference::ScalarShape(c);
     });
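To make the renamed inputs concrete, here is an illustrative sketch of the windowing arithmetic the new signature implies. The semantics are assumed from the names (`window_shift` spaces successive window starts, `window_stride` spaces elements within a window); the helper below is hypothetical and only enumerates indices:

#include <cstdint>
#include <vector>

// Hypothetical helper: enumerate the input indices covered by each window of
// a SlideDataset-style transform over n elements. Assumes window_shift > 0.
std::vector<std::vector<int64_t>> SlideWindows(int64_t n, int64_t window_size,
                                               int64_t window_shift,
                                               int64_t window_stride) {
  std::vector<std::vector<int64_t>> windows;
  for (int64_t start = 0;
       start + (window_size - 1) * window_stride < n; start += window_shift) {
    std::vector<int64_t> window;
    for (int64_t i = 0; i < window_size; ++i) {
      window.push_back(start + i * window_stride);  // stride within a window
    }
    windows.push_back(window);  // shift between window starts
  }
  return windows;
}

Under this reading, the old single `stride` input plays the role of `window_shift` (matching the rename in the pbtxt hunks above), and `window_stride` is the newly added intra-window spacing.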
 
diff --git a/tensorflow/core/ops/debug_ops.cc b/tensorflow/core/ops/debug_ops.cc
index 5aebdca..2d9b436 100644
--- a/tensorflow/core/ops/debug_ops.cc
+++ b/tensorflow/core/ops/debug_ops.cc
@@ -20,7 +20,7 @@
 
 namespace tensorflow {
 
-// EXPERIMENTAL: tfdbg debugger-inserted ops.
+// TensorFlow Debugger-inserted ops.
 // These ops are used only internally by tfdbg. There is no API for users to
 // create them directly. Users can create them indirectly by using
 // RunOptions.debug_options during a Session::Run() call. See tfdbg documentation
diff --git a/tensorflow/core/ops/ops.pbtxt b/tensorflow/core/ops/ops.pbtxt
index 9a9f10f..4f24ab4 100644
--- a/tensorflow/core/ops/ops.pbtxt
+++ b/tensorflow/core/ops/ops.pbtxt
@@ -2490,8 +2490,6 @@
       list {
         s: "NHWC"
         s: "NCHW"
-        s: "HWNC"
-        s: "HWCN"
       }
     }
   }
@@ -2674,8 +2672,6 @@
       list {
         s: "NHWC"
         s: "NCHW"
-        s: "HWNC"
-        s: "HWCN"
       }
     }
   }
@@ -3989,8 +3985,6 @@
       list {
         s: "NHWC"
         s: "NCHW"
-        s: "HWNC"
-        s: "HWCN"
       }
     }
   }
@@ -4040,8 +4034,6 @@
       list {
         s: "NHWC"
         s: "NCHW"
-        s: "HWNC"
-        s: "HWCN"
       }
     }
   }
@@ -5730,8 +5722,6 @@
       list {
         s: "NHWC"
         s: "NCHW"
-        s: "HWNC"
-        s: "HWCN"
       }
     }
   }
@@ -5809,8 +5799,6 @@
       list {
         s: "NHWC"
         s: "NCHW"
-        s: "HWNC"
-        s: "HWCN"
       }
     }
   }
@@ -5888,8 +5876,6 @@
       list {
         s: "NHWC"
         s: "NCHW"
-        s: "HWNC"
-        s: "HWCN"
       }
     }
   }
@@ -8592,8 +8578,6 @@
       list {
         s: "NHWC"
         s: "NCHW"
-        s: "HWNC"
-        s: "HWCN"
       }
     }
   }
@@ -8664,8 +8648,6 @@
       list {
         s: "NHWC"
         s: "NCHW"
-        s: "HWNC"
-        s: "HWCN"
       }
     }
   }
@@ -8736,8 +8718,6 @@
       list {
         s: "NHWC"
         s: "NCHW"
-        s: "HWNC"
-        s: "HWCN"
       }
     }
   }
@@ -15509,8 +15489,6 @@
       list {
         s: "NHWC"
         s: "NCHW"
-        s: "HWNC"
-        s: "HWCN"
       }
     }
   }
@@ -15588,8 +15566,6 @@
       list {
         s: "NHWC"
         s: "NCHW"
-        s: "HWNC"
-        s: "HWCN"
       }
     }
   }
@@ -15660,8 +15636,6 @@
       list {
         s: "NHWC"
         s: "NCHW"
-        s: "HWNC"
-        s: "HWCN"
       }
     }
   }
@@ -15803,8 +15777,6 @@
       list {
         s: "NHWC"
         s: "NCHW"
-        s: "HWNC"
-        s: "HWCN"
       }
     }
   }
@@ -27606,7 +27578,11 @@
     type: DT_INT64
   }
   input_arg {
-    name: "stride"
+    name: "window_shift"
+    type: DT_INT64
+  }
+  input_arg {
+    name: "window_stride"
     type: DT_INT64
   }
   output_arg {
diff --git a/tensorflow/core/protobuf/config.proto b/tensorflow/core/protobuf/config.proto
index 5b6aa47..7763946 100644
--- a/tensorflow/core/protobuf/config.proto
+++ b/tensorflow/core/protobuf/config.proto
@@ -389,6 +389,9 @@
   message Experimental {
     // Task name for group resolution.
     string collective_group_leader = 1;
+    // Whether the client will format templated errors. For example, the string:
+    // "The node was defined on ^^node:Foo:${file}:${line}^^".
+    bool client_handles_error_formatting = 2;
   };
 
   Experimental experimental = 16;
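A brief sketch of how a client would opt in, using the C++ classes generated from this proto (standard protobuf setters; nothing here beyond the field added above):

#include "tensorflow/core/protobuf/config.pb.h"

tensorflow::ConfigProto MakeSessionConfig() {
  tensorflow::ConfigProto config;
  // Ask the runtime to leave error templates such as
  // "The node was defined on ^^node:Foo:${file}:${line}^^" unexpanded,
  // so the client can format them itself.
  config.mutable_experimental()->set_client_handles_error_formatting(true);
  return config;
}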
diff --git a/tensorflow/core/protobuf/debug.proto b/tensorflow/core/protobuf/debug.proto
index 499900f..811cf40 100644
--- a/tensorflow/core/protobuf/debug.proto
+++ b/tensorflow/core/protobuf/debug.proto
@@ -7,7 +7,7 @@
 option java_package = "org.tensorflow.framework";
 option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf";
 
-// EXPERIMENTAL. Option for watching a node.
+// Option for watching a node in TensorFlow Debugger (tfdbg).
 message DebugTensorWatch {
   // Name of the node to watch.
   string node_name = 1;
@@ -51,7 +51,7 @@
   bool tolerate_debug_op_creation_failures = 5;
 }
 
-// EXPERIMENTAL. Options for initializing DebuggerState.
+// Options for initializing DebuggerState in TensorFlow Debugger (tfdbg).
 message DebugOptions {
   // Debugging options
   repeated DebugTensorWatch debug_tensor_watch_opts = 4;
diff --git a/tensorflow/core/protobuf/tensorflow_server.proto b/tensorflow/core/protobuf/tensorflow_server.proto
index be25804..2bf48d5 100644
--- a/tensorflow/core/protobuf/tensorflow_server.proto
+++ b/tensorflow/core/protobuf/tensorflow_server.proto
@@ -46,6 +46,6 @@
 
   // The protocol to be used by this server.
   //
-  // Acceptable values include: "grpc".
+  // Acceptable values include: "grpc", "grpc+verbs".
   string protocol = 5;
 }
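For completeness, selecting the newly documented protocol when constructing a ServerDef looks like this (generated setters; cluster configuration elided, and "grpc+verbs" requires a verbs-enabled build):

#include "tensorflow/core/protobuf/tensorflow_server.pb.h"

tensorflow::ServerDef MakeServerDef() {
  tensorflow::ServerDef server_def;
  server_def.set_job_name("worker");
  server_def.set_task_index(0);
  server_def.set_protocol("grpc+verbs");  // or "grpc"
  return server_def;
}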
diff --git a/tensorflow/core/util/sparse/dim_comparator.h b/tensorflow/core/util/sparse/dim_comparator.h
index b773b33..0782e7e 100644
--- a/tensorflow/core/util/sparse/dim_comparator.h
+++ b/tensorflow/core/util/sparse/dim_comparator.h
@@ -13,8 +13,8 @@
 limitations under the License.
 ==============================================================================*/
 
-#ifndef TENSORFLOW_UTIL_SPARSE_DIM_COMPARATOR_H_
-#define TENSORFLOW_UTIL_SPARSE_DIM_COMPARATOR_H_
+#ifndef TENSORFLOW_CORE_UTIL_SPARSE_DIM_COMPARATOR_H_
+#define TENSORFLOW_CORE_UTIL_SPARSE_DIM_COMPARATOR_H_
 
 #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
 #include "tensorflow/core/kernels/bounds_check.h"
@@ -49,11 +49,11 @@
   DimComparator(const TTypes<int64>::Matrix& ix, const VarDimArray& order,
                 const VarDimArray& shape)
       : ix_(ix), order_(order), dims_(shape.size()) {
-    CHECK_GT(order.size(), size_t{0}) << "Must order using at least one index";
-    CHECK_LE(order.size(), shape.size()) << "Can only sort up to dims";
+    DCHECK_GT(order.size(), size_t{0}) << "Must order using at least one index";
+    DCHECK_LE(order.size(), shape.size()) << "Can only sort up to dims";
     for (size_t d = 0; d < order.size(); ++d) {
-      CHECK_GE(order[d], 0);
-      CHECK_LT(order[d], shape.size());
+      DCHECK_GE(order[d], 0);
+      DCHECK_LT(order[d], shape.size());
     }
   }
 
@@ -97,7 +97,7 @@
   FixedDimComparator(const TTypes<int64>::Matrix& ix, const VarDimArray& order,
                      const VarDimArray& shape)
       : DimComparator(ix, order, shape) {
-    CHECK_EQ(order.size(), ORDER_DIM);
+    DCHECK_EQ(order.size(), ORDER_DIM);
   }
   inline bool operator()(const int64 i, const int64 j) const {
     bool value = false;
@@ -116,4 +116,4 @@
 }  // namespace sparse
 }  // namespace tensorflow
 
-#endif  // TENSORFLOW_UTIL_SPARSE_DIM_COMPARATOR_H_
+#endif  // TENSORFLOW_CORE_UTIL_SPARSE_DIM_COMPARATOR_H_
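The CHECK-to-DCHECK swap above changes when these preconditions fire. A minimal illustration of the difference (both macros come from tensorflow/core/platform/logging.h):

#include "tensorflow/core/platform/logging.h"

void Precondition(int n) {
  CHECK_GE(n, 0) << "evaluated in every build; aborts the process on failure";
  DCHECK_GE(n, 0) << "compiled out of optimized builds; debug-only check";
}

This is safe here because untrusted inputs are now rejected earlier by the Status-returning SparseTensor::Create() overloads, leaving the comparator's checks as debug-only sanity assertions on the hot sorting path.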
diff --git a/tensorflow/core/util/sparse/group_iterator.h b/tensorflow/core/util/sparse/group_iterator.h
index fb70318..3fa8cb6 100644
--- a/tensorflow/core/util/sparse/group_iterator.h
+++ b/tensorflow/core/util/sparse/group_iterator.h
@@ -13,8 +13,8 @@
 limitations under the License.
 ==============================================================================*/
 
-#ifndef TENSORFLOW_UTIL_SPARSE_GROUP_ITERATOR_H_
-#define TENSORFLOW_UTIL_SPARSE_GROUP_ITERATOR_H_
+#ifndef TENSORFLOW_CORE_UTIL_SPARSE_GROUP_ITERATOR_H_
+#define TENSORFLOW_CORE_UTIL_SPARSE_GROUP_ITERATOR_H_
 
 #include <vector>
 #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
@@ -143,4 +143,4 @@
 }  // namespace sparse
 }  // namespace tensorflow
 
-#endif  // TENSORFLOW_UTIL_SPARSE_GROUP_ITERATOR_H_
+#endif  // TENSORFLOW_CORE_UTIL_SPARSE_GROUP_ITERATOR_H_
diff --git a/tensorflow/core/util/sparse/sparse_tensor.h b/tensorflow/core/util/sparse/sparse_tensor.h
index 258ee41..0f04b65 100644
--- a/tensorflow/core/util/sparse/sparse_tensor.h
+++ b/tensorflow/core/util/sparse/sparse_tensor.h
@@ -13,8 +13,8 @@
 limitations under the License.
 ==============================================================================*/
 
-#ifndef TENSORFLOW_UTIL_SPARSE_SPARSE_TENSOR_H_
-#define TENSORFLOW_UTIL_SPARSE_SPARSE_TENSOR_H_
+#ifndef TENSORFLOW_CORE_UTIL_SPARSE_SPARSE_TENSOR_H_
+#define TENSORFLOW_CORE_UTIL_SPARSE_SPARSE_TENSOR_H_
 
 #include <limits>
 #include <numeric>
@@ -26,8 +26,10 @@
 #include "tensorflow/core/framework/types.h"
 #include "tensorflow/core/framework/types.pb.h"
 #include "tensorflow/core/kernels/bounds_check.h"
+#include "tensorflow/core/lib/core/errors.h"
 #include "tensorflow/core/lib/core/status.h"
 #include "tensorflow/core/lib/strings/str_util.h"
+#include "tensorflow/core/lib/strings/strcat.h"
 #include "tensorflow/core/platform/logging.h"
 #include "tensorflow/core/platform/types.h"
 #include "tensorflow/core/util/sparse/dim_comparator.h"
@@ -41,32 +43,88 @@
   typedef typename gtl::ArraySlice<int64> VarDimArray;
   typedef typename gtl::InlinedVector<int64, 8> ShapeArray;
 
+  static Status Create(Tensor ix, Tensor vals, const VarDimArray shape,
+                       const VarDimArray order, SparseTensor* result) {
+    if (ix.dtype() != DT_INT64) {
+      return Status(
+          error::INVALID_ARGUMENT,
+          strings::StrCat("indices must be type int64 but got: ", ix.dtype()));
+    }
+    if (!TensorShapeUtils::IsVector(vals.shape())) {
+      return Status(error::INVALID_ARGUMENT,
+                    strings::StrCat("vals must be a vec, but got: ",
+                                    vals.shape().DebugString()));
+    }
+    if (ix.shape().dim_size(0) != vals.shape().dim_size(0)) {
+      return Status(error::INVALID_ARGUMENT,
+                    strings::StrCat("indices and values rows (indexing "
+                                    "dimension) must match. (indices = ",
+                                    ix.shape().dim_size(0), ", values = ",
+                                    vals.shape().dim_size(0), ")"));
+    }
+    int dims;
+    TF_RETURN_IF_ERROR(GetDimsFromIx(ix, &dims));
+    if (order.size() != dims) {
+      return Status(error::INVALID_ARGUMENT,
+                    "Order length must be SparseTensor rank.");
+    }
+    if (shape.size() != dims) {
+      return Status(error::INVALID_ARGUMENT,
+                    "Shape rank must be SparseTensor rank.");
+    }
+
+    *result = SparseTensor(ix, vals, shape, order);
+    return Status();
+  }
+
+  static Status Create(Tensor ix, Tensor vals, const TensorShape& shape,
+                       SparseTensor* result) {
+    return Create(ix, vals, TensorShapeToVector(shape),
+                  UndefinedOrder(TensorShapeToVector(shape)), result);
+  }
+
+  static Status Create(Tensor ix, Tensor vals, const VarDimArray shape,
+                       SparseTensor* result) {
+    return Create(ix, vals, shape, UndefinedOrder(shape), result);
+  }
+
+  static Status Create(Tensor ix, Tensor vals, const TensorShape& shape,
+                       const VarDimArray order, SparseTensor* result) {
+    return Create(ix, vals, TensorShapeToVector(shape), order, result);
+  }
+
+  SparseTensor() : dims_(0) {}
+
+  // DEPRECATED: use Create() functions instead of constructors directly.
   SparseTensor(Tensor ix, Tensor vals, const TensorShape& shape)
       : SparseTensor(ix, vals, TensorShapeToVector(shape),
                      UndefinedOrder(TensorShapeToVector(shape))) {}
 
+  // DEPRECATED: use Create() functions instead of constructors directly.
   SparseTensor(Tensor ix, Tensor vals, const VarDimArray shape)
       : SparseTensor(ix, vals, shape, UndefinedOrder(shape)) {}
 
+  // DEPRECATED: use Create() functions instead of constructors directly.
   SparseTensor(Tensor ix, Tensor vals, const TensorShape& shape,
                const VarDimArray order)
       : SparseTensor(ix, vals, TensorShapeToVector(shape), order) {}
 
+  // DEPRECATED: use Create() functions instead of constructors directly.
   SparseTensor(Tensor ix, Tensor vals, const VarDimArray shape,
                const VarDimArray order)
       : ix_(ix),
         vals_(vals),
         shape_(shape.begin(), shape.end()),
         order_(order.begin(), order.end()),
-        dims_(GetDimsFromIx(ix)) {
-    CHECK_EQ(ix.dtype(), DT_INT64)
+        dims_(UnsafeGetDimsFromIx(ix)) {
+    DCHECK_EQ(ix.dtype(), DT_INT64)
         << "indices must be type int64 but got: " << ix.dtype();
-    CHECK(TensorShapeUtils::IsVector(vals.shape()))
+    DCHECK(TensorShapeUtils::IsVector(vals.shape()))
         << "vals must be a vec, but got: " << vals.shape().DebugString();
-    CHECK_EQ(ix.shape().dim_size(0), vals.shape().dim_size(0))
+    DCHECK_EQ(ix.shape().dim_size(0), vals.shape().dim_size(0))
         << "indices and values rows (indexing dimension) must match.";
-    CHECK_EQ(order.size(), dims_) << "Order length must be SparseTensor rank.";
-    CHECK_EQ(shape.size(), dims_) << "Shape rank must be SparseTensor rank.";
+    DCHECK_EQ(order.size(), dims_) << "Order length must be SparseTensor rank.";
+    DCHECK_EQ(shape.size(), dims_) << "Shape rank must be SparseTensor rank.";
   }
 
   SparseTensor(const SparseTensor& other)
@@ -81,6 +139,16 @@
     vals_ = other.vals_;
     shape_ = other.shape_;
     order_ = other.order_;
+    dims_ = other.dims_;
+    return *this;
+  }
+
+  SparseTensor& operator=(SparseTensor&& other) {
+    ix_ = std::move(other.ix_);
+    vals_ = std::move(other.vals_);
+    shape_ = std::move(other.shape_);
+    order_ = std::move(other.order_);
+    dims_ = std::move(other.dims_);
     return *this;
   }
 
@@ -126,11 +194,11 @@
   //
   // See the README.md in this directory for more usage information.
   GroupIterable group(const VarDimArray& group_ix) const {
-    CHECK_LE(group_ix.size(), dims_);
+    DCHECK_LE(group_ix.size(), dims_);
     for (std::size_t di = 0; di < group_ix.size(); ++di) {
-      CHECK_GE(group_ix[di], 0) << "Group dimension out of range";
-      CHECK_LT(group_ix[di], dims_) << "Group dimension out of range";
-      CHECK_EQ(group_ix[di], order_[di])
+      DCHECK_GE(group_ix[di], 0) << "Group dimension out of range";
+      DCHECK_LT(group_ix[di], dims_) << "Group dimension out of range";
+      DCHECK_EQ(group_ix[di], order_[di])
           << "Group dimension does not match sorted order";
     }
     return GroupIterable(ix_, vals_, dims_, group_ix);
@@ -166,9 +234,16 @@
   // isn't an integer multiple of split_dim, we add one extra dimension for
   // each slice.
   template <typename T>
+  static Status Split(const SparseTensor& tensor, const int split_dim,
+                      const int num_split, std::vector<SparseTensor>* result);
+
+  // DEPRECATED: use the form of Split() that takes an output pointer and
+  // returns a status instead.
+  template <typename T>
   static std::vector<SparseTensor> Split(const SparseTensor& tensor,
                                          const int split_dim,
-                                         const int num_split);
+                                         const int num_split,
+                                         Status* status = nullptr);
 
   // Slice() will slice the input SparseTensor into a SparseTensor based on
   // specified start and size. Both start and size are 1-D array with each
@@ -189,9 +264,18 @@
   }
 
  private:
-  static int GetDimsFromIx(const Tensor& ix) {
-    CHECK(TensorShapeUtils::IsMatrix(ix.shape()))
-        << "indices must be a matrix, but got: " << ix.shape().DebugString();
+  static Status GetDimsFromIx(const Tensor& ix, int* result) {
+    if (!TensorShapeUtils::IsMatrix(ix.shape())) {
+      return Status(error::INVALID_ARGUMENT,
+                    strings::StrCat("indices must be a matrix, but got: ",
+                                    ix.shape().DebugString()));
+    }
+    *result = UnsafeGetDimsFromIx(ix);
+    return Status();
+  }
+
+  static int UnsafeGetDimsFromIx(const Tensor& ix) {
+    DCHECK(TensorShapeUtils::IsMatrix(ix.shape()));
     return ix.dim_size(1);
   }
 
@@ -251,8 +335,8 @@
   // Helper for Split() that returns the slice index.
   static inline int GetSliceIndex(const int dim, const int split_size,
                                   const int residual) {
-    CHECK_GT(split_size, 0);
-    CHECK_GE(dim, 0);
+    DCHECK_GT(split_size, 0);
+    DCHECK_GE(dim, 0);
     if (residual == 0) return dim / split_size;
     const int offset = residual * (split_size + 1);
     if (dim < offset) {
@@ -265,8 +349,8 @@
   // Helper for Split() that returns the dimension in the slice.
   static inline int GetDimensionInSlice(const int dim, const int split_size,
                                         const int residual) {
-    CHECK_GT(split_size, 0);
-    CHECK_GE(dim, 0);
+    DCHECK_GT(split_size, 0);
+    DCHECK_GE(dim, 0);
     if (residual == 0) return dim % split_size;
     const int offset = residual * (split_size + 1);
     if (dim < offset) {
@@ -279,8 +363,8 @@
   // Helper for Split() that returns the shape given a slice index.
   static inline int GetSliceShape(const int slice_index, const int split_size,
                                   const int residual) {
-    CHECK_GT(split_size, 0);
-    CHECK_GE(slice_index, 0);
+    DCHECK_GT(split_size, 0);
+    DCHECK_GE(slice_index, 0);
     if (residual == 0) return split_size;
     if (slice_index < residual) {
       return split_size + 1;
@@ -293,7 +377,7 @@
   Tensor vals_;
   ShapeArray shape_;
   ShapeArray order_;
-  const int dims_;
+  int dims_;
 };
 
 // This operation updates the indices and values Tensor rows, so it is
@@ -301,9 +385,9 @@
 // temporary space.
 template <typename T>
 void SparseTensor::Reorder(const VarDimArray& order) {
-  CHECK_EQ(DataTypeToEnum<T>::v(), dtype())
+  DCHECK_EQ(DataTypeToEnum<T>::v(), dtype())
       << "Reorder requested with the wrong datatype";
-  CHECK_EQ(order.size(), dims_) << "Order length must be SparseTensor rank";
+  DCHECK_EQ(order.size(), dims_) << "Order length must be SparseTensor rank";
   auto ix_t = ix_.matrix<int64>();
   auto vals_t = vals_.vec<T>();
 
@@ -360,13 +444,13 @@
 
 template <typename T>
 bool SparseTensor::ValidateAndInitializeToDense(Tensor* out, bool initialize) {
-  CHECK_EQ(DataTypeToEnum<T>::v(), dtype())
+  DCHECK_EQ(DataTypeToEnum<T>::v(), dtype())
       << "ToDense requested with the wrong datatype";
 
-  CHECK_EQ(out->shape().dims(), dims_)
+  DCHECK_EQ(out->shape().dims(), dims_)
       << "Incompatible dimensions between SparseTensor and output";
 
-  CHECK_EQ(out->dtype(), DataTypeToEnum<T>::v())
+  DCHECK_EQ(out->dtype(), DataTypeToEnum<T>::v())
       << "Output must be type: " << DataTypeToEnum<T>::v()
       << " but got: " << out->dtype();
 
@@ -422,9 +506,9 @@
 template <typename T>
 SparseTensor SparseTensor::Concat(
     const gtl::ArraySlice<SparseTensor>& tensors) {
-  CHECK_GE(tensors.size(), size_t{1}) << "Cannot concat 0 SparseTensors";
+  DCHECK_GE(tensors.size(), size_t{1}) << "Cannot concat 0 SparseTensors";
   const int dims = tensors[0].dims_;
-  CHECK_GE(dims, 1) << "Cannot concat 0-dimensional SparseTensors";
+  DCHECK_GE(dims, 1) << "Cannot concat 0-dimensional SparseTensors";
   auto order_0 = tensors[0].order();
   const int primary_dim = order_0[0];
   ShapeArray final_order(order_0.begin(), order_0.end());
@@ -434,17 +518,17 @@
 
   bool fully_ordered = true;
   for (const SparseTensor& st : tensors) {
-    CHECK_EQ(st.dims_, dims) << "All SparseTensors must have the same rank.";
-    CHECK_EQ(DataTypeToEnum<T>::v(), st.dtype())
+    DCHECK_EQ(st.dims_, dims) << "All SparseTensors must have the same rank.";
+    DCHECK_EQ(DataTypeToEnum<T>::v(), st.dtype())
         << "Concat requested with the wrong data type";
-    CHECK_GE(st.order()[0], 0) << "SparseTensor must be ordered";
-    CHECK_EQ(st.order()[0], primary_dim)
+    DCHECK_GE(st.order()[0], 0) << "SparseTensor must be ordered";
+    DCHECK_EQ(st.order()[0], primary_dim)
         << "All SparseTensors' order[0] must match.  This is the concat dim.";
     if (st.order() != final_order) fully_ordered = false;
     const VarDimArray& st_shape = st.shape();
     for (int d = 0; d < dims - 1; ++d) {
       const int cdim = (d < primary_dim) ? d : d + 1;
-      CHECK_EQ(final_shape[cdim], st_shape[cdim])
+      DCHECK_EQ(final_shape[cdim], st_shape[cdim])
           << "All SparseTensors' shapes must match except on the concat dim.  "
           << "Concat dim: " << primary_dim
           << ", mismatched shape at dim: " << cdim
@@ -494,7 +578,8 @@
 template <typename T>
 std::vector<SparseTensor> SparseTensor::Split(const SparseTensor& input_tensor,
                                               const int split_dim,
-                                              const int num_split) {
+                                              const int num_split,
+                                              Status* status /* = nullptr */) {
   std::vector<Tensor> output_indices;
   std::vector<Tensor> output_values;
   std::vector<TensorShape> output_shapes;
@@ -514,12 +599,22 @@
   const int split_dim_size = input_tensor.shape()[split_dim];
   const int split_size = split_dim_size / num_split;
 
-  CHECK(num_split > 0 && num_split <= split_dim_size) << "num_split must be in "
-                                                         "the interval (0, "
-                                                      << split_dim_size << "]";
-  CHECK(split_dim >= 0 && split_dim < num_dim) << "num_dim must be in "
-                                                  "the interval [0, "
-                                               << num_dim << ")";
+  if (!(num_split > 0 && num_split <= split_dim_size)) {
+    if (status != nullptr) {
+      *status = Status(error::INVALID_ARGUMENT,
+                       strings::StrCat("num_split must be in the interval (0, ",
+                                       split_dim_size, "]"));
+    }
+    return {};
+  }
+  if (!(split_dim >= 0 && split_dim < num_dim)) {
+    if (status != nullptr) {
+      *status = Status(error::INVALID_ARGUMENT,
+                       strings::StrCat("split_dim must be in the interval [0, ",
+                                       num_dim, ")"));
+    }
+    return {};
+  }
 
   const int residual = split_dim_size % num_split;
   for (int i = 0; i < input_tensor.indices().dim_size(0); ++i) {
@@ -559,13 +650,30 @@
   std::vector<SparseTensor> output_tensors;
   output_tensors.reserve(num_split);
   for (int i = 0; i < num_split; ++i) {
-    output_tensors.emplace_back(output_indices[i], output_values[i],
-                                output_shapes[i]);
+    SparseTensor tensor;
+    Status create_status =
+        Create(output_indices[i], output_values[i], output_shapes[i], &tensor);
+    if (!create_status.ok()) {
+      if (status != nullptr) {
+        *status = create_status;
+      }
+      return {};
+    }
+    output_tensors.push_back(std::move(tensor));
   }
   return output_tensors;
 }
 
 template <typename T>
+Status SparseTensor::Split(const SparseTensor& input_tensor,
+                           const int split_dim, const int num_split,
+                           std::vector<SparseTensor>* result) {
+  Status status;
+  *result = Split<T>(input_tensor, split_dim, num_split, &status);
+  return status;
+}
+
+template <typename T>
 SparseTensor SparseTensor::Slice(const SparseTensor& input_tensor,
                                  const gtl::ArraySlice<int64>& start,
                                  const gtl::ArraySlice<int64>& size) {
@@ -643,4 +749,4 @@
 }  // namespace sparse
 }  // namespace tensorflow
 
-#endif  // TENSORFLOW_UTIL_SPARSE_SPARSE_TENSOR_H_
+#endif  // TENSORFLOW_CORE_UTIL_SPARSE_SPARSE_TENSOR_H_
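Putting the new factory and Split APIs together, a minimal usage sketch (the shape, dtype, and split counts are illustrative assumptions; everything else is named in the header above):

#include <vector>

#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/util/sparse/sparse_tensor.h"

tensorflow::Status SplitInTwo(
    const tensorflow::Tensor& indices, const tensorflow::Tensor& values,
    std::vector<tensorflow::sparse::SparseTensor>* out) {
  tensorflow::sparse::SparseTensor st;
  // Wrong index dtype, non-matrix indices, or rank mismatches now come back
  // as INVALID_ARGUMENT instead of crashing in the constructor.
  TF_RETURN_IF_ERROR(tensorflow::sparse::SparseTensor::Create(
      indices, values, tensorflow::TensorShape({4, 6}), &st));
  // Bad split arguments surface the same way through the new overload.
  return tensorflow::sparse::SparseTensor::Split<float>(
      st, /*split_dim=*/0, /*num_split=*/2, out);
}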
diff --git a/tensorflow/core/util/sparse/sparse_tensor_test.cc b/tensorflow/core/util/sparse/sparse_tensor_test.cc
index 85de032..5578e42 100644
--- a/tensorflow/core/util/sparse/sparse_tensor_test.cc
+++ b/tensorflow/core/util/sparse/sparse_tensor_test.cc
@@ -94,9 +94,12 @@
   const int NDIM = 3;
   Tensor ix(DT_INT32, TensorShape({N, NDIM}));
   Tensor vals(DT_STRING, TensorShape({N}));
+  SparseTensor result;
 
-  EXPECT_DEATH(SparseTensor(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2}),
-               "indices must be type int64");
+  EXPECT_EQ(SparseTensor::Create(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2},
+                                 &result)
+                .code(),
+            error::INVALID_ARGUMENT);
 }
 
 TEST(SparseTensorTest, SparseTensorInvalidIndicesShape) {
@@ -104,9 +107,12 @@
   const int NDIM = 3;
   Tensor ix(DT_INT64, TensorShape({N, NDIM, 1}));
   Tensor vals(DT_STRING, TensorShape({N}));
+  SparseTensor result;
 
-  EXPECT_DEATH(SparseTensor(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2}),
-               "indices must be a matrix");
+  EXPECT_EQ(SparseTensor::Create(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2},
+                                 &result)
+                .code(),
+            error::INVALID_ARGUMENT);
 }
 
 TEST(SparseTensorTest, SparseTensorInvalidValues) {
@@ -114,9 +120,12 @@
   const int NDIM = 3;
   Tensor ix(DT_INT64, TensorShape({N, NDIM}));
   Tensor vals(DT_STRING, TensorShape({N, 1}));
+  SparseTensor result;
 
-  EXPECT_DEATH(SparseTensor(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2}),
-               "vals must be a vec");
+  EXPECT_EQ(SparseTensor::Create(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2},
+                                 &result)
+                .code(),
+            error::INVALID_ARGUMENT);
 }
 
 TEST(SparseTensorTest, SparseTensorInvalidN) {
@@ -124,9 +133,12 @@
   const int NDIM = 3;
   Tensor ix(DT_INT64, TensorShape({N, NDIM}));
   Tensor vals(DT_STRING, TensorShape({N - 1}));
+  SparseTensor result;
 
-  EXPECT_DEATH(SparseTensor(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2}),
-               "indices and values rows .* must match");
+  EXPECT_EQ(SparseTensor::Create(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2},
+                                 &result)
+                .code(),
+            error::INVALID_ARGUMENT);
 }
 
 TEST(SparseTensorTest, SparseTensorInvalidOrder) {
@@ -134,18 +146,24 @@
   const int NDIM = 3;
   Tensor ix(DT_INT64, TensorShape({N, NDIM}));
   Tensor vals(DT_STRING, TensorShape({N}));
+  SparseTensor result;
 
-  EXPECT_DEATH(SparseTensor(ix, vals, TensorShape({10, 10, 10}), {0, 1}),
-               "Order length must be SparseTensor rank");
+  EXPECT_EQ(
+      SparseTensor::Create(ix, vals, TensorShape({10, 10, 10}), {0, 1}, &result)
+          .code(),
+      error::INVALID_ARGUMENT);
 }
 TEST(SparseTensorTest, SparseTensorInvalidShape) {
   int N = 5;
   const int NDIM = 3;
   Tensor ix(DT_INT64, TensorShape({N, NDIM}));
   Tensor vals(DT_STRING, TensorShape({N}));
+  SparseTensor result;
 
-  EXPECT_DEATH(SparseTensor(ix, vals, TensorShape({10, 10}), {0, 1, 2}),
-               "Shape rank must be SparseTensor rank");
+  EXPECT_EQ(
+      SparseTensor::Create(ix, vals, TensorShape({10, 10}), {0, 1, 2}, &result)
+          .code(),
+      error::INVALID_ARGUMENT);
 }
 
 TEST(SparseTensorTest, SparseTensorConstruction) {
@@ -169,7 +187,8 @@
 
   TensorShape shape({10, 10, 10});
   std::vector<int64> order{0, 1, 2};
-  SparseTensor st(ix, vals, shape, order);
+  SparseTensor st;
+  TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
   Status st_indices_valid = st.IndicesValid();
   EXPECT_FALSE(st_indices_valid.ok());
   EXPECT_EQ("indices[2] = [2,0,0] is out of order",
@@ -210,7 +229,8 @@
 
   std::vector<int64> shape{10, 10, 10};
   std::vector<int64> order{0, 1, 2};
-  SparseTensor st(ix, vals, shape, order);
+  SparseTensor st;
+  TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
   TF_EXPECT_OK(st.IndicesValid());
   EXPECT_EQ(st.order(), order);
 
@@ -227,7 +247,8 @@
   Tensor ix(DT_INT64, TensorShape({N, NDIM}));
   Tensor vals(DT_STRING, TensorShape({N}));
   TensorShape shape({1000, 1000, 1000, 1000});
-  SparseTensor st(ix, vals, shape);
+  SparseTensor st;
+  TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, &st));
 
   auto ix_t = ix.matrix<int64>();
 
@@ -266,7 +287,8 @@
 
   TensorShape shape({10, 10, 10});
   std::vector<int64> order{0, 1, 2};
-  SparseTensor st(ix, vals, shape, order);
+  SparseTensor st;
+  TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
 
   st.Reorder<string>(order);
   Status st_indices_valid = st.IndicesValid();
@@ -302,7 +324,8 @@
   TensorShape shape({10, 10, 10});
   std::vector<int64> order{0, 1, 2};
 
-  SparseTensor st(ix, vals, shape, order);
+  SparseTensor st;
+  TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
   EXPECT_FALSE(st.IndicesValid().ok());
 
   st.Reorder<string>(order);
@@ -351,7 +374,8 @@
 
   TensorShape shape({4, 4, 5});
   std::vector<int64> order{0, 1, 2};
-  SparseTensor st(ix, vals, shape, order);
+  SparseTensor st;
+  TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
 
   Tensor dense(DT_STRING, TensorShape({4, 4, 5}));
   st.ToDense<string>(&dense);
@@ -390,7 +414,8 @@
 
   TensorShape shape({4, 4, 5});
   std::vector<int64> order{0, 1, 2};
-  SparseTensor st(ix, vals, shape, order);
+  SparseTensor st;
+  TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
 
   Tensor dense(DT_STRING, TensorShape({10, 10, 10}));
   st.ToDense<string>(&dense);
@@ -433,7 +458,8 @@
   TensorShape shape({10, 10, 10});
   std::vector<int64> order{0, 1, 2};
 
-  SparseTensor st(ix, vals, shape, order);
+  SparseTensor st;
+  TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
   st.Reorder<int32>(order);
 
   std::vector<std::vector<int64> > groups;
@@ -521,7 +547,8 @@
   TensorShape shape({10, 10, 10});
   std::vector<int64> order{0, 1, 2};
 
-  SparseTensor st(ix, vals, shape, order);
+  SparseTensor st;
+  TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
   EXPECT_FALSE(st.IndicesValid().ok());
   st.Reorder<string>(order);
   TF_EXPECT_OK(st.IndicesValid());
@@ -551,7 +578,9 @@
 
   // Concat works if non-primary ix is out of order, but output order
   // is not defined
-  SparseTensor st_ooo(ix, vals, shape, {0, 2, 1});  // non-primary ix OOO
+  SparseTensor st_ooo;
+  TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, {0, 2, 1},
+                                    &st_ooo));  // non-primary ix OOO
   SparseTensor conc_ooo = SparseTensor::Concat<string>({st, st, st, st_ooo});
   std::vector<int64> expected_ooo{-1, -1, -1};
   EXPECT_EQ(conc_ooo.order(), expected_ooo);
@@ -584,9 +613,11 @@
   vals.vec<int64>()(2) = 3;
   vals.vec<int64>()(3) = 4;
 
-  SparseTensor st(ids, vals, TensorShape({4, 3}));
+  SparseTensor st;
+  TF_ASSERT_OK(SparseTensor::Create(ids, vals, TensorShape({4, 3}), &st));
 
-  std::vector<SparseTensor> st_list = SparseTensor::Split<int64>(st, 0, 2);
+  std::vector<SparseTensor> st_list;
+  TF_ASSERT_OK(SparseTensor::Split<int64>(st, 0, 2, &st_list));
 
   EXPECT_EQ(st_list.size(), 2);
   auto expected_shape = gtl::InlinedVector<int64, 8>{2, 3};
@@ -633,7 +664,8 @@
   vals.vec<int64>()(2) = 3;
   vals.vec<int64>()(3) = 4;
 
-  SparseTensor st(ids, vals, TensorShape({4, 3}));
+  SparseTensor st;
+  TF_ASSERT_OK(SparseTensor::Create(ids, vals, TensorShape({4, 3}), &st));
 
   std::vector<int64> start(2, 0);
   std::vector<int64> size(2);
@@ -662,7 +694,8 @@
   vals.scalar<int32>()() = 5;
 
   TensorShape shape({});
-  SparseTensor st(ix, vals, shape);
+  SparseTensor st;
+  TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, &st));
 
   Tensor dense(DT_INT32, TensorShape({}));
   st.ToDense<int32>(&dense);
@@ -699,7 +732,8 @@
         ix_t(i, d) = rnd.Rand64() % 1000;
       }
     }
-    SparseTensor st(ix, vals, shape, order);
+    SparseTensor st;
+    TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
 
     testing::StartTiming();
     st.Reorder<float>(reorder);
@@ -740,7 +774,8 @@
         ix_t(i, d) = rnd.Rand64() % 1000;
       }
     }
-    SparseTensor st(ix, vals, shape, order);
+    SparseTensor st;
+    TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
 
     testing::StartTiming();
     st.Reorder<string>(reorder);
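Reviewer note: since a `Status` (unlike a death test) carries an inspectable message, the migrated tests could also assert on the error text. A sketch under the conventions of this file; `CreateReportsShapeRankMismatch` is a hypothetical test name, and the `"Shape rank"` substring assumes `Create` keeps the wording of the old CHECK message:

```c++
TEST(SparseTensorTest, CreateReportsShapeRankMismatch) {
  // 3-column indices paired with a rank-2 shape should be rejected.
  Tensor ix(DT_INT64, TensorShape({5, 3}));
  Tensor vals(DT_STRING, TensorShape({5}));
  SparseTensor st;
  const Status s =
      SparseTensor::Create(ix, vals, TensorShape({10, 10}), {0, 1, 2}, &st);
  EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
  // The Status message can be checked directly, which EXPECT_DEATH could
  // only do via a regex on the crash output.
  EXPECT_TRUE(str_util::StrContains(s.error_message(), "Shape rank"));
}
```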
diff --git a/tensorflow/core/util/tensor_format.cc b/tensorflow/core/util/tensor_format.cc
index 33ab87a..a5f7ecf 100644
--- a/tensorflow/core/util/tensor_format.cc
+++ b/tensorflow/core/util/tensor_format.cc
@@ -18,7 +18,7 @@
 namespace tensorflow {
 
 string GetConvnetDataFormatAttrString() {
-  return "data_format: { 'NHWC', 'NCHW', 'HWNC', 'HWCN' } = 'NHWC' ";
+  return "data_format: { 'NHWC', 'NCHW' } = 'NHWC' ";
 }
 
 string GetConvnet3dDataFormatAttrString() {
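Reviewer note: the narrowed attr string changes what op registrations built on it will accept. A sketch of the effect, using a hypothetical `ExampleConv` op (not part of this change):

```c++
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/util/tensor_format.h"

// With the narrowed attr string, setting data_format to 'HWNC' or 'HWCN' on
// this op now fails attr validation at graph-construction time; only 'NHWC'
// and 'NCHW' remain allowed, with 'NHWC' as the default.
REGISTER_OP("ExampleConv")
    .Input("input: float")
    .Output("output: float")
    .Attr(tensorflow::GetConvnetDataFormatAttrString())
    .SetShapeFn(tensorflow::shape_inference::UnchangedShape);
```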
diff --git a/tensorflow/docs_src/extend/new_data_formats.md b/tensorflow/docs_src/extend/new_data_formats.md
index d1d1f69..abbf479 100644
--- a/tensorflow/docs_src/extend/new_data_formats.md
+++ b/tensorflow/docs_src/extend/new_data_formats.md
@@ -77,18 +77,24 @@
 #include "tensorflow/core/framework/op.h"
 #include "tensorflow/core/framework/shape_inference.h"
 
-namespace tensorflow {
+namespace myproject {
 namespace {
 
-class MyReaderDatasetOp : public DatasetOpKernel {
+using ::tensorflow::DT_STRING;
+using ::tensorflow::PartialTensorShape;
+using ::tensorflow::Status;
+
+class MyReaderDatasetOp : public tensorflow::DatasetOpKernel {
  public:
 
-  MyReaderDatasetOp(OpKernelConstruction* ctx) : DatasetOpKernel(ctx) {
+  MyReaderDatasetOp(tensorflow::OpKernelConstruction* ctx)
+      : DatasetOpKernel(ctx) {
     // Parse and validate any attrs that define the dataset using
     // `ctx->GetAttr()`, and store them in member variables.
   }
 
-  void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override {
+  void MakeDataset(tensorflow::OpKernelContext* ctx,
+                   tensorflow::DatasetBase** output) override {
     // Parse and validate any input tensors that define the dataset using
     // `ctx->input()` or the utility function
     // `ParseScalarArgument<T>(ctx, &arg)`.
@@ -99,14 +105,14 @@
   }
 
  private:
-  class Dataset : public GraphDatasetBase {
+  class Dataset : public tensorflow::GraphDatasetBase {
    public:
-    Dataset(OpKernelContext* ctx) : GraphDatasetBase(ctx) {}
+    Dataset(tensorflow::OpKernelContext* ctx) : GraphDatasetBase(ctx) {}
 
-    std::unique_ptr<IteratorBase> MakeIteratorInternal(
+    std::unique_ptr<tensorflow::IteratorBase> MakeIteratorInternal(
         const tensorflow::string& prefix) const override {
-      return std::unique_ptr<IteratorBase>(
-          new Iterator({this, strings::StrCat(prefix, "::MyReader")}));
+      return std::unique_ptr<tensorflow::IteratorBase>(new Iterator(
+          {this, tensorflow::strings::StrCat(prefix, "::MyReader")}));
     }
 
     // Record structure: Each record is represented by a scalar string tensor.
@@ -114,8 +120,8 @@
     // Dataset elements can have a fixed number of components of different
     // types and shapes; replace the following two methods to customize this
     // aspect of the dataset.
-    const DataTypeVector& output_dtypes() const override {
-      static DataTypeVector* dtypes = new DataTypeVector({DT_STRING});
+    const tensorflow::DataTypeVector& output_dtypes() const override {
+      static auto* const dtypes = new tensorflow::DataTypeVector({DT_STRING});
       return *dtypes;
     }
     const std::vector<PartialTensorShape>& output_shapes() const override {
@@ -132,16 +138,16 @@
     // Implement this method if you want to be able to save and restore
     // instances of this dataset (and any iterators over it).
     Status AsGraphDefInternal(tensorflow::DatasetGraphDefBuilder* b,
-                              Node** output) const override {
+                              tensorflow::Node** output) const override {
       // Construct nodes to represent any of the input tensors from this
       // object's member variables using `b->AddScalar()` and `b->AddVector()`.
-      std::vector<Node*> input_tensors;
+      std::vector<tensorflow::Node*> input_tensors;
       TF_RETURN_IF_ERROR(b->AddDataset(this, input_tensors, output));
       return Status::OK();
     }
 
    private:
-    class Iterator : public DatasetIterator<Dataset> {
+    class Iterator : public tensorflow::DatasetIterator<Dataset> {
      public:
       explicit Iterator(const Params& params)
           : DatasetIterator<Dataset>(params), i_(0) {}
@@ -158,15 +164,15 @@
       //    return `Status::OK()`.
       // 3. If an error occurs, return an error status using one of the helper
       //    functions from "tensorflow/core/lib/core/errors.h".
-      Status GetNextInternal(IteratorContext* ctx,
-                             std::vector<Tensor>* out_tensors,
+      Status GetNextInternal(tensorflow::IteratorContext* ctx,
+                             std::vector<tensorflow::Tensor>* out_tensors,
                              bool* end_of_sequence) override {
         // NOTE: `GetNextInternal()` may be called concurrently, so it is
         // recommended that you protect the iterator state with a mutex.
-        mutex_lock l(mu_);
+        tensorflow::mutex_lock l(mu_);
         if (i_ < 10) {
           // Create a scalar string tensor and add it to the output.
-          Tensor record_tensor(ctx->allocator({}), DT_STRING, {});
+          tensorflow::Tensor record_tensor(ctx->allocator({}), DT_STRING, {});
           record_tensor.scalar<tensorflow::string>()() = "MyReader!";
           out_tensors->emplace_back(std::move(record_tensor));
           ++i_;
@@ -183,20 +189,20 @@
       //
       // Implement these two methods if you want to be able to save and restore
       // instances of this iterator.
-      Status SaveInternal(IteratorStateWriter* writer) override {
-        mutex_lock l(mu_);
+      Status SaveInternal(tensorflow::IteratorStateWriter* writer) override {
+        tensorflow::mutex_lock l(mu_);
         TF_RETURN_IF_ERROR(writer->WriteScalar(full_name("i"), i_));
         return Status::OK();
       }
-      Status RestoreInternal(IteratorContext* ctx,
-                             IteratorStateReader* reader) override {
-        mutex_lock l(mu_);
+      Status RestoreInternal(tensorflow::IteratorContext* ctx,
+                             tensorflow::IteratorStateReader* reader) override {
+        tensorflow::mutex_lock l(mu_);
         TF_RETURN_IF_ERROR(reader->ReadScalar(full_name("i"), &i_));
         return Status::OK();
       }
 
      private:
-      mutex mu_;
+      tensorflow::mutex mu_;
       tensorflow::int64 i_ GUARDED_BY(mu_);
     };
   };
@@ -211,14 +217,14 @@
 REGISTER_OP("MyReaderDataset")
     .Output("handle: variant")
     .SetIsStateful()
-    .SetShapeFn(shape_inference::ScalarShape);
+    .SetShapeFn(tensorflow::shape_inference::ScalarShape);
 
 // Register the kernel implementation for MyReaderDataset.
-REGISTER_KERNEL_BUILDER(Name("MyReaderDataset").Device(DEVICE_CPU),
+REGISTER_KERNEL_BUILDER(Name("MyReaderDataset").Device(tensorflow::DEVICE_CPU),
                         MyReaderDatasetOp);
 
 }  // namespace
-}  // namespace tensorflow
+}  // namespace myproject
 ```
 
 The last step is to build the C++ code and add a Python wrapper. The easiest way
diff --git a/tensorflow/docs_src/guide/autograph.md b/tensorflow/docs_src/guide/autograph.md
new file mode 100644
index 0000000..823e1c6
--- /dev/null
+++ b/tensorflow/docs_src/guide/autograph.md
@@ -0,0 +1,3 @@
+# AutoGraph: Easy control flow for graphs
+
+[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/guide/autograph.ipynb)
diff --git a/tensorflow/docs_src/guide/index.md b/tensorflow/docs_src/guide/index.md
index eefdb9c..f78dfc9 100644
--- a/tensorflow/docs_src/guide/index.md
+++ b/tensorflow/docs_src/guide/index.md
@@ -16,15 +16,12 @@
 
 ## Estimators
 
-* @{$estimators} provides an introduction.
-* @{$premade_estimators}, introduces Estimators for machine learning.
-* @{$custom_estimators}, which demonstrates how to build and train models you
-  design yourself.
-* @{$feature_columns}, which shows how an Estimator can handle a variety of input
-  data types without changes to the model.
-* @{$datasets_for_estimators} describes using tf.data with estimators.
-* @{$checkpoints}, which explains how to save training progress and resume where
-  you left off.
+* @{$estimators}, learn how to use Estimators for machine learning.
+* @{$premade_estimators}, the basics of premade Estimators.
+* @{$checkpoints}, save training progress and resume where you left off.
+* @{$feature_columns}, handle a variety of input data types without changes to the model.
+* @{$datasets_for_estimators}, use `tf.data` to input data.
+* @{$custom_estimators}, write your own Estimator.
 
 ## Accelerators
 
diff --git a/tensorflow/docs_src/guide/leftnav_files b/tensorflow/docs_src/guide/leftnav_files
index 357a2a1..c4e235b 100644
--- a/tensorflow/docs_src/guide/leftnav_files
+++ b/tensorflow/docs_src/guide/leftnav_files
@@ -8,10 +8,10 @@
 ### Estimators
 estimators.md: Introduction to Estimators
 premade_estimators.md
-custom_estimators.md
+checkpoints.md
 feature_columns.md
 datasets_for_estimators.md
-checkpoints.md
+custom_estimators.md
 
 ### Accelerators
 using_gpu.md
@@ -23,6 +23,7 @@
 variables.md
 graphs.md
 saved_model.md
+autograph.md: Control flow
 
 ### ML Concepts
 embedding.md
diff --git a/tensorflow/docs_src/install/index.md b/tensorflow/docs_src/install/index.md
index c2e5a99..55481cc 100644
--- a/tensorflow/docs_src/install/index.md
+++ b/tensorflow/docs_src/install/index.md
@@ -1,36 +1,39 @@
-# Installing TensorFlow
+# Install TensorFlow
 
-We've built and tested TensorFlow on the following 64-bit laptop/desktop
-operating systems:
+Note: Run the [TensorFlow tutorials](../tutorials) in a pre-configured
+[Colab notebook environment](https://colab.research.google.com/notebooks/welcome.ipynb){: .external},
+without installation.
+
+TensorFlow is built and tested on the following 64-bit operating systems:
 
   * macOS 10.12.6 (Sierra) or later.
   * Ubuntu 16.04 or later.
   * Windows 7 or later.
   * Raspbian 9.0 or later.
 
-Although you might be able to install TensorFlow on other laptop or desktop
-systems, we only support (and only fix issues in) the preceding configurations.
+While TensorFlow may work on other systems, we only support—and fix issues in—the
+systems listed above.
 
 The following guides explain how to install a version of TensorFlow
 that enables you to write applications in Python:
 
-  * @{$install_linux$Installing TensorFlow on Ubuntu}
-  * @{$install_mac$Installing TensorFlow on macOS}
-  * @{$install_windows$Installing TensorFlow on Windows}
-  * @{$install_raspbian$Installing TensorFlow on a Raspberry Pi}
-  * @{$install_sources$Installing TensorFlow from Sources}
+  * @{$install_linux$Install TensorFlow on Ubuntu}
+  * @{$install_mac$Install TensorFlow on macOS}
+  * @{$install_windows$Install TensorFlow on Windows}
+  * @{$install_raspbian$Install TensorFlow on a Raspberry Pi}
+  * @{$install_sources$Install TensorFlow from source code}
 
 Many aspects of the Python TensorFlow API changed from version 0.n to 1.0.
 The following guide explains how to migrate older TensorFlow applications
 to Version 1.0:
 
-  * @{$migration$Transitioning to TensorFlow 1.0}
+  * @{$migration$Transition to TensorFlow 1.0}
 
 The following guides explain how to install TensorFlow libraries for use in
 other programming languages. These APIs are aimed at deploying TensorFlow
 models in applications and are not as extensive as the Python APIs.
 
-  * @{$install_java$Installing TensorFlow for Java}
-  * @{$install_c$Installing TensorFlow for C}
-  * @{$install_go$Installing TensorFlow for Go}
+  * @{$install_java$Install TensorFlow for Java}
+  * @{$install_c$Install TensorFlow for C}
+  * @{$install_go$Install TensorFlow for Go}
 
diff --git a/tensorflow/docs_src/install/install_c.md b/tensorflow/docs_src/install/install_c.md
index 2901848..4e1c32f 100644
--- a/tensorflow/docs_src/install/install_c.md
+++ b/tensorflow/docs_src/install/install_c.md
@@ -1,4 +1,4 @@
-# Installing TensorFlow for C
+# Install TensorFlow for C
 
 TensorFlow provides a C API defined in
 [`c_api.h`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/c/c_api.h),
diff --git a/tensorflow/docs_src/install/install_go.md b/tensorflow/docs_src/install/install_go.md
index 2c126df..162a820 100644
--- a/tensorflow/docs_src/install/install_go.md
+++ b/tensorflow/docs_src/install/install_go.md
@@ -1,4 +1,4 @@
-# Installing TensorFlow for Go
+# Install TensorFlow for Go
 
 TensorFlow provides APIs for use in Go programs. These APIs are particularly
 well-suited to loading models created in Python and executing them within
diff --git a/tensorflow/docs_src/install/install_java.md b/tensorflow/docs_src/install/install_java.md
index 692dfc9..c196bb9 100644
--- a/tensorflow/docs_src/install/install_java.md
+++ b/tensorflow/docs_src/install/install_java.md
@@ -1,4 +1,4 @@
-# Installing TensorFlow for Java
+# Install TensorFlow for Java
 
 TensorFlow provides APIs for use in Java programs. These APIs are particularly
 well-suited to loading models created in Python and executing them within a
diff --git a/tensorflow/docs_src/install/install_linux.md b/tensorflow/docs_src/install/install_linux.md
index f21c073..7534d0f 100644
--- a/tensorflow/docs_src/install/install_linux.md
+++ b/tensorflow/docs_src/install/install_linux.md
@@ -1,38 +1,38 @@
-# Installing TensorFlow on Ubuntu
+# Install TensorFlow on Ubuntu
 
 This guide explains how to install TensorFlow on Ubuntu Linux. While these
-instructions may work on other Linux variants, they are tested and supported with
-the following system requirements:
+instructions may work on other Linux variants, they are tested and supported
+with the following system requirements:
 
-* 64-bit desktops or laptops
-* Ubuntu 16.04 or higher
-
+*   64-bit desktops or laptops
+*   Ubuntu 16.04 or higher
 
 ## Choose which TensorFlow to install
 
 The following TensorFlow variants are available for installation:
 
-* __TensorFlow with CPU support only__. If your system does not have a
-  NVIDIA®&nbsp;GPU, you must install this version. This version of TensorFlow is
-  usually easier to install, so even if you have an NVIDIA GPU, we recommend
-  installing this version first.
-* __TensorFlow with GPU support__. TensorFlow programs usually run much faster on
-  a GPU instead of a CPU. If you run performance-critical applications and your
-  system has an NVIDIA®&nbsp;GPU that meets the prerequisites, you should install
-  this version. See [TensorFlow GPU support](#NVIDIARequirements) for details.
-
+*   __TensorFlow with CPU support only__. If your system does not have an
+    NVIDIA®&nbsp;GPU, you must install this version. This version of TensorFlow
+    is usually easier to install, so even if you have an NVIDIA GPU, we
+    recommend installing this version first.
+*   __TensorFlow with GPU support__. TensorFlow programs usually run much faster
+    on a GPU instead of a CPU. If you run performance-critical applications and
+    your system has an NVIDIA®&nbsp;GPU that meets the prerequisites, you should
+    install this version. See [TensorFlow GPU support](#NVIDIARequirements) for
+    details.
 
 ## How to install TensorFlow
 
 There are a few options to install TensorFlow on your machine:
 
-* [Use pip in a virtual environment](#InstallingVirtualenv) *(recommended)*
-* [Use pip in your system environment](#InstallingNativePip)
-* [Configure a Docker container](#InstallingDocker)
-* [Use pip in Anaconda](#InstallingAnaconda)
-* [Install TensorFlow from source](/install/install_sources)
+*   [Use pip in a virtual environment](#InstallingVirtualenv) *(recommended)*
+*   [Use pip in your system environment](#InstallingNativePip)
+*   [Configure a Docker container](#InstallingDocker)
+*   [Use pip in Anaconda](#InstallingAnaconda)
+*   [Install TensorFlow from source](/install/install_sources)
 
 <a name="InstallingVirtualenv"></a>
+
 ### Use `pip` in a virtual environment
 
 Key Point: Using a virtual environment is the recommended install method.
@@ -41,8 +41,8 @@
 Python environments that are isolated from other Python development on the same
 machine. In this scenario, you install TensorFlow and its dependencies within a
 virtual environment that is available when *activated*. Virtualenv provides a
-reliable way to install and run TensorFlow while avoiding conflicts with the rest
-of the system.
+reliable way to install and run TensorFlow while avoiding conflicts with the
+rest of the system.
 
 ##### 1. Install Python, `pip`, and `virtualenv`.
 
@@ -62,7 +62,7 @@
 </pre>
 
 We *recommend* using `pip` version 8.1 or higher. If using a release before
-version 8.1,  upgrade `pip`:
+version 8.1, upgrade `pip`:
 
 <pre class="prettyprint lang-bsh">
   <code class="devsite-terminal">sudo pip install -U pip</code>
@@ -112,10 +112,10 @@
 
 Choose one of the available TensorFlow packages for installation:
 
-* `tensorflow` —Current release for CPU
-* `tensorflow-gpu` —Current release with GPU support
-* `tf-nightly` —Nightly build for CPU
-* `tf-nightly-gpu` —Nightly build with GPU support
+*   `tensorflow` —Current release for CPU
+*   `tensorflow-gpu` —Current release with GPU support
+*   `tf-nightly` —Nightly build for CPU
+*   `tf-nightly-gpu` —Nightly build with GPU support
 
 Within an active Virtualenv environment, use `pip` to install the package:
 
@@ -160,14 +160,14 @@
   <code class="devsite-terminal">rm -r ~/tensorflow/<var>venv</var></code>
 </pre>
 
-
 <a name="InstallingNativePip"></a>
+
 ### Use `pip` in your system environment
 
 Use `pip` to install the TensorFlow package directly on your system without
 using a container or virtual environment for isolation. This method is
-recommended for system administrators that want a TensorFlow installation that is
-available to everyone on a multi-user system.
+recommended for system administrators that want a TensorFlow installation that
+is available to everyone on a multi-user system.
 
 Since a system install is not isolated, it could interfere with other
 Python-based installations. But if you understand `pip` and your Python
@@ -195,7 +195,7 @@
 </pre>
 
 We *recommend* using `pip` version 8.1 or higher. If using a release before
-version 8.1,  upgrade `pip`:
+version 8.1, upgrade `pip`:
 
 <pre class="prettyprint lang-bsh">
   <code class="devsite-terminal">sudo pip install -U pip</code>
@@ -212,10 +212,10 @@
 
 Choose one of the available TensorFlow packages for installation:
 
-* `tensorflow` —Current release for CPU
-* `tensorflow-gpu` —Current release with GPU support
-* `tf-nightly` —Nightly build for CPU
-* `tf-nightly-gpu` —Nightly build with GPU support
+*   `tensorflow` —Current release for CPU
+*   `tensorflow-gpu` —Current release with GPU support
+*   `tf-nightly` —Nightly build for CPU
+*   `tf-nightly-gpu` —Nightly build with GPU support
 
 And use `pip` to install the package for Python 2 or 3:
 
@@ -260,37 +260,36 @@
 </pre>
 
 <a name="InstallingDocker"></a>
+
 ### Configure a Docker container
 
-Docker completely isolates the TensorFlow installation
-from pre-existing packages on your machine. The Docker container contains
-TensorFlow and all its dependencies. Note that the Docker image can be quite
-large (hundreds of MBs). You might choose the Docker installation if you are
-incorporating TensorFlow into a larger application architecture that already
-uses Docker.
+Docker completely isolates the TensorFlow installation from pre-existing
+packages on your machine. The Docker container contains TensorFlow and all its
+dependencies. Note that the Docker image can be quite large (hundreds of MBs).
+You might choose the Docker installation if you are incorporating TensorFlow
+into a larger application architecture that already uses Docker.
 
 Take the following steps to install TensorFlow through Docker:
 
-  1. Install Docker on your machine as described in the
-     [Docker documentation](http://docs.docker.com/engine/installation/).
-  2. Optionally, create a Linux group called <code>docker</code> to allow
-     launching containers without sudo as described in the
-     [Docker documentation](https://docs.docker.com/engine/installation/linux/linux-postinstall/).
-     (If you don't do this step, you'll have to use sudo each time
-     you invoke Docker.)
-  3. To install a version of TensorFlow that supports GPUs, you must first
-     install [nvidia-docker](https://github.com/NVIDIA/nvidia-docker), which
-     is stored in github.
-  4. Launch a Docker container that contains one of the
-     [TensorFlow binary images](https://hub.docker.com/r/tensorflow/tensorflow/tags/).
+1.  Install Docker on your machine as described in the
+    [Docker documentation](http://docs.docker.com/engine/installation/).
+2.  Optionally, create a Linux group called <code>docker</code> to allow
+    launching containers without sudo as described in the
+    [Docker documentation](https://docs.docker.com/engine/installation/linux/linux-postinstall/).
+    (If you don't do this step, you'll have to use sudo each time you invoke
+    Docker.)
+3.  To install a version of TensorFlow that supports GPUs, you must first
+    install [nvidia-docker](https://github.com/NVIDIA/nvidia-docker), which is
+    hosted on GitHub.
+4.  Launch a Docker container that contains one of the
+    [TensorFlow binary images](https://hub.docker.com/r/tensorflow/tensorflow/tags/).
 
 The remainder of this section explains how to launch a Docker container.
 
-
 #### CPU-only
 
-To launch a Docker container with CPU-only support (that is, without
-GPU support), enter a command of the following format:
+To launch a Docker container with CPU-only support (that is, without GPU
+support), enter a command of the following format:
 
 <pre>
 $ docker run -it <i>-p hostPort:containerPort TensorFlowCPUImage</i>
@@ -298,29 +297,31 @@
 
 where:
 
-  * <tt><i>-p hostPort:containerPort</i></tt> is optional.
-    If you plan to run TensorFlow programs from the shell, omit this option.
-    If you plan to run TensorFlow programs as Jupyter notebooks, set both
-    <tt><i>hostPort</i></tt> and <tt><i>containerPort</i></tt>
-    to <tt>8888</tt>.  If you'd like to run TensorBoard inside the container,
-    add a second `-p` flag, setting both <i>hostPort</i> and <i>containerPort</i>
-    to 6006.
-  * <tt><i>TensorFlowCPUImage</i></tt> is required. It identifies the Docker
+*   <tt><i>-p hostPort:containerPort</i></tt> is optional. If you plan to run
+    TensorFlow programs from the shell, omit this option. If you plan to run
+    TensorFlow programs as Jupyter notebooks, set both <tt><i>hostPort</i></tt>
+    and <tt><i>containerPort</i></tt> to <tt>8888</tt>. If you'd like to run
+    TensorBoard inside the container, add a second `-p` flag, setting both
+    <i>hostPort</i> and <i>containerPort</i> to 6006.
+*   <tt><i>TensorFlowCPUImage</i></tt> is required. It identifies the Docker
     container. Specify one of the following values:
-    * <tt>tensorflow/tensorflow</tt>, which is the TensorFlow CPU binary image.
-    * <tt>tensorflow/tensorflow:latest-devel</tt>, which is the latest
-      TensorFlow CPU Binary image plus source code.
-    * <tt>tensorflow/tensorflow:<i>version</i></tt>, which is the
-      specified version (for example, 1.1.0rc1) of TensorFlow CPU binary image.
-    * <tt>tensorflow/tensorflow:<i>version</i>-devel</tt>, which is
-      the specified version (for example, 1.1.0rc1) of the TensorFlow GPU
-      binary image plus source code.
+
+    *   <tt>tensorflow/tensorflow</tt>, which is the TensorFlow CPU binary
+        image.
+    *   <tt>tensorflow/tensorflow:latest-devel</tt>, which is the latest
+        TensorFlow CPU binary image plus source code.
+    *   <tt>tensorflow/tensorflow:<i>version</i></tt>, which is the specified
+        version (for example, 1.1.0rc1) of the TensorFlow CPU binary image.
+    *   <tt>tensorflow/tensorflow:<i>version</i>-devel</tt>, which is the
+        specified version (for example, 1.1.0rc1) of the TensorFlow CPU binary
+        image plus source code.
 
     TensorFlow images are available at
     [dockerhub](https://hub.docker.com/r/tensorflow/tensorflow/).
 
-For example, the following command launches the latest TensorFlow CPU binary image
-in a Docker container from which you can run TensorFlow programs in a shell:
+For example, the following command launches the latest TensorFlow CPU binary
+image in a Docker container from which you can run TensorFlow programs in a
+shell:
 
 <pre>
 $ <b>docker run -it tensorflow/tensorflow bash</b>
@@ -336,10 +337,11 @@
 
 Docker will download the TensorFlow binary image the first time you launch it.
 
-
 #### GPU support
 
-To launch a Docker container with NVidia GPU support, enter a command of the following format (this [does not require any local CUDA installation](https://github.com/nvidia/nvidia-docker/wiki/CUDA#requirements)):
+To launch a Docker container with NVIDIA GPU support, enter a command of the
+following format (this
+[does not require any local CUDA installation](https://github.com/nvidia/nvidia-docker/wiki/CUDA#requirements)):
 
 <pre>
 $ <b>nvidia-docker run -it</b> <i>-p hostPort:containerPort TensorFlowGPUImage</i>
@@ -347,34 +349,34 @@
 
 where:
 
-  * <tt><i>-p hostPort:containerPort</i></tt> is optional. If you plan
-    to run TensorFlow programs from the shell, omit this option. If you plan
-    to run TensorFlow programs as Jupyter notebooks, set both
-    <tt><i>hostPort</i></tt> and <code><em>containerPort</em></code> to `8888`.
-  * <i>TensorFlowGPUImage</i> specifies the Docker container. You must
-    specify one of the following values:
-    * <tt>tensorflow/tensorflow:latest-gpu</tt>, which is the latest
-      TensorFlow GPU binary image.
-    * <tt>tensorflow/tensorflow:latest-devel-gpu</tt>, which is
-      the latest TensorFlow GPU Binary image plus source code.
-    * <tt>tensorflow/tensorflow:<i>version</i>-gpu</tt>, which is the
-      specified version (for example, 0.12.1) of the TensorFlow GPU
-      binary image.
-    * <tt>tensorflow/tensorflow:<i>version</i>-devel-gpu</tt>, which is
-      the specified version (for example, 0.12.1) of the TensorFlow GPU
-      binary image plus source code.
+*   <tt><i>-p hostPort:containerPort</i></tt> is optional. If you plan to run
+    TensorFlow programs from the shell, omit this option. If you plan to run
+    TensorFlow programs as Jupyter notebooks, set both <tt><i>hostPort</i></tt>
+    and <code><em>containerPort</em></code> to `8888`.
+*   <i>TensorFlowGPUImage</i> specifies the Docker container. You must specify
+    one of the following values:
+    *   <tt>tensorflow/tensorflow:latest-gpu</tt>, which is the latest
+        TensorFlow GPU binary image.
+    *   <tt>tensorflow/tensorflow:latest-devel-gpu</tt>, which is the latest
+        TensorFlow GPU binary image plus source code.
+    *   <tt>tensorflow/tensorflow:<i>version</i>-gpu</tt>, which is the
+        specified version (for example, 0.12.1) of the TensorFlow GPU binary
+        image.
+    *   <tt>tensorflow/tensorflow:<i>version</i>-devel-gpu</tt>, which is the
+        specified version (for example, 0.12.1) of the TensorFlow GPU binary
+        image plus source code.
 
-We recommend installing one of the `latest` versions. For example, the
-following command launches the latest TensorFlow GPU binary image in a
-Docker container from which you can run TensorFlow programs in a shell:
+We recommend installing one of the `latest` versions. For example, the following
+command launches the latest TensorFlow GPU binary image in a Docker container
+from which you can run TensorFlow programs in a shell:
 
 <pre>
 $ <b>nvidia-docker run -it tensorflow/tensorflow:latest-gpu bash</b>
 </pre>
 
-The following command also launches the latest TensorFlow GPU binary image
-in a Docker container. In this Docker container, you can run TensorFlow
-programs in a Jupyter notebook:
+The following command also launches the latest TensorFlow GPU binary image in a
+Docker container. In this Docker container, you can run TensorFlow programs in a
+Jupyter notebook:
 
 <pre>
 $ <b>nvidia-docker run -it -p 8888:8888 tensorflow/tensorflow:latest-gpu</b>
@@ -390,14 +392,12 @@
 For more details see the
 [TensorFlow docker readme](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/tools/docker).
 
-
 #### Next Steps
 
-You should now
-[validate your installation](#ValidateYourInstallation).
-
+You should now [validate your installation](#ValidateYourInstallation).
 
 <a name="InstallingAnaconda"></a>
+
 ### Use `pip` in Anaconda
 
 Anaconda provides the `conda` utility to create a virtual environment. However,
@@ -410,61 +410,59 @@
 
 Take the following steps to install TensorFlow in an Anaconda environment:
 
-  1. Follow the instructions on the
-     [Anaconda download site](https://www.continuum.io/downloads)
-     to download and install Anaconda.
+1.  Follow the instructions on the
+    [Anaconda download site](https://www.continuum.io/downloads) to download and
+    install Anaconda.
 
-  2. Create a conda environment named <tt>tensorflow</tt> to run a version
-     of Python by invoking the following command:
+2.  Create a conda environment named <tt>tensorflow</tt> to run a version of
+    Python by invoking the following command:
 
      <pre>$ <b>conda create -n tensorflow pip python=2.7 # or python=3.3, etc.</b></pre>
 
-  3. Activate the conda environment by issuing the following command:
+3.  Activate the conda environment by issuing the following command:
 
      <pre>$ <b>source activate tensorflow</b>
      (tensorflow)$  # Your prompt should change </pre>
 
-  4. Issue a command of the following format to install
-     TensorFlow inside your conda environment:
+4.  Issue a command of the following format to install TensorFlow inside your
+    conda environment:
 
      <pre>(tensorflow)$ <b>pip install --ignore-installed --upgrade</b> <i>tfBinaryURL</i></pre>
 
-     where <code><em>tfBinaryURL</em></code> is the
-     [URL of the TensorFlow Python package](#the_url_of_the_tensorflow_python_package).
-     For example, the following command installs the CPU-only version of
-     TensorFlow for Python 3.4:
+    where <code><em>tfBinaryURL</em></code> is the
+    [URL of the TensorFlow Python package](#the_url_of_the_tensorflow_python_package).
+    For example, the following command installs the CPU-only version of
+    TensorFlow for Python 3.4:
 
      <pre>
      (tensorflow)$ <b>pip install --ignore-installed --upgrade \
      https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.9.0rc0-cp34-cp34m-linux_x86_64.whl</b></pre>
 
 <a name="ValidateYourInstallation"></a>
+
 ## Validate your installation
 
 To validate your TensorFlow installation, do the following:
 
-  1. Ensure that your environment is prepared to run TensorFlow programs.
-  2. Run a short TensorFlow program.
-
+1.  Ensure that your environment is prepared to run TensorFlow programs.
+2.  Run a short TensorFlow program.
 
 ### Prepare your environment
 
-If you installed on native pip, Virtualenv, or Anaconda, then
-do the following:
+If you installed with native pip, Virtualenv, or Anaconda, then do the following:
 
-  1. Start a terminal.
-  2. If you installed with Virtualenv or Anaconda, activate your container.
-  3. If you installed TensorFlow source code, navigate to any
-     directory *except* one containing TensorFlow source code.
+1.  Start a terminal.
+2.  If you installed with Virtualenv or Anaconda, activate your environment.
+3.  If you installed TensorFlow from source code, navigate to any directory *except*
+    one containing TensorFlow source code.
 
-If you installed through Docker, start a Docker container
-from which you can run bash. For example:
+If you installed through Docker, start a Docker container from which you can run
+bash. For example:
 
 <pre>
 $ <b>docker run -it tensorflow/tensorflow bash</b>
 </pre>
 
-
 ### Run a short TensorFlow program
 
 Invoke python from your shell as follows:
@@ -486,94 +484,71 @@
 
 <pre>Hello, TensorFlow!</pre>
 
-If the system outputs an error message instead of a greeting, see [Common
-installation problems](#common_installation_problems).
+If the system outputs an error message instead of a greeting, see
+[Common installation problems](#common_installation_problems).
 
 To learn more, see the [TensorFlow tutorials](../tutorials/).
 
 <a name="NVIDIARequirements"></a>
+
 ## TensorFlow GPU support
 
-To install TensorFlow with GPU support, configure the following NVIDIA® software
-on your system:
+Note: Due to the number of libraries required, using [Docker](#InstallingDocker)
+is recommended over installing directly on the host system.
 
-* [CUDA Toolkit 9.0](http://nvidia.com/cuda). For details, see
-  [NVIDIA's documentation](http://docs.nvidia.com/cuda/cuda-installation-guide-linux/).
-  Append the relevant CUDA pathnames to the `LD_LIBRARY_PATH` environmental
-  variable as described in the NVIDIA documentation.
-* [cuDNN SDK v7](http://developer.nvidia.com/cudnn). For details, see
-  [NVIDIA's documentation](http://docs.nvidia.com/deeplearning/sdk/cudnn-install/).
-  Create the `CUDA_HOME` environment variable as described in the NVIDIA
-  documentation.
-* A GPU card with CUDA Compute Capability 3.0 or higher for building TensorFlow
-  from source. To use the TensorFlow binaries, version 3.5 or higher is required.
-  See the [NVIDIA documentation](https://developer.nvidia.com/cuda-gpus) for a
-  list of supported GPU cards.
-* [GPU drivers](http://nvidia.com/drivers) that support your version of the CUDA
-  Toolkit.
-* The `libcupti-dev` library is the NVIDIA CUDA Profile Tools Interface. This
-  library provides advanced profiling support. To install this library,
-  use the following command for CUDA Toolkit >= 8.0:
+The following NVIDIA® <i>hardware</i> must be installed on your system:
 
-<pre class="prettyprint lang-bsh">
-  <code class="devsite-terminal">sudo apt-get install cuda-command-line-tools</code>
-</pre>
+*   GPU card with CUDA Compute Capability 3.5 or higher. See
+    [NVIDIA documentation](https://developer.nvidia.com/cuda-gpus) for a list of
+    supported GPU cards.
 
-Add this path to the `LD_LIBRARY_PATH` environmental variable:
+The following NVIDIA® <i>software</i> must be installed on your system:
 
-<pre class="prettyprint lang-bsh">
-  <code class="devsite-terminal">export LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+${LD_LIBRARY_PATH}:}/usr/local/cuda/extras/CUPTI/lib64</code>
-</pre>
+*   [GPU drivers](http://nvidia.com/driver). CUDA 9.0 requires 384.x or higher.
+*   [CUDA Toolkit 9.0](http://nvidia.com/cuda).
+*   [cuDNN SDK](http://developer.nvidia.com/cudnn) (>= 7.0). Version 7.1 is
+    recommended.
+*   [CUPTI](http://docs.nvidia.com/cuda/cupti/) ships with the CUDA Toolkit, but
+    you also need to append its path to the `LD_LIBRARY_PATH` environment
+    variable:
+    `export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/extras/CUPTI/lib64`
+*   *OPTIONAL*: [NCCL 2.2](https://developer.nvidia.com/nccl) to use TensorFlow
+    with multiple GPUs.
+*   *OPTIONAL*:
+    [TensorRT](http://docs.nvidia.com/deeplearning/sdk/tensorrt-install-guide/index.html),
+    which can improve inference latency and throughput for some models.
 
-* *OPTIONAL*:  For optimized performance during inference, install
-  *NVIDIA&nbsp;TensorRT&nbsp;3.0*. To install the minimal amount of TensorRT
-  runtime components required to use with the pre-built `tensorflow-gpu` package:
+To use a GPU with CUDA Compute Capability 3.0, or different versions of the
+preceding NVIDIA libraries, see
+@{$install_sources$installing TensorFlow from Sources}. On Ubuntu 16.04, and
+possibly other Debian-based Linux distros, `apt-get` can be used with the
+NVIDIA repository to simplify installation.
 
-<pre class="prettyprint lang-bsh">
-  <code class="devsite-terminal">wget https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1404/x86_64/nvinfer-runtime-trt-repo-ubuntu1404-3.0.4-ga-cuda9.0_1.0-1_amd64.deb</code>
-  <code class="devsite-terminal">sudo dpkg -i nvinfer-runtime-trt-repo-ubuntu1404-3.0.4-ga-cuda9.0_1.0-1_amd64.deb</code>
-  <code class="devsite-terminal">sudo apt-get update</code>
-  <code class="devsite-terminal">sudo apt-get install -y --allow-downgrades libnvinfer-dev libcudnn7-dev=7.0.5.15-1+cuda9.0 libcudnn7=7.0.5.15-1+cuda9.0</code>
-</pre>
-
-Note: For compatibility with the pre-built `tensorflow-gpu` package, use the
-Ubuntu *14.04* package of TensorRT (shown above). Use this even when installing
-on an Ubuntu 16.04 system.
-
-To build the TensorFlow-TensorRT integration module from source instead of using
-the pre-built binaries, see the
-[module documentation](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/tensorrt#using-tensorrt-in-tensorflow).
-For detailed TensorRT installation instructions, see
-[NVIDIA's TensorRT documentation](http://docs.nvidia.com/deeplearning/sdk/tensorrt-install-guide/index.html).
-
-To avoid cuDNN version conflicts during later system upgrades, hold the cuDNN
-version at 7.0.5:
-
-<pre class="prettyprint lang-bsh">
-  <code class="devsite-terminal">sudo apt-mark hold libcudnn7 libcudnn7-dev</code>
-</pre>
-
-To allow upgrades, remove the this hold:
-
-<pre class="prettyprint lang-bsh">
-  <code class="devsite-terminal">sudo apt-mark unhold libcudnn7 libcudnn7-dev</code>
-</pre>
-
-If you have an earlier version of the preceding packages, upgrade to the
-specified versions. If upgrading is not possible, you can still run TensorFlow
-with GPU support by @{$install_sources}.
-
+```bash
+# Adds NVIDIA package repository.
+sudo apt-key adv --fetch-keys http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64/7fa2af80.pub
+wget http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64/cuda-repo-ubuntu1604_9.1.85-1_amd64.deb
+wget http://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb
+sudo dpkg -i cuda-repo-ubuntu1604_9.1.85-1_amd64.deb
+sudo dpkg -i nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb
+sudo apt-get update
+# Includes optional NCCL 2.x.
+sudo apt-get install cuda-9-0 cuda-cublas-9-0 cuda-cufft-9-0 cuda-curand-9-0 \
+  cuda-cusolver-9-0 cuda-cusparse-9-0 libcudnn7=7.1.4.18-1+cuda9.0 \
+  libnccl2=2.2.13-1+cuda9.0 cuda-command-line-tools-9-0
+# Optionally install the TensorRT runtime; this must be done after the CUDA
+# install above.
+sudo apt-get update
+sudo apt-get install libnvinfer4=4.1.2-1+cuda9.0
+```
 
 ## Common installation problems
 
 We are relying on Stack Overflow to document TensorFlow installation problems
-and their remedies.  The following table contains links to Stack Overflow
-answers for some common installation problems.
-If you encounter an error message or other
-installation problem not listed in the following table, search for it
-on Stack Overflow.  If Stack Overflow doesn't show the error message,
-ask a new question about it on Stack Overflow and specify
-the `tensorflow` tag.
+and their remedies. The following table contains links to Stack Overflow answers
+for some common installation problems. If you encounter an error message or
+other installation problem not listed in the following table, search for it on
+Stack Overflow. If Stack Overflow doesn't show the error message, ask a new
+question about it on Stack Overflow and specify the `tensorflow` tag.
 
 <table>
 <tr> <th>Link to GitHub or Stack&nbsp;Overflow</th> <th>Error Message</th> </tr>
@@ -657,20 +632,19 @@
 
 </table>
 
-
 <a name="TF_PYTHON_URL"></a>
+
 ## The URL of the TensorFlow Python package
 
 A few installation mechanisms require the URL of the TensorFlow Python package.
 The value you specify depends on three factors:
 
-  * operating system
-  * Python version
-  * CPU only vs. GPU support
+*   operating system
+*   Python version
+*   CPU only vs. GPU support
 
 This section documents the relevant values for Linux installations.
 
-
 ### Python 2.7
 
 CPU only:
@@ -679,7 +653,6 @@
 https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.9.0rc0-cp27-none-linux_x86_64.whl
 </pre>
 
-
 GPU support:
 
 <pre>
@@ -689,7 +662,6 @@
 Note that GPU support requires the NVIDIA hardware and software described in
 [NVIDIA requirements to run TensorFlow with GPU support](#NVIDIARequirements).
 
-
 ### Python 3.4
 
 CPU only:
@@ -698,7 +670,6 @@
 https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.9.0rc0-cp34-cp34m-linux_x86_64.whl
 </pre>
 
-
 GPU support:
 
 <pre>
@@ -708,7 +679,6 @@
 Note that GPU support requires the NVIDIA hardware and software described in
 [NVIDIA requirements to run TensorFlow with GPU support](#NVIDIARequirements).
 
-
 ### Python 3.5
 
 CPU only:
@@ -717,14 +687,12 @@
 https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.9.0rc0-cp35-cp35m-linux_x86_64.whl
 </pre>
 
-
 GPU support:
 
 <pre>
 https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.9.0rc0-cp35-cp35m-linux_x86_64.whl
 </pre>
 
-
 Note that GPU support requires the NVIDIA hardware and software described in
 [NVIDIA requirements to run TensorFlow with GPU support](#NVIDIARequirements).
 
@@ -736,13 +704,11 @@
 https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.9.0rc0-cp36-cp36m-linux_x86_64.whl
 </pre>
 
-
 GPU support:
 
 <pre>
 https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.9.0rc0-cp36-cp36m-linux_x86_64.whl
 </pre>
 
-
 Note that GPU support requires the NVIDIA hardware and software described in
 [NVIDIA requirements to run TensorFlow with GPU support](#NVIDIARequirements).
diff --git a/tensorflow/docs_src/install/install_mac.md b/tensorflow/docs_src/install/install_mac.md
index c6f0c17..3372e9e 100644
--- a/tensorflow/docs_src/install/install_mac.md
+++ b/tensorflow/docs_src/install/install_mac.md
@@ -1,4 +1,4 @@
-# Installing TensorFlow on macOS
+# Install TensorFlow on macOS
 
 This guide explains how to install TensorFlow on macOS. Although these
 instructions might also work on other macOS variants, we have only
diff --git a/tensorflow/docs_src/install/install_raspbian.md b/tensorflow/docs_src/install/install_raspbian.md
index 46c4944..58a5285 100644
--- a/tensorflow/docs_src/install/install_raspbian.md
+++ b/tensorflow/docs_src/install/install_raspbian.md
@@ -1,4 +1,4 @@
-# Installing TensorFlow on Raspbian
+# Install TensorFlow on Raspbian
 
 This guide explains how to install TensorFlow on a Raspberry Pi running
 Raspbian. Although these instructions might also work on other Pi variants, we
diff --git a/tensorflow/docs_src/install/install_sources.md b/tensorflow/docs_src/install/install_sources.md
index fc1f6d0..502f4de 100644
--- a/tensorflow/docs_src/install/install_sources.md
+++ b/tensorflow/docs_src/install/install_sources.md
@@ -1,28 +1,27 @@
-# Installing TensorFlow from Sources
+# Install TensorFlow from Sources
 
-This guide explains how to build TensorFlow sources into a TensorFlow
-binary and how to install that TensorFlow binary.  Note that we provide
-well-tested, pre-built TensorFlow binaries for Ubuntu, macOS, and Windows
-systems. In addition, there are pre-built TensorFlow
-[docker images](https://hub.docker.com/r/tensorflow/tensorflow/).
-So, don't build a TensorFlow binary yourself unless you are very
-comfortable building complex packages from source and dealing with
-the inevitable aftermath should things not go exactly as documented.
+This guide explains how to build TensorFlow sources into a TensorFlow binary and
+how to install that TensorFlow binary. Note that we provide well-tested,
+pre-built TensorFlow binaries for Ubuntu, macOS, and Windows systems. In
+addition, there are pre-built TensorFlow
+[docker images](https://hub.docker.com/r/tensorflow/tensorflow/). So, don't
+build a TensorFlow binary yourself unless you are very comfortable building
+complex packages from source and dealing with the inevitable aftermath should
+things not go exactly as documented.
 
-If the last paragraph didn't scare you off, welcome.  This guide explains
-how to build TensorFlow on 64-bit desktops and laptops running either of
-the following operating systems:
+If the last paragraph didn't scare you off, welcome. This guide explains how to
+build TensorFlow on 64-bit desktops and laptops running either of the following
+operating systems:
 
 *   Ubuntu
 *   macOS
 
-Note: Some users have successfully built and installed TensorFlow from
-sources on non-supported systems.  Please remember that we do not fix
-issues stemming from these attempts.
+Note: Some users have successfully built and installed TensorFlow from sources
+on non-supported systems. Please remember that we do not fix issues stemming
+from these attempts.
 
-We **do not support** building TensorFlow on Windows. That said, if you'd
-like to try to build TensorFlow on Windows anyway, use either of the
-following:
+We **do not support** building TensorFlow on Windows. That said, if you'd like
+to try to build TensorFlow on Windows anyway, use either of the following:
 
 *   [Bazel on Windows](https://bazel.build/versions/master/docs/windows.html)
 *   [TensorFlow CMake build](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/cmake)
@@ -32,38 +31,33 @@
 
 ## Determine which TensorFlow to install
 
-You must choose one of the following types of TensorFlow to build and
-install:
+You must choose one of the following types of TensorFlow to build and install:
 
-* **TensorFlow with CPU support only**. If your system does not have a
-  NVIDIA® GPU, build and install this version. Note that this version of
-  TensorFlow is typically easier to build and install, so even if you
-  have an NVIDIA GPU, we recommend building and installing this version
-  first.
-* **TensorFlow with GPU support**. TensorFlow programs typically run
-  significantly faster on a GPU than on a CPU. Therefore, if your system
-  has a NVIDIA GPU and you need to run performance-critical applications,
-  you should ultimately build and install this version.
-  Beyond the NVIDIA GPU itself, your system must also fulfill the NVIDIA
-  software requirements described in one of the following documents:
+*   **TensorFlow with CPU support only**. If your system does not have an NVIDIA®
+    GPU, build and install this version. Note that this version of TensorFlow is
+    typically easier to build and install, so even if you have an NVIDIA GPU, we
+    recommend building and installing this version first.
+*   **TensorFlow with GPU support**. TensorFlow programs typically run
+    significantly faster on a GPU than on a CPU. Therefore, if your system has an
+    NVIDIA GPU and you need to run performance-critical applications, you should
+    ultimately build and install this version. Beyond the NVIDIA GPU itself,
+    your system must also fulfill the NVIDIA software requirements described in
+    one of the following documents:
 
-  * @{$install_linux#NVIDIARequirements$Installing TensorFlow on Ubuntu}
-  * @{$install_mac#NVIDIARequirements$Installing TensorFlow on macOS}
-
+    *   @{$install_linux#NVIDIARequirements$Installing TensorFlow on Ubuntu}
+    *   @{$install_mac#NVIDIARequirements$Installing TensorFlow on macOS}
 
 ## Clone the TensorFlow repository
 
-Start the process of building TensorFlow by cloning a TensorFlow
-repository.
+Start the process of building TensorFlow by cloning a TensorFlow repository.
 
 To clone **the latest** TensorFlow repository, issue the following command:
 
 <pre>$ <b>git clone https://github.com/tensorflow/tensorflow</b> </pre>
 
-The preceding <code>git clone</code> command creates a subdirectory
-named `tensorflow`.  After cloning, you may optionally build a
-**specific branch** (such as a release branch) by invoking the
-following commands:
+The preceding <code>git clone</code> command creates a subdirectory named
+`tensorflow`. After cloning, you may optionally build a **specific branch**
+(such as a release branch) by invoking the following commands:
 
 <pre>
 $ <b>cd tensorflow</b>
@@ -75,38 +69,34 @@
 
 <pre>$ <b>git checkout r1.0</b></pre>
 
-Next, you must prepare your environment for
-[Linux](#PrepareLinux)
-or
+Next, you must prepare your environment for [Linux](#PrepareLinux) or
 [macOS](#PrepareMac).
 
-
 <a name="PrepareLinux"></a>
+
 ## Prepare environment for Linux
 
-Before building TensorFlow on Linux, install the following build
-tools on your system:
+Before building TensorFlow on Linux, install the following build tools on your
+system:
 
-  * bazel
-  * TensorFlow Python dependencies
-  * optionally, NVIDIA packages to support TensorFlow for GPU.
-
+*   bazel
+*   TensorFlow Python dependencies
+*   optionally, NVIDIA packages to support TensorFlow for GPU.
 
 ### Install Bazel
 
 If bazel is not installed on your system, install it now by following
 [these directions](https://bazel.build/versions/master/docs/install.html).
 
-
 ### Install TensorFlow Python dependencies
 
 To install TensorFlow, you must install the following packages:
 
-  * `numpy`, which is a numerical processing package that TensorFlow requires.
-  * `dev`, which enables adding extensions to Python.
-  * `pip`, which enables you to install and manage certain Python packages.
-  * `wheel`, which enables you to manage Python compressed packages in
-    the wheel (.whl) format.
+*   `numpy`, which is a numerical processing package that TensorFlow requires.
+*   `dev`, which enables adding extensions to Python.
+*   `pip`, which enables you to install and manage certain Python packages.
+*   `wheel`, which enables you to manage Python compressed packages in the wheel
+    (.whl) format.
 
 To install these packages for Python 2.7, issue the following command:
 
@@ -120,68 +110,70 @@
 $ <b>sudo apt-get install python3-numpy python3-dev python3-pip python3-wheel</b>
 </pre>
 
-
 ### Optional: install TensorFlow for GPU prerequisites
 
 If you are building TensorFlow without GPU support, skip this section.
 
-The following NVIDIA <i>hardware</i> must be installed on your system:
+The following NVIDIA® <i>hardware</i> must be installed on your system:
 
-  * GPU card with CUDA Compute Capability 3.0 or higher.  See
-    [NVIDIA documentation](https://developer.nvidia.com/cuda-gpus)
-    for a list of supported GPU cards.
+*   GPU card with CUDA Compute Capability 3.5 or higher. See
+    [NVIDIA documentation](https://developer.nvidia.com/cuda-gpus) for a list of
+    supported GPU cards.
 
-The following NVIDIA <i>software</i> must be installed on your system:
+The following NVIDIA® <i>software</i> must be installed on your system:
 
-  * [CUDA Toolkit](http://nvidia.com/cuda) (>= 8.0). We recommend version 9.0.
-    For details, see
-    [NVIDIA's documentation](http://docs.nvidia.com/cuda/cuda-installation-guide-linux/).
-    Ensure that you append the relevant CUDA pathnames to the
-    `LD_LIBRARY_PATH` environment variable as described in the
-    NVIDIA documentation.
-  * [GPU drivers](http://nvidia.com/driver) supporting your version of the CUDA
-    Toolkit.
-  * [cuDNN SDK](http://developer.nvidia.com/cudnn) (>= 6.0). We recommend version 7.0. For details, see
-    [NVIDIA's documentation](http://docs.nvidia.com/deeplearning/sdk/cudnn-install/).
-  * [CUPTI](http://docs.nvidia.com/cuda/cupti/) ships with the CUDA Toolkit, but
+*   [GPU drivers](http://nvidia.com/driver). CUDA 9.0 requires 384.x or higher.
+*   [CUDA Toolkit](http://nvidia.com/cuda) (>= 8.0). We recommend version 9.0.
+*   [cuDNN SDK](http://developer.nvidia.com/cudnn) (>= 6.0). We recommend
+    version 7.1.x.
+*   [CUPTI](http://docs.nvidia.com/cuda/cupti/) ships with the CUDA Toolkit, but
     you also need to append its path to the `LD_LIBRARY_PATH` environment
-    variable:
+    variable:
+    `export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/extras/CUPTI/lib64`
+*   *OPTIONAL*: [NCCL 2.2](https://developer.nvidia.com/nccl) to use TensorFlow
+    with multiple GPUs.
+*   *OPTIONAL*:
+    [TensorRT](http://docs.nvidia.com/deeplearning/sdk/tensorrt-install-guide/index.html)
+    which can improve latency and throughput for inference for some models.
 
-    <pre> $ <b>export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/extras/CUPTI/lib64</b> </pre>
+While it is possible to install the NVIDIA libraries via `apt-get` from the
+NVIDIA repository, the libraries and headers are installed in locations that
+make it difficult to configure and debug build issues. Downloading and
+installing the libraries manually or using Docker
+([latest-devel-gpu](https://hub.docker.com/r/tensorflow/tensorflow/tags/)) is
+recommended.
 
 ### Next
 
 After preparing the environment, you must now
 [configure the installation](#ConfigureInstallation).
 
-
 <a name="PrepareMac"></a>
+
 ## Prepare environment for macOS
 
 Before building TensorFlow, you must install the following on your system:
 
-  * bazel
-  * TensorFlow Python dependencies.
-  * optionally, NVIDIA packages to support TensorFlow for GPU.
-
+*   bazel
+*   TensorFlow Python dependencies.
+*   optionally, NVIDIA packages to support TensorFlow for GPU.
 
 ### Install bazel
 
 If bazel is not installed on your system, install it now by following
 [these directions](https://bazel.build/versions/master/docs/install.html#mac-os-x).
 
-
 ### Install python dependencies
 
 To build TensorFlow, you must install the following packages:
 
-  * six
-  * numpy, which is a numerical processing package that TensorFlow requires.
-  * wheel, which enables you to manage Python compressed packages
-    in the wheel (.whl) format.
+*   six
+*   numpy, which is a numerical processing package that TensorFlow requires.
+*   wheel, which enables you to manage Python compressed packages in the wheel
+    (.whl) format.
 
-You may install the python dependencies using pip. If you don't have pip
-on your machine, we recommend using homebrew to install Python and pip as
+You may install the python dependencies using pip. If you don't have pip on your
+machine, we recommend using homebrew to install Python and pip as
 [documented here](http://docs.python-guide.org/en/latest/starting/install/osx/).
 If you follow these instructions, you will not need to disable SIP.
 
@@ -192,22 +184,23 @@
 Note: These are just the minimum requirements to _build_ tensorflow. Installing
 the pip package will download additional packages required to _run_ it. If you
 plan on executing tasks directly with `bazel`, without the pip installation,
-you may need to install additional python packages. For example, you should
-`pip install mock enum34` before running TensorFlow's tests with bazel.
+you may need to install additional python packages. For example, you should
+`pip install mock enum34` before running TensorFlow's tests with bazel.
 
 <a name="ConfigureInstallation"></a>
+
 ## Configure the installation
 
-The root of the source tree contains a bash script named
-<code>configure</code>. This script asks you to identify the pathname of all
-relevant TensorFlow dependencies and specify other build configuration options
-such as compiler flags. You must run this script *prior* to
-creating the pip package and installing TensorFlow.
+The root of the source tree contains a bash script named <code>configure</code>.
+This script asks you to identify the pathname of all relevant TensorFlow
+dependencies and specify other build configuration options such as compiler
+flags. You must run this script *prior* to creating the pip package and
+installing TensorFlow.
 
-If you wish to build TensorFlow with GPU, `configure` will ask
-you to specify the version numbers of CUDA and cuDNN. If several
-versions of CUDA or cuDNN are installed on your system, explicitly select
-the desired version instead of relying on the default.
+If you wish to build TensorFlow with GPU, `configure` will ask you to specify
+the version numbers of CUDA and cuDNN. If several versions of CUDA or cuDNN are
+installed on your system, explicitly select the desired version instead of
+relying on the default.
 
 One of the questions that `configure` will ask is as follows:
 
@@ -215,17 +208,17 @@
 Please specify optimization flags to use during compilation when bazel option "--config=opt" is specified [Default is -march=native]
 </pre>
 
-This question refers to a later phase in which you'll use bazel to [build the
-pip package](#build-the-pip-package) or the [C/Java libraries](#BuildCorJava).
-We recommend accepting the default (`-march=native`), which will optimize the
-generated code for your local machine's CPU type.  However, if you are building
-TensorFlow on one CPU type but will run TensorFlow on a different CPU type, then
-consider specifying a more specific optimization
-flag as described in [the gcc
-documentation](https://gcc.gnu.org/onlinedocs/gcc-4.5.3/gcc/i386-and-x86_002d64-Options.html).
+This question refers to a later phase in which you'll use bazel to
+[build the pip package](#build-the-pip-package) or the
+[C/Java libraries](#BuildCorJava). We recommend accepting the default
+(`-march=native`), which will optimize the generated code for your local
+machine's CPU type. However, if you are building TensorFlow on one CPU type but
+will run TensorFlow on a different CPU type, then consider specifying a more
+specific optimization flag as described in
+[the gcc documentation](https://gcc.gnu.org/onlinedocs/gcc-4.5.3/gcc/i386-and-x86_002d64-Options.html).
 
-Here is an example execution of the `configure` script.  Note that your
-own input will likely differ from our sample input:
+Here is an example execution of the `configure` script. Note that your own input
+will likely differ from our sample input:
 
 <pre>
 $ <b>cd tensorflow</b>  # cd to the top-level directory created
@@ -262,26 +255,26 @@
 Please specify a list of comma-separated CUDA compute capabilities you want to build with.
 You can find the compute capability of your device at: https://developer.nvidia.com/cuda-gpus.
 Please note that each additional compute capability significantly increases your build time and binary size.
-[Default is: "3.5,5.2"]: <b>3.0</b>
+[Default is: "3.5,7.0"]: <b>6.0,7.0</b>
 Do you wish to build TensorFlow with MPI support? [y/N]
 MPI support will not be enabled for TensorFlow
 Configuration finished
 </pre>
 
-If you told `configure` to build for GPU support, then `configure`
-will create a canonical set of symbolic links to the CUDA libraries
-on your system.  Therefore, every time you change the CUDA library paths,
-you must rerun the `configure` script before re-invoking
-the <code>bazel build</code> command.
+If you told `configure` to build for GPU support, then `configure` will create a
+canonical set of symbolic links to the CUDA libraries on your system. Therefore,
+every time you change the CUDA library paths, you must rerun the `configure`
+script before re-invoking the <code>bazel build</code> command.
 
 Note the following:
 
-  * Although it is possible to build both CUDA and non-CUDA configs
-    under the same source tree, we recommend running `bazel clean` when
-    switching between these two configurations in the same source tree.
-  * If you don't run the `configure` script *before* running the
-    `bazel build` command, the `bazel build` command will fail.
-
+*   Although it is possible to build both CUDA and non-CUDA configs under the
+    same source tree, we recommend running `bazel clean` when switching between
+    these two configurations in the same source tree.
+*   If you don't run the `configure` script *before* running the `bazel build`
+    command, the `bazel build` command will fail.
 
 ## Build the pip package
 
@@ -297,7 +290,8 @@
 $ bazel build --config=opt //tensorflow/tools/pip_package:build_pip_package
 </pre>
 
-To build a pip package for TensorFlow with CPU-only support for the Intel® MKL-DNN:
+To build a pip package for TensorFlow with CPU-only support that uses the
+Intel® MKL-DNN:
 
 <pre>
 $ bazel build --config=mkl --config=opt //tensorflow/tools/pip_package:build_pip_package
@@ -311,32 +305,30 @@
 $ bazel build --config=opt --config=cuda //tensorflow/tools/pip_package:build_pip_package
 </pre>
 
-**NOTE on gcc 5 or later:** the binary pip packages available on the
-TensorFlow website are built with gcc 4, which uses the older ABI. To
-make your build compatible with the older ABI, you need to add
-`--cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0"` to your `bazel build` command.
-ABI compatibility allows custom ops built against the TensorFlow pip package
-to continue to work against your built package.
+**NOTE on gcc 5 or later:** the binary pip packages available on the TensorFlow
+website are built with gcc 4, which uses the older ABI. To make your build
+compatible with the older ABI, you need to add
+`--cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0"` to your `bazel build` command. ABI
+compatibility allows custom ops built against the TensorFlow pip package to
+continue to work against your built package.
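+
+For example, a CPU-only build compatible with the older ABI might look like:
+
+<pre>
+$ <b>bazel build --config=opt --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0" //tensorflow/tools/pip_package:build_pip_package</b>
+</pre>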
 
-<b>Tip:</b> By default, building TensorFlow from sources consumes
-a lot of RAM.  If RAM is an issue on your system, you may limit RAM usage
-by specifying <code>--local_resources 2048,.5,1.0</code> while
-invoking `bazel`.
+<b>Tip:</b> By default, building TensorFlow from sources consumes a lot of RAM.
+If RAM is an issue on your system, you may limit RAM usage by specifying
+<code>--local_resources 2048,.5,1.0</code> while invoking `bazel`.
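+
+For example, a lower-memory build might be invoked as:
+
+<pre>
+$ <b>bazel build --config=opt --local_resources 2048,.5,1.0 //tensorflow/tools/pip_package:build_pip_package</b>
+</pre>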
 
-The <code>bazel build</code> command builds a script named
-`build_pip_package`.  Running this script as follows will build
-a `.whl` file within the `/tmp/tensorflow_pkg` directory:
+The <code>bazel build</code> command builds a script named `build_pip_package`.
+Running this script as follows will build a `.whl` file within the
+`/tmp/tensorflow_pkg` directory:
 
 <pre>
 $ <b>bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg</b>
 </pre>
 
-
 ## Install the pip package
 
-Invoke `pip install` to install that pip package.
-The filename of the `.whl` file depends on your platform.
-For example, the following command will install the pip package
+Invoke `pip install` to install that pip package. The filename of the `.whl`
+file depends on your platform. For example, the following command will install
+the pip package for TensorFlow 1.9.0rc0 on Linux:
 
@@ -374,26 +366,29 @@
 
 To learn more, see the [TensorFlow tutorials](../tutorials/).
 
-If the system outputs an error message instead of a greeting, see [Common
-installation problems](#common_installation_problems).
+If the system outputs an error message instead of a greeting, see
+[Common installation problems](#common_installation_problems).
 
 ## Common build and installation problems
 
 The build and installation problems you encounter typically depend on the
-operating system.  See the "Common installation problems" section
-of one of the following guides:
+operating system. See the "Common installation problems" section of one of the
+following guides:
 
-  * @{$install_linux#common_installation_problems$Installing TensorFlow on Linux}
-  * @{$install_mac#common_installation_problems$Installing TensorFlow on Mac OS}
-  * @{$install_windows#common_installation_problems$Installing TensorFlow on Windows}
+*   @{$install_linux#common_installation_problems$Installing TensorFlow on Linux}
+*   @{$install_mac#common_installation_problems$Installing TensorFlow on Mac OS}
+*   @{$install_windows#common_installation_problems$Installing TensorFlow on Windows}
 
-Beyond the errors documented in those two guides, the following table
-notes additional errors specific to building TensorFlow.  Note that we
-are relying on Stack Overflow as the repository for build and installation
-problems.  If you encounter an error message not listed in the preceding
-two guides or in the following table, search for it on Stack Overflow.  If
-Stack Overflow doesn't show the error message, ask a new question on
-Stack Overflow and specify the `tensorflow` tag.
+Beyond the errors documented in those guides, the following table notes
+additional errors specific to building TensorFlow. Note that we are relying on
+Stack Overflow as the repository for build and installation problems. If you
+encounter an error message not listed in the preceding guides or in the
+following table, search for it on Stack Overflow. If Stack Overflow doesn't show
+the error message, ask a new question on Stack Overflow and specify the
+`tensorflow` tag.
 
 <table>
 <tr> <th>Stack Overflow Link</th> <th>Error Message</th> </tr>
@@ -440,6 +435,7 @@
 </table>
 
 ## Tested source configurations
+
 **Linux**
 <table>
 <tr><th>Version:</th><th>CPU/GPU:</th><th>Python Version:</th><th>Compiler:</th><th>Build Tools:</th><th>cuDNN:</th><th>CUDA:</th></tr>
@@ -508,6 +504,7 @@
 </table>
 
 <a name="BuildCorJava"></a>
+
 ## Build the C or Java libraries
 
 The instructions above are tailored to building the TensorFlow Python packages.
@@ -516,10 +513,12 @@
 following:
 
 1.  Follow the steps up to [Configure the installation](#ConfigureInstallation)
-2.  Build the C libraries following instructions in the [README](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/lib_package/README.md).
+2.  Build the C libraries following instructions in the
+    [README](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/lib_package/README.md).
 
-If you're interested inv building the libraries for the TensorFlow Java API,
-do the following:
+If you're interested in building the libraries for the TensorFlow Java API, do
+the following:
 
 1.  Follow the steps up to [Configure the installation](#ConfigureInstallation)
-2.  Build the Java library following instructions in the [README](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/lib_package/README.md).
+2.  Build the Java library following instructions in the
+    [README](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/lib_package/README.md).
diff --git a/tensorflow/docs_src/install/install_windows.md b/tensorflow/docs_src/install/install_windows.md
index 7b7b17c..e9061bf 100644
--- a/tensorflow/docs_src/install/install_windows.md
+++ b/tensorflow/docs_src/install/install_windows.md
@@ -1,4 +1,4 @@
-# Installing TensorFlow on Windows
+# Install TensorFlow on Windows
 
 This guide explains how to install TensorFlow on Windows. Although these
 instructions might also work on other Windows variants, we have only
diff --git a/tensorflow/docs_src/install/migration.md b/tensorflow/docs_src/install/migration.md
index d6c31f9..19315ac 100644
--- a/tensorflow/docs_src/install/migration.md
+++ b/tensorflow/docs_src/install/migration.md
@@ -1,5 +1,4 @@
-
-# Transitioning to TensorFlow 1.0
+# Transition to TensorFlow 1.0
 
 
 The APIs in TensorFlow 1.0 have changed in ways that are not all backwards
diff --git a/tensorflow/docs_src/tutorials/_index.yaml b/tensorflow/docs_src/tutorials/_index.yaml
index 07d561b..c74fe58 100644
--- a/tensorflow/docs_src/tutorials/_index.yaml
+++ b/tensorflow/docs_src/tutorials/_index.yaml
@@ -175,7 +175,7 @@
               <a href="/guide/estimators">Estimators guide</a>.
             </p>
             <ol style="padding-left: 20px;">
-              <li><a href="/guide/premade_estimators">Premade Estimators guide</a></li>
+              <li><a href="/tutorials/estimators/linear">Build a linear model with Estimators</a></li>
               <li><a href="https://github.com/tensorflow/models/tree/master/official/wide_deep" class="external">Wide and deep learning with Estimators</a></li>
               <li><a href="https://github.com/tensorflow/models/tree/master/official/boosted_trees" class="external">Boosted trees</a></li>
               <li><a href="/hub/tutorials/text_classification_with_tf_hub">How to build a simple text classifier with TF-Hub</a></li>
diff --git a/tensorflow/docs_src/tutorials/_toc.yaml b/tensorflow/docs_src/tutorials/_toc.yaml
index 4db97e3..d33869a 100644
--- a/tensorflow/docs_src/tutorials/_toc.yaml
+++ b/tensorflow/docs_src/tutorials/_toc.yaml
@@ -44,6 +44,8 @@
 - title: ML at production scale
   style: accordion
   section:
+  - title: Linear model with Estimators
+    path: /tutorials/estimators/linear
   - title: Wide and deep learning
     path: https://github.com/tensorflow/models/tree/master/official/wide_deep
     status: external
diff --git a/tensorflow/docs_src/tutorials/eager/custom_training_walkthrough.md b/tensorflow/docs_src/tutorials/eager/custom_training_walkthrough.md
index b45fbef..b564a27 100644
--- a/tensorflow/docs_src/tutorials/eager/custom_training_walkthrough.md
+++ b/tensorflow/docs_src/tutorials/eager/custom_training_walkthrough.md
@@ -1,3 +1,3 @@
 # Custom training: walkthrough
 
-[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/get_started/eager.ipynb)
+[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/tutorials/eager/custom_training_walkthrough.ipynb)
diff --git a/tensorflow/docs_src/tutorials/estimators/linear.md b/tensorflow/docs_src/tutorials/estimators/linear.md
new file mode 100644
index 0000000..067a33a
--- /dev/null
+++ b/tensorflow/docs_src/tutorials/estimators/linear.md
@@ -0,0 +1,3 @@
+# Build a linear model with Estimators
+
+[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/tutorials/estimators/linear.ipynb)
diff --git a/tensorflow/docs_src/tutorials/keras/basic_classification.md b/tensorflow/docs_src/tutorials/keras/basic_classification.md
index 91bbd85..e028af9 100644
--- a/tensorflow/docs_src/tutorials/keras/basic_classification.md
+++ b/tensorflow/docs_src/tutorials/keras/basic_classification.md
@@ -1,3 +1,3 @@
 # Basic Classification
 
-[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/get_started/basic_classification.ipynb)
+[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/tutorials/keras/basic_classification.ipynb)
diff --git a/tensorflow/docs_src/tutorials/keras/basic_regression.md b/tensorflow/docs_src/tutorials/keras/basic_regression.md
index a535f22..8721b7a 100644
--- a/tensorflow/docs_src/tutorials/keras/basic_regression.md
+++ b/tensorflow/docs_src/tutorials/keras/basic_regression.md
@@ -1,3 +1,3 @@
 # Basic Regression
 
-[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/get_started/basic_regression.ipynb)
+[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/tutorials/keras/basic_regression.ipynb)
diff --git a/tensorflow/docs_src/tutorials/keras/basic_text_classification.md b/tensorflow/docs_src/tutorials/keras/basic_text_classification.md
index 7c5d4f7..c2a16bd 100644
--- a/tensorflow/docs_src/tutorials/keras/basic_text_classification.md
+++ b/tensorflow/docs_src/tutorials/keras/basic_text_classification.md
@@ -1,3 +1,3 @@
 # Basic Text Classification
 
-[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/get_started/basic_text_classification.ipynb)
+[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/tutorials/keras/basic_text_classification.ipynb)
diff --git a/tensorflow/docs_src/tutorials/keras/overfit_and_underfit.md b/tensorflow/docs_src/tutorials/keras/overfit_and_underfit.md
index e5b5ae7..f07f3ad 100644
--- a/tensorflow/docs_src/tutorials/keras/overfit_and_underfit.md
+++ b/tensorflow/docs_src/tutorials/keras/overfit_and_underfit.md
@@ -1,3 +1,3 @@
 # Overfitting and Underfitting
 
-[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/get_started/overfit_and_underfit.ipynb)
+[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/tutorials/keras/overfit_and_underfit.ipynb)
diff --git a/tensorflow/docs_src/tutorials/keras/save_and_restore_models.md b/tensorflow/docs_src/tutorials/keras/save_and_restore_models.md
index 44b3772..a799b37 100644
--- a/tensorflow/docs_src/tutorials/keras/save_and_restore_models.md
+++ b/tensorflow/docs_src/tutorials/keras/save_and_restore_models.md
@@ -1,3 +1,3 @@
 # Save and restore Models
 
-[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/get_started/save_and_restore_models.ipynb)
+[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/tutorials/keras/save_and_restore_models.ipynb)
diff --git a/tensorflow/java/src/main/java/org/tensorflow/SavedModelBundle.java b/tensorflow/java/src/main/java/org/tensorflow/SavedModelBundle.java
index c8b9126..49594e6 100644
--- a/tensorflow/java/src/main/java/org/tensorflow/SavedModelBundle.java
+++ b/tensorflow/java/src/main/java/org/tensorflow/SavedModelBundle.java
@@ -25,18 +25,86 @@
  * protocol buffer</a>).
  */
 public class SavedModelBundle implements AutoCloseable {
+  /** Options for loading a SavedModel. */
+  public static final class Loader {
+    /** Load a <code>SavedModelBundle</code> with the configured options. */
+    public SavedModelBundle load() {
+      return SavedModelBundle.load(exportDir, tags, configProto, runOptions);
+    }
+
+    /**
+     * Sets options to use when executing model initialization operations.
+     *
+     * @param options Serialized <a
+     *     href="https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto">RunOptions
+     *     protocol buffer</a>.
+     */
+    public Loader withRunOptions(byte[] options) {
+      this.runOptions = options;
+      return this;
+    }
+
+    /**
+     * Sets the configuration of the <code>Session</code> object created when loading the model.
+     *
+     * @param configProto Serialized <a
+     *     href="https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto">ConfigProto
+     *     protocol buffer</a>.
+     */
+    public Loader withConfigProto(byte[] configProto) {
+      this.configProto = configProto;
+      return this;
+    }
+
+    /**
+     * Sets the tags that identify the specific graph in the saved model to load.
+     *
+     * @param tags the tags identifying the specific MetaGraphDef to load.
+     */
+    public Loader withTags(String... tags) {
+      this.tags = tags;
+      return this;
+    }
+
+    private Loader(String exportDir) {
+      this.exportDir = exportDir;
+    }
+
+    private String exportDir = null;
+    private String[] tags = null;
+    private byte[] configProto = null;
+    private byte[] runOptions = null;
+  }
 
   /**
    * Load a saved model from an export directory. The model that is being loaded should be created
    * using the <a href="https://www.tensorflow.org/api_docs/python/tf/saved_model">Saved Model
    * API</a>.
    *
+   * <p>This method is a shorthand for:
+   *
+   * <pre>{@code
+   * SavedModelBundle.loader(exportDir).withTags(tags).load();
+   * }</pre>
+   *
    * @param exportDir the directory path containing a saved model.
    * @param tags the tags identifying the specific metagraphdef to load.
    * @return a bundle containing the graph and associated session.
    */
   public static SavedModelBundle load(String exportDir, String... tags) {
-    return load(exportDir, tags, null);
+    return loader(exportDir).withTags(tags).load();
+  }
+
+  /**
+   * Load a saved model.
+   *
+   * <p>Returns a <code>Loader</code> object that can set configuration options before actually
+   * loading the model.
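+   *
+   * <p>For example, loading a model exported for serving might look like:
+   *
+   * <pre>{@code
+   * SavedModelBundle bundle =
+   *     SavedModelBundle.loader(exportDir).withTags("serve").load();
+   * }</pre>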
+   *
+   * @param exportDir the directory path containing a saved model.
+   */
+  public static Loader loader(String exportDir) {
+    return new Loader(exportDir);
   }
 
   /**
@@ -95,7 +163,8 @@
     return new SavedModelBundle(graph, session, metaGraphDef);
   }
 
-  private static native SavedModelBundle load(String exportDir, String[] tags, byte[] runOptions);
+  private static native SavedModelBundle load(
+      String exportDir, String[] tags, byte[] config, byte[] runOptions);
 
   static {
     TensorFlow.init();
diff --git a/tensorflow/java/src/main/native/saved_model_bundle_jni.cc b/tensorflow/java/src/main/native/saved_model_bundle_jni.cc
index de6382a..68999fb 100644
--- a/tensorflow/java/src/main/native/saved_model_bundle_jni.cc
+++ b/tensorflow/java/src/main/native/saved_model_bundle_jni.cc
@@ -22,12 +22,25 @@
 
 JNIEXPORT jobject JNICALL Java_org_tensorflow_SavedModelBundle_load(
     JNIEnv* env, jclass clazz, jstring export_dir, jobjectArray tags,
-    jbyteArray run_options) {
+    jbyteArray config, jbyteArray run_options) {
   TF_Status* status = TF_NewStatus();
   jobject bundle = nullptr;
 
   // allocate parameters for TF_LoadSessionFromSavedModel
   TF_SessionOptions* opts = TF_NewSessionOptions();
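+  // If a serialized ConfigProto was supplied from Java, apply it to the
+  // session options before loading; a malformed proto is surfaced to Java as
+  // a thrown exception.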
+  if (config != nullptr) {
+    size_t sz = env->GetArrayLength(config);
+    if (sz > 0) {
+      jbyte* config_data = env->GetByteArrayElements(config, nullptr);
+      TF_SetConfig(opts, static_cast<void*>(config_data), sz, status);
+      env->ReleaseByteArrayElements(config, config_data, JNI_ABORT);
+      if (!throwExceptionIfNotOK(env, status)) {
+        TF_DeleteSessionOptions(opts);
+        TF_DeleteStatus(status);
+        return nullptr;
+      }
+    }
+  }
   TF_Buffer* crun_options = nullptr;
   if (run_options != nullptr) {
     size_t sz = env->GetArrayLength(run_options);
diff --git a/tensorflow/java/src/main/native/saved_model_bundle_jni.h b/tensorflow/java/src/main/native/saved_model_bundle_jni.h
index 6cce6a8..a4b05d0 100644
--- a/tensorflow/java/src/main/native/saved_model_bundle_jni.h
+++ b/tensorflow/java/src/main/native/saved_model_bundle_jni.h
@@ -26,10 +26,10 @@
  * Class:     org_tensorflow_SavedModelBundle
  * Method:    load
  * Signature:
- * (Ljava/lang/String;[Ljava/lang/String;[B)Lorg/tensorflow/SavedModelBundle;
+ * (Ljava/lang/String;[Ljava/lang/String;[B[B)Lorg/tensorflow/SavedModelBundle;
  */
 JNIEXPORT jobject JNICALL Java_org_tensorflow_SavedModelBundle_load(
-    JNIEnv *, jclass, jstring, jobjectArray, jbyteArray);
+    JNIEnv *, jclass, jstring, jobjectArray, jbyteArray, jbyteArray);
 
 #ifdef __cplusplus
 }  // extern "C"
diff --git a/tensorflow/java/src/test/java/org/tensorflow/SavedModelBundleTest.java b/tensorflow/java/src/test/java/org/tensorflow/SavedModelBundleTest.java
index 7922f33..7d93686 100644
--- a/tensorflow/java/src/test/java/org/tensorflow/SavedModelBundleTest.java
+++ b/tensorflow/java/src/test/java/org/tensorflow/SavedModelBundleTest.java
@@ -47,7 +47,61 @@
       fail("not expected");
     } catch (org.tensorflow.TensorFlowException e) {
       // expected exception
-      assertTrue(e.getMessage().contains("SavedModel not found"));
+      assertTrue(e.getMessage().contains("Could not find SavedModel"));
     }
   }
+
+  @Test
+  public void loader() {
+    try (SavedModelBundle bundle = SavedModelBundle.loader(SAVED_MODEL_PATH)
+        .withTags("serve")
+        .withConfigProto(sillyConfigProto())
+        .withRunOptions(sillyRunOptions())
+        .load()) {
+      assertNotNull(bundle.session());
+      assertNotNull(bundle.graph());
+      assertNotNull(bundle.metaGraphDef());
+    }
+  }
+
+  private static byte[] sillyRunOptions() {
+    // Ideally this would use the generated Java sources for protocol buffers
+    // and end up with something like the snippet below. However, generating
+    // the Java files for the .proto files in tensorflow/core:protos_all is
+    // a bit cumbersome in bazel until the proto_library rule is setup.
+    //
+    // See https://github.com/bazelbuild/bazel/issues/52#issuecomment-194341866
+    // https://github.com/bazelbuild/rules_go/pull/121#issuecomment-251515362
+    // https://github.com/bazelbuild/rules_go/pull/121#issuecomment-251692558
+    //
+    // For this test, for now, the use of specific bytes suffices.
+    return new byte[] {0x08, 0x03};
+    /*
+    return org.tensorflow.framework.RunOptions.newBuilder()
+        .setTraceLevel(RunOptions.TraceLevel.FULL_TRACE)
+        .build()
+        .toByteArray();
+    */
+  }
+
+  public static byte[] sillyConfigProto() {
+    // Ideally this would use the generated Java sources for protocol buffers
+    // and end up with something like the snippet below. However, generating
+    // the Java files for the .proto files in tensorflow/core:protos_all is
+    // a bit cumbersome in bazel until the proto_library rule is setup.
+    //
+    // See https://github.com/bazelbuild/bazel/issues/52#issuecomment-194341866
+    // https://github.com/bazelbuild/rules_go/pull/121#issuecomment-251515362
+    // https://github.com/bazelbuild/rules_go/pull/121#issuecomment-251692558
+    //
+    // For this test, for now, the use of specific bytes suffices.
+    return new byte[] {0x10, 0x01, 0x28, 0x01};
+    /*
+    return org.tensorflow.framework.ConfigProto.newBuilder()
+        .setInterOpParallelismThreads(1)
+        .setIntraOpParallelismThreads(1)
+        .build()
+        .toByteArray();
+     */
+  }
 }
diff --git a/tensorflow/python/BUILD b/tensorflow/python/BUILD
index d00debe..51e6d5a 100644
--- a/tensorflow/python/BUILD
+++ b/tensorflow/python/BUILD
@@ -73,7 +73,7 @@
     visibility = [
         "//tensorflow:__pkg__",
         "//tensorflow/python/tools:__pkg__",
-        "//tensorflow/tools/api/generator:__pkg__",
+        "//tensorflow/python/tools/api/generator:__pkg__",
     ],
     deps = [
         ":array_ops",
@@ -705,7 +705,9 @@
         "framework/error_interpolation.py",
     ],
     srcs_version = "PY2AND3",
-    deps = [],
+    deps = [
+        ":util",
+    ],
 )
 
 py_library(
@@ -1040,6 +1042,7 @@
     srcs_version = "PY2AND3",
     deps = [
         ":client_testlib",
+        ":constant_op",
         ":error_interpolation",
     ],
 )
@@ -3334,6 +3337,9 @@
         ],
     ),
     srcs_version = "PY2AND3",
+    visibility = visibility + [
+        "//tensorflow:__pkg__",
+    ],
     deps = [
         "//third_party/py/numpy",
         "@org_python_pypi_backports_weakref",
diff --git a/tensorflow/python/eager/function.py b/tensorflow/python/eager/function.py
index df83d67..a6906f9 100644
--- a/tensorflow/python/eager/function.py
+++ b/tensorflow/python/eager/function.py
@@ -21,6 +21,7 @@
 
 import collections
 import functools
+import threading
 
 import numpy as np
 
@@ -137,7 +138,7 @@
       inputs[i] = self.capture(inp)
     return super(CapturingGraph, self).create_op(
         op_type, inputs, dtypes, input_types, name, attrs, op_def,
-        compute_shapes, compute_device)
+        compute_device=compute_device)
 
 
 # pylint: disable=invalid-name
@@ -469,7 +470,7 @@
 
   def _construct_backprop_function(self):
     """Constructs the backprop function object for this function."""
-    with self._graph.as_default(), context.graph_mode():
+    with self._graph.as_default():
       c_known_ops = set()
       c_captured_tensors = set()
 
@@ -656,55 +657,58 @@
 def _trace_and_define_function(name, func, compiled, args, kwds):
   """Defines and returns graph-mode version of func."""
   graph_key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
-  with context.graph_mode():
-    captures = {}
-    tmp_graph = CapturingGraph(captures)
-    # Inherit the graph key, since this is used for matching variables in
-    # optimizers.
-    tmp_graph._graph_key = graph_key  # pylint: disable=protected-access
-    # Copy the graph collections to ensure summaries and other things work. This
-    # lets the function access (but not mutate) collections of the containing
-    # graph, such as the global step and the summary writer collections.
-    curr_graph = ops.get_default_graph()
-    for collection in curr_graph.collections:
-      tmp_graph.get_collection_ref(collection)[:] = curr_graph.get_collection(
-          collection)
-    with tmp_graph.as_default(), AutomaticControlDependencies() as a:
-      func_args = _get_defun_inputs(args)
-      func_kwds = _get_defun_inputs(kwds)
+  captures = {}
+  tmp_graph = CapturingGraph(captures)
+  # Inherit the graph key, since this is used for matching variables in
+  # optimizers.
+  tmp_graph._graph_key = graph_key  # pylint: disable=protected-access
+  # Copy the graph collections to ensure summaries and other things work. This
+  # lets the function access (but not mutate) collections of the containing
+  # graph, such as the global step and the summary writer collections.
+  curr_graph = ops.get_default_graph()
+  for collection in curr_graph.collections:
+    tmp_graph.get_collection_ref(collection)[:] = curr_graph.get_collection(
+        collection)
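+  # Carry the random seed into the function's graph so that stateful random
+  # ops traced here respect `tf.set_random_seed` (see testRandomSeed).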
+  if context.executing_eagerly():
+    tmp_graph.seed = context.global_seed()
+  else:
+    tmp_graph.seed = curr_graph.seed
+  with tmp_graph.as_default(), AutomaticControlDependencies() as a:
+    func_args = _get_defun_inputs(args)
+    func_kwds = _get_defun_inputs(kwds)
 
-      def convert(x):
-        if x is None:
-          return None
-        x = ops.convert_to_tensor_or_indexed_slices(x)
-        x = a.mark_as_return(x)
-        return x
+    def convert(x):
+      if x is None:
+        return None
+      x = ops.convert_to_tensor_or_indexed_slices(x)
+      x = a.mark_as_return(x)
+      return x
 
-      this_tape = tape.push_new_tape()
-      try:
-        func_outputs = func(*func_args, **func_kwds)
-        func_outputs = nest.map_structure(convert, func_outputs)
-      finally:
-        tape.pop_tape(this_tape)
-      variables = this_tape.watched_variables()
+    this_tape = tape.push_new_tape()
+    try:
+      func_outputs = func(*func_args, **func_kwds)
+      func_outputs = nest.map_structure(convert, func_outputs)
+    finally:
+      tape.pop_tape(this_tape)
+    variables = this_tape.watched_variables()
 
-      # Returning a closed-over tensor as an output does not trigger a
-      # call to convert_to_tensor, so we manually capture all such tensors.
-      outputs_list = _flatten(func_outputs)
-      func_def_outputs = [
-          tmp_graph.capture(x) for x in outputs_list
-          if x is not None
-      ]
+    # Returning a closed-over tensor as an output does not trigger a
+    # call to convert_to_tensor, so we manually capture all such tensors.
+    outputs_list = _flatten(func_outputs)
+    func_def_outputs = [
+        tmp_graph.capture(x) for x in outputs_list
+        if x is not None
+    ]
 
-      ids = list(sorted(captures.keys()))
-      if ids:
-        extra_inputs, extra_placeholders = zip(* [captures[x] for x in ids])
-      else:
-        extra_inputs = []
-        extra_placeholders = []
-      output_shapes = tuple(
-          x.shape if isinstance(x, ops.Tensor) else None
-          for x in func_def_outputs)
+    ids = list(sorted(captures.keys()))
+    if ids:
+      extra_inputs, extra_placeholders = zip(* [captures[x] for x in ids])
+    else:
+      extra_inputs = []
+      extra_placeholders = []
+    output_shapes = tuple(
+        x.shape if isinstance(x, ops.Tensor) else None
+        for x in func_def_outputs)
 
   func_kwds_values = _deterministic_dict_values(func_kwds)
   flat_inputs = [
@@ -770,6 +774,11 @@
 
   See the documentation for `defun` for more information on the semantics of
   defined functions.
+
+  The `_PolymorphicFunction` class is thread-compatible, meaning that minimal
+  usage of defuns (defining and calling) is thread-safe, but if users call other
+  methods or invoke the base `python_function` themselves, external
+  synchronization is necessary.
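+
+  For example, a minimal sketch of concurrent use (assuming eager execution
+  and this module's imports; `square` is a made-up name):
+
+  ```python
+  import threading
+
+  square = _PolymorphicFunction(lambda x: x * x, name='square')
+
+  # Calling the defun from several threads is safe; tracing and cache
+  # insertion are serialized by an internal lock.
+  threads = [
+      threading.Thread(target=square, args=(constant_op.constant(2.0),))
+      for _ in range(4)
+  ]
+  for t in threads:
+    t.start()
+  for t in threads:
+    t.join()
+  ```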
   """
 
   def __init__(self, python_function, name, compiled=False):
@@ -787,6 +796,8 @@
     self._arguments_to_functions = {}
     self._variables = []
 
+    self._lock = threading.Lock()
+
   def __get__(self, instance, owner):
     """Makes it possible to defun instance methods."""
     del owner
@@ -825,15 +836,16 @@
     # signature so we don't improperly capture tensors such as variables.
     signature += tuple([context.executing_eagerly() or ops.get_default_graph()])
 
-    if signature not in self._arguments_to_functions:
-      graph_function = _trace_and_define_function(
-          self._name, self._python_function, self._compiled, args, kwds)
-      self._arguments_to_functions[signature] = graph_function
-      self._variables.extend(
-          [v for v in graph_function.variables if v not in self._variables])
-      return graph_function, inputs
-    else:
-      return self._arguments_to_functions[signature], inputs
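+    # Tracing and cache insertion are serialized so that concurrent callers do
+    # not trace the same signature twice.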
+    with self._lock:
+      if signature not in self._arguments_to_functions:
+        graph_function = _trace_and_define_function(
+            self._name, self._python_function, self._compiled, args, kwds)
+        self._arguments_to_functions[signature] = graph_function
+        self._variables.extend(
+            [v for v in graph_function.variables if v not in self._variables])
+        return graph_function, inputs
+      else:
+        return self._arguments_to_functions[signature], inputs
 
   def __call__(self, *args, **kwds):
     """Calls a graph function specialized for this input signature."""
diff --git a/tensorflow/python/eager/function_test.py b/tensorflow/python/eager/function_test.py
index a3e63c31..13c4ee7 100644
--- a/tensorflow/python/eager/function_test.py
+++ b/tensorflow/python/eager/function_test.py
@@ -30,6 +30,7 @@
 from tensorflow.python.framework import errors
 from tensorflow.python.framework import function as tf_function
 from tensorflow.python.framework import ops
+from tensorflow.python.framework import random_seed
 from tensorflow.python.framework import tensor_shape
 from tensorflow.python.framework import test_util
 from tensorflow.python.layers import convolutional
@@ -39,6 +40,7 @@
 from tensorflow.python.ops import gradients_impl
 from tensorflow.python.ops import init_ops
 from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import random_ops
 from tensorflow.python.ops import resource_variable_ops
 from tensorflow.python.ops import variable_scope
 from tensorflow.python.ops import variables
@@ -136,6 +138,18 @@
     out = sq_op(t)
     self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
 
+  def testRandomSeed(self):
+
+    @function.defun
+    def f():
+      return random_ops.random_normal(())
+
+    random_seed.set_random_seed(1)
+    x = f()
+    self.assertNotEqual(x, f())
+    random_seed.set_random_seed(1)
+    self.assertAllEqual(f(), x)
+
   def testNestedInputsDefunOpGraphMode(self):
     matmul = function.defun(math_ops.matmul)
 
diff --git a/tensorflow/python/eager/pywrap_tensor.cc b/tensorflow/python/eager/pywrap_tensor.cc
index ea60464..cefd5b1 100644
--- a/tensorflow/python/eager/pywrap_tensor.cc
+++ b/tensorflow/python/eager/pywrap_tensor.cc
@@ -620,10 +620,6 @@
     {Py_tp_init, reinterpret_cast<void*>(EagerTensor_init)},
     {0, nullptr},
 };
-
-PyType_Spec EagerTensor_Type_spec = {"EagerTensor", sizeof(EagerTensor), 0,
-                                     Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HEAPTYPE,
-                                     EagerTensor_Type_slots};
 #else
 // TODO(agarwal): support active_trace.
 static PyTypeObject _EagerTensorType = {
@@ -754,6 +750,34 @@
 #if PY_MAJOR_VERSION >= 3
   PyObject* bases = PyTuple_New(1);
   PyTuple_SET_ITEM(bases, 0, base_class);
+
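+  // Resolve the module of the base class so the new type is registered under
+  // a fully qualified name (module + ".EagerTensor") rather than a bare name.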
+  tensorflow::Safe_PyObjectPtr base_class_module(
+      PyObject_GetAttrString(base_class, "__module__"));
+  const char* module = nullptr;
+  if (PyErr_Occurred()) {
+    PyErr_Clear();
+    module = "__builtin__";
+  } else {
+    module = PyBytes_AsString(base_class_module.get());
+    if (module == nullptr) {
+      PyErr_Clear();
+      module = PyUnicode_AsUTF8(base_class_module.get());
+      if (module == nullptr) {
+        PyErr_Clear();
+        module = "__builtin__";
+      }
+    }
+  }
+
+  // NOTE: The c_str from this string needs to outlast the function, hence is
+  // static.
+  static tensorflow::string fully_qualified_name =
+      tensorflow::strings::StrCat(module, ".EagerTensor");
+
+  static PyType_Spec EagerTensor_Type_spec = {
+      fully_qualified_name.c_str(), sizeof(EagerTensor), 0,
+      Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HEAPTYPE, EagerTensor_Type_slots};
+
   EagerTensorType = reinterpret_cast<PyTypeObject*>(
       PyType_FromSpecWithBases(&EagerTensor_Type_spec, bases));
   if (PyErr_Occurred()) {
diff --git a/tensorflow/python/eager/pywrap_tfe_src.cc b/tensorflow/python/eager/pywrap_tfe_src.cc
index ec7e237..4d28e98 100644
--- a/tensorflow/python/eager/pywrap_tfe_src.cc
+++ b/tensorflow/python/eager/pywrap_tfe_src.cc
@@ -1173,14 +1173,14 @@
   if (EagerTensor_CheckExact(tensor)) {
     TFE_TensorHandle* t = EagerTensor_Handle(tensor);
     tensorflow::int64 id = EagerTensor_id(tensor);
-    const tensorflow::Tensor* tensor = nullptr;
-    const tensorflow::Status status = t->handle->Tensor(&tensor);
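+    // Ask the handle only for its shape rather than for the underlying
+    // tensor, which may not be resident in host memory.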
+    tensorflow::TensorShape tensor_shape;
+    const tensorflow::Status status = t->handle->Shape(&tensor_shape);
+
     if (MaybeRaiseExceptionFromStatus(status, nullptr)) {
       return tensorflow::eager::TapeTensor{id, t->handle->dtype,
                                            tensorflow::TensorShape({})};
     } else {
-      return tensorflow::eager::TapeTensor{id, t->handle->dtype,
-                                           tensor->shape()};
+      return tensorflow::eager::TapeTensor{id, t->handle->dtype, tensor_shape};
     }
   }
   tensorflow::int64 id = FastTensorId(tensor);
diff --git a/tensorflow/python/eager/tensor_test.py b/tensorflow/python/eager/tensor_test.py
index 626a4eb..871136e 100644
--- a/tensorflow/python/eager/tensor_test.py
+++ b/tensorflow/python/eager/tensor_test.py
@@ -278,7 +278,7 @@
 
     with self.assertRaisesRegexp(
         TypeError,
-        r"tensors argument must be a list or a tuple. Got \"EagerTensor\""):
+        r"tensors argument must be a list or a tuple. Got.*EagerTensor"):
       pywrap_tensorflow.TFE_Py_TensorShapeSlice(t1, -2)
 
   def testNegativeSliceDim(self):
diff --git a/tensorflow/python/estimator/BUILD b/tensorflow/python/estimator/BUILD
index 8ee38d3..6c415b1 100644
--- a/tensorflow/python/estimator/BUILD
+++ b/tensorflow/python/estimator/BUILD
@@ -707,6 +707,14 @@
 )
 
 py_library(
+    name = "expect_h5py_installed",
+    # This is a dummy rule used as an h5py dependency in open-source.
+    # We expect h5py to already be installed on the system, e.g. via
+    # `pip install h5py`.
+    visibility = ["//visibility:public"],
+)
+
+py_library(
     name = "expect_six_installed",
     # This is a dummy rule used as a numpy dependency in open-source.
     # We expect six to already be installed on the system, e.g. via
diff --git a/tensorflow/python/estimator/api/BUILD b/tensorflow/python/estimator/api/BUILD
index ceb9bae..a75fa7d 100644
--- a/tensorflow/python/estimator/api/BUILD
+++ b/tensorflow/python/estimator/api/BUILD
@@ -6,8 +6,8 @@
 
 licenses(["notice"])  # Apache 2.0
 
-load("//tensorflow/tools/api/generator:api_gen.bzl", "gen_api_init_files")
-load("//tensorflow/tools/api/generator:api_gen.bzl", "ESTIMATOR_API_INIT_FILES")
+load("//tensorflow/python/tools/api/generator:api_gen.bzl", "gen_api_init_files")
+load("//tensorflow/python/tools/api/generator:api_gen.bzl", "ESTIMATOR_API_INIT_FILES")
 
 gen_api_init_files(
     name = "estimator_python_api_gen",
diff --git a/tensorflow/python/estimator/canned/boosted_trees.py b/tensorflow/python/estimator/canned/boosted_trees.py
index 3c832c7..3292e27 100644
--- a/tensorflow/python/estimator/canned/boosted_trees.py
+++ b/tensorflow/python/estimator/canned/boosted_trees.py
@@ -17,6 +17,7 @@
 from __future__ import division
 from __future__ import print_function
 
+import abc
 import collections
 import functools
 
@@ -384,6 +385,249 @@
       run_context.request_stop()
 
 
+def _get_max_splits(tree_hparams):
+  """Calculates the max possible number of splits based on tree params."""
+  # maximum number of splits possible in the whole tree =2^(D-1)-1
+  max_splits = (1 << tree_hparams.max_depth) - 1
+  return max_splits
+
+
+class _EnsembleGrower(object):
+  """Abstract base class for different types of ensemble growers.
+
+  Use it to receive training ops for growing and centering bias, depending
+  on the implementation (for example, in memory or accumulator-based
+  distributed):
+    grower = ...create subclass grower(tree_ensemble, tree_hparams)
+    grow_op = grower.grow_tree(stats_summaries_list, feature_ids_list,
+                               last_layer_nodes_range)
+    training_ops.append(grow_op)
+  """
+
+  def __init__(self, tree_ensemble, tree_hparams):
+    """Initializes a grower object.
+
+    Args:
+      tree_ensemble: A TreeEnsemble variable.
+      tree_hparams: A collections.namedtuple of tree hyperparameters.
+    """
+    self._tree_ensemble = tree_ensemble
+    self._tree_hparams = tree_hparams
+
+  @abc.abstractmethod
+  def center_bias(self, center_bias_var, gradients, hessians):
+    """Centers bias, if ready, based on statistics.
+
+    Args:
+      center_bias_var: A variable that will be updated when bias centering
+        finished.
+      gradients: A rank 2 tensor of gradients.
+      hessians: A rank 2 tensor of hessians.
+
+    Returns:
+      An operation for centering bias.
+    """
+
+  @abc.abstractmethod
+  def grow_tree(self, stats_summaries_list, feature_ids_list,
+                last_layer_nodes_range):
+    """Grows a tree, if ready, based on provided statistics.
+
+    Args:
+      stats_summaries_list: List of stats summary tensors, representing sums of
+        gradients and hessians for each feature bucket.
+      feature_ids_list: a list of lists of feature ids for each bucket size.
+      last_layer_nodes_range: A tensor representing ids of the nodes in the
+        current layer, to be split.
+
+    Returns:
+      An op for growing a tree.
+    """
+
+  # ============= Helper methods =============
+
+  def _center_bias_fn(self, center_bias_var, mean_gradients, mean_hessians):
+    """Updates the ensembles and cache (if needed) with logits prior."""
+    continue_centering = boosted_trees_ops.center_bias(
+        self._tree_ensemble.resource_handle,
+        mean_gradients=mean_gradients,
+        mean_hessians=mean_hessians,
+        l1=self._tree_hparams.l1,
+        l2=self._tree_hparams.l2)
+    return center_bias_var.assign(continue_centering)
+
+  def _grow_tree_from_stats_summaries(self, stats_summaries_list,
+                                      feature_ids_list, last_layer_nodes_range):
+    """Updates ensemble based on the best gains from stats summaries."""
+    node_ids_per_feature = []
+    gains_list = []
+    thresholds_list = []
+    left_node_contribs_list = []
+    right_node_contribs_list = []
+    all_feature_ids = []
+    assert len(stats_summaries_list) == len(feature_ids_list)
+
+    max_splits = _get_max_splits(self._tree_hparams)
+
+    for i, feature_ids in enumerate(feature_ids_list):
+      (numeric_node_ids_per_feature, numeric_gains_list,
+       numeric_thresholds_list, numeric_left_node_contribs_list,
+       numeric_right_node_contribs_list) = (
+           boosted_trees_ops.calculate_best_gains_per_feature(
+               node_id_range=last_layer_nodes_range,
+               stats_summary_list=stats_summaries_list[i],
+               l1=self._tree_hparams.l1,
+               l2=self._tree_hparams.l2,
+               tree_complexity=self._tree_hparams.tree_complexity,
+               min_node_weight=self._tree_hparams.min_node_weight,
+               max_splits=max_splits))
+
+      all_feature_ids += feature_ids
+      node_ids_per_feature += numeric_node_ids_per_feature
+      gains_list += numeric_gains_list
+      thresholds_list += numeric_thresholds_list
+      left_node_contribs_list += numeric_left_node_contribs_list
+      right_node_contribs_list += numeric_right_node_contribs_list
+
+    grow_op = boosted_trees_ops.update_ensemble(
+        # Confirm if local_tree_ensemble or tree_ensemble should be used.
+        self._tree_ensemble.resource_handle,
+        feature_ids=all_feature_ids,
+        node_ids=node_ids_per_feature,
+        gains=gains_list,
+        thresholds=thresholds_list,
+        left_node_contribs=left_node_contribs_list,
+        right_node_contribs=right_node_contribs_list,
+        learning_rate=self._tree_hparams.learning_rate,
+        max_depth=self._tree_hparams.max_depth,
+        pruning_mode=boosted_trees_ops.PruningMode.NO_PRUNING)
+    return grow_op
+
+
+class _InMemoryEnsembleGrower(_EnsembleGrower):
+  """A base class for ensemble growers."""
+
+  def __init__(self, tree_ensemble, tree_hparams):
+
+    super(_InMemoryEnsembleGrower, self).__init__(
+        tree_ensemble=tree_ensemble, tree_hparams=tree_hparams)
+
+  def center_bias(self, center_bias_var, gradients, hessians):
+    # For in memory, we already have a full batch of gradients and hessians,
+    # so just take a mean and proceed with centering.
+    mean_gradients = array_ops.expand_dims(
+        math_ops.reduce_mean(gradients, 0), 0)
+    mean_hessians = array_ops.expand_dims(math_ops.reduce_mean(hessians, 0), 0)
+    return self._center_bias_fn(center_bias_var, mean_gradients, mean_hessians)
+
+  def grow_tree(self, stats_summaries_list, feature_ids_list,
+                last_layer_nodes_range):
+    # For in memory, we already have full data in one batch, so we can grow the
+    # tree immediately.
+    return self._grow_tree_from_stats_summaries(
+        stats_summaries_list, feature_ids_list, last_layer_nodes_range)
+
+
+class _AccumulatorEnsembleGrower(_EnsembleGrower):
+  """A base class for ensemble growers."""
+
+  def __init__(self, tree_ensemble, tree_hparams, stamp_token,
+               n_batches_per_layer, bucket_size_list, is_chief):
+    super(_AccumulatorEnsembleGrower, self).__init__(
+        tree_ensemble=tree_ensemble, tree_hparams=tree_hparams)
+    self._stamp_token = stamp_token
+    self._n_batches_per_layer = n_batches_per_layer
+    self._bucket_size_list = bucket_size_list
+    self._is_chief = is_chief
+
+  def center_bias(self, center_bias_var, gradients, hessians):
+    # When not training in memory, we need to accumulate enough batches before
+    # proceeding with centering the bias.
+
+    # Create an accumulator.
+    bias_dependencies = []
+    bias_accumulator = data_flow_ops.ConditionalAccumulator(
+        dtype=dtypes.float32,
+        # The stats consist of grads and hessians means only.
+        # TODO(nponomareva): this will change for a multiclass
+        shape=[2, 1],
+        shared_name='bias_accumulator')
+
+    grads_and_hess = array_ops.stack([gradients, hessians], axis=0)
+    grads_and_hess = math_ops.reduce_mean(grads_and_hess, axis=1)
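+    # After averaging over the batch dimension, grads_and_hess has shape
+    # [2, 1], matching the accumulator shape above.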
+
+    apply_grad = bias_accumulator.apply_grad(grads_and_hess, self._stamp_token)
+    bias_dependencies.append(apply_grad)
+
+    # Center bias if enough batches were processed.
+    with ops.control_dependencies(bias_dependencies):
+      if not self._is_chief:
+        return control_flow_ops.no_op()
+
+      def center_bias_from_accumulator():
+        accumulated = array_ops.unstack(bias_accumulator.take_grad(1), axis=0)
+        return self._center_bias_fn(center_bias_var,
+                                    array_ops.expand_dims(accumulated[0], 0),
+                                    array_ops.expand_dims(accumulated[1], 0))
+
+      center_bias_op = control_flow_ops.cond(
+          math_ops.greater_equal(bias_accumulator.num_accumulated(),
+                                 self._n_batches_per_layer),
+          center_bias_from_accumulator,
+          control_flow_ops.no_op,
+          name='wait_until_n_batches_for_bias_accumulated')
+      return center_bias_op
+
+  def grow_tree(self, stats_summaries_list, feature_ids_list,
+                last_layer_nodes_range):
+    # When not training in memory, we need to accumulate enough batches before
+    # building a tree layer.
+    max_splits = _get_max_splits(self._tree_hparams)
+
+    # Prepare accumulators.
+    accumulators = []
+    dependencies = []
+    for i, feature_ids in enumerate(feature_ids_list):
+      stats_summaries = stats_summaries_list[i]
+      accumulator = data_flow_ops.ConditionalAccumulator(
+          dtype=dtypes.float32,
+          # The stats consist of grads and hessians (the last dimension).
+          shape=[len(feature_ids), max_splits, self._bucket_size_list[i], 2],
+          shared_name='numeric_stats_summary_accumulator_' + str(i))
+      accumulators.append(accumulator)
+
+      apply_grad = accumulator.apply_grad(
+          array_ops.stack(stats_summaries, axis=0), self._stamp_token)
+      dependencies.append(apply_grad)
+
+    # Grow the tree once enough batches have been accumulated.
+    with ops.control_dependencies(dependencies):
+      if not self._is_chief:
+        return control_flow_ops.no_op()
+
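+      # A new layer is grown only once the least-filled accumulator has seen
+      # at least n_batches_per_layer batches.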
+      min_accumulated = math_ops.reduce_min(
+          array_ops.stack([acc.num_accumulated() for acc in accumulators]))
+
+      def grow_tree_from_accumulated_summaries_fn():
+        """Updates tree with the best layer from accumulated summaries."""
+        # Take out the accumulated summaries from the accumulator and grow.
+        stats_summaries_list = [
+            array_ops.unstack(accumulator.take_grad(1), axis=0)
+            for accumulator in accumulators
+        ]
+        grow_op = self._grow_tree_from_stats_summaries(
+            stats_summaries_list, feature_ids_list, last_layer_nodes_range)
+        return grow_op
+
+      grow_model = control_flow_ops.cond(
+          math_ops.greater_equal(min_accumulated, self._n_batches_per_layer),
+          grow_tree_from_accumulated_summaries_fn,
+          control_flow_ops.no_op,
+          name='wait_until_n_batches_accumulated')
+      return grow_model
+
+
 def _bt_model_fn(
     features,
     labels,
@@ -441,11 +685,6 @@
       raise ValueError('train_in_memory is supported only for '
                        'non-distributed training.')
   worker_device = control_flow_ops.no_op().device
-  # maximum number of splits possible in the whole tree =2^(D-1)-1
-  # TODO(youngheek): perhaps storage could be optimized by storing stats with
-  # the dimension max_splits_per_layer, instead of max_splits (for the entire
-  # tree).
-  max_splits = (1 << tree_hparams.max_depth) - 1
   train_op = []
   with ops.name_scope(name) as name:
     # Prepare.
@@ -543,6 +782,11 @@
         hessians = gradients_impl.gradients(
             gradients, logits, name='Hessians')[0]
 
+      # TODO(youngheek): perhaps storage could be optimized by storing stats
+      # with the dimension max_splits_per_layer, instead of max_splits (for the
+      # entire tree).
+      max_splits = _get_max_splits(tree_hparams)
+
       stats_summaries_list = []
       for i, feature_ids in enumerate(feature_ids_list):
         num_buckets = bucket_size_list[i]
@@ -559,173 +803,28 @@
         ]
         stats_summaries_list.append(summaries)
 
-      # ========= Helper methods for both in and not in memory. ==============
-      def grow_tree_from_stats_summaries(stats_summaries_list,
-                                         feature_ids_list):
-        """Updates ensemble based on the best gains from stats summaries."""
-        node_ids_per_feature = []
-        gains_list = []
-        thresholds_list = []
-        left_node_contribs_list = []
-        right_node_contribs_list = []
-        all_feature_ids = []
-
-        assert len(stats_summaries_list) == len(feature_ids_list)
-
-        for i, feature_ids in enumerate(feature_ids_list):
-          (numeric_node_ids_per_feature, numeric_gains_list,
-           numeric_thresholds_list, numeric_left_node_contribs_list,
-           numeric_right_node_contribs_list) = (
-               boosted_trees_ops.calculate_best_gains_per_feature(
-                   node_id_range=last_layer_nodes_range,
-                   stats_summary_list=stats_summaries_list[i],
-                   l1=tree_hparams.l1,
-                   l2=tree_hparams.l2,
-                   tree_complexity=tree_hparams.tree_complexity,
-                   min_node_weight=tree_hparams.min_node_weight,
-                   max_splits=max_splits))
-
-          all_feature_ids += feature_ids
-          node_ids_per_feature += numeric_node_ids_per_feature
-          gains_list += numeric_gains_list
-          thresholds_list += numeric_thresholds_list
-          left_node_contribs_list += numeric_left_node_contribs_list
-          right_node_contribs_list += numeric_right_node_contribs_list
-
-        grow_op = boosted_trees_ops.update_ensemble(
-            # Confirm if local_tree_ensemble or tree_ensemble should be used.
-            tree_ensemble.resource_handle,
-            feature_ids=all_feature_ids,
-            node_ids=node_ids_per_feature,
-            gains=gains_list,
-            thresholds=thresholds_list,
-            left_node_contribs=left_node_contribs_list,
-            right_node_contribs=right_node_contribs_list,
-            learning_rate=tree_hparams.learning_rate,
-            max_depth=tree_hparams.max_depth,
-            pruning_mode=boosted_trees_ops.PruningMode.NO_PRUNING)
-        return grow_op
-
-      def _center_bias_fn(mean_gradients, mean_hessians):
-        """Updates the ensembles and cache (if needed) with logits prior."""
-        continue_centering = boosted_trees_ops.center_bias(
-            tree_ensemble.resource_handle,
-            mean_gradients=mean_gradients,
-            mean_hessians=mean_hessians,
-            l1=tree_hparams.l1,
-            l2=tree_hparams.l2
-        )
-        return center_bias_var.assign(continue_centering)
-
-      # ========= End of helper methods. ==============
-
       if train_in_memory and is_single_machine:
-        train_op.append(distribute_lib.increment_var(global_step))
-
-        mean_gradients = array_ops.expand_dims(
-            math_ops.reduce_mean(gradients, 0), 0)
-        mean_heassians = array_ops.expand_dims(
-            math_ops.reduce_mean(hessians, 0), 0)
-
-        train_op.append(
-            control_flow_ops.cond(
-                center_bias_var,
-                lambda: _center_bias_fn(mean_gradients, mean_heassians),
-                functools.partial(grow_tree_from_stats_summaries,
-                                  stats_summaries_list, feature_ids_list)))
+        grower = _InMemoryEnsembleGrower(tree_ensemble, tree_hparams)
       else:
+        grower = _AccumulatorEnsembleGrower(tree_ensemble, tree_hparams,
+                                            stamp_token, n_batches_per_layer,
+                                            bucket_size_list, config.is_chief)
 
-        def center_bias_not_in_mem():
-          """Accumulates the data and updates the logits bias, when ready."""
-          bias_dependencies = []
+      update_model = control_flow_ops.cond(
+          center_bias_var,
+          functools.partial(
+              grower.center_bias,
+              center_bias_var,
+              gradients,
+              hessians,
+          ),
+          functools.partial(grower.grow_tree, stats_summaries_list,
+                            feature_ids_list, last_layer_nodes_range))
+      train_op.append(update_model)
 
-          bias_accumulator = data_flow_ops.ConditionalAccumulator(
-              dtype=dtypes.float32,
-              # The stats consist of grads and hessians means only.
-              # TODO(nponomareva): this will change for a multiclass
-              shape=[2, 1],
-              shared_name='bias_accumulator')
-
-          grads_and_hess = array_ops.stack([gradients, hessians], axis=0)
-          grads_and_hess = math_ops.reduce_mean(grads_and_hess, axis=1)
-
-          apply_grad = bias_accumulator.apply_grad(grads_and_hess, stamp_token)
-          bias_dependencies.append(apply_grad)
-
-          def center_bias_from_accumulator():
-            accumulated = array_ops.unstack(
-                bias_accumulator.take_grad(1), axis=0)
-            return _center_bias_fn(
-                array_ops.expand_dims(accumulated[0], 0),
-                array_ops.expand_dims(accumulated[1], 0))
-
-          with ops.control_dependencies(bias_dependencies):
-            if config.is_chief:
-              center_bias_op = control_flow_ops.cond(
-                  math_ops.greater_equal(bias_accumulator.num_accumulated(),
-                                         n_batches_per_layer),
-                  center_bias_from_accumulator,
-                  control_flow_ops.no_op,
-                  name='wait_until_n_batches_for_bias_accumulated')
-
-              return center_bias_op
-            else:
-              return control_flow_ops.no_op()
-
-        def grow_not_in_mem():
-          """Accumulates the data and grows a layer when ready."""
-
-          accumulators = []
-          dependencies = []
-          for i, feature_ids in enumerate(feature_ids_list):
-            stats_summaries = stats_summaries_list[i]
-            accumulator = data_flow_ops.ConditionalAccumulator(
-                dtype=dtypes.float32,
-                # The stats consist of grads and hessians (the last dimension).
-                shape=[len(feature_ids), max_splits, bucket_size_list[i], 2],
-                shared_name='numeric_stats_summary_accumulator_' + str(i))
-            accumulators.append(accumulator)
-
-            apply_grad = accumulator.apply_grad(
-                array_ops.stack(stats_summaries, axis=0), stamp_token)
-            dependencies.append(apply_grad)
-
-          def grow_tree_from_accumulated_summaries_fn():
-            """Updates tree with the best layer from accumulated summaries."""
-            # Take out the accumulated summaries from the accumulator and grow.
-            stats_summaries_list = []
-
-            stats_summaries_list = [
-                array_ops.unstack(accumulator.take_grad(1), axis=0)
-                for accumulator in accumulators
-            ]
-
-            grow_op = grow_tree_from_stats_summaries(stats_summaries_list,
-                                                     feature_ids_list)
-            return grow_op
-
-          with ops.control_dependencies(dependencies):
-            if config.is_chief:
-              min_accumulated = math_ops.reduce_min(
-                  array_ops.stack(
-                      [acc.num_accumulated() for acc in accumulators]))
-
-              grow_model = control_flow_ops.cond(
-                  math_ops.greater_equal(min_accumulated, n_batches_per_layer),
-                  grow_tree_from_accumulated_summaries_fn,
-                  control_flow_ops.no_op,
-                  name='wait_until_n_batches_accumulated')
-
-              return grow_model
-            else:
-              return control_flow_ops.no_op()
-
-        update_model = control_flow_ops.cond(
-            center_bias_var, center_bias_not_in_mem, grow_not_in_mem)
-        train_op.append(update_model)
-        with ops.control_dependencies([update_model]):
-          increment_global = distribute_lib.increment_var(global_step)
-          train_op.append(increment_global)
+      with ops.control_dependencies([update_model]):
+        increment_global = distribute_lib.increment_var(global_step)
+        train_op.append(increment_global)
 
       return control_flow_ops.group(train_op, name='train_op')
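The distributed path above hinges on `ConditionalAccumulator` plus a `cond`
gate. Below is a minimal sketch of that accumulate-then-act pattern, assuming
a TF 1.x graph-mode session; the shape, names, and threshold are illustrative,
not the estimator's actual wiring:

    import tensorflow as tf

    stamp_token = 0
    n_batches_per_layer = 3

    # Shared accumulator: workers apply per-batch stats, and the chief takes
    # the average once enough batches have arrived.
    acc = tf.ConditionalAccumulator(
        dtype=tf.float32, shape=[2], shared_name='demo_stats_accumulator')

    stats = tf.placeholder(tf.float32, shape=[2])  # per-batch grad/hessian
    apply_op = acc.apply_grad(stats, local_step=stamp_token)

    def take_and_act():
      # take_grad(1) returns the mean of everything accumulated so far and
      # resets the accumulator's count.
      return tf.reduce_sum(acc.take_grad(1))

    with tf.control_dependencies([apply_op]):
      maybe_act = tf.cond(
          tf.greater_equal(acc.num_accumulated(), n_batches_per_layer),
          take_and_act,
          lambda: tf.constant(0.),
          name='wait_until_n_batches_accumulated')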
 
diff --git a/tensorflow/python/estimator/estimator.py b/tensorflow/python/estimator/estimator.py
index 350a95e..253716b 100644
--- a/tensorflow/python/estimator/estimator.py
+++ b/tensorflow/python/estimator/estimator.py
@@ -576,7 +576,9 @@
     allowed_overrides = set([
         '_call_input_fn', '_create_global_step',
         '_convert_train_steps_to_hooks', '_convert_eval_steps_to_hooks',
-        '_tf_api_names', '_estimator_api_names', '_estimator_api_constants',
+        '_tf_api_names', '_tf_api_names_v1', '_estimator_api_names',
+        '_estimator_api_names_v1', '_estimator_api_constants',
+        '_estimator_api_constants_v1',
         '_validate_features_in_predict_input',
         '_call_model_fn', '_add_meta_graph_for_mode'
     ])
diff --git a/tensorflow/python/estimator/estimator_test.py b/tensorflow/python/estimator/estimator_test.py
index 2a0e4e7..495d019 100644
--- a/tensorflow/python/estimator/estimator_test.py
+++ b/tensorflow/python/estimator/estimator_test.py
@@ -2304,6 +2304,43 @@
     with self.assertRaisesRegexp(ValueError, err_regex):
       est._export_all_saved_models(export_dir_base, input_receiver_fn_map)
 
+  def test_export_all_saved_models_metric_operation(self):
+    """Ensures metrics ops.Operations can be exported (b/109740581)."""
+
+    def _model_fn(features, labels, mode):
+      del features, labels  # Unused
+      metrics = {'metrics': (constant_op.constant([0]),
+                             control_flow_ops.no_op())}
+      return model_fn_lib.EstimatorSpec(
+          mode,
+          predictions=constant_op.constant(10.),
+          loss=constant_op.constant(1.),
+          train_op=state_ops.assign_add(training.get_global_step(), 1),
+          eval_metric_ops=metrics)
+
+    tmpdir = tempfile.mkdtemp()
+    est = estimator.Estimator(model_fn=_model_fn)
+    est.train(input_fn=dummy_input_fn, steps=1)
+
+    # Perform the export.
+    export_dir_base = os.path.join(
+        compat.as_bytes(tmpdir), compat.as_bytes('metric_operation_export'))
+
+    input_receiver_fn_map = {
+        model_fn_lib.ModeKeys.EVAL: _get_supervised_input_receiver_fn()}
+
+    export_dir = est._export_all_saved_models(
+        export_dir_base, input_receiver_fn_map)
+
+    # Restore, to validate that the export was well-formed.
+    with ops.Graph().as_default() as graph:
+      with session.Session(graph=graph) as sess:
+        meta_graph = loader.load(sess, [tag_constants.EVAL], export_dir)
+        sig_outputs = meta_graph.signature_def[
+            model_fn_lib.ModeKeys.EVAL].outputs
+        self.assertEqual(
+            sig_outputs['metrics/update_op'].name, 'metric_op_wrapper:0')
+
   def test_export_savedmodel_with_saveables_proto_roundtrip(self):
     tmpdir = tempfile.mkdtemp()
     est = estimator.Estimator(
diff --git a/tensorflow/python/estimator/export/export_output.py b/tensorflow/python/estimator/export/export_output.py
index 6c26d29..20382a5 100644
--- a/tensorflow/python/estimator/export/export_output.py
+++ b/tensorflow/python/estimator/export/export_output.py
@@ -23,6 +23,7 @@
 import six
 
 
+from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import ops
 from tensorflow.python.saved_model import signature_def_utils
@@ -338,8 +339,16 @@
         raise ValueError(
             '{} update_op must be a Tensor or Operation; got {}.'.format(
                 key, metric_op))
+
+      # We must wrap any ops in a Tensor before export, as the SignatureDef
+      # proto expects tensors only. See b/109740581
+      metric_op_tensor = metric_op
+      if isinstance(metric_op, ops.Operation):
+        with ops.control_dependencies([metric_op]):
+          metric_op_tensor = constant_op.constant([], name='metric_op_wrapper')
+
       outputs[val_name] = metric_val
-      outputs[op_name] = metric_op
+      outputs[op_name] = metric_op_tensor
 
     return outputs
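A SignatureDef can only reference tensors, so the fix above wraps an
`ops.Operation` in a throwaway constant that depends on it. The same trick in
isolation, as a standalone TF 1.x sketch with illustrative names:

    import tensorflow as tf

    update_op = tf.no_op(name='my_update')  # an Operation, not a Tensor

    # The empty constant carries no data; the control dependency guarantees
    # that fetching it runs the wrapped op first.
    with tf.control_dependencies([update_op]):
      update_tensor = tf.constant([], name='metric_op_wrapper')

    print(update_tensor.name)  # 'metric_op_wrapper:0'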
 
diff --git a/tensorflow/python/estimator/export/export_output_test.py b/tensorflow/python/estimator/export/export_output_test.py
index b21ba91..d94c764 100644
--- a/tensorflow/python/estimator/export/export_output_test.py
+++ b/tensorflow/python/estimator/export/export_output_test.py
@@ -24,8 +24,10 @@
 from tensorflow.python.estimator.export import export_output as export_output_lib
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
 from tensorflow.python.framework import sparse_tensor
 from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import control_flow_ops
 from tensorflow.python.platform import test
 from tensorflow.python.saved_model import signature_constants
 
@@ -335,5 +337,18 @@
     self.assertTrue("predictions/output1" in sig_def.outputs)
     self.assertTrue("features" in sig_def.inputs)
 
+  def test_metric_op_is_operation(self):
+    """Tests that ops.Operation is wrapped by a tensor for metric_ops."""
+    loss = {"my_loss": constant_op.constant([0])}
+    predictions = {u"output1": constant_op.constant(["foo"])}
+    metrics = {"metrics": (constant_op.constant([0]), control_flow_ops.no_op())}
+
+    outputter = MockSupervisedOutput(loss, predictions, metrics)
+    self.assertEqual(outputter.metrics["metrics/value"], metrics["metrics"][0])
+    self.assertEqual(
+        outputter.metrics["metrics/update_op"].name, "metric_op_wrapper:0")
+    self.assertTrue(
+        isinstance(outputter.metrics["metrics/update_op"], ops.Tensor))
+
 if __name__ == "__main__":
   test.main()
diff --git a/tensorflow/python/estimator/keras_test.py b/tensorflow/python/estimator/keras_test.py
index 7a4457f..7a3c5a9 100644
--- a/tensorflow/python/estimator/keras_test.py
+++ b/tensorflow/python/estimator/keras_test.py
@@ -32,7 +32,6 @@
 from tensorflow.python.framework import ops
 from tensorflow.python.framework import test_util
 from tensorflow.python.keras import testing_utils
-from tensorflow.python.keras.applications import mobilenet
 from tensorflow.python.keras.optimizers import SGD
 from tensorflow.python.ops.parsing_ops import gen_parsing_ops
 from tensorflow.python.platform import gfile
@@ -60,9 +59,9 @@
   return model
 
 
-def simple_functional_model():
+def simple_functional_model(activation='relu'):
   a = keras.layers.Input(shape=_INPUT_SIZE)
-  b = keras.layers.Dense(16, activation='relu')(a)
+  b = keras.layers.Dense(16, activation=activation)(a)
   b = keras.layers.Dropout(0.1)(b)
   b = keras.layers.Dense(_NUM_CLASS, activation='softmax')(b)
   model = keras.models.Model(inputs=[a], outputs=[b])
@@ -474,21 +473,25 @@
         est_keras.train(input_fn=invald_output_name_input_fn, steps=100)
 
   def test_custom_objects(self):
-    keras_mobile = mobilenet.MobileNet(weights=None)
-    keras_mobile.compile(loss='categorical_crossentropy', optimizer='adam')
+
+    def relu6(x):
+      return keras.backend.relu(x, max_value=6)
+
+    keras_model = simple_functional_model(activation=relu6)
+    keras_model.compile(loss='categorical_crossentropy', optimizer='adam')
     custom_objects = {
-        'relu6': mobilenet.relu6,
-        'DepthwiseConv2D': mobilenet.DepthwiseConv2D
+        'relu6': relu6
     }
+
     with self.assertRaisesRegexp(ValueError, 'relu6'):
       with self.test_session():
         keras_lib.model_to_estimator(
-            keras_model=keras_mobile,
+            keras_model=keras_model,
             model_dir=tempfile.mkdtemp(dir=self._base_dir))
 
     with self.test_session():
       keras_lib.model_to_estimator(
-          keras_model=keras_mobile,
+          keras_model=keras_model,
           model_dir=tempfile.mkdtemp(dir=self._base_dir),
           custom_objects=custom_objects)
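The rewritten test drives the same `custom_objects` path without pulling in
MobileNet. A sketch of that path from the user's side, assuming the public
`tf.keras.estimator.model_to_estimator` API; the model itself is illustrative:

    import tensorflow as tf
    from tensorflow import keras

    def relu6(x):
      return keras.backend.relu(x, max_value=6)

    inputs = keras.layers.Input(shape=(16,))
    x = keras.layers.Dense(16, activation=relu6)(inputs)
    outputs = keras.layers.Dense(2, activation='softmax')(x)
    model = keras.models.Model(inputs, outputs)
    model.compile(loss='categorical_crossentropy', optimizer='adam')

    # Without custom_objects the conversion cannot deserialize `relu6`;
    # registering it by name fixes the lookup.
    est = tf.keras.estimator.model_to_estimator(
        keras_model=model, custom_objects={'relu6': relu6})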
 
diff --git a/tensorflow/python/estimator/run_config.py b/tensorflow/python/estimator/run_config.py
index aa594af..b495c48 100644
--- a/tensorflow/python/estimator/run_config.py
+++ b/tensorflow/python/estimator/run_config.py
@@ -48,7 +48,8 @@
     'keep_checkpoint_every_n_hours',
     'log_step_count_steps',
     'train_distribute',
-    'device_fn'
+    'device_fn',
+    'protocol'
 ]
 
 _SAVE_CKPT_ERR = (
@@ -288,6 +289,10 @@
             message='device_fn must be callable with exactly'
                     ' one argument "op".')
 
+  _validate('protocol',
+            lambda protocol: protocol in (None, "grpc", "grpc+verbs"),
+            message='protocol should be grpc or grpc+verbs')
+
 
 class TaskType(object):
   MASTER = 'master'
@@ -312,7 +317,8 @@
                keep_checkpoint_every_n_hours=10000,
                log_step_count_steps=100,
                train_distribute=None,
-               device_fn=None):
+               device_fn=None,
+               protocol=None):
     """Constructs a RunConfig.
 
     All distributed training related properties `cluster_spec`, `is_chief`,
@@ -436,7 +442,7 @@
         the feature.
       log_step_count_steps: The frequency, in number of global steps, that the
         global step/sec and the loss will be logged during training.
-      train_distribute: an optional instance of
+      train_distribute: An optional instance of
         `tf.contrib.distribute.DistributionStrategy`. If specified,
         then Estimator will distribute the user's model during training,
         according to the policy specified by that strategy.
@@ -444,6 +450,8 @@
         `Operation` and returns the device string. If `None`, defaults to
         the device function returned by `tf.train.replica_device_setter`
         with round-robin strategy.
+      protocol: An optional argument which specifies the protocol to use when
+        starting the server. None defaults to grpc.
 
     Raises:
       ValueError: If both `save_checkpoints_steps` and `save_checkpoints_secs`
@@ -481,7 +489,8 @@
         keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
         log_step_count_steps=log_step_count_steps,
         train_distribute=train_distribute,
-        device_fn=device_fn)
+        device_fn=device_fn,
+        protocol=protocol)
 
     self._init_distributed_setting_from_environment_var(tf_config)
 
@@ -754,6 +763,11 @@
     """
     return self._train_distribute
 
+  @property
+  def protocol(self):
+    """Returns the optional protocol value."""
+    return self._protocol
+
   def replace(self, **kwargs):
     """Returns a new instance of `RunConfig` replacing specified properties.
 
@@ -769,7 +783,8 @@
       - `keep_checkpoint_every_n_hours`,
       - `log_step_count_steps`,
       - `train_distribute`,
-      - `device_fn`.
+      - `device_fn`,
+      - `protocol`.
 
     In addition, either `save_checkpoints_steps` or `save_checkpoints_secs`
     can be set (should not be both).
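From the user's side the new knob looks like the following sketch; per the
validator above, only None, 'grpc', and 'grpc+verbs' are admitted:

    import tensorflow as tf

    # 'protocol' is forwarded to tf.train.Server when the Estimator starts
    # one; None falls back to the default of 'grpc'.
    config = tf.estimator.RunConfig(protocol='grpc+verbs')

    # Anything else is rejected at construction time:
    try:
      tf.estimator.RunConfig(protocol='http')
    except ValueError as e:
      print(e)  # protocol should be grpc or grpc+verbs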
diff --git a/tensorflow/python/estimator/training.py b/tensorflow/python/estimator/training.py
index f5ac79c..a01b230 100644
--- a/tensorflow/python/estimator/training.py
+++ b/tensorflow/python/estimator/training.py
@@ -732,7 +732,8 @@
         job_name=config.task_type,
         task_index=config.task_id,
         config=session_config,
-        start=False)
+        start=False,
+        protocol=config.protocol)
     server.start()
     return server
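Equivalently, the standalone server construction now threads the protocol
through, roughly as in this sketch (the cluster spec is illustrative):

    import tensorflow as tf

    cluster = tf.train.ClusterSpec({'worker': ['localhost:2222']})
    server = tf.train.Server(
        cluster,
        job_name='worker',
        task_index=0,
        protocol='grpc',  # taken from config.protocol; None means 'grpc'
        start=False)
    server.start()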
 
diff --git a/tensorflow/python/estimator/training_test.py b/tensorflow/python/estimator/training_test.py
index 6bee7cb..dc106c7 100644
--- a/tensorflow/python/estimator/training_test.py
+++ b/tensorflow/python/estimator/training_test.py
@@ -472,6 +472,7 @@
         job_name=mock_est.config.task_type,
         task_index=mock_est.config.task_id,
         config=test.mock.ANY,
+        protocol=None,
         start=False)
 
     self.assertTrue(mock_server_instance.start.called)
@@ -502,6 +503,7 @@
         job_name=mock_est.config.task_type,
         task_index=mock_est.config.task_id,
         config=test.mock.ANY,
+        protocol=None,
         start=False)
 
     self.assertTrue(mock_server_instance.start.called)
@@ -729,6 +731,7 @@
         job_name=mock_est.config.task_type,
         task_index=mock_est.config.task_id,
         config=test.mock.ANY,
+        protocol=None,
         start=False)
 
     self.assertTrue(mock_server_instance.start.called)
@@ -1481,6 +1484,7 @@
         job_name=mock_est.config.task_type,
         task_index=mock_est.config.task_id,
         config=test.mock.ANY,
+        protocol=None,
         start=False)
 
     self.assertTrue(mock_server_instance.start.called)
diff --git a/tensorflow/python/framework/error_interpolation.py b/tensorflow/python/framework/error_interpolation.py
index 9ccae76..72d5dc9 100644
--- a/tensorflow/python/framework/error_interpolation.py
+++ b/tensorflow/python/framework/error_interpolation.py
@@ -24,11 +24,15 @@
 
 import collections
 import itertools
+import os
 import re
 import string
 
 import six
 
+from tensorflow.python.util import tf_stack
+
+
 _NAME_REGEX = r"[A-Za-z0-9.][A-Za-z0-9_.\-/]*?"
 _FORMAT_REGEX = r"[A-Za-z0-9_.\-/${}:]+"
 _TAG_REGEX = r"\^\^({name}):({name}):({fmt})\^\^".format(
@@ -38,6 +42,11 @@
 
 _ParseTag = collections.namedtuple("_ParseTag", ["type", "name", "format"])
 
+_BAD_FILE_SUBSTRINGS = [
+    os.path.join("tensorflow", "python"),
+    "<embedded",
+]
+
 
 def _parse_message(message):
   """Parses the message.
@@ -48,6 +57,10 @@
   "123^^node:Foo:${file}^^456^^node:Bar:${line}^^789", there are two tags and
   three separators. The separators are the numeric characters.
 
+  Supported tags after node:<node_name>
+    file: Replaced with the filename in which the node was defined.
+    line: Replaced by the line number at which the node was defined.
+
   Args:
     message: String to parse
 
@@ -72,9 +85,47 @@
   return seps, tags
 
 
-# TODO(jtkeeling): Modify to actually interpolate format strings rather than
-# echoing them.
-def interpolate(error_message):
+def _get_field_dict_from_traceback(tf_traceback, frame_index):
+  """Convert traceback elements into interpolation dictionary and return."""
+  frame = tf_traceback[frame_index]
+  return {
+      "file": frame[tf_stack.TB_FILENAME],
+      "line": frame[tf_stack.TB_LINENO],
+  }
+
+
+def _find_index_of_defining_frame_for_op(op):
+  """Return index in op._traceback with first 'useful' frame.
+
+  This method reads through the stack stored in op._traceback looking for the
+  innermost frame which (hopefully) belongs to the caller.  It accomplishes this
+  by rejecting frames whose filename appears to come from TensorFlow (see
+  error_interpolation._BAD_FILE_SUBSTRINGS for the list of rejected substrings).
+
+  Args:
+    op: the Operation object for which we would like to find the defining
+        location.
+
+  Returns:
+    Integer index into op._traceback where the first non-TF file was found
+    (innermost to outermost), or 0 (for the outermost stack frame) if all files
+    came from TensorFlow.
+  """
+  # pylint: disable=protected-access
+  # Index 0 of tf_traceback is the outermost frame.
+  tf_traceback = tf_stack.convert_stack(op._traceback)
+  size = len(tf_traceback)
+  # pylint: enable=protected-access
+  filenames = [frame[tf_stack.TB_FILENAME] for frame in tf_traceback]
+  # We process the filenames from the innermost frame to outermost.
+  for idx, filename in enumerate(reversed(filenames)):
+    contains_bad_substrings = [ss in filename for ss in _BAD_FILE_SUBSTRINGS]
+    if not any(contains_bad_substrings):
+      return size - idx - 1
+  return 0
+
+
+def interpolate(error_message, graph):
   """Interpolates an error message.
 
   The error message can contain tags of the form ^^type:name:format^^ which will
@@ -82,11 +133,38 @@
 
   Args:
     error_message: A string to interpolate.
+    graph: ops.Graph object containing all nodes referenced in the error
+        message.
 
   Returns:
     The string with tags of the form ^^type:name:format^^ interpolated.
   """
   seps, tags = _parse_message(error_message)
-  subs = [string.Template(tag.format).safe_substitute({}) for tag in tags]
+
+  node_name_to_substitution_dict = {}
+  for name in [t.name for t in tags]:
+    try:
+      op = graph.get_operation_by_name(name)
+    except KeyError:
+      op = None
+
+    if op:
+      frame_index = _find_index_of_defining_frame_for_op(op)
+      # pylint: disable=protected-access
+      field_dict = _get_field_dict_from_traceback(op._traceback, frame_index)
+      # pylint: enable=protected-access
+    else:
+      field_dict = {
+          "file": "<NA>",
+          "line": "<NA>",
+          "func": "<NA>",
+          "code": None,
+      }
+    node_name_to_substitution_dict[name] = field_dict
+
+  subs = [
+      string.Template(tag.format).safe_substitute(
+          node_name_to_substitution_dict[tag.name]) for tag in tags
+  ]
   return "".join(
       itertools.chain(*six.moves.zip_longest(seps, subs, fillvalue="")))
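The substitution itself is plain `string.Template` machinery over the tag
grammar defined above. A self-contained sketch, with the regexes copied from
the module and an illustrative field dict standing in for the graph lookup:

    import re
    import string

    _NAME_REGEX = r"[A-Za-z0-9.][A-Za-z0-9_.\-/]*?"
    _FORMAT_REGEX = r"[A-Za-z0-9_.\-/${}:]+"
    _TAG_REGEX = r"\^\^({name}):({name}):({fmt})\^\^".format(
        name=_NAME_REGEX, fmt=_FORMAT_REGEX)

    message = "Error seen at ^^node:Two:${file}:${line}^^ during training"
    # Pretend the graph lookup resolved node 'Two' to this location.
    field_dict = {"file": "my_model.py", "line": 42}

    def _interpolate_tag(match):
      # Group 3 is the format; unknown placeholders survive safe_substitute.
      return string.Template(match.group(3)).safe_substitute(field_dict)

    print(re.sub(_TAG_REGEX, _interpolate_tag, message))
    # Error seen at my_model.py:42 during training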
diff --git a/tensorflow/python/framework/error_interpolation_test.py b/tensorflow/python/framework/error_interpolation_test.py
index ad448de..b661531 100644
--- a/tensorflow/python/framework/error_interpolation_test.py
+++ b/tensorflow/python/framework/error_interpolation_test.py
@@ -18,31 +18,120 @@
 from __future__ import division
 from __future__ import print_function
 
+import os
+
+from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import error_interpolation
 from tensorflow.python.platform import test
+from tensorflow.python.util import tf_stack
+
+
+def _make_frame_with_filename(op, idx, filename):
+  """Return a copy of an existing stack frame with a new filename."""
+  stack_frame = list(op._traceback[idx])
+  stack_frame[tf_stack.TB_FILENAME] = filename
+  return tuple(stack_frame)
+
+
+def _modify_op_stack_with_filenames(op, num_user_frames, user_filename,
+                                    num_inner_tf_frames):
+  """Replace op._traceback with a new traceback using special filenames."""
+  tf_filename = "%d" + error_interpolation._BAD_FILE_SUBSTRINGS[0]
+  user_filename = os.path.join("%d", "my_favorite_file.py")
+
+  num_requested_frames = num_user_frames + num_inner_tf_frames
+  num_actual_frames = len(op._traceback)
+  num_outer_frames = num_actual_frames - num_requested_frames
+  assert num_requested_frames <= num_actual_frames, "Too few real frames."
+
+  # The op's traceback has outermost frame at index 0.
+  stack = []
+  for idx in range(0, num_outer_frames):
+    stack.append(op._traceback[idx])
+  for idx in range(len(stack), len(stack)+num_user_frames):
+    stack.append(_make_frame_with_filename(op, idx, user_filename % idx))
+  for idx in range(len(stack), len(stack)+num_inner_tf_frames):
+    stack.append(_make_frame_with_filename(op, idx, tf_filename % idx))
+  op._traceback = stack
 
 
 class InterpolateTest(test.TestCase):
 
+  def setUp(self):
+    # Add nodes to the graph for retrieval by name later.
+    constant_op.constant(1, name="One")
+    constant_op.constant(2, name="Two")
+    three = constant_op.constant(3, name="Three")
+    self.graph = three.graph
+
+    # Change the list of bad file substrings so that constant_op.py is chosen
+    # as the defining stack frame for constant_op.constant ops.
+    self.old_bad_strings = error_interpolation._BAD_FILE_SUBSTRINGS
+    error_interpolation._BAD_FILE_SUBSTRINGS = [
+        "%sops.py" % os.sep,
+        "%sutil" % os.sep,
+    ]
+
+  def tearDown(self):
+    error_interpolation._BAD_FILE_SUBSTRINGS = self.old_bad_strings
+
+  def testFindIndexOfDefiningFrameForOp(self):
+    local_op = constant_op.constant(42).op
+    user_filename = "hope.py"
+    _modify_op_stack_with_filenames(local_op,
+                                    num_user_frames=3,
+                                    user_filename=user_filename,
+                                    num_inner_tf_frames=5)
+    idx = error_interpolation._find_index_of_defining_frame_for_op(local_op)
+    # Expected frame is 6th from the end because there are 5 inner frames with
+    # TF filenames.
+    expected_frame = len(local_op._traceback) - 6
+    self.assertEqual(expected_frame, idx)
+
+  def testFindIndexOfDefiningFrameForOpReturnsZeroOnError(self):
+    local_op = constant_op.constant(43).op
+    # Truncate stack to known length.
+    local_op._traceback = local_op._traceback[:7]
+    # Ensure all frames look like TF frames.
+    _modify_op_stack_with_filenames(local_op,
+                                    num_user_frames=0,
+                                    user_filename="user_file.py",
+                                    num_inner_tf_frames=7)
+    idx = error_interpolation._find_index_of_defining_frame_for_op(local_op)
+    self.assertEqual(0, idx)
+
   def testNothingToDo(self):
     normal_string = "This is just a normal string"
-    interpolated_string = error_interpolation.interpolate(normal_string)
+    interpolated_string = error_interpolation.interpolate(normal_string,
+                                                          self.graph)
     self.assertEqual(interpolated_string, normal_string)
 
   def testOneTag(self):
-    one_tag_string = "^^node:Foo:${file}^^"
-    interpolated_string = error_interpolation.interpolate(one_tag_string)
-    self.assertEqual(interpolated_string, "${file}")
+    one_tag_string = "^^node:Two:${file}^^"
+    interpolated_string = error_interpolation.interpolate(one_tag_string,
+                                                          self.graph)
+    self.assertTrue(interpolated_string.endswith("constant_op.py"),
+                    "interpolated_string '%s' did not end with constant_op.py"
+                    % interpolated_string)
+
+  def testOneTagWithAFakeNameResultsInPlaceholders(self):
+    one_tag_string = "^^node:MinusOne:${file}^^"
+    interpolated_string = error_interpolation.interpolate(one_tag_string,
+                                                          self.graph)
+    self.assertEqual(interpolated_string, "<NA>")
 
   def testTwoTagsNoSeps(self):
-    two_tags_no_seps = "^^node:Foo:${file}^^^^node:Bar:${line}^^"
-    interpolated_string = error_interpolation.interpolate(two_tags_no_seps)
-    self.assertEqual(interpolated_string, "${file}${line}")
+    two_tags_no_seps = "^^node:One:${file}^^^^node:Three:${line}^^"
+    interpolated_string = error_interpolation.interpolate(two_tags_no_seps,
+                                                          self.graph)
+    self.assertRegexpMatches(interpolated_string, "constant_op.py[0-9]+")
 
   def testTwoTagsWithSeps(self):
-    two_tags_with_seps = "123^^node:Foo:${file}^^456^^node:Bar:${line}^^789"
-    interpolated_string = error_interpolation.interpolate(two_tags_with_seps)
-    self.assertEqual(interpolated_string, "123${file}456${line}789")
+    two_tags_with_seps = ";;;^^node:Two:${file}^^,,,^^node:Three:${line}^^;;;"
+    interpolated_string = error_interpolation.interpolate(two_tags_with_seps,
+                                                          self.graph)
+    expected_regex = "^;;;.*constant_op.py,,,[0-9]*;;;$"
+    self.assertRegexpMatches(interpolated_string, expected_regex)
 
 
 if __name__ == "__main__":
diff --git a/tensorflow/python/framework/function_test.py b/tensorflow/python/framework/function_test.py
index 15e41ba..1707f92 100644
--- a/tensorflow/python/framework/function_test.py
+++ b/tensorflow/python/framework/function_test.py
@@ -537,19 +537,25 @@
   def testResourceVarAsImplicitInput(self):
     g = ops.Graph()
     with g.as_default(), ops.device("cpu:0"):
+      expected_type = dtypes.float32
+      expected_shape = tensor_shape.TensorShape((4, 4))
       v = variable_scope.get_variable(
-          "var", (4, 4), dtypes.float32, use_resource=True)
+          "var", expected_shape, expected_type, use_resource=True)
 
       @function.Defun()
       def Foo():
-        return array_ops.identity(v)
+        captured = array_ops.identity(v)
+        self.assertEqual(expected_type, captured.dtype)
+        self.assertEqual(expected_shape, captured.shape)
+        return captured, array_ops.shape(captured)
 
-      y = v.value()
-      z = Foo()
+      expected_val = v.value()
+      actual_val, actual_shape = Foo()
 
     with self.test_session(graph=g):
       v.initializer.run()
-      self.assertAllEqual(y.eval(), z.eval())
+      self.assertAllEqual(expected_val.eval(), actual_val.eval())
+      self.assertAllEqual(expected_shape, actual_shape.eval())
 
   def testDefineErrors(self):
     with ops.Graph().as_default():
diff --git a/tensorflow/python/framework/ops.py b/tensorflow/python/framework/ops.py
index c4f58f0..a3b56b0 100644
--- a/tensorflow/python/framework/ops.py
+++ b/tensorflow/python/framework/ops.py
@@ -2772,23 +2772,23 @@
     # This step makes a copy of the existing stack, and it also initializes
     # self._thread_local._variable_creator_stack if it doesn't exist yet.
     old = list(self._variable_creator_stack)
-    self._thread_local._variable_creator_stack.append(creator)
+    self._thread_local._variable_creator_stack.append(creator)  # pylint: disable=protected-access
     try:
       yield
     finally:
-      self._thread_local._variable_creator_stack = old
+      self._thread_local._variable_creator_stack = old  # pylint: disable=protected-access
 
   # Note: this method is private because the API of tf.Graph() is public and
   # frozen, and this functionality is still not ready for public visibility.
   @property
   def _variable_creator_stack(self):
     if not hasattr(self._thread_local, "_variable_creator_stack"):
-      self._thread_local._variable_creator_stack = []
-    return list(self._thread_local._variable_creator_stack)
+      self._thread_local._variable_creator_stack = []  # pylint: disable=protected-access
+    return list(self._thread_local._variable_creator_stack)  # pylint: disable=protected-access
 
   @_variable_creator_stack.setter
   def _variable_creator_stack(self, variable_creator_stack):
-    self._thread_local._variable_creator_stack = variable_creator_stack
+    self._thread_local._variable_creator_stack = variable_creator_stack  # pylint: disable=protected-access
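The suppressed accesses above all follow one pattern: a per-thread stack
behind a copying property, plus a context manager that pushes and restores a
snapshot. A generic sketch of that pattern outside TensorFlow, with
illustrative names:

    import contextlib
    import threading

    class Graph(object):

      def __init__(self):
        self._thread_local = threading.local()

      @property
      def _creator_stack(self):
        if not hasattr(self._thread_local, 'stack'):
          self._thread_local.stack = []
        # Hand out a copy so callers cannot mutate the stack in place.
        return list(self._thread_local.stack)

      @contextlib.contextmanager
      def _creator_scope(self, creator):
        old = list(self._creator_stack)
        self._thread_local.stack.append(creator)
        try:
          yield
        finally:
          # Restore the snapshot even if the body raised.
          self._thread_local.stack = old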
 
   def _check_not_finalized(self):
     """Check if the graph is finalized.
diff --git a/tensorflow/python/framework/python_op_gen.cc b/tensorflow/python/framework/python_op_gen.cc
index ec3748b..76d4c20 100644
--- a/tensorflow/python/framework/python_op_gen.cc
+++ b/tensorflow/python/framework/python_op_gen.cc
@@ -943,6 +943,7 @@
 from tensorflow.python.framework import op_def_registry as _op_def_registry
 from tensorflow.python.framework import ops as _ops
 from tensorflow.python.framework import op_def_library as _op_def_library
+from tensorflow.python.util.deprecation import deprecated_endpoints
 from tensorflow.python.util.tf_export import tf_export
 
 )");
diff --git a/tensorflow/python/framework/python_op_gen_internal.cc b/tensorflow/python/framework/python_op_gen_internal.cc
index 940bffb..031b4a3 100644
--- a/tensorflow/python/framework/python_op_gen_internal.cc
+++ b/tensorflow/python/framework/python_op_gen_internal.cc
@@ -588,10 +588,12 @@
     return;
   }
 
+  // Add @tf_export decorator.
   strings::StrAppend(&result_, "@tf_export(");
 
   // Add all endpoint names to tf_export.
   bool first_endpoint = true;
+  std::vector<string> deprecated_endpoints;
   for (const auto& endpoint : api_def_.endpoint()) {
     if (!first_endpoint) {
       strings::StrAppend(&result_, ", ");
@@ -601,9 +603,32 @@
     string endpoint_name;
     python_op_gen_internal::GenerateLowerCaseOpName(endpoint.name(),
                                                     &endpoint_name);
+    if (endpoint.deprecated()) {
+      deprecated_endpoints.push_back(endpoint_name);
+    }
     strings::StrAppend(&result_, "'", endpoint_name, "'");
   }
   strings::StrAppend(&result_, ")\n");
+
+  // If all endpoints are deprecated, add @deprecated decorator.
+  if (!api_def_.deprecation_message().empty()) {
+    const string instructions = api_def_.deprecation_message();
+    strings::StrAppend(&result_, "@deprecated(None, '", instructions, "')\n");
+  }
+  // Add @deprecated_endpoints decorator.
+  if (!deprecated_endpoints.empty()) {
+    strings::StrAppend(&result_, "@deprecated_endpoints(");
+    bool first_endpoint = true;
+    for (auto& endpoint_name : deprecated_endpoints) {
+      if (first_endpoint) {
+        first_endpoint = false;
+      } else {
+        strings::StrAppend(&result_, ", ");
+      }
+      strings::StrAppend(&result_, "'", endpoint_name, "'");
+    }
+    strings::StrAppend(&result_, ")\n");
+  }
 }
 
 void GenPythonOp::AddDefLine(const string& function_name,
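With these appends in place, an op whose ApiDef carries a deprecation message
and a deprecated endpoint comes out of the generator looking roughly like the
following (illustrative output for a hypothetical op, not generated from a
real ApiDef):

    from tensorflow.python.util.deprecation import deprecated
    from tensorflow.python.util.deprecation import deprecated_endpoints
    from tensorflow.python.util.tf_export import tf_export

    @tf_export('math.foo', 'foo')
    @deprecated(None, 'Use math.foo instead.')
    @deprecated_endpoints('foo')
    def foo(x, name=None):
      pass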
diff --git a/tensorflow/python/framework/subscribe.py b/tensorflow/python/framework/subscribe.py
index 7797d99..cee7398 100644
--- a/tensorflow/python/framework/subscribe.py
+++ b/tensorflow/python/framework/subscribe.py
@@ -47,7 +47,7 @@
   tensors_type = type(tensors)
   if tensors_type is ops.Tensor:
     return apply_fn(tensors)
-  elif tensors_type is variables.Variable:
+  elif isinstance(tensors, variables.Variable):
     return apply_fn(tensors.value())
   elif isinstance(tensors, (list, tuple)):
     tensors = [_recursive_apply(t, apply_fn) for t in tensors]
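The one-line `isinstance` fix matters because `type(x) is variables.Variable`
is false for subclasses such as `ResourceVariable`, which is what resource
variables actually are. The distinction in miniature:

    class Variable(object):
      pass

    class ResourceVariable(Variable):
      pass

    v = ResourceVariable()
    print(type(v) is Variable)      # False: exact-type check misses subclasses
    print(isinstance(v, Variable))  # True: covers the whole hierarchy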
diff --git a/tensorflow/python/framework/traceable_stack_test.py b/tensorflow/python/framework/traceable_stack_test.py
index 168f908..3e7876f 100644
--- a/tensorflow/python/framework/traceable_stack_test.py
+++ b/tensorflow/python/framework/traceable_stack_test.py
@@ -66,10 +66,12 @@
     result = t_obj.set_filename_and_line_from_caller(offset=300)
 
     # We expect a heuristic to be used because we are not currently 300 frames
-    # down on the stack.  The filename should be some wacky thing from the
-    # outermost stack frame -- definitely not equal to this filename.
+    # down on the stack.  The filename and lineno of the outermost frame are not
+    # predictable -- in some environments the filename is this test file, but in
+    # other environments it is not (e.g. due to a test runner calling this
+    # file).  Therefore we only test that the called function knows it applied a
+    # heuristic for the ridiculous stack offset.
     self.assertEqual(t_obj.HEURISTIC_USED, result)
-    self.assertNotEqual(_THIS_FILENAME, t_obj.filename)
 
 
 class TraceableStackTest(test_util.TensorFlowTestCase):
diff --git a/tensorflow/python/keras/BUILD b/tensorflow/python/keras/BUILD
index 4056818..01f1184 100755
--- a/tensorflow/python/keras/BUILD
+++ b/tensorflow/python/keras/BUILD
@@ -793,6 +793,19 @@
 )
 
 py_test(
+    name = "training_utils_test",
+    size = "medium",
+    srcs = ["engine/training_utils_test.py"],
+    srcs_version = "PY2AND3",
+    tags = ["notsan"],
+    deps = [
+        ":keras",
+        "//tensorflow/python:client_testlib",
+        "//third_party/py/numpy",
+    ],
+)
+
+py_test(
     name = "model_subclassing_test",
     size = "medium",
     srcs = ["model_subclassing_test.py"],
diff --git a/tensorflow/python/keras/applications/mobilenet.py b/tensorflow/python/keras/applications/mobilenet.py
index e56c695..7285e03 100644
--- a/tensorflow/python/keras/applications/mobilenet.py
+++ b/tensorflow/python/keras/applications/mobilenet.py
@@ -72,13 +72,9 @@
 import os
 
 from tensorflow.python.keras import backend as K
-from tensorflow.python.keras import constraints
-from tensorflow.python.keras import initializers
-from tensorflow.python.keras import regularizers
 from tensorflow.python.keras.applications import imagenet_utils
 from tensorflow.python.keras.applications.imagenet_utils import _obtain_input_shape
 from tensorflow.python.keras.applications.imagenet_utils import decode_predictions
-from tensorflow.python.keras.engine.base_layer import InputSpec
 from tensorflow.python.keras.layers import Activation
 from tensorflow.python.keras.layers import BatchNormalization
 from tensorflow.python.keras.layers import Conv2D
@@ -87,10 +83,10 @@
 from tensorflow.python.keras.layers import GlobalAveragePooling2D
 from tensorflow.python.keras.layers import GlobalMaxPooling2D
 from tensorflow.python.keras.layers import Input
+from tensorflow.python.keras.layers import ReLU
 from tensorflow.python.keras.layers import Reshape
 from tensorflow.python.keras.layers import ZeroPadding2D
 from tensorflow.python.keras.models import Model
-from tensorflow.python.keras.utils import conv_utils
 from tensorflow.python.keras.utils import layer_utils
 from tensorflow.python.keras.utils.data_utils import get_file
 from tensorflow.python.platform import tf_logging as logging
@@ -100,10 +96,6 @@
 BASE_WEIGHT_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.6/'
 
 
-def relu6(x):
-  return K.relu(x, max_value=6)
-
-
 @tf_export('keras.applications.mobilenet.preprocess_input')
 def preprocess_input(x):
   """Preprocesses a numpy array encoding a batch of images.
@@ -130,12 +122,6 @@
               classes=1000):
   """Instantiates the MobileNet architecture.
 
-  To load a MobileNet model via `load_model`, import the custom
-  objects `relu6` and pass them to the `custom_objects` parameter.
-  E.g.
-  model = load_model('mobilenet.h5', custom_objects={
-                     'relu6': mobilenet.relu6})
-
   Arguments:
       input_shape: optional shape tuple, only to be specified
           if `include_top` is False (otherwise the input shape
@@ -412,7 +398,7 @@
       strides=strides,
       name='conv1')(x)
   x = BatchNormalization(axis=channel_axis, name='conv1_bn')(x)
-  return Activation(relu6, name='conv1_relu')(x)
+  return ReLU(6, name='conv1_relu')(x)
 
 
 def _depthwise_conv_block(inputs,
@@ -479,7 +465,7 @@
       use_bias=False,
       name='conv_dw_%d' % block_id)(x)
   x = BatchNormalization(axis=channel_axis, name='conv_dw_%d_bn' % block_id)(x)
-  x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x)
+  x = ReLU(6, name='conv_dw_%d_relu' % block_id)(x)
 
   x = Conv2D(
       pointwise_conv_filters, (1, 1),
@@ -489,4 +475,4 @@
       name='conv_pw_%d' % block_id)(
           x)
   x = BatchNormalization(axis=channel_axis, name='conv_pw_%d_bn' % block_id)(x)
-  return Activation(relu6, name='conv_pw_%d_relu' % block_id)(x)
+  return ReLU(6, name='conv_pw_%d_relu' % block_id)(x)
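Replacing the `relu6` function with the built-in `ReLU` layer makes the
activation serializable, so a saved MobileNet now round-trips through
`load_model` with no `custom_objects`. The swap in isolation, as a sketch
with an illustrative model:

    from tensorflow import keras

    # Before: Activation(relu6) required registering the custom function at
    # load time. After: a built-in layer with a max_value argument.
    inputs = keras.layers.Input(shape=(4,))
    x = keras.layers.Dense(8)(inputs)
    x = keras.layers.ReLU(6., name='clipped_relu')(x)  # clips activations at 6
    model = keras.models.Model(inputs, x)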
diff --git a/tensorflow/python/keras/callbacks.py b/tensorflow/python/keras/callbacks.py
index 5d66db2..0857a32 100644
--- a/tensorflow/python/keras/callbacks.py
+++ b/tensorflow/python/keras/callbacks.py
@@ -31,13 +31,16 @@
 import numpy as np
 import six
 
+from tensorflow.python.framework import dtypes
 from tensorflow.python.keras import backend as K
-from tensorflow.python.keras import optimizers
+from tensorflow.python.keras.engine.training_utils import standardize_input_data
 from tensorflow.python.keras.utils.generic_utils import Progbar
 from tensorflow.python.ops import array_ops
-from tensorflow.python.ops.resource_variable_ops import ResourceVariable as Variable
+from tensorflow.python.ops import state_ops
+from tensorflow.python.ops import variables
 from tensorflow.python.platform import tf_logging as logging
 from tensorflow.python.summary import summary as tf_summary
+from tensorflow.python.training import saver
 from tensorflow.python.util.tf_export import tf_export
 
 
@@ -644,35 +647,17 @@
     self.verbose = verbose
 
   def on_epoch_begin(self, epoch, logs=None):
-    # TODO(yashkatariya): Change the property checking when the learning
-    # rate attribute is unified across all TF Optimizers.
-    if isinstance(self.model.optimizer, optimizers.TFOptimizer):
-      if not hasattr(self.model.optimizer.optimizer, '_lr') and not hasattr(
-          self.model.optimizer.optimizer, '_learning_rate'):
-        raise ValueError(
-            'TF Optimizer must have a "_lr" or "_learning_rate" attribute.')
-      else:
-        opt = self.model.optimizer.optimizer
-        if hasattr(opt, '_lr'):
-          opt_lr = Variable(opt._lr)  # pylint: disable=protected-access
-        elif hasattr(opt, '_learning_rate'):
-          opt_lr = Variable(opt._learning_rate)  # pylint: disable=protected-access
-    else:
-      if not hasattr(self.model.optimizer, 'lr'):
-        raise ValueError('Optimizer must have a "lr" attribute.')
-      else:
-        opt = self.model.optimizer
-        opt_lr = opt.lr
-
+    if not hasattr(self.model.optimizer, 'lr'):
+      raise ValueError('Optimizer must have a "lr" attribute.')
     try:  # new API
-      lr = float(K.get_value(opt_lr))
+      lr = float(K.get_value(self.model.optimizer.lr))
       lr = self.schedule(epoch, lr)
     except TypeError:  # Support for old API for backward compatibility
       lr = self.schedule(epoch)
     if not isinstance(lr, (float, np.float32, np.float64)):
       raise ValueError('The output of the "schedule" function '
                        'should be float.')
-    K.set_value(opt_lr, lr)
+    K.set_value(self.model.optimizer.lr, lr)
     if self.verbose > 0:
       print('\nEpoch %05d: LearningRateScheduler reducing learning '
             'rate to %s.' % (epoch + 1, lr))
@@ -717,7 +702,9 @@
       write_images: whether to write model weights to visualize as
           image in TensorBoard.
       embeddings_freq: frequency (in epochs) at which selected embedding
-          layers will be saved.
+          layers will be saved. If set to 0, embeddings won't be computed.
+          Data to be visualized in TensorBoard's Embedding tab must be passed
+          as `embeddings_data`.
       embeddings_layer_names: a list of names of layers to keep eye on. If
           None or empty list all the embedding layer will be watched.
       embeddings_metadata: a dictionary which maps layer name to a file name
@@ -725,6 +712,10 @@
           [details](https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)
           about metadata files format. In case if the same metadata file is
           used for all embedding layers, string can be passed.
+      embeddings_data: data to be embedded at layers specified in
+          `embeddings_layer_names`. Numpy array (if the model has a single
+          input) or list of Numpy arrays (if the model has multiple inputs).
+          Learn [more about embeddings](https://www.tensorflow.org/programmers_guide/embedding)
   """
 
   # pylint: enable=line-too-long
@@ -735,7 +726,11 @@
                batch_size=32,
                write_graph=True,
                write_grads=False,
-               write_images=False):
+               write_images=False,
+               embeddings_freq=0,
+               embeddings_layer_names=None,
+               embeddings_metadata=None,
+               embeddings_data=None):
     super(TensorBoard, self).__init__()
     self.log_dir = log_dir
     self.histogram_freq = histogram_freq
@@ -747,6 +742,10 @@
     self._current_batch = 0
     # abstracted writer class to be able to stub for testing
     self._writer_class = tf_summary.FileWriter
+    self.embeddings_freq = embeddings_freq
+    self.embeddings_layer_names = embeddings_layer_names
+    self.embeddings_metadata = embeddings_metadata
+    self.embeddings_data = embeddings_data
 
   def set_model(self, model):
     """Sets Keras model and creates summary ops."""
@@ -798,7 +797,11 @@
             tf_summary.histogram('{}_grad'.format(mapped_weight_name), grads)
 
         if hasattr(layer, 'output'):
-          tf_summary.histogram('{}_out'.format(layer.name), layer.output)
+          if isinstance(layer.output, list):
+            for i, output in enumerate(layer.output):
+              tf_summary.histogram('{}_out_{}'.format(layer.name, i), output)
+          else:
+            tf_summary.histogram('{}_out'.format(layer.name), layer.output)
     self.merged = tf_summary.merge_all()
 
     if self.write_graph:
@@ -806,6 +809,74 @@
     else:
       self.writer = self._writer_class(self.log_dir)
 
+    # If both embedding_freq and embeddings_data are available, we will
+    # visualize embeddings.
+    if self.embeddings_freq and self.embeddings_data is not None:
+      self.embeddings_data = standardize_input_data(self.embeddings_data,
+                                                    model.input_names)
+
+      # If embedding_layer_names are not provided, get all of the embedding
+      # layers from the model.
+      embeddings_layer_names = self.embeddings_layer_names
+      if not embeddings_layer_names:
+        embeddings_layer_names = [
+            layer.name
+            for layer in self.model.layers
+            if type(layer).__name__ == 'Embedding'
+        ]
+
+      self.assign_embeddings = []
+      embeddings_vars = {}
+
+      self.batch_id = batch_id = array_ops.placeholder(dtypes.int32)
+      self.step = step = array_ops.placeholder(dtypes.int32)
+
+      for layer in self.model.layers:
+        if layer.name in embeddings_layer_names:
+          embedding_input = self.model.get_layer(layer.name).output
+          embedding_size = np.prod(embedding_input.shape[1:])
+          embedding_input = array_ops.reshape(embedding_input,
+                                              (step, int(embedding_size)))
+          shape = (self.embeddings_data[0].shape[0], int(embedding_size))
+          embedding = variables.Variable(
+              array_ops.zeros(shape), name=layer.name + '_embedding')
+          embeddings_vars[layer.name] = embedding
+          batch = state_ops.assign(embedding[batch_id:batch_id + step],
+                                   embedding_input)
+          self.assign_embeddings.append(batch)
+
+      self.saver = saver.Saver(list(embeddings_vars.values()))
+
+      # Create embeddings_metadata dictionary
+      if isinstance(self.embeddings_metadata, str):
+        embeddings_metadata = {
+            layer_name: self.embeddings_metadata
+            for layer_name in embeddings_vars.keys()
+        }
+      else:
+        # If embedding_metadata is already a dictionary
+        embeddings_metadata = self.embeddings_metadata
+
+      try:
+        from tensorboard.plugins import projector
+      except ImportError:
+        raise ImportError('Failed to import TensorBoard. Please make sure that '
+                          'TensorBoard integration is complete.')
+
+      # TODO(psv): Add integration tests to test embedding visualization
+      # with TensorBoard callback. We are unable to write a unit test for this
+      # because TensorBoard dependency assumes TensorFlow package is installed.
+      config = projector.ProjectorConfig()
+      for layer_name, tensor in embeddings_vars.items():
+        embedding = config.embeddings.add()
+        embedding.tensor_name = tensor.name
+
+        if (embeddings_metadata is not None and
+            layer_name in embeddings_metadata):
+          embedding.metadata_path = embeddings_metadata[layer_name]
+
+      projector.visualize_embeddings(self.writer, config)
+
   def _fetch_callback(self, summary):
     self.writer.add_summary(
         summary,
@@ -853,6 +924,46 @@
       if self.merged in self.model.test_function.fetch_callbacks:
         self.model.test_function.fetch_callbacks.pop(self.merged)
 
+    if self.embeddings_data is None and self.embeddings_freq:
+      raise ValueError('To visualize embeddings, embeddings_data must '
+                       'be provided.')
+
+    if self.embeddings_freq and self.embeddings_data is not None:
+      if epoch % self.embeddings_freq == 0:
+        # We need a second forward-pass here because we're passing
+        # the `embeddings_data` explicitly. This design allows to pass
+        # arbitrary data as `embeddings_data` and results from the fact
+        # that we need to know the size of the `tf.Variable`s which
+        # hold the embeddings in `set_model`. At this point, however,
+        # the `validation_data` is not yet set.
+
+        embeddings_data = self.embeddings_data
+        n_samples = embeddings_data[0].shape[0]
+        i = 0
+        while i < n_samples:
+          step = min(self.batch_size, n_samples - i)
+          batch = slice(i, i + step)
+
+          if isinstance(self.model.input, list):
+            feed_dict = {
+                model_input: embeddings_data[idx][batch]
+                for idx, model_input in enumerate(self.model.input)
+            }
+          else:
+            feed_dict = {self.model.input: embeddings_data[0][batch]}
+
+          feed_dict.update({self.batch_id: i, self.step: step})
+
+          if self.model.uses_learning_phase:
+            feed_dict[K.learning_phase()] = False
+
+          self.sess.run(self.assign_embeddings, feed_dict=feed_dict)
+          self.saver.save(self.sess,
+                          os.path.join(self.log_dir, 'keras_embedding.ckpt'),
+                          epoch)
+
+          i += self.batch_size
+
     for name, value in logs.items():
       if name in ['batch', 'size']:
         continue
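From the user's side, the restored embedding parameters wire up as in this
sketch; the model, data, and log directory are illustrative:

    import numpy as np
    from tensorflow import keras

    model = keras.models.Sequential([
        keras.layers.Embedding(1000, 8, input_length=10, name='embed'),
        keras.layers.Flatten(),
        keras.layers.Dense(1, activation='sigmoid'),
    ])
    model.compile(loss='binary_crossentropy', optimizer='adam')

    x = np.random.randint(1000, size=(64, 10))
    y = np.random.randint(2, size=(64, 1))

    tb = keras.callbacks.TensorBoard(
        log_dir='/tmp/logs',
        embeddings_freq=1,               # write embeddings every epoch
        embeddings_layer_names=['embed'],
        embeddings_data=x)               # drives the extra forward pass

    model.fit(x, y, epochs=2, callbacks=[tb])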
diff --git a/tensorflow/python/keras/callbacks_test.py b/tensorflow/python/keras/callbacks_test.py
index 244d485..45598ca 100644
--- a/tensorflow/python/keras/callbacks_test.py
+++ b/tensorflow/python/keras/callbacks_test.py
@@ -29,16 +29,10 @@
 
 from tensorflow.core.framework import summary_pb2
 from tensorflow.python import keras
-from tensorflow.python.eager import context
-from tensorflow.python.framework import test_util
 from tensorflow.python.keras import testing_utils
-from tensorflow.python.ops.resource_variable_ops import ResourceVariable as Variable
 from tensorflow.python.platform import test
 from tensorflow.python.platform import tf_logging as logging
 from tensorflow.python.summary.writer import writer_cache
-from tensorflow.python.training.adam import AdamOptimizer
-from tensorflow.python.training.gradient_descent import GradientDescentOptimizer
-
 
 try:
   import h5py  # pylint:disable=g-import-not-at-top
@@ -376,76 +370,6 @@
           float(keras.backend.get_value(
               model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()
 
-  @test_util.run_in_graph_and_eager_modes
-  def test_TF_LearningRateScheduler_Adam(self):
-    with self.test_session():
-      with context.eager_mode():
-        np.random.seed(1337)
-        (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
-            train_samples=TRAIN_SAMPLES,
-            test_samples=TEST_SAMPLES,
-            input_shape=(INPUT_DIM,),
-            num_classes=NUM_CLASSES)
-        y_test = keras.utils.to_categorical(y_test)
-        y_train = keras.utils.to_categorical(y_train)
-        model = keras.models.Sequential()
-        model.add(
-            keras.layers.Dense(
-                NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
-        model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
-        model.compile(
-            loss='categorical_crossentropy',
-            optimizer=AdamOptimizer(),
-            metrics=['accuracy'])
-        cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
-        model.fit(
-            x_train,
-            y_train,
-            batch_size=BATCH_SIZE,
-            validation_data=(x_test, y_test),
-            callbacks=cbks,
-            epochs=5,
-            verbose=0)
-        opt_lr = model.optimizer.optimizer._lr
-        self.assertLess(
-            float(keras.backend.get_value(
-                Variable(opt_lr))) - 0.2, keras.backend.epsilon())
-
-  @test_util.run_in_graph_and_eager_modes
-  def test_TF_LearningRateScheduler_GradientDescent(self):
-    with self.test_session():
-      with context.eager_mode():
-        np.random.seed(1337)
-        (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
-            train_samples=TRAIN_SAMPLES,
-            test_samples=TEST_SAMPLES,
-            input_shape=(INPUT_DIM,),
-            num_classes=NUM_CLASSES)
-        y_test = keras.utils.to_categorical(y_test)
-        y_train = keras.utils.to_categorical(y_train)
-        model = keras.models.Sequential()
-        model.add(
-            keras.layers.Dense(
-                NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
-        model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
-        model.compile(
-            loss='categorical_crossentropy',
-            optimizer=GradientDescentOptimizer(1e-3),
-            metrics=['accuracy'])
-        cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
-        model.fit(
-            x_train,
-            y_train,
-            batch_size=BATCH_SIZE,
-            validation_data=(x_test, y_test),
-            callbacks=cbks,
-            epochs=5,
-            verbose=0)
-        opt_lr = model.optimizer.optimizer._learning_rate
-        self.assertLess(
-            float(keras.backend.get_value(
-                Variable(opt_lr))) - 0.2, keras.backend.epsilon())
-
   def test_ReduceLROnPlateau(self):
     with self.test_session():
       np.random.seed(1337)
diff --git a/tensorflow/python/keras/engine/training_eager.py b/tensorflow/python/keras/engine/training_eager.py
index c78684c..397de42 100644
--- a/tensorflow/python/keras/engine/training_eager.py
+++ b/tensorflow/python/keras/engine/training_eager.py
@@ -34,7 +34,6 @@
 from tensorflow.python.keras import metrics as metrics_module
 from tensorflow.python.keras.engine import training_utils
 from tensorflow.python.keras.utils import generic_utils
-from tensorflow.python.ops import array_ops
 from tensorflow.python.platform import tf_logging as logging
 
 
@@ -194,7 +193,8 @@
                       callbacks=None,
                       callback_metrics=None,
                       validation_steps=None,
-                      do_validation=False):
+                      do_validation=False,
+                      batch_size=None):
   """Fit function for eager execution when input is given as dataset iterator.
 
   Updates the given epoch logs.
@@ -224,16 +224,23 @@
       validation_steps: Number of steps to run validation for (only if doing
         validation from data tensors). Ignored with default value of `None`.
       do_validation: Boolean value indicating whether we should do validation.
+      batch_size: int, `val_inputs` and `val_targets` will be evaluated batch
+        by batch with size `batch_size` if they are arrays.
 
   Raises:
       ValueError: In case of mismatch between given number of inputs and
         expectations of the model.
   """
   assert isinstance(inputs, iterator_ops.EagerIterator)
+
+  # Make sure either (x, y) or (x, y, sample_weights) is provided.
+  if (not isinstance(inputs.output_shapes, (list, tuple)) or
+      len(inputs.output_shapes) not in (2, 3)):
+    raise ValueError('Please provide either inputs and targets '
+                     'or inputs, targets, and sample_weights')
+
   for step_index in range(steps_per_epoch):
-    batch_logs = {}
-    batch_logs['batch'] = step_index
-    batch_logs['size'] = 1
+    batch_logs = {'batch': step_index, 'size': 1}
     callbacks.on_batch_begin(step_index, batch_logs)
 
     # Get data from the iterator.
@@ -247,19 +254,21 @@
           'batches (in this case, %d batches).' % steps_per_epoch * epochs)
       break
 
-    if not isinstance(next_element, (list, tuple)) or len(next_element) != 2:
-      raise ValueError('Please provide data as a list or tuple of 2 elements '
-                       ' - input and target pair. Received %s' % next_element)
-    x, y = next_element
+    if len(inputs.output_shapes) == 2:
+      x, y = next_element
+      sample_weights = None
+    else:
+      x, y, sample_weights = next_element
 
     # Validate and standardize data.
     x, y, sample_weights = model._standardize_user_data(
-        x, y, class_weight=class_weight)
+        x, y, sample_weight=sample_weights, class_weight=class_weight)
     x = training_utils.cast_if_floating_dtype(x)
     y = training_utils.cast_if_floating_dtype(y)
     if sample_weights:
       sample_weights = [
-          ops.convert_to_tensor(val, dtype=backend.floatx())
+          training_utils.cast_if_floating_dtype(
+              ops.convert_to_tensor(val, dtype=backend.floatx()))
           if val is not None else None for val in sample_weights
       ]
 
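For reference, the three-element branch above corresponds to a dataset whose
elements are (x, y, sample_weights) tuples. A minimal sketch of building such
an iterator from arrays (shapes and names here are illustrative, not from
this patch):

```python
import numpy as np
import tensorflow as tf

# Toy inputs, targets, and per-sample weights.
x = np.random.rand(8, 4).astype('float32')
y = np.random.rand(8, 1).astype('float32')
w = np.ones(8, dtype='float32')

# Elements are (x, y, sample_weights) 3-tuples, matching the
# len(inputs.output_shapes) == 3 case handled above.
dataset = tf.data.Dataset.from_tensor_slices((x, y, w)).batch(4)
iterator = dataset.make_one_shot_iterator()
x_batch, y_batch, w_batch = iterator.get_next()
```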
@@ -307,122 +316,8 @@
             val_targets,
             sample_weights=val_sample_weights,
             steps=validation_steps,
-            verbose=0)
-        if not isinstance(val_outs, list):
-          val_outs = [val_outs]
-        # Same labels assumed.
-        for l, o in zip(out_labels, val_outs):
-          epoch_logs['val_' + l] = o
-
-
-def batch_fit_loop(model,
-                   inputs,
-                   targets,
-                   epoch_logs,
-                   index_array,
-                   out_labels,
-                   callback_model,
-                   batch_size,
-                   sample_weights=None,
-                   val_inputs=None,
-                   val_targets=None,
-                   val_sample_weights=None,
-                   callbacks=None,
-                   shuffle=True,
-                   num_train_samples=None,
-                   do_validation=False):
-  """Fit function for eager execution when input is given as arrays or tensors.
-
-  Updates the given epoch logs.
-
-  Arguments:
-      model: Instance of the `Model`.
-      inputs: List of input arrays.
-      targets: List of target arrays.
-      epoch_logs: Dictionary of logs from every epoch.
-      index_array: Index array generated from number of training samples.
-      out_labels: Output labels generated from model metric names.
-      callback_model: Instance of `Model` to callback.
-      batch_size: Integer batch size or None if unknown.
-      sample_weights: Optional list of sample weight arrays.
-      val_inputs: Input data for validation.
-      val_targets: Target data for validation.
-      val_sample_weights: Sample weight data for validation.
-      callbacks: List of callbacks to be called during training.
-      shuffle: Whether to shuffle the data at the beginning of each epoch.
-      num_train_samples: Integer number of training samples.
-      do_validation: Boolean value indicating whether we should do validation.
-  """
-  # TODO(psv): Create a dataset iterator instead of manually creating batches
-  # here and in batch_test_loop, batch_predict_loop.
-  if shuffle == 'batch':
-    index_array = model._batch_shuffle(index_array, batch_size)
-  elif shuffle:
-    np.random.shuffle(index_array)
-
-  batches = generic_utils.make_batches(num_train_samples, batch_size)
-
-  for batch_index, (batch_start, batch_end) in enumerate(batches):
-    batch_ids = index_array[batch_start:batch_end]
-    inputs_batch = slice_arrays(inputs, batch_ids, contiguous=not shuffle)
-    targets_batch = slice_arrays(targets, batch_ids, contiguous=not shuffle)
-    if sample_weights:
-      sample_weights_batch = slice_arrays(
-          sample_weights, batch_ids, contiguous=not shuffle)
-    else:
-      sample_weights_batch = None
-    batch_logs = {}
-    batch_logs['batch'] = batch_index
-    batch_logs['size'] = len(batch_ids)
-
-    callbacks.on_batch_begin(batch_index, batch_logs)
-
-    inputs_batch = [
-        ops.convert_to_tensor(val, dtype=backend.floatx())
-        for val in inputs_batch
-    ]
-    targets_batch = [
-        ops.convert_to_tensor(val, dtype=backend.floatx())
-        for val in targets_batch
-    ]
-    if sample_weights:
-      sample_weights_batch = [
-          ops.convert_to_tensor(val, dtype=backend.floatx())
-          if val is not None else None for val in sample_weights_batch
-      ]
-
-    outs, loss, loss_metrics = _process_single_batch(
-        model,
-        inputs_batch,
-        targets_batch,
-        sample_weights=sample_weights_batch,
-        training=True)
-
-    if not isinstance(outs, list):
-      outs = [outs]
-
-    for l, o in zip(out_labels, outs):
-      batch_logs[l] = o
-    # Required for eager execution
-    metrics_results = _eager_metrics_fn(model, outs, targets_batch)
-    batch_logs['loss'] = tensor_util.constant_value(backend.mean(loss))
-
-    for k, v in zip(model.metrics_names,
-                    [backend.mean(loss)] + loss_metrics + metrics_results):
-      batch_logs[k] = tensor_util.constant_value(v)
-    callbacks.on_batch_end(batch_index, batch_logs)
-    if callback_model.stop_training:
-      break
-
-    if batch_index == len(batches) - 1:  # Last batch.
-      if do_validation:
-        val_outs = test_loop(
-            model,
-            val_inputs,
-            val_targets,
-            sample_weights=val_sample_weights,
-            batch_size=batch_size,
-            verbose=0)
+            verbose=0,
+            batch_size=batch_size)
         if not isinstance(val_outs, list):
           val_outs = [val_outs]
         # Same labels assumed.
@@ -451,6 +346,11 @@
         expectations of the model.
   """
   assert isinstance(inputs, iterator_ops.EagerIterator)
+  # Make sure either (x, y) or (x, y, sample_weights) is provided.
+  if (not isinstance(inputs.output_shapes, (list, tuple)) or
+      len(inputs.output_shapes) < 2 or len(inputs.output_shapes) > 3):
+    raise ValueError('Please provide either inputs and targets '
+                     'or inputs, targets, and sample_weights')
   outs = []
   num_samples = 0
   if verbose == 1:
@@ -466,10 +366,11 @@
           '(in this case, %d batches).', steps)
       break
 
-    if not isinstance(next_element, (list, tuple)) or len(next_element) != 2:
-      raise ValueError('Please provide data as a list or tuple of 2 elements '
-                       ' - input and target pair. Received %s' % next_element)
-    x, y = next_element
+    if len(inputs.output_shapes) == 2:
+      x, y = next_element
+      sample_weights = None
+    else:
+      x, y, sample_weights = next_element
 
     # Validate and standardize data.
     x, y, sample_weights = model._standardize_user_data(x, y)
@@ -512,94 +413,6 @@
   return outs
 
 
-def batch_test_loop(model,
-                    inputs,
-                    targets,
-                    batch_size,
-                    sample_weights=None,
-                    verbose=0):
-  """Test function for eager execution when input is given as arrays or tensors.
-
-  Arguments:
-      model: Model instance that is being evaluated in Eager mode.
-      inputs: List of input arrays.
-      targets: List of target arrays.
-      batch_size: Integer batch size.
-      sample_weights: Optional list of sample weight arrays.
-      verbose: Verbosity mode.
-
-  Returns:
-      Scalar loss (if the model has a single output and no metrics)
-      or list of scalars (if the model has multiple outputs
-      and/or metrics). The attribute `model.metrics_names` will give you
-      the display labels for the scalar outputs.
-  """
-  outs = []
-  feed_data = inputs + targets
-  if sample_weights:
-    feed_data += sample_weights
-  num_samples = training_utils.check_num_samples(
-      feed_data, batch_size=batch_size)
-  if verbose == 1:
-    progbar = generic_utils.Progbar(target=num_samples)
-  batches = generic_utils.make_batches(num_samples, batch_size)
-  index_array = np.arange(num_samples)
-  for batch_index, (batch_start, batch_end) in enumerate(batches):
-    batch_ids = index_array[batch_start:batch_end]
-    inputs_batch = slice_arrays(inputs, batch_ids)
-    targets_batch = slice_arrays(targets, batch_ids)
-    if sample_weights:
-      sample_weights_batch = slice_arrays(sample_weights, batch_ids)
-    else:
-      sample_weights_batch = None
-
-    inputs_batch = [
-        ops.convert_to_tensor(val, dtype=backend.floatx())
-        for val in inputs_batch
-    ]
-    targets_batch = [
-        ops.convert_to_tensor(val, dtype=backend.floatx())
-        for val in targets_batch
-    ]
-    if sample_weights:
-      sample_weights_batch = [
-          ops.convert_to_tensor(val, dtype=backend.floatx())
-          if val is not None else None for val in sample_weights_batch
-      ]
-
-    loss_outs, loss, loss_metrics = _model_loss(
-        model,
-        inputs_batch,
-        targets_batch,
-        sample_weights=sample_weights_batch,
-        training=False)
-    metrics_results = _eager_metrics_fn(model, loss_outs, targets_batch)
-    batch_outs = []
-    for _, v in zip(model.metrics_names,
-                    [backend.mean(loss)] + loss_metrics + metrics_results):
-      batch_outs.append(tensor_util.constant_value(v))
-
-    if isinstance(batch_outs, list):
-      if batch_index == 0:
-        for _ in enumerate(batch_outs):
-          outs.append(0.)
-      for i, batch_out in enumerate(batch_outs):
-        outs[i] += batch_out * len(batch_ids)
-    else:
-      if batch_index == 0:
-        outs.append(0.)
-      outs[0] += batch_outs * len(batch_ids)
-
-    if verbose == 1:
-      progbar.update(batch_end)
-
-  for i in range(len(outs)):
-    outs[i] /= num_samples
-  if len(outs) == 1:
-    return outs[0]
-  return outs
-
-
 def iterator_predict_loop(model, inputs, steps, verbose=0):
   """Predict function for eager execution when input is dataset iterator.
 
@@ -619,6 +432,12 @@
         expectations of the model.
   """
   assert isinstance(inputs, iterator_ops.EagerIterator)
+  if not isinstance(inputs.output_shapes,
+                    (list, tuple)) or len(inputs.output_shapes) > 2:
+    raise ValueError(
+        'Please provide data as a list or tuple of 1 or 2 elements '
+        '- input, or input and target pair. Received %s. We do not use the '
+        '`target` value here.' % inputs.output_shapes)
   outs = []
   if verbose == 1:
     progbar = generic_utils.Progbar(target=steps)
@@ -634,12 +453,8 @@
           'batches (in this case, %d batches).', steps)
       break
 
-    if not isinstance(next_element, (list, tuple)) or len(next_element) != 2:
-      raise ValueError(
-          'Please provide data as a list or tuple of 2 elements '
-          ' - input and target pair. Received %s. We do not use the '
-          '`target` value here.' % next_element)
-    x, _ = next_element
+    # Expects a tuple whose first element is the inputs.
+    x = next_element[0]
 
     # Validate and standardize data.
     x, _, _ = model._standardize_user_data(x)
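A predict-style dataset only needs to yield inputs; wrapping the array in a
1-tuple keeps `next_element[0]` well defined. A hedged sketch (the array
shape is illustrative):

```python
import numpy as np
import tensorflow as tf

x = np.random.rand(8, 4).astype('float32')

# Note the trailing comma: elements are 1-tuples (x,), so next_element[0]
# is the input batch and no target is expected.
dataset = tf.data.Dataset.from_tensor_slices((x,)).batch(4)
iterator = dataset.make_one_shot_iterator()
x_batch = iterator.get_next()[0]
```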
@@ -670,99 +485,6 @@
   return outs
 
 
-def batch_predict_loop(model, inputs, batch_size, verbose=0):
-  """Predict function for eager execution when input is arrays or tensors.
-
-  Arguments:
-      model: Instance of `Model`.
-      inputs: List of input arrays.
-      batch_size: Integer batch size.
-      verbose: Verbosity mode.
-
-  Returns:
-      Array of predictions (if the model has a single output)
-      or list of arrays of predictions (if the model has multiple outputs).
-  """
-  outs = []
-  num_samples = training_utils.check_num_samples(inputs, batch_size)
-  if verbose == 1:
-    progbar = generic_utils.Progbar(target=num_samples)
-  batches = generic_utils.make_batches(num_samples, batch_size)
-  index_array = np.arange(num_samples)
-  for batch_index, (batch_start, batch_end) in enumerate(batches):
-    batch_ids = index_array[batch_start:batch_end]
-    inputs_batch = slice_arrays(inputs, batch_ids)
-
-    inputs_batch = [
-        ops.convert_to_tensor(val, dtype=backend.floatx())
-        for val in inputs_batch
-    ]
-
-    if len(inputs_batch) == 1:
-      if model._expects_training_arg:
-        batch_outs = model.call(inputs_batch[0], training=False)
-      else:
-        batch_outs = model.call(inputs_batch[0])
-    else:
-      if model._expects_training_arg:
-        batch_outs = model.call(inputs_batch, training=False)
-      else:
-        batch_outs = model.call(inputs_batch)
-
-    if not isinstance(batch_outs, list):
-      batch_outs = [batch_outs]
-    if batch_index == 0:
-      # Pre-allocate the results arrays.
-      for batch_out in batch_outs:
-        dims = batch_out.shape[1:].dims
-        dims_list = [d.value for d in dims]
-        shape = (num_samples,) + tuple(dims_list)
-        outs.append(np.zeros(shape, dtype=batch_out.dtype.as_numpy_dtype))
-    for i, batch_out in enumerate(batch_outs):
-      outs[i][batch_start:batch_end] = batch_out
-    if verbose == 1:
-      progbar.update(batch_end)
-
-  if len(outs) == 1:
-    return outs[0]
-  return outs
-
-
-def slice_arrays(arrays, indices, contiguous=True):
-  """Slices batches out of provided arrays (workaround for eager tensors).
-
-  Unfortunately eager tensors don't have the same slicing behavior as
-  Numpy arrays (they follow the same slicing behavior as symbolic TF tensors),
-  hence we cannot use `generic_utils.slice_arrays` directly
-  and we have to implement this workaround based on `concat`. This has a
-  performance cost.
-
-  Arguments:
-    arrays: Single array or list of arrays.
-    indices: List of indices in the array that should be included in the output
-      batch.
-    contiguous: Boolean flag indicating whether the indices are contiguous.
-
-  Returns:
-    Slice of data (either single array or list of arrays).
-  """
-  if any(tensor_util.is_tensor(x) for x in arrays):
-    converted_to_list = False
-    if not isinstance(arrays, list):
-      converted_to_list = True
-      arrays = [arrays]
-    if not contiguous:
-      entries = [[x[i:i + 1] for i in indices] for x in arrays]
-      slices = [array_ops.concat(x, axis=0) for x in entries]
-    else:
-      slices = [x[indices[0]:indices[-1] + 1] for x in arrays]
-    if converted_to_list:
-      slices = slices[0]
-    return slices
-  else:
-    return generic_utils.slice_arrays(arrays, indices)
-
-
 def _process_single_batch(model,
                           inputs,
                           targets,
@@ -935,19 +657,24 @@
   Raises:
     ValueError: In case of invalid argument values.
   """
+  # Convert training inputs to an EagerIterator
+  inputs, steps_per_epoch = training_utils.convert_to_iterator(
+      x=inputs,
+      y=targets,
+      sample_weights=sample_weights,
+      batch_size=batch_size,
+      steps_per_epoch=steps_per_epoch,
+      epochs=epochs,
+      shuffle=shuffle)
   # Required for eager execution
   with backend.learning_phase_scope(1):
     do_validation = False
     if val_inputs:
       do_validation = True
-      if (steps_per_epoch is None and verbose and inputs and
-          hasattr(inputs[0], 'shape') and hasattr(val_inputs[0], 'shape')):
-        print('Train on %d samples, validate on %d samples' %
-              (inputs[0].shape[0], val_inputs[0].shape[0]))
 
     num_train_samples = None
     out_labels = None
-    if steps_per_epoch is None or model._is_compiled:
+    if model._is_compiled:
       out_labels = model.metrics_names
       if do_validation:
         callback_metrics = copy.copy(out_labels) + [
@@ -956,28 +683,10 @@
       else:
         callback_metrics = copy.copy(out_labels)
 
-    if steps_per_epoch is None:
-      if sample_weights:
-        feed_data = inputs + targets + sample_weights
-      else:
-        feed_data = inputs + targets
-      num_train_samples = training_utils.check_num_samples(
-          feed_data,
-          batch_size=batch_size,
-          steps=steps_per_epoch,
-          steps_name='steps_per_epoch')
-
-      if num_train_samples is not None:
-        index_array = np.arange(num_train_samples)
-
     model.history = cbks.History()
     callbacks = [cbks.BaseLogger()] + (callbacks or []) + [model.history]
     if verbose:
-      if steps_per_epoch is not None:
-        count_mode = 'steps'
-      else:
-        count_mode = 'samples'
-      callbacks += [cbks.ProgbarLogger(count_mode)]
+      callbacks += [cbks.ProgbarLogger('steps')]
     callbacks = cbks.CallbackList(callbacks)
 
     # it's possible to callback a different model than self
@@ -1019,43 +728,24 @@
     for epoch in range(initial_epoch, epochs):
       callbacks.on_epoch_begin(epoch)
       epoch_logs = {}
-
-      if steps_per_epoch is not None:
-        iterator_fit_loop(
-            model,
-            inputs,
-            class_weight,
-            steps_per_epoch=steps_per_epoch,
-            callback_model=callback_model,
-            out_labels=out_labels,
-            epoch_logs=epoch_logs,
-            val_inputs=val_inputs,
-            val_targets=val_targets,
-            val_sample_weights=val_sample_weights,
-            epochs=epochs,
-            verbose=verbose,
-            callbacks=callbacks,
-            callback_metrics=callback_metrics,
-            validation_steps=validation_steps,
-            do_validation=do_validation)
-      else:
-        batch_fit_loop(
-            model,
-            inputs,
-            targets,
-            epoch_logs=epoch_logs,
-            index_array=index_array,
-            out_labels=out_labels,
-            callback_model=callback_model,
-            batch_size=batch_size,
-            sample_weights=sample_weights,
-            val_inputs=val_inputs,
-            val_targets=val_targets,
-            val_sample_weights=val_sample_weights,
-            callbacks=callbacks,
-            shuffle=shuffle,
-            num_train_samples=num_train_samples,
-            do_validation=do_validation)
+      iterator_fit_loop(
+          model,
+          inputs,
+          class_weight,
+          steps_per_epoch=steps_per_epoch,
+          callback_model=callback_model,
+          out_labels=out_labels,
+          epoch_logs=epoch_logs,
+          val_inputs=val_inputs,
+          val_targets=val_targets,
+          val_sample_weights=val_sample_weights,
+          epochs=epochs,
+          verbose=verbose,
+          callbacks=callbacks,
+          callback_metrics=callback_metrics,
+          validation_steps=validation_steps,
+          do_validation=do_validation,
+          batch_size=batch_size)
       callbacks.on_epoch_end(epoch, epoch_logs)
       if callback_model.stop_training:
         break
@@ -1087,17 +777,14 @@
       and/or metrics). The attribute `model.metrics_names` will give you
       the display labels for the scalar outputs.
   """
+  inputs, steps = training_utils.convert_to_iterator(
+      x=inputs,
+      y=targets,
+      sample_weights=sample_weights,
+      batch_size=batch_size,
+      steps_per_epoch=steps)
   with backend.learning_phase_scope(0):
-    if steps is not None:
-      return iterator_test_loop(model, inputs, steps, verbose=verbose)
-    else:
-      return batch_test_loop(
-          model,
-          inputs,
-          targets,
-          batch_size=batch_size,
-          sample_weights=sample_weights,
-          verbose=verbose)
+    return iterator_test_loop(model, inputs, steps, verbose=verbose)
 
 
 def predict_loop(model, inputs,
@@ -1121,8 +808,6 @@
       (if the model has multiple outputs).
   """
   with backend.learning_phase_scope(0):
-    if steps is not None:
-      return iterator_predict_loop(model, inputs, steps, verbose=verbose)
-    else:
-      return batch_predict_loop(
-          model, inputs, batch_size=batch_size, verbose=verbose)
+    inputs, steps = training_utils.convert_to_iterator(
+        x=inputs, batch_size=batch_size, steps_per_epoch=steps)
+    return iterator_predict_loop(model, inputs, steps, verbose=verbose)
diff --git a/tensorflow/python/keras/engine/training_utils.py b/tensorflow/python/keras/engine/training_utils.py
index 728a2b4..dbbc87d 100644
--- a/tensorflow/python/keras/engine/training_utils.py
+++ b/tensorflow/python/keras/engine/training_utils.py
@@ -19,9 +19,11 @@
 from __future__ import print_function
 
 import copy
+import math
 
 import numpy as np
 
+from tensorflow.python.data.ops import dataset_ops
 from tensorflow.python.data.ops import iterator_ops
 from tensorflow.python.eager import context
 from tensorflow.python.framework import tensor_util
@@ -31,6 +33,135 @@
 from tensorflow.python.ops import math_ops
 
 
+def _map_nested(data, func):
+  """Maps each nested element using func."""
+  if isinstance(data, list):
+    return [_map_nested(nested_data, func) for nested_data in data]
+  elif isinstance(data, tuple):
+    return tuple(_map_nested(nested_data, func) for nested_data in data)
+  elif isinstance(data, dict):
+    return {
+        k: _map_nested(nested_data, func) for k, nested_data in data.items()
+    }
+  else:
+    return func(data)
+
+
+def _nested_all(data, cond_func):
+  """Checks if all elements in a nested structure satisfy cond_func."""
+  if isinstance(data, (tuple, list)):
+    return all([_nested_all(nested_data, cond_func) for nested_data in data])
+  elif isinstance(data, dict):
+    return all(
+        [_nested_all(nested_data, cond_func) for nested_data in data.values()])
+  else:
+    return cond_func(data)
+
+
+def _nested_any(data, cond_func):
+  """Checks if any nested_elements in a nested structure satisfy cond_func."""
+  if isinstance(data, (tuple, list)):
+    return any([_nested_any(nested_data, cond_func) for nested_data in data])
+  elif isinstance(data, dict):
+    return any(
+        [_nested_any(nested_data, cond_func) for nested_data in data.values()])
+  else:
+    return cond_func(data)
+
+
+def _convert_lists_to_tuples(data):
+  """Converts all lists to tuples, since Datasets expect tuples."""
+  if isinstance(data, (tuple, list)):
+    return tuple(_convert_lists_to_tuples(nested_data) for nested_data in data)
+  elif isinstance(data, dict):
+    return {
+        k: _convert_lists_to_tuples(nested_data)
+        for k, nested_data in data.items()
+    }
+  else:
+    return data
+
+
+def _get_batch_axis_size(data):
+  """Returns batch axis shape for nested data."""
+  if isinstance(data, (tuple, list)):
+    return _get_batch_axis_size(data[0])
+  elif isinstance(data, dict):
+    return _get_batch_axis_size(list(data.values()))
+  else:
+    return int(data.shape[0])
+
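These helpers walk arbitrarily nested lists, tuples, and dicts. A quick
illustration of their behavior (the values are made up):

```python
data = {'a': [1, 2], 'b': ({'c': 3},)}

# _map_nested applies func to every leaf while preserving the structure.
doubled = _map_nested(data, lambda v: v * 2)
# -> {'a': [2, 4], 'b': ({'c': 6},)}

# _nested_all / _nested_any reduce a predicate over all leaves.
all_positive = _nested_all(data, lambda v: v > 0)  # True
any_large = _nested_any(data, lambda v: v > 2)     # True
```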
+
+def convert_to_iterator(x=None,
+                        y=None,
+                        sample_weights=None,
+                        batch_size=None,
+                        steps_per_epoch=None,
+                        epochs=1,
+                        shuffle=False):
+  """Converts NumPy arrays or EagerTensors to an EagerIterator.
+
+  Combines all provided data into a single EagerIterator.
+
+  Arguments:
+      x: NumPy array or EagerTensor, or list of NumPy arrays or EagerTensors
+        representing inputs to a model.
+      y: Optional. NumPy array or EagerTensor, or list of NumPy arrays or
+        EagerTensors representing targets of a model.
+      sample_weights: Optional NumPy array or EagerTensor representing sample
+        weights.
+      batch_size: Used to batch data and calculate how many steps EagerIterator
+        should take per epoch.
+      steps_per_epoch: If provided, how many steps EagerIterator should take per
+        epoch.
+      epochs: Epochs to repeat iterator for.
+      shuffle: Whether to shuffle data after each epoch.
+
+  Raises:
+      ValueError: if steps_per_epoch cannot be calculated from the data
+      provided.
+
+  Returns:
+      (Iterator, steps_per_epoch).
+
+  """
+  if isinstance(x, iterator_ops.EagerIterator):
+    return x, steps_per_epoch
+
+  if not _nested_any(sample_weights, lambda x: x is None):
+    data = (x, y, sample_weights)
+  elif not _nested_any(y, lambda x: x is None):
+    data = (x, y)
+  else:
+    # Always wrap in a tuple, so we know y and sample_weights weren't set,
+    # even when x has multiple elements.
+    data = (x,)
+
+  data = _convert_lists_to_tuples(data)
+  if steps_per_epoch is None and batch_size is not None:
+    num_samples = _get_batch_axis_size(data)
+    steps_per_epoch = int(math.ceil(num_samples / batch_size))
+
+  if steps_per_epoch is None:
+    raise ValueError('Could not determine steps_per_epoch. '
+                     'Please provide either batch_size or '
+                     'steps_per_epoch.')
+
+  # TODO(omalleyt): For NumPy arrays in graph mode, placeholder ops should be
+  # used instead; this approach is only ideal for eager mode.
+  dataset = dataset_ops.Dataset.from_tensor_slices(data)
+
+  if batch_size is not None:
+    dataset = dataset.batch(batch_size)
+  if shuffle:
+    dataset = dataset.shuffle(buffer_size=10000)
+  dataset = dataset.repeat(epochs)
+  iterator = dataset.make_one_shot_iterator()
+
+  return iterator, steps_per_epoch
+
+
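A minimal usage sketch of the new conversion helper, run under eager
execution (array sizes are illustrative):

```python
import numpy as np
from tensorflow.python.keras.engine import training_utils

x = np.ones((10, 3), dtype='float32')
y = np.zeros((10, 1), dtype='float32')

# 10 samples with batch_size=4 -> ceil(10 / 4) == 3 steps per epoch;
# the final batch holds the remaining 2 samples.
iterator, steps_per_epoch = training_utils.convert_to_iterator(
    x=x, y=y, batch_size=4)
for _ in range(steps_per_epoch):
  x_batch, y_batch = iterator.get_next()
```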
 def check_num_samples(ins,
                       batch_size=None,
                       steps=None,
@@ -128,8 +259,8 @@
     except KeyError as e:
       raise ValueError('No data provided for "' + e.args[0] + '". Need data '
                        'for each key in: ' + str(names))
-  elif isinstance(data, list):
-    if isinstance(data[0], list):
+  elif isinstance(data, (list, tuple)):
+    if isinstance(data[0], (list, tuple)):
       data = [np.asarray(d) for d in data]
     elif len(names) == 1 and isinstance(data[0], (float, int)):
       data = [np.asarray(data)]
@@ -482,6 +613,9 @@
   Raises:
       ValueError: In case of invalid user-provided arguments.
   """
+  # An iterator may return sample_weight as a 1-tuple.
+  if isinstance(sample_weight, tuple):
+    sample_weight = sample_weight[0]
   if sample_weight_mode is not None:
     if sample_weight_mode != 'temporal':
       raise ValueError('"sample_weight_mode '
diff --git a/tensorflow/python/keras/engine/training_utils_test.py b/tensorflow/python/keras/engine/training_utils_test.py
new file mode 100644
index 0000000..297a1ae
--- /dev/null
+++ b/tensorflow/python/keras/engine/training_utils_test.py
@@ -0,0 +1,150 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for training utility functions."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import test_util
+from tensorflow.python.keras.engine import training_utils
+from tensorflow.python.platform import test
+
+
+class TrainingUtilTest(test.TestCase):
+
+  @test_util.run_in_graph_and_eager_modes
+  def test_convert_to_iterator_single_numpy(self):
+    batch_size = 2
+    a = np.ones([10, 10])
+    iterator, steps_per_epoch = training_utils.convert_to_iterator(
+        x=a, batch_size=batch_size)
+    self.assertEquals(steps_per_epoch, 5)
+
+    expected_batch = a[:batch_size, :]
+    actual_batch, = iterator.get_next()
+    self.assertAllEqual(expected_batch, actual_batch)
+
+  @test_util.run_in_graph_and_eager_modes
+  def test_convert_to_iterator_single_tensor(self):
+    batch_size = 2
+    a = ops.convert_to_tensor(np.ones([10, 10]))
+    iterator, steps_per_epoch = training_utils.convert_to_iterator(
+        x=a, batch_size=batch_size)
+    self.assertEquals(steps_per_epoch, 5)
+
+    expected_batch = a[:batch_size, :]
+    actual_batch, = iterator.get_next()
+    self.assertAllEqual(expected_batch, actual_batch)
+
+  @test_util.run_in_graph_and_eager_modes
+  def test_convert_to_iterator_y(self):
+    batch_size = 2
+    a = np.ones([10, 100])
+    b = np.ones([10, 10])
+    iterator, steps_per_epoch = training_utils.convert_to_iterator(
+        x=a, y=b, batch_size=batch_size)
+    self.assertEquals(steps_per_epoch, 5)
+
+    expected_x = a[:batch_size, :]
+    expected_y = b[:batch_size, :]
+    actual_x, actual_y = iterator.get_next()
+    self.assertAllEqual(expected_x, actual_x)
+    self.assertAllEqual(expected_y, actual_y)
+
+  @test_util.run_in_graph_and_eager_modes
+  def test_convert_to_iterator_sample_weights(self):
+    batch_size = 2
+    a = ops.convert_to_tensor(np.ones([10, 100]))
+    b = ops.convert_to_tensor(np.ones([10, 10]))
+    sw = ops.convert_to_tensor(np.ones([10]))
+    iterator, steps_per_epoch = training_utils.convert_to_iterator(
+        x=a, y=b, sample_weights=sw, batch_size=batch_size)
+    self.assertEquals(steps_per_epoch, 5)
+
+    expected_x = a[:batch_size, :]
+    expected_y = b[:batch_size, :]
+    expected_sw = sw[:batch_size]
+    actual_x, actual_y, actual_sw = iterator.get_next()
+    self.assertAllEqual(expected_x, actual_x)
+    self.assertAllEqual(expected_y, actual_y)
+    self.assertAllEqual(expected_sw, actual_sw)
+
+  @test_util.run_in_graph_and_eager_modes
+  def test_convert_to_iterator_nested(self):
+    batch_size = 2
+    x = {'1': np.ones([10, 100]), '2': [np.zeros([10, 10]), np.ones([10, 20])]}
+    iterator, steps_per_epoch = training_utils.convert_to_iterator(
+        x=x, batch_size=batch_size)
+    self.assertEquals(steps_per_epoch, 5)
+
+    expected_x1 = x['1'][:batch_size, :]
+    expected_x2_0 = x['2'][0][:batch_size, :]
+    expected_x2_1 = x['2'][1][:batch_size, :]
+
+    actual_x, = iterator.get_next()
+    actual_x1 = actual_x['1'][:batch_size, :]
+    actual_x2_0 = actual_x['2'][0][:batch_size, :]
+    actual_x2_1 = actual_x['2'][1][:batch_size, :]
+
+    self.assertAllEqual(expected_x1, actual_x1)
+    self.assertAllEqual(expected_x2_0, actual_x2_0)
+    self.assertAllEqual(expected_x2_1, actual_x2_1)
+
+  @test_util.run_in_graph_and_eager_modes
+  def test_convert_to_iterator_epochs(self):
+    batch_size = 2
+    a = np.ones([10, 10])
+    iterator, steps_per_epoch = training_utils.convert_to_iterator(
+        x=a, batch_size=batch_size, epochs=2)
+    self.assertEquals(steps_per_epoch, 5)
+
+    expected_batch = a[:batch_size, :]
+    # Loop through one whole epoch and into the first batch of the second.
+    for _ in range(6):
+      actual_batch, = iterator.get_next()
+    self.assertAllEqual(expected_batch, actual_batch)
+
+  @test_util.run_in_graph_and_eager_modes
+  def test_convert_to_iterator_insufficient_info(self):
+    # with batch_size and steps_per_epoch not set
+    with self.assertRaises(ValueError):
+      a = np.ones([10, 10])
+      _ = training_utils.convert_to_iterator(x=a)
+
+  def test_nested_all(self):
+    nested_data = {'a': True, 'b': [True, True, (False, True)]}
+    all_true = training_utils._nested_all(nested_data, lambda x: x)
+    self.assertEquals(all_true, False)
+
+    nested_data = {'a': True, 'b': [True, True, (True, True)]}
+    all_true = training_utils._nested_all(nested_data, lambda x: x)
+    self.assertEquals(all_true, True)
+
+  def test_nested_any(self):
+    nested_data = [False, {'a': False, 'b': (False, True)}]
+    any_true = training_utils._nested_any(nested_data, lambda x: x)
+    self.assertEquals(any_true, True)
+
+    nested_data = [False, {'a': False, 'b': (False, False)}]
+    any_true = training_utils._nested_any(nested_data, lambda x: x)
+    self.assertEquals(any_true, False)
+
+
+if __name__ == '__main__':
+  test.main()
diff --git a/tensorflow/python/keras/layers/convolutional_recurrent.py b/tensorflow/python/keras/layers/convolutional_recurrent.py
index 84d794c..e61dd30 100644
--- a/tensorflow/python/keras/layers/convolutional_recurrent.py
+++ b/tensorflow/python/keras/layers/convolutional_recurrent.py
@@ -788,7 +788,7 @@
 
   Arguments:
     filters: Integer, the dimensionality of the output space
-        (i.e. the number output of filters in the convolution).
+        (i.e. the number of output filters in the convolution).
     kernel_size: An integer or tuple/list of n integers, specifying the
         dimensions of the convolution window.
     strides: An integer or tuple/list of n integers,
diff --git a/tensorflow/python/keras/layers/normalization.py b/tensorflow/python/keras/layers/normalization.py
index 58c8a8a..a7835bc 100644
--- a/tensorflow/python/keras/layers/normalization.py
+++ b/tensorflow/python/keras/layers/normalization.py
@@ -370,7 +370,7 @@
         decay = ops.convert_to_tensor(1.0 - momentum, name='decay')
         if decay.dtype != variable.dtype.base_dtype:
           decay = math_ops.cast(decay, variable.dtype.base_dtype)
-        update_delta = (variable - value) * decay
+        update_delta = (variable - math_ops.cast(value, variable.dtype)) * decay
         return state_ops.assign_sub(variable, update_delta, name=scope)
 
   def _fused_batch_norm(self, inputs, training):
@@ -619,6 +619,10 @@
     else:
       mean, variance = self.moving_mean, self.moving_variance
 
+    mean = math_ops.cast(mean, inputs.dtype)
+    variance = math_ops.cast(variance, inputs.dtype)
+    if offset is not None:
+      offset = math_ops.cast(offset, inputs.dtype)
     outputs = nn.batch_normalization(inputs,
                                      _broadcast(mean),
                                      _broadcast(variance),
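The casts above let float16 activations be normalized against float32 moving
statistics without dtype mismatches. Conceptually (a NumPy sketch, not the
layer's actual code path):

```python
import numpy as np

def normalize_mixed(inputs_f16, mean_f32, var_f32, beta_f32, gamma_f32,
                    epsilon=1e-3):
  # Cast the float32 statistics down to the input dtype before combining,
  # mirroring the math_ops.cast calls added above.
  mean = mean_f32.astype(inputs_f16.dtype)
  var = var_f32.astype(inputs_f16.dtype)
  beta = beta_f32.astype(inputs_f16.dtype)
  gamma = gamma_f32.astype(inputs_f16.dtype)
  return gamma * (inputs_f16 - mean) / np.sqrt(var + epsilon) + beta
```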
diff --git a/tensorflow/python/keras/layers/normalization_test.py b/tensorflow/python/keras/layers/normalization_test.py
index b22f3bd..a97b4ca 100644
--- a/tensorflow/python/keras/layers/normalization_test.py
+++ b/tensorflow/python/keras/layers/normalization_test.py
@@ -95,6 +95,24 @@
       np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
       np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)
 
+  def test_batchnorm_mixed_precision(self):
+    with self.test_session():
+      model = keras.models.Sequential()
+      norm = keras.layers.BatchNormalization(input_shape=(10,), momentum=0.8)
+      model.add(norm)
+      model.compile(loss='mse', optimizer='sgd')
+
+      # Centered on 5.0 with standard deviation 10.0.
+      x = np.random.normal(
+          loc=5.0, scale=10.0, size=(1000, 10)).astype(np.float16)
+      model.fit(x, x, epochs=4, verbose=0)
+      out = model.predict(x)
+      out -= keras.backend.eval(norm.beta)
+      out /= keras.backend.eval(norm.gamma)
+
+      np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
+      np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)
+
   def test_batchnorm_convnet(self):
     if test.is_gpu_available(cuda_only=True):
       with self.test_session(use_gpu=True):
diff --git a/tensorflow/python/keras/layers/recurrent.py b/tensorflow/python/keras/layers/recurrent.py
index 61775da..534c0ec 100644
--- a/tensorflow/python/keras/layers/recurrent.py
+++ b/tensorflow/python/keras/layers/recurrent.py
@@ -37,6 +37,7 @@
 from tensorflow.python.ops import math_ops
 from tensorflow.python.ops import state_ops
 from tensorflow.python.platform import tf_logging as logging
+from tensorflow.python.training.checkpointable import base as checkpointable
 from tensorflow.python.util.tf_export import tf_export
 
 
@@ -403,6 +404,8 @@
                        'one integer per RNN state).')
     super(RNN, self).__init__(**kwargs)
     self.cell = cell
+    if isinstance(cell, checkpointable.CheckpointableBase):
+      self._track_checkpointable(self.cell, name='cell')
     self.return_sequences = return_sequences
     self.return_state = return_state
     self.go_backwards = go_backwards
diff --git a/tensorflow/python/keras/layers/recurrent_test.py b/tensorflow/python/keras/layers/recurrent_test.py
index 802374d..fefb928 100644
--- a/tensorflow/python/keras/layers/recurrent_test.py
+++ b/tensorflow/python/keras/layers/recurrent_test.py
@@ -28,6 +28,7 @@
 from tensorflow.python.ops import math_ops
 from tensorflow.python.ops import state_ops
 from tensorflow.python.platform import test
+from tensorflow.python.training.checkpointable import util as checkpointable_util
 
 
 class RNNTest(test.TestCase):
@@ -556,5 +557,22 @@
         [tuple(o.as_list()) for o in output_shape],
         expected_output_shape)
 
+  def test_checkpointable_dependencies(self):
+    rnn = keras.layers.SimpleRNN
+    with self.test_session():
+      x = np.random.random((2, 2, 2))
+      y = np.random.random((2, 2))
+      model = keras.models.Sequential()
+      model.add(rnn(2))
+      model.compile(optimizer='rmsprop', loss='mse')
+      model.fit(x, y, epochs=1, batch_size=1)
+
+      # check whether the model variables are present in the
+      # checkpointable list of objects
+      checkpointed_objects = set(checkpointable_util.list_objects(model))
+      for v in model.variables:
+        self.assertIn(v, checkpointed_objects)
+
+
 if __name__ == '__main__':
   test.main()
diff --git a/tensorflow/python/keras/layers/wrappers.py b/tensorflow/python/keras/layers/wrappers.py
index f651e03..f0c1e76 100644
--- a/tensorflow/python/keras/layers/wrappers.py
+++ b/tensorflow/python/keras/layers/wrappers.py
@@ -47,7 +47,6 @@
   def __init__(self, layer, **kwargs):
     assert isinstance(layer, Layer)
     self.layer = layer
-    self._track_checkpointable(layer, name='layer')
     # Tracks mapping of Wrapper inputs to inner layer inputs. Useful when
     # the inner layer has update ops that depend on its inputs (as opposed
     # to the inputs to the Wrapper layer).
@@ -168,6 +167,7 @@
           '`Layer` instance. You passed: {input}'.format(input=layer))
     super(TimeDistributed, self).__init__(layer, **kwargs)
     self.supports_masking = True
+    self._track_checkpointable(layer, name='layer')
 
   def _get_shape_tuple(self, init_tuple, tensor, start_idx, int_shape=None):
     """Finds non-specific dimensions in the static shapes.
@@ -417,6 +417,8 @@
     self._num_constants = None
     super(Bidirectional, self).__init__(layer, **kwargs)
     self.input_spec = layer.input_spec
+    self._track_checkpointable(self.forward_layer, name='forward_layer')
+    self._track_checkpointable(self.backward_layer, name='backward_layer')
 
   @property
   def trainable(self):
@@ -526,7 +528,8 @@
     else:
       return super(Bidirectional, self).__call__(inputs, **kwargs)
 
-  def call(self, inputs,
+  def call(self,
+           inputs,
            training=None,
            mask=None,
            initial_state=None,
diff --git a/tensorflow/python/keras/layers/wrappers_test.py b/tensorflow/python/keras/layers/wrappers_test.py
index 3f268ac..0cd774e 100644
--- a/tensorflow/python/keras/layers/wrappers_test.py
+++ b/tensorflow/python/keras/layers/wrappers_test.py
@@ -87,6 +87,8 @@
     # test config
     model.get_config()
 
+    # check whether the model variables are present in the
+    # checkpointable list of objects
     checkpointed_objects = set(checkpointable_util.list_objects(model))
     for v in model.variables:
       self.assertIn(v, checkpointed_objects)
@@ -278,6 +280,12 @@
         model.compile(optimizer=RMSPropOptimizer(0.01), loss='mse')
         model.fit(x, y, epochs=1, batch_size=1)
 
+        # check whether the model variables are present in the
+        # checkpointable list of objects
+        checkpointed_objects = set(checkpointable_util.list_objects(model))
+        for v in model.variables:
+          self.assertIn(v, checkpointed_objects)
+
         # test compute output shape
         ref_shape = model.layers[-1].output.get_shape()
         shape = model.layers[-1].compute_output_shape(
diff --git a/tensorflow/python/kernel_tests/linalg/BUILD b/tensorflow/python/kernel_tests/linalg/BUILD
index 69d3aa4..487418e 100644
--- a/tensorflow/python/kernel_tests/linalg/BUILD
+++ b/tensorflow/python/kernel_tests/linalg/BUILD
@@ -197,7 +197,7 @@
 
 cuda_py_test(
     name = "linear_operator_low_rank_update_test",
-    size = "medium",
+    size = "large",
     srcs = ["linear_operator_low_rank_update_test.py"],
     additional_deps = [
         "//tensorflow/python/ops/linalg",
diff --git a/tensorflow/python/lib/core/numpy.h b/tensorflow/python/lib/core/numpy.h
index d4621d6..0098d93 100644
--- a/tensorflow/python/lib/core/numpy.h
+++ b/tensorflow/python/lib/core/numpy.h
@@ -30,9 +30,10 @@
 #endif
 
 // Place `<locale>` before <Python.h> to avoid build failure in macOS.
-#include <Python.h>
 #include <locale>
 
+#include <Python.h>
+
 #include "numpy/arrayobject.h"
 #include "numpy/ufuncobject.h"
 
diff --git a/tensorflow/python/lib/core/py_util.cc b/tensorflow/python/lib/core/py_util.cc
index 6b6c820..2ee898e 100644
--- a/tensorflow/python/lib/core/py_util.cc
+++ b/tensorflow/python/lib/core/py_util.cc
@@ -16,9 +16,10 @@
 #include "tensorflow/python/lib/core/py_util.h"
 
 // Place `<locale>` before <Python.h> to avoid build failure in macOS.
-#include <Python.h>
 #include <locale>
 
+#include <Python.h>
+
 #include "tensorflow/core/lib/core/errors.h"
 #include "tensorflow/core/lib/strings/strcat.h"
 
diff --git a/tensorflow/python/ops/control_flow_ops.py b/tensorflow/python/ops/control_flow_ops.py
index 04545cc..888075b 100644
--- a/tensorflow/python/ops/control_flow_ops.py
+++ b/tensorflow/python/ops/control_flow_ops.py
@@ -3146,7 +3146,7 @@
   happen is that the thread updating `x` can never get ahead of the
   counter thread because the thread incrementing `x` depends on the value
   of the counter.
-  
+
   ```python
   import tensorflow as tf
 
diff --git a/tensorflow/python/ops/image_ops_impl.py b/tensorflow/python/ops/image_ops_impl.py
index 5b384fd..9440bab 100644
--- a/tensorflow/python/ops/image_ops_impl.py
+++ b/tensorflow/python/ops/image_ops_impl.py
@@ -1753,6 +1753,22 @@
     return math_ops.equal(substr, b'\xff\xd8\xff', name=name)
 
 
+def _is_png(contents, name=None):
+  r"""Convenience function to check if the 'contents' encodes a PNG image.
+
+  Args:
+    contents: 0-D `string`. The encoded image bytes.
+    name: A name for the operation (optional)
+
+  Returns:
+     A scalar boolean tensor indicating if 'contents' may be a PNG image.
+     The check is susceptible to false positives, since only the first
+     three signature bytes are examined.
+  """
+  with ops.name_scope(name, 'is_png'):
+    substr = string_ops.substr(contents, 0, 3)
+    return math_ops.equal(substr, b'\211PN', name=name)
+
+
 @tf_export('image.decode_image')
 def decode_image(contents, channels=None, dtype=dtypes.uint8, name=None):
   """Convenience function for `decode_bmp`, `decode_gif`, `decode_jpeg`,
@@ -1830,8 +1846,8 @@
 
     def check_png():
       """Checks if an image is PNG."""
-      is_png = math_ops.equal(substr, b'\211PN', name='is_png')
-      return control_flow_ops.cond(is_png, _png, check_gif, name='cond_png')
+      return control_flow_ops.cond(
+          _is_png(contents), _png, check_gif, name='cond_png')
 
     def _jpeg():
       """Decodes a jpeg image."""
diff --git a/tensorflow/python/ops/linalg_ops.py b/tensorflow/python/ops/linalg_ops.py
index a0dfa54..f4a9356 100644
--- a/tensorflow/python/ops/linalg_ops.py
+++ b/tensorflow/python/ops/linalg_ops.py
@@ -401,7 +401,7 @@
   import tensorflow as tf
   import numpy as np
   s, u, v = tf.linalg.svd(a)
-  tf_a_approx = tf.matmul(u, tf.matmul(tf.linalg.diag(s), v, adjoint_v=True))
+  tf_a_approx = tf.matmul(u, tf.matmul(tf.linalg.diag(s), v, adjoint_b=True))
   u, s, v_adj = np.linalg.svd(a, full_matrices=False)
   np_a_approx = np.dot(u, np.dot(np.diag(s), v_adj))
   # tf_a_approx and np_a_approx should be numerically close.
diff --git a/tensorflow/python/ops/parallel_for/BUILD b/tensorflow/python/ops/parallel_for/BUILD
index 065c2ca..6c804a5 100644
--- a/tensorflow/python/ops/parallel_for/BUILD
+++ b/tensorflow/python/ops/parallel_for/BUILD
@@ -125,5 +125,4 @@
         "//tensorflow/python:random_ops",
         "//tensorflow/python/ops/losses",
     ],
-    tags = ["no_gpu"],  # TODO(b/80127739): test is flaky
 )
diff --git a/tensorflow/python/ops/parallel_for/gradients_test.py b/tensorflow/python/ops/parallel_for/gradients_test.py
index 310a215..3a6d914 100644
--- a/tensorflow/python/ops/parallel_for/gradients_test.py
+++ b/tensorflow/python/ops/parallel_for/gradients_test.py
@@ -19,6 +19,7 @@
 from __future__ import print_function
 
 import functools
+import os
 import time
 
 import numpy as np
@@ -444,6 +445,10 @@
     self.run_and_assert_equal(pfor_outputs, while_outputs)
 
   def test_mnist_per_eg_grad(self):
+    # The CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED configuration of
+    # Winograd appears to produce low-precision output that makes this test
+    # fail, so we disable it here.
+    os.environ["TF_ENABLE_WINOGRAD_NONFUSED"] = "0"
     data_format = ("channels_first"
                    if test.is_gpu_available() else "channels_last")
     # Note that we are setting training=False here so that dropout produces
@@ -451,8 +456,13 @@
     pfor_outputs, while_outputs = create_mnist_per_eg_grad(
         4, data_format, training=False)
     self.run_and_assert_equal(pfor_outputs, while_outputs, rtol=1e-3)
+    os.environ.pop("TF_ENABLE_WINOGRAD_NONFUSED", None)
 
   def test_mnist_per_eg_jacobian(self):
+    # The CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED configuration of
+    # Winograd appears to produce low-precision output that makes this test
+    # fail, so we disable it here.
+    os.environ["TF_ENABLE_WINOGRAD_NONFUSED"] = "0"
     data_format = ("channels_first"
                    if test.is_gpu_available() else "channels_last")
     # Note that we are setting training=False here so that dropout produces
@@ -460,6 +470,7 @@
     pfor_outputs, while_outputs = create_mnist_per_eg_jacobian(
         2, data_format, training=False)
     self.run_and_assert_equal(pfor_outputs, while_outputs, rtol=1e-3)
+    os.environ.pop("TF_ENABLE_WINOGRAD_NONFUSED", None)
 
   def test_fc_jacobian(self):
     jacobians, per_eg_jacobians_pfor, per_eg_jacobians_while = (
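Setting and popping the environment variable by hand leaks the override if an
assertion fails partway through the test. A context manager is one hedged
alternative (not part of this patch):

```python
import contextlib
import os

@contextlib.contextmanager
def env_override(name, value):
  """Temporarily sets an environment variable, restoring it on exit."""
  old = os.environ.get(name)
  os.environ[name] = value
  try:
    yield
  finally:
    if old is None:
      os.environ.pop(name, None)
    else:
      os.environ[name] = old

# Usage inside a test:
#   with env_override('TF_ENABLE_WINOGRAD_NONFUSED', '0'):
#     ...run the precision-sensitive computation...
```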
diff --git a/tensorflow/python/ops/resource_variable_ops.py b/tensorflow/python/ops/resource_variable_ops.py
index 70a89e5..1f56ad2 100644
--- a/tensorflow/python/ops/resource_variable_ops.py
+++ b/tensorflow/python/ops/resource_variable_ops.py
@@ -181,7 +181,8 @@
                                                       name=name)
 
 
-class ResourceVariable(variables.Variable):
+# TODO(apassos) make this be variables.Variable
+class ResourceVariable(variables.RefVariable):
   """Variable based on resource handles.
 
   See the @{$variables$Variables How To} for a high level overview.
diff --git a/tensorflow/python/ops/variables.py b/tensorflow/python/ops/variables.py
index d3b8da6..87e0de1 100644
--- a/tensorflow/python/ops/variables.py
+++ b/tensorflow/python/ops/variables.py
@@ -17,6 +17,8 @@
 from __future__ import division
 from __future__ import print_function
 
+import six
+
 from tensorflow.core.framework import attr_value_pb2
 from tensorflow.core.framework import variable_pb2
 from tensorflow.python.eager import context
@@ -36,8 +38,32 @@
 from tensorflow.python.util.tf_export import tf_export
 
 
+def _default_variable_creator(_, *args, **kwds):
+  return RefVariable(*args, **kwds)
+
+
+def _make_getter(captured_getter, captured_previous):
+  """To avoid capturing loop variables."""
+  def getter(*args, **kwargs):
+    return captured_getter(captured_previous, *args, **kwargs)
+  return getter
+
+
+class VariableMetaclass(type):
+  """Metaclass to allow construction of tf.Variable to be overridden."""
+
+  def __call__(cls, *args, **kwargs):
+    if cls is Variable:
+      previous_getter = lambda *a, **k: _default_variable_creator(None, *a, **k)
+      # TODO(apassos) use a stack of getters here
+      return previous_getter(*args, **kwargs)
+    else:
+      return super(VariableMetaclass, cls).__call__(*args, **kwargs)
+
+
 @tf_export("Variable")
-class Variable(checkpointable.CheckpointableBase):
+class Variable(six.with_metaclass(VariableMetaclass,
+                                  checkpointable.CheckpointableBase)):
   """See the @{$variables$Variables How To} for a high level overview.
 
   A variable maintains state in the graph across calls to `run()`. You add a
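The metaclass intercepts construction of `Variable` itself while letting
subclasses construct normally. The pattern in isolation (a toy sketch with
made-up names):

```python
import six

class _Meta(type):

  def __call__(cls, *args, **kwargs):
    if cls is Base:
      # Constructing the base class is rerouted to a default factory,
      # mirroring _default_variable_creator above.
      return Impl(*args, **kwargs)
    return super(_Meta, cls).__call__(*args, **kwargs)

class Base(six.with_metaclass(_Meta, object)):
  pass

class Impl(Base):

  def __init__(self, value):
    self.value = value

v = Base(42)  # Actually builds an Impl via the metaclass hook.
assert type(v) is Impl and v.value == 42
```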
@@ -234,6 +260,554 @@
     for details on how variables work in eager execution.
     @end_compatibility
     """
+    raise NotImplementedError
+
+  def __repr__(self):
+    raise NotImplementedError
+
+  def value(self):
+    """Returns the last snapshot of this variable.
+
+    You usually do not need to call this method as all ops that need the value
+    of the variable call it automatically through a `convert_to_tensor()` call.
+
+    Returns a `Tensor` which holds the value of the variable.  You can not
+    assign a new value to this tensor as it is not a reference to the variable.
+
+    To avoid copies, if the consumer of the returned value is on the same device
+    as the variable, this actually returns the live value of the variable, not
+    a copy.  Updates to the variable are seen by the consumer.  If the consumer
+    is on a different device it will get a copy of the variable.
+
+    Returns:
+      A `Tensor` containing the value of the variable.
+    """
+    raise NotImplementedError
+
+  def read_value(self):
+    """Returns the value of this variable, read in the current context.
+
+    Can be different from value() if it's on another device, with control
+    dependencies, etc.
+
+    Returns:
+      A `Tensor` containing the value of the variable.
+    """
+    raise NotImplementedError
+
+  def set_shape(self, shape):
+    """Overrides the shape for this variable.
+
+    Args:
+      shape: the `TensorShape` representing the overridden shape.
+    """
+    raise NotImplementedError
+
+  @property
+  def trainable(self):
+    raise NotImplementedError
+
+  def eval(self, session=None):
+    """In a session, computes and returns the value of this variable.
+
+    This is not a graph construction method, it does not add ops to the graph.
+
+    This convenience method requires a session where the graph
+    containing this variable has been launched. If no session is
+    passed, the default session is used.  See @{tf.Session} for more
+    information on launching a graph and on sessions.
+
+    ```python
+    v = tf.Variable([1, 2])
+    init = tf.global_variables_initializer()
+
+    with tf.Session() as sess:
+        sess.run(init)
+        # Usage passing the session explicitly.
+        print(v.eval(sess))
+        # Usage with the default session.  The 'with' block
+        # above makes 'sess' the default session.
+        print(v.eval())
+    ```
+
+    Args:
+      session: The session to use to evaluate this variable. If
+        none, the default session is used.
+
+    Returns:
+      A numpy `ndarray` with a copy of the value of this variable.
+    """
+    raise NotImplementedError
+
+  def initialized_value(self):
+    """Returns the value of the initialized variable.
+
+    You should use this instead of the variable itself to initialize another
+    variable with a value that depends on the value of this variable.
+
+    ```python
+    # Initialize 'v' with a random tensor.
+    v = tf.Variable(tf.truncated_normal([10, 40]))
+    # Use `initialized_value` to guarantee that `v` has been
+    # initialized before its value is used to initialize `w`.
+    # The random values are picked only once.
+    w = tf.Variable(v.initialized_value() * 2.0)
+    ```
+
+    Returns:
+      A `Tensor` holding the value of this variable after its initializer
+      has run.
+    """
+    raise NotImplementedError
+
+  @property
+  def initial_value(self):
+    """Returns the Tensor used as the initial value for the variable.
+
+    Note that this is different from `initialized_value()` which runs
+    the op that initializes the variable before returning its value.
+    This method returns the tensor that is used by the op that initializes
+    the variable.
+
+    Returns:
+      A `Tensor`.
+    """
+    raise NotImplementedError
+
+  @property
+  def constraint(self):
+    """Returns the constraint function associated with this variable.
+
+    Returns:
+      The constraint function that was passed to the variable constructor.
+      Can be `None` if no constraint was passed.
+    """
+    raise NotImplementedError
+
+  def assign(self, value, use_locking=False):
+    """Assigns a new value to the variable.
+
+    This is essentially a shortcut for `assign(self, value)`.
+
+    Args:
+      value: A `Tensor`. The new value for this variable.
+      use_locking: If `True`, use locking during the assignment.
+
+    Returns:
+      A `Tensor` that will hold the new value of this variable after
+      the assignment has completed.
+    """
+    raise NotImplementedError
+
+  def assign_add(self, delta, use_locking=False):
+    """Adds a value to this variable.
+
+    This is essentially a shortcut for `assign_add(self, delta)`.
+
+    Args:
+      delta: A `Tensor`. The value to add to this variable.
+      use_locking: If `True`, use locking during the operation.
+
+    Returns:
+      A `Tensor` that will hold the new value of this variable after
+      the addition has completed.
+    """
+    raise NotImplementedError
+
+  def assign_sub(self, delta, use_locking=False):
+    """Subtracts a value from this variable.
+
+    This is essentially a shortcut for `assign_sub(self, delta)`.
+
+    Args:
+      delta: A `Tensor`. The value to subtract from this variable.
+      use_locking: If `True`, use locking during the operation.
+
+    Returns:
+      A `Tensor` that will hold the new value of this variable after
+      the subtraction has completed.
+    """
+    raise NotImplementedError
+
+  def scatter_sub(self, sparse_delta, use_locking=False):
+    """Subtracts `IndexedSlices` from this variable.
+
+    This is essentially a shortcut for `scatter_sub(self, sparse_delta.indices,
+    sparse_delta.values)`.
+
+    Args:
+      sparse_delta: `IndexedSlices` to be subtracted from this variable.
+      use_locking: If `True`, use locking during the operation.
+
+    Returns:
+      A `Tensor` that will hold the new value of this variable after
+      the scattered subtraction has completed.
+
+    Raises:
+      ValueError: if `sparse_delta` is not an `IndexedSlices`.
+    """
+    raise NotImplementedError
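
A small illustrative sketch of the sparse update; per the docstring, passing anything other than an `IndexedSlices` raises `ValueError`:

```python
import tensorflow as tf

v = tf.Variable([10.0, 20.0, 30.0])
delta = tf.IndexedSlices(values=tf.constant([1.0, 2.0]),
                         indices=tf.constant([0, 2]))

with tf.Session() as sess:
  sess.run(v.initializer)
  # Subtracts 1.0 at index 0 and 2.0 at index 2.
  print(sess.run(v.scatter_sub(delta)))  # [ 9. 20. 28.]
```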
+
+  def count_up_to(self, limit):
+    """Increments this variable until it reaches `limit`.
+
+    When the returned Op is run it tries to increment the variable by `1`. If
+    incrementing the variable would bring it above `limit` then the Op raises
+    the exception `OutOfRangeError`.
+
+    If no error is raised, the Op outputs the value of the variable before
+    the increment.
+
+    This is essentially a shortcut for `count_up_to(self, limit)`.
+
+    Args:
+      limit: value at which incrementing the variable raises an error.
+
+    Returns:
+      A `Tensor` that will hold the variable value before the increment. If no
+      other Op modifies this variable, the values produced will all be
+      distinct.
+    """
+    raise NotImplementedError
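
An illustrative sketch of the counting behavior: each run returns the pre-increment value until the limit is reached:

```python
import tensorflow as tf

v = tf.Variable(0)             # int32 variable
next_val = v.count_up_to(3)

with tf.Session() as sess:
  sess.run(v.initializer)
  print(sess.run(next_val))    # 0
  print(sess.run(next_val))    # 1
  print(sess.run(next_val))    # 2
  # A fourth run would raise tf.errors.OutOfRangeError.
```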
+
+  def load(self, value, session=None):
+    """Load new value into this variable.
+
+    Writes new value to variable's memory. Doesn't add ops to the graph.
+
+    This convenience method requires a session where the graph
+    containing this variable has been launched. If no session is
+    passed, the default session is used.  See @{tf.Session} for more
+    information on launching a graph and on sessions.
+
+    ```python
+    v = tf.Variable([1, 2])
+    init = tf.global_variables_initializer()
+
+    with tf.Session() as sess:
+        sess.run(init)
+        # Usage passing the session explicitly.
+        v.load([2, 3], sess)
+        print(v.eval(sess)) # prints [2 3]
+        # Usage with the default session.  The 'with' block
+        # above makes 'sess' the default session.
+        v.load([3, 4])
+        print(v.eval()) # prints [3 4]
+    ```
+
+    Args:
+        value: New variable value.
+        session: The session to use to load the value into this
+          variable. If `None`, the default session is used.
+
+    Raises:
+        ValueError: If no session is passed and no default session exists.
+    """
+    raise NotImplementedError
+
+  # Conversion to tensor.
+  @staticmethod
+  def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False):  # pylint: disable=invalid-name
+    """Utility function for converting a Variable to a Tensor."""
+    _ = name
+    if dtype and not dtype.is_compatible_with(v.dtype):
+      raise ValueError(
+          "Incompatible type conversion requested to type '%s' for variable "
+          "of type '%s'" % (dtype.name, v.dtype.name))
+    if as_ref:
+      return v._ref()  # pylint: disable=protected-access
+    else:
+      return v.value()
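
This registered converter is what lets a `Variable` stand in wherever a `Tensor` is expected; a brief illustrative sketch:

```python
import tensorflow as tf

v = tf.Variable([1.0, 2.0])
t = tf.convert_to_tensor(v)  # routed through the conversion function above
s = v + 1.0                  # implicit conversion inside ordinary math ops
# Requesting an incompatible dtype raises ValueError, as coded above:
# tf.convert_to_tensor(v, dtype=tf.int32)
```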
+
+  @staticmethod
+  def _OverloadAllOperators():  # pylint: disable=invalid-name
+    """Register overloads for all operators."""
+    for operator in ops.Tensor.OVERLOADABLE_OPERATORS:
+      Variable._OverloadOperator(operator)
+    # For slicing, bind getitem differently than a tensor (use SliceHelperVar
+    # instead)
+    # pylint: disable=protected-access
+    setattr(Variable, "__getitem__", array_ops._SliceHelperVar)
+
+  @staticmethod
+  def _OverloadOperator(operator):  # pylint: disable=invalid-name
+    """Defer an operator overload to `ops.Tensor`.
+
+    We pull the operator out of ops.Tensor dynamically to avoid ordering issues.
+
+    Args:
+      operator: string. The operator name.
+    """
+
+    def _run_op(a, *args):
+      # pylint: disable=protected-access
+      return getattr(ops.Tensor, operator)(a._AsTensor(), *args)
+    # Propagate __doc__ to wrapper
+    try:
+      _run_op.__doc__ = getattr(ops.Tensor, operator).__doc__
+    except AttributeError:
+      pass
+
+    setattr(Variable, operator, _run_op)
+
+  # NOTE(mrry): This enables the Variable's overloaded "right" binary
+  # operators to run when the left operand is an ndarray, because it
+  # accords the Variable class higher priority than an ndarray, or a
+  # numpy matrix.
+  # TODO(mrry): Convert this to using numpy's __numpy_ufunc__
+  # mechanism, which allows more control over how Variables interact
+  # with ndarrays.
+  __array_priority__ = 100
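
Concretely, the elevated priority makes numpy defer to the Variable's reflected operators; a quick illustrative check:

```python
import numpy as np
import tensorflow as tf

v = tf.Variable([1.0, 2.0])
# ndarray.__add__ yields NotImplemented because the Variable's
# __array_priority__ is higher, so Python falls back to v.__radd__.
result = np.array([10.0, 20.0]) + v
print(type(result))  # a tf.Tensor, not an ndarray
```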
+
+  @property
+  def name(self):
+    """The name of this variable."""
+    raise NotImplementedError
+
+  @property
+  def initializer(self):
+    """The initializer operation for this variable."""
+    raise NotImplementedError
+
+  @property
+  def device(self):
+    """The device of this variable."""
+    raise NotImplementedError
+
+  @property
+  def dtype(self):
+    """The `DType` of this variable."""
+    raise NotImplementedError
+
+  @property
+  def op(self):
+    """The `Operation` of this variable."""
+    raise NotImplementedError
+
+  @property
+  def graph(self):
+    """The `Graph` of this variable."""
+    raise NotImplementedError
+
+  @property
+  def shape(self):
+    """The `TensorShape` of this variable.
+
+    Returns:
+      A `TensorShape`.
+    """
+    raise NotImplementedError
+
+  def get_shape(self):
+    """Alias of Variable.shape."""
+    raise NotImplementedError
+
+  def to_proto(self, export_scope=None):
+    """Converts a `Variable` to a `VariableDef` protocol buffer.
+
+    Args:
+      export_scope: Optional `string`. Name scope to remove.
+
+    Returns:
+      A `VariableDef` protocol buffer, or `None` if the `Variable` is not
+      in the specified name scope.
+    """
+    raise NotImplementedError
+
+  @staticmethod
+  def from_proto(variable_def, import_scope=None):
+    """Returns a `Variable` object created from `variable_def`."""
+    return Variable(variable_def=variable_def,
+                    import_scope=import_scope)
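
A round-trip sketch (illustrative; this assumes `from_proto` rebinds to the already-existing graph nodes, as the ref-based implementation does, rather than creating new ones):

```python
import tensorflow as tf

v = tf.Variable([1.0, 2.0], name="weights")
proto = v.to_proto()                # VariableDef protocol buffer
v2 = tf.Variable.from_proto(proto)  # references the same graph nodes
assert v2.name == v.name
```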
+
+  class SaveSliceInfo(object):
+    """Information on how to save this Variable as a slice.
+
+    Provides internal support for saving variables as slices of a larger
+    variable.  This API is not public and is subject to change.
+
+    Available properties:
+
+    * full_name
+    * full_shape
+    * var_offset
+    * var_shape
+    """
+
+    def __init__(self,
+                 full_name=None,
+                 full_shape=None,
+                 var_offset=None,
+                 var_shape=None,
+                 save_slice_info_def=None,
+                 import_scope=None):
+      """Create a `SaveSliceInfo`.
+
+      Args:
+        full_name: Name of the full variable of which this `Variable` is a
+            slice.
+        full_shape: Shape of the full variable, as a list of int.
+        var_offset: Offset of this `Variable` into the full variable, as a
+            list of int.
+        var_shape: Shape of this `Variable`, as a list of int.
+        save_slice_info_def: `SaveSliceInfoDef` protocol buffer. If not `None`,
+          recreates the `SaveSliceInfo` object from its contents.
+          `save_slice_info_def` and the other arguments are mutually
+          exclusive.
+        import_scope: Optional `string`. Name scope to add. Only used
+          when initializing from protocol buffer.
+      """
+      if save_slice_info_def:
+        assert isinstance(save_slice_info_def, variable_pb2.SaveSliceInfoDef)
+        self.full_name = ops.prepend_name_scope(
+            save_slice_info_def.full_name, import_scope=import_scope)
+        self.full_shape = [i for i in save_slice_info_def.full_shape]
+        self.var_offset = [i for i in save_slice_info_def.var_offset]
+        self.var_shape = [i for i in save_slice_info_def.var_shape]
+      else:
+        self.full_name = full_name
+        self.full_shape = full_shape
+        self.var_offset = var_offset
+        self.var_shape = var_shape
+
+    @property
+    def spec(self):
+      """Computes the spec string used for saving."""
+      full_shape_str = " ".join(["%d" % d for d in self.full_shape]) + " "
+      sl_spec = ":".join([
+          "%d,%d" % (o, s) for o, s in zip(self.var_offset, self.var_shape)
+      ])
+      return full_shape_str + sl_spec
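
A worked example of the spec format, which is the full shape followed by one `offset,size` pair per dimension (hypothetical numbers):

```python
import tensorflow as tf

info = tf.Variable.SaveSliceInfo(
    full_name="weights",   # hypothetical
    full_shape=[10, 20],   # full variable is 10 x 20
    var_offset=[2, 0],     # this slice starts at row 2, column 0
    var_shape=[4, 20])     # and spans 4 rows by 20 columns
print(info.spec)  # "10 20 2,4:0,20"
```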
+
+    def to_proto(self, export_scope=None):
+      """Returns a SaveSliceInfoDef() proto.
+
+      Args:
+        export_scope: Optional `string`. Name scope to remove.
+
+      Returns:
+        A `SaveSliceInfoDef` protocol buffer, or None if the `Variable` is not
+        in the specified name scope.
+      """
+      if (export_scope is None or
+          self.full_name.startswith(export_scope)):
+        save_slice_info_def = variable_pb2.SaveSliceInfoDef()
+        save_slice_info_def.full_name = ops.strip_name_scope(
+            self.full_name, export_scope)
+        for i in self.full_shape:
+          save_slice_info_def.full_shape.append(i)
+        for i in self.var_offset:
+          save_slice_info_def.var_offset.append(i)
+        for i in self.var_shape:
+          save_slice_info_def.var_shape.append(i)
+        return save_slice_info_def
+      else:
+        return None
+
+  def __iadd__(self, other):
+    raise NotImplementedError
+
+  def __isub__(self, other):
+    raise NotImplementedError
+
+  def __imul__(self, other):
+    raise NotImplementedError
+
+  def __idiv__(self, other):
+    raise NotImplementedError
+
+  def __itruediv__(self, other):
+    raise NotImplementedError
+
+  def __irealdiv__(self, other):
+    raise NotImplementedError
+
+  def __ipow__(self, other):
+    raise NotImplementedError
+
+
+# TODO(apassos): do not repeat all comments here
+class RefVariable(Variable):
+  """Ref-based implementation of variables."""
+
+  def __init__(self,
+               initial_value=None,
+               trainable=True,
+               collections=None,
+               validate_shape=True,
+               caching_device=None,
+               name=None,
+               variable_def=None,
+               dtype=None,
+               expected_shape=None,
+               import_scope=None,
+               constraint=None):
+    """Creates a new variable with value `initial_value`.
+
+    The new variable is added to the graph collections listed in `collections`,
+    which defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
+
+    If `trainable` is `True` the variable is also added to the graph collection
+    `GraphKeys.TRAINABLE_VARIABLES`.
+
+    This constructor creates both a `variable` Op and an `assign` Op to set the
+    variable to its initial value.
+
+    Args:
+      initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
+        which is the initial value for the Variable. The initial value must have
+        a shape specified unless `validate_shape` is set to False. Can also be a
+        callable with no argument that returns the initial value when called. In
+        that case, `dtype` must be specified. (Note that initializer functions
+        from init_ops.py must first be bound to a shape before being used here.)
+      trainable: If `True`, the default, also adds the variable to the graph
+        collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
+        the default list of variables to use by the `Optimizer` classes.
+      collections: List of graph collections keys. The new variable is added to
+        these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
+      validate_shape: If `False`, allows the variable to be initialized with a
+        value of unknown shape. If `True`, the default, the shape of
+        `initial_value` must be known.
+      caching_device: Optional device string describing where the Variable
+        should be cached for reading.  Defaults to the Variable's device.
+        If not `None`, caches on another device.  Typical use is to cache
+        on the device where the Ops using the Variable reside, to deduplicate
+        copying through `Switch` and other conditional statements.
+      name: Optional name for the variable. Defaults to `'Variable'` and gets
+        uniquified automatically.
+      variable_def: `VariableDef` protocol buffer. If not `None`, recreates
+        the Variable object with its contents, referencing the variable's nodes
+        in the graph, which must already exist. The graph is not changed.
+        `variable_def` and the other arguments are mutually exclusive.
+      dtype: If set, initial_value will be converted to the given type.
+        If `None`, either the datatype will be kept (if `initial_value` is
+        a Tensor), or `convert_to_tensor` will decide.
+      expected_shape: A TensorShape. If set, initial_value is expected
+        to have this shape.
+      import_scope: Optional `string`. Name scope to add to the
+        `Variable`. Only used when initializing from protocol buffer.
+      constraint: An optional projection function to be applied to the variable
+        after being updated by an `Optimizer` (e.g. used to implement norm
+        constraints or value constraints for layer weights). The function must
+        take as input the unprojected Tensor representing the value of the
+        variable and return the Tensor for the projected value
+        (which must have the same shape). Constraints are not safe to
+        use when doing asynchronous distributed training.
+
+    Raises:
+      ValueError: If both `variable_def` and initial_value are specified.
+      ValueError: If the initial value is not specified, or does not have a
+        shape and `validate_shape` is `True`.
+      RuntimeError: If eager execution is enabled.
+
+    @compatibility(eager)
+    `tf.Variable` is not compatible with eager execution.  Use
+    `tfe.Variable` instead, which is compatible with both eager execution
+    and graph construction.  See [the TensorFlow Eager Execution
+    guide](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/g3doc/guide.md#variables-and-optimizers)
+    for details on how variables work in eager execution.
+    @end_compatibility
+    """
     if context.executing_eagerly():
       raise RuntimeError(
           "tf.Variable not supported when eager execution is enabled. "
@@ -1068,12 +1642,6 @@
     else:
       return None
 
-  @staticmethod
-  def from_proto(variable_def, import_scope=None):
-    """Returns a `Variable` object created from `variable_def`."""
-    return Variable(variable_def=variable_def,
-                    import_scope=import_scope)
-
   def __iadd__(self, other):
     logging.log_first_n(
         logging.WARN,
@@ -1130,90 +1698,6 @@
         " if you want a new python Tensor object.", 1)
     return self ** other
 
-  class SaveSliceInfo(object):
-    """Information on how to save this Variable as a slice.
-
-    Provides internal support for saving variables as slices of a larger
-    variable.  This API is not public and is subject to change.
-
-    Available properties:
-
-    * full_name
-    * full_shape
-    * var_offset
-    * var_shape
-    """
-
-    def __init__(self,
-                 full_name=None,
-                 full_shape=None,
-                 var_offset=None,
-                 var_shape=None,
-                 save_slice_info_def=None,
-                 import_scope=None):
-      """Create a `SaveSliceInfo`.
-
-      Args:
-        full_name: Name of the full variable of which this `Variable` is a
-            slice.
-        full_shape: Shape of the full variable, as a list of int.
-        var_offset: Offset of this `Variable` into the full variable, as a
-            list of int.
-        var_shape: Shape of this `Variable`, as a list of int.
-        save_slice_info_def: `SaveSliceInfoDef` protocol buffer. If not `None`,
-          recreates the SaveSliceInfo object its contents.
-          `save_slice_info_def` and other arguments are mutually
-          exclusive.
-        import_scope: Optional `string`. Name scope to add. Only used
-          when initializing from protocol buffer.
-      """
-      if save_slice_info_def:
-        assert isinstance(save_slice_info_def, variable_pb2.SaveSliceInfoDef)
-        self.full_name = ops.prepend_name_scope(
-            save_slice_info_def.full_name, import_scope=import_scope)
-        self.full_shape = [i for i in save_slice_info_def.full_shape]
-        self.var_offset = [i for i in save_slice_info_def.var_offset]
-        self.var_shape = [i for i in save_slice_info_def.var_shape]
-      else:
-        self.full_name = full_name
-        self.full_shape = full_shape
-        self.var_offset = var_offset
-        self.var_shape = var_shape
-
-    @property
-    def spec(self):
-      """Computes the spec string used for saving."""
-      full_shape_str = " ".join(["%d" % d for d in self.full_shape]) + " "
-      sl_spec = ":".join([
-          "%d,%d" % (o, s) for o, s in zip(self.var_offset, self.var_shape)
-      ])
-      return full_shape_str + sl_spec
-
-    def to_proto(self, export_scope=None):
-      """Returns a SaveSliceInfoDef() proto.
-
-      Args:
-        export_scope: Optional `string`. Name scope to remove.
-
-      Returns:
-        A `SaveSliceInfoDef` protocol buffer, or None if the `Variable` is not
-        in the specified name scope.
-      """
-      if (export_scope is None or
-          self.full_name.startswith(export_scope)):
-        save_slice_info_def = variable_pb2.SaveSliceInfoDef()
-        save_slice_info_def.full_name = ops.strip_name_scope(
-            self.full_name, export_scope)
-        for i in self.full_shape:
-          save_slice_info_def.full_shape.append(i)
-        for i in self.var_offset:
-          save_slice_info_def.var_offset.append(i)
-        for i in self.var_shape:
-          save_slice_info_def.var_shape.append(i)
-        return save_slice_info_def
-      else:
-        return None
-
   def _set_save_slice_info(self, save_slice_info):
     """Sets the slice info for this `Variable`.
 
diff --git a/tensorflow/python/profiler/model_analyzer_test.py b/tensorflow/python/profiler/model_analyzer_test.py
index f9891f3..c0e16ca 100644
--- a/tensorflow/python/profiler/model_analyzer_test.py
+++ b/tensorflow/python/profiler/model_analyzer_test.py
@@ -106,7 +106,7 @@
               # Make sure time is profiled.
               gap = 1 if test.is_gpu_available() else 2
               for i in range(3, 6, gap):
-                mat = re.search('(.*)[um]s/(.*)[um]s', metrics[i])
+                mat = re.search('(.*)(?:us|ms|sec)/(.*)(?:us|ms|sec)', metrics[i])
                 self.assertGreater(float(mat.group(1)), 0.0)
                 self.assertGreater(float(mat.group(2)), 0.0)
               # Make sure device is profiled.
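
The widened pattern accepts `us`, `ms`, and `sec` on either side of the slash, whereas the old `[um]s` character class missed `sec`; a quick illustrative check:

```python
import re

pattern = r'(.*)(?:us|ms|sec)/(.*)(?:us|ms|sec)'
for sample in ['1.2ms/3.4ms', '5us/6us', '0.5sec/1.1sec']:
  mat = re.search(pattern, sample)
  print(float(mat.group(1)), float(mat.group(2)))
# The last sample would not match the previous '(.*)[um]s/(.*)[um]s' pattern.
```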
diff --git a/tensorflow/tools/api/generator/BUILD b/tensorflow/python/tools/api/generator/BUILD
similarity index 71%
rename from tensorflow/tools/api/generator/BUILD
rename to tensorflow/python/tools/api/generator/BUILD
index 8c760e6..223d128 100644
--- a/tensorflow/tools/api/generator/BUILD
+++ b/tensorflow/python/tools/api/generator/BUILD
@@ -3,8 +3,9 @@
 
 licenses(["notice"])  # Apache 2.0
 
-load("//tensorflow/tools/api/generator:api_gen.bzl", "ESTIMATOR_API_INIT_FILES")
-load("//tensorflow/tools/api/generator:api_gen.bzl", "TENSORFLOW_API_INIT_FILES")
+load("//tensorflow:tensorflow.bzl", "py_test")
+load("//tensorflow/python/tools/api/generator:api_gen.bzl", "ESTIMATOR_API_INIT_FILES")
+load("//tensorflow/python/tools/api/generator:api_gen.bzl", "TENSORFLOW_API_INIT_FILES")
 
 exports_files(
     [
@@ -13,6 +14,18 @@
     ],
 )
 
+py_binary(
+    name = "create_python_api",
+    srcs = ["//tensorflow/python/tools/api/generator:create_python_api.py"],
+    main = "//tensorflow/python/tools/api/generator:create_python_api.py",
+    srcs_version = "PY2AND3",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//tensorflow/python:no_contrib",
+        "//tensorflow/python/tools/api/generator:doc_srcs",
+    ],
+)
+
 py_library(
     name = "doc_srcs",
     srcs = ["doc_srcs.py"],
diff --git a/tensorflow/tools/api/generator/api_gen.bzl b/tensorflow/python/tools/api/generator/api_gen.bzl
similarity index 71%
rename from tensorflow/tools/api/generator/api_gen.bzl
rename to tensorflow/python/tools/api/generator/api_gen.bzl
index ed164bf..00e1c4e 100644
--- a/tensorflow/tools/api/generator/api_gen.bzl
+++ b/tensorflow/python/tools/api/generator/api_gen.bzl
@@ -102,37 +102,41 @@
     # END GENERATED ESTIMATOR FILES
 ]
 
-# Creates a genrule that generates a directory structure with __init__.py
-# files that import all exported modules (i.e. modules with tf_export
-# decorators).
-#
-# Args:
-#   name: name of genrule to create.
-#   output_files: List of __init__.py files that should be generated.
-#     This list should include file name for every module exported using
-#     tf_export. For e.g. if an op is decorated with
-#     @tf_export('module1.module2', 'module3'). Then, output_files should
-#     include module1/module2/__init__.py and module3/__init__.py.
-#   root_init_template: Python init file that should be used as template for
-#     root __init__.py file. "# API IMPORTS PLACEHOLDER" comment inside this
-#     template will be replaced with root imports collected by this genrule.
-#   srcs: genrule sources. If passing root_init_template, the template file
-#     must be included in sources.
-#   api_name: Name of the project that you want to generate API files for
-#     (e.g. "tensorflow" or "estimator").
-#   package: Python package containing the @tf_export decorators you want to
-#     process
-#   package_dep: Python library target containing your package.
-
 def gen_api_init_files(
         name,
         output_files = TENSORFLOW_API_INIT_FILES,
         root_init_template = None,
         srcs = [],
         api_name = "tensorflow",
+        api_version = 2,
         package = "tensorflow.python",
         package_dep = "//tensorflow/python:no_contrib",
         output_package = "tensorflow"):
+    """Creates API directory structure and __init__.py files.
+
+    Creates a genrule that generates a directory structure with __init__.py
+    files that import all exported modules (i.e. modules with tf_export
+    decorators).
+
+    Args:
+      name: name of genrule to create.
+      output_files: List of __init__.py files that should be generated.
+        This list should include a file name for every module exported using
+        tf_export. For example, if an op is decorated with
+        @tf_export('module1.module2', 'module3'), then output_files should
+        include module1/module2/__init__.py and module3/__init__.py.
+      root_init_template: Python init file that should be used as template for
+        root __init__.py file. "# API IMPORTS PLACEHOLDER" comment inside this
+        template will be replaced with root imports collected by this genrule.
+      srcs: genrule sources. If passing root_init_template, the template file
+        must be included in sources.
+      api_name: Name of the project that you want to generate API files for
+        (e.g. "tensorflow" or "estimator").
+      api_version: TensorFlow API version to generate. Must be either 1 or 2.
+      package: Python package containing the @tf_export decorators you want to
+        process.
+      package_dep: Python library target containing your package.
+    """
     root_init_template_flag = ""
     if root_init_template:
       root_init_template_flag = "--root_init_template=$(location " + root_init_template + ")"
@@ -140,13 +144,14 @@
     api_gen_binary_target = "create_" + package + "_api"
     native.py_binary(
         name = "create_" + package + "_api",
-        srcs = ["//tensorflow/tools/api/generator:create_python_api.py"],
-        main = "//tensorflow/tools/api/generator:create_python_api.py",
+        srcs = ["//tensorflow/python/tools/api/generator:create_python_api.py"],
+        main = "//tensorflow/python/tools/api/generator:create_python_api.py",
         srcs_version = "PY2AND3",
         visibility = ["//visibility:public"],
         deps = [
             package_dep,
-            "//tensorflow/tools/api/generator:doc_srcs",
+            "//tensorflow/python:util",
+            "//tensorflow/python/tools/api/generator:doc_srcs",
         ],
     )
 
@@ -156,8 +161,8 @@
         cmd = (
             "$(location :" + api_gen_binary_target + ") " +
             root_init_template_flag + " --apidir=$(@D) --apiname=" +
-            api_name + " --package=" + package + " --output_package=" +
-            output_package + " $(OUTS)"),
+            api_name + " --apiversion=" + str(api_version) + " --package=" + package +
+            " --output_package=" + output_package + " $(OUTS)"),
         srcs = srcs,
         tools = [":" + api_gen_binary_target ],
         visibility = ["//tensorflow:__pkg__"],
diff --git a/tensorflow/tools/api/generator/create_python_api.py b/tensorflow/python/tools/api/generator/create_python_api.py
similarity index 91%
rename from tensorflow/tools/api/generator/create_python_api.py
rename to tensorflow/python/tools/api/generator/create_python_api.py
index 7f17360..863c922 100644
--- a/tensorflow/tools/api/generator/create_python_api.py
+++ b/tensorflow/python/tools/api/generator/create_python_api.py
@@ -24,11 +24,12 @@
 import os
 import sys
 
+from tensorflow.python.tools.api.generator import doc_srcs
 from tensorflow.python.util import tf_decorator
 from tensorflow.python.util import tf_export
-from tensorflow.tools.api.generator import doc_srcs
 
 API_ATTRS = tf_export.API_ATTRS
+API_ATTRS_V1 = tf_export.API_ATTRS_V1
 
 _DEFAULT_PACKAGE = 'tensorflow.python'
 _GENFILES_DIR_SUFFIX = 'genfiles/'
@@ -38,7 +39,7 @@
     'tensorflow.python.platform.flags.FLAGS'
 }
 _GENERATED_FILE_HEADER = """# This file is MACHINE GENERATED! Do not edit.
-# Generated by: tensorflow/tools/api/generator/create_python_api.py script.
+# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
 \"\"\"%s
 \"\"\"
 
@@ -159,13 +160,16 @@
     return module_text_map
 
 
-def get_api_init_text(package, output_package, api_name):
+def get_api_init_text(package, output_package, api_name, api_version):
   """Get a map from destination module to __init__.py code for that module.
 
   Args:
     package: Base python package containing python with target tf_export
       decorators.
+    output_package: Base output Python package where the generated API will
+      be added.
     api_name: API you want to generate (e.g. `tensorflow` or `estimator`).
+    api_version: API version you want to generate (1 or 2).
 
   Returns:
     A dictionary where
@@ -173,6 +177,12 @@
       value: (string) text that should be in __init__.py files for
         corresponding modules.
   """
+  if api_version == 1:
+    names_attr = API_ATTRS_V1[api_name].names
+    constants_attr = API_ATTRS_V1[api_name].constants
+  else:
+    names_attr = API_ATTRS[api_name].names
+    constants_attr = API_ATTRS[api_name].constants
   module_code_builder = _ModuleInitCodeBuilder()
 
   # Traverse over everything imported above. Specifically,
@@ -193,7 +203,7 @@
       attr = getattr(module, module_contents_name)
 
       # If attr is _tf_api_constants attribute, then add the constants.
-      if module_contents_name == API_ATTRS[api_name].constants:
+      if module_contents_name == constants_attr:
         for exports, value in attr:
           for export in exports:
             names = export.split('.')
@@ -205,9 +215,8 @@
       _, attr = tf_decorator.unwrap(attr)
       # If attr is a symbol with _tf_api_names attribute, then
       # add import for it.
-      if (hasattr(attr, '__dict__') and
-          API_ATTRS[api_name].names in attr.__dict__):
-        for export in getattr(attr, API_ATTRS[api_name].names):  # pylint: disable=protected-access
+      if (hasattr(attr, '__dict__') and names_attr in attr.__dict__):
+        for export in getattr(attr, names_attr):  # pylint: disable=protected-access
           names = export.split('.')
           dest_module = '.'.join(names[:-1])
           module_code_builder.add_import(
@@ -297,7 +306,7 @@
 
 def create_api_files(
     output_files, package, root_init_template, output_dir, output_package,
-    api_name):
+    api_name, api_version):
   """Creates __init__.py files for the Python API.
 
   Args:
@@ -309,7 +318,9 @@
       "#API IMPORTS PLACEHOLDER" comment in the template file will be replaced
       with imports.
     output_dir: output API root directory.
+    output_package: Base output package where the generated API will be added.
     api_name: API you want to generate (e.g. `tensorflow` or `estimator`).
+    api_version: API version to generate (1 or 2).
 
   Raises:
     ValueError: if an output file is not under api/ directory,
@@ -326,7 +337,8 @@
       os.makedirs(os.path.dirname(file_path))
     open(file_path, 'a').close()
 
-  module_text_map = get_api_init_text(package, output_package, api_name)
+  module_text_map = get_api_init_text(
+      package, output_package, api_name, api_version)
 
   # Add imports to output files.
   missing_output_files = []
@@ -385,6 +397,10 @@
       choices=API_ATTRS.keys(),
       help='The API you want to generate.')
   parser.add_argument(
+      '--apiversion', default=2, type=int,
+      choices=[1, 2],
+      help='The API version you want to generate.')
+  parser.add_argument(
       '--output_package', default='tensorflow', type=str,
       help='Root output package.')
 
@@ -401,7 +417,8 @@
   # Populate `sys.modules` with modules containing tf_export().
   importlib.import_module(args.package)
   create_api_files(outputs, args.package, args.root_init_template,
-                   args.apidir, args.output_package, args.apiname)
+                   args.apidir, args.output_package, args.apiname,
+                   args.apiversion)
 
 
 if __name__ == '__main__':
diff --git a/tensorflow/tools/api/generator/create_python_api_test.py b/tensorflow/python/tools/api/generator/create_python_api_test.py
similarity index 93%
rename from tensorflow/tools/api/generator/create_python_api_test.py
rename to tensorflow/python/tools/api/generator/create_python_api_test.py
index 1a71874..a565a49 100644
--- a/tensorflow/tools/api/generator/create_python_api_test.py
+++ b/tensorflow/python/tools/api/generator/create_python_api_test.py
@@ -22,8 +22,8 @@
 import sys
 
 from tensorflow.python.platform import test
+from tensorflow.python.tools.api.generator import create_python_api
 from tensorflow.python.util.tf_export import tf_export
-from tensorflow.tools.api.generator import create_python_api
 
 
 @tf_export('test_op', 'test_op1')
@@ -59,7 +59,7 @@
     imports = create_python_api.get_api_init_text(
         package=create_python_api._DEFAULT_PACKAGE,
         output_package='tensorflow',
-        api_name='tensorflow')
+        api_name='tensorflow', api_version=1)
     expected_import = (
         'from tensorflow.python.test_module '
         'import test_op as test_op1')
@@ -77,7 +77,7 @@
     imports = create_python_api.get_api_init_text(
         package=create_python_api._DEFAULT_PACKAGE,
         output_package='tensorflow',
-        api_name='tensorflow')
+        api_name='tensorflow', api_version=2)
     expected_import = ('from tensorflow.python.test_module '
                        'import TestClass')
     self.assertTrue(
@@ -88,7 +88,7 @@
     imports = create_python_api.get_api_init_text(
         package=create_python_api._DEFAULT_PACKAGE,
         output_package='tensorflow',
-        api_name='tensorflow')
+        api_name='tensorflow', api_version=1)
     expected = ('from tensorflow.python.test_module '
                 'import _TEST_CONSTANT')
     self.assertTrue(expected in str(imports),
diff --git a/tensorflow/tools/api/generator/doc_srcs.py b/tensorflow/python/tools/api/generator/doc_srcs.py
similarity index 100%
rename from tensorflow/tools/api/generator/doc_srcs.py
rename to tensorflow/python/tools/api/generator/doc_srcs.py
diff --git a/tensorflow/tools/api/generator/doc_srcs_test.py b/tensorflow/python/tools/api/generator/doc_srcs_test.py
similarity index 95%
rename from tensorflow/tools/api/generator/doc_srcs_test.py
rename to tensorflow/python/tools/api/generator/doc_srcs_test.py
index dbff904..481d987 100644
--- a/tensorflow/tools/api/generator/doc_srcs_test.py
+++ b/tensorflow/python/tools/api/generator/doc_srcs_test.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =============================================================================
-"""Tests for tensorflow.tools.api.generator.doc_srcs."""
+"""Tests for tensorflow.python.tools.api.generator.doc_srcs."""
 
 from __future__ import absolute_import
 from __future__ import division
@@ -23,7 +23,7 @@
 import sys
 
 from tensorflow.python.platform import test
-from tensorflow.tools.api.generator import doc_srcs
+from tensorflow.python.tools.api.generator import doc_srcs
 
 
 FLAGS = None
diff --git a/tensorflow/python/training/quantize_training.i b/tensorflow/python/training/quantize_training.i
index fb5e47e..54d6789 100644
--- a/tensorflow/python/training/quantize_training.i
+++ b/tensorflow/python/training/quantize_training.i
@@ -73,6 +73,8 @@
 
 do_quantize_training_on_graphdef._tf_api_names = [
     'train.do_quantize_training_on_graphdef']
+do_quantize_training_on_graphdef._tf_api_names_v1 = [
+    'train.do_quantize_training_on_graphdef']
 %}
 
 %unignoreall
diff --git a/tensorflow/python/training/server_lib.py b/tensorflow/python/training/server_lib.py
index 2f421d1..58cf5277 100644
--- a/tensorflow/python/training/server_lib.py
+++ b/tensorflow/python/training/server_lib.py
@@ -42,8 +42,8 @@
       Defaults to the value in `server_or_cluster_def`, if specified. Otherwise
       defaults to 0 if the server's job has only one task.
     protocol: (Optional.) Specifies the protocol to be used by the server.
-      Acceptable values include `"grpc"`. Defaults to the value in
-      `server_or_cluster_def`, if specified. Otherwise defaults to `"grpc"`.
+      Acceptable values include `"grpc"` and `"grpc+verbs"`. Defaults to the
+      value in `server_or_cluster_def`, if specified. Otherwise defaults to
+      `"grpc"`.
     config: (Optional.) A `tf.ConfigProto` that specifies default configuration
       options for all sessions that run on this server.
 
@@ -129,8 +129,9 @@
         job. Defaults to the value in `server_or_cluster_def`, if specified.
         Otherwise defaults to 0 if the server's job has only one task.
       protocol: (Optional.) Specifies the protocol to be used by the server.
-        Acceptable values include `"grpc"`. Defaults to the value in
-        `server_or_cluster_def`, if specified. Otherwise defaults to `"grpc"`.
+        Acceptable values include `"grpc"` and `"grpc+verbs"`. Defaults to the
+        value in `server_or_cluster_def`, if specified. Otherwise defaults to
+        `"grpc"`.
       config: (Optional.) A `tf.ConfigProto` that specifies default
         configuration options for all sessions that run on this server.
       start: (Optional.) Boolean, indicating whether to start the server
diff --git a/tensorflow/python/util/deprecation.py b/tensorflow/python/util/deprecation.py
index c8ed2b7..9e2202e 100644
--- a/tensorflow/python/util/deprecation.py
+++ b/tensorflow/python/util/deprecation.py
@@ -37,6 +37,11 @@
 _PRINTED_WARNING = {}
 
 
+class DeprecatedNamesAlreadySet(Exception):
+  """Raised when setting deprecated names multiple times for the same symbol."""
+  pass
+
+
 def _add_deprecated_function_notice_to_docstring(doc, date, instructions):
   """Adds a deprecation notice to a docstring for deprecated functions."""
   main_text = ['THIS FUNCTION IS DEPRECATED. It will be removed %s.' %
@@ -219,6 +224,35 @@
             func_or_class.__doc__, None, 'Please use %s instead.' % name))
 
 
+def deprecated_endpoints(*args):
+  """Decorator for marking endpoints deprecated.
+
+  This decorator does not print deprecation messages.
+  TODO(annarev): eventually start printing deprecation warnings when the
+  @deprecated_endpoints decorator is applied.
+
+  Args:
+    *args: Deprecated endpoint names.
+
+  Returns:
+    A decorator that takes a symbol as an argument and adds
+    _tf_deprecated_api_names to that symbol, set to the list of
+    deprecated endpoint names for the symbol.
+  """
+  def deprecated_wrapper(func):
+    # pylint: disable=protected-access
+    if '_tf_deprecated_api_names' in func.__dict__:
+      raise DeprecatedNamesAlreadySet(
+          'Cannot set deprecated names for %s to %s. '
+          'Deprecated names are already set to %s.' % (
+              func.__name__, str(args), str(func._tf_deprecated_api_names)))
+    func._tf_deprecated_api_names = args
+    # pylint: enable=protected-access
+    return func
+  return deprecated_wrapper
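
Usage mirrors the tests added further below; a minimal sketch with a hypothetical endpoint name:

```python
from tensorflow.python.util import deprecation

@deprecation.deprecated_endpoints("train.old_name")
def my_func():
  pass

print(my_func._tf_deprecated_api_names)  # ('train.old_name',)
```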
+
+
 def deprecated(date, instructions, warn_once=True):
   """Decorator for marking functions or methods deprecated.
 
diff --git a/tensorflow/python/util/deprecation_test.py b/tensorflow/python/util/deprecation_test.py
index 1ea695e..90c73a0 100644
--- a/tensorflow/python/util/deprecation_test.py
+++ b/tensorflow/python/util/deprecation_test.py
@@ -935,5 +935,27 @@
     self.assertEqual(new_docs, new_docs_ref)
 
 
+class DeprecatedEndpointsTest(test.TestCase):
+
+  def testSingleDeprecatedEndpoint(self):
+    @deprecation.deprecated_endpoints("foo1")
+    def foo():
+      pass
+    self.assertEqual(("foo1",), foo._tf_deprecated_api_names)
+
+  def testMultipleDeprecatedEndpoint(self):
+    @deprecation.deprecated_endpoints("foo1", "foo2")
+    def foo():
+      pass
+    self.assertEqual(("foo1", "foo2"), foo._tf_deprecated_api_names)
+
+  def testCannotSetDeprecatedEndpointsTwice(self):
+    with self.assertRaises(deprecation.DeprecatedNamesAlreadySet):
+      @deprecation.deprecated_endpoints("foo1")
+      @deprecation.deprecated_endpoints("foo2")
+      def foo():  # pylint: disable=unused-variable
+        pass
+
+
 if __name__ == "__main__":
   test.main()
diff --git a/tensorflow/python/util/py_checkpoint_reader.i b/tensorflow/python/util/py_checkpoint_reader.i
index 8004898..1c73f7f 100644
--- a/tensorflow/python/util/py_checkpoint_reader.i
+++ b/tensorflow/python/util/py_checkpoint_reader.i
@@ -166,6 +166,7 @@
     return CheckpointReader(compat.as_bytes(filepattern), status)
 
 NewCheckpointReader._tf_api_names = ['train.NewCheckpointReader']
+NewCheckpointReader._tf_api_names_v1 = ['train.NewCheckpointReader']
 %}
 
 %include "tensorflow/c/checkpoint_reader.h"
diff --git a/tensorflow/python/util/stat_summarizer.i b/tensorflow/python/util/stat_summarizer.i
index 73fa854..a5a7984 100644
--- a/tensorflow/python/util/stat_summarizer.i
+++ b/tensorflow/python/util/stat_summarizer.i
@@ -27,8 +27,8 @@
 
 %ignoreall
 
-%unignore _NewStatSummarizer;
-%unignore _DeleteStatSummarizer;
+%unignore NewStatSummarizer;
+%unignore DeleteStatSummarizer;
 %unignore tensorflow;
 %unignore tensorflow::StatSummarizer;
 %unignore tensorflow::StatSummarizer::StatSummarizer;
@@ -43,20 +43,20 @@
 
 // TODO(ashankar): Remove the unused argument from the API.
 %{
-tensorflow::StatSummarizer* _NewStatSummarizer(
+tensorflow::StatSummarizer* NewStatSummarizer(
       const string& unused) {
   return new tensorflow::StatSummarizer(tensorflow::StatSummarizerOptions());
 }
 %}
 
 %{
-void _DeleteStatSummarizer(tensorflow::StatSummarizer* ss) {
+void DeleteStatSummarizer(tensorflow::StatSummarizer* ss) {
   delete ss;
 }
 %}
 
-tensorflow::StatSummarizer* _NewStatSummarizer(const string& unused);
-void _DeleteStatSummarizer(tensorflow::StatSummarizer* ss);
+tensorflow::StatSummarizer* NewStatSummarizer(const string& unused);
+void DeleteStatSummarizer(tensorflow::StatSummarizer* ss);
 
 %extend tensorflow::StatSummarizer {
   void ProcessStepStatsStr(const string& step_stats_str) {
@@ -76,16 +76,3 @@
 %include "tensorflow/core/util/stat_summarizer_options.h"
 %include "tensorflow/core/util/stat_summarizer.h"
 %unignoreall
-
-%insert("python") %{
-
-# Wrapping NewStatSummarizer and DeletStatSummarizer because
-# SWIG-generated functions are built-in functions and do not support
-# setting _tf_api_names attribute.
-
-def NewStatSummarizer(unused):
-  return _NewStatSummarizer(unused)
-
-def DeleteStatSummarizer(stat_summarizer):
-  _DeleteStatSummarizer(stat_summarizer)
-%}
diff --git a/tensorflow/python/util/tf_export.py b/tensorflow/python/util/tf_export.py
index e154ffb..274f32c 100644
--- a/tensorflow/python/util/tf_export.py
+++ b/tensorflow/python/util/tf_export.py
@@ -63,12 +63,63 @@
         '_estimator_api_constants')
 }
 
+API_ATTRS_V1 = {
+    TENSORFLOW_API_NAME: _Attributes(
+        '_tf_api_names_v1',
+        '_tf_api_constants_v1'),
+    ESTIMATOR_API_NAME: _Attributes(
+        '_estimator_api_names_v1',
+        '_estimator_api_constants_v1')
+}
+
 
 class SymbolAlreadyExposedError(Exception):
   """Raised when adding API names to symbol that already has API names."""
   pass
 
 
+def get_canonical_name_for_symbol(symbol, api_name=TENSORFLOW_API_NAME):
+  """Get canonical name for the API symbol.
+
+  Canonical name is the first non-deprecated endpoint name.
+
+  Args:
+    symbol: API function or class.
+    api_name: API name (tensorflow or estimator).
+
+  Returns:
+    Canonical name for the API symbol (e.g. `initializers.zeros`) if a
+    canonical name could be determined. Otherwise, returns None.
+  """
+  if not hasattr(symbol, '__dict__'):
+    return None
+  api_names_attr = API_ATTRS[api_name].names
+  _, undecorated_symbol = tf_decorator.unwrap(symbol)
+  if api_names_attr not in undecorated_symbol.__dict__:
+    return None
+  api_names = getattr(undecorated_symbol, api_names_attr)
+  # TODO(annarev): maybe add a separate deprecated attribute
+  # for estimator names.
+  deprecated_api_names = undecorated_symbol.__dict__.get(
+      '_tf_deprecated_api_names', [])
+  return get_canonical_name(api_names, deprecated_api_names)
+
+
+def get_canonical_name(api_names, deprecated_api_names):
+  """Get first non-deprecated endpoint name.
+
+  Args:
+    api_names: API names iterable.
+    deprecated_api_names: Deprecated API names iterable.
+  Returns:
+    Canonical name if there is at least one non-deprecated endpoint.
+    Otherwise returns None.
+  """
+  return next(
+      (name for name in api_names if name not in deprecated_api_names),
+      None)
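
For example, with hypothetical endpoint names:

```python
from tensorflow.python.util import tf_export

# The first non-deprecated endpoint wins.
print(tf_export.get_canonical_name(
    ['v1.zeros', 'initializers.zeros'], ['v1.zeros']))  # initializers.zeros
# None if every endpoint is deprecated.
print(tf_export.get_canonical_name(['v1.zeros'], ['v1.zeros']))  # None
```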
+
+
 class api_export(object):  # pylint: disable=invalid-name
   """Provides ways to export symbols to the TensorFlow API."""
 
@@ -78,13 +129,16 @@
     Args:
       *args: API names in dot delimited format.
       **kwargs: Optional keyed arguments.
-          overrides: List of symbols that this is overriding
+        v1: Names for the TensorFlow V1 API. If not set, the V2 API names are
+          used for both the TensorFlow V1 and V2 APIs.
+        overrides: List of symbols that this is overriding
           (those overridden api exports will be removed). Note: passing overrides
           has no effect on exporting a constant.
-          api_name: Name of the API you want to generate (e.g. `tensorflow` or
+        api_name: Name of the API you want to generate (e.g. `tensorflow` or
           `estimator`). Default is `tensorflow`.
     """
     self._names = args
+    self._names_v1 = kwargs.get('v1', args)
     self._api_name = kwargs.get('api_name', TENSORFLOW_API_NAME)
     self._overrides = kwargs.get('overrides', [])
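
A sketch of the new `v1` keyword with hypothetical endpoints: the V2 API keeps only the new name, while the V1 API retains the old alias:

```python
from tensorflow.python.util.tf_export import tf_export

@tf_export('math.erf', v1=['math.erf', 'erf'])  # hypothetical endpoints
def erf(x):
  return x

print(erf._tf_api_names)     # ('math.erf',)
print(erf._tf_api_names_v1)  # ['math.erf', 'erf']
```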
 
@@ -102,24 +156,27 @@
         and kwarg `allow_multiple_exports` not set.
     """
     api_names_attr = API_ATTRS[self._api_name].names
-
+    api_names_attr_v1 = API_ATTRS_V1[self._api_name].names
     # Undecorate overridden names
     for f in self._overrides:
       _, undecorated_f = tf_decorator.unwrap(f)
       delattr(undecorated_f, api_names_attr)
+      delattr(undecorated_f, api_names_attr_v1)
 
     _, undecorated_func = tf_decorator.unwrap(func)
+    self.set_attr(undecorated_func, api_names_attr, self._names)
+    self.set_attr(undecorated_func, api_names_attr_v1, self._names_v1)
+    return func
 
+  def set_attr(self, func, api_names_attr, names):
     # Check for an existing api. We check if attribute name is in
     # __dict__ instead of using hasattr to verify that subclasses have
     # their own _tf_api_names as opposed to just inheriting it.
-    if api_names_attr in undecorated_func.__dict__:
+    if api_names_attr in func.__dict__:
       raise SymbolAlreadyExposedError(
           'Symbol %s is already exposed as %s.' %
-          (undecorated_func.__name__, getattr(
-              undecorated_func, api_names_attr)))  # pylint: disable=protected-access
-    setattr(undecorated_func, api_names_attr, self._names)
-    return func
+          (func.__name__, getattr(func, api_names_attr)))  # pylint: disable=protected-access
+    setattr(func, api_names_attr, names)
 
   def export_constant(self, module_name, name):
     """Store export information for constants/string literals.
@@ -140,12 +197,20 @@
       name: (string) Current constant name.
     """
     module = sys.modules[module_name]
-    if not hasattr(module, API_ATTRS[self._api_name].constants):
-      setattr(module, API_ATTRS[self._api_name].constants, [])
+    api_constants_attr = API_ATTRS[self._api_name].constants
+    api_constants_attr_v1 = API_ATTRS_V1[self._api_name].constants
+
+    if not hasattr(module, api_constants_attr):
+      setattr(module, api_constants_attr, [])
     # pylint: disable=protected-access
-    getattr(module, API_ATTRS[self._api_name].constants).append(
+    getattr(module, api_constants_attr).append(
         (self._names, name))
 
+    if not hasattr(module, api_constants_attr_v1):
+      setattr(module, api_constants_attr_v1, [])
+    getattr(module, api_constants_attr_v1).append(
+        (self._names_v1, name))
+
 
 tf_export = functools.partial(api_export, api_name=TENSORFLOW_API_NAME)
 estimator_export = functools.partial(tf_export, api_name=ESTIMATOR_API_NAME)
diff --git a/tensorflow/python/util/tf_export_test.py b/tensorflow/python/util/tf_export_test.py
index b9e26ec..4ae1dc5 100644
--- a/tensorflow/python/util/tf_export_test.py
+++ b/tensorflow/python/util/tf_export_test.py
@@ -60,6 +60,8 @@
     for symbol in [_test_function, _test_function, TestClassA, TestClassB]:
       if hasattr(symbol, '_tf_api_names'):
         del symbol._tf_api_names
+      if hasattr(symbol, '_tf_api_names_v1'):
+        del symbol._tf_api_names_v1
 
   def _CreateMockModule(self, name):
     mock_module = self.MockModule(name)
diff --git a/tensorflow/python/util/tf_stack.py b/tensorflow/python/util/tf_stack.py
index dacc1ce..fe4f4a6 100644
--- a/tensorflow/python/util/tf_stack.py
+++ b/tensorflow/python/util/tf_stack.py
@@ -21,6 +21,12 @@
 import linecache
 import sys
 
+# Names for indices into TF traceback tuples.
+TB_FILENAME = 0
+TB_LINENO = 1
+TB_FUNCNAME = 2
+TB_CODEDICT = 3  # Dictionary of Python interpreter state.
+
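
Assuming the tuple layout these constants name (filename, line number, function name, code dict), a frame can be unpacked as in this illustrative sketch:

```python
from tensorflow.python.util import tf_stack

frame = tf_stack.extract_stack()[-1]  # innermost frame tuple
print('%s:%d in %s' % (frame[tf_stack.TB_FILENAME],
                       frame[tf_stack.TB_LINENO],
                       frame[tf_stack.TB_FUNCNAME]))
```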
 
 def extract_stack(extract_frame_info_fn=None):
   """A lightweight, extensible re-implementation of traceback.extract_stack.
diff --git a/tensorflow/security/advisory/tfsa-2018-001.md b/tensorflow/security/advisory/tfsa-2018-001.md
index bb97543..1966789 100644
--- a/tensorflow/security/advisory/tfsa-2018-001.md
+++ b/tensorflow/security/advisory/tfsa-2018-001.md
@@ -22,7 +22,7 @@
 ### Mitigation
 
 We have patched the vulnerability in GitHub commit
-[49f73c55](https://github.com/tensorflow/tensorflow/commit/49f73c55d56edffebde4bca4a407ad69c1cae4333c55).
+[49f73c55](https://github.com/tensorflow/tensorflow/commit/49f73c55d56edffebde4bca4a407ad69c1cae433).
 If users are running TensorFlow in production or on untrusted data, they are
 encouraged to apply this patch.
 
diff --git a/tensorflow/security/index.md b/tensorflow/security/index.md
index ea39e17..0f17615 100644
--- a/tensorflow/security/index.md
+++ b/tensorflow/security/index.md
@@ -4,7 +4,7 @@
 
 *Note*: In conjunction with these security advisories, we strongly encourage
 TensorFlow users to read and understand TensorFlow's security model as outlined
-in (https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md)[SECURITY.md].
+in [SECURITY.md](https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md).
 
 | Advisory Number | Type               | Versions affected | Reported by           | Additional Information      |
 |-----------------|--------------------|:-----------------:|-----------------------|-----------------------------|
diff --git a/tensorflow/stream_executor/cuda/cuda_dnn.cc b/tensorflow/stream_executor/cuda/cuda_dnn.cc
index 8491638..9e24a45 100644
--- a/tensorflow/stream_executor/cuda/cuda_dnn.cc
+++ b/tensorflow/stream_executor/cuda/cuda_dnn.cc
@@ -3603,7 +3603,7 @@
     const dnn::BatchDescriptor& input_dimensions,
     const DeviceMemory<double>& input_data,
     const dnn::BatchDescriptor& output_dimensions,
-    DeviceMemory<double>* output_data) {
+    DeviceMemory<double>* output_data, ScratchAllocator* workspace_allocator) {
   // Alpha is the scaling factor for input.
   double alpha = 1.0;
   // Beta is the scaling factor for output.
@@ -3628,7 +3628,7 @@
     const dnn::BatchDescriptor& input_dimensions,
     const DeviceMemory<float>& input_data,
     const dnn::BatchDescriptor& output_dimensions,
-    DeviceMemory<float>* output_data) {
+    DeviceMemory<float>* output_data, ScratchAllocator* workspace_allocator) {
   // Alpha is the scaling factor for input.
   float alpha = 1.0;
   // Beta is the scaling factor for output.
@@ -3653,7 +3653,8 @@
     const dnn::BatchDescriptor& input_dimensions,
     const DeviceMemory<Eigen::half>& input_data,
     const dnn::BatchDescriptor& output_dimensions,
-    DeviceMemory<Eigen::half>* output_data) {
+    DeviceMemory<Eigen::half>* output_data,
+    ScratchAllocator* workspace_allocator) {
   // Alpha is the scaling factor for input.
   float alpha = 1.0;
   // Beta is the scaling factor for output.
@@ -3679,7 +3680,8 @@
     const dnn::BatchDescriptor& output_dimensions,
     const DeviceMemory<double>& output_data,
     const DeviceMemory<double>& input_diff_data,
-    DeviceMemory<double>* output_diff_data) {
+    DeviceMemory<double>* output_diff_data,
+    ScratchAllocator* workspace_allocator) {
   // Alpha is the scaling factor for input.
   double alpha = 1.0;
   // Beta is the scaling factor for output.
@@ -3708,7 +3710,8 @@
     const dnn::BatchDescriptor& output_dimensions,
     const DeviceMemory<float>& output_data,
     const DeviceMemory<float>& input_diff_data,
-    DeviceMemory<float>* output_diff_data) {
+    DeviceMemory<float>* output_diff_data,
+    ScratchAllocator* workspace_allocator) {
   // Alpha is the scaling factor for input.
   float alpha = 1.0;
   // Beta is the scaling factor for output.
@@ -3737,7 +3740,8 @@
     const dnn::BatchDescriptor& output_dimensions,
     const DeviceMemory<Eigen::half>& output_data,
     const DeviceMemory<Eigen::half>& input_diff_data,
-    DeviceMemory<Eigen::half>* output_diff_data) {
+    DeviceMemory<Eigen::half>* output_diff_data,
+    ScratchAllocator* workspace_allocator) {
   // Alpha is the scaling factor for input.
   float alpha = 1.0;
   // Beta is the scaling factor for output.
@@ -3806,7 +3810,8 @@
     const dnn::BatchDescriptor& dimensions, const DeviceMemory<float>& raw_data,
     const DeviceMemory<float>& normalized_data,
     const DeviceMemory<float>& normalized_variable_gradient,
-    DeviceMemory<float>* raw_variable_gradient) {
+    DeviceMemory<float>* raw_variable_gradient,
+    ScratchAllocator* workspace_allocator) {
   // Check for unsupported modes.
   if (normalize_descriptor.wrap_around()) {
     LOG(ERROR) << "CUDA LRN does not support cudnn-around mode";
diff --git a/tensorflow/stream_executor/cuda/cuda_dnn.h b/tensorflow/stream_executor/cuda/cuda_dnn.h
index c924d41..9d88f97 100644
--- a/tensorflow/stream_executor/cuda/cuda_dnn.h
+++ b/tensorflow/stream_executor/cuda/cuda_dnn.h
@@ -515,21 +515,24 @@
                      const dnn::BatchDescriptor& input_dimensions,
                      const DeviceMemory<double>& input_data,
                      const dnn::BatchDescriptor& output_dimensions,
-                     DeviceMemory<double>* output_data) override;
+                     DeviceMemory<double>* output_data,
+                     ScratchAllocator* workspace_allocator) override;
 
   bool DoPoolForward(Stream* stream,
                      const dnn::PoolingDescriptor& pooling_dimensions,
                      const dnn::BatchDescriptor& input_dimensions,
                      const DeviceMemory<float>& input_data,
                      const dnn::BatchDescriptor& output_dimensions,
-                     DeviceMemory<float>* output_data) override;
+                     DeviceMemory<float>* output_data,
+                     ScratchAllocator* workspace_allocator) override;
 
   bool DoPoolForward(Stream* stream,
                      const dnn::PoolingDescriptor& pooling_dimensions,
                      const dnn::BatchDescriptor& input_dimensions,
                      const DeviceMemory<Eigen::half>& input_data,
                      const dnn::BatchDescriptor& output_dimensions,
-                     DeviceMemory<Eigen::half>* output_data) override;
+                     DeviceMemory<Eigen::half>* output_data,
+                     ScratchAllocator* workspace_allocator) override;
 
   bool DoPoolBackward(Stream* stream,
                       const dnn::PoolingDescriptor& pooling_dimensions,
@@ -538,7 +541,8 @@
                       const dnn::BatchDescriptor& output_dimensions,
                       const DeviceMemory<double>& output_data,
                       const DeviceMemory<double>& input_diff_data,
-                      DeviceMemory<double>* output_diff_data) override;
+                      DeviceMemory<double>* output_diff_data,
+                      ScratchAllocator* workspace_allocator) override;
 
   bool DoPoolBackward(Stream* stream,
                       const dnn::PoolingDescriptor& pooling_dimensions,
@@ -547,7 +551,8 @@
                       const dnn::BatchDescriptor& output_dimensions,
                       const DeviceMemory<float>& output_data,
                       const DeviceMemory<float>& input_diff_data,
-                      DeviceMemory<float>* output_diff_data) override;
+                      DeviceMemory<float>* output_diff_data,
+                      ScratchAllocator* workspace_allocator) override;
 
   bool DoPoolBackward(Stream* stream,
                       const dnn::PoolingDescriptor& pooling_dimensions,
@@ -556,7 +561,8 @@
                       const dnn::BatchDescriptor& output_dimensions,
                       const DeviceMemory<Eigen::half>& output_data,
                       const DeviceMemory<Eigen::half>& input_diff_data,
-                      DeviceMemory<Eigen::half>* output_diff_data) override;
+                      DeviceMemory<Eigen::half>* output_diff_data,
+                      ScratchAllocator* workspace_allocator) override;
 
   bool DoNormalize(Stream* stream,
                    const dnn::NormalizeDescriptor& normalize_descriptor,
@@ -575,7 +581,8 @@
       const DeviceMemory<float>& raw_data,
       const DeviceMemory<float>& normalized_data,
       const DeviceMemory<float>& normalized_variable_gradient,
-      DeviceMemory<float>* raw_variable_gradient) override;
+      DeviceMemory<float>* raw_variable_gradient,
+      ScratchAllocator* workspace_allocator) override;
 
   bool DoDepthConcatenate(
       Stream* stream, port::ArraySlice<dnn::BatchDescriptor> input_dimensions,
diff --git a/tensorflow/stream_executor/dnn.h b/tensorflow/stream_executor/dnn.h
index 9eca5ab..a7449c2 100644
--- a/tensorflow/stream_executor/dnn.h
+++ b/tensorflow/stream_executor/dnn.h
@@ -1552,14 +1552,16 @@
                              const dnn::BatchDescriptor& input_dimensions,
                              const DeviceMemory<float>& input_data,
                              const dnn::BatchDescriptor& output_dimensions,
-                             DeviceMemory<float>* output_data) = 0;
+                             DeviceMemory<float>* output_data,
+                             ScratchAllocator* workspace_allocator) = 0;
 
   virtual bool DoPoolForward(Stream* stream,
                              const dnn::PoolingDescriptor& pooling_dimensions,
                              const dnn::BatchDescriptor& input_dimensions,
                              const DeviceMemory<double>& input_data,
                              const dnn::BatchDescriptor& output_dimensions,
-                             DeviceMemory<double>* output_data) {
+                             DeviceMemory<double>* output_data,
+                             ScratchAllocator* workspace_allocator) {
     LOG(FATAL) << "DoPoolForward not implemented for double.";
     return false;
   }
@@ -1569,7 +1571,8 @@
                              const dnn::BatchDescriptor& input_dimensions,
                              const DeviceMemory<Eigen::half>& input_data,
                              const dnn::BatchDescriptor& output_dimensions,
-                             DeviceMemory<Eigen::half>* output_data) {
+                             DeviceMemory<Eigen::half>* output_data,
+                             ScratchAllocator* workspace_allocator) {
     LOG(FATAL) << "DoPoolForward not implemented for float16.";
     return false;
   }
@@ -1582,7 +1585,8 @@
                               const dnn::BatchDescriptor& output_dimensions,
                               const DeviceMemory<double>& output_data,
                               const DeviceMemory<double>& input_diff_data,
-                              DeviceMemory<double>* output_diff_data) {
+                              DeviceMemory<double>* output_diff_data,
+                              ScratchAllocator* workspace_allocator) {
     LOG(FATAL) << "DoPoolBackward not implemented.";
     return false;
   }
@@ -1594,7 +1598,8 @@
                               const dnn::BatchDescriptor& output_dimensions,
                               const DeviceMemory<float>& output_data,
                               const DeviceMemory<float>& input_diff_data,
-                              DeviceMemory<float>* output_diff_data) {
+                              DeviceMemory<float>* output_diff_data,
+                              ScratchAllocator* workspace_allocator) {
     LOG(FATAL) << "DoPoolBackward not implemented.";
     return false;
   }
@@ -1606,7 +1611,8 @@
                               const dnn::BatchDescriptor& output_dimensions,
                               const DeviceMemory<Eigen::half>& output_data,
                               const DeviceMemory<Eigen::half>& input_diff_data,
-                              DeviceMemory<Eigen::half>* output_diff_data) {
+                              DeviceMemory<Eigen::half>* output_diff_data,
+                              ScratchAllocator* workspace_allocator) {
     LOG(FATAL) << "DoPoolBackward not implemented.";
     return false;
   }
@@ -1653,7 +1659,8 @@
       const DeviceMemory<float>& raw_data,
       const DeviceMemory<float>& normalized_data,
       const DeviceMemory<float>& normalized_variable_gradient,
-      DeviceMemory<float>* raw_variable_gradient) {
+      DeviceMemory<float>* raw_variable_gradient,
+      ScratchAllocator* workspace_allocator) {
     return false;
   }
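
[Editor's note] The non-float overloads above keep LOG(FATAL) defaults, so a backend that only implements the float path still satisfies the interface. Because each virtual gained a parameter, any out-of-tree DnnSupport subclass has to update its override signatures. A fragment of what that looks like; RunPooling is a hypothetical helper, and the class elides DnnSupport's other required methods:

// Hedged sketch: the `override` keyword turns a stale signature into a
// compile error instead of a silent fall-through to the LOG(FATAL) default.
class MyDnnSupport : public dnn::DnnSupport {
 public:
  bool DoPoolForward(Stream* stream,
                     const dnn::PoolingDescriptor& pooling_dimensions,
                     const dnn::BatchDescriptor& input_dimensions,
                     const DeviceMemory<float>& input_data,
                     const dnn::BatchDescriptor& output_dimensions,
                     DeviceMemory<float>* output_data,
                     ScratchAllocator* workspace_allocator) override {
    // A backend that needs no scratch space may simply ignore
    // workspace_allocator, which is how existing callers keep working.
    return RunPooling(stream, pooling_dimensions, input_dimensions,
                      input_data, output_dimensions, output_data);
  }
  // ... remaining DnnSupport methods ...
};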
 
diff --git a/tensorflow/stream_executor/stream.cc b/tensorflow/stream_executor/stream.cc
index 9369183..ca1b8e2 100644
--- a/tensorflow/stream_executor/stream.cc
+++ b/tensorflow/stream_executor/stream.cc
@@ -1377,15 +1377,16 @@
     const dnn::BatchDescriptor &input_dimensions,
     const DeviceMemory<double> &input_data,
     const dnn::BatchDescriptor &output_dimensions,
-    DeviceMemory<double> *output_data) {
+    DeviceMemory<double> *output_data, ScratchAllocator *workspace_allocator) {
   VLOG_CALL(PARAM(pooling_dimensions), PARAM(input_dimensions),
-            PARAM(input_data), PARAM(output_dimensions), PARAM(output_data));
+            PARAM(input_data), PARAM(output_dimensions), PARAM(output_data),
+            PARAM(workspace_allocator));
 
   if (ok()) {
     if (dnn::DnnSupport *dnn = parent_->AsDnn()) {
       CheckError(dnn->DoPoolForward(this, pooling_dimensions, input_dimensions,
-                                    input_data, output_dimensions,
-                                    output_data));
+                                    input_data, output_dimensions, output_data,
+                                    workspace_allocator));
     } else {
       SetError();
       LOG(WARNING)
@@ -1401,15 +1402,16 @@
     const dnn::BatchDescriptor &input_dimensions,
     const DeviceMemory<float> &input_data,
     const dnn::BatchDescriptor &output_dimensions,
-    DeviceMemory<float> *output_data) {
+    DeviceMemory<float> *output_data, ScratchAllocator *workspace_allocator) {
   VLOG_CALL(PARAM(pooling_dimensions), PARAM(input_dimensions),
-            PARAM(input_data), PARAM(output_dimensions), PARAM(output_data));
+            PARAM(input_data), PARAM(output_dimensions), PARAM(output_data),
+            PARAM(workspace_allocator));
 
   if (ok()) {
     if (dnn::DnnSupport *dnn = parent_->AsDnn()) {
       CheckError(dnn->DoPoolForward(this, pooling_dimensions, input_dimensions,
-                                    input_data, output_dimensions,
-                                    output_data));
+                                    input_data, output_dimensions, output_data,
+                                    workspace_allocator));
     } else {
       SetErrorAndLogNoDnnSupport();
     }
@@ -1422,15 +1424,17 @@
     const dnn::BatchDescriptor &input_dimensions,
     const DeviceMemory<Eigen::half> &input_data,
     const dnn::BatchDescriptor &output_dimensions,
-    DeviceMemory<Eigen::half> *output_data) {
+    DeviceMemory<Eigen::half> *output_data,
+    ScratchAllocator *workspace_allocator) {
   VLOG_CALL(PARAM(pooling_dimensions), PARAM(input_dimensions),
-            PARAM(input_data), PARAM(output_dimensions), PARAM(output_data));
+            PARAM(input_data), PARAM(output_dimensions), PARAM(output_data),
+            PARAM(workspace_allocator));
 
   if (ok()) {
     if (dnn::DnnSupport *dnn = parent_->AsDnn()) {
       CheckError(dnn->DoPoolForward(this, pooling_dimensions, input_dimensions,
-                                    input_data, output_dimensions,
-                                    output_data));
+                                    input_data, output_dimensions, output_data,
+                                    workspace_allocator));
     } else {
       SetErrorAndLogNoDnnSupport();
     }
@@ -1445,16 +1449,19 @@
     const dnn::BatchDescriptor &output_dimensions,
     const DeviceMemory<double> &output_data,
     const DeviceMemory<double> &input_diff_data,
-    DeviceMemory<double> *output_diff_data) {
+    DeviceMemory<double> *output_diff_data,
+    ScratchAllocator *workspace_allocator) {
   VLOG_CALL(PARAM(pooling_dimensions), PARAM(input_dimensions),
             PARAM(input_data), PARAM(output_dimensions), PARAM(output_data),
-            PARAM(input_diff_data), PARAM(output_diff_data));
+            PARAM(input_diff_data), PARAM(output_diff_data),
+            PARAM(workspace_allocator));
 
   if (ok()) {
     if (dnn::DnnSupport *dnn = parent_->AsDnn()) {
       CheckError(dnn->DoPoolBackward(this, pooling_dimensions, input_dimensions,
                                      input_data, output_dimensions, output_data,
-                                     input_diff_data, output_diff_data));
+                                     input_diff_data, output_diff_data,
+                                     workspace_allocator));
     } else {
       SetError();
       LOG(WARNING)
@@ -1472,16 +1479,19 @@
     const dnn::BatchDescriptor &output_dimensions,
     const DeviceMemory<float> &output_data,
     const DeviceMemory<float> &input_diff_data,
-    DeviceMemory<float> *output_diff_data) {
+    DeviceMemory<float> *output_diff_data,
+    ScratchAllocator *workspace_allocator) {
   VLOG_CALL(PARAM(pooling_dimensions), PARAM(input_dimensions),
             PARAM(input_data), PARAM(output_dimensions), PARAM(output_data),
-            PARAM(input_diff_data), PARAM(output_diff_data));
+            PARAM(input_diff_data), PARAM(output_diff_data),
+            PARAM(workspace_allocator));
 
   if (ok()) {
     if (dnn::DnnSupport *dnn = parent_->AsDnn()) {
       CheckError(dnn->DoPoolBackward(this, pooling_dimensions, input_dimensions,
                                      input_data, output_dimensions, output_data,
-                                     input_diff_data, output_diff_data));
+                                     input_diff_data, output_diff_data,
+                                     workspace_allocator));
     } else {
       SetErrorAndLogNoDnnSupport();
     }
@@ -1496,16 +1506,19 @@
     const dnn::BatchDescriptor &output_dimensions,
     const DeviceMemory<Eigen::half> &output_data,
     const DeviceMemory<Eigen::half> &input_diff_data,
-    DeviceMemory<Eigen::half> *output_diff_data) {
+    DeviceMemory<Eigen::half> *output_diff_data,
+    ScratchAllocator *workspace_allocator) {
   VLOG_CALL(PARAM(pooling_dimensions), PARAM(input_dimensions),
             PARAM(input_data), PARAM(output_dimensions), PARAM(output_data),
-            PARAM(input_diff_data), PARAM(output_diff_data));
+            PARAM(input_diff_data), PARAM(output_diff_data),
+            PARAM(workspace_allocator));
 
   if (ok()) {
     if (dnn::DnnSupport *dnn = parent_->AsDnn()) {
       CheckError(dnn->DoPoolBackward(this, pooling_dimensions, input_dimensions,
                                      input_data, output_dimensions, output_data,
-                                     input_diff_data, output_diff_data));
+                                     input_diff_data, output_diff_data,
+                                     workspace_allocator));
     } else {
       SetErrorAndLogNoDnnSupport();
     }
@@ -1552,16 +1565,18 @@
     const dnn::BatchDescriptor &dimensions, const DeviceMemory<float> &raw_data,
     const DeviceMemory<float> &normalized_data,
     const DeviceMemory<float> &normalized_variable_gradient,
-    DeviceMemory<float> *raw_variable_gradient) {
+    DeviceMemory<float> *raw_variable_gradient,
+    ScratchAllocator *workspace_allocator) {
   VLOG_CALL(PARAM(normalize_descriptor), PARAM(dimensions), PARAM(raw_data),
             PARAM(normalized_data), PARAM(normalized_variable_gradient),
-            PARAM(raw_variable_gradient));
+            PARAM(raw_variable_gradient), PARAM(workspace_allocator));
 
   if (ok()) {
     if (dnn::DnnSupport *dnn = parent_->AsDnn()) {
       CheckError(dnn->DoNormalizeBackwardWithDimensions(
           this, normalize_descriptor, dimensions, raw_data, normalized_data,
-          normalized_variable_gradient, raw_variable_gradient));
+          normalized_variable_gradient, raw_variable_gradient,
+          workspace_allocator));
     } else {
       SetErrorAndLogNoDnnSupport();
     }
diff --git a/tensorflow/stream_executor/stream.h b/tensorflow/stream_executor/stream.h
index e8885e1..63d6494 100644
--- a/tensorflow/stream_executor/stream.h
+++ b/tensorflow/stream_executor/stream.h
@@ -629,19 +629,22 @@
                           const dnn::BatchDescriptor &input_dimensions,
                           const DeviceMemory<double> &input_data,
                           const dnn::BatchDescriptor &output_dimensions,
-                          DeviceMemory<double> *output_data);
+                          DeviceMemory<double> *output_data,
+                          ScratchAllocator *workspace_allocator = nullptr);
 
   Stream &ThenPoolForward(const dnn::PoolingDescriptor &pooling_dimensions,
                           const dnn::BatchDescriptor &input_dimensions,
                           const DeviceMemory<float> &input_data,
                           const dnn::BatchDescriptor &output_dimensions,
-                          DeviceMemory<float> *output_data);
+                          DeviceMemory<float> *output_data,
+                          ScratchAllocator *workspace_allocator = nullptr);
 
   Stream &ThenPoolForward(const dnn::PoolingDescriptor &pooling_dimensions,
                           const dnn::BatchDescriptor &input_dimensions,
                           const DeviceMemory<Eigen::half> &input_data,
                           const dnn::BatchDescriptor &output_dimensions,
-                          DeviceMemory<Eigen::half> *output_data);
+                          DeviceMemory<Eigen::half> *output_data,
+                          ScratchAllocator *workspace_allocator = nullptr);
 
   Stream &ThenPoolBackward(const dnn::PoolingDescriptor &pooling_dimensions,
                            const dnn::BatchDescriptor &input_dimensions,
@@ -649,7 +652,8 @@
                            const dnn::BatchDescriptor &output_dimensions,
                            const DeviceMemory<double> &output_data,
                            const DeviceMemory<double> &input_diff_data,
-                           DeviceMemory<double> *output_diff_data);
+                           DeviceMemory<double> *output_diff_data,
+                           ScratchAllocator *workspace_allocator = nullptr);
 
   Stream &ThenPoolBackward(const dnn::PoolingDescriptor &pooling_dimensions,
                            const dnn::BatchDescriptor &input_dimensions,
@@ -657,7 +661,8 @@
                            const dnn::BatchDescriptor &output_dimensions,
                            const DeviceMemory<float> &output_data,
                            const DeviceMemory<float> &input_diff_data,
-                           DeviceMemory<float> *output_diff_data);
+                           DeviceMemory<float> *output_diff_data,
+                           ScratchAllocator *workspace_allocator = nullptr);
 
   Stream &ThenPoolBackward(const dnn::PoolingDescriptor &pooling_dimensions,
                            const dnn::BatchDescriptor &input_dimensions,
@@ -665,7 +670,8 @@
                            const dnn::BatchDescriptor &output_dimensions,
                            const DeviceMemory<Eigen::half> &output_data,
                            const DeviceMemory<Eigen::half> &input_diff_data,
-                           DeviceMemory<Eigen::half> *output_diff_data);
+                           DeviceMemory<Eigen::half> *output_diff_data,
+                           ScratchAllocator *workspace_allocator = nullptr);
 
   Stream &ThenNormalize(const dnn::NormalizeDescriptor &normalize_descriptor,
                         const DeviceMemory<float> &input_data,
@@ -684,7 +690,8 @@
       const DeviceMemory<float> &raw_data,
       const DeviceMemory<float> &normalized_data,
       const DeviceMemory<float> &normalized_variable_gradient,
-      DeviceMemory<float> *raw_variable_gradient);
+      DeviceMemory<float> *raw_variable_gradient,
+      ScratchAllocator *workspace_allocator = nullptr);
 
   Stream &ThenActivate(dnn::ActivationMode activation_mode,
                        const dnn::BatchDescriptor &dimensions,
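
[Editor's note] Because workspace_allocator defaults to nullptr in the Stream API above, existing call sites compile unchanged. A hedged usage sketch; the descriptors, device buffers, and allocator are assumed to be set up elsewhere:

// Old call sites keep working; the workspace allocator defaults to nullptr.
stream.ThenPoolForward(pool_desc, in_desc, input, out_desc, &output);
// New call sites can opt in to a backend workspace by passing an allocator.
stream.ThenPoolForward(pool_desc, in_desc, input, out_desc, &output,
                       &scratch_allocator);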
diff --git a/tensorflow/tensorflow.bzl b/tensorflow/tensorflow.bzl
index e424166..955b53f 100644
--- a/tensorflow/tensorflow.bzl
+++ b/tensorflow/tensorflow.bzl
@@ -24,7 +24,10 @@
     "if_mkl",
     "if_mkl_lnx_x64"
 )
-
+load(
+    "//third_party/mkl_dnn:build_defs.bzl",
+    "if_mkl_open_source_only",
+)
 def register_extension_info(**kwargs):
     pass
 
@@ -180,9 +183,13 @@
         "/DEIGEN_AVOID_STL_ARRAY",
         "/Iexternal/gemmlowp",
         "/wd4018",  # -Wno-sign-compare
-        "/U_HAS_EXCEPTIONS",
-        "/D_HAS_EXCEPTIONS=1",
-        "/EHsc",  # -fno-exceptions
+        # Bazel's CROSSTOOL currently passes /EHsc to enable exceptions by
+        # default. We can't pass /EHs-c- to disable exceptions; otherwise
+        # we get a waterfall of flag-conflict warnings. Wait for Bazel to
+        # fix this.
+        # "/D_HAS_EXCEPTIONS=0",
+        # "/EHs-c-",
+        "/wd4577",
         "/DNOGDI",
     ]
     if is_external:
@@ -214,6 +221,7 @@
       + if_cuda(["-DGOOGLE_CUDA=1"])
       + if_tensorrt(["-DGOOGLE_TENSORRT=1"])
       + if_mkl(["-DINTEL_MKL=1", "-DEIGEN_USE_VML"])
+      + if_mkl_open_source_only(["-DDO_NOT_USE_ML"])
       + if_mkl_lnx_x64(["-fopenmp"])
       + if_android_arm(["-mfpu=neon"])
       + if_linux_x86_64(["-msse3"])
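
[Editor's note] The new if_mkl_open_source_only() branch adds -DDO_NOT_USE_ML when building against the open-source MKL-DNN library alone. A hedged sketch of how kernel code might consume such a define; the function names are placeholders, not the actual TensorFlow MKL kernels:

#ifdef DO_NOT_USE_ML
  // Built with open-source MKL-DNN only: take the MKL-DNN code path.
  RunWithMklDnn(input, output);
#else
  // The full Intel MKL (including MKL-ML) is available.
  RunWithMklMl(input, output);
#endif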
diff --git a/tensorflow/tools/api/golden/tensorflow.-config-proto.-experimental.pbtxt b/tensorflow/tools/api/golden/tensorflow.-config-proto.-experimental.pbtxt
index 9e09a8d..ef9fe09 100644
--- a/tensorflow/tools/api/golden/tensorflow.-config-proto.-experimental.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.-config-proto.-experimental.pbtxt
@@ -8,5 +8,11 @@
       label: LABEL_OPTIONAL
       type: TYPE_STRING
     }
+    field {
+      name: "client_handles_error_formatting"
+      number: 2
+      label: LABEL_OPTIONAL
+      type: TYPE_BOOL
+    }
   }
 }
diff --git a/tensorflow/tools/api/golden/tensorflow.-config-proto.pbtxt b/tensorflow/tools/api/golden/tensorflow.-config-proto.pbtxt
index 4af4ed7..eeef155 100644
--- a/tensorflow/tools/api/golden/tensorflow.-config-proto.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.-config-proto.pbtxt
@@ -131,6 +131,12 @@
         label: LABEL_OPTIONAL
         type: TYPE_STRING
       }
+      field {
+        name: "client_handles_error_formatting"
+        number: 2
+        label: LABEL_OPTIONAL
+        type: TYPE_BOOL
+      }
     }
   }
 }
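
[Editor's note] The updated goldens record a new experimental bool, client_handles_error_formatting, on ConfigProto. Setting it from C++ goes through the generated proto API; a minimal sketch, with the session setup around it assumed:

tensorflow::ConfigProto config;
config.mutable_experimental()->set_client_handles_error_formatting(true);
// `config` can then be passed to session-creation options as usual.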
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-run-config.pbtxt b/tensorflow/tools/api/golden/tensorflow.estimator.-run-config.pbtxt
index c8da55d..5aa4b3d 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-run-config.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.estimator.-run-config.pbtxt
@@ -51,6 +51,10 @@
     mtype: "<type \'property\'>"
   }
   member {
+    name: "protocol"
+    mtype: "<type \'property\'>"
+  }
+  member {
     name: "save_checkpoints_secs"
     mtype: "<type \'property\'>"
   }
@@ -88,7 +92,7 @@
   }
   member_method {
     name: "__init__"
-    argspec: "args=[\'self\', \'model_dir\', \'tf_random_seed\', \'save_summary_steps\', \'save_checkpoints_steps\', \'save_checkpoints_secs\', \'session_config\', \'keep_checkpoint_max\', \'keep_checkpoint_every_n_hours\', \'log_step_count_steps\', \'train_distribute\', \'device_fn\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'100\', \'<object object instance>\', \'<object object instance>\', \'None\', \'5\', \'10000\', \'100\', \'None\', \'None\'], "
+    argspec: "args=[\'self\', \'model_dir\', \'tf_random_seed\', \'save_summary_steps\', \'save_checkpoints_steps\', \'save_checkpoints_secs\', \'session_config\', \'keep_checkpoint_max\', \'keep_checkpoint_every_n_hours\', \'log_step_count_steps\', \'train_distribute\', \'device_fn\', \'protocol\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'100\', \'<object object instance>\', \'<object object instance>\', \'None\', \'5\', \'10000\', \'100\', \'None\', \'None\', \'None\'], "
   }
   member_method {
     name: "replace"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-tensor-board.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-tensor-board.pbtxt
index 2f52464..e58ba18 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-tensor-board.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-tensor-board.pbtxt
@@ -5,7 +5,7 @@
   is_instance: "<type \'object\'>"
   member_method {
     name: "__init__"
-    argspec: "args=[\'self\', \'log_dir\', \'histogram_freq\', \'batch_size\', \'write_graph\', \'write_grads\', \'write_images\'], varargs=None, keywords=None, defaults=[\'./logs\', \'0\', \'32\', \'True\', \'False\', \'False\'], "
+    argspec: "args=[\'self\', \'log_dir\', \'histogram_freq\', \'batch_size\', \'write_graph\', \'write_grads\', \'write_images\', \'embeddings_freq\', \'embeddings_layer_names\', \'embeddings_metadata\', \'embeddings_data\'], varargs=None, keywords=None, defaults=[\'./logs\', \'0\', \'32\', \'True\', \'False\', \'False\', \'0\', \'None\', \'None\', \'None\'], "
   }
   member_method {
     name: "on_batch_begin"
diff --git a/tensorflow/tools/api/golden/tensorflow.pbtxt b/tensorflow/tools/api/golden/tensorflow.pbtxt
index 4f90743..5eb42b4 100644
--- a/tensorflow/tools/api/golden/tensorflow.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.pbtxt
@@ -258,7 +258,7 @@
   }
   member {
     name: "Variable"
-    mtype: "<type \'type\'>"
+    mtype: "<class \'tensorflow.python.ops.variables.VariableMetaclass\'>"
   }
   member {
     name: "VariableAggregation"
diff --git a/tensorflow/tools/api/tests/api_compatibility_test.py b/tensorflow/tools/api/tests/api_compatibility_test.py
index 90375a7..d1b34fb 100644
--- a/tensorflow/tools/api/tests/api_compatibility_test.py
+++ b/tensorflow/tools/api/tests/api_compatibility_test.py
@@ -34,6 +34,13 @@
 import unittest
 
 import tensorflow as tf
+# pylint: disable=g-import-not-at-top
+try:
+  from tensorflow.compat import v1 as tf_v1
+  # We import compat.v1 as tf_v1 instead.
+  del tf.compat.v1
+except ImportError:
+  tf_v1 = None
 
 from google.protobuf import message
 from google.protobuf import text_format
@@ -46,6 +53,7 @@
 from tensorflow.tools.api.lib import python_object_to_proto_visitor
 from tensorflow.tools.common import public_api
 from tensorflow.tools.common import traverse
+# pylint: enable=g-import-not-at-top
 
 
 # FLAGS defined at the bottom:
@@ -215,25 +223,19 @@
     visitor.do_not_descend_map['tf'].append('contrib')
     traverse.traverse(tf, visitor)
 
-  @unittest.skipUnless(
-      sys.version_info.major == 2,
-      'API compabitility test goldens are generated using python2.')
-  def testAPIBackwardsCompatibility(self):
-    # Extract all API stuff.
+  def checkBackwardsCompatibility(self, root, golden_file_pattern):
+    # Extract all API stuff.
     visitor = python_object_to_proto_visitor.PythonObjectToProtoVisitor()
 
     public_api_visitor = public_api.PublicAPIVisitor(visitor)
     public_api_visitor.do_not_descend_map['tf'].append('contrib')
     public_api_visitor.do_not_descend_map['tf.GPUOptions'] = ['Experimental']
-    traverse.traverse(tf, public_api_visitor)
+    traverse.traverse(root, public_api_visitor)
 
     proto_dict = visitor.GetProtos()
 
     # Read all golden files.
-    expression = os.path.join(
-        resource_loader.get_root_dir_with_all_resources(),
-        _KeyToFilePath('*'))
-    golden_file_list = file_io.get_matching_files(expression)
+    golden_file_list = file_io.get_matching_files(golden_file_pattern)
 
     def _ReadFileToProto(filename):
       """Read a filename, create a protobuf from its contents."""
@@ -254,6 +256,26 @@
         verbose=FLAGS.verbose_diffs,
         update_goldens=FLAGS.update_goldens)
 
+  @unittest.skipUnless(
+      sys.version_info.major == 2,
+      'API compatibility test goldens are generated using python2.')
+  def testAPIBackwardsCompatibility(self):
+    golden_file_pattern = os.path.join(
+        resource_loader.get_root_dir_with_all_resources(),
+        _KeyToFilePath('*'))
+    self.checkBackwardsCompatibility(tf, golden_file_pattern)
+
+  @unittest.skipUnless(
+      sys.version_info.major == 2,
+      'API compatibility test goldens are generated using python2.')
+  def testAPIBackwardsCompatibilityV1(self):
+    if not tf_v1:
+      return
+    golden_file_pattern = os.path.join(
+        resource_loader.get_root_dir_with_all_resources(),
+        _KeyToFilePath('*'))
+    self.checkBackwardsCompatibility(tf_v1, golden_file_pattern)
+
 
 if __name__ == '__main__':
   parser = argparse.ArgumentParser()
diff --git a/tensorflow/tools/ci_build/Dockerfile.cpu.ppc64le b/tensorflow/tools/ci_build/Dockerfile.cpu.ppc64le
index e879c34..ada2c63 100644
--- a/tensorflow/tools/ci_build/Dockerfile.cpu.ppc64le
+++ b/tensorflow/tools/ci_build/Dockerfile.cpu.ppc64le
@@ -7,7 +7,7 @@
 RUN /install/install_bootstrap_deb_packages.sh
 RUN add-apt-repository -y ppa:openjdk-r/ppa
 RUN /install/install_deb_packages.sh
-RUN apt-get update && apt-get install -y libopenblas-dev
+RUN /install/install_openblas_ppc64le.sh
 RUN /install/install_hdf5_ppc64le.sh
 RUN /install/install_pip_packages.sh
 RUN /install/install_bazel_from_source.sh
diff --git a/tensorflow/tools/ci_build/Dockerfile.gpu.ppc64le b/tensorflow/tools/ci_build/Dockerfile.gpu.ppc64le
index 8967138..a404f12 100644
--- a/tensorflow/tools/ci_build/Dockerfile.gpu.ppc64le
+++ b/tensorflow/tools/ci_build/Dockerfile.gpu.ppc64le
@@ -13,7 +13,7 @@
 RUN /install/install_bootstrap_deb_packages.sh
 RUN add-apt-repository -y ppa:openjdk-r/ppa
 RUN /install/install_deb_packages.sh
-RUN apt-get update && apt-get install -y libopenblas-dev
+RUN /install/install_openblas_ppc64le.sh
 RUN /install/install_hdf5_ppc64le.sh
 RUN /install/install_pip_packages.sh
 RUN /install/install_bazel_from_source.sh
diff --git a/tensorflow/tools/ci_build/gpu_build/parallel_gpu_execute.sh b/tensorflow/tools/ci_build/gpu_build/parallel_gpu_execute.sh
index d0816c9..75da9bb 100755
--- a/tensorflow/tools/ci_build/gpu_build/parallel_gpu_execute.sh
+++ b/tensorflow/tools/ci_build/gpu_build/parallel_gpu_execute.sh
@@ -35,6 +35,30 @@
   exit 1
 fi
 
+function is_absolute {
+  [[ "$1" = /* ]] || [[ "$1" =~ ^[a-zA-Z]:[/\\].* ]]
+}
+
+RUNFILES_MANIFEST_FILE="${TEST_SRCDIR}/MANIFEST"
+function rlocation() {
+  if is_absolute "$1" ; then
+    # If the file path is already fully specified, simply return it.
+    echo "$1"
+  elif [[ -e "$TEST_SRCDIR/$1" ]]; then
+    # If the file exists in the $TEST_SRCDIR then just use it.
+    echo "$TEST_SRCDIR/$1"
+  elif [[ -e "$RUNFILES_MANIFEST_FILE" ]]; then
+    # If a runfiles manifest file exists then use it.
+    echo "$(grep "^$1 " "$RUNFILES_MANIFEST_FILE" | sed 's/[^ ]* //')"
+  fi
+}
+
+TEST_BINARY="$(rlocation "$TEST_WORKSPACE/${1#./}")"
+shift
+
+# Make sure /var/lock exists; this may not be true under MSYS.
+mkdir -p /var/lock
+
 TF_GPU_COUNT=${TF_GPU_COUNT:-8}
 
 for i in `seq 0 $((TF_GPU_COUNT-1))`; do
@@ -45,8 +69,8 @@
       # This export only works within the brackets, so it is isolated to one
       # single command.
       export CUDA_VISIBLE_DEVICES=$i
-      echo "Running test $* on GPU $CUDA_VISIBLE_DEVICES"
-      $@
+      echo "Running test $TEST_BINARY $* on GPU $CUDA_VISIBLE_DEVICES"
+      "$TEST_BINARY" $@
     )
     return_code=$?
     flock -u "$lock_fd"
diff --git a/tensorflow/tools/ci_build/install/install_bazel.sh b/tensorflow/tools/ci_build/install/install_bazel.sh
index 3e27a94..adbff8f 100755
--- a/tensorflow/tools/ci_build/install/install_bazel.sh
+++ b/tensorflow/tools/ci_build/install/install_bazel.sh
@@ -15,7 +15,7 @@
 # ==============================================================================
 
 # Select bazel version.
-BAZEL_VERSION="0.11.0"
+BAZEL_VERSION="0.14.1"
 
 set +e
 local_bazel_ver=$(bazel version 2>&1 | grep -i label | awk '{print $3}')
diff --git a/tensorflow/tools/ci_build/install/install_bazel_from_source.sh b/tensorflow/tools/ci_build/install/install_bazel_from_source.sh
index ddad00c..9d24b3e 100755
--- a/tensorflow/tools/ci_build/install/install_bazel_from_source.sh
+++ b/tensorflow/tools/ci_build/install/install_bazel_from_source.sh
@@ -18,7 +18,7 @@
 # It will compile bazel from source and install it in /usr/local/bin
 
 # Select bazel version.
-BAZEL_VERSION="0.11.0"
+BAZEL_VERSION="0.14.1"
 
 set +e
 local_bazel_ver=$(bazel version 2>&1 | grep -i label | awk '{print $3}')
diff --git a/tensorflow/tools/ci_build/install/install_openblas_ppc64le.sh b/tensorflow/tools/ci_build/install/install_openblas_ppc64le.sh
new file mode 100755
index 0000000..107cc61
--- /dev/null
+++ b/tensorflow/tools/ci_build/install/install_openblas_ppc64le.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+OPENBLAS_SRC_PATH=/tmp/openblas_src/
+POWER="POWER8"
+USE_OPENMP="USE_OPENMP=1"
+OPENBLAS_INSTALL_PATH="/usr"
+apt-get update
+apt-get install -y gfortran gfortran-5
+rm -rf ${OPENBLAS_SRC_PATH}
+git clone -b release-0.3.0 https://github.com/xianyi/OpenBLAS ${OPENBLAS_SRC_PATH}
+cd ${OPENBLAS_SRC_PATH}
+# Pick up fix for OpenBLAS issue 1571
+git cherry-pick -X theirs 961d25e9c7e4a1758adb1dbeaa15187de69dd052
+make TARGET=${POWER} ${USE_OPENMP} FC=gfortran
+make PREFIX=${OPENBLAS_INSTALL_PATH} install
diff --git a/tensorflow/tools/ci_build/windows/gpu/pip/build_tf_windows.sh b/tensorflow/tools/ci_build/windows/gpu/pip/build_tf_windows.sh
index fe3bce4..36b2142 100644
--- a/tensorflow/tools/ci_build/windows/gpu/pip/build_tf_windows.sh
+++ b/tensorflow/tools/ci_build/windows/gpu/pip/build_tf_windows.sh
@@ -105,14 +105,18 @@
 PIP_NAME=$(ls ${PY_TEST_DIR}/tensorflow-*.whl)
 reinstall_tensorflow_pip ${PIP_NAME}
 
+TF_GPU_COUNT=${TF_GPU_COUNT:-8}
+
 # Define no_tensorflow_py_deps=true so that every py_test has no deps anymore,
 # which will result in testing the system-installed tensorflow
 # GPU tests are very flaky when running concurrently, so set local_test_jobs=1
 bazel test --announce_rc --config=opt -k --test_output=errors \
+  --test_env=TF_GPU_COUNT \
+  --run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute \
   --define=no_tensorflow_py_deps=true --test_lang_filters=py \
   --test_tag_filters=-no_pip,-no_windows,-no_windows_gpu,-no_gpu,-no_pip_gpu,-no_oss \
   --build_tag_filters=-no_pip,-no_windows,-no_windows_gpu,-no_gpu,-no_pip_gpu,-no_oss --build_tests_only \
-  --local_test_jobs=1 --test_timeout="300,450,1200,3600" \
+  --local_test_jobs=$TF_GPU_COUNT --test_timeout="300,450,1200,3600" \
   --flaky_test_attempts=3 \
   //${PY_TEST_DIR}/tensorflow/python/... \
   //${PY_TEST_DIR}/tensorflow/contrib/...
diff --git a/tensorflow/tools/docker/Dockerfile.devel-gpu b/tensorflow/tools/docker/Dockerfile.devel-gpu
index 5ec43b8..44120bf 100644
--- a/tensorflow/tools/docker/Dockerfile.devel-gpu
+++ b/tensorflow/tools/docker/Dockerfile.devel-gpu
@@ -15,6 +15,8 @@
         git \
         libcudnn7=7.1.4.18-1+cuda9.0 \
         libcudnn7-dev=7.1.4.18-1+cuda9.0 \
+        libnccl2=2.2.13-1+cuda9.0 \
+        libnccl-dev=2.2.13-1+cuda9.0 \
         libcurl3-dev \
         libfreetype6-dev \
         libhdf5-serial-dev \
@@ -33,6 +35,15 @@
     find /usr/local/cuda-9.0/lib64/ -type f -name 'lib*_static.a' -not -name 'libcudart_static.a' -delete && \
     rm /usr/lib/x86_64-linux-gnu/libcudnn_static_v7.a
 
+# Link NCCL library and header where the build script expects them.
+RUN mkdir /usr/local/cuda-9.0/lib &&  \
+    ln -s /usr/lib/x86_64-linux-gnu/libnccl.so.2 /usr/local/cuda/lib/libnccl.so.2 && \
+    ln -s /usr/include/nccl.h /usr/local/cuda/include/nccl.h
+
+# TODO(tobyboyd): Remove after license is excluded from BUILD file.
+RUN gunzip /usr/share/doc/libnccl2/NCCL-SLA.txt.gz && \
+    cp /usr/share/doc/libnccl2/NCCL-SLA.txt /usr/local/cuda/
+
 RUN curl -fSsL -O https://bootstrap.pypa.io/get-pip.py && \
     python get-pip.py && \
     rm get-pip.py
@@ -91,10 +102,13 @@
 ENV CI_BUILD_PYTHON python
 ENV LD_LIBRARY_PATH /usr/local/cuda/extras/CUPTI/lib64:$LD_LIBRARY_PATH
 ENV TF_NEED_CUDA 1
-ENV TF_CUDA_COMPUTE_CAPABILITIES=3.0,3.5,5.2,6.0,6.1
+ENV TF_CUDA_COMPUTE_CAPABILITIES=3.5,5.2,6.0,6.1,7.0
 ENV TF_CUDA_VERSION=9.0
 ENV TF_CUDNN_VERSION=7
 
+# NCCL 2.x
+ENV TF_NCCL_VERSION=2
+
 RUN ln -s /usr/local/cuda/lib64/stubs/libcuda.so /usr/local/cuda/lib64/stubs/libcuda.so.1 && \
     LD_LIBRARY_PATH=/usr/local/cuda/lib64/stubs:${LD_LIBRARY_PATH} \
     tensorflow/tools/ci_build/builds/configured GPU \
diff --git a/tensorflow/tools/docker/Dockerfile.gpu b/tensorflow/tools/docker/Dockerfile.gpu
index 9197651..28d4371 100644
--- a/tensorflow/tools/docker/Dockerfile.gpu
+++ b/tensorflow/tools/docker/Dockerfile.gpu
@@ -13,6 +13,7 @@
         cuda-cusparse-9-0 \
         curl \
         libcudnn7=7.1.4.18-1+cuda9.0 \
+        libnccl2=2.2.13-1+cuda9.0 \
         libfreetype6-dev \
         libhdf5-serial-dev \
         libpng12-dev \
diff --git a/tensorflow/tools/docs/doc_generator_visitor.py b/tensorflow/tools/docs/doc_generator_visitor.py
index 259a469..c090dbd 100644
--- a/tensorflow/tools/docs/doc_generator_visitor.py
+++ b/tensorflow/tools/docs/doc_generator_visitor.py
@@ -20,6 +20,7 @@
 
 import six
 
+from tensorflow.python.util import tf_export
 from tensorflow.python.util import tf_inspect
 
 
@@ -201,7 +202,6 @@
             raw_duplicates[master_name] = [master_name, full_name]
         else:
           reverse_index[object_id] = full_name
-
     # Decide on master names, rewire duplicates and make a duplicate_of map
     # mapping all non-master duplicates to the master name. The master symbol
     # does not have an entry in this map.
@@ -211,10 +211,15 @@
     duplicates = {}
     for names in raw_duplicates.values():
       names = sorted(names)
-
-      # Choose the lexicographically first name with the minimum number of
-      # submodules. This will prefer highest level namespace for any symbol.
-      master_name = min(names, key=lambda name: name.count('.'))
+      master_name = (
+          tf_export.get_canonical_name_for_symbol(self._index[names[0]])
+          if names else None)
+      if master_name:
+        master_name = 'tf.%s' % master_name
+      else:
+        # Choose the lexicographically first name with the minimum number of
+        # submodules. This will prefer the highest-level namespace for any symbol.
+        master_name = min(names, key=lambda name: name.count('.'))
 
       duplicates[master_name] = names
       for name in names:
diff --git a/tensorflow/tools/lib_package/BUILD b/tensorflow/tools/lib_package/BUILD
index 173f418..44d8a37 100644
--- a/tensorflow/tools/lib_package/BUILD
+++ b/tensorflow/tools/lib_package/BUILD
@@ -143,6 +143,7 @@
         "@zlib_archive//:zlib.h",
     ] + if_mkl([
         "//third_party/mkl:LICENSE",
+        "//third_party/mkl_dnn:LICENSE",
     ]),
     outs = ["include/tensorflow/c/LICENSE"],
     cmd = "$(location :concat_licenses.sh) $(SRCS) >$@",
@@ -182,6 +183,7 @@
         "@zlib_archive//:zlib.h",
     ] + if_mkl([
         "//third_party/mkl:LICENSE",
+        "//third_party/mkl_dnn:LICENSE",
     ]),
     outs = ["include/tensorflow/jni/LICENSE"],
     cmd = "$(location :concat_licenses.sh) $(SRCS) >$@",
diff --git a/tensorflow/tools/pip_package/BUILD b/tensorflow/tools/pip_package/BUILD
index c9d53f4..e661fb1 100644
--- a/tensorflow/tools/pip_package/BUILD
+++ b/tensorflow/tools/pip_package/BUILD
@@ -11,7 +11,7 @@
 )
 load("//third_party/mkl:build_defs.bzl", "if_mkl")
 load("//tensorflow:tensorflow.bzl", "if_cuda")
-load("@local_config_tensorrt//:build_defs.bzl", "if_tensorrt")
+load("@local_config_syslibs//:build_defs.bzl", "if_not_system_lib")
 load("//tensorflow/core:platform/default/build_config_root.bzl", "tf_additional_license_deps")
 
 # This returns a list of headers of all public header libraries (e.g.,
@@ -104,6 +104,7 @@
     "//tensorflow/python/kernel_tests/testdata:self_adjoint_eig_op_test_files",
     "//tensorflow/python/saved_model:saved_model",
     "//tensorflow/python/tools:tools_pip",
+    "//tensorflow/python/tools/api/generator:create_python_api",
     "//tensorflow/python:test_ops",
     "//tensorflow/tools/dist_test/server:grpc_tensorflow_server",
 ]
@@ -144,7 +145,6 @@
         "@gast_archive//:PKG-INFO",
         "@gemmlowp//:LICENSE",
         "@gif_archive//:COPYING",
-        "@grpc//:LICENSE",
         "@highwayhash//:LICENSE",
         "@jemalloc//:COPYING",
         "@jpeg//:LICENSE.md",
@@ -153,8 +153,6 @@
         "@lmdb//:LICENSE",
         "@local_config_nccl//:LICENSE",
         "@local_config_sycl//sycl:LICENSE.text",
-        "@grpc//third_party/nanopb:LICENSE.txt",
-        "@grpc//third_party/address_sorting:LICENSE",
         "@nasm//:LICENSE",
         "@nsync//:LICENSE",
         "@pcre//:LICENCE",
@@ -168,7 +166,15 @@
         "@org_python_pypi_backports_weakref//:LICENSE",
     ] + if_mkl([
         "//third_party/mkl:LICENSE",
-    ]) + tf_additional_license_deps(),
+        "//third_party/mkl_dnn:LICENSE",
+    ]) + if_not_system_lib(
+        "grpc",
+        [
+            "@grpc//:LICENSE",
+            "@grpc//third_party/nanopb:LICENSE.txt",
+            "@grpc//third_party/address_sorting:LICENSE",
+        ],
+    ) + tf_additional_license_deps(),
 )
 
 sh_binary(
@@ -183,9 +189,7 @@
             "//tensorflow/contrib/lite/python:tflite_convert",
             "//tensorflow/contrib/lite/toco/python:toco_from_protos",
         ],
-    }) + if_mkl(["//third_party/mkl:intel_binary_blob"]) + if_tensorrt([
-        "//tensorflow/contrib/tensorrt:init_py",
-    ]),
+    }) + if_mkl(["//third_party/mkl:intel_binary_blob"]),
 )
 
 # A genrule for generating a marker file for the pip package on Windows
diff --git a/tensorflow/tools/pip_package/build_pip_package.sh b/tensorflow/tools/pip_package/build_pip_package.sh
index b0089d3..4101b34 100755
--- a/tensorflow/tools/pip_package/build_pip_package.sh
+++ b/tensorflow/tools/pip_package/build_pip_package.sh
@@ -27,7 +27,7 @@
 
   pushd .
   cd "$src_dir"
-  for f in `find . ! -type d ! -name '*.py' ! -path '*local_config_cuda*' ! -path '*local_config_tensorrt*' ! -path '*org_tensorflow*'`; do
+  for f in `find . ! -type d ! -name '*.py' ! -path '*local_config_cuda*' ! -path '*local_config_tensorrt*' ! -path '*local_config_syslibs*' ! -path '*org_tensorflow*'`; do
     mkdir -p "${dest_dir}/$(dirname ${f})"
     cp "${f}" "${dest_dir}/$(dirname ${f})/"
   done
diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl
index b712954..378de42 100644
--- a/tensorflow/workspace.bzl
+++ b/tensorflow/workspace.bzl
@@ -8,6 +8,7 @@
 load("//third_party/py:python_configure.bzl", "python_configure")
 
 load("//third_party/sycl:sycl_configure.bzl", "sycl_configure")
+load("//third_party/systemlibs:syslibs_configure.bzl", "syslibs_configure")
 load("//third_party/toolchains/clang6:repo.bzl", "clang6_configure")
 load("//third_party/toolchains/cpus/arm:arm_compiler_configure.bzl", "arm_compiler_configure")
 load("//third_party:repo.bzl", "tf_http_archive")
@@ -35,6 +36,7 @@
   nccl_configure(name="local_config_nccl")
   git_configure(name="local_config_git")
   sycl_configure(name="local_config_sycl")
+  syslibs_configure(name="local_config_syslibs")
   python_configure(name="local_config_python")
 
   # For windows bazel build
@@ -161,6 +163,7 @@
       ],
       sha256 = "2f945446b71336e7f5a2bcace1abcf0b23fbba368266c6a1be33de3de3b3c912",
       strip_prefix = "re2-2018-04-01",
+      system_build_file = clean_dep("//third_party/systemlibs:re2.BUILD"),
   )
 
   tf_http_archive(
@@ -226,6 +229,7 @@
       sha256 = "63ec86477ad3f0f6292325fd89e1d93aea2e2fd490070863f17d48f7cd387011",
       strip_prefix = "nasm-2.13.03",
       build_file = clean_dep("//third_party:nasm.BUILD"),
+      system_build_file = clean_dep("//third_party/systemlibs:nasm.BUILD"),
   )
 
   tf_http_archive(
@@ -237,6 +241,7 @@
       sha256 = "1a17020f859cb12711175a67eab5c71fc1904e04b587046218e36106e07eabde",
       strip_prefix = "libjpeg-turbo-1.5.3",
       build_file = clean_dep("//third_party/jpeg:jpeg.BUILD"),
+      system_build_file = clean_dep("//third_party/systemlibs:jpeg.BUILD"),
   )
 
   tf_http_archive(
@@ -249,6 +254,7 @@
       strip_prefix = "libpng-1.6.34",
       build_file = clean_dep("//third_party:png.BUILD"),
       patch_file = clean_dep("//third_party:png_fix_rpi.patch"),
+      system_build_file = clean_dep("//third_party/systemlibs:png.BUILD"),
   )
 
   tf_http_archive(
@@ -260,6 +266,7 @@
       sha256 = "ad68c1216c3a474cf360c7581a4001e952515b3649342100f2d7ca7c8e313da6",
       strip_prefix = "sqlite-amalgamation-3240000",
       build_file = clean_dep("//third_party:sqlite.BUILD"),
+      system_build_file = clean_dep("//third_party/systemlibs:sqlite.BUILD"),
   )
 
   tf_http_archive(
@@ -271,6 +278,7 @@
       sha256 = "34a7377ba834397db019e8eb122e551a49c98f49df75ec3fcc92b9a794a4f6d1",
       strip_prefix = "giflib-5.1.4",
       build_file = clean_dep("//third_party:gif.BUILD"),
+      system_build_file = clean_dep("//third_party/systemlibs:gif.BUILD"),
   )
 
   tf_http_archive(
@@ -282,6 +290,7 @@
       sha256 = "105f8d68616f8248e24bf0e9372ef04d3cc10104f1980f54d57b2ce73a5ad56a",
       strip_prefix = "six-1.10.0",
       build_file = clean_dep("//third_party:six.BUILD"),
+      system_build_file = clean_dep("//third_party/systemlibs:six.BUILD"),
   )
 
   tf_http_archive(
@@ -293,6 +302,7 @@
       sha256 = "ff6d2e2962d834acb125cc4dcc80c54a8c17c253f4cc9d9c43b5102a560bb75d",
       strip_prefix = "astor-0.6.2",
       build_file = clean_dep("//third_party:astor.BUILD"),
+      system_build_file = clean_dep("//third_party/systemlibs:astor.BUILD"),
   )
 
   tf_http_archive(
@@ -315,6 +325,7 @@
       sha256 = "1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b",
       strip_prefix = "termcolor-1.1.0",
       build_file = clean_dep("//third_party:termcolor.BUILD"),
+      system_build_file = clean_dep("//third_party/systemlibs:termcolor.BUILD"),
   )
 
   tf_http_archive(
@@ -421,6 +432,7 @@
       ],
       strip_prefix = "pcre-8.42",
       build_file = clean_dep("//third_party:pcre.BUILD"),
+      system_build_file = clean_dep("//third_party/systemlibs:pcre.BUILD"),
   )
 
   tf_http_archive(
@@ -433,6 +445,7 @@
       ],
       strip_prefix = "swig-3.0.8",
       build_file = clean_dep("//third_party:swig.BUILD"),
+      system_build_file = clean_dep("//third_party/systemlibs:swig.BUILD"),
   )
 
   tf_http_archive(
@@ -444,6 +457,7 @@
       ],
       strip_prefix = "curl-7.60.0",
       build_file = clean_dep("//third_party:curl.BUILD"),
+      system_build_file = clean_dep("//third_party/systemlibs:curl.BUILD"),
   )
 
   tf_http_archive(
@@ -454,6 +468,7 @@
       ],
       sha256 = "50db9cf2221354485eb7c3bd55a4c27190caef7048a2a1a15fbe60a498f98b44",
       strip_prefix = "grpc-1.13.0",
+      system_build_file = clean_dep("//third_party/systemlibs:grpc.BUILD"),
   )
 
   tf_http_archive(
@@ -472,11 +487,11 @@
   tf_http_archive(
       name = "llvm",
       urls = [
-          "https://mirror.bazel.build/github.com/llvm-mirror/llvm/archive/d5d94ca3a7f8526c2e4e5f663f9dc79ae5d39d93.tar.gz",
-	  "https://github.com/llvm-mirror/llvm/archive/d5d94ca3a7f8526c2e4e5f663f9dc79ae5d39d93.tar.gz",
+          "https://mirror.bazel.build/github.com/llvm-mirror/llvm/archive/bd8c8d759852871609ba2e4e79868420f751949d.tar.gz",
+          "https://github.com/llvm-mirror/llvm/archive/bd8c8d759852871609ba2e4e79868420f751949d.tar.gz",
       ],
-      sha256 = "280fdc888e2eb88a3a8cc4e7d3034fffc87f98e3e686be31f8c719c6e5b67d2d",
-      strip_prefix = "llvm-d5d94ca3a7f8526c2e4e5f663f9dc79ae5d39d93",
+      sha256 = "0c63e8583b213543309e8577ffe87a0cf34cc22269630d2c5c2f0a2345fda4a8",
+      strip_prefix = "llvm-bd8c8d759852871609ba2e4e79868420f751949d",
       build_file = clean_dep("//third_party/llvm:llvm.autogenerated.BUILD"),
   )
 
@@ -489,6 +504,7 @@
       sha256 = "f3927859882eb608868c8c31586bb7eb84562a40a6bf5cc3e13b6b564641ea28",
       strip_prefix = "lmdb-LMDB_0.9.22/libraries/liblmdb",
       build_file = clean_dep("//third_party:lmdb.BUILD"),
+      system_build_file = clean_dep("//third_party/systemlibs:lmdb.BUILD"),
   )
 
   tf_http_archive(
@@ -500,6 +516,7 @@
       sha256 = "c49deac9e0933bcb7044f08516861a2d560988540b23de2ac1ad443b219afdb6",
       strip_prefix = "jsoncpp-1.8.4",
       build_file = clean_dep("//third_party:jsoncpp.BUILD"),
+      system_build_file = clean_dep("//third_party/systemlibs:jsoncpp.BUILD"),
   )
 
   tf_http_archive(
@@ -521,6 +538,7 @@
       sha256 = "c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1",
       strip_prefix = "zlib-1.2.11",
       build_file = clean_dep("//third_party:zlib.BUILD"),
+      system_build_file = clean_dep("//third_party/systemlibs:zlib.BUILD"),
   )
 
   tf_http_archive(
@@ -542,6 +560,7 @@
       sha256 = "3dfa02e873ff51a11ee02b9ca391807f0c8ea0529a4924afa645fbf97163f9d4",
       strip_prefix = "snappy-1.1.7",
       build_file = clean_dep("//third_party:snappy.BUILD"),
+      system_build_file = clean_dep("//third_party/systemlibs:snappy.BUILD"),
   )
 
   tf_http_archive(
@@ -612,6 +631,7 @@
       sha256 = "3c8f25c02e806c3ce0ab5fb7da1817f89fc9732709024e2a81b6b82f7cc792a8",
       strip_prefix = "jemalloc-4.4.0",
       build_file = clean_dep("//third_party:jemalloc.BUILD"),
+      system_build_file = clean_dep("//third_party/systemlibs:jemalloc.BUILD"),
   )
 
   java_import_external(
@@ -690,6 +710,7 @@
       strip_prefix = "cython-0.28.4",
       build_file = clean_dep("//third_party:cython.BUILD"),
       delete = ["BUILD.bazel"],
+      system_build_file = clean_dep("//third_party/systemlibs:cython.BUILD"),
   )
 
   tf_http_archive(
@@ -722,6 +743,7 @@
           "https://github.com/google/flatbuffers/archive/v1.9.0.tar.gz",
       ],
       build_file = clean_dep("//third_party/flatbuffers:flatbuffers.BUILD"),
+      system_build_file = clean_dep("//third_party/systemlibs:flatbuffers.BUILD"),
   )
 
   native.new_http_archive(
diff --git a/third_party/llvm/llvm.autogenerated.BUILD b/third_party/llvm/llvm.autogenerated.BUILD
index bf9f9ca..c3b9ec4 100644
--- a/third_party/llvm/llvm.autogenerated.BUILD
+++ b/third_party/llvm/llvm.autogenerated.BUILD
@@ -28,9 +28,7 @@
 
 llvm_targets = [
     "AArch64",
-    # Uncomment to enable the AMDGPU backend.
-    # TODO(phawkins): use a configure-time test.
-    # "AMDGPU",
+    "AMDGPU",
     "ARM",
     "NVPTX",
     "PowerPC",
@@ -256,13 +254,31 @@
             ("-gen-dag-isel", "lib/Target/AMDGPU/AMDGPUGenDAGISel.inc"),
             ("-gen-callingconv", "lib/Target/AMDGPU/AMDGPUGenCallingConv.inc"),
             ("-gen-subtarget", "lib/Target/AMDGPU/AMDGPUGenSubtargetInfo.inc"),
-            ("-gen-tgt-intrinsic", "lib/Target/AMDGPU/AMDGPUGenIntrinsics.inc"),
+            ("-gen-tgt-intrinsic-impl", "lib/Target/AMDGPU/AMDGPUGenIntrinsicImpl.inc"),
+            ("-gen-tgt-intrinsic-enums", "lib/Target/AMDGPU/AMDGPUGenIntrinsicEnums.inc"),
             ("-gen-emitter", "lib/Target/AMDGPU/AMDGPUGenMCCodeEmitter.inc"),
             ("-gen-dfa-packetizer", "lib/Target/AMDGPU/AMDGPUGenDFAPacketizer.inc"),
             ("-gen-asm-writer", "lib/Target/AMDGPU/AMDGPUGenAsmWriter.inc"),
             ("-gen-asm-matcher", "lib/Target/AMDGPU/AMDGPUGenAsmMatcher.inc"),
             ("-gen-disassembler", "lib/Target/AMDGPU/AMDGPUGenDisassemblerTables.inc"),
             ("-gen-pseudo-lowering", "lib/Target/AMDGPU/AMDGPUGenMCPseudoLowering.inc"),
+            ("-gen-searchable-tables", "lib/Target/AMDGPU/AMDGPUGenSearchableTables.inc"),
+            ("-gen-global-isel", "lib/Target/AMDGPU/AMDGPUGenGlobalISel.inc"),
+        ],
+    },
+    {
+        "name": "AMDGPU",
+        "lower_name": "amdgpu_r600",
+        "short_name": "R600",
+        "tbl_outs": [
+            ("-gen-asm-writer", "lib/Target/AMDGPU/R600GenAsmWriter.inc"),
+            ("-gen-callingconv", "lib/Target/AMDGPU/R600GenCallingConv.inc"),
+            ("-gen-dag-isel", "lib/Target/AMDGPU/R600GenDAGISel.inc"),
+            ("-gen-dfa-packetizer", "lib/Target/AMDGPU/R600GenDFAPacketizer.inc"),
+            ("-gen-instr-info", "lib/Target/AMDGPU/R600GenInstrInfo.inc"),
+            ("-gen-emitter", "lib/Target/AMDGPU/R600GenMCCodeEmitter.inc"),
+            ("-gen-register-info", "lib/Target/AMDGPU/R600GenRegisterInfo.inc"),
+            ("-gen-subtarget", "lib/Target/AMDGPU/R600GenSubtargetInfo.inc"),
         ],
     },
     {
@@ -670,6 +686,7 @@
     ]),
     copts = llvm_copts + ["-Iexternal/llvm/lib/Target/AMDGPU"],
     deps = [
+        ":amdgpu_r600_target_gen",
         ":amdgpu_target_gen",
         ":config",
         ":core",
@@ -692,6 +709,7 @@
     ]),
     copts = llvm_copts + ["-Iexternal/llvm/lib/Target/AMDGPU"],
     deps = [
+        ":amdgpu_r600_target_gen",
         ":amdgpu_target_gen",
         ":config",
         ":core",
diff --git a/third_party/mkl_dnn/BUILD b/third_party/mkl_dnn/BUILD
index 5b01f6e..d075809 100644
--- a/third_party/mkl_dnn/BUILD
+++ b/third_party/mkl_dnn/BUILD
@@ -1 +1,11 @@
 licenses(["notice"])
+
+exports_files(["LICENSE"])
+
+config_setting(
+    name = "using_mkl_dnn_only",
+    values = {
+        "define": "using_mkl_dnn_only=true",
+    },
+    visibility = ["//visibility:public"],
+)
diff --git a/third_party/mkl_dnn/LICENSE b/third_party/mkl_dnn/LICENSE
new file mode 100644
index 0000000..8dada3e
--- /dev/null
+++ b/third_party/mkl_dnn/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/third_party/mkl_dnn/build_defs.bzl b/third_party/mkl_dnn/build_defs.bzl
new file mode 100644
index 0000000..7ce2a7d
--- /dev/null
+++ b/third_party/mkl_dnn/build_defs.bzl
@@ -0,0 +1,13 @@
+def if_mkl_open_source_only(if_true, if_false = []):
+    """Shorthand for select()'ing on whether we're building with
+    MKL-DNN open source lib only, without depending on MKL binary form.
+
+    Returns a select statement which evaluates to if_true if we're building
+    with MKL-DNN open source lib only. Otherwise,
+    the select statement evaluates to if_false.
+
+    """
+    return select({
+        str(Label("//third_party/mkl_dnn:using_mkl_dnn_only")): if_true,
+        "//conditions:default": if_false,
+    })
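+
+# Usage sketch (illustrative; the target name is hypothetical). A cc_library
+# can append flags only when building against the open source MKL-DNN:
+#
+#   cc_library(
+#       name = "mkl_dnn_consumer",
+#       copts = ["-fexceptions"] + if_mkl_open_source_only(["-UUSE_MKL", "-UUSE_CBLAS"]),
+#   )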
diff --git a/third_party/mkl_dnn/mkldnn.BUILD b/third_party/mkl_dnn/mkldnn.BUILD
index 68f24aa..57d2e12 100644
--- a/third_party/mkl_dnn/mkldnn.BUILD
+++ b/third_party/mkl_dnn/mkldnn.BUILD
@@ -1,5 +1,10 @@
 exports_files(["LICENSE"])
 
+load(
+    "@org_tensorflow//third_party/mkl_dnn:build_defs.bzl",
+    "if_mkl_open_source_only",
+)
+
 config_setting(
     name = "clang_linux_x86_64",
     values = {
@@ -15,7 +20,14 @@
         "src/cpu/*.cpp",
     ]),
     hdrs = glob(["include/*"]),
-    copts = ["-fexceptions"] + select({
+    copts = [
+        "-fexceptions",
+        "-DUSE_MKL",
+        "-DUSE_CBLAS",
+    ] + if_mkl_open_source_only([
+        "-UUSE_MKL",
+        "-UUSE_CBLAS",
+    ]) + select({
         "@org_tensorflow//tensorflow:linux_x86_64": [
             "-fopenmp",  # only works with gcc
         ],
@@ -33,4 +45,19 @@
     ],
     nocopts = "-fno-exceptions",
     visibility = ["//visibility:public"],
+    deps = select({
+        "@org_tensorflow//tensorflow:linux_x86_64": [
+            "@mkl_linux//:mkl_headers",
+            "@mkl_linux//:mkl_libs_linux",
+        ],
+        "@org_tensorflow//tensorflow:darwin": [
+            "@mkl_darwin//:mkl_headers",
+            "@mkl_darwin//:mkl_libs_darwin",
+        ],
+        "@org_tensorflow//tensorflow:windows": [
+            "@mkl_windows//:mkl_headers",
+            "@mkl_windows//:mkl_libs_windows",
+        ],
+        "//conditions:default": [],
+    }),
 )
diff --git a/third_party/nccl/nccl_configure.bzl b/third_party/nccl/nccl_configure.bzl
index 9dfcb18..5d1ebf0 100644
--- a/third_party/nccl/nccl_configure.bzl
+++ b/third_party/nccl/nccl_configure.bzl
@@ -47,10 +47,10 @@
 )
 """
 
+# A local build links NCCL dynamically, so its license does not need to be bundled.
 _NCCL_LOCAL_BUILD_TEMPLATE = """
 filegroup(
   name = "LICENSE",
-  data = ["nccl/NCCL-SLA.txt"],
   visibility = ["//visibility:public"],
 )
 
diff --git a/third_party/repo.bzl b/third_party/repo.bzl
index 9cee1fc..5cb4269 100644
--- a/third_party/repo.bzl
+++ b/third_party/repo.bzl
@@ -35,6 +35,15 @@
   else:
     return None
 
+# Checks if we should use the system lib instead of the bundled one
+def _use_system_lib(ctx, name):
+  syslibenv = _get_env_var(ctx, "TF_SYSTEM_LIBS")
+  if syslibenv:
+    for n in syslibenv.strip().split(","):
+      if n.strip() == name:
+        return True
+  return False
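+# For example (illustrative values): with TF_SYSTEM_LIBS="zlib_archive, grpc"
+# this returns True for the repositories named "zlib_archive" and "grpc" and
+# False for every other name; whitespace around the comma-separated entries
+# is stripped before comparison.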
+
 # Executes specified command with arguments and calls 'fail' if it exited with
 # non-zero code
 def _execute_and_check_ret_code(repo_ctx, cmd_and_args):
@@ -75,17 +84,28 @@
          "Even if you don't have permission to mirror the file, please " +
          "put the correctly formatted mirror URL there anyway, because " +
          "someone will come along shortly thereafter and mirror the file.")
-  ctx.download_and_extract(
-      ctx.attr.urls,
-      "",
-      ctx.attr.sha256,
-      ctx.attr.type,
-      ctx.attr.strip_prefix)
-  if ctx.attr.delete:
-    _apply_delete(ctx, ctx.attr.delete)
-  if ctx.attr.patch_file != None:
-    _apply_patch(ctx, ctx.attr.patch_file)
-  if ctx.attr.build_file != None:
+
+  use_syslib = _use_system_lib(ctx, ctx.attr.name)
+  if not use_syslib:
+    ctx.download_and_extract(
+        ctx.attr.urls,
+        "",
+        ctx.attr.sha256,
+        ctx.attr.type,
+        ctx.attr.strip_prefix)
+    if ctx.attr.delete:
+      _apply_delete(ctx, ctx.attr.delete)
+    if ctx.attr.patch_file != None:
+      _apply_patch(ctx, ctx.attr.patch_file)
+
+  if use_syslib and ctx.attr.system_build_file != None:
+    # Use BUILD.bazel to avoid conflict with third party projects with
+    # BUILD or build (directory) underneath.
+    ctx.template("BUILD.bazel", ctx.attr.system_build_file, {
+        "%prefix%": ".." if _repos_are_siblings() else "external",
+    }, False)
+
+  elif ctx.attr.build_file != None:
     # Use BUILD.bazel to avoid conflict with third party projects with
     # BUILD or build (directory) underneath.
     ctx.template("BUILD.bazel", ctx.attr.build_file, {
@@ -102,7 +122,11 @@
         "delete": attr.string_list(),
         "patch_file": attr.label(),
         "build_file": attr.label(),
-    })
+        "system_build_file": attr.label(),
+    },
+    environ = [
+        "TF_SYSTEM_LIBS",
+    ])
 """Downloads and creates Bazel repos for dependencies.
 
 This is a swappable replacement for both http_archive() and
diff --git a/third_party/systemlibs/BUILD b/third_party/systemlibs/BUILD
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/third_party/systemlibs/BUILD
diff --git a/third_party/systemlibs/BUILD.tpl b/third_party/systemlibs/BUILD.tpl
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/third_party/systemlibs/BUILD.tpl
diff --git a/third_party/systemlibs/astor.BUILD b/third_party/systemlibs/astor.BUILD
new file mode 100644
index 0000000..497ec4b
--- /dev/null
+++ b/third_party/systemlibs/astor.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"])  # New BSD
+
+filegroup(
+    name = "LICENSE",
+    visibility = ["//visibility:public"],
+)
+
+py_library(
+    name = "astor",
+    srcs_version = "PY2AND3",
+    visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/build_defs.bzl.tpl b/third_party/systemlibs/build_defs.bzl.tpl
new file mode 100644
index 0000000..3faa46c
--- /dev/null
+++ b/third_party/systemlibs/build_defs.bzl.tpl
@@ -0,0 +1,32 @@
+# -*- Python -*-
+"""Skylark macros for system libraries.
+"""
+
+SYSTEM_LIBS_ENABLED = %{syslibs_enabled}
+
+SYSTEM_LIBS_LIST = [
+%{syslibs_list}
+]
+
+
+def if_any_system_libs(a, b=[]):
+  """Conditional which evaluates to 'a' if any system libraries are configured."""
+  if SYSTEM_LIBS_ENABLED:
+    return a
+  else:
+    return b
+
+
+def if_system_lib(lib, a, b=[]):
+  """Conditional which evaluates to 'a' if we're using the system version of lib"""
+
+  if SYSTEM_LIBS_ENABLED and lib in SYSTEM_LIBS_LIST:
+    return a
+  else:
+    return b
+
+
+def if_not_system_lib(lib, a, b=[]):
+  """Conditional which evaluates to 'a' if we're using the system version of lib"""
+
+  return if_system_lib(lib, b, a)
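+
+
+# Usage sketch (illustrative; both labels are hypothetical). A BUILD file can
+# pick between a system-provided and a bundled dependency:
+#
+#   deps = if_system_lib(
+#       "grpc",
+#       ["@grpc//:grpc"],        # system version
+#       ["//third_party/grpc"],  # bundled version
+#   )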
diff --git a/third_party/systemlibs/curl.BUILD b/third_party/systemlibs/curl.BUILD
new file mode 100644
index 0000000..c5f125c
--- /dev/null
+++ b/third_party/systemlibs/curl.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"])  # MIT/X derivative license
+
+filegroup(
+    name = "COPYING",
+    visibility = ["//visibility:public"],
+)
+
+cc_library(
+    name = "curl",
+    linkopts = ["-lcurl"],
+    visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/cython.BUILD b/third_party/systemlibs/cython.BUILD
new file mode 100644
index 0000000..1d52587
--- /dev/null
+++ b/third_party/systemlibs/cython.BUILD
@@ -0,0 +1,13 @@
+licenses(["notice"])  # Apache-2.0
+
+genrule(
+    name = "lncython",
+    outs = ["cython"],
+    cmd = "ln -s $$(which cython) $@",
+)
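+# The genrule above symlinks the cython binary found on the host's PATH into
+# Bazel's output tree so the sh_binary below can wrap it.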
+
+sh_binary(
+    name = "cython_binary",
+    srcs = ["cython"],
+    visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/flatbuffers.BUILD b/third_party/systemlibs/flatbuffers.BUILD
new file mode 100644
index 0000000..14fcead
--- /dev/null
+++ b/third_party/systemlibs/flatbuffers.BUILD
@@ -0,0 +1,38 @@
+licenses(["notice"])  # Apache 2.0
+
+filegroup(
+    name = "LICENSE.txt",
+    visibility = ["//visibility:public"],
+)
+
+# Public flatbuffers runtime library, used to work with flatbuffer files at runtime.
+cc_library(
+    name = "flatbuffers",
+    linkopts = ["-lflatbuffers"],
+    visibility = ["//visibility:public"],
+)
+
+# Public flatc compiler library.
+cc_library(
+    name = "flatc_library",
+    linkopts = ["-lflatbuffers"],
+    visibility = ["//visibility:public"],
+)
+
+genrule(
+    name = "lnflatc",
+    outs = ["flatc.bin"],
+    cmd = "ln -s $$(which flatc) $@",
+)
+
+# Public flatc compiler.
+sh_binary(
+    name = "flatc",
+    srcs = ["flatc.bin"],
+    visibility = ["//visibility:public"],
+)
+
+cc_library(
+    name = "runtime_cc",
+    visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/gif.BUILD b/third_party/systemlibs/gif.BUILD
new file mode 100644
index 0000000..5eb2c91
--- /dev/null
+++ b/third_party/systemlibs/gif.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"])  # MIT
+
+filegroup(
+    name = "COPYING",
+    visibility = ["//visibility:public"],
+)
+
+cc_library(
+    name = "gif",
+    linkopts = ["-lgif"],
+    visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/grpc.BUILD b/third_party/systemlibs/grpc.BUILD
new file mode 100644
index 0000000..fd90eb0
--- /dev/null
+++ b/third_party/systemlibs/grpc.BUILD
@@ -0,0 +1,54 @@
+licenses(["notice"])  # Apache v2
+
+filegroup(
+    name = "LICENSE",
+    visibility = ["//visibility:public"],
+)
+
+cc_library(
+    name = "grpc",
+    linkopts = ["-lgrpc"],
+    visibility = ["//visibility:public"],
+)
+
+cc_library(
+    name = "grpc++",
+    linkopts = ["-lgrpc++"],
+    visibility = ["//visibility:public"],
+)
+
+cc_library(
+    name = "grpc_unsecure",
+    linkopts = ["-lgrpc_unsecure"],
+    visibility = ["//visibility:public"],
+)
+
+cc_library(
+    name = "grpc++_unsecure",
+    linkopts = ["-lgrpc++_unsecure"],
+    visibility = ["//visibility:public"],
+)
+
+genrule(
+    name = "ln_grpc_cpp_plugin",
+    outs = ["grpc_cpp_plugin.bin"],
+    cmd = "ln -s $$(which grpc_cpp_plugin) $@",
+)
+
+sh_binary(
+    name = "grpc_cpp_plugin",
+    srcs = ["grpc_cpp_plugin.bin"],
+    visibility = ["//visibility:public"],
+)
+
+genrule(
+    name = "ln_grpc_python_plugin",
+    outs = ["grpc_python_plugin.bin"],
+    cmd = "ln -s $$(which grpc_python_plugin) $@",
+)
+
+sh_binary(
+    name = "grpc_python_plugin",
+    srcs = ["grpc_python_plugin.bin"],
+    visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/jemalloc.BUILD b/third_party/systemlibs/jemalloc.BUILD
new file mode 100644
index 0000000..6a48d58
--- /dev/null
+++ b/third_party/systemlibs/jemalloc.BUILD
@@ -0,0 +1,30 @@
+licenses(["notice"])  # BSD
+
+filegroup(
+    name = "COPYING",
+    visibility = ["//visibility:public"],
+)
+
+cc_library(
+    name = "jemalloc_headers",
+    defines = [
+        "jemalloc_posix_memalign=posix_memalign",
+        "jemalloc_malloc=malloc",
+        "jemalloc_realloc=realloc",
+        "jemalloc_free=free",
+    ],
+    visibility = ["//visibility:public"],
+)
+
+cc_library(
+    name = "jemalloc_impl",
+    linkopts = ["-ljemalloc"],
+    defines = [
+        "jemalloc_posix_memalign=posix_memalign",
+        "jemalloc_malloc=malloc",
+        "jemalloc_realloc=realloc",
+        "jemalloc_free=free",
+    ],
+    visibility = ["//visibility:public"],
+    deps = [":jemalloc_headers"],
+)
diff --git a/third_party/systemlibs/jpeg.BUILD b/third_party/systemlibs/jpeg.BUILD
new file mode 100644
index 0000000..f4f52da
--- /dev/null
+++ b/third_party/systemlibs/jpeg.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"])  # custom notice-style license, see LICENSE.md
+
+filegroup(
+    name = "LICENSE.md",
+    visibility = ["//visibility:public"],
+)
+
+cc_library(
+    name = "jpeg",
+    linkopts = ["-ljpeg"],
+    visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/jsoncpp.BUILD b/third_party/systemlibs/jsoncpp.BUILD
new file mode 100644
index 0000000..cf91917
--- /dev/null
+++ b/third_party/systemlibs/jsoncpp.BUILD
@@ -0,0 +1,37 @@
+licenses(["unencumbered"])  # Public Domain or MIT
+
+filegroup(
+    name = "LICENSE",
+    visibility = ["//visibility:public"],
+)
+
+HEADERS = [
+    "include/json/autolink.h",
+    "include/json/config.h",
+    "include/json/features.h",
+    "include/json/forwards.h",
+    "include/json/json.h",
+    "include/json/reader.h",
+    "include/json/value.h",
+    "include/json/version.h",
+    "include/json/writer.h",
+]
+
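+# Symlink the system jsoncpp headers into the include/json/ layout that
+# dependents expect; $${i##*/} strips the directory portion of each output
+# path, leaving just the header name to link from /usr/include/jsoncpp/json/.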
+genrule(
+    name = "link_headers",
+    outs = HEADERS,
+    cmd = """
+      for i in $(OUTS); do
+        i=$${i##*/}
+        ln -vsf /usr/include/jsoncpp/json/$$i $(@D)/include/json/$$i
+      done
+    """,
+)
+
+cc_library(
+    name = "jsoncpp",
+    hdrs = HEADERS,
+    includes = ["."],
+    linkopts = ["-ljsoncpp"],
+    visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/lmdb.BUILD b/third_party/systemlibs/lmdb.BUILD
new file mode 100644
index 0000000..6177b09
--- /dev/null
+++ b/third_party/systemlibs/lmdb.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"])  # OpenLDAP Public License
+
+filegroup(
+    name = "LICENSE",
+    visibility = ["//visibility:public"],
+)
+
+cc_library(
+    name = "lmdb",
+    linkopts = ["-llmdb"],
+    visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/nasm.BUILD b/third_party/systemlibs/nasm.BUILD
new file mode 100644
index 0000000..10ef8d8
--- /dev/null
+++ b/third_party/systemlibs/nasm.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"])  # BSD 2-clause
+
+filegroup(
+    name = "LICENSE",
+    visibility = ["//visibility:public"],
+)
+
+sh_binary(
+    name = "nasm",
+    srcs = ["nasm"],
+    visibility = ["@jpeg//:__pkg__"],
+)
diff --git a/third_party/systemlibs/pcre.BUILD b/third_party/systemlibs/pcre.BUILD
new file mode 100644
index 0000000..df74238
--- /dev/null
+++ b/third_party/systemlibs/pcre.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"])  # BSD
+
+filegroup(
+    name = "LICENCE",
+    visibility = ["//visibility:public"],
+)
+
+cc_library(
+    name = "pcre",
+    linkopts = ["-lpcre"],
+    visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/png.BUILD b/third_party/systemlibs/png.BUILD
new file mode 100644
index 0000000..fc6b6f2
--- /dev/null
+++ b/third_party/systemlibs/png.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"])  # BSD/MIT-like license
+
+filegroup(
+    name = "LICENSE",
+    visibility = ["//visibility:public"],
+)
+
+cc_library(
+    name = "png",
+    linkopts = ["-lpng"],
+    visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/re2.BUILD b/third_party/systemlibs/re2.BUILD
new file mode 100644
index 0000000..c18e252
--- /dev/null
+++ b/third_party/systemlibs/re2.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"])  # BSD/MIT-like license
+
+filegroup(
+    name = "LICENSE",
+    visibility = ["//visibility:public"],
+)
+
+cc_library(
+    name = "re2",
+    linkopts = ["-lre2"],
+    visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/six.BUILD b/third_party/systemlibs/six.BUILD
new file mode 100644
index 0000000..ff9b1a5
--- /dev/null
+++ b/third_party/systemlibs/six.BUILD
@@ -0,0 +1,11 @@
+licenses(["notice"])  # MIT
+
+filegroup(
+    name = "LICENSE",
+    visibility = ["//visibility:public"],
+)
+
+py_library(
+    name = "six",
+    visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/snappy.BUILD b/third_party/systemlibs/snappy.BUILD
new file mode 100644
index 0000000..fd2db9e
--- /dev/null
+++ b/third_party/systemlibs/snappy.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"])  # BSD 3-Clause
+
+filegroup(
+    name = "COPYING",
+    visibility = ["//visibility:public"],
+)
+
+cc_library(
+    name = "snappy",
+    linkopts = ["-lsnappy"],
+    visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/sqlite.BUILD b/third_party/systemlibs/sqlite.BUILD
new file mode 100644
index 0000000..20ee1eb
--- /dev/null
+++ b/third_party/systemlibs/sqlite.BUILD
@@ -0,0 +1,15 @@
+licenses(["unencumbered"])  # Public Domain
+
+# System build of the SQLite library that TensorFlow links against.
+cc_library(
+    name = "org_sqlite",
+    linkopts = ["-lsqlite3"],
+    visibility = ["//visibility:public"],
+)
+
+# This is a Copybara sync helper for Google.
+py_library(
+    name = "python",
+    srcs_version = "PY2AND3",
+    visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/swig.BUILD b/third_party/systemlibs/swig.BUILD
new file mode 100644
index 0000000..4c9b74d
--- /dev/null
+++ b/third_party/systemlibs/swig.BUILD
@@ -0,0 +1,23 @@
+licenses(["restricted"])  # GPLv3
+
+filegroup(
+    name = "LICENSE",
+    visibility = ["//visibility:public"],
+)
+
+filegroup(
+    name = "templates",
+    visibility = ["//visibility:public"],
+)
+
+genrule(
+    name = "lnswiglink",
+    outs = ["swiglink"],
+    cmd = "ln -s $$(which swig) $@",
+)
+
+sh_binary(
+    name = "swig",
+    srcs = ["swiglink"],
+    visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/syslibs_configure.bzl b/third_party/systemlibs/syslibs_configure.bzl
new file mode 100644
index 0000000..07a44c3
--- /dev/null
+++ b/third_party/systemlibs/syslibs_configure.bzl
@@ -0,0 +1,160 @@
+# -*- Python -*-
+"""Repository rule for system library autoconfiguration.
+
+`syslibs_configure` depends on the following environment variables:
+
+  * `TF_SYSTEM_LIBS`: list of third party dependencies that should use
+    the system version instead
+"""
+
+_TF_SYSTEM_LIBS="TF_SYSTEM_LIBS"
+
+VALID_LIBS=[
+    "astor_archive",
+    "com_googlesource_code_re2",
+    "curl",
+    "cython",
+    "flatbuffers",
+    "gif_archive",
+    "grpc",
+    "jemalloc",
+    "jpeg",
+    "jsoncpp_git",
+    "lmdb",
+    "nasm",
+    "org_sqlite",
+    "pcre",
+    "png_archive",
+    "six_archive",
+    "snappy",
+    "swig",
+    "termcolor_archive",
+    "zlib_archive",
+]
+
+
+def auto_configure_fail(msg):
+  """Output failure message when syslibs configuration fails."""
+  red = "\033[0;31m"
+  no_color = "\033[0m"
+  fail("\n%sSystem Library Configuration Error:%s %s\n" % (red, no_color, msg))
+
+
+def _is_windows(repository_ctx):
+  """Returns true if the host operating system is windows."""
+  os_name = repository_ctx.os.name.lower()
+  if os_name.find("windows") != -1:
+    return True
+  return False
+
+
+def _enable_syslibs(repository_ctx):
+  s = repository_ctx.os.environ.get(_TF_SYSTEM_LIBS, '').strip()
+  if not _is_windows(repository_ctx) and s != '':
+    return True
+  return False
+
+
+def _get_system_lib_list(repository_ctx):
+  """Gets the list of deps that should use the system lib.
+
+  Args:
+    repository_ctx: The repository context.
+
+  Returns:
+    A list of the names of deps that should use the system lib.
+  """
+  if _TF_SYSTEM_LIBS not in repository_ctx.os.environ:
+    return []
+
+  libenv = repository_ctx.os.environ[_TF_SYSTEM_LIBS].strip()
+  libs = []
+
+  for lib in libenv.split(','):
+    lib = lib.strip()
+    if lib == "":
+      continue
+    if lib not in VALID_LIBS:
+      auto_configure_fail("Invalid system lib set: %s" % lib)
+      return []
+    libs.append(lib)
+
+  return libs
+
+
+def _format_system_lib_list(repository_ctx):
+  """Formats the list of deps that should use the system lib.
+
+  Args:
+    repository_ctx: The repository context.
+
+  Returns:
+    A string rendering of that list, one quoted name per line, suitable
+    for substitution into build_defs.bzl.tpl.
+  """
+  libs = _get_system_lib_list(repository_ctx)
+  ret = ''
+  for lib in libs:
+    ret += "'%s',\n" % lib
+
+  return ret
+
+
+def _tpl(repository_ctx, tpl, substitutions={}, out=None):
+  if not out:
+    out = tpl.replace(":", "")
+  repository_ctx.template(
+      out,
+      Label("//third_party/systemlibs%s.tpl" % tpl),
+      substitutions,
+      False)
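+# For example, _tpl(repository_ctx, ":build_defs.bzl") renders
+# //third_party/systemlibs:build_defs.bzl.tpl into a build_defs.bzl file in
+# the generated repository (the leading ':' is stripped for the output name).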
+
+
+def _create_dummy_repository(repository_ctx):
+  """Creates the dummy repository to build with all bundled libraries."""
+
+  _tpl(repository_ctx, ":BUILD")
+  _tpl(repository_ctx, ":build_defs.bzl",
+    {
+      "%{syslibs_enabled}": 'False',
+      "%{syslibs_list}": '',
+    })
+
+
+def _create_local_repository(repository_ctx):
+  """Creates the repository to build with system libraries."""
+
+  _tpl(repository_ctx, ":BUILD")
+  _tpl(repository_ctx, ":build_defs.bzl",
+    {
+      "%{syslibs_enabled}": 'True',
+      "%{syslibs_list}": _format_system_lib_list(repository_ctx),
+    })
+
+
+def _syslibs_autoconf_impl(repository_ctx):
+  """Implementation of the syslibs_configure repository rule."""
+  if not _enable_syslibs(repository_ctx):
+    _create_dummy_repository(repository_ctx)
+  else:
+    _create_local_repository(repository_ctx)
+
+
+syslibs_configure = repository_rule(
+    implementation = _syslibs_autoconf_impl,
+    environ = [
+        _TF_SYSTEM_LIBS,
+    ],
+)
+
+"""Configures the build to link to system libraries
+instead of using bundled versions.
+
+Add the following to your WORKSPACE file:
+
+```python
+syslibs_configure(name = "local_config_syslibs")
+```
+
+Args:
+  name: A unique name for this workspace rule.
+"""
diff --git a/third_party/systemlibs/termcolor.BUILD b/third_party/systemlibs/termcolor.BUILD
new file mode 100644
index 0000000..915eb62
--- /dev/null
+++ b/third_party/systemlibs/termcolor.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"])  # MIT
+
+filegroup(
+    name = "COPYING.txt",
+    visibility = ["//visibility:public"],
+)
+
+py_library(
+    name = "termcolor",
+    srcs_version = "PY2AND3",
+    visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/zlib.BUILD b/third_party/systemlibs/zlib.BUILD
new file mode 100644
index 0000000..69462ae
--- /dev/null
+++ b/third_party/systemlibs/zlib.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"])  # BSD/MIT-like license (for zlib)
+
+filegroup(
+    name = "zlib.h",
+    visibility = ["//visibility:public"],
+)
+
+cc_library(
+    name = "zlib",
+    linkopts = ["-lz"],
+    visibility = ["//visibility:public"],
+)
diff --git a/tools/bazel.rc b/tools/bazel.rc
index 3559375..913c4bc 100644
--- a/tools/bazel.rc
+++ b/tools/bazel.rc
@@ -27,6 +27,10 @@
 build:mkl --define=using_mkl=true
 build:mkl -c opt
 
+# This config option enables the MKL-DNN open source library only,
+# without depending on the MKL binary distribution.
+build:mkl_open_source_only --define=using_mkl_dnn_only=true
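+# Example (illustrative): bazel build --config=mkl_open_source_only //tensorflow/tools/pip_package:build_pip_package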
+
 build:download_clang --crosstool_top=@local_config_download_clang//:toolchain
 build:download_clang --define=using_clang=true