Rollback of: "Merge changes from github."
Change: 117304114
diff --git a/configure b/configure
index 0faf61c..2d7ec77 100755
--- a/configure
+++ b/configure
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
 
 ## Set up python-related environment settings
 while true; do
diff --git a/tensorflow/cc/ops/const_op.cc b/tensorflow/cc/ops/const_op.cc
index ddfa2a5..cdf653a 100644
--- a/tensorflow/cc/ops/const_op.cc
+++ b/tensorflow/cc/ops/const_op.cc
@@ -87,9 +87,6 @@
 DEFINE_CONST_IMPL(complex64, proto.add_scomplex_val(t.begin()->real());
                   proto.add_scomplex_val(t.begin()->imag()););
 
-DEFINE_CONST_IMPL(complex128, proto.add_dcomplex_val(t.begin()->real());
-                  proto.add_dcomplex_val(t.begin()->imag()););
-
 Node* Const(StringPiece s, const GraphDefBuilder::Options& options) {
   if (options.HaveError()) return nullptr;
   NodeBuilder node_builder(options.GetNameForOp(OpName()), OpName(),
diff --git a/tensorflow/cc/ops/const_op.h b/tensorflow/cc/ops/const_op.h
index 0a1ee3f..36a97f8 100644
--- a/tensorflow/cc/ops/const_op.h
+++ b/tensorflow/cc/ops/const_op.h
@@ -49,7 +49,6 @@
 DECLARE_CONST(int16);
 DECLARE_CONST(int8);
 DECLARE_CONST(complex64);
-DECLARE_CONST(complex128);
 DECLARE_CONST(int64);
 DECLARE_CONST(bool);
 
diff --git a/tensorflow/contrib/linear_optimizer/python/ops/sdca_ops.py b/tensorflow/contrib/linear_optimizer/python/ops/sdca_ops.py
index 5e98e14..525a984 100644
--- a/tensorflow/contrib/linear_optimizer/python/ops/sdca_ops.py
+++ b/tensorflow/contrib/linear_optimizer/python/ops/sdca_ops.py
@@ -21,8 +21,6 @@
 import threading
 import uuid
 
-from six.moves import range
-
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import ops
 from tensorflow.python.framework.load_library import load_op_library
@@ -225,7 +223,7 @@
       dense_features = self._convert_n_to_tensor(examples['dense_features'])
       dense_variables = self._convert_n_to_tensor(self._variables[
           'dense_features_weights'])
-      for i in range(len(dense_variables)):
+      for i in xrange(len(dense_variables)):
         predictions += dense_features[i] * dense_variables[i]
     return predictions
 
diff --git a/tensorflow/core/framework/allocator.h b/tensorflow/core/framework/allocator.h
index 30c7c19..97a3f61 100644
--- a/tensorflow/core/framework/allocator.h
+++ b/tensorflow/core/framework/allocator.h
@@ -187,14 +187,13 @@
 
   // is_simple<T>::value if T[] can be safely constructed and destructed
   // without running T() and ~T().  We do not use std::is_trivial<T>
-  // directly because std::complex<float> and std::complex<double> are
-  // not trival, but their arrays can be constructed and destructed
-  // without running their default ctors and dtors.
+  // directly because std::complex<float> is not trival but its array
+  // can be constructed and destructed without running its default ctor
+  // and dtor.
   template <typename T>
   struct is_simple {
     static const bool value = std::is_trivial<T>::value ||
                               std::is_same<T, complex64>::value ||
-                              std::is_same<T, complex128>::value ||
                               is_quantized<T>::value;
   };
 
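A note for readers of this hunk: the trait logic is easy to exercise in isolation. Below is a minimal self-contained sketch of the same idea, assuming only the standard library; the `complex64` alias mirrors the typedef in numeric_types.h, and the `is_quantized` clause is omitted for brevity.

```c++
#include <complex>
#include <type_traits>

typedef std::complex<float> complex64;  // as in numeric_types.h

// Same shape as the is_simple<T> trait above, minus the quantized-type clause:
// true when T[] can be constructed and destructed without running T() and ~T().
template <typename T>
struct is_simple {
  static const bool value =
      std::is_trivial<T>::value || std::is_same<T, complex64>::value;
};

// std::complex<float>'s default constructor zero-initializes, so it is not
// trivial in the standard sense...
static_assert(!std::is_trivial<complex64>::value, "complex64 is not trivial");
// ...but its storage is two plain floats, so the trait whitelists it anyway.
static_assert(is_simple<complex64>::value, "complex64 still counts as simple");
static_assert(is_simple<int>::value, "trivial types qualify directly");

int main() { return 0; }
```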
diff --git a/tensorflow/core/framework/node_def_util_test.cc b/tensorflow/core/framework/node_def_util_test.cc
index e7dd1e5..07bd60f 100644
--- a/tensorflow/core/framework/node_def_util_test.cc
+++ b/tensorflow/core/framework/node_def_util_test.cc
@@ -151,7 +151,7 @@
   ExpectFailure(bad, op,
                 "Value for attr 'T' of string is not in the list of allowed "
                 "values: float, double, int64, int32, uint8, uint16, int16, "
-                "int8, complex64, complex128, qint8, quint8, qint32");
+                "int8, complex64, qint8, quint8, qint32");
 }
 
 TEST(NodeDefUtilTest, Enum) {
diff --git a/tensorflow/core/framework/numeric_types.h b/tensorflow/core/framework/numeric_types.h
index 9523e35..c6230da 100644
--- a/tensorflow/core/framework/numeric_types.h
+++ b/tensorflow/core/framework/numeric_types.h
@@ -24,8 +24,6 @@
 
 // Single precision complex.
 typedef std::complex<float> complex64;
-// Double precision complex.
-typedef std::complex<double> complex128;
 
 }  // end namespace tensorflow
 
diff --git a/tensorflow/core/framework/op_def_builder_test.cc b/tensorflow/core/framework/op_def_builder_test.cc
index fbef9eb..2d6a7f0 100644
--- a/tensorflow/core/framework/op_def_builder_test.cc
+++ b/tensorflow/core/framework/op_def_builder_test.cc
@@ -113,7 +113,7 @@
   ExpectSuccess(b().Attr("a:numbertype"),
                 "attr: { name: 'a' type: 'type' allowed_values { list { type: "
                 "[DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8, DT_INT16, "
-                "DT_UINT16, DT_INT8, DT_COMPLEX64, DT_COMPLEX128, DT_QINT8, DT_QUINT8, "
+                "DT_UINT16, DT_INT8, DT_COMPLEX64, DT_QINT8, DT_QUINT8, "
                 "DT_QINT32] } } }");
   ExpectSuccess(b().Attr("a:realnumbertype"),
                 "attr: { name: 'a' type: 'type' allowed_values { list { type: "
diff --git a/tensorflow/core/framework/op_def_util_test.cc b/tensorflow/core/framework/op_def_util_test.cc
index 813576c..854016f 100644
--- a/tensorflow/core/framework/op_def_util_test.cc
+++ b/tensorflow/core/framework/op_def_util_test.cc
@@ -246,10 +246,6 @@
       TestBuilder(OpDefBuilder("BadAttrtude")
                       .Attr("x: list(realnumbertype) = [DT_COMPLEX64]")),
       "attr 'x' of complex64 is not in the list of allowed values");
-  ExpectFailure(
-      TestBuilder(OpDefBuilder("BadAttrtude")
-                      .Attr("x: list(realnumbertype) = [DT_COMPLEX128]")),
-      "attr 'x' of complex128 is not in the list of allowed values");
   // Is in list of allowed strings.
   TF_EXPECT_OK(TestBuilder(
       OpDefBuilder("GoodAttrtude").Attr("x: {'foo', 'bar'} = 'bar'")));
diff --git a/tensorflow/core/framework/register_types.h b/tensorflow/core/framework/register_types.h
index 1474dc6..d08388a 100644
--- a/tensorflow/core/framework/register_types.h
+++ b/tensorflow/core/framework/register_types.h
@@ -63,16 +63,14 @@
   m(int16);                                   \
   m(int8)
 
-// Call "m" for all number types, including complex64 and complex128.
+// Call "m" for all number types, including complex64.
 #define TF_CALL_NUMBER_TYPES(m) \
   TF_CALL_REAL_NUMBER_TYPES(m); \
-  m(complex64);                 \
-  m(complex128)
+  m(complex64)
 
 #define TF_CALL_NUMBER_TYPES_NO_INT32(m) \
   TF_CALL_REAL_NUMBER_TYPES_NO_INT32(m); \
-  m(complex64);                          \
-  m(complex128)
+  m(complex64)
 
 #define TF_CALL_POD_TYPES(m) \
   TF_CALL_NUMBER_TYPES(m);   \
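The TF_CALL_* macros edited above are an X-macro list: each kernel file supplies its own one-argument macro `m`, and the list expands it once per dtype, which is why adding or removing a type (complex128 here) is a one-line change per list. A small sketch of the pattern with made-up names (`CALL_NUMBER_TYPES` and `DEFINE_SIZE_FN` are illustrative, not TensorFlow macros):

```c++
#include <complex>
#include <cstdio>

// The type list lives in one place; clients decide what to stamp out per type.
#define CALL_NUMBER_TYPES(m) \
  m(float);                  \
  m(double);                 \
  m(std::complex<float>)

// One client: generate a PrintSize overload for every listed type.
#define DEFINE_SIZE_FN(T) \
  void PrintSize(T) { std::printf("%zu\n", sizeof(T)); }

CALL_NUMBER_TYPES(DEFINE_SIZE_FN)

int main() {
  PrintSize(1.0f);                       // prints 4
  PrintSize(1.0);                        // prints 8
  PrintSize(std::complex<float>(0, 0));  // prints 8
  return 0;
}
```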
diff --git a/tensorflow/core/framework/tensor.cc b/tensorflow/core/framework/tensor.cc
index e56db2a..e701b66 100644
--- a/tensorflow/core/framework/tensor.cc
+++ b/tensorflow/core/framework/tensor.cc
@@ -216,22 +216,6 @@
 };
 
 template <>
-struct ProtoHelper<complex128> {
-  typedef Helper<double>::RepeatedFieldType FieldType;
-  static const complex128* Begin(const TensorProto& proto) {
-    return reinterpret_cast<const complex128*>(proto.dcomplex_val().data());
-  }
-  static size_t NumElements(const TensorProto& proto) {
-    return proto.dcomplex_val().size() / 2;
-  }
-  static void Fill(const complex128* data, size_t n, TensorProto* proto) {
-    const double* p = reinterpret_cast<const double*>(data);
-    FieldType copy(p, p + n * 2);
-    proto->mutable_dcomplex_val()->Swap(&copy);
-  }
-};
-
-template <>
 struct ProtoHelper<qint32> {
   typedef Helper<int32>::RepeatedFieldType FieldType;
   static const qint32* Begin(const TensorProto& proto) {
@@ -401,7 +385,6 @@
     CASE(int8, SINGLE_ARG(STMTS))                     \
     CASE(string, SINGLE_ARG(STMTS))                   \
     CASE(complex64, SINGLE_ARG(STMTS))                \
-    CASE(complex128, SINGLE_ARG(STMTS))               \
     CASE(int64, SINGLE_ARG(STMTS))                    \
     CASE(bool, SINGLE_ARG(STMTS))                     \
     CASE(qint32, SINGLE_ARG(STMTS))                   \
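The deleted ProtoHelper<complex128> specialization encodes a complex tensor as a flat repeated double, interleaving real and imaginary parts: element i occupies slots 2*i and 2*i+1, which is why NumElements divides by two. A standalone sketch of that round trip, with std::vector standing in for the proto's repeated field:

```c++
#include <cassert>
#include <complex>
#include <cstring>
#include <vector>

typedef std::complex<double> complex128;

// Mirrors Fill(): view the complex array as 2*n doubles and copy them out.
std::vector<double> Pack(const std::vector<complex128>& src) {
  const double* p = reinterpret_cast<const double*>(src.data());
  return std::vector<double>(p, p + src.size() * 2);
}

// Mirrors Begin()/NumElements(): the flat array holds size()/2 complex values.
std::vector<complex128> Unpack(const std::vector<double>& flat) {
  std::vector<complex128> out(flat.size() / 2);
  std::memcpy(out.data(), flat.data(), flat.size() * sizeof(double));
  return out;
}

int main() {
  const std::vector<complex128> v = {{1, 2}, {3, 4}};
  const std::vector<double> flat = Pack(v);  // {1, 2, 3, 4}
  assert(flat.size() == 4 && flat[1] == 2 && flat[2] == 3);
  assert(Unpack(flat) == v);
  return 0;
}
```

The reinterpret_cast leans on the standard's array-access guarantee for std::complex, the same property the allocator.h hunk above exploits.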
diff --git a/tensorflow/core/framework/tensor.proto b/tensorflow/core/framework/tensor.proto
index 59fc964..013a2d0 100644
--- a/tensorflow/core/framework/tensor.proto
+++ b/tensorflow/core/framework/tensor.proto
@@ -57,8 +57,4 @@
 
   // DT_BOOL
   repeated bool bool_val = 11 [packed = true];
-
-  // DT_COMPLEX128. dcomplex_val(2*i) and dcomplex_val(2*i+1) are real
-  // and imaginary parts of i-th double precision complex.
-  repeated double dcomplex_val = 12 [packed = true];
 };
diff --git a/tensorflow/core/framework/tensor_test.cc b/tensorflow/core/framework/tensor_test.cc
index 13896f9..ec0fb57 100644
--- a/tensorflow/core/framework/tensor_test.cc
+++ b/tensorflow/core/framework/tensor_test.cc
@@ -47,17 +47,12 @@
 
   // Unfortunately. std::complex::complex() initializes (0, 0).
   EXPECT_FALSE(std::is_trivial<complex64>::value);
-  EXPECT_FALSE(std::is_trivial<complex128>::value);
+  EXPECT_FALSE(std::is_trivial<std::complex<double>>::value);
   EXPECT_TRUE(std::is_trivial<float[2]>::value);
-  EXPECT_TRUE(std::is_trivial<double[2]>::value);
-  struct MyComplex64 {
+  struct MyComplex {
     float re, im;
   };
-  EXPECT_TRUE(std::is_trivial<MyComplex64>::value);
-  struct MyComplex128 {
-    double re, im;
-  };
-  EXPECT_TRUE(std::is_trivial<MyComplex128>::value);
+  EXPECT_TRUE(std::is_trivial<MyComplex>::value);
 }
 
 template <typename T>
@@ -425,19 +420,13 @@
   test::ExpectTensorEqual<bool>(t1, t2);
 }
 
-TEST(Tensor_Complex, Simple64) {
+TEST(Tensor_Complex, Simple) {
   Tensor t(DT_COMPLEX64, {4, 5, 3, 7});
   t.flat<complex64>().setRandom();
   TestCopies<complex64>(t);
 }
 
-TEST(Tensor_Complex, Simple128) {
-  Tensor t(DT_COMPLEX128, {4, 5, 3, 7});
-  t.flat<complex128>().setRandom();
-  TestCopies<complex128>(t);
-}
-
-TEST(Tensor_Complex, SimpleWithHelper64) {
+TEST(Tensor_Complex, SimpleWithHelper) {
   {
     Tensor t1 = test::AsTensor<complex64>({0,
                                            {1, 1},
@@ -455,7 +444,7 @@
     test::ExpectTensorEqual<complex64>(t2, t3);
   }
 
-  // Does some numeric operations for complex64 numbers.
+  // Does some numeric operations for complex numbers.
   {
     const float PI = std::acos(-1);
     const complex64 rotate_45 = std::polar(1.0f, PI / 4);
@@ -486,55 +475,6 @@
   }
 }
 
-TEST(Tensor_Complex, SimpleWithHelper128) {
-  {
-    Tensor t1 = test::AsTensor<complex128>({0,
-                                           {1, 1},
-                                           complex128(2),
-                                           complex128(3, 3),
-                                           complex128(0, 4),
-                                           complex128(2, 5)},
-                                          {2, 3});
-    Tensor t2(t1.dtype(), t1.shape());
-    t2.flat<complex128>() = t1.flat<complex128>() * complex128(0, 2);
-    Tensor t3 = test::AsTensor<complex128>(
-        {0, {-2, 2}, {0, 4}, {-6, 6}, {-8, 0}, {-10, 4}},
-        // shape
-        {2, 3});
-    test::ExpectTensorEqual<complex128>(t2, t3);
-  }
-
-  // Does some numeric operations for complex128 numbers.
-  {
-    const double PI = std::acos(-1);
-    const complex128 rotate_45 = std::polar(1.0, PI / 4);
-
-    // x contains all the 8-th root of unity.
-    Tensor x(DT_COMPLEX128, TensorShape({8}));
-    for (int i = 0; i < 8; ++i) {
-      x.vec<complex128>()(i) = std::pow(rotate_45, i);
-    }
-
-    // Shift the roots by 45 degree.
-    Tensor y(DT_COMPLEX128, TensorShape({8}));
-    y.vec<complex128>() = x.vec<complex128>() * rotate_45;
-    Tensor y_expected(DT_COMPLEX128, TensorShape({8}));
-    for (int i = 0; i < 8; ++i) {
-      y_expected.vec<complex128>()(i) = std::pow(rotate_45, i + 1);
-    }
-    test::ExpectTensorNear<complex128>(y, y_expected, 1e-5);
-
-    // Raise roots to the power of 8.
-    Tensor z(DT_COMPLEX128, TensorShape({8}));
-    z.vec<complex128>() = x.vec<complex128>().pow(8);
-    Tensor z_expected(DT_COMPLEX128, TensorShape({8}));
-    for (int i = 0; i < 8; ++i) {
-      z_expected.vec<complex128>()(i) = 1;
-    }
-    test::ExpectTensorNear<complex128>(z, z_expected, 1e-5);
-  }
-}
-
 // On the alignment.
 //
 // As of 2015/8, tensorflow::Tensor allocates its buffer with 32-byte
diff --git a/tensorflow/core/framework/tensor_testutil.h b/tensorflow/core/framework/tensor_testutil.h
index 0d88eda..71e1767 100644
--- a/tensorflow/core/framework/tensor_testutil.h
+++ b/tensorflow/core/framework/tensor_testutil.h
@@ -127,12 +127,6 @@
   EXPECT_FLOAT_EQ(a.imag(), b.imag()) << a << " vs. " << b;
 }
 
-template <>
-void ExpectEqual<complex128>(const complex128& a, const complex128& b) {
-  EXPECT_DOUBLE_EQ(a.real(), b.real()) << a << " vs. " << b;
-  EXPECT_DOUBLE_EQ(a.imag(), b.imag()) << a << " vs. " << b;
-}
-
 inline void AssertSameTypeDims(const Tensor& x, const Tensor& y) {
   ASSERT_EQ(x.dtype(), y.dtype());
   ASSERT_TRUE(x.IsSameSize(y))
diff --git a/tensorflow/core/framework/types.cc b/tensorflow/core/framework/types.cc
index c87a044..54b55e4 100644
--- a/tensorflow/core/framework/types.cc
+++ b/tensorflow/core/framework/types.cc
@@ -64,8 +64,6 @@
       return "string";
     case DT_COMPLEX64:
       return "complex64";
-    case DT_COMPLEX128:
-      return "complex128";
     case DT_INT64:
       return "int64";
     case DT_BOOL:
@@ -127,9 +125,6 @@
   } else if (sp == "complex64") {
     *dt = DT_COMPLEX64;
     return true;
-  } else if (sp == "complex128") {
-    *dt = DT_COMPLEX128;
-    return true;
   } else if (sp == "int64") {
     *dt = DT_INT64;
     return true;
@@ -170,10 +165,9 @@
 }
 
 DataTypeVector AllTypes() {
-  return {DT_FLOAT,  DT_DOUBLE, DT_INT32,   DT_UINT8,     DT_INT16,
-          DT_UINT16, DT_INT8,   DT_STRING,  DT_COMPLEX64, DT_COMPLEX128,
-          DT_INT64,  DT_BOOL,   DT_QINT8,   DT_QUINT8,    DT_QINT16,
-          DT_QUINT16, DT_QINT32};
+  return {DT_FLOAT,  DT_DOUBLE, DT_INT32,     DT_UINT8, DT_INT16, DT_UINT16,
+          DT_INT8,   DT_STRING, DT_COMPLEX64, DT_INT64, DT_BOOL,  DT_QINT8,
+          DT_QUINT8, DT_QINT16, DT_QUINT16,   DT_QINT32};
 }
 
 #if !defined(__ANDROID__)
@@ -194,9 +188,8 @@
 }
 
 DataTypeVector NumberTypes() {
-  return {DT_FLOAT,  DT_DOUBLE, DT_INT64, DT_INT32,     DT_UINT8,
-          DT_UINT16, DT_INT16,  DT_INT8,  DT_COMPLEX64, DT_COMPLEX128,
-          DT_QINT8,  DT_QUINT8, DT_QINT32 };
+  return {DT_FLOAT, DT_DOUBLE, DT_INT64,     DT_INT32, DT_UINT8,  DT_UINT16,
+          DT_INT16, DT_INT8,   DT_COMPLEX64, DT_QINT8, DT_QUINT8, DT_QINT32};
 }
 
 #else  // defined(__ANDROID__)
@@ -230,7 +223,6 @@
     case DT_INT16:
     case DT_INT8:
     case DT_COMPLEX64:
-    case DT_COMPLEX128:
     case DT_INT64:
     case DT_BOOL:
     case DT_QINT8:
diff --git a/tensorflow/core/framework/types.h b/tensorflow/core/framework/types.h
index 6de9917..9651d2b 100644
--- a/tensorflow/core/framework/types.h
+++ b/tensorflow/core/framework/types.h
@@ -174,7 +174,6 @@
 MATCH_TYPE_AND_ENUM(int8, DT_INT8);
 MATCH_TYPE_AND_ENUM(string, DT_STRING);
 MATCH_TYPE_AND_ENUM(complex64, DT_COMPLEX64);
-MATCH_TYPE_AND_ENUM(complex128, DT_COMPLEX128);
 MATCH_TYPE_AND_ENUM(int64, DT_INT64);
 MATCH_TYPE_AND_ENUM(bool, DT_BOOL);
 MATCH_TYPE_AND_ENUM(qint8, DT_QINT8);
diff --git a/tensorflow/core/framework/types.proto b/tensorflow/core/framework/types.proto
index 27e0b7e..e6f0b13 100644
--- a/tensorflow/core/framework/types.proto
+++ b/tensorflow/core/framework/types.proto
@@ -30,10 +30,10 @@
   DT_QINT16 = 15;    // Quantized int16
   DT_QUINT16 = 16;   // Quantized uint16
   DT_UINT16 = 17;
-  DT_COMPLEX128 = 18;  // Double-precision complex
 
   // TODO(josh11b): DT_GENERIC_PROTO = ??;
   // TODO(jeff,josh11b): DT_UINT64?  DT_UINT32?
+  // TODO(zhifengc): DT_COMPLEX128 (double-precision complex)?
 
   // Do not use!  These are only for parameters.  Every enum above
   // should have a corresponding value below (verified by types_test).
@@ -54,5 +54,4 @@
   DT_QINT16_REF = 115;
   DT_QUINT16_REF = 116;
   DT_UINT16_REF = 117;
-  DT_COMPLEX128_REF = 118;
 }
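Context for the _REF block above: by convention each DT_* enumerator has a reference twin at exactly value + 100, a pairing the comment says types_test verifies. A hypothetical C++ rendering of that check (the enumerator subset and helper names are illustrative):

```c++
// A few values transcribed from types.proto; each *_REF twin is base + 100.
enum DataType {
  DT_FLOAT = 1,
  DT_COMPLEX64 = 8,
  DT_UINT16 = 17,
  DT_FLOAT_REF = 101,
  DT_COMPLEX64_REF = 108,
  DT_UINT16_REF = 117,
};

constexpr int kRefOffset = 100;
constexpr bool IsRefType(DataType dt) { return dt >= kRefOffset; }
constexpr DataType BaseType(DataType dt) {
  return IsRefType(dt) ? static_cast<DataType>(dt - kRefOffset) : dt;
}

static_assert(BaseType(DT_UINT16_REF) == DT_UINT16, "ref twin is base + 100");
static_assert(BaseType(DT_FLOAT) == DT_FLOAT, "non-ref values map to themselves");

int main() { return 0; }
```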
diff --git a/tensorflow/core/ops/compat/ops_history.v0.pbtxt b/tensorflow/core/ops/compat/ops_history.v0.pbtxt
index 0d9360a..7d4b14e 100644
--- a/tensorflow/core/ops/compat/ops_history.v0.pbtxt
+++ b/tensorflow/core/ops/compat/ops_history.v0.pbtxt
@@ -206,47 +206,6 @@
   is_commutative: true
 }
 op {
-  name: "AddN"
-  input_arg {
-    name: "inputs"
-    type_attr: "T"
-    number_attr: "N"
-  }
-  output_arg {
-    name: "sum"
-    type_attr: "T"
-  }
-  attr {
-    name: "N"
-    type: "int"
-    has_minimum: true
-    minimum: 1
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT64
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_UINT16
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_COMPLEX128
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-      }
-    }
-  }
-  is_aggregate: true
-  is_commutative: true
-}
-op {
   name: "AdjustContrast"
   input_arg {
     name: "images"
@@ -498,60 +457,6 @@
   }
 }
 op {
-  name: "ApplyAdagrad"
-  input_arg {
-    name: "var"
-    type_attr: "T"
-    is_ref: true
-  }
-  input_arg {
-    name: "accum"
-    type_attr: "T"
-    is_ref: true
-  }
-  input_arg {
-    name: "lr"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "grad"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "out"
-    type_attr: "T"
-    is_ref: true
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT64
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_UINT16
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_COMPLEX128
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-      }
-    }
-  }
-  attr {
-    name: "use_locking"
-    type: "bool"
-    default_value {
-      b: false
-    }
-  }
-}
-op {
   name: "ApplyAdam"
   input_arg {
     name: "var"
@@ -707,155 +612,6 @@
   }
 }
 op {
-  name: "ApplyAdam"
-  input_arg {
-    name: "var"
-    type_attr: "T"
-    is_ref: true
-  }
-  input_arg {
-    name: "m"
-    type_attr: "T"
-    is_ref: true
-  }
-  input_arg {
-    name: "v"
-    type_attr: "T"
-    is_ref: true
-  }
-  input_arg {
-    name: "beta1_power"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "beta2_power"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "lr"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "beta1"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "beta2"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "epsilon"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "grad"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "out"
-    type_attr: "T"
-    is_ref: true
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT64
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_UINT16
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_COMPLEX128
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-      }
-    }
-  }
-  attr {
-    name: "use_locking"
-    type: "bool"
-    default_value {
-      b: false
-    }
-  }
-}
-op {
-  name: "ApplyFtrl"
-  input_arg {
-    name: "var"
-    type_attr: "T"
-    is_ref: true
-  }
-  input_arg {
-    name: "accum"
-    type_attr: "T"
-    is_ref: true
-  }
-  input_arg {
-    name: "linear"
-    type_attr: "T"
-    is_ref: true
-  }
-  input_arg {
-    name: "grad"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "lr"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "l1"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "l2"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "lr_power"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "out"
-    type_attr: "T"
-    is_ref: true
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT64
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_UINT16
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-      }
-    }
-  }
-  attr {
-    name: "use_locking"
-    type: "bool"
-    default_value {
-      b: false
-    }
-  }
-}
-op {
   name: "ApplyFtrl"
   input_arg {
     name: "var"
@@ -911,7 +667,6 @@
         type: DT_INT16
         type: DT_INT8
         type: DT_COMPLEX64
-        type: DT_COMPLEX128
         type: DT_QINT8
         type: DT_QUINT8
         type: DT_QINT32
@@ -1022,55 +777,6 @@
   }
 }
 op {
-  name: "ApplyGradientDescent"
-  input_arg {
-    name: "var"
-    type_attr: "T"
-    is_ref: true
-  }
-  input_arg {
-    name: "alpha"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "delta"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "out"
-    type_attr: "T"
-    is_ref: true
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT64
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_UINT16
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_COMPLEX128
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-      }
-    }
-  }
-  attr {
-    name: "use_locking"
-    type: "bool"
-    default_value {
-      b: false
-    }
-  }
-}
-op {
   name: "ApplyMomentum"
   input_arg {
     name: "var"
@@ -1184,64 +890,6 @@
   }
 }
 op {
-  name: "ApplyMomentum"
-  input_arg {
-    name: "var"
-    type_attr: "T"
-    is_ref: true
-  }
-  input_arg {
-    name: "accum"
-    type_attr: "T"
-    is_ref: true
-  }
-  input_arg {
-    name: "lr"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "grad"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "momentum"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "out"
-    type_attr: "T"
-    is_ref: true
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT64
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_UINT16
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_COMPLEX128
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-      }
-    }
-  }
-  attr {
-    name: "use_locking"
-    type: "bool"
-    default_value {
-      b: false
-    }
-  }
-}
-op {
   name: "ApplyRMSProp"
   input_arg {
     name: "var"
@@ -1381,77 +1029,6 @@
   }
 }
 op {
-  name: "ApplyRMSProp"
-  input_arg {
-    name: "var"
-    type_attr: "T"
-    is_ref: true
-  }
-  input_arg {
-    name: "ms"
-    type_attr: "T"
-    is_ref: true
-  }
-  input_arg {
-    name: "mom"
-    type_attr: "T"
-    is_ref: true
-  }
-  input_arg {
-    name: "lr"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "rho"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "momentum"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "epsilon"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "grad"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "out"
-    type_attr: "T"
-    is_ref: true
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT64
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_UINT16
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_COMPLEX128
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-      }
-    }
-  }
-  attr {
-    name: "use_locking"
-    type: "bool"
-    default_value {
-      b: false
-    }
-  }
-}
-op {
   name: "ArgMax"
   input_arg {
     name: "input"
@@ -1521,42 +1098,6 @@
   }
 }
 op {
-  name: "ArgMax"
-  input_arg {
-    name: "input"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "dimension"
-    type: DT_INT32
-  }
-  output_arg {
-    name: "output"
-    type: DT_INT64
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT64
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_UINT16
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_COMPLEX128
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-      }
-    }
-  }
-}
-op {
   name: "ArgMin"
   input_arg {
     name: "input"
@@ -1626,42 +1167,6 @@
   }
 }
 op {
-  name: "ArgMin"
-  input_arg {
-    name: "input"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "dimension"
-    type: DT_INT32
-  }
-  output_arg {
-    name: "output"
-    type: DT_INT64
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT64
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_UINT16
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_COMPLEX128
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-      }
-    }
-  }
-}
-op {
   name: "Assert"
   input_arg {
     name: "condition"
@@ -1809,51 +1314,6 @@
   }
 }
 op {
-  name: "AssignAdd"
-  input_arg {
-    name: "ref"
-    type_attr: "T"
-    is_ref: true
-  }
-  input_arg {
-    name: "value"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "output_ref"
-    type_attr: "T"
-    is_ref: true
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT64
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_UINT16
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_COMPLEX128
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-      }
-    }
-  }
-  attr {
-    name: "use_locking"
-    type: "bool"
-    default_value {
-      b: false
-    }
-  }
-}
-op {
   name: "AssignSub"
   input_arg {
     name: "ref"
@@ -1941,51 +1401,6 @@
   }
 }
 op {
-  name: "AssignSub"
-  input_arg {
-    name: "ref"
-    type_attr: "T"
-    is_ref: true
-  }
-  input_arg {
-    name: "value"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "output_ref"
-    type_attr: "T"
-    is_ref: true
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT64
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_UINT16
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_COMPLEX128
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-      }
-    }
-  }
-  attr {
-    name: "use_locking"
-    type: "bool"
-    default_value {
-      b: false
-    }
-  }
-}
-op {
   name: "AvgPool"
   input_arg {
     name: "value"
@@ -2498,62 +1913,6 @@
   }
 }
 op {
-  name: "BatchNormWithGlobalNormalization"
-  input_arg {
-    name: "t"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "m"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "v"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "beta"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "gamma"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "result"
-    type_attr: "T"
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT64
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_UINT16
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_COMPLEX128
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-      }
-    }
-  }
-  attr {
-    name: "variance_epsilon"
-    type: "float"
-  }
-  attr {
-    name: "scale_after_normalization"
-    type: "bool"
-  }
-}
-op {
   name: "BatchNormWithGlobalNormalizationGrad"
   input_arg {
     name: "t"
@@ -2695,78 +2054,6 @@
   }
 }
 op {
-  name: "BatchNormWithGlobalNormalizationGrad"
-  input_arg {
-    name: "t"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "m"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "v"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "gamma"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "backprop"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "dx"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "dm"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "dv"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "db"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "dg"
-    type_attr: "T"
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT64
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_UINT16
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_COMPLEX128
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-      }
-    }
-  }
-  attr {
-    name: "variance_epsilon"
-    type: "float"
-  }
-  attr {
-    name: "scale_after_normalization"
-    type: "bool"
-  }
-}
-op {
   name: "BatchSelfAdjointEig"
   input_arg {
     name: "input"
@@ -2905,55 +2192,6 @@
   }
 }
 op {
-  name: "BiasAdd"
-  input_arg {
-    name: "value"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "bias"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "output"
-    type_attr: "T"
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT64
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_UINT16
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_COMPLEX128
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-      }
-    }
-  }
-  attr {
-    name: "data_format"
-    type: "string"
-    default_value {
-      s: "NHWC"
-    }
-    allowed_values {
-      list {
-        s: "NHWC"
-        s: "NCHW"
-      }
-    }
-  }
-}
-op {
   name: "BiasAddGrad"
   input_arg {
     name: "out_backprop"
@@ -2998,51 +2236,6 @@
   }
 }
 op {
-  name: "BiasAddGrad"
-  input_arg {
-    name: "out_backprop"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "output"
-    type_attr: "T"
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT64
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_UINT16
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_COMPLEX128
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-      }
-    }
-  }
-  attr {
-    name: "data_format"
-    type: "string"
-    default_value {
-      s: "NHWC"
-    }
-    allowed_values {
-      list {
-        s: "NHWC"
-        s: "NCHW"
-      }
-    }
-  }
-}
-op {
   name: "BiasAddV1"
   input_arg {
     name: "value"
@@ -3078,42 +2271,6 @@
   }
 }
 op {
-  name: "BiasAddV1"
-  input_arg {
-    name: "value"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "bias"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "output"
-    type_attr: "T"
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT64
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_UINT16
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_COMPLEX128
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-      }
-    }
-  }
-}
-op {
   name: "Bitcast"
   input_arg {
     name: "input"
@@ -3165,59 +2322,6 @@
   }
 }
 op {
-  name: "Bitcast"
-  input_arg {
-    name: "input"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "output"
-    type_attr: "type"
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT64
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_UINT16
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_COMPLEX128
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-      }
-    }
-  }
-  attr {
-    name: "type"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT64
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_UINT16
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_COMPLEX128
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-      }
-    }
-  }
-}
-op {
   name: "BroadcastGradientArgs"
   input_arg {
     name: "s0"
@@ -6128,38 +5232,6 @@
   }
 }
 op {
-  name: "L2Loss"
-  input_arg {
-    name: "t"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "output"
-    type_attr: "T"
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT64
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_UINT16
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_COMPLEX128
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-      }
-    }
-  }
-}
-op {
   name: "LRN"
   input_arg {
     name: "input"
@@ -6996,49 +6068,6 @@
   }
 }
 op {
-  name: "Max"
-  input_arg {
-    name: "input"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "reduction_indices"
-    type: DT_INT32
-  }
-  output_arg {
-    name: "output"
-    type_attr: "T"
-  }
-  attr {
-    name: "keep_dims"
-    type: "bool"
-    default_value {
-      b: false
-    }
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT64
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_UINT16
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_COMPLEX128
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-      }
-    }
-  }
-}
-op {
   name: "MaxPool"
   input_arg {
     name: "input"
@@ -7425,49 +6454,6 @@
   }
 }
 op {
-  name: "Mean"
-  input_arg {
-    name: "input"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "reduction_indices"
-    type: DT_INT32
-  }
-  output_arg {
-    name: "output"
-    type_attr: "T"
-  }
-  attr {
-    name: "keep_dims"
-    type: "bool"
-    default_value {
-      b: false
-    }
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT64
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_UINT16
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_COMPLEX128
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-      }
-    }
-  }
-}
-op {
   name: "Merge"
   input_arg {
     name: "inputs"
@@ -7595,49 +6581,6 @@
   }
 }
 op {
-  name: "Min"
-  input_arg {
-    name: "input"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "reduction_indices"
-    type: DT_INT32
-  }
-  output_arg {
-    name: "output"
-    type_attr: "T"
-  }
-  attr {
-    name: "keep_dims"
-    type: "bool"
-    default_value {
-      b: false
-    }
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT64
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_UINT16
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_COMPLEX128
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-      }
-    }
-  }
-}
-op {
   name: "Minimum"
   input_arg {
     name: "x"
@@ -8744,49 +7687,6 @@
   }
 }
 op {
-  name: "Prod"
-  input_arg {
-    name: "input"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "reduction_indices"
-    type: DT_INT32
-  }
-  output_arg {
-    name: "output"
-    type_attr: "T"
-  }
-  attr {
-    name: "keep_dims"
-    type: "bool"
-    default_value {
-      b: false
-    }
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT64
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_UINT16
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_COMPLEX128
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-      }
-    }
-  }
-}
-op {
   name: "PyFunc"
   input_arg {
     name: "input"
@@ -10956,65 +9856,6 @@
   }
 }
 op {
-  name: "ScatterAdd"
-  input_arg {
-    name: "ref"
-    type_attr: "T"
-    is_ref: true
-  }
-  input_arg {
-    name: "indices"
-    type_attr: "Tindices"
-  }
-  input_arg {
-    name: "updates"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "output_ref"
-    type_attr: "T"
-    is_ref: true
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT64
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_UINT16
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_COMPLEX128
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-      }
-    }
-  }
-  attr {
-    name: "Tindices"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_INT32
-        type: DT_INT64
-      }
-    }
-  }
-  attr {
-    name: "use_locking"
-    type: "bool"
-    default_value {
-      b: false
-    }
-  }
-}
-op {
   name: "ScatterSub"
   input_arg {
     name: "ref"
@@ -11130,65 +9971,6 @@
   }
 }
 op {
-  name: "ScatterSub"
-  input_arg {
-    name: "ref"
-    type_attr: "T"
-    is_ref: true
-  }
-  input_arg {
-    name: "indices"
-    type_attr: "Tindices"
-  }
-  input_arg {
-    name: "updates"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "output_ref"
-    type_attr: "T"
-    is_ref: true
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT64
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_UINT16
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_COMPLEX128
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-      }
-    }
-  }
-  attr {
-    name: "Tindices"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_INT32
-        type: DT_INT64
-      }
-    }
-  }
-  attr {
-    name: "use_locking"
-    type: "bool"
-    default_value {
-      b: false
-    }
-  }
-}
-op {
   name: "ScatterUpdate"
   input_arg {
     name: "ref"
@@ -12468,74 +11250,6 @@
   }
 }
 op {
-  name: "SparseApplyAdagrad"
-  input_arg {
-    name: "var"
-    type_attr: "T"
-    is_ref: true
-  }
-  input_arg {
-    name: "accum"
-    type_attr: "T"
-    is_ref: true
-  }
-  input_arg {
-    name: "lr"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "grad"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "indices"
-    type_attr: "Tindices"
-  }
-  output_arg {
-    name: "out"
-    type_attr: "T"
-    is_ref: true
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT64
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_UINT16
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_COMPLEX128
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-      }
-    }
-  }
-  attr {
-    name: "Tindices"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_INT32
-        type: DT_INT64
-      }
-    }
-  }
-  attr {
-    name: "use_locking"
-    type: "bool"
-    default_value {
-      b: false
-    }
-  }
-}
-op {
   name: "SparseApplyFtrl"
   input_arg {
     name: "var"
@@ -12620,91 +11334,6 @@
   }
 }
 op {
-  name: "SparseApplyFtrl"
-  input_arg {
-    name: "var"
-    type_attr: "T"
-    is_ref: true
-  }
-  input_arg {
-    name: "accum"
-    type_attr: "T"
-    is_ref: true
-  }
-  input_arg {
-    name: "linear"
-    type_attr: "T"
-    is_ref: true
-  }
-  input_arg {
-    name: "grad"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "indices"
-    type_attr: "Tindices"
-  }
-  input_arg {
-    name: "lr"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "l1"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "l2"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "lr_power"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "out"
-    type_attr: "T"
-    is_ref: true
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT64
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_UINT16
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_COMPLEX128
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-      }
-    }
-  }
-  attr {
-    name: "Tindices"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_INT32
-        type: DT_INT64
-      }
-    }
-  }
-  attr {
-    name: "use_locking"
-    type: "bool"
-    default_value {
-      b: false
-    }
-  }
-}
-op {
   name: "SparseApplyMomentum"
   input_arg {
     name: "var"
@@ -12846,78 +11475,6 @@
   }
 }
 op {
-  name: "SparseApplyMomentum"
-  input_arg {
-    name: "var"
-    type_attr: "T"
-    is_ref: true
-  }
-  input_arg {
-    name: "accum"
-    type_attr: "T"
-    is_ref: true
-  }
-  input_arg {
-    name: "lr"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "grad"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "indices"
-    type_attr: "Tindices"
-  }
-  input_arg {
-    name: "momentum"
-    type_attr: "T"
-  }
-  output_arg {
-    name: "out"
-    type_attr: "T"
-    is_ref: true
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT64
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_UINT16
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_COMPLEX128
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-      }
-    }
-  }
-  attr {
-    name: "Tindices"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_INT32
-        type: DT_INT64
-      }
-    }
-  }
-  attr {
-    name: "use_locking"
-    type: "bool"
-    default_value {
-      b: false
-    }
-  }
-}
-op {
   name: "SparseConcat"
   input_arg {
     name: "indices"
@@ -13848,49 +12405,6 @@
   }
 }
 op {
-  name: "Sum"
-  input_arg {
-    name: "input"
-    type_attr: "T"
-  }
-  input_arg {
-    name: "reduction_indices"
-    type: DT_INT32
-  }
-  output_arg {
-    name: "output"
-    type_attr: "T"
-  }
-  attr {
-    name: "keep_dims"
-    type: "bool"
-    default_value {
-      b: false
-    }
-  }
-  attr {
-    name: "T"
-    type: "type"
-    allowed_values {
-      list {
-        type: DT_FLOAT
-        type: DT_DOUBLE
-        type: DT_INT64
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_UINT16
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_COMPLEX64
-        type: DT_COMPLEX128
-        type: DT_QINT8
-        type: DT_QUINT8
-        type: DT_QINT32
-      }
-    }
-  }
-}
-op {
   name: "Switch"
   input_arg {
     name: "data"
diff --git a/tensorflow/core/ops/ops.pbtxt b/tensorflow/core/ops/ops.pbtxt
index 703a92c..130c2e4 100644
--- a/tensorflow/core/ops/ops.pbtxt
+++ b/tensorflow/core/ops/ops.pbtxt
@@ -102,7 +102,6 @@
         type: DT_INT16
         type: DT_INT8
         type: DT_COMPLEX64
-        type: DT_COMPLEX128
         type: DT_QINT8
         type: DT_QUINT8
         type: DT_QINT32
@@ -332,7 +331,6 @@
         type: DT_INT16
         type: DT_INT8
         type: DT_COMPLEX64
-        type: DT_COMPLEX128
         type: DT_QINT8
         type: DT_QUINT8
         type: DT_QINT32
@@ -425,7 +423,6 @@
         type: DT_INT16
         type: DT_INT8
         type: DT_COMPLEX64
-        type: DT_COMPLEX128
         type: DT_QINT8
         type: DT_QUINT8
         type: DT_QINT32
@@ -508,7 +505,6 @@
         type: DT_INT16
         type: DT_INT8
         type: DT_COMPLEX64
-        type: DT_COMPLEX128
         type: DT_QINT8
         type: DT_QUINT8
         type: DT_QINT32
@@ -564,7 +560,6 @@
         type: DT_INT16
         type: DT_INT8
         type: DT_COMPLEX64
-        type: DT_COMPLEX128
         type: DT_QINT8
         type: DT_QUINT8
         type: DT_QINT32
@@ -630,7 +625,6 @@
         type: DT_INT16
         type: DT_INT8
         type: DT_COMPLEX64
-        type: DT_COMPLEX128
         type: DT_QINT8
         type: DT_QUINT8
         type: DT_QINT32
@@ -712,7 +706,6 @@
         type: DT_INT16
         type: DT_INT8
         type: DT_COMPLEX64
-        type: DT_COMPLEX128
         type: DT_QINT8
         type: DT_QUINT8
         type: DT_QINT32
@@ -759,7 +752,6 @@
         type: DT_INT16
         type: DT_INT8
         type: DT_COMPLEX64
-        type: DT_COMPLEX128
         type: DT_QINT8
         type: DT_QUINT8
         type: DT_QINT32
@@ -797,7 +789,6 @@
         type: DT_INT16
         type: DT_INT8
         type: DT_COMPLEX64
-        type: DT_COMPLEX128
         type: DT_QINT8
         type: DT_QUINT8
         type: DT_QINT32
@@ -911,7 +902,6 @@
         type: DT_INT16
         type: DT_INT8
         type: DT_COMPLEX64
-        type: DT_COMPLEX128
         type: DT_QINT8
         type: DT_QUINT8
         type: DT_QINT32
@@ -962,7 +952,6 @@
         type: DT_INT16
         type: DT_INT8
         type: DT_COMPLEX64
-        type: DT_COMPLEX128
         type: DT_QINT8
         type: DT_QUINT8
         type: DT_QINT32
@@ -1389,7 +1378,6 @@
         type: DT_INT16
         type: DT_INT8
         type: DT_COMPLEX64
-        type: DT_COMPLEX128
         type: DT_QINT8
         type: DT_QUINT8
         type: DT_QINT32
@@ -1475,7 +1463,6 @@
         type: DT_INT16
         type: DT_INT8
         type: DT_COMPLEX64
-        type: DT_COMPLEX128
         type: DT_QINT8
         type: DT_QUINT8
         type: DT_QINT32
@@ -1551,7 +1538,6 @@
         type: DT_INT16
         type: DT_INT8
         type: DT_COMPLEX64
-        type: DT_COMPLEX128
         type: DT_QINT8
         type: DT_QUINT8
         type: DT_QINT32
@@ -1601,7 +1587,6 @@
         type: DT_INT16
         type: DT_INT8
         type: DT_COMPLEX64
-        type: DT_COMPLEX128
         type: DT_QINT8
         type: DT_QUINT8
         type: DT_QINT32
@@ -1656,7 +1641,6 @@
         type: DT_INT16
         type: DT_INT8
         type: DT_COMPLEX64
-        type: DT_COMPLEX128
         type: DT_QINT8
         type: DT_QUINT8
         type: DT_QINT32
@@ -1690,7 +1674,6 @@
         type: DT_INT16
         type: DT_INT8
         type: DT_COMPLEX64
-        type: DT_COMPLEX128
         type: DT_QINT8
         type: DT_QUINT8
         type: DT_QINT32
@@ -1711,7 +1694,6 @@
         type: DT_INT16
         type: DT_INT8
         type: DT_COMPLEX64
-        type: DT_COMPLEX128
         type: DT_QINT8
         type: DT_QUINT8
         type: DT_QINT32
@@ -4338,7 +4320,6 @@
         type: DT_INT16
         type: DT_INT8
         type: DT_COMPLEX64
-        type: DT_COMPLEX128
         type: DT_QINT8
         type: DT_QUINT8
         type: DT_QINT32
@@ -5128,7 +5109,6 @@
         type: DT_INT16
         type: DT_INT8
         type: DT_COMPLEX64
-        type: DT_COMPLEX128
         type: DT_QINT8
         type: DT_QUINT8
         type: DT_QINT32
@@ -5439,7 +5419,6 @@
         type: DT_INT16
         type: DT_INT8
         type: DT_COMPLEX64
-        type: DT_COMPLEX128
         type: DT_QINT8
         type: DT_QUINT8
         type: DT_QINT32
@@ -5541,7 +5520,6 @@
         type: DT_INT16
         type: DT_INT8
         type: DT_COMPLEX64
-        type: DT_COMPLEX128
         type: DT_QINT8
         type: DT_QUINT8
         type: DT_QINT32
@@ -6442,7 +6420,6 @@
         type: DT_INT16
         type: DT_INT8
         type: DT_COMPLEX64
-        type: DT_COMPLEX128
         type: DT_QINT8
         type: DT_QUINT8
         type: DT_QINT32
@@ -8155,7 +8132,6 @@
         type: DT_INT16
         type: DT_INT8
         type: DT_COMPLEX64
-        type: DT_COMPLEX128
         type: DT_QINT8
         type: DT_QUINT8
         type: DT_QINT32
@@ -8221,7 +8197,6 @@
         type: DT_INT16
         type: DT_INT8
         type: DT_COMPLEX64
-        type: DT_COMPLEX128
         type: DT_QINT8
         type: DT_QUINT8
         type: DT_QINT32
@@ -9171,7 +9146,6 @@
         type: DT_INT16
         type: DT_INT8
         type: DT_COMPLEX64
-        type: DT_COMPLEX128
         type: DT_QINT8
         type: DT_QUINT8
         type: DT_QINT32
@@ -9269,7 +9243,6 @@
         type: DT_INT16
         type: DT_INT8
         type: DT_COMPLEX64
-        type: DT_COMPLEX128
         type: DT_QINT8
         type: DT_QUINT8
         type: DT_QINT32
@@ -9351,7 +9324,6 @@
         type: DT_INT16
         type: DT_INT8
         type: DT_COMPLEX64
-        type: DT_COMPLEX128
         type: DT_QINT8
         type: DT_QUINT8
         type: DT_QINT32
@@ -10262,7 +10234,6 @@
         type: DT_INT16
         type: DT_INT8
         type: DT_COMPLEX64
-        type: DT_COMPLEX128
         type: DT_QINT8
         type: DT_QUINT8
         type: DT_QINT32
diff --git a/tensorflow/core/public/tensor_c_api.h b/tensorflow/core/public/tensor_c_api.h
index b7ac96b..14f4dfa 100644
--- a/tensorflow/core/public/tensor_c_api.h
+++ b/tensorflow/core/public/tensor_c_api.h
@@ -78,8 +78,7 @@
   TF_INT16 = 5,
   TF_INT8 = 6,
   TF_STRING = 7,
-  TF_COMPLEX64 = 8,  // Single-precision complex
-  TF_COMPLEX = 8,    // Old identifier kept for API backwards compatibility
+  TF_COMPLEX = 8,  // Single-precision complex
   TF_INT64 = 9,
   TF_BOOL = 10,
   TF_QINT8 = 11,     // Quantized int8
@@ -89,7 +88,6 @@
   TF_QINT16 = 15,    // Quantized int16
   TF_QUINT16 = 16,   // Quantized uint16
   TF_UINT16 = 17,
-  TF_COMPLEX128 = 18,  // Double-precision complex
 } TF_DataType;
 
 // --------------------------------------------------------------------------
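The pair of enumerators removed above relied on C allowing two names for one enum value; that is how TF_COMPLEX64 was layered over the legacy TF_COMPLEX without breaking existing callers, and what this rollback undoes. A tiny illustration with made-up names:

```c++
typedef enum {
  EX_STRING = 7,
  EX_COMPLEX64 = 8,  // precision-explicit name
  EX_COMPLEX = 8,    // old identifier kept for backwards compatibility
  EX_INT64 = 9,
} ExDataType;

// Both names denote the same wire value, so old and new code interoperate.
static_assert(EX_COMPLEX == EX_COMPLEX64, "alias must not change the value");

int main() { return 0; }
```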
diff --git a/tensorflow/core/util/saved_tensor_slice_util.h b/tensorflow/core/util/saved_tensor_slice_util.h
index ce2dc55..6c3759f 100644
--- a/tensorflow/core/util/saved_tensor_slice_util.h
+++ b/tensorflow/core/util/saved_tensor_slice_util.h
@@ -108,7 +108,6 @@
 TENSOR_PROTO_EXTRACT_TYPE(float, float, float);
 TENSOR_PROTO_EXTRACT_TYPE(double, double, double);
 TENSOR_PROTO_EXTRACT_TYPE_COMPLEX(complex64, scomplex, float);
-TENSOR_PROTO_EXTRACT_TYPE_COMPLEX(complex128, dcomplex, double);
 TENSOR_PROTO_EXTRACT_TYPE(int32, int, int32);
 TENSOR_PROTO_EXTRACT_TYPE(int64, int64, int64);
 TENSOR_PROTO_EXTRACT_TYPE(uint8, int, int32);
diff --git a/tensorflow/examples/android/README.md b/tensorflow/examples/android/README.md
index fb737f7..11c8252 100644
--- a/tensorflow/examples/android/README.md
+++ b/tensorflow/examples/android/README.md
@@ -19,7 +19,7 @@
 3. The Android SDK and build tools may be obtained from:
         https://developer.android.com/tools/revisions/build-tools.html
 
-The Android entries in [`<workspace_root>/WORKSPACE`](../../../WORKSPACE#L2-L13) must be
+The Android entries in [`<workspace_root>/WORKSPACE`](../../WORKSPACE) must be
 uncommented with the paths filled in appropriately depending on where you
 installed the NDK and SDK. Otherwise an error such as:
 "The external label '//external:android/sdk' is not bound to anything" will
@@ -45,8 +45,10 @@
 $ bazel build //tensorflow/examples/android:tensorflow_demo
 ```
 
-If you get build errors about protocol buffers, run
-`git submodule update --init` and build again.
+If you get build errors about protocol buffers then you may have left out the
+`--recurse-submodules` argument to `git clone`. Review the instructions
+here and then build again:
+https://www.tensorflow.org/versions/master/get_started/os_setup.html#clone-the-tensorflow-repository
 
 If adb debugging is enabled on your Android 5.0 or later device, you may then
 use the following command from your workspace root to install the APK once
diff --git a/tensorflow/examples/label_image/README.md b/tensorflow/examples/label_image/README.md
index 1f40e8b..c24ce19 100644
--- a/tensorflow/examples/label_image/README.md
+++ b/tensorflow/examples/label_image/README.md
@@ -43,15 +43,15 @@
 output something similar to this:
 
 ```
-I tensorflow/examples/label_image/main.cc:207] military uniform (866): 0.647299
-I tensorflow/examples/label_image/main.cc:207] suit (794): 0.0477195
-I tensorflow/examples/label_image/main.cc:207] academic gown (896): 0.0232407
-I tensorflow/examples/label_image/main.cc:207] bow tie (817): 0.0157355
-I tensorflow/examples/label_image/main.cc:207] bolo tie (940): 0.0145023
+I tensorflow/examples/label_image/main.cc:200] military uniform (866): 0.902268
+I tensorflow/examples/label_image/main.cc:200] bow tie (817): 0.05407
+I tensorflow/examples/label_image/main.cc:200] suit (794): 0.0113195
+I tensorflow/examples/label_image/main.cc:200] bulletproof vest (833): 0.0100269
+I tensorflow/examples/label_image/main.cc:200] bearskin (849): 0.00649746
 ```
 In this case, we're using the default image of Admiral Grace Hopper, and you can
 see the network correctly spots she's wearing a military uniform, with a high
-score of 0.6.
+score of 0.9.
 
 Next, try it out on your own images by supplying the --image= argument, e.g.
 
diff --git a/tensorflow/examples/udacity/1_notmnist.ipynb b/tensorflow/examples/udacity/1_notmnist.ipynb
index 9d864cc..b4704a3 100644
--- a/tensorflow/examples/udacity/1_notmnist.ipynb
+++ b/tensorflow/examples/udacity/1_notmnist.ipynb
@@ -117,7 +117,7 @@
         "    print('Found and verified', filename)\n",
         "  else:\n",
         "    raise Exception(\n",
-        "      'Failed to verify ' + filename + '. Can you get to it with a browser?')\n",
+        "      'Failed to verify' + filename + '. Can you get to it with a browser?')\n",
         "  return filename\n",
         "\n",
         "train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)\n",
diff --git a/tensorflow/models/image/cifar10/cifar10.py b/tensorflow/models/image/cifar10/cifar10.py
index 05c8e70..8f5fd4f 100644
--- a/tensorflow/models/image/cifar10/cifar10.py
+++ b/tensorflow/models/image/cifar10/cifar10.py
@@ -67,7 +67,7 @@
 LEARNING_RATE_DECAY_FACTOR = 0.1  # Learning rate decay factor.
 INITIAL_LEARNING_RATE = 0.1       # Initial learning rate.
 
-# If a model is trained with multiple GPUs, prefix all Op names with tower_name
+# If a model is trained with multiple GPU's prefix all Op names with tower_name
 # to differentiate the operations. Note that this prefix is removed from the
 # names of the summaries when visualizing a model.
 TOWER_NAME = 'tower'
@@ -255,7 +255,7 @@
 def loss(logits, labels):
   """Add L2Loss to all the trainable variables.
 
-  Add summary for "Loss" and "Loss/avg".
+  Add summary for for "Loss" and "Loss/avg".
   Args:
     logits: Logits from inference().
     labels: Labels from distorted_inputs or inputs(). 1-D tensor
diff --git a/tensorflow/models/image/cifar10/cifar10_input.py b/tensorflow/models/image/cifar10/cifar10_input.py
index 0d48a35..a9a0869 100644
--- a/tensorflow/models/image/cifar10/cifar10_input.py
+++ b/tensorflow/models/image/cifar10/cifar10_input.py
@@ -172,7 +172,7 @@
   distorted_image = tf.image.random_flip_left_right(distorted_image)
 
   # Because these operations are not commutative, consider randomizing
-  # the order their operation.
+  # randomize the order their operation.
   distorted_image = tf.image.random_brightness(distorted_image,
                                                max_delta=63)
   distorted_image = tf.image.random_contrast(distorted_image,
diff --git a/tensorflow/python/__init__.py b/tensorflow/python/__init__.py
index c36cdfe..11bee08 100644
--- a/tensorflow/python/__init__.py
+++ b/tensorflow/python/__init__.py
@@ -181,7 +181,6 @@
     'bfloat16', 'bfloat16_ref',
     'bool', 'bool_ref',
     'complex64', 'complex64_ref',
-    'complex128', 'complex128_ref',
     'double', 'double_ref',
     'float32', 'float32_ref',
     'float64', 'float64_ref',
diff --git a/tensorflow/python/client/session_test.py b/tensorflow/python/client/session_test.py
index 491b293..5586832 100644
--- a/tensorflow/python/client/session_test.py
+++ b/tensorflow/python/client/session_test.py
@@ -687,8 +687,7 @@
                     dtypes.int8,
                     dtypes.int64,
                     dtypes.bool,
-                    dtypes.complex64,
-                    dtypes.complex128]:
+                    dtypes.complex64]:
         for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
           np_dtype = dtype.as_numpy_dtype
 
@@ -701,8 +700,6 @@
             np_array = np_array > 0
           elif dtype == dtypes.complex64:
             np_array = np.sqrt(np_array.astype(np_dtype))
-          elif dtype == dtypes.complex64:
-            np_array = np.sqrt(np_array.astype(np_dtype))
           else:
             np_array = np_array.astype(np_dtype)
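Two details in this hunk are worth spelling out: the removed elif repeats the complex64 test verbatim, so that branch was unreachable, and np.sqrt is applied after the cast so that square roots of negative entries come out complex rather than NaN. A standalone version of the array-construction loop (dtype list abbreviated):

```
import numpy as np

for np_dtype in [np.int32, np.bool_, np.complex64]:
    x = np.random.randint(-10, 10, size=(2, 3))
    if np_dtype == np.bool_:
        arr = x > 0
    elif np_dtype == np.complex64:
        # Cast first: sqrt of a negative float is NaN, but sqrt of a
        # complex number is well defined.
        arr = np.sqrt(x.astype(np_dtype))
    else:
        arr = x.astype(np_dtype)
    print(arr.dtype, arr.shape)
```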
 
diff --git a/tensorflow/python/client/tf_session_helper.cc b/tensorflow/python/client/tf_session_helper.cc
index e5cdcdd..02014cf 100644
--- a/tensorflow/python/client/tf_session_helper.cc
+++ b/tensorflow/python/client/tf_session_helper.cc
@@ -1,4 +1,4 @@
-/* Copyright 2016 Google Inc. All Rights Reserved.
+/* Copyright 2015 Google Inc. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -121,10 +121,7 @@
       *out_tf_datatype = TF_BOOL;
       break;
     case NPY_COMPLEX64:
-      *out_tf_datatype = TF_COMPLEX64;
-      break;
-    case NPY_COMPLEX128:
-      *out_tf_datatype = TF_COMPLEX128;
+      *out_tf_datatype = TF_COMPLEX;
       break;
     case NPY_OBJECT:
       *out_tf_datatype = TF_STRING;
@@ -171,12 +168,9 @@
     case TF_BOOL:
       *out_pyarray_type = NPY_BOOL;
       break;
-    case TF_COMPLEX64:
+    case TF_COMPLEX:
       *out_pyarray_type = NPY_COMPLEX64;
       break;
-    case TF_COMPLEX128:
-      *out_pyarray_type = NPY_COMPLEX128;
-      break;
     case TF_STRING:
       *out_pyarray_type = NPY_OBJECT;
       break;
diff --git a/tensorflow/python/framework/dtypes.py b/tensorflow/python/framework/dtypes.py
index d964a7f..9c1e05f 100644
--- a/tensorflow/python/framework/dtypes.py
+++ b/tensorflow/python/framework/dtypes.py
@@ -32,7 +32,6 @@
   * `tf.float64`: 64-bit double-precision floating-point.
   * `tf.bfloat16`: 16-bit truncated floating-point.
   * `tf.complex64`: 64-bit single-precision complex.
-  * `tf.complex128`: 128-bit double-precision complex.
 
   * `tf.int8`: 8-bit signed integer.
   * `tf.uint8`: 8-bit unsigned integer.
@@ -123,8 +122,6 @@
     base = self.base_dtype
     if base == complex64:
       return float32
-    elif base == complex128:
-      return float64
     else:
       return self
 
@@ -152,7 +149,7 @@
   @property
   def is_complex(self):
     """Returns whether this is a complex floating point type."""
-    return self.base_dtype in (complex64, complex128)
+    return self.base_dtype == complex64
 
   @property
   def is_quantized(self):
@@ -182,8 +179,8 @@
       TypeError: if this is a non-numeric, unordered, or quantized type.
 
     """
-    if (self.is_quantized or self.base_dtype in
-        (bool, string, complex64, complex128)):
+    if (self.is_quantized or self.base_dtype == bool or
+        self.base_dtype == string or self.base_dtype == complex64):
       raise TypeError("Cannot find minimum value of %s." % self)
 
     # there is no simple way to get the min value of a dtype, we have to check
@@ -204,8 +201,8 @@
       TypeError: if this is a non-numeric, unordered, or quantized type.
 
     """
-    if (self.is_quantized or self.base_dtype in
-        (bool, string, complex64, complex128)):
+    if (self.is_quantized or self.base_dtype == bool or
+        self.base_dtype == string or self.base_dtype == complex64):
       raise TypeError("Cannot find maximum value of %s." % self)
 
     # there is no simple way to get the min value of a dtype, we have to check
@@ -280,7 +277,6 @@
 int8 = DType(types_pb2.DT_INT8)
 string = DType(types_pb2.DT_STRING)
 complex64 = DType(types_pb2.DT_COMPLEX64)
-complex128 = DType(types_pb2.DT_COMPLEX128)
 int64 = DType(types_pb2.DT_INT64)
 bool = DType(types_pb2.DT_BOOL)
 qint8 = DType(types_pb2.DT_QINT8)
@@ -299,7 +295,6 @@
 int8_ref = DType(types_pb2.DT_INT8_REF)
 string_ref = DType(types_pb2.DT_STRING_REF)
 complex64_ref = DType(types_pb2.DT_COMPLEX64_REF)
-complex128_ref = DType(types_pb2.DT_COMPLEX128_REF)
 int64_ref = DType(types_pb2.DT_INT64_REF)
 bool_ref = DType(types_pb2.DT_BOOL_REF)
 qint8_ref = DType(types_pb2.DT_QINT8_REF)
@@ -322,7 +317,6 @@
     types_pb2.DT_INT8: int8,
     types_pb2.DT_STRING: string,
     types_pb2.DT_COMPLEX64: complex64,
-    types_pb2.DT_COMPLEX128: complex128,
     types_pb2.DT_INT64: int64,
     types_pb2.DT_BOOL: bool,
     types_pb2.DT_QINT8: qint8,
@@ -340,7 +334,6 @@
     types_pb2.DT_INT8_REF: int8_ref,
     types_pb2.DT_STRING_REF: string_ref,
     types_pb2.DT_COMPLEX64_REF: complex64_ref,
-    types_pb2.DT_COMPLEX128_REF: complex128_ref,
     types_pb2.DT_INT64_REF: int64_ref,
     types_pb2.DT_BOOL_REF: bool_ref,
     types_pb2.DT_QINT8_REF: qint8_ref,
@@ -363,7 +356,6 @@
     types_pb2.DT_INT8: "int8",
     types_pb2.DT_STRING: "string",
     types_pb2.DT_COMPLEX64: "complex64",
-    types_pb2.DT_COMPLEX128: "complex128",
     types_pb2.DT_INT64: "int64",
     types_pb2.DT_BOOL: "bool",
     types_pb2.DT_QINT8: "qint8",
@@ -381,7 +373,6 @@
     types_pb2.DT_INT8_REF: "int8_ref",
     types_pb2.DT_STRING_REF: "string_ref",
     types_pb2.DT_COMPLEX64_REF: "complex64_ref",
-    types_pb2.DT_COMPLEX128_REF: "complex128_ref",
     types_pb2.DT_INT64_REF: "int64_ref",
     types_pb2.DT_BOOL_REF: "bool_ref",
     types_pb2.DT_QINT8_REF: "qint8_ref",
@@ -423,7 +414,6 @@
     (np.int16, int16),
     (np.int8, int8),
     (np.complex64, complex64),
-    (np.complex128, complex128),
     (np.object, string),
     (np.bool, bool),
     (_np_qint8, qint8),
@@ -445,7 +435,6 @@
     # strings.
     types_pb2.DT_STRING: np.object,
     types_pb2.DT_COMPLEX64: np.complex64,
-    types_pb2.DT_COMPLEX128: np.complex128,
     types_pb2.DT_INT64: np.int64,
     types_pb2.DT_BOOL: np.bool,
     types_pb2.DT_QINT8: _np_qint8,
@@ -465,7 +454,6 @@
     types_pb2.DT_INT8_REF: np.int8,
     types_pb2.DT_STRING_REF: np.object,
     types_pb2.DT_COMPLEX64_REF: np.complex64,
-    types_pb2.DT_COMPLEX128_REF: np.complex128,
     types_pb2.DT_INT64_REF: np.int64,
     types_pb2.DT_BOOL_REF: np.bool,
     types_pb2.DT_QINT8_REF: _np_qint8,
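After this hunk, complex64 is the only complex dtype: real_dtype maps it to float32, is_complex reduces to a single comparison, and min/max reject it alongside bool, string, and the quantized types. The real-component mapping, reduced to a minimal table (a sketch in numpy terms, not the DType class itself):

```
import numpy as np

# Dtype of the real part; anything absent maps to itself, matching
# DType.real_dtype with complex64 as the sole complex type.
_REAL_DTYPE = {np.complex64: np.float32}

def real_dtype(dtype):
    return _REAL_DTYPE.get(dtype, dtype)

assert real_dtype(np.complex64) is np.float32
assert real_dtype(np.int32) is np.int32
```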
diff --git a/tensorflow/python/framework/dtypes_test.py b/tensorflow/python/framework/dtypes_test.py
index d303918..91fada9 100644
--- a/tensorflow/python/framework/dtypes_test.py
+++ b/tensorflow/python/framework/dtypes_test.py
@@ -71,7 +71,6 @@
     self.assertIs(tf.int16, tf.as_dtype(np.int16))
     self.assertIs(tf.int8, tf.as_dtype(np.int8))
     self.assertIs(tf.complex64, tf.as_dtype(np.complex64))
-    self.assertIs(tf.complex128, tf.as_dtype(np.complex128))
     self.assertIs(tf.string, tf.as_dtype(np.object))
     self.assertIs(tf.string, tf.as_dtype(np.array(["foo", "bar"]).dtype))
     self.assertIs(tf.bool, tf.as_dtype(np.bool))
@@ -83,7 +82,6 @@
                   tf.int32, tf.int64]:
       self.assertIs(dtype.real_dtype, dtype)
     self.assertIs(tf.complex64.real_dtype, tf.float32)
-    self.assertIs(tf.complex128.real_dtype, tf.float64)
 
   def testStringConversion(self):
     self.assertIs(tf.float32, tf.as_dtype("float32"))
@@ -95,7 +93,6 @@
     self.assertIs(tf.int8, tf.as_dtype("int8"))
     self.assertIs(tf.string, tf.as_dtype("string"))
     self.assertIs(tf.complex64, tf.as_dtype("complex64"))
-    self.assertIs(tf.complex128, tf.as_dtype("complex128"))
     self.assertIs(tf.int64, tf.as_dtype("int64"))
     self.assertIs(tf.bool, tf.as_dtype("bool"))
     self.assertIs(tf.qint8, tf.as_dtype("qint8"))
@@ -110,7 +107,6 @@
     self.assertIs(tf.int8_ref, tf.as_dtype("int8_ref"))
     self.assertIs(tf.string_ref, tf.as_dtype("string_ref"))
     self.assertIs(tf.complex64_ref, tf.as_dtype("complex64_ref"))
-    self.assertIs(tf.complex128_ref, tf.as_dtype("complex128_ref"))
     self.assertIs(tf.int64_ref, tf.as_dtype("int64_ref"))
     self.assertIs(tf.bool_ref, tf.as_dtype("bool_ref"))
     self.assertIs(tf.qint8_ref, tf.as_dtype("qint8_ref"))
@@ -139,7 +135,6 @@
     self.assertEqual(tf.as_dtype("uint8").is_integer, True)
     self.assertEqual(tf.as_dtype("uint16").is_integer, True)
     self.assertEqual(tf.as_dtype("complex64").is_integer, False)
-    self.assertEqual(tf.as_dtype("complex128").is_integer, False)
     self.assertEqual(tf.as_dtype("float").is_integer, False)
     self.assertEqual(tf.as_dtype("double").is_integer, False)
     self.assertEqual(tf.as_dtype("string").is_integer, False)
@@ -153,7 +148,6 @@
     self.assertEqual(tf.as_dtype("uint8").is_floating, False)
     self.assertEqual(tf.as_dtype("uint16").is_floating, False)
     self.assertEqual(tf.as_dtype("complex64").is_floating, False)
-    self.assertEqual(tf.as_dtype("complex128").is_floating, False)
     self.assertEqual(tf.as_dtype("float32").is_floating, True)
     self.assertEqual(tf.as_dtype("float64").is_floating, True)
     self.assertEqual(tf.as_dtype("string").is_floating, False)
@@ -167,7 +161,6 @@
     self.assertEqual(tf.as_dtype("uint8").is_complex, False)
     self.assertEqual(tf.as_dtype("uint16").is_complex, False)
     self.assertEqual(tf.as_dtype("complex64").is_complex, True)
-    self.assertEqual(tf.as_dtype("complex128").is_complex, True)
     self.assertEqual(tf.as_dtype("float32").is_complex, False)
     self.assertEqual(tf.as_dtype("float64").is_complex, False)
     self.assertEqual(tf.as_dtype("string").is_complex, False)
@@ -185,7 +178,6 @@
     self.assertEqual(tf.as_dtype("bool").is_unsigned, False)
     self.assertEqual(tf.as_dtype("string").is_unsigned, False)
     self.assertEqual(tf.as_dtype("complex64").is_unsigned, False)
-    self.assertEqual(tf.as_dtype("complex128").is_unsigned, False)
 
   def testMinMax(self):
     # make sure min/max evaluates for all data types that have min/max
@@ -200,8 +192,7 @@
       if (dtype.is_quantized or
           dtype.base_dtype == tf.bool or
           dtype.base_dtype == tf.string or
-          dtype.base_dtype == tf.complex64 or
-          dtype.base_dtype == tf.complex128):
+          dtype.base_dtype == tf.complex64):
         continue
 
       print("%s: %s - %s" % (dtype, dtype.min, dtype.max))
diff --git a/tensorflow/python/framework/ops_test.py b/tensorflow/python/framework/ops_test.py
index afa5c48..cfc96a0 100644
--- a/tensorflow/python/framework/ops_test.py
+++ b/tensorflow/python/framework/ops_test.py
@@ -1289,7 +1289,7 @@
     with ops.colocate_with(a.op):
       with ops.colocate_with(b.op, ignore_existing=True):
         c = constant_op.constant(4.0)
-    self.assertEqual(set([b"loc:@b"]), set(c.op.colocation_groups()))
+    self.assertEqual(set(["loc:@b"]), set(c.op.colocation_groups()))
 
   def testColocateVariables(self):
     a = variables.Variable([2.0], name="a")
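The expected value here changes from a bytes literal to a text literal. On Python 2 the two compare equal, because bytes and str are the same type; on Python 3 they never do, so the assertion only passes if colocation_groups() returns the matching string type:

```
print(b"loc:@b" == "loc:@b")
# Python 2: True  -- b"..." is just str
# Python 3: False -- bytes never compare equal to str
```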
diff --git a/tensorflow/python/framework/tensor_util.py b/tensorflow/python/framework/tensor_util.py
index 7a9add3..b1b39f0 100644
--- a/tensorflow/python/framework/tensor_util.py
+++ b/tensorflow/python/framework/tensor_util.py
@@ -76,16 +76,11 @@
   def SlowAppendInt64ArrayToTensorProto(tensor_proto, proto_values):
     tensor_proto.int64_val.extend([np.asscalar(x) for x in proto_values])
 
-  def SlowAppendComplex64ArrayToTensorProto(tensor_proto, proto_values):
+  def SlowAppendComplexArrayToTensorProto(tensor_proto, proto_values):
     tensor_proto.scomplex_val.extend([np.asscalar(v)
                                       for x in proto_values
                                       for v in [x.real, x.imag]])
 
-  def SlowAppendComplex128ArrayToTensorProto(tensor_proto, proto_values):
-    tensor_proto.dcomplex_val.extend([np.asscalar(v)
-                                      for x in proto_values
-                                      for v in [x.real, x.imag]])
-
   def SlowAppendObjectArrayToTensorProto(tensor_proto, proto_values):
     tensor_proto.string_val.extend([compat.as_bytes(x) for x in proto_values])
 
@@ -101,8 +96,8 @@
       np.uint16: SlowAppendIntArrayToTensorProto,
       np.int16: SlowAppendIntArrayToTensorProto,
       np.int8: SlowAppendIntArrayToTensorProto,
-      np.complex64: SlowAppendComplex64ArrayToTensorProto,
-      np.complex128: SlowAppendComplex128ArrayToTensorProto,
+      np.complex64: SlowAppendComplexArrayToTensorProto,
+      np.complex128: SlowAppendComplexArrayToTensorProto,
       np.object: SlowAppendObjectArrayToTensorProto,
       np.bool: SlowAppendBoolArrayToTensorProto,
       dtypes.qint8.as_numpy_dtype: SlowAppendIntArrayToTensorProto,
@@ -245,7 +240,6 @@
     dtypes.int8: _FilterInt,
     dtypes.string: _FilterStr,
     dtypes.complex64: _FilterComplex,
-    dtypes.complex128: _FilterComplex,
     dtypes.int64: _FilterInt,
     dtypes.bool: _FilterBool,
     dtypes.qint32: _FilterInt,
@@ -459,15 +453,6 @@
     else:
       return np.array([complex(x[0], x[1]) for x in zip(it, it)],
                       dtype=dtype).reshape(shape)
-  elif tensor_dtype == dtypes.complex128:
-    it = iter(tensor.dcomplex_val)
-    if len(tensor.dcomplex_val) == 2:
-      return np.repeat(np.array(complex(tensor.dcomplex_val[0],
-                                        tensor.dcomplex_val[1]), dtype=dtype),
-                       num_elements).reshape(shape)
-    else:
-      return np.array([complex(x[0], x[1]) for x in zip(it, it)],
-                      dtype=dtype).reshape(shape)
   elif tensor_dtype == dtypes.bool:
     if len(tensor.bool_val) == 1:
       return np.repeat(np.array(tensor.bool_val[0], dtype=dtype),
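Both sides of this hunk agree on the wire format: a complex tensor is serialized as a flat sequence real_0, imag_0, real_1, imag_1, ..., written by the SlowAppend helper and paired back up by MakeNdarray's zip(it, it) trick. The round trip in isolation:

```
import numpy as np

values = np.array([1+2j, 3+4j, 5+6j], dtype=np.complex64)

# Serialize: interleave real and imaginary parts (scomplex_val layout).
flat = [v for x in values for v in (x.real, x.imag)]

# Deserialize: zip an iterator with itself to consume the flat list
# two values at a time.
it = iter(flat)
restored = np.array([complex(r, i) for r, i in zip(it, it)],
                    dtype=np.complex64)

assert np.allclose(values, restored)
```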
diff --git a/tensorflow/python/framework/tensor_util_test.py b/tensorflow/python/framework/tensor_util_test.py
index d1cec3e..a2c28f0 100644
--- a/tensorflow/python/framework/tensor_util_test.py
+++ b/tensorflow/python/framework/tensor_util_test.py
@@ -274,7 +274,7 @@
     self.assertEquals(np.object, a.dtype)
     self.assertAllEqual(np.array([[b"a", b"ab"], [b"abc", b"abcd"]]), a)
 
-  def testComplex64(self):
+  def testComplex(self):
     t = tensor_util.make_tensor_proto((1+2j), dtype=tf.complex64)
     self.assertProtoEquals("""
       dtype: DT_COMPLEX64
@@ -286,30 +286,16 @@
     self.assertEquals(np.complex64, a.dtype)
     self.assertAllEqual(np.array(1 + 2j), a)
 
-  def testComplex128(self):
-    t = tensor_util.make_tensor_proto((1+2j), dtype=tf.complex128)
-    self.assertProtoEquals("""
-      dtype: DT_COMPLEX128
-      tensor_shape {}
-      dcomplex_val: 1
-      dcomplex_val: 2
-      """, t)
-    a = tensor_util.MakeNdarray(t)
-    self.assertEquals(np.complex128, a.dtype)
-    self.assertAllEqual(np.array(1 + 2j), a)
-
   def testComplexWithImplicitRepeat(self):
-    for dtype, np_dtype in [(tf.complex64, np.complex64),
-                            (tf.complex128, np.complex128)]:
-      t = tensor_util.make_tensor_proto((1+1j), shape=[3, 4],
-                                        dtype=dtype)
-      a = tensor_util.MakeNdarray(t)
-      self.assertAllClose(np.array([[(1+1j), (1+1j), (1+1j), (1+1j)],
-                                    [(1+1j), (1+1j), (1+1j), (1+1j)],
-                                    [(1+1j), (1+1j), (1+1j), (1+1j)]],
-                                   dtype=np_dtype), a)
+    t = tensor_util.make_tensor_proto((1+1j), shape=[3, 4],
+                                      dtype=tf.complex64)
+    a = tensor_util.MakeNdarray(t)
+    self.assertAllClose(np.array([[(1+1j), (1+1j), (1+1j), (1+1j)],
+                                  [(1+1j), (1+1j), (1+1j), (1+1j)],
+                                  [(1+1j), (1+1j), (1+1j), (1+1j)]],
+                                 dtype=np.complex64), a)
 
-  def testComplex64N(self):
+  def testComplexN(self):
     t = tensor_util.make_tensor_proto([(1+2j), (3+4j), (5+6j)], shape=[1, 3],
                                       dtype=tf.complex64)
     self.assertProtoEquals("""
@@ -326,24 +312,7 @@
     self.assertEquals(np.complex64, a.dtype)
     self.assertAllEqual(np.array([[(1+2j), (3+4j), (5+6j)]]), a)
 
-  def testComplex128N(self):
-    t = tensor_util.make_tensor_proto([(1+2j), (3+4j), (5+6j)], shape=[1, 3],
-                                      dtype=tf.complex128)
-    self.assertProtoEquals("""
-      dtype: DT_COMPLEX128
-      tensor_shape { dim { size: 1 } dim { size: 3 } }
-      dcomplex_val: 1
-      dcomplex_val: 2
-      dcomplex_val: 3
-      dcomplex_val: 4
-      dcomplex_val: 5
-      dcomplex_val: 6
-      """, t)
-    a = tensor_util.MakeNdarray(t)
-    self.assertEquals(np.complex128, a.dtype)
-    self.assertAllEqual(np.array([[(1+2j), (3+4j), (5+6j)]]), a)
-
-  def testComplex64NpArray(self):
+  def testComplexNpArray(self):
     t = tensor_util.make_tensor_proto(
         np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), dtype=tf.complex64)
     # scomplex_val are real_0, imag_0, real_1, imag_1, ...
@@ -363,26 +332,6 @@
     self.assertEquals(np.complex64, a.dtype)
     self.assertAllEqual(np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), a)
 
-  def testComplex128NpArray(self):
-    t = tensor_util.make_tensor_proto(
-        np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), dtype=tf.complex128)
-    # scomplex_val are real_0, imag_0, real_1, imag_1, ...
-    self.assertProtoEquals("""
-      dtype: DT_COMPLEX128
-      tensor_shape { dim { size: 2 } dim { size: 2 } }
-      dcomplex_val: 1
-      dcomplex_val: 2
-      dcomplex_val: 3
-      dcomplex_val: 4
-      dcomplex_val: 5
-      dcomplex_val: 6
-      dcomplex_val: 7
-      dcomplex_val: 8
-      """, t)
-    a = tensor_util.MakeNdarray(t)
-    self.assertEquals(np.complex128, a.dtype)
-    self.assertAllEqual(np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), a)
-
   def testUnsupportedDType(self):
     with self.assertRaises(TypeError):
       tensor_util.make_tensor_proto(np.array([1]), 0)
diff --git a/tensorflow/python/lib/core/py_func.cc b/tensorflow/python/lib/core/py_func.cc
index 1949913..5701e8f 100644
--- a/tensorflow/python/lib/core/py_func.cc
+++ b/tensorflow/python/lib/core/py_func.cc
@@ -99,9 +99,6 @@
     case DT_COMPLEX64:
       *np = NPY_COMPLEX64;
       break;
-    case DT_COMPLEX128:
-      *np = NPY_COMPLEX128;
-      break;
     case DT_STRING:
       *np = NPY_OBJECT;
       break;
@@ -213,9 +210,6 @@
     case NPY_COMPLEX64:
       *tf = DT_COMPLEX64;
       break;
-    case NPY_COMPLEX128:
-      *tf = DT_COMPLEX128;
-      break;
     default:
       return errors::Unimplemented("Unsupported numpy type ", np);
   }
diff --git a/tensorflow/python/training/session_manager.py b/tensorflow/python/training/session_manager.py
index 9e5b9a1..08fb65d 100644
--- a/tensorflow/python/training/session_manager.py
+++ b/tensorflow/python/training/session_manager.py
@@ -326,7 +326,7 @@
       try:
         sess.run(self._ready_op)
         return None
-      except errors.FailedPreconditionError as e:
+      except errors.FailedPreconditionError, e:
         if "uninitialized" not in str(e):
           logging.warning("Model not ready raised: %s", str(e))
           raise  e
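This hunk trades `except ... as e` for the comma form. The comma spelling is Python 2-only; `as` parses on Python 2.6+ and Python 3 alike, which is why it is the form that survives 2-to-3 migrations:

```
try:
    raise ValueError("uninitialized variable")
except ValueError as e:   # valid on Python 2.6+ and 3.x
    print(str(e))

# except ValueError, e:   # SyntaxError on Python 3
```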
diff --git a/tensorflow/tools/ci_build/Dockerfile.cpu b/tensorflow/tools/ci_build/Dockerfile.cpu
index 369daa9..acc84f1 100644
--- a/tensorflow/tools/ci_build/Dockerfile.cpu
+++ b/tensorflow/tools/ci_build/Dockerfile.cpu
@@ -7,7 +7,6 @@
 RUN /install/install_bootstrap_deb_packages.sh
 RUN add-apt-repository -y ppa:openjdk-r/ppa
 RUN /install/install_deb_packages.sh
-RUN /install/install_pip_packages.sh
 RUN /install/install_bazel.sh
 
 # Set up bazelrc.
diff --git a/tensorflow/tools/ci_build/Dockerfile.gpu b/tensorflow/tools/ci_build/Dockerfile.gpu
index 81cc4c9..b4b0ccc 100644
--- a/tensorflow/tools/ci_build/Dockerfile.gpu
+++ b/tensorflow/tools/ci_build/Dockerfile.gpu
@@ -7,7 +7,6 @@
 RUN /install/install_bootstrap_deb_packages.sh
 RUN add-apt-repository -y ppa:openjdk-r/ppa
 RUN /install/install_deb_packages.sh
-RUN /install/install_pip_packages.sh
 RUN /install/install_bazel.sh
 
 # Set up bazelrc.
diff --git a/tensorflow/tools/ci_build/builds/pip.sh b/tensorflow/tools/ci_build/builds/pip.sh
index 7255de0..16364fb 100755
--- a/tensorflow/tools/ci_build/builds/pip.sh
+++ b/tensorflow/tools/ci_build/builds/pip.sh
@@ -22,20 +22,11 @@
 #   pip.sh CONTAINER_TYPE [--test_tutorials]
 #
 # When executing the Python unit tests, the script obeys the shell
-# variables: TF_BUILD_BAZEL_CLEAN, TF_BUILD_INSTALL_EXTRA_PIP_PACKAGES,
-# TF_BUILD_NO_CACHING_VIRTUALENV, NO_TEST_ON_INSTALL
+# variables: TF_BUILD_BAZEL_CLEAN, NO_TEST_ON_INSTALL
 #
 # TF_BUILD_BAZEL_CLEAN, if set to any non-empty and non-0 value, directs the
 # script to perform bazel clean prior to main build and test steps.
 #
-# TF_BUILD_INSTALL_EXTRA_PIP_PACKAGES overrides the default extra pip packages
-# to be installed in virtualenv before test_installation.sh is called. Multiple
-# pakcage names are separated with spaces.
-#
-# TF_BUILD_NO_CACHING_VIRTUALENV: If set to any non-empty and non-0 value,
-# will cause the script to force remove any existing (cached) virtualenv
-# directory.
-#
 # If NO_TEST_ON_INSTALL has any non-empty and non-0 value, the test-on-install
 # part will be skipped.
 #
@@ -44,8 +35,6 @@
 # installation and the Python unit tests-on-install step.
 #
 
-INSTALL_EXTRA_PIP_PACKAGES=${TF_BUILD_INSTALL_EXTRA_PIP_PACKAGES}
-
 # Helper functions
 # Get the absolute path from a path
 abs_path() {
@@ -122,7 +111,7 @@
 PIP_WHL_DIR=$(abs_path ${PIP_WHL_DIR})  # Get absolute path
 rm -rf ${PIP_WHL_DIR} && mkdir -p ${PIP_WHL_DIR}
 bazel-bin/tensorflow/tools/pip_package/build_pip_package ${PIP_WHL_DIR} || \
-    die "build_pip_package FAILED"
+die "build_pip_package FAILED"
 
 # Perform installation
 WHL_PATH=$(ls ${PIP_WHL_DIR}/tensorflow*.whl)
@@ -136,46 +125,27 @@
 # Install, in user's local home folder
 echo "Installing pip whl file: ${WHL_PATH}"
 
-# Create virtualenv directory for install test
+# Create temporary directory for install test
 VENV_DIR="${PIP_TEST_ROOT}/venv"
-if [[ -d "${VENV_DIR}" ]] &&
-   [[ ! -z "${TF_BUILD_NO_CACHING_VIRTUALENV}" ]] &&
-   [[ "${TF_BUILD_NO_CACHING_VIRTUALENV}" != "0" ]]; then
-  echo "TF_BUILD_NO_CACHING_VIRTUALENV=${TF_BUILD_NO_CACHING_VIRTUALENV}:"
-  echo "Removing existing virtualenv directory: ${VENV_DIR}"
-
-  rm -rf "${VENV_DIR}" || \
-      die "Failed to remove existing virtualenv directory: ${VENV_DIR}"
-fi
-
-mkdir -p ${VENV_DIR} || \
-    die "FAILED to create virtualenv directory: ${VENV_DIR}"
+rm -rf "${VENV_DIR}" && mkdir -p "${VENV_DIR}"
+echo "Create directory for virtualenv: ${VENV_DIR}"
 
 # Verify that virtualenv exists
 if [[ -z $(which virtualenv) ]]; then
   die "FAILED: virtualenv not available on path"
 fi
 
-virtualenv --system-site-packages -p "${PYTHON_BIN_PATH}" "${VENV_DIR}" || \
-    die "FAILED: Unable to create virtualenv"
+virtualenv -p "${PYTHON_BIN_PATH}" "${VENV_DIR}" ||
+die "FAILED: Unable to create virtualenv"
 
-source "${VENV_DIR}/bin/activate" || \
-    die "FAILED: Unable to activate virtualenv"
-
+source "${VENV_DIR}/bin/activate" ||
+die "FAILED: Unable to activate virtualenv"
 
 # Install the pip file in virtual env
-pip install -v --force-reinstall ${WHL_PATH} \
+pip install -v ${WHL_PATH} \
 && echo "Successfully installed pip package ${WHL_PATH}" \
 || die "pip install (without --upgrade) FAILED"
 
-# Install extra pip packages required by the test-on-install
-for PACKAGE in ${INSTALL_EXTRA_PIP_PACKAGES}; do
-  echo "Installing extra pip package required by test-on-install: ${PACKAGE}"
-
-  pip install ${PACKAGE} || \
-      die "pip install ${PACKAGE} FAILED"
-done
-
 # If NO_TEST_ON_INSTALL is set to any non-empty value, skip all Python
 # tests-on-install and exit right away
 if [[ ! -z "${NO_TEST_ON_INSTALL}" ]] &&
@@ -188,14 +158,14 @@
 # Call test_installation.sh to perform test-on-install
 DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 
-"${DIR}/test_installation.sh" --virtualenv || \
-    die "PIP tests-on-install FAILED"
+"${DIR}/test_installation.sh" --virtualenv ||
+die "PIP tests-on-install FAILED"
 
 # Optional: Run the tutorial tests
 if [[ "${DO_TEST_TUTORIALS}" == "1" ]]; then
-  "${DIR}/test_tutorials.sh" --virtualenv || \
-      die "PIP tutorial tests-on-install FAILED"
+  "${DIR}/test_tutorials.sh" --virtualenv ||
+die "PIP tutorial tests-on-install FAILED"
 fi
 
-deactivate || \
-    die "FAILED: Unable to deactivate virtualenv"
+deactivate ||
+die "FAILED: Unable to deactivate virtualenv"
diff --git a/tensorflow/tools/ci_build/builds/test_installation.sh b/tensorflow/tools/ci_build/builds/test_installation.sh
index 8fa9b48..d2c8d21 100755
--- a/tensorflow/tools/ci_build/builds/test_installation.sh
+++ b/tensorflow/tools/ci_build/builds/test_installation.sh
@@ -166,8 +166,7 @@
 
 # Run tests
 DIR0=$(pwd)
-ALL_PY_TESTS=$(find tensorflow/{contrib,examples,models,python,tensorboard} \
-    -type f \( -name "*_test.py" -o -name "test_*.py" \) | sort)
+ALL_PY_TESTS=$(find tensorflow/{contrib,examples,models,python,tensorboard} -name "*_test.py" | sort)
 # TODO(cais): Add tests in tensorflow/contrib
 
 PY_TEST_COUNT=$(echo ${ALL_PY_TESTS} | wc -w)
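The two find invocations discover different test sets: the single `-name "*_test.py"` pattern only matches files ending in `_test.py`, while the removed pair of patterns also picked up files named `test_*.py`. The same distinction with fnmatch (file names invented for the demo):

```
from fnmatch import fnmatch

files = ["session_test.py", "test_ops.py", "util.py"]
both = [f for f in files
        if fnmatch(f, "*_test.py") or fnmatch(f, "test_*.py")]
only_suffix = [f for f in files if fnmatch(f, "*_test.py")]
print(both)         # ['session_test.py', 'test_ops.py']
print(only_suffix)  # ['session_test.py']
```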
diff --git a/tensorflow/tools/ci_build/ci_parameterized_build.sh b/tensorflow/tools/ci_build/ci_parameterized_build.sh
index 9b7e5ab..46c1740 100755
--- a/tensorflow/tools/ci_build/ci_parameterized_build.sh
+++ b/tensorflow/tools/ci_build/ci_parameterized_build.sh
@@ -306,7 +306,7 @@
 fi
 
 # Write to the tmp script
-echo "#!/usr/bin/env bash" > ${TMP_SCRIPT}
+echo "#!/bin/bash" > ${TMP_SCRIPT}
 if [[ ! -z "${TF_BUILD_BAZEL_CLEAN}" ]] &&
    [[ "${TF_BUILD_BAZEL_CLEAN}" != "0" ]]; then
   echo ${BAZEL_CLEAN_CMD} >> ${TMP_SCRIPT}
diff --git a/tensorflow/tools/ci_build/install/install_deb_packages.sh b/tensorflow/tools/ci_build/install/install_deb_packages.sh
index 1bf77b2..b752e86 100755
--- a/tensorflow/tools/ci_build/install/install_deb_packages.sh
+++ b/tensorflow/tools/ci_build/install/install_deb_packages.sh
@@ -29,12 +29,10 @@
     python-dev \
     python-numpy \
     python-pip \
-    python-scipy \
     python-virtualenv \
     python3-dev \
     python3-numpy \
     python3-pip \
-    python3-scipy \
     sudo \
     swig \
     unzip \
diff --git a/tensorflow/tools/ci_build/install/install_pip_packages.sh b/tensorflow/tools/ci_build/install/install_pip_packages.sh
deleted file mode 100755
index 3958386..0000000
--- a/tensorflow/tools/ci_build/install/install_pip_packages.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/env bash
-# Copyright 2015 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-set -e
-
-pip install sklearn
-pip3 install scikit-learn
diff --git a/tensorflow/tools/docker/docker_run_gpu.sh b/tensorflow/tools/docker/docker_run_gpu.sh
index ead05f9..9ebfa70 100755
--- a/tensorflow/tools/docker/docker_run_gpu.sh
+++ b/tensorflow/tools/docker/docker_run_gpu.sh
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
 # Copyright 2015 Google Inc. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tensorflow/tools/docker/run_jupyter.sh b/tensorflow/tools/docker/run_jupyter.sh
index eb69d62..ba2f3a3 100755
--- a/tensorflow/tools/docker/run_jupyter.sh
+++ b/tensorflow/tools/docker/run_jupyter.sh
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
 # Copyright 2015 Google Inc. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tensorflow/tools/docs/gen_docs.sh b/tensorflow/tools/docs/gen_docs.sh
index de507fc..95c0092 100755
--- a/tensorflow/tools/docs/gen_docs.sh
+++ b/tensorflow/tools/docs/gen_docs.sh
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
 # Copyright 2015 Google Inc. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tensorflow/tools/docs/gen_docs_test.sh b/tensorflow/tools/docs/gen_docs_test.sh
index 9375784..2f905c8 100755
--- a/tensorflow/tools/docs/gen_docs_test.sh
+++ b/tensorflow/tools/docs/gen_docs_test.sh
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash -eux
 # Copyright 2015 Google Inc. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,8 +14,6 @@
 # limitations under the License.
 # ==============================================================================
 
-set -eux
-
 TFDIR=$TEST_SRCDIR/tensorflow
 DOXYGEN=doxygen
 DOXYGEN_CONFIG="tf-doxy_for_md-config"
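Moving `-eux` from a `set` line onto the shebang is not a pure style change: shebang flags take effect only when the kernel launches the script directly, and are ignored when it is invoked as `bash script.sh`, whereas `set -eux` in the body applies either way. A throwaway demonstration (Unix only; the temp script is hypothetical):

```
import os
import subprocess
import tempfile

body = '#!/bin/bash -eu\necho before\necho "$UNDEFINED_VAR"\necho after\n'
with tempfile.NamedTemporaryFile("w", suffix=".sh", delete=False) as f:
    f.write(body)
    path = f.name
os.chmod(path, 0o755)

# Direct execution honors the shebang flags: -u aborts on the unset var.
print(subprocess.run([path]).returncode)          # non-zero
# `bash script` treats the shebang as a comment, so -eu is lost.
print(subprocess.run(["bash", path]).returncode)  # 0, and "after" prints
os.remove(path)
```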
diff --git a/tensorflow/tools/pip_package/build_pip_package.sh b/tensorflow/tools/pip_package/build_pip_package.sh
index 1ae6926..6b4e504 100755
--- a/tensorflow/tools/pip_package/build_pip_package.sh
+++ b/tensorflow/tools/pip_package/build_pip_package.sh
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
 # Copyright 2015 Google Inc. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tensorflow/tools/swig/swig.sh b/tensorflow/tools/swig/swig.sh
index c35b2ee..0601703 100755
--- a/tensorflow/tools/swig/swig.sh
+++ b/tensorflow/tools/swig/swig.sh
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
 # Copyright 2015 Google Inc. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tensorflow/tools/test/BUILD b/tensorflow/tools/test/BUILD
index df2a0b4..a686bbe 100644
--- a/tensorflow/tools/test/BUILD
+++ b/tensorflow/tools/test/BUILD
@@ -53,21 +53,23 @@
 
 # Unit test that calls run_and_gather_logs on a benchmark, and
 # prints the result.
-#cuda_py_test(
-#    name = "run_and_gather_logs_test",
-#    srcs = ["run_and_gather_logs.py"],
-#    additional_deps = [
-#        ":run_and_gather_logs",
-#    ],
-#    args = [
-#        "--test_name=" + "//tensorflow/core/kernels:cast_op_test",
-#        "--test_args=" + "'--benchmarks=BM_cpu_float'",
-#    ],
-#    data = [
-#        "//tensorflow/core/kernels:cast_op_test",
-#    ],
-#    main = "run_and_gather_logs.py",
-#)
+cuda_py_test(
+    name = "run_and_gather_logs_test",
+    srcs = ["run_and_gather_logs.py"],
+    additional_deps = [
+        ":run_and_gather_logs",
+    ],
+    args = [
+        "--test_name=" + "//tensorflow/core/kernels:cast_op_test",
+        "--test_args=" + "'--benchmarks=BM_cpu_float_bfloat16'",
+        "--compilation_mode='$(COMPILATION_MODE)'",
+        "--cc_flags='$(CC_FLAGS)'",
+    ],
+    data = [
+        "//tensorflow/core/kernels:cast_op_test",
+    ],
+    main = "run_and_gather_logs.py",
+)
 
 filegroup(
     name = "all_files",
diff --git a/third_party/gpus/cuda/cuda_config.sh b/third_party/gpus/cuda/cuda_config.sh
index 651e5ae..42cd254 100755
--- a/third_party/gpus/cuda/cuda_config.sh
+++ b/third_party/gpus/cuda/cuda_config.sh
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
 # Copyright 2015 Google Inc. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/util/python/python_config.sh b/util/python/python_config.sh
index 83e3856..a5666c2 100755
--- a/util/python/python_config.sh
+++ b/util/python/python_config.sh
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
 # Copyright 2015 Google Inc. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
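The remaining hunks all replace `#!/usr/bin/env bash` with a hard-coded `#!/bin/bash`. The env form finds bash through $PATH, which matters on systems where bash is not in /bin (some BSDs install it under /usr/local/bin); the fixed path assumes /bin/bash exists. The lookup env performs is essentially this:

```
import os

def which(prog):
    # Minimal sketch of the $PATH search /usr/bin/env performs.
    for d in os.environ.get("PATH", "").split(os.pathsep):
        candidate = os.path.join(d, prog)
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return candidate
    return None

print(which("bash"))  # e.g. /bin/bash on Linux
```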