Snap for 4966162 from bae791c346b5063cd59371bba247f9ccc3583862 to qt-release am: 1f0c18813c

Original change: https://googleplex-android-review.googlesource.com/c/kernel/tests/+/4826255

Change-Id: I761a73a2c6888597f6eed5a8757876b5436ab6ee
diff --git a/METADATA b/METADATA
new file mode 100644
index 0000000..d97975c
--- /dev/null
+++ b/METADATA
@@ -0,0 +1,3 @@
+third_party {
+  license_type: NOTICE
+}
diff --git a/net/test/Android.bp b/net/test/Android.bp
index 2151015..6c4a75f 100644
--- a/net/test/Android.bp
+++ b/net/test/Android.bp
@@ -1,13 +1,29 @@
-python_test {
-    name: "kernel_net_tests",
-    main: "all_tests.py",
+python_defaults {
+    name: "kernel_net_tests_defaults",
     srcs: [
         "*.py",
     ],
     libs: [
         "scapy",
     ],
-    defaults: [
-        "kernel_tests_defaults"
-    ],
+    defaults: ["kernel_tests_defaults",],
+}
+
+// Currently, we keep it for vts10. This could be useful to produce a binary
+// that can be run manually on the device.
+// TODO(b/146651404): Remove all vts10 only test modules after vts11
+// is released.
+python_test {
+    name: "kernel_net_tests",
+    main: "all_tests.py",
+    defaults: ["kernel_net_tests_defaults",],
+}
+
+python_test {
+    name: "vts_kernel_net_tests",
+    stem: "kernel_net_tests_bin",
+    main: "all_tests.py",
+    defaults: ["kernel_net_tests_defaults",],
+    test_suites: ["vts",],
+    test_config: "vts_kernel_net_tests.xml",
 }
diff --git a/net/test/OWNERS b/net/test/OWNERS
index f002a84..cbbfa70 100644
--- a/net/test/OWNERS
+++ b/net/test/OWNERS
@@ -1,2 +1,2 @@
-ek@google.com
 lorenzo@google.com
+maze@google.com
diff --git a/net/test/all_tests.py b/net/test/all_tests.py
index 72d3c4e..17d9701 100755
--- a/net/test/all_tests.py
+++ b/net/test/all_tests.py
@@ -14,10 +14,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from importlib import import_module
+import importlib
 import sys
 import unittest
 
+import namespace
+
 test_modules = [
     'anycast_test',
     'bpf_test',
@@ -27,8 +29,10 @@
     'leak_test',
     'multinetwork_test',
     'neighbour_test',
+    'nf_test',
     'pf_key_test',
     'ping6_test',
+    'policy_crash_test',
     'qtaguid_test',
     'removed_feature_test',
     'resilient_rs_test',
@@ -36,6 +40,7 @@
     'srcaddr_selection_test',
     'tcp_fastopen_test',
     'tcp_nuke_addr_test',
+    'tcp_repair_test',
     'tcp_test',
     'xfrm_algorithm_test',
     'xfrm_test',
@@ -43,15 +48,25 @@
 ]
 
 if __name__ == '__main__':
+  # Check whether ADB over TCP is occupying TCP port 5555.
+  if namespace.HasEstablishedTcpSessionOnPort(5555):
+    namespace.IfPossibleEnterNewNetworkNamespace()
   # First, run InjectTests on all modules, to ensure that any parameterized
   # tests in those modules are injected.
   for name in test_modules:
-    import_module(name)
-    if hasattr(sys.modules[name], "InjectTests"):
+    importlib.import_module(name)
+    if hasattr(sys.modules[name], 'InjectTests'):
       sys.modules[name].InjectTests()
 
   loader = unittest.defaultTestLoader
-  test_suite = loader.loadTestsFromNames(test_modules)
+  if len(sys.argv) > 1:
+    test_suite = loader.loadTestsFromNames(sys.argv[1:])
+  else:
+    test_suite = loader.loadTestsFromNames(test_modules)
+
+  assert test_suite.countTestCases() > 0, (
+      'Inconceivable: no tests found! Command line: %s' % ' '.join(sys.argv))
+
   runner = unittest.TextTestRunner(verbosity=2)
   result = runner.run(test_suite)
   sys.exit(not result.wasSuccessful())
diff --git a/net/test/anycast_test.py b/net/test/anycast_test.py
old mode 100755
new mode 100644
index 62d874e..6222580
--- a/net/test/anycast_test.py
+++ b/net/test/anycast_test.py
@@ -93,9 +93,14 @@
     # This will hang if the kernel has the bug.
     thread = CloseFileDescriptorThread(self.tuns[netid])
     thread.start()
-    # Wait up to 0.5 seconds for the thread to finish, but
+    # Wait up to 3 seconds for the thread to finish, but
     # continue and fail the test if the thread hangs.
-    thread.join(0.5)
+
+    # For kernels with MPTCP ported, closing tun interface need more
+    # than 0.5 sec. DAD procedure within MPTCP fullmesh module takes
+    # more time, because duplicate address-timer takes a refcount
+    # on the IPv6-address, preventing it from getting closed.
+    thread.join(3)
 
     # Make teardown work.
     del self.tuns[netid]
diff --git a/net/test/bpf.py b/net/test/bpf.py
index aa50f3e..5062e31 100755
--- a/net/test/bpf.py
+++ b/net/test/bpf.py
@@ -31,12 +31,14 @@
 # are not running with COMPAT_UTS_MACHINE and must be 64-bit at all times.
 # TODO: is there a better way of doing this?
 __NR_bpf = {
+    "aarch64-32bit": 386,
     "aarch64-64bit": 280,
     "armv7l-32bit": 386,
     "armv8l-32bit": 386,
     "armv8l-64bit": 280,
     "i686-32bit": 357,
     "i686-64bit": 321,
+    "x86_64-32bit": 357,
     "x86_64-64bit": 321,
 }[os.uname()[4] + "-" + platform.architecture()[0]]
 
@@ -152,6 +154,7 @@
 BPF_FUNC_map_lookup_elem = 1
 BPF_FUNC_map_update_elem = 2
 BPF_FUNC_map_delete_elem = 3
+BPF_FUNC_get_current_uid_gid = 15
 BPF_FUNC_get_socket_cookie = 46
 BPF_FUNC_get_socket_uid = 47
 
diff --git a/net/test/bpf_test.py b/net/test/bpf_test.py
index 270243e..ea3e56b 100755
--- a/net/test/bpf_test.py
+++ b/net/test/bpf_test.py
@@ -30,10 +30,12 @@
 
 libc = ctypes.CDLL(ctypes.util.find_library("c"), use_errno=True)
 HAVE_EBPF_ACCOUNTING = net_test.LINUX_VERSION >= (4, 9, 0)
+HAVE_EBPF_SOCKET = net_test.LINUX_VERSION >= (4, 14, 0)
 KEY_SIZE = 8
 VALUE_SIZE = 4
 TOTAL_ENTRIES = 20
 TEST_UID = 54321
+TEST_GID = 12345
 # Offset to store the map key in stack register REG10
 key_offset = -8
 # Offset to store the map value in stack register REG10
@@ -350,6 +352,10 @@
       BpfProgDetach(self._cg_fd, BPF_CGROUP_INET_INGRESS)
     except socket.error:
       pass
+    try:
+      BpfProgDetach(self._cg_fd, BPF_CGROUP_INET_SOCK_CREATE)
+    except socket.error:
+      pass
 
   def testCgroupBpfAttach(self):
     self.prog_fd = BpfProgLoad(BPF_PROG_TYPE_CGROUP_SKB, INS_BPF_EXIT_BLOCK)
@@ -392,10 +398,49 @@
       self.assertRaisesErrno(errno.ENOENT, LookupMap, self.map_fd, uid)
       SocketUDPLoopBack(packet_count, 4, None)
       self.assertEquals(packet_count, LookupMap(self.map_fd, uid).value)
-      DeleteMap(self.map_fd, uid);
+      DeleteMap(self.map_fd, uid)
       SocketUDPLoopBack(packet_count, 6, None)
       self.assertEquals(packet_count, LookupMap(self.map_fd, uid).value)
     BpfProgDetach(self._cg_fd, BPF_CGROUP_INET_INGRESS)
 
+  def checkSocketCreate(self, family, socktype, success):
+    try:
+      sock = socket.socket(family, socktype, 0)
+      sock.close()
+    except socket.error, e:
+      if success:
+        self.fail("Failed to create socket family=%d type=%d err=%s" %
+                  (family, socktype, os.strerror(e.errno)))
+      return;
+    if not success:
+      self.fail("unexpected socket family=%d type=%d created, should be blocked" %
+                (family, socktype))
+
+
+  def trySocketCreate(self, success):
+      for family in [socket.AF_INET, socket.AF_INET6]:
+        for socktype in [socket.SOCK_DGRAM, socket.SOCK_STREAM]:
+          self.checkSocketCreate(family, socktype, success)
+
+  @unittest.skipUnless(HAVE_EBPF_SOCKET,
+                     "Cgroup BPF socket is not supported")
+  def testCgroupSocketCreateBlock(self):
+    instructions = [
+        BpfFuncCall(BPF_FUNC_get_current_uid_gid),
+        BpfAlu64Imm(BPF_AND, BPF_REG_0, 0xfffffff),
+        BpfJumpImm(BPF_JNE, BPF_REG_0, TEST_UID, 2),
+    ]
+    instructions += INS_BPF_EXIT_BLOCK + INS_CGROUP_ACCEPT;
+    self.prog_fd = BpfProgLoad(BPF_PROG_TYPE_CGROUP_SOCK, instructions)
+    BpfProgAttach(self.prog_fd, self._cg_fd, BPF_CGROUP_INET_SOCK_CREATE)
+    with net_test.RunAsUid(TEST_UID):
+      # Socket creation with target uid should fail
+      self.trySocketCreate(False);
+    # Socket create with different uid should success
+    self.trySocketCreate(True)
+    BpfProgDetach(self._cg_fd, BPF_CGROUP_INET_SOCK_CREATE)
+    with net_test.RunAsUid(TEST_UID):
+      self.trySocketCreate(True)
+
 if __name__ == "__main__":
   unittest.main()
diff --git a/net/test/build_all_rootfs.sh b/net/test/build_all_rootfs.sh
new file mode 100755
index 0000000..6e4fdd6
--- /dev/null
+++ b/net/test/build_all_rootfs.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set -e
+
+for s in bullseye; do
+  for a in i386 amd64 armhf arm64; do
+    ./build_rootfs.sh -s "${s}" -a "${a}"
+  done
+done
+
+echo 'All rootfs builds completed.'
diff --git a/net/test/build_rootfs.sh b/net/test/build_rootfs.sh
index e0788c1..5563d8a 100755
--- a/net/test/build_rootfs.sh
+++ b/net/test/build_rootfs.sh
@@ -19,25 +19,49 @@
 
 SCRIPT_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd -P)
 
+# Make sure we're in C locale so build inside chroot does not complain
+# about missing files
+unset LANG LANGUAGE \
+  LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION LC_MEASUREMENT \
+  LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER LC_TELEPHONE LC_TIME
+export LC_ALL=C
+
 usage() {
-  echo "usage: $0 [-h] [-s wheezy|stretch] [-n net_test.rootfs.`date +%Y%m%d`]"
+  echo -n "usage: $0 [-h] [-s bullseye] [-a i386|amd64|armhf|arm64] "
+  echo "[-m http://mirror/debian] [-n net_test.rootfs.`date +%Y%m%d`]"
   exit 1
 }
 
-name=net_test.rootfs.`date +%Y%m%d`
-suite=stretch
+mirror=http://ftp.debian.org/debian
+debootstrap=debootstrap
+suite=bullseye
+arch=amd64
 
-while getopts ":hs:n:" opt; do
+while getopts ":hs:a:m:n:" opt; do
   case $opt in
     h)
       usage
       ;;
     s)
-      if [ "$OPTARG" != "wheezy" -a "$OPTARG" != "stretch" ]; then
+      if [[ "$OPTARG" != "bullseye" ]]; then
         echo "Invalid suite: $OPTARG" >&2
         usage
       fi
-      suite=$OPTARG
+      suite="${OPTARG}"
+      ;;
+    a)
+      case "${OPTARG}" in
+        i386|amd64|armhf|arm64)
+          arch="${OPTARG}"
+          ;;
+        *)
+          echo "Invalid arch: ${OPTARG}" >&2
+          usage
+          ;;
+      esac
+      ;;
+    m)
+      mirror=$OPTARG
       ;;
     n)
       name=$OPTARG
@@ -53,6 +77,15 @@
   esac
 done
 
+if [[ -z "${name}" ]]; then
+  name=net_test.rootfs.${arch}.${suite}.`date +%Y%m%d`
+fi
+
+# Switch to qemu-debootstrap for incompatible architectures
+if [ "$arch" = "arm64" ]; then
+  debootstrap=qemu-debootstrap
+fi
+
 # Sometimes it isn't obvious when the script fails
 failure() {
   echo "Filesystem generation process failed." >&2
@@ -63,17 +96,23 @@
 packages=`cat $SCRIPT_DIR/rootfs/$suite.list | xargs | tr -s ' ' ','`
 
 # For the debootstrap intermediates
-workdir=`mktemp -d`
-workdir_remove() {
+tmpdir=`mktemp -d`
+tmpdir_remove() {
   echo "Removing temporary files.." >&2
-  sudo rm -rf $workdir
+  sudo rm -rf "${tmpdir}"
 }
-trap workdir_remove EXIT
+trap tmpdir_remove EXIT
+
+workdir="${tmpdir}/_"
+
+mkdir "${workdir}"
+chmod 0755 "${workdir}"
+sudo chown root:root "${workdir}"
 
 # Run the debootstrap first
 cd $workdir
-sudo debootstrap --arch=amd64 --variant=minbase --include=$packages \
-                 $suite . http://ftp.debian.org/debian
+sudo $debootstrap --arch=$arch --variant=minbase --include=$packages \
+                  $suite . $mirror
 # Workarounds for bugs in the debootstrap suite scripts
 for mount in `cat /proc/mounts | cut -d' ' -f2 | grep -e ^$workdir`; do
   echo "Unmounting mountpoint $mount.." >&2
@@ -93,7 +132,7 @@
 mount=`mktemp -d`
 mount_remove() {
  rmdir $mount
- workdir_remove
+ tmpdir_remove
 }
 trap mount_remove EXIT
 
@@ -116,4 +155,4 @@
 sudo dd if=/dev/zero of=$mount/sparse bs=1M 2>/dev/null || true
 sudo rm -f $mount/sparse
 
-echo "Debian $suite filesystem generated at '$name'."
+echo "Debian $suite for $arch filesystem generated at '$name'."
diff --git a/net/test/iproute.py b/net/test/iproute.py
index 77c41b2..470cbf1 100644
--- a/net/test/iproute.py
+++ b/net/test/iproute.py
@@ -47,6 +47,7 @@
 RTM_NEWRULE = 32
 RTM_DELRULE = 33
 RTM_GETRULE = 34
+RTM_NEWNDUSEROPT = 68
 
 # Routing message type values (rtm_type).
 RTN_UNSPEC = 0
@@ -84,6 +85,8 @@
 
 # Netlink groups.
 RTMGRP_IPV6_IFADDR = 0x100
+RTNLGRP_ND_USEROPT = 20
+RTMGRP_ND_USEROPT = (1 << (RTNLGRP_ND_USEROPT - 1))  # Not a kernel constant
 
 # Route metric attributes.
 RTAX_MTU = 2
@@ -97,7 +100,8 @@
     "family dst_len src_len tos table protocol scope type flags")
 RTACacheinfo = cstruct.Struct(
     "RTACacheinfo", "=IIiiI", "clntref lastuse expires error used")
-
+NdUseroptMsg = cstruct.Struct("nduseroptmsg", "=BxHiBBxxxxxx",
+                              "family opts_len ifindex icmp_type icmp_code")
 
 ### Interface address constants. See include/uapi/linux/if_addr.h.
 # Interface address attributes.
@@ -147,6 +151,7 @@
 NDA_LLADDR = 2
 NDA_CACHEINFO = 3
 NDA_PROBES = 4
+NDA_IFINDEX = 8
 
 # Neighbour cache entry states.
 NUD_PERMANENT = 0x80
@@ -534,8 +539,7 @@
     flags = IFA_F_PERMANENT
     if version == 6:
       flags |= IFA_F_NODAD
-      sock = self._OpenNetlinkSocket(netlink.NETLINK_ROUTE,
-                                     groups=RTMGRP_IPV6_IFADDR)
+      sock = self._OpenNetlinkSocket(netlink.NETLINK_ROUTE, RTMGRP_IPV6_IFADDR)
 
     self._Address(version, RTM_NEWADDR, address, prefixlen, flags,
                   RT_SCOPE_UNIVERSE, ifindex)
@@ -639,9 +643,10 @@
     self._Neighbour(version, True, addr, lladdr, dev, state,
                     flags=netlink.NLM_F_REPLACE)
 
-  def DumpNeighbours(self, version):
+  def DumpNeighbours(self, version, ifindex):
     ndmsg = NdMsg((self._AddressFamily(version), 0, 0, 0, 0))
-    return self._Dump(RTM_GETNEIGH, ndmsg, NdMsg, "")
+    attrs = self._NlAttrU32(NDA_IFINDEX, ifindex) if ifindex else ""
+    return self._Dump(RTM_GETNEIGH, ndmsg, NdMsg, attrs)
 
   def ParseNeighbourMessage(self, msg):
     msg, _ = self._ParseNLMsg(msg, NdMsg)
diff --git a/net/test/leak_test.py b/net/test/leak_test.py
index 8ef4b41..8a42611 100755
--- a/net/test/leak_test.py
+++ b/net/test/leak_test.py
@@ -65,18 +65,18 @@
     s.setsockopt(SOL_SOCKET, force_option, val)
     self.assertEquals(2 * val, s.getsockopt(SOL_SOCKET, option))
 
-    # Check that the force option sets the minimum value instead of a negative
-    # value on integer overflow. Because the kernel multiplies passed-in values
-    # by 2, pick a value that becomes a small negative number if treated as
-    # unsigned.
+    # Check that the force option sets at least the minimum value instead
+    # of a negative value on integer overflow. Because the kernel multiplies
+    # passed-in values by 2, pick a value that becomes a small negative number
+    # if treated as unsigned.
     bogusval = 2 ** 31 - val
     s.setsockopt(SOL_SOCKET, force_option, bogusval)
-    self.assertEquals(minbuf, s.getsockopt(SOL_SOCKET, option))
+    self.assertLessEqual(minbuf, s.getsockopt(SOL_SOCKET, option))
 
   def testRcvBufForce(self):
     self.CheckForceSocketBufferOption(SO_RCVBUF, self.SO_RCVBUFFORCE)
 
-  def testRcvBufForce(self):
+  def testSndBufForce(self):
     self.CheckForceSocketBufferOption(SO_SNDBUF, self.SO_SNDBUFFORCE)
 
 
diff --git a/net/test/multinetwork_base.py b/net/test/multinetwork_base.py
index ce653b2..8dbd360 100644
--- a/net/test/multinetwork_base.py
+++ b/net/test/multinetwork_base.py
@@ -470,7 +470,7 @@
 
   def GetRemoteAddress(self, version):
     return {4: self.IPV4_ADDR,
-            5: self.IPV4_ADDR,
+            5: self.IPV4_ADDR,  # see GetRemoteSocketAddress()
             6: self.IPV6_ADDR}[version]
 
   def GetRemoteSocketAddress(self, version):
diff --git a/net/test/multinetwork_test.py b/net/test/multinetwork_test.py
index 68d0ef4..a0b464a 100755
--- a/net/test/multinetwork_test.py
+++ b/net/test/multinetwork_test.py
@@ -14,6 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import cstruct
 import ctypes
 import errno
 import os
@@ -29,6 +30,7 @@
 import iproute
 import multinetwork_base
 import net_test
+import netlink
 import packets
 
 # For brevity.
@@ -201,12 +203,15 @@
       # check that the packets sent on that socket go out on the right network.
       #
       # For connected sockets, routing is cached in the socket's destination
-      # cache entry. In this case, we check that just re-selecting the netid
-      # (except via SO_BINDTODEVICE) does not change routing, but that
-      # subsequently invalidating the destination cache entry does. Arguably
-      # this is a bug in the kernel because re-selecting the netid should cause
-      # routing to change. But it is a convenient way to check that
-      # InvalidateDstCache actually works.
+      # cache entry. In this case, we check that selecting the network a second
+      # time on the same socket (except via SO_BINDTODEVICE, or SO_MARK on 5.0+
+      # kernels) does not change routing, but that subsequently invalidating the
+      # destination cache entry does. This is a bug in the kernel because
+      # re-selecting the netid should cause routing to change, and future
+      # kernels may fix this bug for per-UID routing and ucast_oif routing like
+      # they already have for mark-based routing. But until they do, this
+      # behaviour provides a convenient way to check that InvalidateDstCache
+      # actually works.
       prevnetid = None
       for netid in self.tuns:
         self.SelectInterface(s, netid, mode)
@@ -223,15 +228,33 @@
             s.sendto(UDP_PAYLOAD, (dstaddr, 53))
           self.ExpectPacketOn(netid, msg, expected)
 
-        if use_connect and mode in ["mark", "uid", "ucast_oif"]:
-          # If we have a destination cache entry, packets are not rerouted...
-          if prevnetid:
+        # Does this socket have a stale dst cache entry that we need to clear?
+        def SocketHasStaleDstCacheEntry():
+          if not prevnetid:
+            # This is the first time we're marking the socket.
+            return False
+          if not use_connect:
+            # Non-connected sockets never have dst cache entries.
+            return False
+          if mode in ["uid", "ucast_oif"]:
+            # No kernel invalidates the dst cache entry if the UID or the
+            # UCAST_OIF socket option changes.
+            return True
+          if mode == "oif":
+            # Changing SO_BINDTODEVICE always invalidates the dst cache entry.
+            return False
+          if mode == "mark":
+            # Changing the mark invalidates the dst cache entry in 5.0+.
+            return net_test.LINUX_VERSION < (5, 0, 0)
+          raise AssertionError("%s must be one of %s" % (mode, modes))
+
+        if SocketHasStaleDstCacheEntry():
             ExpectSendUsesNetid(prevnetid)
             # ... until we invalidate it.
             self.InvalidateDstCache(version, prevnetid)
-          ExpectSendUsesNetid(netid)
-        else:
-          ExpectSendUsesNetid(netid)
+
+        # In any case, future sends must be correct.
+        ExpectSendUsesNetid(netid)
 
         self.SelectInterface(s, None, mode)
         prevnetid = netid
@@ -772,6 +795,11 @@
 
 class RATest(multinetwork_base.MultiNetworkBaseTest):
 
+  ND_ROUTER_ADVERT = 134
+  ND_OPT_PREF64 = 38
+  Pref64Option = cstruct.Struct("pref64_option", "!BBH12s",
+                                "type length lft_plc prefix")
+
   def testDoesNotHaveObsoleteSysctl(self):
     self.assertFalse(os.path.isfile(
         "/proc/sys/net/ipv6/route/autoconf_table_offset"))
@@ -855,6 +883,50 @@
         del self.tuns[i]
     self.assertLess(num_routes, GetNumRoutes())
 
+  def SendNdUseropt(self, option):
+    options = scapy.ICMPv6NDOptRouteInfo(rtlifetime=rtlifetime, plen=plen,
+                                         prefix=prefix, prf=prf)
+    self.SendRA(self.NETID, options=(options,))
+
+  def MakePref64Option(self, prefix, lifetime):
+    prefix = inet_pton(AF_INET6, prefix)[:12]
+    lft_plc = (lifetime & 0xfff8) | 0  # 96-bit prefix length
+    return self.Pref64Option((self.ND_OPT_PREF64, 2, lft_plc, prefix))
+
+  @unittest.skipUnless(net_test.LINUX_VERSION >= (4, 9, 0), "not backported")
+  def testPref64UserOption(self):
+    # Open a netlink socket to receive RTM_NEWNDUSEROPT messages.
+    s = netlink.NetlinkSocket(netlink.NETLINK_ROUTE, iproute.RTMGRP_ND_USEROPT)
+
+    # Send an RA with the PREF64 option.
+    netid = random.choice(self.NETIDS)
+    opt = self.MakePref64Option("64:ff9b::", 300)
+    self.SendRA(netid, options=(opt.Pack(),))
+
+    # Check that we get an an RTM_NEWNDUSEROPT message on the socket with the
+    # expected option.
+    csocket.SetSocketTimeout(s.sock, 100)
+    try:
+      data = s._Recv()
+    except IOError, e:
+      self.fail("Should have received an RTM_NEWNDUSEROPT message. "
+                "Please ensure the kernel supports receiving the "
+                "PREF64 RA option. Error: %s" % e)
+
+    # Check that the message is received correctly.
+    nlmsghdr, data = cstruct.Read(data, netlink.NLMsgHdr)
+    self.assertEquals(iproute.RTM_NEWNDUSEROPT, nlmsghdr.type)
+
+    # Check the option contents.
+    ndopthdr, data = cstruct.Read(data, iproute.NdUseroptMsg)
+    self.assertEquals(AF_INET6, ndopthdr.family)
+    self.assertEquals(self.ND_ROUTER_ADVERT, ndopthdr.icmp_type)
+    self.assertEquals(len(opt), ndopthdr.opts_len)
+
+    actual_opt = self.Pref64Option(data)
+    self.assertEquals(opt, actual_opt)
+
+
 
 class PMTUTest(multinetwork_base.InboundMarkingTest):
 
diff --git a/net/test/namespace.py b/net/test/namespace.py
new file mode 100644
index 0000000..85db654
--- /dev/null
+++ b/net/test/namespace.py
@@ -0,0 +1,165 @@
+#!/usr/bin/python
+#
+# Copyright 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Namespace related support code."""
+
+import ctypes
+import ctypes.util
+import os
+import socket
+
+import net_test
+import sock_diag
+import tcp_test
+
+# //include/linux/fs.h
+MNT_FORCE       = 1         # Attempt to forcibily umount
+MNT_DETACH      = 2         # Just detach from the tree
+MNT_EXPIRE      = 4         # Mark for expiry
+UMOUNT_NOFOLLOW = 8         # Don't follow symlink on umount
+
+# //include/uapi/linux/fs.h
+MS_RDONLY       = 1         # Mount read-only
+MS_NOSUID       = 2         # Ignore suid and sgid bits
+MS_NODEV        = 4         # Disallow access to device special files
+MS_NOEXEC       = 8         # Disallow program execution
+MS_SYNCHRONOUS  = 16        # Writes are synced at once
+MS_REMOUNT      = 32        # Alter flags of a mounted FS
+MS_MANDLOCK     = 64        # Allow mandatory locks on an FS
+MS_DIRSYNC      = 128       # Directory modifications are synchronous
+MS_NOATIME      = 1024      # Do not update access times.
+MS_NODIRATIME   = 2048      # Do not update directory access times
+MS_BIND         = 4096      #
+MS_MOVE         = 8192      #
+MS_REC          = 16384     #
+MS_SILENT       = 32768     #
+MS_POSIXACL     = (1<<16)   # VFS does not apply the umask
+MS_UNBINDABLE   = (1<<17)   # change to unbindable
+MS_PRIVATE      = (1<<18)   # change to private
+MS_SLAVE        = (1<<19)   # change to slave
+MS_SHARED       = (1<<20)   # change to shared
+MS_RELATIME     = (1<<21)   # Update atime relative to mtime/ctime.
+MS_STRICTATIME  = (1<<24)   # Always perform atime updates
+MS_LAZYTIME     = (1<<25)   # Update the on-disk [acm]times lazily
+
+# //include/uapi/linux/sched.h
+CLONE_NEWNS     = 0x00020000   # New mount namespace group
+CLONE_NEWCGROUP = 0x02000000   # New cgroup namespace
+CLONE_NEWUTS    = 0x04000000   # New utsname namespace
+CLONE_NEWIPC    = 0x08000000   # New ipc namespace
+CLONE_NEWUSER   = 0x10000000   # New user namespace
+CLONE_NEWPID    = 0x20000000   # New pid namespace
+CLONE_NEWNET    = 0x40000000   # New network namespace
+
+libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
+
+# See the relevant system call's man pages and:
+#   https://docs.python.org/3/library/ctypes.html#fundamental-data-types
+libc.mount.argtypes = (ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p,
+                       ctypes.c_ulong, ctypes.c_void_p)
+libc.sethostname.argtype = (ctypes.c_char_p, ctypes.c_size_t)
+libc.umount2.argtypes = (ctypes.c_char_p, ctypes.c_int)
+libc.unshare.argtypes = (ctypes.c_int,)
+
+
+def Mount(src, tgt, fs, flags=MS_NODEV|MS_NOEXEC|MS_NOSUID|MS_RELATIME):
+  ret = libc.mount(src, tgt, fs, flags, None)
+  if ret < 0:
+    errno = ctypes.get_errno()
+    raise OSError(errno, '%s mounting %s on %s (fs=%s flags=0x%x)'
+                  % (os.strerror(errno), src, tgt, fs, flags))
+
+
+def ReMountProc():
+  libc.umount2('/proc', MNT_DETACH)  # Ignore failure: might not be mounted
+  Mount('proc', '/proc', 'proc')
+
+
+def ReMountSys():
+  libc.umount2('/sys', MNT_DETACH)  # Ignore failure: might not be mounted
+  Mount('sysfs', '/sys', 'sysfs')
+
+
+def SetFileContents(f, s):
+  open(f, 'w').write(s)
+
+
+def SetHostname(s):
+  ret = libc.sethostname(s, len(s))
+  if ret < 0:
+    errno = ctypes.get_errno()
+    raise OSError(errno, '%s while sethostname(%s)' % (os.strerror(errno), s))
+
+
+def UnShare(flags):
+  ret = libc.unshare(flags)
+  if ret < 0:
+    errno = ctypes.get_errno()
+    raise OSError(errno, '%s while unshare(0x%x)' % (os.strerror(errno), flags))
+
+
+def DumpMounts(hdr):
+  print
+  print hdr
+  print open('/proc/mounts', 'r').read(),
+  print '---'
+
+
+# Requires at least kernel configuration options:
+#   CONFIG_NAMESPACES=y
+#   CONFIG_NET_NS=y
+#   CONFIG_UTS_NS=y
+def IfPossibleEnterNewNetworkNamespace():
+  """Instantiate and transition into a fresh new network namespace if possible."""
+
+  print 'Creating clean namespace...',
+
+  try:
+    UnShare(CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWNET)
+  except OSError as err:
+    print 'failed: %s (likely: no privs or lack of kernel support).' % err
+    return False
+
+  try:
+    # DumpMounts('Before:')
+    Mount('none', '/', None, MS_REC|MS_PRIVATE)
+    ReMountProc()
+    ReMountSys()
+    # DumpMounts('After:')
+    SetHostname('netns')
+    SetFileContents('/proc/sys/net/ipv4/ping_group_range', '0 2147483647')
+    net_test.SetInterfaceUp('lo')
+  except:
+    print 'failed.'
+    # We've already transitioned into the new netns -- it's too late to recover.
+    raise
+
+  print 'succeeded.'
+  return True
+
+
+def HasEstablishedTcpSessionOnPort(port):
+  sd = sock_diag.SockDiag()
+
+  sock_id = sd._EmptyInetDiagSockId()
+  sock_id.sport = port
+
+  states = 1 << tcp_test.TCP_ESTABLISHED
+
+  matches = sd.DumpAllInetSockets(socket.IPPROTO_TCP, "",
+                                  sock_id=sock_id, states=states)
+
+  return len(matches) > 0
diff --git a/net/test/neighbour_test.py b/net/test/neighbour_test.py
index 2caba44..2cb5c23 100755
--- a/net/test/neighbour_test.py
+++ b/net/test/neighbour_test.py
@@ -55,15 +55,24 @@
   BASE_REACHABLE_TIME_MS = 2 * DELAY_TIME_MS
   MAX_REACHABLE_TIME_MS = 1.5 * BASE_REACHABLE_TIME_MS
 
+  # Kernel default unicast solicit is 3, but it need be changed larger
+  # when test recofiguration during probing
+  UCAST_SOLICIT_DEFAULT = 3
+  UCAST_SOLICIT_LARGE = 10
+
   @classmethod
   def setUpClass(cls):
     super(NeighbourTest, cls).setUpClass()
     for netid in cls.tuns:
       iface = cls.GetInterfaceName(netid)
       # This can't be set in an RA.
-      cls.SetSysctl(
-          "/proc/sys/net/ipv6/neigh/%s/delay_first_probe_time" % iface,
-          cls.DELAY_TIME_MS / 1000)
+      for proto in ["ipv4", "ipv6"]:
+          cls.SetSysctl(
+              "/proc/sys/net/%s/neigh/%s/delay_first_probe_time" % (proto, iface),
+              cls.DELAY_TIME_MS / 1000)
+          cls.SetSysctl(
+              "/proc/sys/net/%s/neigh/%s/retrans_time_ms" % (proto, iface),
+              cls.RETRANS_TIME_MS)
 
   def setUp(self):
     super(NeighbourTest, self).setUp()
@@ -87,14 +96,49 @@
     self.netid = random.choice(self.tuns.keys())
     self.ifindex = self.ifindices[self.netid]
 
-  def GetNeighbour(self, addr):
+    # MultinetworkBaseTest always uses NUD_PERMANENT for router ARP entries.
+    # Temporarily change those entries to NUD_STALE so we can test them.
+    if net_test.LINUX_VERSION < (4, 9, 0):
+      # Cannot change state from NUD_PERMANENT to NUD_STALE directly,
+      # so delete it to make it NUD_FAILED then change it to NUD_STALE.
+      router = self._RouterAddress(self.netid, 4)
+      macaddr = self.RouterMacAddress(self.netid)
+      self.iproute.DelNeighbour(4, router, macaddr, self.ifindex)
+      self.ExpectNeighbourNotification(router, NUD_FAILED)
+      self.assertNeighbourState(NUD_FAILED, router)
+    self.ChangeRouterNudState(4, NUD_STALE)
+
+  def SetUnicastSolicit(self, proto, iface, value):
+    self.SetSysctl(
+        "/proc/sys/net/%s/neigh/%s/ucast_solicit" % (proto, iface), value)
+
+  def tearDown(self):
+    super(NeighbourTest, self).tearDown()
+    # It is already reset to default by TearDownClass,
+    # but here we need to set it to default after each testcase.
+    iface = self.GetInterfaceName(self.netid)
+    for proto in ["ipv4", "ipv6"]:
+      self.SetUnicastSolicit(proto, iface, self.UCAST_SOLICIT_DEFAULT)
+
+    # Change router ARP entries back to NUD_PERMANENT,
+    # so as not to affect other tests.
+    self.ChangeRouterNudState(4, NUD_PERMANENT)
+
+  def ChangeRouterNudState(self, version, state):
+    router = self._RouterAddress(self.netid, version)
+    macaddr = self.RouterMacAddress(self.netid)
+    self.iproute.UpdateNeighbour(version, router, macaddr, self.ifindex, state)
+    self.ExpectNeighbourNotification(router, state)
+    self.assertNeighbourState(state, router)
+
+  def GetNeighbour(self, addr, ifindex):
     version = csocket.AddressVersion(addr)
-    for msg, args in self.iproute.DumpNeighbours(version):
+    for msg, args in self.iproute.DumpNeighbours(version, ifindex):
       if args["NDA_DST"] == addr:
         return msg, args
 
   def GetNdEntry(self, addr):
-    return self.GetNeighbour(addr)
+    return self.GetNeighbour(addr, self.ifindex)
 
   def CheckNoNdEvents(self):
     self.assertRaisesErrno(errno.EAGAIN, self.sock.recvfrom, 4096, MSG_PEEK)
@@ -116,8 +160,8 @@
 
   def ExpectProbe(self, is_unicast, addr):
     version = csocket.AddressVersion(addr)
+    llsrc = self.MyMacAddress(self.netid)
     if version == 6:
-      llsrc = self.MyMacAddress(self.netid)
       if is_unicast:
         src = self.MyLinkLocalAddress(self.netid)
         dst = addr
@@ -133,8 +177,15 @@
       )
       msg = "%s probe" % ("Unicast" if is_unicast else "Multicast")
       self.ExpectPacketOn(self.netid, msg, expected)
-    else:
-      raise NotImplementedError
+    else:  # version == 4
+      if is_unicast:
+        src = self._MyIPv4Address(self.netid)
+        dst = addr
+      else:
+        raise NotImplementedError("This test does not support broadcast ARP")
+      expected = scapy.ARP(psrc=src, pdst=dst, hwsrc=llsrc, op=1)
+      msg = "Unicast ARP probe"
+      self.ExpectPacketOn(self.netid, msg, expected)
 
   def ExpectUnicastProbe(self, addr):
     self.ExpectProbe(True, addr)
@@ -160,6 +211,14 @@
     else:
       raise NotImplementedError
 
+  def SendDnsRequest(self, addr):
+    version = csocket.AddressVersion(addr)
+    routing_mode = random.choice(["mark", "oif", "uid"])
+    s = self.BuildSocket(version, net_test.UDPSocket, self.netid, routing_mode)
+    s.connect((addr, 53))
+    s.send(net_test.UDP_PAYLOAD)
+    return s
+
   def MonitorSleepMs(self, interval, addr):
     slept = 0
     while slept < interval:
@@ -191,14 +250,11 @@
     """
     router4 = self._RouterAddress(self.netid, 4)
     router6 = self._RouterAddress(self.netid, 6)
-    self.assertNeighbourState(NUD_PERMANENT, router4)
+    self.assertNeighbourState(NUD_STALE, router4)
     self.assertNeighbourState(NUD_STALE, router6)
 
     # Send a packet and check that we go into DELAY.
-    routing_mode = random.choice(["mark", "oif", "uid"])
-    s = self.BuildSocket(6, net_test.UDPSocket, self.netid, routing_mode)
-    s.connect((net_test.IPV6_ADDR, 53))
-    s.send(net_test.UDP_PAYLOAD)
+    s = self.SendDnsRequest(net_test.IPV6_ADDR)
     self.assertNeighbourState(NUD_DELAY, router6)
 
     # Wait for the probe interval, then check that we're in PROBE, and that the
@@ -249,17 +305,19 @@
     router4 = self._RouterAddress(self.netid, 4)
     router6 = self._RouterAddress(self.netid, 6)
     routermac = self.RouterMacAddress(self.netid)
-    self.assertNeighbourState(NUD_PERMANENT, router4)
+    self.assertNeighbourState(NUD_STALE, router4)
     self.assertNeighbourState(NUD_STALE, router6)
 
     def ForceProbe(addr, mac):
       self.iproute.UpdateNeighbour(6, addr, None, self.ifindex, NUD_PROBE)
       self.assertNeighbourState(NUD_PROBE, addr)
+      self.ExpectNeighbourNotification(addr, NUD_PROBE)
       self.SleepMs(1)  # TODO: Why is this necessary?
       self.assertNeighbourState(NUD_PROBE, addr)
       self.ExpectUnicastProbe(addr)
       self.ReceiveUnicastAdvertisement(addr, mac)
       self.assertNeighbourState(NUD_REACHABLE, addr)
+      self.ExpectNeighbourNotification(addr, NUD_REACHABLE)
 
     for _ in xrange(5):
       ForceProbe(router6, routermac)
@@ -277,10 +335,7 @@
     time.sleep(1)
 
     # Send another packet and expect a multicast NS.
-    routing_mode = random.choice(["mark", "oif", "uid"])
-    s = self.BuildSocket(6, net_test.UDPSocket, self.netid, routing_mode)
-    s.connect((net_test.IPV6_ADDR, 53))
-    s.send(net_test.UDP_PAYLOAD)
+    self.SendDnsRequest(net_test.IPV6_ADDR)
     self.ExpectMulticastNS(router6)
 
     # Receive a unicast NA with the R flag set to 0.
@@ -293,6 +348,44 @@
     self.ExpectNeighbourNotification(router6, NUD_REACHABLE)
     self.assertNeighbourState(NUD_REACHABLE, router6)
 
+  def DoReconfigureDuringProbing(self, version):
+    if version == 6:
+      proto = "ipv6"
+      ip_addr = net_test.IPV6_ADDR
+    else:
+      proto = "ipv4"
+      ip_addr = net_test.IPV4_ADDR
+    router = self._RouterAddress(self.netid, version)
+    self.assertNeighbourState(NUD_STALE, router)
+
+    iface = self.GetInterfaceName(self.netid)
+    # set unicast solicit larger.
+    self.SetUnicastSolicit(proto, iface, self.UCAST_SOLICIT_LARGE)
+
+    # Send a packet and check that we go into DELAY.
+    self.SendDnsRequest(ip_addr)
+    self.assertNeighbourState(NUD_DELAY, router)
+
+    # Probing 4 times but no response
+    self.SleepMs(self.DELAY_TIME_MS * 1.1)
+    self.ExpectNeighbourNotification(router, NUD_PROBE)
+    self.assertNeighbourState(NUD_PROBE, router)
+    self.ExpectUnicastProbe(router)
+
+    for i in range(0, 3):
+      self.SleepMs(self.RETRANS_TIME_MS)
+      self.ExpectUnicastProbe(router)
+
+    # reconfiguration to 3 while probing; the state changes to NUD_FAILED
+    self.SetUnicastSolicit(proto, iface, self.UCAST_SOLICIT_DEFAULT)
+    self.SleepMs(self.RETRANS_TIME_MS)
+    self.ExpectNeighbourNotification(router, NUD_FAILED)
+    self.assertNeighbourState(NUD_FAILED, router)
+
+  # Check neighbor state after re-config ARP probe times.
+  def testReconfigureDuringProbing(self):
+    self.DoReconfigureDuringProbing(4)
+    self.DoReconfigureDuringProbing(6)
 
 if __name__ == "__main__":
   unittest.main()
diff --git a/net/test/net_test.py b/net/test/net_test.py
index 6b19f54..1c7f32f 100755
--- a/net/test/net_test.py
+++ b/net/test/net_test.py
@@ -369,17 +369,17 @@
 
   def __enter__(self):
     if self.uid:
-      self.saved_uid = os.geteuid()
+      self.saved_uids = os.getresuid()
       self.saved_groups = os.getgroups()
       os.setgroups(self.saved_groups + [AID_INET])
-      os.seteuid(self.uid)
+      os.setresuid(self.uid, self.uid, self.saved_uids[0])
     if self.gid:
       self.saved_gid = os.getgid()
       os.setgid(self.gid)
 
   def __exit__(self, unused_type, unused_value, unused_traceback):
     if self.uid:
-      os.seteuid(self.saved_uid)
+      os.setresuid(*self.saved_uids)
       os.setgroups(self.saved_groups)
     if self.gid:
       os.setgid(self.saved_gid)
@@ -390,7 +390,6 @@
   def __init__(self, uid):
     RunAsUidGid.__init__(self, uid, 0)
 
-
 class NetworkTest(unittest.TestCase):
 
   def assertRaisesErrno(self, err_num, f=None, *args):
@@ -433,7 +432,7 @@
 
     if protocol.startswith("tcp"):
       # Real sockets have 5 extra numbers, timewait sockets have none.
-      end_regexp = "(| +[0-9]+ [0-9]+ [0-9]+ [0-9]+ -?[0-9]+|)$"
+      end_regexp = "(| +[0-9]+ [0-9]+ [0-9]+ [0-9]+ -?[0-9]+)$"
     elif re.match("icmp|udp|raw", protocol):
       # Drops.
       end_regexp = " +([0-9]+) *$"
@@ -458,8 +457,11 @@
     # TODO: consider returning a dict or namedtuple instead.
     out = []
     for line in lines:
+      m = regexp.match(line)
+      if m is None:
+        raise ValueError("Failed match on [%s]" % line)
       (_, src, dst, state, mem,
-       _, _, uid, _, _, refcnt, _, extra) = regexp.match(line).groups()
+       _, _, uid, _, _, refcnt, _, extra) = m.groups()
       out.append([src, dst, state, mem, uid, refcnt, extra])
     return out
 
diff --git a/net/test/net_test.sh b/net/test/net_test.sh
index e2ed6b1..6a22c0e 100755
--- a/net/test/net_test.sh
+++ b/net/test/net_test.sh
@@ -1,4 +1,144 @@
 #!/bin/bash
+if [[ -n "${verbose}" ]]; then
+  echo 'Current working directory:'
+  echo " - according to builtin:  [$(pwd)]"
+  echo " - according to /bin/pwd: [$(/bin/pwd)]"
+  echo
+
+  echo 'Shell environment:'
+  env
+  echo
+
+  echo -n "net_test.sh (pid $$, parent ${PPID}, tty $(tty)) running [$0] with args:"
+  for arg in "$@"; do
+    echo -n " [${arg}]"
+  done
+  echo
+  echo
+fi
+
+if [[ "$(tty)" == 'not a tty' ]]; then
+  echo 'not a tty? perhaps not quite real kernel default /dev/console - trying to fix.'
+  if [[ -c /dev/console ]]; then
+    [[ "$(readlink /proc/$$/fd/0)" != '/dev/console' ]] || exec < /dev/console
+    [[ "$(readlink /proc/$$/fd/1)" != '/dev/console' ]] || exec > /dev/console
+    [[ "$(readlink /proc/$$/fd/2)" != '/dev/console' ]] || exec 2> /dev/console
+  fi
+fi
+
+if [[ "$(tty)" == '/dev/console' ]]; then
+  ARCH="$(uname -m)"
+  # Underscore is illegal in hostname, replace with hyphen
+  ARCH="${ARCH//_/-}"
+
+  # setsid + /dev/tty{,AMA,S}0 allows bash's job control to work, ie. Ctrl+C/Z
+  if [[ -e '/proc/exitcode' ]]; then
+    # exists only in UML
+    CON='/dev/tty0'
+    hostname "uml-${ARCH}"
+  elif [[ -c '/dev/ttyAMA0' ]]; then
+    # Qemu for arm (note: /dev/ttyS0 also exists for exitcode)
+    CON='/dev/ttyAMA0'
+    hostname "qemu-${ARCH}"
+  elif [[ -c '/dev/ttyS0' ]]; then
+    # Qemu for x86 (note: /dev/ttyS1 also exists for exitcode)
+    CON='/dev/ttyS0'
+    hostname "qemu-${ARCH}"
+  else
+    # Can't figure it out, job control won't work, tough luck
+    echo 'Unable to figure out proper console - job control will not work.' >&2
+    CON=''
+    hostname "local-${ARCH}"
+  fi
+
+  unset ARCH
+
+  echo -n "$(hostname): Currently tty[/dev/console], but it should be [${CON}]..."
+
+  if [[ -n "${CON}" ]]; then
+    # Redirect std{in,out,err} to the console equivalent tty
+    # which actually supports all standard tty ioctls
+    exec <"${CON}" >&"${CON}"
+
+    # Bash wants to be session leader, hence need for setsid
+    echo " re-executing..."
+    exec /usr/bin/setsid "$0" "$@"
+    # If the above exec fails, we just fall through...
+    # (this implies failure to *find* setsid, not error return from bash,
+    #  in practice due to image construction this cannot happen)
+  else
+    echo
+  fi
+
+  # In case we fall through, clean up
+  unset CON
+fi
+
+if [[ -n "${verbose}" ]]; then
+  echo 'TTY settings:'
+  stty
+  echo
+
+  echo 'TTY settings (verbose):'
+  stty -a
+  echo
+
+  echo 'Restoring TTY sanity...'
+fi
+
+stty sane
+stty 115200
+[[ -z "${console_cols}" ]] || stty columns "${console_cols}"
+[[ -z "${console_rows}" ]] || stty rows    "${console_rows}"
+
+if [[ -n "${verbose}" ]]; then
+  echo
+
+  echo 'TTY settings:'
+  stty
+  echo
+
+  echo 'TTY settings (verbose):'
+  stty -a
+  echo
+fi
+
+# By the time we get here we should have a sane console:
+#  - 115200 baud rate
+#  - appropriate (and known) width and height (note: this assumes
+#    that the terminal doesn't get further resized)
+#  - it is no longer /dev/console, so job control should function
+#    (this means working ctrl+c [abort] and ctrl+z [suspend])
+
+
+# This defaults to 60 which is needlessly long during boot
+# (we will reset it back to the default later)
+echo 0 > /proc/sys/kernel/random/urandom_min_reseed_secs
+
+if [[ -n "${entropy}" ]]; then
+  echo "adding entropy from hex string [${entropy}]" >&2
+
+  # In kernel/include/uapi/linux/random.h RNDADDENTROPY is defined as
+  # _IOW('R', 0x03, int[2]) =(R is 0x52)= 0x40085203 = 1074287107
+  /usr/bin/python 3>/dev/random <<EOF
+import fcntl, struct
+rnd = '${entropy}'.decode('base64')
+fcntl.ioctl(3, 0x40085203, struct.pack('ii', len(rnd) * 8, len(rnd)) + rnd)
+EOF
+
+fi
+
+# Make sure the urandom pool has a chance to initialize before we reset
+# the reseed timer back to 60 seconds.  One timer tick should be enough.
+sleep 1.1
+
+# By this point either 'random: crng init done' (newer kernels)
+# or 'random: nonblocking pool is initialized' (older kernels)
+# should have been printed out to dmesg/console.
+
+# Reset it back to boot time default
+echo 60 > /proc/sys/kernel/random/urandom_min_reseed_secs
+
 
 # In case IPv6 is compiled as a module.
 [ -f /proc/net/if_inet6 ] || insmod $DIR/kernel/net-next/net/ipv6/ipv6.ko
@@ -9,7 +149,7 @@
 ip link set eth0 up
 
 # Allow people to run ping.
-echo "0 65536" > /proc/sys/net/ipv4/ping_group_range
+echo '0 2147483647' > /proc/sys/net/ipv4/ping_group_range
 
 # Read environment variables passed to the kernel to determine if script is
 # running on builder and to find which test to run.
diff --git a/net/test/netlink.py b/net/test/netlink.py
index ceb547b..4e230d4 100644
--- a/net/test/netlink.py
+++ b/net/test/netlink.py
@@ -146,17 +146,17 @@
 
     return attributes
 
-  def _OpenNetlinkSocket(self, family, groups=None):
+  def _OpenNetlinkSocket(self, family, groups):
     sock = socket.socket(socket.AF_NETLINK, socket.SOCK_RAW, family)
     if groups:
       sock.bind((0,  groups))
     sock.connect((0, 0))  # The kernel.
     return sock
 
-  def __init__(self, family):
+  def __init__(self, family, groups=None):
     # Global sequence number.
     self.seq = 0
-    self.sock = self._OpenNetlinkSocket(family)
+    self.sock = self._OpenNetlinkSocket(family, groups)
     self.pid = self.sock.getsockname()[1]
 
   def MaybeDebugCommand(self, command, flags, data):
diff --git a/net/test/nf_test.py b/net/test/nf_test.py
new file mode 100755
index 0000000..cd6c976
--- /dev/null
+++ b/net/test/nf_test.py
@@ -0,0 +1,86 @@
+#!/usr/bin/python
+#
+# Copyright 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import errno
+from socket import *
+
+import multinetwork_base
+import net_test
+
+_TEST_IP4_ADDR = "192.0.2.1"
+_TEST_IP6_ADDR = "2001:db8::"
+
+
+# Regression tests for interactions between kernel networking and netfilter
+#
+# These tests were added to ensure that the lookup path for local-ICMP errors
+# do not cause failures. Specifically, local-ICMP packets do not have a
+# net_device in the skb, and has been known to trigger bugs in surrounding code.
+class NetilterRejectTargetTest(multinetwork_base.MultiNetworkBaseTest):
+
+  def setUp(self):
+    multinetwork_base.MultiNetworkBaseTest.setUp(self)
+    net_test.RunIptablesCommand(4, "-A OUTPUT -d " + _TEST_IP4_ADDR + " -j REJECT")
+    net_test.RunIptablesCommand(6, "-A OUTPUT -d " + _TEST_IP6_ADDR + " -j REJECT")
+
+  def tearDown(self):
+    net_test.RunIptablesCommand(4, "-D OUTPUT -d " + _TEST_IP4_ADDR + " -j REJECT")
+    net_test.RunIptablesCommand(6, "-D OUTPUT -d " + _TEST_IP6_ADDR + " -j REJECT")
+    multinetwork_base.MultiNetworkBaseTest.tearDown(self)
+
+  # Test a rejected TCP connect. The responding ICMP may not have skb->dev set.
+  # This tests the local-ICMP output-input path.
+  def CheckRejectedTcp(self, version, addr):
+    sock = net_test.TCPSocket(net_test.GetAddressFamily(version))
+    netid = self.RandomNetid()
+    self.SelectInterface(sock, netid, "mark")
+
+    # Expect this to fail with ICMP unreachable
+    try:
+      sock.connect((addr, 53))
+    except IOError:
+      pass
+
+  def testRejectTcp4(self):
+    self.CheckRejectedTcp(4, _TEST_IP4_ADDR)
+
+  def testRejectTcp6(self):
+    self.CheckRejectedTcp(6, _TEST_IP6_ADDR)
+
+  # Test a rejected UDP connect. The responding ICMP may not have skb->dev set.
+  # This tests the local-ICMP output-input path.
+  def CheckRejectedUdp(self, version, addr):
+    sock = net_test.UDPSocket(net_test.GetAddressFamily(version))
+    netid = self.RandomNetid()
+    self.SelectInterface(sock, netid, "mark")
+
+    # Expect this to fail with ICMP unreachable
+    try:
+      sock.sendto(net_test.UDP_PAYLOAD, (addr, 53))
+    except IOError:
+      pass
+
+  def testRejectUdp4(self):
+    self.CheckRejectedUdp(4, _TEST_IP4_ADDR)
+
+  def testRejectUdp6(self):
+    self.CheckRejectedUdp(6, _TEST_IP6_ADDR)
+
+
+if __name__ == "__main__":
+  unittest.main()
\ No newline at end of file
diff --git a/net/test/no_test b/net/test/no_test
new file mode 100755
index 0000000..b23e556
--- /dev/null
+++ b/net/test/no_test
@@ -0,0 +1 @@
+#!/bin/true
diff --git a/net/test/parallel_tests.sh b/net/test/parallel_tests.sh
index 93a43c8..eb67421 100755
--- a/net/test/parallel_tests.sh
+++ b/net/test/parallel_tests.sh
@@ -15,7 +15,7 @@
   local test=$3
   local j=0
   while ((j < runs)); do
-    $DIR/run_net_test.sh --readonly --builder --nobuild $test \
+    $DIR/run_net_test.sh --builder --nobuild $test \
         > /dev/null 2> $RESULTSDIR/results.$worker.$j
     j=$((j + 1))
     echo -n "." >&2
diff --git a/net/test/policy_crash_test.py b/net/test/policy_crash_test.py
new file mode 100755
index 0000000..536f96d
--- /dev/null
+++ b/net/test/policy_crash_test.py
@@ -0,0 +1,135 @@
+#!/usr/bin/python
+#
+# Copyright 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ----------------------------------------------------------------------
+
+# This triggers a kernel panic on 4.9.114+ which is fixed in 4.9.136
+#
+# Crash was introduced by ad8b1ffc3efae2f65080bdb11145c87d299b8f9a
+# and reverted in 2edec22d18758c9b29301ded2291f051d65422e9
+
+# ----------------------------------------------------------------------
+
+# Modules linked in:
+# Pid: 305, comm: python Not tainted 4.9.114
+# RIP: 0033:[<0000000060272d73>]
+# RSP: 000000007fd09a10  EFLAGS: 00010246
+# RAX: 0000000060492fa8 RBX: 0000000060272b18 RCX: 000000007ff412a8
+# RDX: 000000007ff41288 RSI: 000000007fd09a98 RDI: 000000007ff14a00
+# RBP: 000000007fd09a40 R08: 0000000000000001 R09: 0100000000000000
+# R10: 0000000000000000 R11: 000000007ff412a8 R12: 0000000000010002
+# R13: 000000000000000a R14: 0000000000000000 R15: 0000000000000000
+# Kernel panic - not syncing: Kernel mode fault at addr 0x48, ip 0x60272d73
+# CPU: 0 PID: 305 Comm: python Not tainted 4.9.114 #7
+# Stack:
+#  7fcd5000 7ff411e0 7ff14a00 7ff41000
+#  00000000 00000000 7fd09b00 6031acd9
+#  00000000 7ff41288 7ff4100c 100000003
+# Call Trace:
+#  [<6031acd9>] ip6t_do_table+0x2a3/0x3d4
+#  [<6026d300>] ? netfilter_net_init+0xd5/0x14f
+#  [<6026d37a>] ? nf_iterate+0x0/0x5c
+#  [<6031c99d>] ip6table_filter_hook+0x21/0x23
+#  [<6026d3b2>] nf_iterate+0x38/0x5c
+#  [<6026d40a>] nf_hook_slow+0x34/0xa2
+#  [<6003166c>] ? set_signals+0x0/0x3f
+#  [<6003165d>] ? get_signals+0x0/0xf
+#  [<603045d4>] rawv6_sendmsg+0x842/0xc4b
+#  [<60033d15>] ? wait_stub_done+0x40/0x10a
+#  [<60021176>] ? copy_chunk_from_user+0x23/0x2e
+#  [<60021153>] ? copy_chunk_from_user+0x0/0x2e
+#  [<60302da3>] ? dst_output+0x0/0x11
+#  [<602b063a>] inet_sendmsg+0x1e/0x5c
+#  [<600fe142>] ? __fdget+0x15/0x17
+#  [<6022636c>] sock_sendmsg+0xf/0x62
+#  [<6022785d>] SyS_sendto+0x108/0x140
+#  [<600389c2>] ? arch_switch_to+0x2b/0x2e
+#  [<60367ce4>] ? __schedule+0x428/0x44f
+#  [<603678bc>] ? __schedule+0x0/0x44f
+#  [<60021125>] handle_syscall+0x79/0xa7
+#  [<6003445c>] userspace+0x3bb/0x453
+#  [<6001dd92>] ? interrupt_end+0x0/0x94
+#  [<6001dc42>] fork_handler+0x85/0x87
+#
+# /android/kernel/tests/net/test/run_net_test.sh: line 397: 50828 Aborted
+# $KERNEL_BINARY umid=net_test mem=512M $blockdevice=$SCRIPT_DIR/$ROOTFS $netconfig $consolemode $cmdline 1>&2
+# Returning exit code 134.
+
+# ----------------------------------------------------------------------
+
+import os
+import socket
+import unittest
+
+import net_test
+
+class RemovedFeatureTest(net_test.NetworkTest):
+
+  def setUp(self):
+    net_test.RunIptablesCommand(6, "-I OUTPUT 1 -m policy --dir out --pol ipsec")
+
+  def tearDown(self):
+    net_test.RunIptablesCommand(6, "-D OUTPUT -m policy --dir out --pol ipsec")
+
+  def testPolicyNetfilterFragPanic(self):
+    ipv6_min_mtu = 1280
+    ipv6_header_size = 40
+    ipv6_frag_header_size = 8
+
+    pkt1_frag_len = ipv6_min_mtu - ipv6_header_size - ipv6_frag_header_size
+    pkt2_frag_len = 1
+
+    ip6loopback = '00000000000000000000000000000001'   # ::1
+
+    # 40 byte IPv6 header
+    ver6 = '6'
+    tclass = '00'
+    flowlbl = '00000'
+    # (uint16) payload length - of rest of packets in octets
+    pkt1_plen = '%04x' % (ipv6_frag_header_size + pkt1_frag_len)
+    pkt2_plen = '%04x' % (ipv6_frag_header_size + pkt2_frag_len)
+    nexthdr = '2c'   # = 44 IPv6-Frag
+    hoplimit = '00'
+    src = ip6loopback
+    dst = ip6loopback
+
+    # 8 byte IPv6 fragmentation header
+    frag_nexthdr = '00'
+    frag_reserved = '00'
+    # 13-bit offset, 2-bit reserved, 1-bit M[ore] flag
+    pkt1_frag_offset = '0001'
+    pkt2_frag_offset = '%04x' % pkt1_frag_len
+    frag_identification = '00000000'
+
+    # Fragmentation payload
+    pkt1_frag_payload = '00' * pkt1_frag_len
+    pkt2_frag_payload = '00' * pkt2_frag_len
+
+    pkt1 = (ver6 + tclass + flowlbl + pkt1_plen + nexthdr + hoplimit + src + dst
+         + frag_nexthdr + frag_reserved + pkt1_frag_offset + frag_identification
+         + pkt1_frag_payload)
+    pkt2 = (ver6 + tclass + flowlbl + pkt2_plen + nexthdr + hoplimit + src + dst
+         + frag_nexthdr + frag_reserved + pkt2_frag_offset + frag_identification
+         + pkt2_frag_payload)
+
+    s = socket.socket(socket.AF_INET6, socket.SOCK_RAW, socket.IPPROTO_RAW)
+    s.sendto(pkt1.decode('hex'), ('::1', 0))
+    s.sendto(pkt2.decode('hex'), ('::1', 0))
+    s.close()
+
+
+if __name__ == "__main__":
+  unittest.main()
diff --git a/net/test/rootfs/stretch.list b/net/test/rootfs/bullseye.list
similarity index 87%
rename from net/test/rootfs/stretch.list
rename to net/test/rootfs/bullseye.list
index e6c0177..b749472 100644
--- a/net/test/rootfs/stretch.list
+++ b/net/test/rootfs/bullseye.list
@@ -8,6 +8,7 @@
 ifupdown
 insserv
 iputils-ping
+iptables
 less
 libnetfilter-conntrack3
 libnfnetlink0
@@ -17,15 +18,17 @@
 netcat-traditional
 net-tools
 openssl
+pciutils
 procps
 psmisc
 python
-python-scapy
+python3-scapy
 strace
 systemd-sysv
 tcpdump
 traceroute
 udev
 udhcpc
+usbutils
 vim-tiny
 wget
diff --git a/net/test/rootfs/stretch.sh b/net/test/rootfs/bullseye.sh
similarity index 76%
rename from net/test/rootfs/stretch.sh
rename to net/test/rootfs/bullseye.sh
index 6d8a9a4..d198aac 100755
--- a/net/test/rootfs/stretch.sh
+++ b/net/test/rootfs/bullseye.sh
@@ -27,17 +27,8 @@
 
 # Add the needed debian sources
 cat >/etc/apt/sources.list <<EOF
-deb http://ftp.debian.org/debian stretch main
-deb-src http://ftp.debian.org/debian stretch main
-deb http://ftp.debian.org/debian stretch-backports main
-deb-src http://ftp.debian.org/debian stretch-backports main
-deb http://ftp.debian.org/debian buster main
-deb-src http://ftp.debian.org/debian buster main
-EOF
-
-# Make sure apt doesn't want to install from buster by default
-cat >/etc/apt/apt.conf.d/80default <<EOF
-APT::Default-Release "stretch";
+deb http://ftp.debian.org/debian bullseye main
+deb-src http://ftp.debian.org/debian bullseye main
 EOF
 
 # Disable the automatic installation of recommended packages
@@ -45,23 +36,13 @@
 APT::Install-Recommends "0";
 EOF
 
-# Deprioritize buster, so it must be specified manually
-cat >/etc/apt/preferences.d/90buster <<EOF
-Package: *
-Pin: release a=buster
-Pin-Priority: 90
-EOF
-
 # Update for the above changes
 apt-get update
 
-# Install python-scapy from buster, because stretch's version is broken
-apt-get install -y -t buster python-scapy
-
 # Note what we have installed; we will go back to this
 LANG=C dpkg --get-selections | sort >originally-installed
 
-# Install everything needed from stretch to build iptables
+# Install everything needed from bullseye to build iptables
 apt-get install -y \
   build-essential \
   autoconf \
@@ -77,25 +58,23 @@
   libnftnl-dev \
   libtool
 
-# Install newer linux-libc headers (these are from 4.16)
-apt-get install -y -t stretch-backports linux-libc-dev
-
 # We are done with apt; reclaim the disk space
 apt-get clean
 
 # Construct the iptables source package to build
-iptables=iptables-1.6.1
+iptables=iptables-1.8.4
 mkdir -p /usr/src/$iptables
 
 cd /usr/src/$iptables
 # Download a specific revision of iptables from AOSP
-aosp_iptables=android-wear-p-preview-2
 wget -qO - \
-  https://android.googlesource.com/platform/external/iptables/+archive/$aosp_iptables.tar.gz | \
+  https://android.googlesource.com/platform/external/iptables/+archive/master.tar.gz | \
   tar -zxf -
 # Download a compatible 'debian' overlay from Debian salsa
 # We don't want all of the sources, just the Debian modifications
-debian_iptables=1.6.1-2_bpo9+1
+# NOTE: This will only work if Android always uses a version of iptables that exists
+#       for Debian as well.
+debian_iptables=1.8.4-3
 debian_iptables_dir=pkg-iptables-debian-$debian_iptables
 wget -qO - \
   https://salsa.debian.org/pkg-netfilter-team/pkg-iptables/-/archive/debian/$debian_iptables/$debian_iptables_dir.tar.gz | \
@@ -144,7 +123,8 @@
   /etc/systemd/system/getty.target.wants/serial-getty\@ttyS0.service
 
 # systemd needs some directories to be created
-mkdir -p /var/lib/systemd/coredump /var/lib/systemd/rfkill
+mkdir -p /var/lib/systemd/coredump /var/lib/systemd/rfkill \
+  /var/lib/systemd/timesync
 
 # Finalize and tidy up the created image
 chroot_cleanup
diff --git a/net/test/rootfs/net_test.sh b/net/test/rootfs/net_test.sh
index 072a8a3..34f4a01 100755
--- a/net/test/rootfs/net_test.sh
+++ b/net/test/rootfs/net_test.sh
@@ -15,20 +15,23 @@
 # limitations under the License.
 #
 
+set -e
+set -u
+
 mount -t proc none /proc
-mount -t sys none /sys
+mount -t sysfs none /sys
 mount -t tmpfs tmpfs /tmp
 mount -t tmpfs tmpfs /run
 
 # If this system was booted under UML, it will always have a /proc/exitcode
 # file. If it was booted natively or under QEMU, it will not have this file.
-if [ -e /proc/exitcode ]; then
+if [[ -e /proc/exitcode ]]; then
   mount -t hostfs hostfs /host
 else
   mount -t 9p -o trans=virtio,version=9p2000.L host /host
 fi
 
-test=$(cat /proc/cmdline | sed -re 's/.*net_test=([^ ]*).*/\1/g')
-cd $(dirname $test)
+test="$(sed -r 's/.*net_test=([^ ]*).*/\1/g' < /proc/cmdline)"
+cd "$(dirname "${test}")"
 ./net_test.sh
 poweroff -f
diff --git a/net/test/rootfs/wheezy.list b/net/test/rootfs/wheezy.list
deleted file mode 100644
index 44e3d85..0000000
--- a/net/test/rootfs/wheezy.list
+++ /dev/null
@@ -1,33 +0,0 @@
-adduser
-apt
-apt-utils
-bash-completion
-binutils
-bsdmainutils
-ca-certificates
-file
-gpgv
-ifupdown
-insserv
-iptables
-iputils-ping
-less
-libpopt0
-mime-support
-netbase
-netcat6
-netcat-traditional
-net-tools
-module-init-tools
-openssl
-procps
-psmisc
-python2.7
-python-scapy
-strace
-tcpdump
-traceroute
-udev
-udhcpc
-vim-tiny
-wget
diff --git a/net/test/rootfs/wheezy.sh b/net/test/rootfs/wheezy.sh
deleted file mode 100755
index 81cfad7..0000000
--- a/net/test/rootfs/wheezy.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# NOTE: It is highly recommended that you do not create new wheezy rootfs
-#       images. This script is here for forensic purposes only, to understand
-#       how the original rootfs was created.
-
-set -e
-
-SCRIPT_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd -P)
-
-. $SCRIPT_DIR/common.sh
-
-chroot_sanity_check
-
-# Remove things pulled in by debootstrap that we do not need
-dpkg -P \
-  debconf-i18n \
-  liblocale-gettext-perl \
-  libtext-charwidth-perl \
-  libtext-iconv-perl \
-  libtext-wrapi18n-perl \
-  python2.6 \
-  python2.6-minimal \
-  xz-utils
-
-# We are done with apt; reclaim the disk space
-apt-get clean
-
-# Ensure a getty is spawned on ttyS0, if booting the image manually
-# This also removes the vt gettys, as we may have no vt
-sed -i '/tty[123456]/d' /etc/inittab
-echo "s0:1235:respawn:/sbin/getty 115200 ttyS0 linux" >>/etc/inittab
-
-# Finalize and tidy up the created image
-chroot_cleanup
diff --git a/net/test/run_net_test.sh b/net/test/run_net_test.sh
index 4ae7705..d1a66f5 100755
--- a/net/test/run_net_test.sh
+++ b/net/test/run_net_test.sh
@@ -1,7 +1,19 @@
 #!/bin/bash
 
+# Builds mysteriously fail if stdout is non-blocking.
+fixup_ptys() {
+  python << 'EOF'
+import fcntl, os, sys
+fd = sys.stdout.fileno()
+flags = fcntl.fcntl(fd, fcntl.F_GETFL)
+flags &= ~(fcntl.FASYNC | os.O_NONBLOCK | os.O_APPEND)
+fcntl.fcntl(fd, fcntl.F_SETFL, flags)
+EOF
+}
+
 # Common kernel options
 OPTIONS=" DEBUG_SPINLOCK DEBUG_ATOMIC_SLEEP DEBUG_MUTEXES DEBUG_RT_MUTEXES"
+OPTIONS="$OPTIONS WARN_ALL_UNSEEDED_RANDOM IKCONFIG IKCONFIG_PROC"
 OPTIONS="$OPTIONS DEVTMPFS DEVTMPFS_MOUNT FHANDLE"
 OPTIONS="$OPTIONS IPV6 IPV6_ROUTER_PREF IPV6_MULTIPLE_TABLES IPV6_ROUTE_INFO"
 OPTIONS="$OPTIONS TUN SYN_COOKIES IP_ADVANCED_ROUTER IP_MULTIPLE_TABLES"
@@ -12,12 +24,13 @@
 OPTIONS="$OPTIONS IPV6_OPTIMISTIC_DAD"
 OPTIONS="$OPTIONS IPV6_ROUTE_INFO IPV6_ROUTER_PREF"
 OPTIONS="$OPTIONS NETFILTER_XT_TARGET_NFLOG"
+OPTIONS="$OPTIONS NETFILTER_XT_MATCH_POLICY"
 OPTIONS="$OPTIONS NETFILTER_XT_MATCH_QUOTA"
 OPTIONS="$OPTIONS NETFILTER_XT_MATCH_QUOTA2"
 OPTIONS="$OPTIONS NETFILTER_XT_MATCH_QUOTA2_LOG"
 OPTIONS="$OPTIONS NETFILTER_XT_MATCH_SOCKET"
 OPTIONS="$OPTIONS NETFILTER_XT_MATCH_QTAGUID"
-OPTIONS="$OPTIONS INET_UDP_DIAG INET_DIAG_DESTROY"
+OPTIONS="$OPTIONS INET_DIAG INET_UDP_DIAG INET_DIAG_DESTROY"
 OPTIONS="$OPTIONS IP_SCTP"
 OPTIONS="$OPTIONS IP_NF_TARGET_REJECT IP_NF_TARGET_REJECT_SKERR"
 OPTIONS="$OPTIONS IP6_NF_TARGET_REJECT IP6_NF_TARGET_REJECT_SKERR"
@@ -28,6 +41,7 @@
 OPTIONS="$OPTIONS INET6_XFRM_MODE_TRANSPORT INET6_XFRM_MODE_TUNNEL"
 OPTIONS="$OPTIONS CRYPTO_SHA256 CRYPTO_SHA512 CRYPTO_AES_X86_64 CRYPTO_NULL"
 OPTIONS="$OPTIONS CRYPTO_GCM CRYPTO_ECHAINIV NET_IPVTI"
+OPTIONS="$OPTIONS DUMMY"
 
 # Kernel version specific options
 OPTIONS="$OPTIONS XFRM_INTERFACE"                # Various device kernels
@@ -45,7 +59,8 @@
 OPTIONS="$OPTIONS BLK_DEV_UBD HOSTFS"
 
 # QEMU specific options
-OPTIONS="$OPTIONS VIRTIO VIRTIO_PCI VIRTIO_BLK NET_9P NET_9P_VIRTIO 9P_FS"
+OPTIONS="$OPTIONS PCI VIRTIO VIRTIO_PCI VIRTIO_BLK NET_9P NET_9P_VIRTIO 9P_FS"
+OPTIONS="$OPTIONS CRYPTO_DEV_VIRTIO SERIAL_8250 SERIAL_8250_PCI"
 
 # Obsolete options present at some time in Android kernels
 OPTIONS="$OPTIONS IP_NF_TARGET_REJECT_SKERR IP6_NF_TARGET_REJECT_SKERR"
@@ -53,9 +68,6 @@
 # These two break the flo kernel due to differences in -Werror on recent GCC.
 DISABLE_OPTIONS=" REISERFS_FS ANDROID_PMEM"
 
-# This one breaks the fugu kernel due to a nonexistent sem_wait_array.
-DISABLE_OPTIONS="$DISABLE_OPTIONS SYSVIPC"
-
 # How many TAP interfaces to create to provide the VM with real network access
 # via the host. This requires privileges (e.g., root access) on the host.
 #
@@ -69,13 +81,13 @@
 NUMTAPINTERFACES=0
 
 # The root filesystem disk image we'll use.
-ROOTFS=net_test.rootfs.20150203
+ROOTFS=${ROOTFS:-net_test.rootfs.20150203}
 COMPRESSED_ROOTFS=$ROOTFS.xz
 URL=https://dl.google.com/dl/android/$COMPRESSED_ROOTFS
 
 # Parse arguments and figure out which test to run.
 ARCH=${ARCH:-um}
-J=${J:-64}
+J=${J:-$(nproc)}
 MAKE="make"
 OUT_DIR=$(readlink -f ${OUT_DIR:-.})
 KERNEL_DIR=$(readlink -f ${KERNEL_DIR:-.})
@@ -89,24 +101,61 @@
 netconfig=
 testmode=
 cmdline=
-nowrite=0
+nowrite=1
 nobuild=0
 norun=0
 
-while [ -n "$1" ]; do
-  if [ "$1" = "--builder" ]; then
+if [[ -z "${DEFCONFIG}" ]]; then
+  case "${ARCH}" in
+    um)
+      export DEFCONFIG=defconfig
+      ;;
+    arm64)
+      if [[ -e arch/arm64/configs/gki_defconfig ]]; then
+        export DEFCONFIG=gki_defconfig
+      elif [[ -e arch/arm64/configs/cuttlefish_defconfig ]]; then
+        export DEFCONFIG=cuttlefish_defconfig
+      fi
+      ;;
+    x86_64)
+      if [[ -e arch/x86/configs/gki_defconfig ]]; then
+        export DEFCONFIG=gki_defconfig
+      elif [[ -e arch/x86/configs/x86_64_cuttlefish_defconfig ]]; then
+        export DEFCONFIG=x86_64_cuttlefish_defconfig
+      fi
+  esac
+fi
+
+if tty >/dev/null; then
+  verbose=
+else
+  verbose=1
+fi
+
+test=all_tests.sh
+while [[ -n "$1" ]]; do
+  if [[ "$1" == "--builder" || "$1" == "-b" ]]; then
     consolemode="con=null,fd:1"
     testmode=builder
     shift
-  elif [ "$1" == "--readonly" ]; then
+  elif [[ "$1" == "--readwrite" || "$1" == "--rw" ]]; then
+    nowrite=0
+    shift
+  elif [[ "$1" == "--readonly" ||  "$1" == "--ro" ]]; then
     nowrite=1
     shift
-  elif [ "$1" == "--nobuild" ]; then
+  elif [[ "$1" == "--nobuild" ]]; then
     nobuild=1
     shift
-  elif [ "$1" == "--norun" ]; then
+  elif [[ "$1" == "--norun" ]]; then
     norun=1
     shift
+  elif [[ "$1" == "--verbose" ]]; then
+    verbose=1
+    shift
+  elif [[ "$1" == "--noverbose" ]]; then
+    verbose=
+    shift
   else
     test=$1
     break  # Arguments after the test file are passed to the test itself.
@@ -129,16 +178,17 @@
 test_args=${@:2}
 
 function isRunningTest() {
-  [[ -n "$test" ]] && ! (( norun ))
+  ! (( norun ))
 }
 
 function isBuildOnly() {
-  [[ -z "$test" ]] && (( norun )) && ! (( nobuild ))
+  (( norun )) && ! (( nobuild ))
 }
 
 if ! isRunningTest && ! isBuildOnly; then
   echo "Usage:" >&2
-  echo "  $0 [--builder] [--readonly] [--nobuild] <test>" >&2
+  echo "  $0 [--builder] [--readonly|--ro|--readwrite|--rw] [--nobuild] [--verbose] [<test>]" >&2
+  echo "      - if [<test>] is not specified, run all_tests.sh" >&2
   echo "  $0 --norun" >&2
   exit 1
 fi
@@ -195,9 +245,10 @@
   # Set default KERNEL_BINARY location if it was not provided.
   if [ "$ARCH" == "um" ]; then
     KERNEL_BINARY=./linux
-  else
-    # Assume x86_64 bzImage for now
+  elif [ "$ARCH" == "i386" -o "$ARCH" == "x86_64" -o "$ARCH" == "x86" ]; then
     KERNEL_BINARY=./arch/x86/boot/bzImage
+  elif [ "$ARCH" == "arm64" ]; then
+    KERNEL_BINARY=./arch/arm64/boot/Image.gz
   fi
 fi
 
@@ -212,18 +263,9 @@
     # The CC flag is *not* inherited from the environment, so it must be
     # passed in on the command line.
     make_flags="$make_flags CC=$CC"
-    # TODO: Remove this workaround for https://lkml.org/lkml/2018/5/7/534
-    # Needs a change to clang to be merged, an updated toolchain, and
-    # a new __nostackprotector annotation of the affected PARAVIRT
-    # code in the affected kernel branches (android-4.4, android-4.9,
-    # android-4.14). This sidesteps the issue by disabling PARAVIRT.
-    if [ "$CC" == "clang" ]; then
-      DISABLE_OPTIONS="$DISABLE_OPTIONS PARAVIRT"
-    fi
   fi
 
   # If there's no kernel config at all, create one or UML won't work.
-  [ -n "$DEFCONFIG" ] || DEFCONFIG=defconfig
   [ -f $CONFIG_FILE ] || (cd $KERNEL_DIR && $MAKE $make_flags $DEFCONFIG)
 
   # Enable the kernel config options listed in $OPTIONS.
@@ -249,9 +291,29 @@
 if (( nowrite == 1 )); then
   cmdline="ro"
 fi
-cmdline="$cmdline init=/sbin/net_test.sh"
+
+if (( verbose == 1 )); then
+  cmdline="$cmdline verbose=1"
+fi
+
+cmdline="$cmdline panic=1 init=/sbin/net_test.sh"
 cmdline="$cmdline net_test_args=\"$test_args\" net_test_mode=$testmode"
 
+# Experience shows that we need at least 128 bits of entropy for the
+# kernel's crng init to complete (before it fully initializes stuff behaves
+# *weirdly* and there's plenty of kernel warnings and some tests even fail),
+# hence net_test.sh needs at least 32 hex chars (which is the amount of hex
+# in a single random UUID) provided to it on the kernel cmdline.
+#
+# Just to be safe, we'll pass in 384 bits, and we'll do this as a random
+# 64 character base64 seed (because this is shorter than base16).
+# We do this by getting *three* random UUIDs and concatenating their hex
+# digits into an *even* length hex encoded string, which we then convert
+# into base64.
+entropy="$(cat /proc/sys/kernel/random{/,/,/}uuid | tr -d '\n-')"
+entropy="$(xxd -r -p <<< "${entropy}" | base64 -w 0)"
+cmdline="${cmdline} random.trust_cpu=on entropy=${entropy}"
+
 if [ "$ARCH" == "um" ]; then
   # Get the absolute path to the test file that's being run.
   cmdline="$cmdline net_test=/host$SCRIPT_DIR/$test"
@@ -263,11 +325,37 @@
   if ((nowrite == 0)); then
     blockdevice=ubda
   else
-    blockdevice="${blockdevice}r"
+    blockdevice=ubdar
   fi
 
-  exec $KERNEL_BINARY >&2 umid=net_test mem=512M \
-    $blockdevice=$SCRIPT_DIR/$ROOTFS $netconfig $consolemode $cmdline
+  exitcode=0
+  $KERNEL_BINARY >&2 umid=net_test mem=512M \
+    $blockdevice=$SCRIPT_DIR/$ROOTFS $netconfig $consolemode $cmdline \
+  || exitcode=$?
+
+  # UML is kind of crazy in how guest syscalls work.  It requires the host
+  # kernel to not be in vsyscall=none mode.
+  if [[ "${exitcode}" != '0' ]]; then
+    {
+      # Hopefully one of these exists
+      cat /proc/config || :
+      zcat /proc/config.gz || :
+      cat "/boot/config-$(uname -r)" || :
+      zcat "/boot/config-$(uname -r).gz" || :
+    } 2>/dev/null \
+    | egrep -q '^CONFIG_LEGACY_VSYSCALL_NONE=y' \
+    && ! egrep -q '(^| )vsyscall=(native|emulate|xonly)( |$)' /proc/cmdline \
+    && {
+      echo -e "\r"
+      echo -e "-----=====-----\r"
+      echo -e "If above you saw a 'net_test.sh[1]: segfault at ...' followed by\r"
+      echo -e "'Kernel panic - not syncing: Attempted to kill init!' then please\r"
+      echo -e "set 'vsyscall=emulate' on *host* kernel command line.\r"
+      echo -e "On Linux 5.2+ you can instead use the slightly safer 'vsyscall=xonly'.\r"
+      echo -e "(for example via GRUB_CMDLINE_LINUX in /etc/default/grub)\r"
+      echo -e "-----=====-----\r"
+    }
+  fi
 else
   # We boot into the filesystem image directly in all cases
   cmdline="$cmdline root=/dev/vda"
@@ -275,6 +363,21 @@
   # The path is stripped by the 9p export; we don't need SCRIPT_DIR
   cmdline="$cmdline net_test=/host/$test"
 
+  # Map the --readonly flag to a QEMU block device flag
+  if ((nowrite > 0)); then
+    blockdevice=",readonly"
+  else
+    blockdevice=
+  fi
+  blockdevice="-drive file=$SCRIPT_DIR/$ROOTFS,format=raw,if=none,id=drive-virtio-disk0$blockdevice"
+  blockdevice="$blockdevice -device virtio-blk-pci,drive=drive-virtio-disk0"
+
+  # Pass through our current console/screen size to inner shell session
+  read rows cols < <(stty size 2>/dev/null)
+  [[ -z "${rows}" ]] || cmdline="${cmdline} console_rows=${rows}"
+  [[ -z "${cols}" ]] || cmdline="${cmdline} console_cols=${cols}"
+  unset rows cols
+
   # QEMU has no way to modify its exitcode; simulate it with a serial port.
   #
   # Choose to do it this way over writing a file to /host, because QEMU will
@@ -282,30 +385,50 @@
   # host filesystem (which is normally not written to) and it allows us to
   # communicate an exit code back in cases we do not have /host mounted.
   #
-  # The assignment of 'ttyS1' here is magical -- we know 'ttyS0' will be our
-  # serial port from the hard-coded '-serial stdio' flag below, and so this
-  # second serial port will be 'ttyS1'.
-  cmdline="$cmdline net_test_exitcode=/dev/ttyS1"
+  if [ "$ARCH" == "i386" -o "$ARCH" == "x86_64" -o "$ARCH" == "x86" ]; then
+    # Assume we have hardware-accelerated virtualization support for amd64
+    qemu="qemu-system-x86_64 -machine pc,accel=kvm -cpu host"
 
-  # Map the --readonly flag to a QEMU block device flag
-  blockdevice=
-  if ((nowrite > 0)); then
-    blockdevice=",readonly"
+    # We know 'ttyS0' will be our serial port on x86 from the hard-coded
+    # '-serial mon:stdio' flag below
+    cmdline="$cmdline console=ttyS0"
+
+    # The assignment of 'ttyS1' here is magical; we know ttyS0 was used up
+    # by '-serial mon:stdio', and so this second serial port will be 'ttyS1'
+    cmdline="$cmdline net_test_exitcode=/dev/ttyS1"
+  elif [ "$ARCH" == "arm64" ]; then
+    # This uses a software model CPU, based on cortex-a57
+    qemu="qemu-system-aarch64 -machine virt -cpu cortex-a57"
+
+    # We know 'ttyAMA0' will be our serial port on arm64 from the hard-coded
+    # '-serial mon:stdio' flag below
+    cmdline="$cmdline console=ttyAMA0"
+
+    # The kernel will print messages via a virtual ARM serial port (ttyAMA0),
+    # but for command line consistency with x86, we put the exitcode serial
+    # port on the PCI bus, and it will be the only one.
+    cmdline="$cmdline net_test_exitcode=/dev/ttyS0"
   fi
-  blockdevice="-drive file=$SCRIPT_DIR/$ROOTFS,format=raw,if=none,id=drive-virtio-disk0$blockdevice"
-  blockdevice="$blockdevice -device virtio-blk-pci,drive=drive-virtio-disk0"
 
-  # Assume x86_64 PC emulation for now
-  qemu-system-x86_64 >&2 -name net_test -m 512 \
+  $qemu >&2 -name net_test -m 512 \
     -kernel $KERNEL_BINARY \
-    -no-user-config -nodefaults -no-reboot -display none \
-    -machine pc,accel=kvm -cpu host -smp 4,sockets=4,cores=1,threads=1 \
+    -no-user-config -nodefaults -no-reboot \
+    -display none -nographic -serial mon:stdio -parallel none \
+    -smp 4,sockets=4,cores=1,threads=1 \
+    -device virtio-rng-pci \
     -chardev file,id=exitcode,path=exitcode \
-    -device isa-serial,chardev=exitcode \
+    -device pci-serial,chardev=exitcode \
     -fsdev local,security_model=mapped-xattr,id=fsdev0,fmode=0644,dmode=0755,path=$SCRIPT_DIR \
     -device virtio-9p-pci,id=fs0,fsdev=fsdev0,mount_tag=host \
-    $blockdevice $netconfig -serial stdio -append "$cmdline"
-  [ -s exitcode ] && exitcode=`cat exitcode | tr -d '\r'` || exitcode=1
+    $blockdevice $netconfig -append "$cmdline"
+  [[ -s exitcode ]] && exitcode=`cat exitcode | tr -d '\r'` || exitcode=1
   rm -f exitcode
-  exit $exitcode
 fi
+
+# UML reliably screws up the ptys, QEMU probably can as well...
+fixup_ptys
+stty sane || :
+tput smam || :
+
+echo "Returning exit code ${exitcode}." 1>&2
+exit "${exitcode}"
diff --git a/net/test/sock_diag_test.py b/net/test/sock_diag_test.py
index e25035b..daa2fa4 100755
--- a/net/test/sock_diag_test.py
+++ b/net/test/sock_diag_test.py
@@ -25,24 +25,42 @@
 import time
 import unittest
 
+import cstruct
 import multinetwork_base
 import net_test
 import packets
 import sock_diag
 import tcp_test
 
+# Mostly empty structure definition containing only the fields we currently use.
+TcpInfo = cstruct.Struct("TcpInfo", "64xI", "tcpi_rcv_ssthresh")
 
 NUM_SOCKETS = 30
 NO_BYTECODE = ""
-HAVE_SO_COOKIE_SUPPORT = net_test.LINUX_VERSION >= (4, 9, 0)
+LINUX_4_9_OR_ABOVE = net_test.LINUX_VERSION >= (4, 9, 0)
+LINUX_4_19_OR_ABOVE = net_test.LINUX_VERSION >= (4, 19, 0)
 
 IPPROTO_SCTP = 132
 
 def HaveUdpDiag():
-  # There is no way to tell whether a dump succeeded: if the appropriate handler
-  # wasn't found, __inet_diag_dump just returns an empty result instead of an
-  # error. So, just check to see if a UDP dump returns no sockets when we know
-  # it should return one.
+  """Checks if the current kernel has config CONFIG_INET_UDP_DIAG enabled.
+
+  This config is required for devices running the 4.9 kernel that ship with P.
+  In this case, always assume the config is there and use the tests to check
+  that the config is enabled as required.
+
+  For all other kernel versions, there is no way to tell whether a dump
+  succeeded: if the appropriate handler wasn't found, __inet_diag_dump just
+  returns an empty result instead of an error. So, just check to see if a UDP
+  dump returns no sockets when we know it should return one. If not, some tests
+  will be skipped.
+
+  Returns:
+    True if the kernel is 4.9 or above, or CONFIG_INET_UDP_DIAG is enabled.
+    False otherwise.
+  """
+  if LINUX_4_9_OR_ABOVE:
+      return True;
   s = socket(AF_INET6, SOCK_DGRAM, 0)
   s.bind(("::", 0))
   s.connect((s.getsockname()))
@@ -192,10 +210,10 @@
       self.sock_diag.GetSockInfo(diag_req)
       # No errors? Good.
 
-  def testFindsAllMySockets(self):
+  def CheckFindsAllMySockets(self, socktype, proto):
     """Tests that basic socket dumping works."""
-    self.socketpairs = self._CreateLotsOfSockets(SOCK_STREAM)
-    sockets = self.sock_diag.DumpAllInetSockets(IPPROTO_TCP, NO_BYTECODE)
+    self.socketpairs = self._CreateLotsOfSockets(socktype)
+    sockets = self.sock_diag.DumpAllInetSockets(proto, NO_BYTECODE)
     self.assertGreaterEqual(len(sockets), NUM_SOCKETS)
 
     # Find the cookies for all of our sockets.
@@ -225,9 +243,21 @@
         # Check that we can find a diag_msg once we know the cookie.
         req = self.sock_diag.DiagReqFromSocket(sock)
         req.id.cookie = cookie
+        if proto == IPPROTO_UDP:
+          # Kernel bug: for UDP sockets, the order of arguments must be swapped.
+          # See testDemonstrateUdpGetSockIdBug.
+          req.id.sport, req.id.dport = req.id.dport, req.id.sport
+          req.id.src, req.id.dst = req.id.dst, req.id.src
         info = self.sock_diag.GetSockInfo(req)
         self.assertSockInfoMatchesSocket(sock, info)
 
+  def testFindsAllMySocketsTcp(self):
+    self.CheckFindsAllMySockets(SOCK_STREAM, IPPROTO_TCP)
+
+  @unittest.skipUnless(HAVE_UDP_DIAG, "INET_UDP_DIAG not enabled")
+  def testFindsAllMySocketsUdp(self):
+    self.CheckFindsAllMySockets(SOCK_DGRAM, IPPROTO_UDP)
+
   def testBytecodeCompilation(self):
     # pylint: disable=bad-whitespace
     instructions = [
@@ -360,11 +390,45 @@
       cookie = sock.getsockopt(net_test.SOL_SOCKET, net_test.SO_COOKIE, 8)
       self.assertEqual(diag_msg.id.cookie, cookie)
 
-  @unittest.skipUnless(HAVE_SO_COOKIE_SUPPORT, "SO_COOKIE not supported")
+  @unittest.skipUnless(LINUX_4_9_OR_ABOVE, "SO_COOKIE not supported")
   def testGetsockoptcookie(self):
     self.CheckSocketCookie(AF_INET, "127.0.0.1")
     self.CheckSocketCookie(AF_INET6, "::1")
 
+  @unittest.skipUnless(HAVE_UDP_DIAG, "INET_UDP_DIAG not enabled")
+  def testDemonstrateUdpGetSockIdBug(self):
+    # TODO: this is because udp_dump_one mistakenly uses __udp[46]_lib_lookup
+    # by passing the source address as the source address argument.
+    # Unfortunately those functions are intended to match local sockets based
+    # on received packets, and the argument that ends up being compared with
+    # e.g., sk_daddr is actually saddr, not daddr. udp_diag_destroy does not
+    # have this bug.  Upstream has confirmed that this will not be fixed:
+    # https://www.mail-archive.com/netdev@vger.kernel.org/msg248638.html
+    """Documents a bug: getting UDP sockets requires swapping src and dst."""
+    for version in [4, 5, 6]:
+      family = net_test.GetAddressFamily(version)
+      s = socket(family, SOCK_DGRAM, 0)
+      self.SelectInterface(s, self.RandomNetid(), "mark")
+      s.connect((self.GetRemoteSocketAddress(version), 53))
+
+      # Create a fully-specified diag req from our socket, including cookie if
+      # we can get it.
+      req = self.sock_diag.DiagReqFromSocket(s)
+      if LINUX_4_9_OR_ABOVE:
+        req.id.cookie = s.getsockopt(net_test.SOL_SOCKET, net_test.SO_COOKIE, 8)
+      else:
+        req.id.cookie = "\xff" * 16  # INET_DIAG_NOCOOKIE[2]
+
+      # As is, this request does not find anything.
+      with self.assertRaisesErrno(ENOENT):
+        self.sock_diag.GetSockInfo(req)
+
+      # But if we swap src and dst, the kernel finds our socket.
+      req.id.sport, req.id.dport = req.id.dport, req.id.sport
+      req.id.src, req.id.dst = req.id.dst, req.id.src
+
+      self.assertSockInfoMatchesSocket(s, self.sock_diag.GetSockInfo(req))
+
 
 class SockDestroyTest(SockDiagBaseTest):
   """Tests that SOCK_DESTROY works correctly.
@@ -487,6 +551,50 @@
                        child.id.src)
 
 
+class TcpRcvWindowTest(tcp_test.TcpBaseTest, SockDiagBaseTest):
+
+  RWND_SIZE = 64000 if LINUX_4_19_OR_ABOVE else 42000
+  TCP_DEFAULT_INIT_RWND = "/proc/sys/net/ipv4/tcp_default_init_rwnd"
+
+  def setUp(self):
+    super(TcpRcvWindowTest, self).setUp()
+    if LINUX_4_19_OR_ABOVE:
+      self.assertRaisesErrno(ENOENT, open, self.TCP_DEFAULT_INIT_RWND, "w")
+      return
+
+    f = open(self.TCP_DEFAULT_INIT_RWND, "w")
+    f.write("60")
+
+  def checkInitRwndSize(self, version, netid):
+    self.IncomingConnection(version, tcp_test.TCP_ESTABLISHED, netid)
+    tcpInfo = TcpInfo(self.accepted.getsockopt(net_test.SOL_TCP,
+                                               net_test.TCP_INFO, len(TcpInfo)))
+    self.assertLess(self.RWND_SIZE, tcpInfo.tcpi_rcv_ssthresh,
+                    "Tcp rwnd of netid=%d, version=%d is not enough. "
+                    "Expect: %d, actual: %d" % (netid, version, self.RWND_SIZE,
+                                                tcpInfo.tcpi_rcv_ssthresh))
+
+  def checkSynPacketWindowSize(self, version, netid):
+    s = self.BuildSocket(version, net_test.TCPSocket, netid, "mark")
+    myaddr = self.MyAddress(version, netid)
+    dstaddr = self.GetRemoteAddress(version)
+    dstsockaddr = self.GetRemoteSocketAddress(version)
+    desc, expected = packets.SYN(53, version, myaddr, dstaddr,
+                                 sport=None, seq=None)
+    self.assertRaisesErrno(EINPROGRESS, s.connect, (dstsockaddr, 53))
+    msg = "IPv%s TCP connect: expected %s on %s" % (
+        version, desc, self.GetInterfaceName(netid))
+    syn = self.ExpectPacketOn(netid, msg, expected)
+    self.assertLess(self.RWND_SIZE, syn.window)
+    s.close()
+
+  def testTcpCwndSize(self):
+    for version in [4, 5, 6]:
+      for netid in self.NETIDS:
+        self.checkInitRwndSize(version, netid)
+        self.checkSynPacketWindowSize(version, netid)
+
+
 class SockDestroyTcpTest(tcp_test.TcpBaseTest, SockDiagBaseTest):
 
   def setUp(self):
@@ -877,7 +985,7 @@
       family = {4: AF_INET, 5: AF_INET6, 6: AF_INET6}[version]
       s = net_test.UDPSocket(family)
       self.SelectInterface(s, random.choice(self.NETIDS), "mark")
-      addr = self.GetRemoteAddress(version)
+      addr = self.GetRemoteSocketAddress(version)
 
       # Check that reads on connected sockets are interrupted.
       s.connect((addr, 53))
diff --git a/net/test/srcaddr_selection_test.py b/net/test/srcaddr_selection_test.py
index adbcc54..e57ce16 100755
--- a/net/test/srcaddr_selection_test.py
+++ b/net/test/srcaddr_selection_test.py
@@ -72,6 +72,9 @@
   def SetUseOptimistic(self, ifname, value):
     self.SetSysctl("/proc/sys/net/ipv6/conf/%s/use_optimistic" % ifname, value)
 
+  def SetForwarding(self, value):
+    self.SetSysctl("/proc/sys/net/ipv6/conf/all/forwarding", value)
+
   def GetSourceIP(self, netid, mode="mark"):
     s = self.BuildSocket(6, net_test.UDPSocket, netid, mode)
     # Because why not...testing for temporary addresses is a separate thing.
@@ -163,6 +166,12 @@
     # link-local address is generated.
     self.WaitForDad(self.test_lladdr)
 
+    # Disable forwarding, because optimistic addresses don't work when
+    # forwarding is on. Forwarding will be re-enabled when the sysctls are
+    # restored by MultiNetworkBaseTest.tearDownClass.
+    # TODO: Fix this and remove this hack.
+    self.SetForwarding("0")
+
 
 class TentativeAddressTest(MultiInterfaceSourceAddressSelectionTest):
 
@@ -287,6 +296,7 @@
                    scapy.ICMPv6ND_NA(tgt=self.test_ip, R=0, S=0, O=1) /
                    scapy.ICMPv6NDOptDstLLAddr(lladdr=conflict_macaddr))
     self.ReceiveEtherPacketOn(self.test_netid, dad_defense)
+    self.WaitForDad(self.test_lladdr)
 
     # The address should have failed DAD, and therefore no longer be usable.
     self.assertAddressNotUsable(self.test_ip, self.test_netid)
diff --git a/net/test/tcp_nuke_addr_test.py b/net/test/tcp_nuke_addr_test.py
index deb4012..1f0de76 100755
--- a/net/test/tcp_nuke_addr_test.py
+++ b/net/test/tcp_nuke_addr_test.py
@@ -105,7 +105,7 @@
 
   def testIpv6Unsupported(self):
     self.CheckNukeAddrUnsupported(CreateIPv6SocketPair(), IPV6_LOOPBACK_ADDR)
-    self.CheckNukeAddrUnsupported(CreateIPv4SocketPair(), "::")
+    self.CheckNukeAddrUnsupported(CreateIPv6SocketPair(), "::")
 
 
 if __name__ == "__main__":
diff --git a/net/test/tcp_repair_test.py b/net/test/tcp_repair_test.py
new file mode 100755
index 0000000..ce54aba
--- /dev/null
+++ b/net/test/tcp_repair_test.py
@@ -0,0 +1,341 @@
+#!/usr/bin/python
+#
+# Copyright 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+from errno import *  # pylint: disable=wildcard-import
+from socket import *  # pylint: disable=wildcard-import
+import ctypes
+import fcntl
+import os
+import random
+import select
+import termios
+import threading
+import time
+from scapy import all as scapy
+
+import multinetwork_base
+import net_test
+import packets
+
+SOL_TCP = net_test.SOL_TCP
+SHUT_RD = net_test.SHUT_RD
+SHUT_WR = net_test.SHUT_WR
+SHUT_RDWR = net_test.SHUT_RDWR
+SIOCINQ = termios.FIONREAD
+SIOCOUTQ = termios.TIOCOUTQ
+
+TEST_PORT = 5555
+
+# Following constants are SOL_TCP level options and arguments.
+# They are defined in linux-kernel: include/uapi/linux/tcp.h
+
+# SOL_TCP level options.
+TCP_REPAIR = 19
+TCP_REPAIR_QUEUE = 20
+TCP_QUEUE_SEQ = 21
+
+# TCP_REPAIR_{OFF, ON} is an argument to TCP_REPAIR.
+TCP_REPAIR_OFF = 0
+TCP_REPAIR_ON = 1
+
+# TCP_{NO, RECV, SEND}_QUEUE is an argument to TCP_REPAIR_QUEUE.
+TCP_NO_QUEUE = 0
+TCP_RECV_QUEUE = 1
+TCP_SEND_QUEUE = 2
+
+# This test aims to ensure that TCP keepalive offload works correctly
+# when it fetches TCP information from the kernel via TCP repair mode.
+class TcpRepairTest(multinetwork_base.MultiNetworkBaseTest):
+
+  def assertSocketNotConnected(self, sock):
+    self.assertRaisesErrno(ENOTCONN, sock.getpeername)
+
+  def assertSocketConnected(self, sock):
+    sock.getpeername()  # No errors? Socket is alive and connected.
+
+  def createConnectedSocket(self, version, netid):
+    s = net_test.TCPSocket(net_test.GetAddressFamily(version))
+    net_test.DisableFinWait(s)
+    self.SelectInterface(s, netid, "mark")
+
+    remotesockaddr = self.GetRemoteSocketAddress(version)
+    remoteaddr = self.GetRemoteAddress(version)
+    self.assertRaisesErrno(EINPROGRESS, s.connect, (remotesockaddr, TEST_PORT))
+    self.assertSocketNotConnected(s)
+
+    myaddr = self.MyAddress(version, netid)
+    port = s.getsockname()[1]
+    self.assertNotEqual(0, port)
+
+    desc, expect_syn = packets.SYN(TEST_PORT, version, myaddr, remoteaddr, port, seq=None)
+    msg = "socket connect: expected %s" % desc
+    syn = self.ExpectPacketOn(netid, msg, expect_syn)
+    synack_desc, synack = packets.SYNACK(version, remoteaddr, myaddr, syn)
+    synack.getlayer("TCP").seq = random.getrandbits(32)
+    synack.getlayer("TCP").window = 14400
+    self.ReceivePacketOn(netid, synack)
+    desc, ack = packets.ACK(version, myaddr, remoteaddr, synack)
+    msg = "socket connect: got SYN+ACK, expected %s" % desc
+    ack = self.ExpectPacketOn(netid, msg, ack)
+    self.last_sent = ack
+    self.last_received = synack
+    return s
+
+  def receiveFin(self, netid, version, sock):
+    self.assertSocketConnected(sock)
+    remoteaddr = self.GetRemoteAddress(version)
+    myaddr = self.MyAddress(version, netid)
+    desc, fin = packets.FIN(version, remoteaddr, myaddr, self.last_sent)
+    self.ReceivePacketOn(netid, fin)
+    self.last_received = fin
+
+  def sendData(self, netid, version, sock, payload):
+    sock.send(payload)
+
+    remoteaddr = self.GetRemoteAddress(version)
+    myaddr = self.MyAddress(version, netid)
+    desc, send = packets.ACK(version, myaddr, remoteaddr,
+                             self.last_received, payload)
+    self.last_sent = send
+
+  def receiveData(self, netid, version, payload):
+    remoteaddr = self.GetRemoteAddress(version)
+    myaddr = self.MyAddress(version, netid)
+
+    desc, received = packets.ACK(version, remoteaddr, myaddr,
+                                 self.last_sent, payload)
+    ack_desc, ack = packets.ACK(version, myaddr, remoteaddr, received)
+    self.ReceivePacketOn(netid, received)
+    time.sleep(0.1)
+    self.ExpectPacketOn(netid, "expecting %s" % ack_desc, ack)
+    self.last_sent = ack
+    self.last_received = received
+
+  # Test the behavior of NO_QUEUE. Expect that incoming data is stored in
+  # the queue, but the socket cannot be read or written while in NO_QUEUE.
+  def testTcpRepairInNoQueue(self):
+    for version in [4, 5, 6]:
+      self.tcpRepairInNoQueueTest(version)
+
+  def tcpRepairInNoQueueTest(self, version):
+    netid = self.RandomNetid()
+    sock = self.createConnectedSocket(version, netid)
+    sock.setsockopt(SOL_TCP, TCP_REPAIR, TCP_REPAIR_ON)
+
+    # In repair mode with NO_QUEUE, writes fail...
+    self.assertRaisesErrno(EINVAL, sock.send, "write test")
+
+    # remote data is coming.
+    TEST_RECEIVED = net_test.UDP_PAYLOAD
+    self.receiveData(netid, version, TEST_RECEIVED)
+
+    # In repair mode with NO_QUEUE, reads fail...
+    self.assertRaisesErrno(EPERM, sock.recv, 4096)
+
+    sock.setsockopt(SOL_TCP, TCP_REPAIR, TCP_REPAIR_OFF)
+    readData = sock.recv(4096)
+    self.assertEquals(readData, TEST_RECEIVED)
+    sock.close()
+
+  # Test whether tcp read/write sequence number can be fetched correctly
+  # by TCP_QUEUE_SEQ.
+  def testGetSequenceNumber(self):
+    for version in [4, 5, 6]:
+      self.GetSequenceNumberTest(version)
+
+  def GetSequenceNumberTest(self, version):
+    netid = self.RandomNetid()
+    sock = self.createConnectedSocket(version, netid)
+    # test write queue sequence number
+    sequence_before = self.GetWriteSequenceNumber(version, sock)
+    expect_sequence = self.last_sent.getlayer("TCP").seq
+    self.assertEquals(sequence_before & 0xffffffff, expect_sequence)
+    TEST_SEND = net_test.UDP_PAYLOAD
+    self.sendData(netid, version, sock, TEST_SEND)
+    sequence_after = self.GetWriteSequenceNumber(version, sock)
+    self.assertEquals(sequence_before + len(TEST_SEND), sequence_after)
+
+    # test read queue sequence number
+    sequence_before = self.GetReadSequenceNumber(version, sock)
+    expect_sequence = self.last_received.getlayer("TCP").seq + 1
+    self.assertEquals(sequence_before & 0xffffffff, expect_sequence)
+    TEST_READ = net_test.UDP_PAYLOAD
+    self.receiveData(netid, version, TEST_READ)
+    sequence_after = self.GetReadSequenceNumber(version, sock)
+    self.assertEquals(sequence_before + len(TEST_READ), sequence_after)
+    sock.close()
+
+  def GetWriteSequenceNumber(self, version, sock):
+    sock.setsockopt(SOL_TCP, TCP_REPAIR, TCP_REPAIR_ON)
+    sock.setsockopt(SOL_TCP, TCP_REPAIR_QUEUE, TCP_SEND_QUEUE)
+    sequence = sock.getsockopt(SOL_TCP, TCP_QUEUE_SEQ)
+    sock.setsockopt(SOL_TCP, TCP_REPAIR_QUEUE, TCP_NO_QUEUE)
+    sock.setsockopt(SOL_TCP, TCP_REPAIR, TCP_REPAIR_OFF)
+    return sequence
+
+  def GetReadSequenceNumber(self, version, sock):
+    sock.setsockopt(SOL_TCP, TCP_REPAIR, TCP_REPAIR_ON)
+    sock.setsockopt(SOL_TCP, TCP_REPAIR_QUEUE, TCP_RECV_QUEUE)
+    sequence = sock.getsockopt(SOL_TCP, TCP_QUEUE_SEQ)
+    sock.setsockopt(SOL_TCP, TCP_REPAIR_QUEUE, TCP_NO_QUEUE)
+    sock.setsockopt(SOL_TCP, TCP_REPAIR, TCP_REPAIR_OFF)
+    return sequence
+
+  # Test whether a tcp repair socket can be poll()'ed correctly
+  # in multiple threads at the same time.
+  def testMultiThreadedPoll(self):
+    for version in [4, 5, 6]:
+      self.PollWhenShutdownTest(version)
+      self.PollWhenReceiveFinTest(version)
+
+  def PollRepairSocketInMultipleThreads(self, netid, version, expected):
+    sock = self.createConnectedSocket(version, netid)
+    sock.setsockopt(SOL_TCP, TCP_REPAIR, TCP_REPAIR_ON)
+
+    multiThreads = []
+    for i in [0, 1]:
+      thread = SocketExceptionThread(sock, lambda sk: self.fdSelect(sock, expected))
+      thread.start()
+      self.assertTrue(thread.is_alive())
+      multiThreads.append(thread)
+
+    return sock, multiThreads
+
+  def assertThreadsStopped(self, multiThreads, msg) :
+    for thread in multiThreads:
+      if (thread.is_alive()):
+        thread.join(1)
+      if (thread.is_alive()):
+        thread.stop()
+        raise AssertionError(msg)
+
+  def PollWhenShutdownTest(self, version):
+    netid = self.RandomNetid()
+    expected = select.POLLIN
+    sock, multiThreads = self.PollRepairSocketInMultipleThreads(netid, version, expected)
+    # Test shutdown RD.
+    sock.shutdown(SHUT_RD)
+    self.assertThreadsStopped(multiThreads, "poll fail during SHUT_RD")
+    sock.close()
+
+    expected = None
+    sock, multiThreads = self.PollRepairSocketInMultipleThreads(netid, version, expected)
+    # Test shutdown WR.
+    sock.shutdown(SHUT_WR)
+    self.assertThreadsStopped(multiThreads, "poll fail during SHUT_WR")
+    sock.close()
+
+    expected = select.POLLIN | select.POLLHUP
+    sock, multiThreads = self.PollRepairSocketInMultipleThreads(netid, version, expected)
+    # Test shutdown RDWR.
+    sock.shutdown(SHUT_RDWR)
+    self.assertThreadsStopped(multiThreads, "poll fail during SHUT_RDWR")
+    sock.close()
+
+  def PollWhenReceiveFinTest(self, version):
+    netid = self.RandomNetid()
+    expected = select.POLLIN
+    sock, multiThreads = self.PollRepairSocketInMultipleThreads(netid, version, expected)
+    self.receiveFin(netid, version, sock)
+    self.assertThreadsStopped(multiThreads, "poll fail during FIN")
+    sock.close()
+
+  # Test whether socket idle can be detected by SIOCINQ and SIOCOUTQ.
+  def testSocketIdle(self):
+    for version in [4, 5, 6]:
+      self.readQueueIdleTest(version)
+      self.writeQueueIdleTest(version)
+
+  def readQueueIdleTest(self, version):
+    netid = self.RandomNetid()
+    sock = self.createConnectedSocket(version, netid)
+
+    buf = ctypes.c_int()
+    fcntl.ioctl(sock, SIOCINQ, buf)
+    self.assertEquals(buf.value, 0)
+
+    TEST_RECV_PAYLOAD = net_test.UDP_PAYLOAD
+    self.receiveData(netid, version, TEST_RECV_PAYLOAD)
+    fcntl.ioctl(sock, SIOCINQ, buf)
+    self.assertEquals(buf.value, len(TEST_RECV_PAYLOAD))
+    sock.close()
+
+  def writeQueueIdleTest(self, version):
+    netid = self.RandomNetid()
+    # Setup a connected socket, write queue is empty.
+    sock = self.createConnectedSocket(version, netid)
+    buf = ctypes.c_int()
+    fcntl.ioctl(sock, SIOCOUTQ, buf)
+    self.assertEquals(buf.value, 0)
+    # Change to repair mode with SEND_QUEUE, writing some data to the queue.
+    sock.setsockopt(SOL_TCP, TCP_REPAIR, TCP_REPAIR_ON)
+    TEST_SEND_PAYLOAD = net_test.UDP_PAYLOAD
+    sock.setsockopt(SOL_TCP, TCP_REPAIR_QUEUE, TCP_SEND_QUEUE)
+    self.sendData(netid, version, sock, TEST_SEND_PAYLOAD)
+    fcntl.ioctl(sock, SIOCOUTQ, buf)
+    self.assertEquals(buf.value, len(TEST_SEND_PAYLOAD))
+    sock.close()
+
+    # Setup a connected socket again.
+    netid = self.RandomNetid()
+    sock = self.createConnectedSocket(version, netid)
+    # Send out some data and don't receive ACK yet.
+    self.sendData(netid, version, sock, TEST_SEND_PAYLOAD)
+    fcntl.ioctl(sock, SIOCOUTQ, buf)
+    self.assertEquals(buf.value, len(TEST_SEND_PAYLOAD))
+    # Receive response ACK.
+    remoteaddr = self.GetRemoteAddress(version)
+    myaddr = self.MyAddress(version, netid)
+    desc_ack, ack = packets.ACK(version, remoteaddr, myaddr, self.last_sent)
+    self.ReceivePacketOn(netid, ack)
+    fcntl.ioctl(sock, SIOCOUTQ, buf)
+    self.assertEquals(buf.value, 0)
+    sock.close()
+
+
+  def fdSelect(self, sock, expected):
+    READ_ONLY = select.POLLIN | select.POLLPRI | select.POLLHUP | select.POLLERR | select.POLLNVAL
+    p = select.poll()
+    p.register(sock, READ_ONLY)
+    events = p.poll(500)
+    for fd,event in events:
+      if fd == sock.fileno():
+        self.assertEquals(event, expected)
+      else:
+        raise AssertionError("unexpected poll fd")
+
+class SocketExceptionThread(threading.Thread):
+
+  def __init__(self, sock, operation):
+    self.exception = None
+    super(SocketExceptionThread, self).__init__()
+    self.daemon = True
+    self.sock = sock
+    self.operation = operation
+
+  def stop(self):
+    self._Thread__stop()
+
+  def run(self):
+    try:
+      self.operation(self.sock)
+    except (IOError, AssertionError), e:
+      self.exception = e
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/net/test/vts_kernel_net_tests.xml b/net/test/vts_kernel_net_tests.xml
new file mode 100644
index 0000000..34540c6
--- /dev/null
+++ b/net/test/vts_kernel_net_tests.xml
@@ -0,0 +1,34 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2020 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<configuration description="Runs vts_kernel_net_tests.">
+    <target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer">
+    </target_preparer>
+
+    <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer" >
+        <option name="push-file" key="kernel_net_tests_bin" value="/data/local/tmp/vts_kernel_net_tests/kernel_net_tests_bin" />
+        <option name="abort-on-push-failure" value="true" />
+        <option name="cleanup" value="true" />
+    </target_preparer>
+
+    <target_preparer class="com.android.tradefed.targetprep.DeviceSetup">
+        <option name="airplane-mode" value="ON" />
+    </target_preparer>
+
+    <test class="com.android.tradefed.testtype.binary.ExecutableTargetTest" >
+        <option name="per-binary-timeout" value="10m" />
+        <option name="test-command-line" key="vts_kernel_net_tests" value="/data/local/tmp/vts_kernel_net_tests/kernel_net_tests_bin" />
+    </test>
+</configuration>
diff --git a/net/test/xfrm.py b/net/test/xfrm.py
index 56b4774..acdfd4f 100755
--- a/net/test/xfrm.py
+++ b/net/test/xfrm.py
@@ -208,7 +208,7 @@
 NO_LIFETIME_CUR = "\x00" * len(XfrmLifetimeCur)
 
 # IPsec constants.
-IPSEC_PROTO_ANY	= 255
+IPSEC_PROTO_ANY = 255
 
 # ESP header, not technically XFRM but we need a place for a protocol
 # header and this is the only one we have.
@@ -219,6 +219,11 @@
 _DEFAULT_REPLAY_WINDOW = 4
 ALL_ALGORITHMS = 0xffffffff
 
+# Policy-SA match method (for VTI/XFRM-I).
+MATCH_METHOD_ALL = "all"
+MATCH_METHOD_MARK = "mark"
+MATCH_METHOD_IFID = "ifid"
+
 
 def RawAddress(addr):
   """Converts an IP address string to binary format."""
@@ -630,7 +635,7 @@
     self._SendNlRequest(XFRM_MSG_FLUSHSA, usersa_flush.Pack(), flags)
 
   def CreateTunnel(self, direction, selector, src, dst, spi, encryption,
-                   auth_trunc, mark, output_mark, xfrm_if_id):
+                   auth_trunc, mark, output_mark, xfrm_if_id, match_method):
     """Create an XFRM Tunnel Consisting of a Policy and an SA.
 
     Create a unidirectional XFRM tunnel, which entails one Policy and one
@@ -652,9 +657,28 @@
       output_mark: The mark used to select the underlying network for packets
         outbound from xfrm. None means unspecified.
       xfrm_if_id: The ID of the XFRM interface to use or None.
+      match_method: One of MATCH_METHOD_[MARK | ALL | IFID]. This determines how
+        SAs and policies are matched.
     """
     outer_family = net_test.GetAddressFamily(net_test.GetAddressVersion(dst))
 
+    # SA mark is currently unused due to UPDSA not updating marks.
+    # Kept as documentation of ideal/desired behavior.
+    if match_method == MATCH_METHOD_MARK:
+      # sa_mark = mark
+      tmpl_spi = 0
+      if_id = None
+    elif match_method == MATCH_METHOD_ALL:
+      # sa_mark = mark
+      tmpl_spi = spi
+      if_id = xfrm_if_id
+    elif match_method == MATCH_METHOD_IFID:
+      # sa_mark = None
+      tmpl_spi = 0
+      if_id = xfrm_if_id
+    else:
+      raise ValueError("Unknown match_method supplied: %s" % match_method)
+
     # Device code does not use mark; during AllocSpi, the mark is unset, and
     # UPDSA does not update marks at this time. Actual use case will have no
     # mark set. Test this use case.
@@ -668,7 +692,7 @@
 
     for selector in selectors:
       policy = UserPolicy(direction, selector)
-      tmpl = UserTemplate(outer_family, spi, 0, (src, dst))
+      tmpl = UserTemplate(outer_family, tmpl_spi, 0, (src, dst))
       self.AddPolicyInfo(policy, tmpl, mark, xfrm_if_id=xfrm_if_id)
 
   def DeleteTunnel(self, direction, selector, dst, spi, mark, xfrm_if_id):
diff --git a/net/test/xfrm_test.py b/net/test/xfrm_test.py
index 52a1596..64be084 100755
--- a/net/test/xfrm_test.py
+++ b/net/test/xfrm_test.py
@@ -38,6 +38,9 @@
 TEST_ADDR1 = "2001:4860:4860::8888"
 TEST_ADDR2 = "2001:4860:4860::8844"
 
+XFRM_STATS_PROCFILE = "/proc/net/xfrm_stat"
+XFRM_STATS_OUT_NO_STATES = "XfrmOutNoStates"
+
 # IP addresses to use for tunnel endpoints. For generality, these should be
 # different from the addresses we send packets to.
 TUNNEL_ENDPOINTS = {4: "8.8.4.4", 6: TEST_ADDR2}
@@ -132,6 +135,14 @@
         EAGAIN,
         s.sendto, net_test.UDP_PAYLOAD, (remotesockaddr, 53))
 
+    # If there is a user space key manager, calling sendto() after applying the socket policy
+    # creates an SA whose state is XFRM_STATE_ACQ. So this just deletes it.
+    # If there is no user space key manager, deleting SA returns ESRCH as the error code.
+    try:
+        self.xfrm.DeleteSaInfo(self.GetRemoteAddress(xfrm_version), TEST_SPI, IPPROTO_ESP)
+    except IOError as e:
+        self.assertEquals(ESRCH, e.errno, "Unexpected error when deleting ACQ SA")
+
     # Adding a matching SA causes the packet to go out encrypted. The SA's
     # SPI must match the one in our template, and the destination address must
     # match the packet's destination address (in tunnel mode, it has to match
@@ -139,6 +150,7 @@
     self.CreateNewSa(
         net_test.GetWildcardAddress(xfrm_version),
         self.GetRemoteAddress(xfrm_version), TEST_SPI, reqid, None)
+
     s.sendto(net_test.UDP_PAYLOAD, (remotesockaddr, 53))
     expected_length = xfrm_base.GetEspPacketLength(xfrm.XFRM_MODE_TRANSPORT,
                                                 version, False,
@@ -769,7 +781,7 @@
             xfrm_base._ALGO_HMAC_SHA1, None, None, None, 0)
 
   def testUpdateSaAddMark(self):
-    """Test that when an SA has no mark, it can be updated to add a mark."""
+    """Test that an embryonic SA can be updated to add a mark."""
     for version in [4, 6]:
       spi = 0xABCD
       # Test that an SA created with ALLOCSPI can be updated with the mark.
@@ -789,7 +801,104 @@
       self.xfrm.DeleteSaInfo(net_test.GetWildcardAddress(version),
                              spi, IPPROTO_ESP, mark)
 
-      # TODO: we might also need to update the mark for a VALID SA.
+  def getXfrmStat(self, statName):
+    stateVal = 0
+    with open(XFRM_STATS_PROCFILE, 'r') as f:
+      for line in f:
+          if statName in line:
+            stateVal = int(line.split()[1])
+            break
+      f.close()
+    return stateVal
+
+  def testUpdateActiveSaMarks(self):
+    """Test that the OUTPUT_MARK can be updated on an ACTIVE SA."""
+    for version in [4, 6]:
+      family = net_test.GetAddressFamily(version)
+      netid = self.RandomNetid()
+      remote = self.GetRemoteAddress(version)
+      local = self.MyAddress(version, netid)
+      s = socket(family, SOCK_DGRAM, 0)
+      self.SelectInterface(s, netid, "mark")
+      # Create a mark that we will apply to the policy and later the SA
+      mark = xfrm.ExactMatchMark(netid)
+
+      # Create a global policy that selects using the mark.
+      sel = xfrm.EmptySelector(family)
+      policy = xfrm.UserPolicy(xfrm.XFRM_POLICY_OUT, sel)
+      tmpl = xfrm.UserTemplate(family, 0, 0, (local, remote))
+      self.xfrm.AddPolicyInfo(policy, tmpl, mark)
+
+      # Pull /proc/net/xfrm_stats for baseline
+      outNoStateCount = self.getXfrmStat(XFRM_STATS_OUT_NO_STATES);
+
+      # should increment XfrmOutNoStates
+      s.sendto(net_test.UDP_PAYLOAD, (remote, 53))
+
+      # Check to make sure XfrmOutNoStates is incremented by exactly 1
+      self.assertEquals(outNoStateCount + 1,
+                        self.getXfrmStat(XFRM_STATS_OUT_NO_STATES))
+
+      length = xfrm_base.GetEspPacketLength(xfrm.XFRM_MODE_TUNNEL,
+                                            version, False,
+                                            net_test.UDP_PAYLOAD,
+                                            xfrm_base._ALGO_HMAC_SHA1,
+                                            xfrm_base._ALGO_CBC_AES_256)
+
+      # Add a default SA with no mark that routes to nowhere.
+      try:
+          self.xfrm.AddSaInfo(local,
+                              remote,
+                              TEST_SPI, xfrm.XFRM_MODE_TUNNEL, 0,
+                              xfrm_base._ALGO_CBC_AES_256,
+                              xfrm_base._ALGO_HMAC_SHA1,
+                              None, None, mark, 0, is_update=False)
+      except IOError as e:
+          self.assertEquals(EEXIST, e.errno, "SA exists")
+          self.xfrm.AddSaInfo(local,
+                              remote,
+                              TEST_SPI, xfrm.XFRM_MODE_TUNNEL, 0,
+                              xfrm_base._ALGO_CBC_AES_256,
+                              xfrm_base._ALGO_HMAC_SHA1,
+                              None, None, mark, 0, is_update=True)
+
+      self.assertRaisesErrno(
+          ENETUNREACH,
+          s.sendto, net_test.UDP_PAYLOAD, (remote, 53))
+
+      # Update the SA to route to a valid netid.
+      self.xfrm.AddSaInfo(local,
+                          remote,
+                          TEST_SPI, xfrm.XFRM_MODE_TUNNEL, 0,
+                          xfrm_base._ALGO_CBC_AES_256,
+                          xfrm_base._ALGO_HMAC_SHA1,
+                          None, None, mark, netid, is_update=True)
+
+      # Now the payload routes to the updated netid.
+      s.sendto(net_test.UDP_PAYLOAD, (remote, 53))
+      self._ExpectEspPacketOn(netid, TEST_SPI, 1, length, None, None)
+
+      # Get a new netid and reroute the packets to the new netid.
+      reroute_netid = self.RandomNetid(netid)
+      # Update the SA to change the output mark.
+      self.xfrm.AddSaInfo(local,
+                         remote,
+                         TEST_SPI, xfrm.XFRM_MODE_TUNNEL, 0,
+                         xfrm_base._ALGO_CBC_AES_256,
+                         xfrm_base._ALGO_HMAC_SHA1,
+                         None, None, mark, reroute_netid, is_update=True)
+
+      s.sendto(net_test.UDP_PAYLOAD, (remote, 53))
+      self._ExpectEspPacketOn(reroute_netid, TEST_SPI, 2, length, None, None)
+
+      dump = self.xfrm.DumpSaInfo()
+
+      self.assertEquals(1, len(dump)) # check that update updated
+      sainfo, attributes = dump[0]
+      self.assertEquals(reroute_netid, attributes["XFRMA_OUTPUT_MARK"])
+
+      self.xfrm.DeleteSaInfo(remote, TEST_SPI, IPPROTO_ESP, mark)
+      self.xfrm.DeletePolicyInfo(sel, xfrm.XFRM_POLICY_OUT, mark)
 
 if __name__ == "__main__":
   unittest.main()
diff --git a/net/test/xfrm_tunnel_test.py b/net/test/xfrm_tunnel_test.py
index 652a0c2..eb1a46e 100755
--- a/net/test/xfrm_tunnel_test.py
+++ b/net/test/xfrm_tunnel_test.py
@@ -87,7 +87,10 @@
 
 def _GetNullAuthCryptTunnelModePkt(inner_version, src_inner, src_outer,
                                    src_port, dst_inner, dst_outer,
-                                   dst_port, spi, seq_num, ip_hdr_options={}):
+                                   dst_port, spi, seq_num, ip_hdr_options=None):
+  if ip_hdr_options is None:
+    ip_hdr_options = {}
+
   ip_hdr_options.update({'src': src_inner, 'dst': dst_inner})
 
   # Build and receive an ESP packet destined for the inner socket
@@ -109,7 +112,7 @@
   # The second parameter of the tuple is the port number regardless of AF.
   local_port = read_sock.getsockname()[1]
   # Guard against the eventuality of the receive failing.
-  net_test.SetNonBlocking(read_sock.fileno())
+  csocket.SetSocketTimeout(read_sock, 500)
 
   return read_sock, local_port
 
@@ -169,7 +172,8 @@
     self.assertEquals(net_test.UDP_PAYLOAD, data)
     self.assertEquals((remote_inner, _TEST_REMOTE_PORT), src[:2])
 
-  def _TestTunnel(self, inner_version, outer_version, func, direction):
+  def _TestTunnel(self, inner_version, outer_version, func, direction,
+                  test_output_mark_unset):
     """Test a unidirectional XFRM Tunnel with explicit selectors"""
     # Select the underlying netid, which represents the external
     # interface from/to which to route ESP packets.
@@ -183,35 +187,46 @@
     local_outer = self.MyAddress(outer_version, u_netid)
     remote_outer = _GetRemoteOuterAddress(outer_version)
 
-    # Create input/ouput SPs, SAs and sockets to simulate a more realistic
-    # environment.
-    self.xfrm.CreateTunnel(xfrm.XFRM_POLICY_IN,
-                           xfrm.SrcDstSelector(remote_inner, local_inner),
-                           remote_outer, local_outer, _TEST_IN_SPI,
-                           xfrm_base._ALGO_CRYPT_NULL,
-                           xfrm_base._ALGO_AUTH_NULL, None, None, None)
+    output_mark = u_netid
+    if test_output_mark_unset:
+      output_mark = None
+      self.SetDefaultNetwork(u_netid)
 
-    self.xfrm.CreateTunnel(xfrm.XFRM_POLICY_OUT,
-                           xfrm.SrcDstSelector(local_inner, remote_inner),
-                           local_outer, remote_outer, _TEST_OUT_SPI,
-                           xfrm_base._ALGO_CBC_AES_256,
-                           xfrm_base._ALGO_HMAC_SHA1, None, u_netid, None)
+    try:
+      # Create input/output SPs, SAs and sockets to simulate a more realistic
+      # environment.
+      self.xfrm.CreateTunnel(
+          xfrm.XFRM_POLICY_IN, xfrm.SrcDstSelector(remote_inner, local_inner),
+          remote_outer, local_outer, _TEST_IN_SPI, xfrm_base._ALGO_CRYPT_NULL,
+          xfrm_base._ALGO_AUTH_NULL, None, None, None, xfrm.MATCH_METHOD_ALL)
 
-    write_sock = socket(net_test.GetAddressFamily(inner_version), SOCK_DGRAM, 0)
-    self.SelectInterface(write_sock, netid, "mark")
-    read_sock, _ = _CreateReceiveSock(inner_version)
+      self.xfrm.CreateTunnel(
+          xfrm.XFRM_POLICY_OUT, xfrm.SrcDstSelector(local_inner, remote_inner),
+          local_outer, remote_outer, _TEST_OUT_SPI, xfrm_base._ALGO_CBC_AES_256,
+          xfrm_base._ALGO_HMAC_SHA1, None, output_mark, None, xfrm.MATCH_METHOD_ALL)
 
-    sock = write_sock if direction == xfrm.XFRM_POLICY_OUT else read_sock
-    func(inner_version, outer_version, u_netid, netid, local_inner,
-         remote_inner, local_outer, remote_outer, sock)
+      write_sock = socket(net_test.GetAddressFamily(inner_version), SOCK_DGRAM, 0)
+      self.SelectInterface(write_sock, netid, "mark")
+      read_sock, _ = _CreateReceiveSock(inner_version)
+
+      sock = write_sock if direction == xfrm.XFRM_POLICY_OUT else read_sock
+      func(inner_version, outer_version, u_netid, netid, local_inner,
+          remote_inner, local_outer, remote_outer, sock)
+    finally:
+      if test_output_mark_unset:
+        self.ClearDefaultNetwork()
 
   def ParamTestTunnelInput(self, inner_version, outer_version):
     self._TestTunnel(inner_version, outer_version, self._CheckTunnelInput,
-                     xfrm.XFRM_POLICY_IN)
+                     xfrm.XFRM_POLICY_IN, False)
 
   def ParamTestTunnelOutput(self, inner_version, outer_version):
     self._TestTunnel(inner_version, outer_version, self._CheckTunnelOutput,
-                     xfrm.XFRM_POLICY_OUT)
+                     xfrm.XFRM_POLICY_OUT, False)
+
+  def ParamTestTunnelOutputNoSetMark(self, inner_version, outer_version):
+    self._TestTunnel(inner_version, outer_version, self._CheckTunnelOutput,
+                     xfrm.XFRM_POLICY_OUT, True)
 
 
 @unittest.skipUnless(net_test.LINUX_VERSION >= (3, 18, 0), "VTI Unsupported")
@@ -321,12 +336,31 @@
 
     self._SetupXfrmByType(auth, crypt)
 
+  def Rekey(self, outer_family, new_out_sa, new_in_sa):
+    """Rekeys the Tunnel Interface
+
+    Creates new SAs and updates the outbound security policy to use new SAs.
+
+    Args:
+      outer_family: AF_INET or AF_INET6
+      new_out_sa: An SaInfo struct representing the new outbound SA's info
+      new_in_sa: An SaInfo struct representing the new inbound SA's info
+    """
+    self._Rekey(outer_family, new_out_sa, new_in_sa)
+
+    # Update Interface object
+    self.out_sa = new_out_sa
+    self.in_sa = new_in_sa
+
   def TeardownXfrm(self):
     raise NotImplementedError("Subclasses should implement this")
 
   def _SetupXfrmByType(self, auth_algo, crypt_algo):
     raise NotImplementedError("Subclasses should implement this")
 
+  def _Rekey(self, outer_family, new_out_sa, new_in_sa):
+    raise NotImplementedError("Subclasses should implement this")
+
 
 class VtiInterface(IpSecBaseInterface):
 
@@ -351,11 +385,12 @@
     self.xfrm.CreateTunnel(xfrm.XFRM_POLICY_OUT, None, self.local, self.remote,
                            self.out_sa.spi, crypt_algo, auth_algo,
                            xfrm.ExactMatchMark(self.okey),
-                           self.underlying_netid, None)
+                           self.underlying_netid, None, xfrm.MATCH_METHOD_ALL)
 
     self.xfrm.CreateTunnel(xfrm.XFRM_POLICY_IN, None, self.remote, self.local,
                            self.in_sa.spi, crypt_algo, auth_algo,
-                           xfrm.ExactMatchMark(self.ikey), None, None)
+                           xfrm.ExactMatchMark(self.ikey), None, None,
+                           xfrm.MATCH_METHOD_MARK)
 
   def TeardownXfrm(self):
     self.xfrm.DeleteTunnel(xfrm.XFRM_POLICY_OUT, None, self.remote,
@@ -363,6 +398,35 @@
     self.xfrm.DeleteTunnel(xfrm.XFRM_POLICY_IN, None, self.local,
                            self.in_sa.spi, self.ikey, None)
 
+  def _Rekey(self, outer_family, new_out_sa, new_in_sa):
+    # TODO: Consider ways to share code with xfrm.CreateTunnel(). It's mostly
+    #       the same, but rekeys are asymmetric, and only update the outbound
+    #       policy.
+    self.xfrm.AddSaInfo(self.local, self.remote, new_out_sa.spi,
+                        xfrm.XFRM_MODE_TUNNEL, 0, xfrm_base._ALGO_CRYPT_NULL,
+                        xfrm_base._ALGO_AUTH_NULL, None, None,
+                        xfrm.ExactMatchMark(self.okey), self.underlying_netid)
+
+    self.xfrm.AddSaInfo(self.remote, self.local, new_in_sa.spi,
+                        xfrm.XFRM_MODE_TUNNEL, 0, xfrm_base._ALGO_CRYPT_NULL,
+                        xfrm_base._ALGO_AUTH_NULL, None, None,
+                        xfrm.ExactMatchMark(self.ikey), None)
+
+    # Create new policies for IPv4 and IPv6.
+    for sel in [xfrm.EmptySelector(AF_INET), xfrm.EmptySelector(AF_INET6)]:
+      # Add SPI-specific output policy to enforce using new outbound SPI
+      policy = xfrm.UserPolicy(xfrm.XFRM_POLICY_OUT, sel)
+      tmpl = xfrm.UserTemplate(outer_family, new_out_sa.spi, 0,
+                                    (self.local, self.remote))
+      self.xfrm.UpdatePolicyInfo(policy, tmpl, xfrm.ExactMatchMark(self.okey),
+                                 0)
+
+  def DeleteOldSaInfo(self, outer_family, old_in_spi, old_out_spi):
+    self.xfrm.DeleteSaInfo(self.local, old_in_spi, IPPROTO_ESP,
+                           xfrm.ExactMatchMark(self.ikey))
+    self.xfrm.DeleteSaInfo(self.remote, old_out_spi, IPPROTO_ESP,
+                           xfrm.ExactMatchMark(self.okey))
+
 
 @unittest.skipUnless(HAVE_XFRM_INTERFACES, "XFRM interfaces unsupported")
 class XfrmAddDeleteXfrmInterfaceTest(xfrm_base.XfrmBaseTest):
@@ -401,10 +465,11 @@
   def _SetupXfrmByType(self, auth_algo, crypt_algo):
     self.xfrm.CreateTunnel(xfrm.XFRM_POLICY_OUT, None, self.local, self.remote,
                            self.out_sa.spi, crypt_algo, auth_algo, None,
-                           self.underlying_netid, self.xfrm_if_id)
+                           self.underlying_netid, self.xfrm_if_id,
+                           xfrm.MATCH_METHOD_ALL)
     self.xfrm.CreateTunnel(xfrm.XFRM_POLICY_IN, None, self.remote, self.local,
                            self.in_sa.spi, crypt_algo, auth_algo, None, None,
-                           self.xfrm_if_id)
+                           self.xfrm_if_id, xfrm.MATCH_METHOD_IFID)
 
   def TeardownXfrm(self):
     self.xfrm.DeleteTunnel(xfrm.XFRM_POLICY_OUT, None, self.remote,
@@ -412,6 +477,33 @@
     self.xfrm.DeleteTunnel(xfrm.XFRM_POLICY_IN, None, self.local,
                            self.in_sa.spi, None, self.xfrm_if_id)
 
+  def _Rekey(self, outer_family, new_out_sa, new_in_sa):
+    # TODO: Consider ways to share code with xfrm.CreateTunnel(). It's mostly
+    #       the same, but rekeys are asymmetric, and only update the outbound
+    #       policy.
+    self.xfrm.AddSaInfo(
+        self.local, self.remote, new_out_sa.spi, xfrm.XFRM_MODE_TUNNEL, 0,
+        xfrm_base._ALGO_CRYPT_NULL, xfrm_base._ALGO_AUTH_NULL, None, None,
+        None, self.underlying_netid, xfrm_if_id=self.xfrm_if_id)
+
+    self.xfrm.AddSaInfo(
+        self.remote, self.local, new_in_sa.spi, xfrm.XFRM_MODE_TUNNEL, 0,
+        xfrm_base._ALGO_CRYPT_NULL, xfrm_base._ALGO_AUTH_NULL, None, None,
+        None, None, xfrm_if_id=self.xfrm_if_id)
+
+    # Create new policies for IPv4 and IPv6.
+    for sel in [xfrm.EmptySelector(AF_INET), xfrm.EmptySelector(AF_INET6)]:
+      # Add SPI-specific output policy to enforce using new outbound SPI
+      policy = xfrm.UserPolicy(xfrm.XFRM_POLICY_OUT, sel)
+      tmpl = xfrm.UserTemplate(outer_family, new_out_sa.spi, 0,
+                                    (self.local, self.remote))
+      self.xfrm.UpdatePolicyInfo(policy, tmpl, None, self.xfrm_if_id)
+
+  def DeleteOldSaInfo(self, outer_family, old_in_spi, old_out_spi):
+    self.xfrm.DeleteSaInfo(self.local, old_in_spi, IPPROTO_ESP, None,
+                           self.xfrm_if_id)
+    self.xfrm.DeleteSaInfo(self.remote, old_out_spi, IPPROTO_ESP, None,
+                           self.xfrm_if_id)
 
 
 class XfrmTunnelBase(xfrm_base.XfrmBaseTest):
@@ -555,7 +647,7 @@
     sa_info.seq_num += 1
 
   def _CheckTunnelInput(self, tunnel, inner_version, local_inner, remote_inner,
-                        sa_info=None):
+                        sa_info=None, expect_fail=False):
     """Test null-crypt input path over an IPsec interface."""
     if sa_info is None:
       sa_info = tunnel.in_sa
@@ -566,11 +658,14 @@
         local_inner, tunnel.local, local_port, sa_info.spi, sa_info.seq_num)
     self.ReceivePacketOn(tunnel.underlying_netid, input_pkt)
 
-    # Verify that the packet data and src are correct
-    self.assertReceivedPacket(tunnel, sa_info)
-    data, src = read_sock.recvfrom(4096)
-    self.assertEquals(net_test.UDP_PAYLOAD, data)
-    self.assertEquals((remote_inner, _TEST_REMOTE_PORT), src[:2])
+    if expect_fail:
+      self.assertRaisesErrno(EAGAIN, read_sock.recv, 4096)
+    else:
+      # Verify that the packet data and src are correct
+      data, src = read_sock.recvfrom(4096)
+      self.assertReceivedPacket(tunnel, sa_info)
+      self.assertEquals(net_test.UDP_PAYLOAD, data)
+      self.assertEquals((remote_inner, _TEST_REMOTE_PORT), src[:2])
 
   def _CheckTunnelOutput(self, tunnel, inner_version, local_inner,
                          remote_inner, sa_info=None):
@@ -711,13 +806,25 @@
     tunnel = self.randomTunnel(outer_version)
 
     try:
+      # Some tests require that the out_seq_num and in_seq_num are the same
+      # (Specifically encrypted tests), rebuild SAs to ensure seq_num is 1
+      #
+      # Until we get better scapy support, the only way we can build an
+      # encrypted packet is to send it out, and read the packet from the wire.
+      # We then generally use this as the "inbound" encrypted packet, injecting
+      # it into the interface on which it is expected.
+      #
+      # As such, this is required to ensure that encrypted packets (which we
+      # currently have no way to easily modify) are not considered replay
+      # attacks by the inbound SA.  (eg: received 3 packets, seq_num_in = 3,
+      # sent only 1, seq_num_out = 1; the inbound SA would consider this a
+      # replay attack)
       tunnel.TeardownXfrm()
       tunnel.SetupXfrm(use_null_crypt)
 
       local_inner = tunnel.addrs[inner_version]
       remote_inner = _GetRemoteInnerAddress(inner_version)
 
-      # Run twice to ensure sequence numbers are tested
       for i in range(2):
         func(tunnel, inner_version, local_inner, remote_inner)
     finally:
@@ -725,6 +832,67 @@
         tunnel.TeardownXfrm()
         tunnel.SetupXfrm(False)
 
+  def _CheckTunnelRekey(self, tunnel, inner_version, local_inner, remote_inner):
+    old_out_sa = tunnel.out_sa
+    old_in_sa = tunnel.in_sa
+
+    # Check to make sure that both directions work before rekey
+    self._CheckTunnelInput(tunnel, inner_version, local_inner, remote_inner,
+                           old_in_sa)
+    self._CheckTunnelOutput(tunnel, inner_version, local_inner, remote_inner,
+                            old_out_sa)
+
+    # Rekey
+    outer_family = net_test.GetAddressFamily(tunnel.version)
+
+    # Create new SA
+    # Distinguish the new SAs with new SPIs.
+    new_out_sa = SaInfo(old_out_sa.spi + 1)
+    new_in_sa = SaInfo(old_in_sa.spi + 1)
+
+    # Perform Rekey
+    tunnel.Rekey(outer_family, new_out_sa, new_in_sa)
+
+    # Expect that the old SPI still works for inbound packets
+    self._CheckTunnelInput(tunnel, inner_version, local_inner, remote_inner,
+                           old_in_sa)
+
+    # Test both paths with new SPIs, expect outbound to use new SPI
+    self._CheckTunnelInput(tunnel, inner_version, local_inner, remote_inner,
+                           new_in_sa)
+    self._CheckTunnelOutput(tunnel, inner_version, local_inner, remote_inner,
+                            new_out_sa)
+
+    # Delete old SAs
+    tunnel.DeleteOldSaInfo(outer_family, old_in_sa.spi, old_out_sa.spi)
+
+    # Test both paths with new SPIs; should still work
+    self._CheckTunnelInput(tunnel, inner_version, local_inner, remote_inner,
+                           new_in_sa)
+    self._CheckTunnelOutput(tunnel, inner_version, local_inner, remote_inner,
+                            new_out_sa)
+
+    # Expect failure upon trying to receive a packet with the deleted SPI
+    self._CheckTunnelInput(tunnel, inner_version, local_inner, remote_inner,
+                           old_in_sa, True)
+
+  def _TestTunnelRekey(self, inner_version, outer_version):
+    """Test packet input and output over a Virtual Tunnel Interface."""
+    tunnel = self.randomTunnel(outer_version)
+
+    try:
+      # Always use null_crypt, so we can check input and output separately
+      tunnel.TeardownXfrm()
+      tunnel.SetupXfrm(True)
+
+      local_inner = tunnel.addrs[inner_version]
+      remote_inner = _GetRemoteInnerAddress(inner_version)
+
+      self._CheckTunnelRekey(tunnel, inner_version, local_inner, remote_inner)
+    finally:
+      tunnel.TeardownXfrm()
+      tunnel.SetupXfrm(False)
+
 
 @unittest.skipUnless(net_test.LINUX_VERSION >= (3, 18, 0), "VTI Unsupported")
 class XfrmVtiTest(XfrmTunnelBase):
@@ -749,6 +917,9 @@
     self._TestTunnel(inner_version, outer_version,
                      self._CheckTunnelEncryptionWithIcmp, False)
 
+  def ParamTestVtiRekey(self, inner_version, outer_version):
+    self._TestTunnelRekey(inner_version, outer_version)
+
 
 @unittest.skipUnless(HAVE_XFRM_INTERFACES, "XFRM interfaces unsupported")
 class XfrmInterfaceTest(XfrmTunnelBase):
@@ -773,6 +944,9 @@
     self._TestTunnel(inner_version, outer_version,
                      self._CheckTunnelEncryptionWithIcmp, False)
 
+  def ParamTestXfrmIntfRekey(self, inner_version, outer_version):
+    self._TestTunnelRekey(inner_version, outer_version)
+
 
 if __name__ == "__main__":
   InjectTests()