[Do Not Merge] Update platform security string to 2017-10-01 in KLP dev Bug:64896113 am: f2f5343ba1 am: dafd187ab5 am: 47c92b6200 -s ours am: bc464d7e8f -s ours am: 5c68c85011 am: f1f39061a7 -s ours am: c5287e96a4 am: 0412b3d0e3 am: 2df41e5dec am: 09ec3bc311 -s ours am: 9dbcf7c176
am: 3eac316736
Change-Id: Iaa4d265c2217402121ed45b51d2eb103c4514f22
diff --git a/core/Makefile b/core/Makefile
index 5e83903..6a70b49 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -1383,6 +1383,8 @@
$(INSTALLED_USERDATATARBALL_TARGET): $(FS_GET_STATS) $(INTERNAL_USERDATAIMAGE_FILES)
$(build-userdatatarball-target)
+$(call dist-for-goals,userdatatarball,$(INSTALLED_USERDATATARBALL_TARGET))
+
.PHONY: userdatatarball-nodeps
userdatatarball-nodeps: $(FS_GET_STATS)
$(build-userdatatarball-target)
@@ -2050,7 +2052,7 @@
# the dependency will be set up later in build/core/main.mk.
$(PROGUARD_DICT_ZIP) :
@echo "Packaging Proguard obfuscation dictionary files."
- $(hide) dict_files=`find $(TARGET_OUT_COMMON_INTERMEDIATES)/APPS -name proguard_dictionary`; \
+ $(hide) dict_files=`find $(TARGET_OUT_COMMON_INTERMEDIATES)/APPS -name proguard_dictionary -o -name jack_dictionary`; \
if [ -n "$$dict_files" ]; then \
unobfuscated_jars=$${dict_files//proguard_dictionary/classes.jar}; \
zip -qX $@ $$dict_files $$unobfuscated_jars; \
diff --git a/core/pathmap.mk b/core/pathmap.mk
index edc584b..effc878 100644
--- a/core/pathmap.mk
+++ b/core/pathmap.mk
@@ -121,6 +121,7 @@
design \
percent \
recommendation \
+ transition \
v7/preference \
v14/preference \
v17/preference-leanback \
diff --git a/core/version_defaults.mk b/core/version_defaults.mk
index c1c4d11..befd346 100644
--- a/core/version_defaults.mk
+++ b/core/version_defaults.mk
@@ -43,7 +43,7 @@
# which is the version that we reveal to the end user.
# Update this value when the platform version changes (rather
# than overriding it somewhere else). Can be an arbitrary string.
- PLATFORM_VERSION := 7.1
+ PLATFORM_VERSION := 7.1.1
endif
ifeq "" "$(PLATFORM_SDK_VERSION)"
diff --git a/target/board/generic/BoardConfig.mk b/target/board/generic/BoardConfig.mk
index 325b0ce..02c1c88 100644
--- a/target/board/generic/BoardConfig.mk
+++ b/target/board/generic/BoardConfig.mk
@@ -45,28 +45,6 @@
# the GLES renderer disables itself if host GL acceleration isn't available.
USE_OPENGL_RENDERER := true
-# Set the phase offset of the system's vsync event relative to the hardware
-# vsync. The system's vsync event drives Choreographer and SurfaceFlinger's
-# rendering. This value is the number of nanoseconds after the hardware vsync
-# that the system vsync event will occur.
-#
-# This phase offset allows adjustment of the minimum latency from application
-# wake-up (by Choregographer) time to the time at which the resulting window
-# image is displayed. This value may be either positive (after the HW vsync)
-# or negative (before the HW vsync). Setting it to 0 will result in a
-# minimum latency of two vsync periods because the app and SurfaceFlinger
-# will run just after the HW vsync. Setting it to a positive number will
-# result in the minimum latency being:
-#
-# (2 * VSYNC_PERIOD - (vsyncPhaseOffsetNs % VSYNC_PERIOD))
-#
-# Note that reducing this latency makes it more likely for the applications
-# to not have their window content image ready in time. When this happens
-# the latency will end up being an additional vsync period, and animations
-# will hiccup. Therefore, this latency should be tuned somewhat
-# conservatively (or at least with awareness of the trade-off being made).
-VSYNC_EVENT_PHASE_OFFSET_NS := 0
-
TARGET_USERIMAGES_USE_EXT4 := true
BOARD_SYSTEMIMAGE_PARTITION_SIZE := 1879048192 # 1.75 GB
BOARD_USERDATAIMAGE_PARTITION_SIZE := 576716800
diff --git a/target/board/generic/sepolicy/file_contexts b/target/board/generic/sepolicy/file_contexts
index e8d32f7..e9502d9 100644
--- a/target/board/generic/sepolicy/file_contexts
+++ b/target/board/generic/sepolicy/file_contexts
@@ -9,6 +9,7 @@
/dev/block/vdc u:object_r:userdata_block_device:s0
/dev/goldfish_pipe u:object_r:qemu_device:s0
+/dev/goldfish_sync u:object_r:qemu_device:s0
/dev/qemu_.* u:object_r:qemu_device:s0
/dev/socket/qemud u:object_r:qemud_socket:s0
/dev/ttyGF[0-9]* u:object_r:serial_device:s0
diff --git a/target/board/generic_x86_64/BoardConfig.mk b/target/board/generic_x86_64/BoardConfig.mk
index 553bec9..283e9cc 100755
--- a/target/board/generic_x86_64/BoardConfig.mk
+++ b/target/board/generic_x86_64/BoardConfig.mk
@@ -38,7 +38,7 @@
USE_OPENGL_RENDERER := true
TARGET_USERIMAGES_USE_EXT4 := true
-BOARD_SYSTEMIMAGE_PARTITION_SIZE := 1879048192 # 1.75 GB
+BOARD_SYSTEMIMAGE_PARTITION_SIZE := 2147483648 # 2 GB
BOARD_USERDATAIMAGE_PARTITION_SIZE := 576716800
BOARD_CACHEIMAGE_PARTITION_SIZE := 69206016
BOARD_CACHEIMAGE_FILE_SYSTEM_TYPE := ext4
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index 6acc69d..0bbd8f6 100755
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -346,6 +346,14 @@
if recovery_image:
recovery_image.AddToZip(output_zip)
+ banner("recovery (two-step image)")
+ # The special recovery.img for two-step package use.
+ recovery_two_step_image = common.GetBootableImage(
+ "IMAGES/recovery-two-step.img", "recovery-two-step.img",
+ OPTIONS.input_tmp, "RECOVERY", two_step_image=True)
+ if recovery_two_step_image:
+ recovery_two_step_image.AddToZip(output_zip)
+
banner("system")
system_imgname = AddSystem(output_zip, recovery_img=recovery_image,
boot_img=boot_image)
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index 0d9aabd..31dabc7 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -696,10 +696,19 @@
with open(prefix + ".new.dat", "wb") as new_f:
for xf in self.transfers:
if xf.style == "zero":
- pass
+ tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
+ print("%10d %10d (%6.2f%%) %7s %s %s" % (
+ tgt_size, tgt_size, 100.0, xf.style, xf.tgt_name,
+ str(xf.tgt_ranges)))
+
elif xf.style == "new":
for piece in self.tgt.ReadRangeSet(xf.tgt_ranges):
new_f.write(piece)
+ tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
+ print("%10d %10d (%6.2f%%) %7s %s %s" % (
+ tgt_size, tgt_size, 100.0, xf.style,
+ xf.tgt_name, str(xf.tgt_ranges)))
+
elif xf.style == "diff":
src = self.src.ReadRangeSet(xf.src_ranges)
tgt = self.tgt.ReadRangeSet(xf.tgt_ranges)
@@ -726,6 +735,12 @@
# These are identical; we don't need to generate a patch,
# just issue copy commands on the device.
xf.style = "move"
+ if xf.src_ranges != xf.tgt_ranges:
+ print("%10d %10d (%6.2f%%) %7s %s %s (from %s)" % (
+ tgt_size, tgt_size, 100.0, xf.style,
+ xf.tgt_name if xf.tgt_name == xf.src_name else (
+ xf.tgt_name + " (from " + xf.src_name + ")"),
+ str(xf.tgt_ranges), str(xf.src_ranges)))
else:
# For files in zip format (eg, APKs, JARs, etc.) we would
# like to use imgdiff -z if possible (because it usually
@@ -773,10 +788,11 @@
size = len(patch)
with lock:
patches[patchnum] = (patch, xf)
- print("%10d %10d (%6.2f%%) %7s %s" % (
+ print("%10d %10d (%6.2f%%) %7s %s %s %s" % (
size, tgt_size, size * 100.0 / tgt_size, xf.style,
xf.tgt_name if xf.tgt_name == xf.src_name else (
- xf.tgt_name + " (from " + xf.src_name + ")")))
+ xf.tgt_name + " (from " + xf.src_name + ")"),
+ str(xf.tgt_ranges), str(xf.src_ranges)))
threads = [threading.Thread(target=diff_worker)
for _ in range(self.threads)]
@@ -1102,27 +1118,23 @@
def FindTransfers(self):
"""Parse the file_map to generate all the transfers."""
- def AddTransfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id,
- split=False):
- """Wrapper function for adding a Transfer().
+ def AddSplitTransfers(tgt_name, src_name, tgt_ranges, src_ranges,
+ style, by_id):
+ """Add one or multiple Transfer()s by splitting large files.
For BBOTA v3, we need to stash source blocks for resumable feature.
However, with the growth of file size and the shrink of the cache
partition source blocks are too large to be stashed. If a file occupies
- too many blocks (greater than MAX_BLOCKS_PER_DIFF_TRANSFER), we split it
- into smaller pieces by getting multiple Transfer()s.
+ too many blocks, we split it into smaller pieces by getting multiple
+ Transfer()s.
The downside is that after splitting, we may increase the package size
since the split pieces don't align well. According to our experiments,
1/8 of the cache size as the per-piece limit appears to be optimal.
Compared to the fixed 1024-block limit, it reduces the overall package
- size by 30% volantis, and 20% for angler and bullhead."""
+ size by 30% for volantis, and 20% for angler and bullhead."""
- # We care about diff transfers only.
- if style != "diff" or not split:
- Transfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id)
- return
-
+ # Possibly split large files into smaller chunks.
pieces = 0
cache_size = common.OPTIONS.cache_size
split_threshold = 0.125
@@ -1158,6 +1170,74 @@
Transfer(tgt_split_name, src_split_name, tgt_ranges, src_ranges, style,
by_id)
+ def AddTransfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id,
+ split=False):
+ """Wrapper function for adding a Transfer()."""
+
+ # We specialize diff transfers only (which covers bsdiff/imgdiff/move);
+ # otherwise add the Transfer() as is.
+ if style != "diff" or not split:
+ Transfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id)
+ return
+
+ # Handle .odex files specially to analyze the block-wise difference. If
+ # most of the blocks are identical with only few changes (e.g. header),
+ # we will patch the changed blocks only. This avoids stashing unchanged
+ # blocks while patching. We limit the analysis to files without size
+ # changes only. This is to avoid sacrificing the OTA generation cost too
+ # much.
+ if (tgt_name.split(".")[-1].lower() == 'odex' and
+ tgt_ranges.size() == src_ranges.size()):
+
+ # 0.5 threshold can be further tuned. The tradeoff is: if only very
+ # few blocks remain identical, we lose the opportunity to use imgdiff
+ # that may have better compression ratio than bsdiff.
+ crop_threshold = 0.5
+
+ tgt_skipped = RangeSet()
+ src_skipped = RangeSet()
+ tgt_size = tgt_ranges.size()
+ tgt_changed = 0
+ for src_block, tgt_block in zip(src_ranges.next_item(),
+ tgt_ranges.next_item()):
+ src_rs = RangeSet(str(src_block))
+ tgt_rs = RangeSet(str(tgt_block))
+ if self.src.ReadRangeSet(src_rs) == self.tgt.ReadRangeSet(tgt_rs):
+ tgt_skipped = tgt_skipped.union(tgt_rs)
+ src_skipped = src_skipped.union(src_rs)
+ else:
+ tgt_changed += tgt_rs.size()
+
+ # Terminate early if no clear sign of benefits.
+ if tgt_changed > tgt_size * crop_threshold:
+ break
+
+ if tgt_changed < tgt_size * crop_threshold:
+ assert tgt_changed + tgt_skipped.size() == tgt_size
+ print('%10d %10d (%6.2f%%) %s' % (tgt_skipped.size(), tgt_size,
+ tgt_skipped.size() * 100.0 / tgt_size, tgt_name))
+ AddSplitTransfers(
+ "%s-skipped" % (tgt_name,),
+ "%s-skipped" % (src_name,),
+ tgt_skipped, src_skipped, style, by_id)
+
+ # Intentionally change the file extension to avoid being imgdiff'd as
+ # the files are no longer in their original format.
+ tgt_name = "%s-cropped" % (tgt_name,)
+ src_name = "%s-cropped" % (src_name,)
+ tgt_ranges = tgt_ranges.subtract(tgt_skipped)
+ src_ranges = src_ranges.subtract(src_skipped)
+
+ # Possibly having no changed blocks.
+ if not tgt_ranges:
+ return
+
+ # Add the transfer(s).
+ AddSplitTransfers(
+ tgt_name, src_name, tgt_ranges, src_ranges, style, by_id)
+
+ print("Finding transfers...")
+
empty = RangeSet()
for tgt_fn, tgt_ranges in self.tgt.file_map.items():
if tgt_fn == "__ZERO":
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index d69192e..c4bf893 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -401,13 +401,17 @@
def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
- has_ramdisk=False):
+ has_ramdisk=False, two_step_image=False):
"""Build a bootable image from the specified sourcedir.
Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
- 'sourcedir'), and turn them into a boot image. Return the image data, or
- None if sourcedir does not appear to contains files for building the
- requested image."""
+ 'sourcedir'), and turn them into a boot image. 'two_step_image' indicates if
+ we are building a two-step special image (i.e. building a recovery image to
+ be loaded into /boot in two-step OTAs).
+
+ Return the image data, or None if sourcedir does not appear to contain files
+ for building the requested image.
+ """
def make_ramdisk():
ramdisk_img = tempfile.NamedTemporaryFile()
@@ -491,7 +495,12 @@
if (info_dict.get("boot_signer", None) == "true" and
info_dict.get("verity_key", None)):
- path = "/" + os.path.basename(sourcedir).lower()
+ # Hard-code the path as "/boot" for two-step special recovery image (which
+ # will be loaded into /boot during the two-step OTA).
+ if two_step_image:
+ path = "/boot"
+ else:
+ path = "/" + os.path.basename(sourcedir).lower()
cmd = [OPTIONS.boot_signer_path]
cmd.extend(OPTIONS.boot_signer_args)
cmd.extend([path, img.name,
@@ -536,7 +545,7 @@
def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
- info_dict=None):
+ info_dict=None, two_step_image=False):
"""Return a File object with the desired bootable image.
Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
@@ -568,7 +577,7 @@
fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
data = _BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
os.path.join(unpack_dir, fs_config),
- info_dict, has_ramdisk)
+ info_dict, has_ramdisk, two_step_image)
if data:
return File(name, data)
return None
diff --git a/tools/releasetools/img_from_target_files.py b/tools/releasetools/img_from_target_files.py
index aa21d7e..84e0e63 100755
--- a/tools/releasetools/img_from_target_files.py
+++ b/tools/releasetools/img_from_target_files.py
@@ -86,6 +86,8 @@
continue
if not image.endswith(".img"):
continue
+ if image == "recovery-two-step.img":
+ continue
common.ZipWrite(
output_zip, os.path.join(images_path, image), image)
done = True
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index 6433523..24b42ee 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -480,6 +480,39 @@
script.AssertOemProperty(prop, oem_dict.get(prop))
+def _WriteRecoveryImageToBoot(script, output_zip):
+ """Find and write recovery image to /boot in two-step OTA.
+
+ In two-step OTAs, we write recovery image to /boot as the first step so that
+ we can reboot to there and install a new recovery image to /recovery.
+ A special "recovery-two-step.img" will be preferred, which encodes the correct
+ path of "/boot". Otherwise the device may show "device is corrupt" message
+ when booting into /boot.
+
+ Fall back to using the regular recovery.img if the two-step recovery image
+ doesn't exist. Note that rebuilding the special image at this point may be
+ infeasible, because we don't have the desired boot signer and keys when
+ calling ota_from_target_files.py.
+ """
+
+ recovery_two_step_img_name = "recovery-two-step.img"
+ recovery_two_step_img_path = os.path.join(
+ OPTIONS.input_tmp, "IMAGES", recovery_two_step_img_name)
+ if os.path.exists(recovery_two_step_img_path):
+ recovery_two_step_img = common.GetBootableImage(
+ recovery_two_step_img_name, recovery_two_step_img_name,
+ OPTIONS.input_tmp, "RECOVERY")
+ common.ZipWriteStr(
+ output_zip, recovery_two_step_img_name, recovery_two_step_img.data)
+ print "two-step package: using %s in stage 1/3" % (
+ recovery_two_step_img_name,)
+ script.WriteRawImage("/boot", recovery_two_step_img_name)
+ else:
+ print "two-step package: using recovery.img in stage 1/3"
+ # The "recovery.img" entry has been written into package earlier.
+ script.WriteRawImage("/boot", "recovery.img")
+
+
def HasRecoveryPatch(target_files_zip):
namelist = [name for name in target_files_zip.namelist()]
return ("SYSTEM/recovery-from-boot.p" in namelist or
@@ -629,6 +662,9 @@
script.AppendExtra("""
if get_stage("%(bcb_dev)s") == "2/3" then
""" % bcb_dev)
+
+ # Stage 2/3: Write recovery image to /recovery (currently running /boot).
+ script.Comment("Stage 2/3")
script.WriteRawImage("/recovery", "recovery.img")
script.AppendExtra("""
set_stage("%(bcb_dev)s", "3/3");
@@ -636,6 +672,9 @@
else if get_stage("%(bcb_dev)s") == "3/3" then
""" % bcb_dev)
+ # Stage 3/3: Make changes.
+ script.Comment("Stage 3/3")
+
# Dump fingerprints
script.Print("Target: %s" % CalculateFingerprint(
oem_props, oem_dict, OPTIONS.info_dict))
@@ -735,7 +774,11 @@
set_stage("%(bcb_dev)s", "");
""" % bcb_dev)
script.AppendExtra("else\n")
- script.WriteRawImage("/boot", "recovery.img")
+
+ # Stage 1/3: Nothing to verify for full OTA. Write recovery image to /boot.
+ script.Comment("Stage 1/3")
+ _WriteRecoveryImageToBoot(script, output_zip)
+
script.AppendExtra("""
set_stage("%(bcb_dev)s", "2/3");
reboot_now("%(bcb_dev)s", "");
@@ -966,6 +1009,9 @@
script.AppendExtra("""
if get_stage("%(bcb_dev)s") == "2/3" then
""" % bcb_dev)
+
+ # Stage 2/3: Write recovery image to /recovery (currently running /boot).
+ script.Comment("Stage 2/3")
script.AppendExtra("sleep(20);\n")
script.WriteRawImage("/recovery", "recovery.img")
script.AppendExtra("""
@@ -974,6 +1020,9 @@
else if get_stage("%(bcb_dev)s") != "3/3" then
""" % bcb_dev)
+ # Stage 1/3: (a) Verify the current system.
+ script.Comment("Stage 1/3")
+
# Dump fingerprints
script.Print("Source: %s" % CalculateFingerprint(
oem_props, oem_dict, OPTIONS.source_info_dict))
@@ -1037,13 +1086,18 @@
device_specific.IncrementalOTA_VerifyEnd()
if OPTIONS.two_step:
- script.WriteRawImage("/boot", "recovery.img")
+ # Stage 1/3: (b) Write recovery image to /boot.
+ _WriteRecoveryImageToBoot(script, output_zip)
+
script.AppendExtra("""
set_stage("%(bcb_dev)s", "2/3");
reboot_now("%(bcb_dev)s", "");
else
""" % bcb_dev)
+ # Stage 3/3: Make changes.
+ script.Comment("Stage 3/3")
+
# Verify the existing partitions.
system_diff.WriteVerifyScript(script, touched_blocks_only=True)
if vendor_diff:
@@ -1622,6 +1676,9 @@
script.AppendExtra("""
if get_stage("%(bcb_dev)s") == "2/3" then
""" % bcb_dev)
+
+ # Stage 2/3: Write recovery image to /recovery (currently running /boot).
+ script.Comment("Stage 2/3")
script.AppendExtra("sleep(20);\n")
script.WriteRawImage("/recovery", "recovery.img")
script.AppendExtra("""
@@ -1630,6 +1687,9 @@
else if get_stage("%(bcb_dev)s") != "3/3" then
""" % bcb_dev)
+ # Stage 1/3: (a) Verify the current system.
+ script.Comment("Stage 1/3")
+
# Dump fingerprints
script.Print("Source: %s" % (source_fp,))
script.Print("Target: %s" % (target_fp,))
@@ -1674,13 +1734,18 @@
device_specific.IncrementalOTA_VerifyEnd()
if OPTIONS.two_step:
- script.WriteRawImage("/boot", "recovery.img")
+ # Stage 1/3: (b) Write recovery image to /boot.
+ _WriteRecoveryImageToBoot(script, output_zip)
+
script.AppendExtra("""
set_stage("%(bcb_dev)s", "2/3");
reboot_now("%(bcb_dev)s", "");
else
""" % bcb_dev)
+ # Stage 3/3: Make changes.
+ script.Comment("Stage 3/3")
+
script.Comment("---- start making changes here ----")
device_specific.IncrementalOTA_InstallBegin()
diff --git a/tools/releasetools/rangelib.py b/tools/releasetools/rangelib.py
index aa572cc..c9bd375 100644
--- a/tools/releasetools/rangelib.py
+++ b/tools/releasetools/rangelib.py
@@ -300,6 +300,20 @@
n -= e - s
return RangeSet(data=out)
+ def next_item(self):
+ """Return the next integer represented by the RangeSet.
+
+ >>> list(RangeSet("0-9").next_item())
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+ >>> list(RangeSet("10-19 3-5").next_item())
+ [3, 4, 5, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
+ >>> list(RangeSet("10-19 3 5 7").next_item())
+ [3, 5, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
+ """
+ for s, e in self:
+ for element in range(s, e):
+ yield element
+
if __name__ == "__main__":
import doctest
diff --git a/tools/releasetools/test_rangelib.py b/tools/releasetools/test_rangelib.py
index a61a64e..edf1c4b 100644
--- a/tools/releasetools/test_rangelib.py
+++ b/tools/releasetools/test_rangelib.py
@@ -124,3 +124,14 @@
self.assertTrue(RangeSet(data=[2, 9, 30, 31, 31, 32, 35, 36]).monotonic)
self.assertTrue(RangeSet(data=[0, 5, 5, 10]).monotonic)
self.assertFalse(RangeSet(data=[5, 10, 0, 5]).monotonic)
+
+ def test_next_item(self):
+ self.assertEqual(
+ list(RangeSet("0-9").next_item()),
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+ self.assertEqual(
+ list(RangeSet("10-19 3-5").next_item()),
+ [3, 4, 5, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19])
+ self.assertEqual(
+ list(RangeSet("10-19 3 5 7").next_item()),
+ [3, 5, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19])