am 7bd80ca6: Merge "core: Let the build specify mount options for recovery" into lmp-dev
* commit '7bd80ca6ea2b0c5c39bdb05013fb3d0188fbcfa8':
core: Let the build specify mount options for recovery
diff --git a/CleanSpec.mk b/CleanSpec.mk
index f348692..a4a44ca 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -299,6 +299,11 @@
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+# API 22!
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/build.prop)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/app/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/APPS/*)
+
# ************************************************
# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
# ************************************************
diff --git a/core/Makefile b/core/Makefile
index f411e06..b749bac 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -1417,6 +1417,7 @@
$(hide) echo "use_set_metadata=1" >> $(zip_root)/META/misc_info.txt
$(hide) echo "multistage_support=1" >> $(zip_root)/META/misc_info.txt
$(hide) echo "update_rename_support=1" >> $(zip_root)/META/misc_info.txt
+ $(hide) echo "blockimgdiff_versions=1,2" >> $(zip_root)/META/misc_info.txt
ifneq ($(OEM_THUMBPRINT_PROPERTIES),)
# OTA scripts are only interested in fingerprint related properties
$(hide) echo "oem_fingerprint_properties=$(OEM_THUMBPRINT_PROPERTIES)" >> $(zip_root)/META/misc_info.txt
diff --git a/core/binary.mk b/core/binary.mk
index d339317..1e313ff 100644
--- a/core/binary.mk
+++ b/core/binary.mk
@@ -184,9 +184,11 @@
my_compiler_dependencies :=
-####################################################
+##################################################################
## Add FDO flags if FDO is turned on and supported
-####################################################
+## Please note that we will do option filtering during FDO build.
+## i.e. Os->O2, remove -fno-early-inlining and -finline-limit.
+##################################################################
ifeq ($(strip $(LOCAL_FDO_SUPPORT)), true)
ifeq ($(strip $(LOCAL_IS_HOST_MODULE)),)
my_cflags += $($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_FDO_CFLAGS)
@@ -923,6 +925,21 @@
my_ldflags := $(call $(LOCAL_2ND_ARCH_VAR_PREFIX)convert-to-$(my_host)clang-flags,$(my_ldflags))
endif
+ifeq ($(LOCAL_FDO_SUPPORT), true)
+ build_with_fdo := false
+ ifeq ($(BUILD_FDO_INSTRUMENT), true)
+ build_with_fdo := true
+ endif
+ ifeq ($(BUILD_FDO_OPTIMIZE), true)
+ build_with_fdo := true
+ endif
+ ifeq ($(build_with_fdo), true)
+ my_cflags := $(patsubst -Os,-O2,$(my_cflags))
+ fdo_incompatible_flags=-fno-early-inlining -finline-limit=%
+ my_cflags := $(filter-out $(fdo_incompatible_flags),$(my_cflags))
+ endif
+endif
+
$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_YACCFLAGS := $(LOCAL_YACCFLAGS)
$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_ASFLAGS := $(my_asflags)
$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_CONLYFLAGS := $(LOCAL_CONLYFLAGS)
diff --git a/core/cleanbuild.mk b/core/cleanbuild.mk
index 1bada38..c820ad5 100644
--- a/core/cleanbuild.mk
+++ b/core/cleanbuild.mk
@@ -220,6 +220,7 @@
$(PRODUCT_OUT)/obj/JAVA_LIBRARIES \
$(PRODUCT_OUT)/obj/FAKE \
$(PRODUCT_OUT)/obj/EXECUTABLES/adbd_intermediates \
+ $(PRODUCT_OUT)/obj/STATIC_LIBRARIES/libfs_mgr_intermediates \
$(PRODUCT_OUT)/obj/EXECUTABLES/init_intermediates \
$(PRODUCT_OUT)/obj/ETC/mac_permissions.xml_intermediates \
$(PRODUCT_OUT)/obj/ETC/sepolicy_intermediates \
diff --git a/core/combo/HOST_darwin-x86.mk b/core/combo/HOST_darwin-x86.mk
index 4a2bfe3..ec37993 100644
--- a/core/combo/HOST_darwin-x86.mk
+++ b/core/combo/HOST_darwin-x86.mk
@@ -37,8 +37,8 @@
ifneq (,$(strip $(wildcard $($(combo_2nd_arch_prefix)HOST_TOOLCHAIN_PREFIX)-gcc)))
$(combo_2nd_arch_prefix)HOST_CC := $($(combo_2nd_arch_prefix)HOST_TOOLCHAIN_PREFIX)-gcc
$(combo_2nd_arch_prefix)HOST_CXX := $($(combo_2nd_arch_prefix)HOST_TOOLCHAIN_PREFIX)-g++
-ifeq ($(mac_sdk_version),10.8)
-# Mac SDK 10.8 no longer has stdarg.h, etc
+ifneq ($(filter 10.8 10.9, $(mac_sdk_version)),)
+# Mac SDK 10.8+ no longer has stdarg.h, etc
host_toolchain_header := $($(combo_2nd_arch_prefix)HOST_TOOLCHAIN_ROOT)/lib/gcc/i686-apple-darwin$(gcc_darwin_version)/4.2.1/include
$(combo_2nd_arch_prefix)HOST_GLOBAL_CFLAGS += -isystem $(host_toolchain_header)
endif
diff --git a/core/combo/HOST_darwin-x86_64.mk b/core/combo/HOST_darwin-x86_64.mk
index 0bc0227..a776a69 100644
--- a/core/combo/HOST_darwin-x86_64.mk
+++ b/core/combo/HOST_darwin-x86_64.mk
@@ -37,8 +37,8 @@
ifneq (,$(strip $(wildcard $(HOST_TOOLCHAIN_PREFIX)-gcc)))
HOST_CC := $(HOST_TOOLCHAIN_PREFIX)-gcc
HOST_CXX := $(HOST_TOOLCHAIN_PREFIX)-g++
-ifeq ($(mac_sdk_version),10.8)
-# Mac SDK 10.8 no longer has stdarg.h, etc
+ifneq ($(filter 10.8 10.9, $(mac_sdk_version)),)
+# Mac SDK 10.8+ no longer has stdarg.h, etc
host_toolchain_header := $(HOST_TOOLCHAIN_ROOT)/lib/gcc/i686-apple-darwin$(gcc_darwin_version)/4.2.1/include
HOST_GLOBAL_CFLAGS += -isystem $(host_toolchain_header)
endif
diff --git a/core/combo/mac_version.mk b/core/combo/mac_version.mk
index b49feee..6defba7 100644
--- a/core/combo/mac_version.mk
+++ b/core/combo/mac_version.mk
@@ -9,7 +9,7 @@
build_mac_version := $(shell sw_vers -productVersion)
-mac_sdk_versions_supported := 10.6 10.7 10.8
+mac_sdk_versions_supported := 10.6 10.7 10.8 10.9
ifneq ($(strip $(MAC_SDK_VERSION)),)
mac_sdk_version := $(MAC_SDK_VERSION)
ifeq ($(filter $(mac_sdk_version),$(mac_sdk_versions_supported)),)
diff --git a/core/version_defaults.mk b/core/version_defaults.mk
index 8cb8d26..1acc320 100644
--- a/core/version_defaults.mk
+++ b/core/version_defaults.mk
@@ -41,7 +41,7 @@
# which is the version that we reveal to the end user.
# Update this value when the platform version changes (rather
# than overriding it somewhere else). Can be an arbitrary string.
- PLATFORM_VERSION := 5.0
+ PLATFORM_VERSION := LollipopMR1
endif
ifeq "" "$(PLATFORM_SDK_VERSION)"
@@ -53,7 +53,7 @@
# intermediate builds). During development, this number remains at the
# SDK version the branch is based on and PLATFORM_VERSION_CODENAME holds
# the code-name of the new development work.
- PLATFORM_SDK_VERSION := 21
+ PLATFORM_SDK_VERSION := 22
endif
ifeq "" "$(PLATFORM_VERSION_CODENAME)"
diff --git a/envsetup.sh b/envsetup.sh
index a9bd707..f4c6566 100644
--- a/envsetup.sh
+++ b/envsetup.sh
@@ -570,7 +570,8 @@
{
local arch="$(echo $* | xargs -n 1 echo | \grep -E '^(arm|x86|mips|armv5|arm64|x86_64|mips64)$' | xargs)"
local variant="$(echo $* | xargs -n 1 echo | \grep -E '^(user|userdebug|eng)$' | xargs)"
- local apps="$(echo $* | xargs -n 1 echo | \grep -E -v '^(user|userdebug|eng|arm|x86|mips|armv5|arm64|x86_64|mips64)$' | xargs)"
+ local density="$(echo $* | xargs -n 1 echo | \grep -E '^(ldpi|mdpi|tvdpi|hdpi|xhdpi|xxhdpi|xxxhdpi|alldpi)$' | xargs)"
+ local apps="$(echo $* | xargs -n 1 echo | \grep -E -v '^(user|userdebug|eng|arm|x86|mips|armv5|arm64|x86_64|mips64|ldpi|mdpi|tvdpi|hdpi|xhdpi|xxhdpi|xxxhdpi|alldpi)$' | xargs)"
if [ $(echo $arch | wc -w) -gt 1 ]; then
echo "tapas: Error: Multiple build archs supplied: $arch"
@@ -580,6 +581,10 @@
echo "tapas: Error: Multiple build variants supplied: $variant"
return
fi
+ if [ $(echo $density | wc -w) -gt 1 ]; then
+ echo "tapas: Error: Multiple densities supplied: $density"
+ return
+ fi
local product=full
case $arch in
@@ -599,6 +604,7 @@
export TARGET_PRODUCT=$product
export TARGET_BUILD_VARIANT=$variant
+ export TARGET_BUILD_DENSITY=$density
export TARGET_BUILD_TYPE=release
export TARGET_BUILD_APPS=$apps
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index 216486c..8b179d5 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -16,6 +16,7 @@
from collections import deque, OrderedDict
from hashlib import sha1
+import heapq
import itertools
import multiprocessing
import os
@@ -142,9 +143,16 @@
self.goes_before = {}
self.goes_after = {}
+ self.stash_before = []
+ self.use_stash = []
+
self.id = len(by_id)
by_id.append(self)
+ def NetStashChange(self):
+ return (sum(sr.size() for (_, sr) in self.stash_before) -
+ sum(sr.size() for (_, sr) in self.use_stash))
+
def __str__(self):
return (str(self.id) + ": <" + str(self.src_ranges) + " " + self.style +
" to " + str(self.tgt_ranges) + ">")
@@ -182,11 +190,14 @@
# original image.
class BlockImageDiff(object):
- def __init__(self, tgt, src=None, threads=None):
+ def __init__(self, tgt, src=None, threads=None, version=2):
if threads is None:
threads = multiprocessing.cpu_count() // 2
if threads == 0: threads = 1
self.threads = threads
+ self.version = version
+
+ assert version in (1, 2)
self.tgt = tgt
if src is None:
@@ -221,7 +232,12 @@
self.FindVertexSequence()
# Fix up the ordering dependencies that the sequence didn't
# satisfy.
- self.RemoveBackwardEdges()
+ if self.version == 1:
+ self.RemoveBackwardEdges()
+ else:
+ self.ReverseBackwardEdges()
+ self.ImproveVertexSequence()
+
# Double-check our work.
self.AssertSequenceGood()
@@ -231,18 +247,87 @@
def WriteTransfers(self, prefix):
out = []
- out.append("1\n") # format version number
total = 0
performs_read = False
+ stashes = {}
+ stashed_blocks = 0
+ max_stashed_blocks = 0
+
+ free_stash_ids = []
+ next_stash_id = 0
+
for xf in self.transfers:
- # zero [rangeset]
- # new [rangeset]
- # bsdiff patchstart patchlen [src rangeset] [tgt rangeset]
- # imgdiff patchstart patchlen [src rangeset] [tgt rangeset]
- # move [src rangeset] [tgt rangeset]
- # erase [rangeset]
+ if self.version < 2:
+ assert not xf.stash_before
+ assert not xf.use_stash
+
+ for s, sr in xf.stash_before:
+ assert s not in stashes
+ if free_stash_ids:
+ sid = heapq.heappop(free_stash_ids)
+ else:
+ sid = next_stash_id
+ next_stash_id += 1
+ stashes[s] = sid
+ stashed_blocks += sr.size()
+ out.append("stash %d %s\n" % (sid, sr.to_string_raw()))
+
+ if stashed_blocks > max_stashed_blocks:
+ max_stashed_blocks = stashed_blocks
+
+ if self.version == 1:
+ src_string = xf.src_ranges.to_string_raw()
+ elif self.version == 2:
+
+ # <# blocks> <src ranges>
+ # OR
+ # <# blocks> <src ranges> <src locs> <stash refs...>
+ # OR
+ # <# blocks> - <stash refs...>
+
+ size = xf.src_ranges.size()
+ src_string = [str(size)]
+
+ unstashed_src_ranges = xf.src_ranges
+ mapped_stashes = []
+ for s, sr in xf.use_stash:
+ sid = stashes.pop(s)
+ stashed_blocks -= sr.size()
+ unstashed_src_ranges = unstashed_src_ranges.subtract(sr)
+ sr = xf.src_ranges.map_within(sr)
+ mapped_stashes.append(sr)
+ src_string.append("%d:%s" % (sid, sr.to_string_raw()))
+ heapq.heappush(free_stash_ids, sid)
+
+ if unstashed_src_ranges:
+ src_string.insert(1, unstashed_src_ranges.to_string_raw())
+ if xf.use_stash:
+ mapped_unstashed = xf.src_ranges.map_within(unstashed_src_ranges)
+ src_string.insert(2, mapped_unstashed.to_string_raw())
+ mapped_stashes.append(mapped_unstashed)
+ self.AssertPartition(RangeSet(data=(0, size)), mapped_stashes)
+ else:
+ src_string.insert(1, "-")
+ self.AssertPartition(RangeSet(data=(0, size)), mapped_stashes)
+
+ src_string = " ".join(src_string)
+
+ # both versions:
+ # zero <rangeset>
+ # new <rangeset>
+ # erase <rangeset>
+ #
+ # version 1:
+ # bsdiff patchstart patchlen <src rangeset> <tgt rangeset>
+ # imgdiff patchstart patchlen <src rangeset> <tgt rangeset>
+ # move <src rangeset> <tgt rangeset>
+ #
+ # version 2:
+ # bsdiff patchstart patchlen <tgt rangeset> <src_string>
+ # imgdiff patchstart patchlen <tgt rangeset> <src_string>
+ # move <tgt rangeset> <src_string>
tgt_size = xf.tgt_ranges.size()
@@ -255,17 +340,27 @@
assert xf.tgt_ranges
assert xf.src_ranges.size() == tgt_size
if xf.src_ranges != xf.tgt_ranges:
- out.append("%s %s %s\n" % (
- xf.style,
- xf.src_ranges.to_string_raw(), xf.tgt_ranges.to_string_raw()))
+ if self.version == 1:
+ out.append("%s %s %s\n" % (
+ xf.style,
+ xf.src_ranges.to_string_raw(), xf.tgt_ranges.to_string_raw()))
+ elif self.version == 2:
+ out.append("%s %s %s\n" % (
+ xf.style,
+ xf.tgt_ranges.to_string_raw(), src_string))
total += tgt_size
elif xf.style in ("bsdiff", "imgdiff"):
performs_read = True
assert xf.tgt_ranges
assert xf.src_ranges
- out.append("%s %d %d %s %s\n" % (
- xf.style, xf.patch_start, xf.patch_len,
- xf.src_ranges.to_string_raw(), xf.tgt_ranges.to_string_raw()))
+ if self.version == 1:
+ out.append("%s %d %d %s %s\n" % (
+ xf.style, xf.patch_start, xf.patch_len,
+ xf.src_ranges.to_string_raw(), xf.tgt_ranges.to_string_raw()))
+ elif self.version == 2:
+ out.append("%s %d %d %s %s\n" % (
+ xf.style, xf.patch_start, xf.patch_len,
+ xf.tgt_ranges.to_string_raw(), src_string))
total += tgt_size
elif xf.style == "zero":
assert xf.tgt_ranges
@@ -276,7 +371,10 @@
else:
raise ValueError, "unknown transfer style '%s'\n" % (xf.style,)
- out.insert(1, str(total) + "\n")
+
+ # sanity check: abort if we're going to need more than 512 MB of
+ # stash space
+ assert max_stashed_blocks * self.tgt.blocksize < (512 << 20)
all_tgt = RangeSet(data=(0, self.tgt.total_blocks))
if performs_read:
@@ -289,12 +387,24 @@
else:
# if nothing is read (ie, this is a full OTA), then we can start
# by erasing the entire partition.
- out.insert(2, "erase %s\n" % (all_tgt.to_string_raw(),))
+ out.insert(0, "erase %s\n" % (all_tgt.to_string_raw(),))
+
+ out.insert(0, "%d\n" % (self.version,)) # format version number
+ out.insert(1, str(total) + "\n")
+ if self.version >= 2:
+ # version 2 only: after the total block count, we give the number
+ # of stash slots needed, and the maximum size needed (in blocks)
+ out.insert(2, str(next_stash_id) + "\n")
+ out.insert(3, str(max_stashed_blocks) + "\n")
with open(prefix + ".transfer.list", "wb") as f:
for i in out:
f.write(i)
+ if self.version >= 2:
+ print("max stashed blocks: %d (%d bytes)\n" % (
+ max_stashed_blocks, max_stashed_blocks * self.tgt.blocksize))
+
def ComputePatches(self, prefix):
print("Reticulating splines...")
diff_q = []
@@ -409,7 +519,13 @@
# Imagine processing the transfers in order.
for xf in self.transfers:
# Check that the input blocks for this transfer haven't yet been touched.
- assert not touched.overlaps(xf.src_ranges)
+
+ x = xf.src_ranges
+ if self.version >= 2:
+ for _, sr in xf.use_stash:
+ x = x.subtract(sr)
+
+ assert not touched.overlaps(x)
# Check that the output blocks for this transfer haven't yet been touched.
assert not touched.overlaps(xf.tgt_ranges)
# Touch all the blocks written by this transfer.
@@ -418,6 +534,47 @@
# Check that we've written every target block.
assert touched == self.tgt.care_map
+ def ImproveVertexSequence(self):
+ print("Improving vertex order...")
+
+ # At this point our digraph is acyclic; we reversed any edges that
+ # were backwards in the heuristically-generated sequence. The
+ # previously-generated order is still acceptable, but we hope to
+ # find a better order that needs less memory for stashed data.
+ # Now we do a topological sort to generate a new vertex order,
+ # using a greedy algorithm to choose which vertex goes next
+ # whenever we have a choice.
+
+ # Make a copy of the edge set; this copy will get destroyed by the
+ # algorithm.
+ for xf in self.transfers:
+ xf.incoming = xf.goes_after.copy()
+ xf.outgoing = xf.goes_before.copy()
+
+ L = [] # the new vertex order
+
+ # S is the set of sources in the remaining graph; we always choose
+ # the one that leaves the least amount of stashed data after it's
+ # executed.
+ S = [(u.NetStashChange(), u.order, u) for u in self.transfers
+ if not u.incoming]
+ heapq.heapify(S)
+
+ while S:
+ _, _, xf = heapq.heappop(S)
+ L.append(xf)
+ for u in xf.outgoing:
+ del u.incoming[xf]
+ if not u.incoming:
+ heapq.heappush(S, (u.NetStashChange(), u.order, u))
+
+ # if this fails then our graph had a cycle.
+ assert len(L) == len(self.transfers)
+
+ self.transfers = L
+ for i, xf in enumerate(L):
+ xf.order = i
+
def RemoveBackwardEdges(self):
print("Removing backward edges...")
in_order = 0
@@ -425,19 +582,17 @@
lost_source = 0
for xf in self.transfers:
- io = 0
- ooo = 0
lost = 0
size = xf.src_ranges.size()
for u in xf.goes_before:
# xf should go before u
if xf.order < u.order:
# it does, hurray!
- io += 1
+ in_order += 1
else:
# it doesn't, boo. trim the blocks that u writes from xf's
# source, so that xf can go after u.
- ooo += 1
+ out_of_order += 1
assert xf.src_ranges.overlaps(u.tgt_ranges)
xf.src_ranges = xf.src_ranges.subtract(u.tgt_ranges)
xf.intact = False
@@ -448,8 +603,6 @@
lost = size - xf.src_ranges.size()
lost_source += lost
- in_order += io
- out_of_order += ooo
print((" %d/%d dependencies (%.2f%%) were violated; "
"%d source blocks removed.") %
@@ -458,6 +611,48 @@
if (in_order + out_of_order) else 0.0,
lost_source))
+ def ReverseBackwardEdges(self):
+ print("Reversing backward edges...")
+ in_order = 0
+ out_of_order = 0
+ stashes = 0
+ stash_size = 0
+
+ for xf in self.transfers:
+ lost = 0
+ size = xf.src_ranges.size()
+ for u in xf.goes_before.copy():
+ # xf should go before u
+ if xf.order < u.order:
+ # it does, hurray!
+ in_order += 1
+ else:
+ # it doesn't, boo. modify u to stash the blocks that it
+ # writes that xf wants to read, and then require u to go
+ # before xf.
+ out_of_order += 1
+
+ overlap = xf.src_ranges.intersect(u.tgt_ranges)
+ assert overlap
+
+ u.stash_before.append((stashes, overlap))
+ xf.use_stash.append((stashes, overlap))
+ stashes += 1
+ stash_size += overlap.size()
+
+ # reverse the edge direction; now xf must go after u
+ del xf.goes_before[u]
+ del u.goes_after[xf]
+ xf.goes_after[u] = None # value doesn't matter
+ u.goes_before[xf] = None
+
+ print((" %d/%d dependencies (%.2f%%) were violated; "
+ "%d source blocks stashed.") %
+ (out_of_order, in_order + out_of_order,
+ (out_of_order * 100.0 / (in_order + out_of_order))
+ if (in_order + out_of_order) else 0.0,
+ stash_size))
+
def FindVertexSequence(self):
print("Finding vertex sequence...")
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 815c76c..96075a9 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -1030,7 +1030,14 @@
self.partition = partition
self.check_first_block = check_first_block
- b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads)
+ version = 1
+ if OPTIONS.info_dict:
+ version = max(
+ int(i) for i in
+ OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
+
+ b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
+ version=version)
tmpdir = tempfile.mkdtemp()
OPTIONS.tempfiles.append(tmpdir)
self.path = os.path.join(tmpdir, partition)
diff --git a/tools/releasetools/rangelib.py b/tools/releasetools/rangelib.py
index 8a85d2d..7279c60 100644
--- a/tools/releasetools/rangelib.py
+++ b/tools/releasetools/rangelib.py
@@ -24,7 +24,9 @@
lots of runs."""
def __init__(self, data=None):
- if data:
+ if isinstance(data, str):
+ self._parse_internal(data)
+ elif data:
self.data = tuple(self._remove_pairs(data))
else:
self.data = ()
@@ -46,6 +48,9 @@
else:
return self.to_string()
+ def __repr__(self):
+ return '<RangeSet("' + self.to_string() + '")>'
+
@classmethod
def parse(cls, text):
"""Parse a text string consisting of a space-separated list of
@@ -59,7 +64,9 @@
"15-20 30 10-14" is not, even though they represent the same set
of blocks (and the two RangeSets will compare equal with ==).
"""
+ return cls(text)
+ def _parse_internal(self, text):
data = []
last = -1
monotonic = True
@@ -81,9 +88,8 @@
else:
monotonic = True
data.sort()
- r = RangeSet(cls._remove_pairs(data))
- r.monotonic = monotonic
- return r
+ self.data = tuple(self._remove_pairs(data))
+ self.monotonic = monotonic
@staticmethod
def _remove_pairs(source):
@@ -113,7 +119,13 @@
def union(self, other):
"""Return a new RangeSet representing the union of this RangeSet
- with the argument."""
+ with the argument.
+
+ >>> RangeSet("10-19 30-34").union(RangeSet("18-29"))
+ <RangeSet("10-34")>
+ >>> RangeSet("10-19 30-34").union(RangeSet("22 32"))
+ <RangeSet("10-19 22 30-34")>
+ """
out = []
z = 0
for p, d in heapq.merge(zip(self.data, itertools.cycle((+1, -1))),
@@ -125,7 +137,13 @@
def intersect(self, other):
"""Return a new RangeSet representing the intersection of this
- RangeSet with the argument."""
+ RangeSet with the argument.
+
+ >>> RangeSet("10-19 30-34").intersect(RangeSet("18-32"))
+ <RangeSet("18-19 30-32")>
+ >>> RangeSet("10-19 30-34").intersect(RangeSet("22-28"))
+ <RangeSet("")>
+ """
out = []
z = 0
for p, d in heapq.merge(zip(self.data, itertools.cycle((+1, -1))),
@@ -137,7 +155,13 @@
def subtract(self, other):
"""Return a new RangeSet representing subtracting the argument
- from this RangeSet."""
+ from this RangeSet.
+
+ >>> RangeSet("10-19 30-34").subtract(RangeSet("18-32"))
+ <RangeSet("10-17 33-34")>
+ >>> RangeSet("10-19 30-34").subtract(RangeSet("22-28"))
+ <RangeSet("10-19 30-34")>
+ """
out = []
z = 0
@@ -150,7 +174,13 @@
def overlaps(self, other):
"""Returns true if the argument has a nonempty overlap with this
- RangeSet."""
+ RangeSet.
+
+ >>> RangeSet("10-19 30-34").overlaps(RangeSet("18-32"))
+ True
+ >>> RangeSet("10-19 30-34").overlaps(RangeSet("22-28"))
+ False
+ """
# This is like intersect, but we can stop as soon as we discover the
# output is going to be nonempty.
@@ -164,7 +194,11 @@
def size(self):
"""Returns the total size of the RangeSet (ie, how many integers
- are in the set)."""
+ are in the set).
+
+ >>> RangeSet("10-19 30-34").size()
+ 15
+ """
total = 0
for i, p in enumerate(self.data):
@@ -173,3 +207,37 @@
else:
total -= p
return total
+
+ def map_within(self, other):
+ """'other' should be a subset of 'self'. Returns a RangeSet
+ representing what 'other' would get translated to if the integers
+ of 'self' were translated down to be contiguous starting at zero.
+
+ >>> RangeSet("0-9").map_within(RangeSet("3-4"))
+ <RangeSet("3-4")>
+ >>> RangeSet("10-19").map_within(RangeSet("13-14"))
+ <RangeSet("3-4")>
+ >>> RangeSet("10-19 30-39").map_within(RangeSet("17-19 30-32"))
+ <RangeSet("7-12")>
+ >>> RangeSet("10-19 30-39").map_within(RangeSet("12-13 17-19 30-32"))
+ <RangeSet("2-3 7-12")>
+ """
+
+ out = []
+ offset = 0
+ start = None
+ for p, d in heapq.merge(zip(self.data, itertools.cycle((-5, +5))),
+ zip(other.data, itertools.cycle((-1, +1)))):
+ if d == -5:
+ start = p
+ elif d == +5:
+ offset += p-start
+ start = None
+ else:
+ out.append(offset + p - start)
+ return RangeSet(data=out)
+
+
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod()