blob: 7eb60d9280a2fe5f891740e8b1834058c0920b5c [file] [log] [blame]
Doug Zongker424296a2014-09-02 08:53:09 -07001# Copyright (C) 2014 The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
Doug Zongkerfc44a512014-08-26 13:10:25 -070015import bisect
16import os
Doug Zongkerfc44a512014-08-26 13:10:25 -070017import struct
Doug Zongkerfc44a512014-08-26 13:10:25 -070018from hashlib import sha1
19
Dan Albert8b72aef2015-03-23 19:13:21 -070020import rangelib
21
Doug Zongkerfc44a512014-08-26 13:10:25 -070022
class SparseImage(object):
  """Wraps a sparse image file into an image object.

  Wraps a sparse image file (and optional file map and clobbered_blocks) into
  an image object suitable for passing to BlockImageDiff. file_map contains
  the mapping between files and their blocks. clobbered_blocks contains the set
  of blocks that should be always written to the target regardless of the old
  contents (i.e. copying instead of patching). clobbered_blocks should be in
  the form of a string like "0" or "0 1-5 8".
  """

  def __init__(self, simg_fn, file_map_fn=None, clobbered_blocks=None,
               mode="rb", build_map=True):
    """Open a sparse image and (optionally) build its block maps.

    Args:
      simg_fn: path to the sparse image file.
      file_map_fn: optional path to a block map file; each line is
          "<filename> <ranges>" (see LoadFileBlockMap).
      clobbered_blocks: optional RangeSet-parseable string (e.g. "0 1-5 8")
          of blocks to always copy rather than patch.
      mode: mode passed straight to open(); defaults to read-only binary.
      build_map: if False, only the 28-byte sparse header is read and
          validated -- no chunk scan, no care_map/offset_map.  Presumably
          for callers that only manipulate the header (e.g.
          AppendFillChunk); confirm against callers.

    Raises:
      ValueError: bad magic, unsupported version, or unexpected header sizes.
    """
    # Keep the file object open for the object's lifetime; _GetRangeData
    # and AppendFillChunk seek/read/write through it later.
    self.simg_f = f = open(simg_fn, mode)

    # Sparse image header: 28 bytes, little-endian.
    # <I4H4I = magic, major_version, minor_version, file_hdr_sz,
    #          chunk_hdr_sz, blk_sz, total_blks, total_chunks.
    header_bin = f.read(28)
    header = struct.unpack("<I4H4I", header_bin)

    magic = header[0]
    major_version = header[1]
    minor_version = header[2]
    file_hdr_sz = header[3]
    chunk_hdr_sz = header[4]
    self.blocksize = blk_sz = header[5]
    self.total_blocks = total_blks = header[6]
    self.total_chunks = total_chunks = header[7]

    # Only format 1.0 with the standard 28/12-byte header sizes is supported.
    if magic != 0xED26FF3A:
      raise ValueError("Magic should be 0xED26FF3A but is 0x%08X" % (magic,))
    if major_version != 1 or minor_version != 0:
      raise ValueError("I know about version 1.0, but this is version %u.%u" %
                       (major_version, minor_version))
    if file_hdr_sz != 28:
      raise ValueError("File header size was expected to be 28, but is %u." %
                       (file_hdr_sz,))
    if chunk_hdr_sz != 12:
      raise ValueError("Chunk header size was expected to be 12, but is %u." %
                       (chunk_hdr_sz,))

    print("Total of %u %u-byte output blocks in %u input chunks."
          % (total_blks, blk_sz, total_chunks))

    # Header-only mode: skip the chunk walk below entirely.
    if not build_map:
      return

    pos = 0   # in blocks
    care_data = []
    # offset_map entries are (start_block, num_blocks, file_offset, fill_data)
    # where exactly one of file_offset / fill_data is None.
    self.offset_map = offset_map = []
    self.clobbered_blocks = rangelib.RangeSet(data=clobbered_blocks)

    # Walk every chunk sequentially; f's position advances through the file
    # as we go, so the f.tell() recorded for raw chunks points at their data.
    for i in range(total_chunks):
      # Chunk header: <2H2I = chunk_type, reserved, chunk_sz (in blocks),
      # total_sz (chunk data + this 12-byte header).
      header_bin = f.read(12)
      header = struct.unpack("<2H2I", header_bin)
      chunk_type = header[0]
      chunk_sz = header[2]
      total_sz = header[3]
      data_sz = total_sz - 12

      if chunk_type == 0xCAC1:
        # Raw chunk: chunk_sz blocks of literal data follow.
        if data_sz != (chunk_sz * blk_sz):
          raise ValueError(
              "Raw chunk input size (%u) does not match output size (%u)" %
              (data_sz, chunk_sz * blk_sz))
        else:
          care_data.append(pos)
          care_data.append(pos + chunk_sz)
          # Record where this chunk's data lives; skip over it rather than
          # reading it now.
          offset_map.append((pos, chunk_sz, f.tell(), None))
          pos += chunk_sz
          f.seek(data_sz, os.SEEK_CUR)

      elif chunk_type == 0xCAC2:
        # Fill chunk: a single 4-byte pattern, repeated to cover chunk_sz
        # blocks.  Store the raw 4-byte pattern, not an offset.
        fill_data = f.read(4)
        care_data.append(pos)
        care_data.append(pos + chunk_sz)
        offset_map.append((pos, chunk_sz, None, fill_data))
        pos += chunk_sz

      elif chunk_type == 0xCAC3:
        # Don't-care chunk: carries no data; just advances the output
        # position, leaving a hole in the care_map.
        if data_sz != 0:
          raise ValueError("Don't care chunk input size is non-zero (%u)" %
                           (data_sz))
        else:
          pos += chunk_sz

      elif chunk_type == 0xCAC4:
        raise ValueError("CRC32 chunks are not supported")

      else:
        raise ValueError("Unknown chunk type 0x%04X not supported" %
                         (chunk_type,))

    # care_data is a flat [start, end, start, end, ...] list of block ranges
    # covered by raw/fill chunks.
    self.care_map = rangelib.RangeSet(care_data)
    # Sorted chunk start blocks, for bisecting a block number to its chunk.
    self.offset_index = [i[0] for i in offset_map]

    # Bug: 20881595
    # Introduce extended blocks as a workaround for the bug. dm-verity may
    # touch blocks that are not in the care_map due to block device
    # read-ahead. It will fail if such blocks contain non-zeroes. We zero out
    # the extended blocks explicitly to avoid dm-verity failures. 512 blocks
    # are the maximum read-ahead we configure for dm-verity block devices.
    extended = self.care_map.extend(512)
    all_blocks = rangelib.RangeSet(data=(0, self.total_blocks))
    extended = extended.intersect(all_blocks).subtract(self.care_map)
    self.extended = extended

    if file_map_fn:
      self.LoadFileBlockMap(file_map_fn, self.clobbered_blocks)
    else:
      # No map file: treat the whole cared-for region as one blob.
      self.file_map = {"__DATA": self.care_map}

  def AppendFillChunk(self, data, blocks):
    """Append a fill chunk to the image and update the sparse header.

    Args:
      data: the 32-bit fill pattern (packed as the trailing I below).
      blocks: number of output blocks the new chunk covers.

    Requires the file to have been opened in a writable mode.  Note this
    does NOT update the in-memory care_map/offset_map, only the counters.
    """
    f = self.simg_f

    # Append a fill chunk: type 0xCAC2, reserved 0, chunk_sz=blocks,
    # total_sz=16 (12-byte header + 4-byte pattern), then the pattern.
    f.seek(0, os.SEEK_END)
    f.write(struct.pack("<2H3I", 0xCAC2, 0, blocks, 16, data))

    # Update the sparse header
    self.total_blocks += blocks
    self.total_chunks += 1

    # total_blks/total_chunks sit at byte offset 16 of the 28-byte header
    # (after magic(4) + 4 shorts(8) + blk_sz(4)).
    f.seek(16, os.SEEK_SET)
    f.write(struct.pack("<2I", self.total_blocks, self.total_chunks))

  def RangeSha1(self, ranges):
    """Return the hex SHA-1 of the image data covered by 'ranges'."""
    h = sha1()
    for data in self._GetRangeData(ranges):
      h.update(data)
    return h.hexdigest()

  def ReadRangeSet(self, ranges):
    """Return the data in 'ranges' as a list of arbitrarily-sized pieces."""
    return [d for d in self._GetRangeData(ranges)]

  def TotalSha1(self, include_clobbered_blocks=False):
    """Return the SHA-1 hash of all data in the 'care' regions.

    If include_clobbered_blocks is True, it returns the hash including the
    clobbered_blocks."""
    ranges = self.care_map
    if not include_clobbered_blocks:
      ranges = ranges.subtract(self.clobbered_blocks)
    return self.RangeSha1(ranges)

  def WriteRangeDataToFd(self, ranges, fd):
    """Write the image data covered by 'ranges' to the file object 'fd'."""
    for data in self._GetRangeData(ranges):
      fd.write(data)

  def _GetRangeData(self, ranges):
    """Generator that produces all the image data in 'ranges'. The
    number of individual pieces returned is arbitrary (and in
    particular is not necessarily equal to the number of ranges in
    'ranges'.

    This generator is stateful -- it depends on the open file object
    contained in this SparseImage, so you should not try to run two
    instances of this generator on the same object simultaneously."""

    f = self.simg_f
    for s, e in ranges:
      to_read = e-s
      # Find the chunk containing block s: last chunk start <= s.
      idx = bisect.bisect_right(self.offset_index, s) - 1
      chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]

      # for the first chunk we may be starting partway through it.
      remain = chunk_len - (s - chunk_start)
      this_read = min(remain, to_read)
      if filepos is not None:
        # Raw chunk: seek to the block's data and read it.
        p = filepos + ((s - chunk_start) * self.blocksize)
        f.seek(p, os.SEEK_SET)
        yield f.read(this_read * self.blocksize)
      else:
        # Fill chunk: replicate the 4-byte pattern blocksize/4 times per
        # block (hence the >> 2).
        yield fill_data * (this_read * (self.blocksize >> 2))
      to_read -= this_read

      while to_read > 0:
        # continue with following chunks if this range spans multiple chunks.
        idx += 1
        chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]
        this_read = min(chunk_len, to_read)
        if filepos is not None:
          f.seek(filepos, os.SEEK_SET)
          yield f.read(this_read * self.blocksize)
        else:
          yield fill_data * (this_read * (self.blocksize >> 2))
        to_read -= this_read

  def LoadFileBlockMap(self, fn, clobbered_blocks):
    """Populate self.file_map from the block map file 'fn'.

    Each line of 'fn' is "<filename> <ranges>".  Blocks in the care_map
    that belong to no file are classified into "__ZERO" (all-zero blocks),
    "__NONZERO-<i>" groups, and "__COPY" (the clobbered_blocks).

    NOTE(review): the zero-block comparison below relies on Python 2
    str/bytes equivalence -- under Python 3, f.read() in "rb" mode yields
    bytes which would never equal the str 'reference'; confirm before
    running on Python 3.
    """
    remaining = self.care_map
    self.file_map = out = {}

    with open(fn) as f:
      for line in f:
        fn, ranges = line.split(None, 1)
        ranges = rangelib.RangeSet.parse(ranges)
        out[fn] = ranges
        # Every file's blocks must lie entirely within the not-yet-claimed
        # portion of the care_map.
        assert ranges.size() == ranges.intersect(remaining).size()

        # Currently we assume that blocks in clobbered_blocks are not part of
        # any file.
        assert not clobbered_blocks.overlaps(ranges)
        remaining = remaining.subtract(ranges)

    remaining = remaining.subtract(clobbered_blocks)

    # For all the remaining blocks in the care_map (ie, those that
    # aren't part of the data for any file nor part of the clobbered_blocks),
    # divide them into blocks that are all zero and blocks that aren't.
    # (Zero blocks are handled specially because (1) there are usually
    # a lot of them and (2) bsdiff handles files with long sequences of
    # repeated bytes especially poorly.)

    zero_blocks = []
    nonzero_blocks = []
    reference = '\0' * self.blocksize

    # Workaround for bug 23227672. For squashfs, we don't have a system.map. So
    # the whole system image will be treated as a single file. But for some
    # unknown bug, the updater will be killed due to OOM when writing back the
    # patched image to flash (observed on lenok-userdebug MEA49). Prior to
    # getting a real fix, we evenly divide the non-zero blocks into smaller
    # groups (currently 1024 blocks or 4MB per group).
    # Bug: 23227672
    MAX_BLOCKS_PER_GROUP = 1024
    nonzero_groups = []

    f = self.simg_f
    for s, e in remaining:
      for b in range(s, e):
        # Locate the chunk holding block b and fetch that one block.
        idx = bisect.bisect_right(self.offset_index, b) - 1
        chunk_start, _, filepos, fill_data = self.offset_map[idx]
        if filepos is not None:
          filepos += (b-chunk_start) * self.blocksize
          f.seek(filepos, os.SEEK_SET)
          data = f.read(self.blocksize)
        else:
          if fill_data == reference[:4]:   # fill with all zeros
            data = reference
          else:
            # Non-zero fill pattern; no need to materialize the block, any
            # non-reference value classifies it as nonzero below.
            data = None

        if data == reference:
          zero_blocks.append(b)
          zero_blocks.append(b+1)
        else:
          nonzero_blocks.append(b)
          nonzero_blocks.append(b+1)

          # Flush a full group (note: len() counts range endpoints, so this
          # triggers at MAX_BLOCKS_PER_GROUP/2 actual blocks at minimum).
          if len(nonzero_blocks) >= MAX_BLOCKS_PER_GROUP:
            nonzero_groups.append(nonzero_blocks)
            # Clear the list.
            nonzero_blocks = []

    # Flush the final partial group, if any.
    if nonzero_blocks:
      nonzero_groups.append(nonzero_blocks)
      nonzero_blocks = []

    assert zero_blocks or nonzero_groups or clobbered_blocks

    if zero_blocks:
      out["__ZERO"] = rangelib.RangeSet(data=zero_blocks)
    if nonzero_groups:
      for i, blocks in enumerate(nonzero_groups):
        out["__NONZERO-%d" % i] = rangelib.RangeSet(data=blocks)
    if clobbered_blocks:
      out["__COPY"] = clobbered_blocks

  def ResetFileMap(self):
    """Throw away the file map and treat the entire image as
    undifferentiated data."""
    self.file_map = {"__DATA": self.care_map}